author    Ruan HE <ruan.he@orange.com>    2016-06-09 08:12:34 +0000
committer Gerrit Code Review <gerrit@172.30.200.206>    2016-06-09 08:12:34 +0000
commit    4bc079a2664f9a407e332291f34d174625a9d5ea (patch)
tree      7481cd5d0a9b3ce37c44c797a1e0d39881221cbe /keystone-moon
parent    2f179c5790fbbf6144205d3c6e5089e6eb5f048a (diff)
parent    2e7b4f2027a1147ca28301e4f88adf8274b39a1f (diff)
Merge "Update Keystone core to Mitaka."
Diffstat (limited to 'keystone-moon')
-rw-r--r-- keystone-moon/.gitignore | 4
-rw-r--r-- keystone-moon/.gitreview | 1
-rw-r--r-- keystone-moon/.mailmap | 1
-rw-r--r-- keystone-moon/.testr.conf | 3
-rw-r--r-- keystone-moon/config-generator/keystone.conf | 1
-rw-r--r-- keystone-moon/doc/source/apache-httpd.rst | 74
-rw-r--r-- keystone-moon/doc/source/api_curl_examples.rst | 2
-rw-r--r-- keystone-moon/doc/source/architecture.rst | 8
-rw-r--r-- keystone-moon/doc/source/auth-totp.rst | 136
-rw-r--r-- keystone-moon/doc/source/community.rst | 4
-rw-r--r-- keystone-moon/doc/source/conf.py | 15
-rw-r--r-- keystone-moon/doc/source/configuration.rst | 331
-rw-r--r-- keystone-moon/doc/source/configure_federation.rst | 21
-rw-r--r-- keystone-moon/doc/source/configure_tokenless_x509.rst | 4
-rw-r--r-- keystone-moon/doc/source/configuringservices.rst | 76
-rw-r--r-- keystone-moon/doc/source/developing.rst | 149
-rw-r--r-- keystone-moon/doc/source/developing_drivers.rst | 2
-rw-r--r-- keystone-moon/doc/source/devref/development.environment.rst | 175
-rw-r--r-- keystone-moon/doc/source/event_notifications.rst | 27
-rw-r--r-- keystone-moon/doc/source/extensions.rst | 96
-rw-r--r-- keystone-moon/doc/source/federation/shibboleth.rst | 11
-rw-r--r-- keystone-moon/doc/source/http-api.rst | 4
-rw-r--r-- keystone-moon/doc/source/index.rst | 16
-rw-r--r-- keystone-moon/doc/source/installing.rst | 21
-rw-r--r-- keystone-moon/doc/source/key_terms.rst | 2
-rw-r--r-- keystone-moon/doc/source/man/keystone-manage.rst | 7
-rw-r--r-- keystone-moon/doc/source/mapping_combinations.rst | 65
-rw-r--r-- keystone-moon/doc/source/online_schema_migration_examples.rst | 24
-rw-r--r-- keystone-moon/doc/source/policy_mapping.rst | 18
-rw-r--r-- keystone-moon/doc/source/sample_config.rst | 12
-rw-r--r-- keystone-moon/doc/source/services.rst | 200
-rw-r--r-- keystone-moon/etc/default_catalog.templates | 37
-rw-r--r-- keystone-moon/etc/keystone-paste.ini | 41
-rw-r--r-- keystone-moon/etc/keystone.conf.sample | 814
-rw-r--r-- keystone-moon/etc/policy.json | 18
-rw-r--r-- keystone-moon/etc/policy.v3cloudsample.json | 66
-rwxr-xr-x keystone-moon/examples/pki/gen_pki.sh | 24
-rw-r--r-- keystone-moon/httpd/keystone-uwsgi-admin.ini | 22
-rw-r--r-- keystone-moon/httpd/keystone-uwsgi-public.ini | 22
-rw-r--r-- keystone-moon/httpd/keystone.py | 18
-rw-r--r-- keystone-moon/httpd/uwsgi-keystone.conf | 13
-rw-r--r-- keystone-moon/httpd/wsgi-keystone.conf | 22
-rw-r--r-- keystone-moon/keystone/assignment/V8_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/assignment/V8_backends/sql.py | 452
-rw-r--r-- keystone-moon/keystone/assignment/V8_role_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/assignment/V8_role_backends/sql.py | 80
-rw-r--r-- keystone-moon/keystone/assignment/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/assignment/backends/sql.py | 252
-rw-r--r-- keystone-moon/keystone/assignment/controllers.py | 383
-rw-r--r-- keystone-moon/keystone/assignment/core.py | 1188
-rw-r--r-- keystone-moon/keystone/assignment/role_backends/sql.py | 144
-rw-r--r-- keystone-moon/keystone/assignment/routers.py | 48
-rw-r--r-- keystone-moon/keystone/auth/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/auth/controllers.py | 46
-rw-r--r-- keystone-moon/keystone/auth/core.py | 2
-rw-r--r-- keystone-moon/keystone/auth/plugins/core.py | 36
-rw-r--r-- keystone-moon/keystone/auth/plugins/external.py | 2
-rw-r--r-- keystone-moon/keystone/auth/plugins/mapped.py | 38
-rw-r--r-- keystone-moon/keystone/auth/plugins/oauth1.py | 9
-rw-r--r-- keystone-moon/keystone/auth/plugins/password.py | 6
-rw-r--r-- keystone-moon/keystone/auth/plugins/saml2.py | 23
-rw-r--r-- keystone-moon/keystone/auth/plugins/totp.py | 99
-rw-r--r-- keystone-moon/keystone/catalog/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/catalog/backends/sql.py | 429
-rw-r--r-- keystone-moon/keystone/catalog/backends/templated.py | 160
-rw-r--r-- keystone-moon/keystone/catalog/controllers.py | 234
-rw-r--r-- keystone-moon/keystone/catalog/core.py | 388
-rw-r--r-- keystone-moon/keystone/catalog/routers.py | 142
-rw-r--r-- keystone-moon/keystone/catalog/schema.py | 21
-rw-r--r-- keystone-moon/keystone/cmd/cli.py | 381
-rw-r--r-- keystone-moon/keystone/cmd/manage.py | 2
-rw-r--r-- keystone-moon/keystone/common/authorization.py | 17
-rw-r--r-- keystone-moon/keystone/common/cache/_context_cache.py | 129
-rw-r--r-- keystone-moon/keystone/common/cache/backends/memcache_pool.py | 53
-rw-r--r-- keystone-moon/keystone/common/cache/backends/mongo.py | 554
-rw-r--r-- keystone-moon/keystone/common/cache/backends/noop.py | 7
-rw-r--r-- keystone-moon/keystone/common/cache/core.py | 352
-rw-r--r-- keystone-moon/keystone/common/config.py | 555
-rw-r--r-- keystone-moon/keystone/common/controller.py | 196
-rw-r--r-- keystone-moon/keystone/common/dependency.py | 2
-rw-r--r-- keystone-moon/keystone/common/driver_hints.py | 47
-rw-r--r-- keystone-moon/keystone/common/environment/__init__.py | 5
-rw-r--r-- keystone-moon/keystone/common/environment/eventlet_server.py | 30
-rw-r--r-- keystone-moon/keystone/common/extension.py | 1
-rw-r--r-- keystone-moon/keystone/common/json_home.py | 1
-rw-r--r-- keystone-moon/keystone/common/kvs/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/common/kvs/backends/inmemdb.py | 5
-rw-r--r-- keystone-moon/keystone/common/kvs/backends/memcached.py | 14
-rw-r--r-- keystone-moon/keystone/common/kvs/core.py | 66
-rw-r--r-- keystone-moon/keystone/common/ldap/core.py | 137
-rw-r--r-- keystone-moon/keystone/common/manager.py | 104
-rw-r--r-- keystone-moon/keystone/common/models.py | 13
-rw-r--r-- keystone-moon/keystone/common/openssl.py | 66
-rw-r--r-- keystone-moon/keystone/common/router.py | 14
-rw-r--r-- keystone-moon/keystone/common/sql/core.py | 110
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/README | 2
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/__init__.py | 17
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py | 21
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py | 21
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py | 21
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py | 21
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py | 43
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py | 41
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py | 90
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py | 25
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py | 18
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py | 41
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py | 35
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py | 39
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py | 46
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py | 40
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py) | 100
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py | 3
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py) | 4
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py) | 4
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py) | 4
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py) | 4
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py | 18
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py | 54
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py | 97
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py | 75
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py | 55
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py | 70
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py) | 17
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py | 43
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py | 60
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py | 76
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py | 42
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py | 66
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py | 46
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py | 125
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py | 43
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py | 62
-rw-r--r-- keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py | 50
-rw-r--r-- keystone-moon/keystone/common/sql/migration_helpers.py | 129
-rw-r--r-- keystone-moon/keystone/common/tokenless_auth.py | 11
-rw-r--r-- keystone-moon/keystone/common/utils.py | 92
-rw-r--r-- keystone-moon/keystone/common/validation/__init__.py | 13
-rw-r--r-- keystone-moon/keystone/common/validation/parameter_types.py | 9
-rw-r--r-- keystone-moon/keystone/common/validation/validators.py | 3
-rw-r--r-- keystone-moon/keystone/common/wsgi.py | 129
-rw-r--r-- keystone-moon/keystone/contrib/admin_crud/core.py | 235
-rw-r--r-- keystone-moon/keystone/contrib/ec2/controllers.py | 52
-rw-r--r-- keystone-moon/keystone/contrib/ec2/core.py | 4
-rw-r--r-- keystone-moon/keystone/contrib/ec2/routers.py | 6
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/__init__.py | 15
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py | 61
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py | 219
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py | 23
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py | 26
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_filter/routers.py | 153
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py | 4
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py | 25
-rw-r--r-- keystone-moon/keystone/contrib/endpoint_policy/routers.py | 4
-rw-r--r-- keystone-moon/keystone/contrib/federation/__init__.py | 15
-rw-r--r-- keystone-moon/keystone/contrib/federation/backends/sql.py | 357
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py | 29
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py | 14
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py | 13
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py | 10
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py | 18
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py | 27
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py | 30
-rw-r--r-- keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py | 26
-rw-r--r-- keystone-moon/keystone/contrib/federation/routers.py | 244
-rw-r--r-- keystone-moon/keystone/contrib/moon/backends/sql.py | 148
-rw-r--r-- keystone-moon/keystone/contrib/moon/core.py | 7
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/__init__.py | 15
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/backends/sql.py | 262
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py | 42
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py | 22
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py | 7
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py | 10
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py | 19
-rw-r--r-- keystone-moon/keystone/contrib/oauth1/routers.py | 145
-rw-r--r-- keystone-moon/keystone/contrib/revoke/__init__.py | 13
-rw-r--r-- keystone-moon/keystone/contrib/revoke/backends/sql.py | 96
-rw-r--r-- keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py | 23
-rw-r--r-- keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py | 15
-rw-r--r-- keystone-moon/keystone/contrib/revoke/routers.py | 26
-rw-r--r-- keystone-moon/keystone/contrib/s3/core.py | 59
-rw-r--r-- keystone-moon/keystone/contrib/simple_cert/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/contrib/simple_cert/routers.py | 36
-rw-r--r-- keystone-moon/keystone/contrib/user_crud/core.py | 122
-rw-r--r-- keystone-moon/keystone/credential/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/credential/backends/sql.py | 54
-rw-r--r-- keystone-moon/keystone/credential/core.py | 16
-rw-r--r-- keystone-moon/keystone/endpoint_policy/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/endpoint_policy/backends/sql.py | 20
-rw-r--r-- keystone-moon/keystone/endpoint_policy/core.py | 27
-rw-r--r-- keystone-moon/keystone/exception.py | 175
-rw-r--r-- keystone-moon/keystone/federation/V8_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/federation/V8_backends/sql.py | 389
-rw-r--r-- keystone-moon/keystone/federation/__init__.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py) | 15
-rw-r--r-- keystone-moon/keystone/federation/backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/federation/backends/sql.py | 393
-rw-r--r-- keystone-moon/keystone/federation/constants.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py) | 18
-rw-r--r-- keystone-moon/keystone/federation/controllers.py | 519
-rw-r--r-- keystone-moon/keystone/federation/core.py | 611
-rw-r--r-- keystone-moon/keystone/federation/idp.py | 615
-rw-r--r-- keystone-moon/keystone/federation/routers.py | 252
-rw-r--r-- keystone-moon/keystone/federation/schema.py | 115
-rw-r--r-- keystone-moon/keystone/federation/utils.py | 872
-rw-r--r-- keystone-moon/keystone/identity/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/identity/backends/ldap.py | 62
-rw-r--r-- keystone-moon/keystone/identity/backends/sql.py | 345
-rw-r--r-- keystone-moon/keystone/identity/controllers.py | 10
-rw-r--r-- keystone-moon/keystone/identity/core.py | 303
-rw-r--r-- keystone-moon/keystone/identity/mapping_backends/sql.py | 63
-rw-r--r-- keystone-moon/keystone/identity/shadow_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/identity/shadow_backends/sql.py | 73
-rw-r--r-- keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/de/LC_MESSAGES/keystone.po | 640
-rw-r--r-- keystone-moon/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/es/LC_MESSAGES/keystone.po | 847
-rw-r--r-- keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone.po | 970
-rw-r--r-- keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/it/LC_MESSAGES/keystone.po | 599
-rw-r--r-- keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone.po | 655
-rw-r--r-- keystone-moon/keystone/locale/keystone-log-critical.pot | 12
-rw-r--r-- keystone-moon/keystone/locale/keystone-log-error.pot | 99
-rw-r--r-- keystone-moon/keystone/locale/keystone-log-info.pot | 152
-rw-r--r-- keystone-moon/keystone/locale/keystone-log-warning.pot | 158
-rw-r--r-- keystone-moon/keystone/locale/keystone.pot | 1138
-rw-r--r-- keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po | 20
-rw-r--r-- keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po | 165
-rw-r--r-- keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po | 210
-rw-r--r-- keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po | 325
-rw-r--r-- keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone.po | 581
-rw-r--r-- keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po | 795
-rw-r--r-- keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone.po | 592
-rw-r--r-- keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po | 16
-rw-r--r-- keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po | 28
-rw-r--r-- keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po | 25
-rw-r--r-- keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po | 147
-rw-r--r-- keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po | 18
-rw-r--r-- keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po | 35
-rw-r--r-- keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone.po | 621
-rw-r--r-- keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po | 18
-rw-r--r-- keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone.po | 646
-rw-r--r-- keystone-moon/keystone/middleware/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/middleware/auth.py | 222
-rw-r--r-- keystone-moon/keystone/middleware/core.py | 199
-rw-r--r-- keystone-moon/keystone/models/revoke_model.py | 373
-rw-r--r-- keystone-moon/keystone/models/token_model.py | 28
-rw-r--r-- keystone-moon/keystone/notifications.py | 246
-rw-r--r-- keystone-moon/keystone/oauth1/__init__.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py) | 16
-rw-r--r-- keystone-moon/keystone/oauth1/backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/oauth1/backends/sql.py | 258
-rw-r--r-- keystone-moon/keystone/oauth1/controllers.py | 409
-rw-r--r-- keystone-moon/keystone/oauth1/core.py | 367
-rw-r--r-- keystone-moon/keystone/oauth1/routers.py | 154
-rw-r--r-- keystone-moon/keystone/oauth1/schema.py | 34
-rw-r--r-- keystone-moon/keystone/oauth1/validator.py | 177
-rw-r--r-- keystone-moon/keystone/policy/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/policy/backends/rules.py | 24
-rw-r--r-- keystone-moon/keystone/policy/backends/sql.py | 26
-rw-r--r-- keystone-moon/keystone/policy/core.py | 8
-rw-r--r-- keystone-moon/keystone/resource/V8_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/resource/V8_backends/sql.py | 260
-rw-r--r-- keystone-moon/keystone/resource/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/resource/backends/sql.py | 239
-rw-r--r-- keystone-moon/keystone/resource/config_backends/sql.py | 28
-rw-r--r-- keystone-moon/keystone/resource/controllers.py | 62
-rw-r--r-- keystone-moon/keystone/resource/core.py | 1321
-rw-r--r-- keystone-moon/keystone/resource/routers.py | 31
-rw-r--r-- keystone-moon/keystone/resource/schema.py | 8
-rw-r--r-- keystone-moon/keystone/revoke/__init__.py | 13
-rw-r--r-- keystone-moon/keystone/revoke/backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/revoke/backends/sql.py | 100
-rw-r--r-- keystone-moon/keystone/revoke/controllers.py | 44
-rw-r--r-- keystone-moon/keystone/revoke/core.py | 261
-rw-r--r-- keystone-moon/keystone/revoke/model.py | 13
-rw-r--r-- keystone-moon/keystone/revoke/routers.py | 29
-rw-r--r-- keystone-moon/keystone/server/backends.py | 22
-rw-r--r-- keystone-moon/keystone/server/common.py | 10
-rw-r--r-- keystone-moon/keystone/server/eventlet.py | 4
-rw-r--r-- keystone-moon/keystone/server/wsgi.py | 30
-rw-r--r-- keystone-moon/keystone/service.py | 138
-rw-r--r-- keystone-moon/keystone/tests/common/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/common/auth.py | 109
-rw-r--r-- keystone-moon/keystone/tests/functional/core.py | 85
-rw-r--r-- keystone-moon/keystone/tests/functional/shared/test_running.py | 22
-rw-r--r-- keystone-moon/keystone/tests/hacking/checks.py | 45
-rw-r--r-- keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py | 2
-rw-r--r-- keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py | 4
-rw-r--r-- keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py | 4
-rw-r--r-- keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py | 2
-rw-r--r-- keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/__init__.py | 19
-rw-r--r-- keystone-moon/keystone/tests/unit/assignment/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/assignment/role_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/assignment/role_backends/test_sql.py | 112
-rw-r--r-- keystone-moon/keystone/tests/unit/assignment/test_backends.py | 3755
-rw-r--r-- keystone-moon/keystone/tests/unit/assignment/test_core.py | 123
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/core_ldap.py | 4
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py | 39
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py | 108
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py | 71
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py | 30
-rw-r--r-- keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/catalog/test_backends.py | 588
-rw-r--r-- keystone-moon/keystone/tests/unit/catalog/test_core.py | 30
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_authorization.py | 161
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_ldap.py | 36
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_manager.py | 5
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_notifications.py | 329
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_sql_core.py | 10
-rw-r--r-- keystone-moon/keystone/tests/unit/common/test_utils.py | 48
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf | 4
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_sql.conf | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py | 299
-rw-r--r-- keystone-moon/keystone/tests/unit/core.py | 388
-rw-r--r-- keystone-moon/keystone/tests/unit/default_fixtures.py | 61
-rw-r--r-- keystone-moon/keystone/tests/unit/external/README.rst | 9
-rw-r--r-- keystone-moon/keystone/tests/unit/external/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/external/test_timeutils.py | 33
-rw-r--r-- keystone-moon/keystone/tests/unit/fakeldap.py | 61
-rw-r--r-- keystone-moon/keystone/tests/unit/filtering.py | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/identity/test_backends.py | 1297
-rw-r--r-- keystone-moon/keystone/tests/unit/identity/test_controllers.py | 65
-rw-r--r-- keystone-moon/keystone/tests/unit/identity/test_core.py | 4
-rw-r--r-- keystone-moon/keystone/tests/unit/identity_mapping.py | 7
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/__init__.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/appserver.py | 6
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/auth_plugins.py | 34
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/cache.py | 17
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/database.py | 75
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/hacking.py | 176
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/ksfixtures/policy.py | 33
-rw-r--r-- keystone-moon/keystone/tests/unit/mapping_fixtures.py | 176
-rw-r--r-- keystone-moon/keystone/tests/unit/policy/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/policy/test_backends.py | 86
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/backends/test_sql.py (renamed from keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py) | 21
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/config_backends/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/config_backends/test_sql.py | 53
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/test_backends.py | 1669
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/test_controllers.py | 57
-rw-r--r-- keystone-moon/keystone/tests/unit/resource/test_core.py | 692
-rw-r--r-- keystone-moon/keystone/tests/unit/rest.py | 28
-rw-r--r-- keystone-moon/keystone/tests/unit/schema/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/schema/v2.py | 161
-rw-r--r-- keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py | 453
-rw-r--r-- keystone-moon/keystone/tests/unit/test_auth.py | 202
-rw-r--r-- keystone-moon/keystone/tests/unit/test_auth_plugin.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py | 23
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py | 5
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_kvs.py | 66
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_ldap.py | 1285
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py | 29
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_rules.py | 19
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_sql.py | 619
-rw-r--r-- keystone-moon/keystone/tests/unit/test_backend_templated.py | 52
-rw-r--r-- keystone-moon/keystone/tests/unit/test_catalog.py | 131
-rw-r--r-- keystone-moon/keystone/tests/unit/test_cert_setup.py | 37
-rw-r--r-- keystone-moon/keystone/tests/unit/test_cli.py | 242
-rw-r--r-- keystone-moon/keystone/tests/unit/test_config.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/test_contrib_s3_core.py | 56
-rw-r--r-- keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py | 10
-rw-r--r-- keystone-moon/keystone/tests/unit/test_credential.py | 265
-rw-r--r-- keystone-moon/keystone/tests/unit/test_driver_hints.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/test_entry_points.py | 48
-rw-r--r-- keystone-moon/keystone/tests/unit/test_exception.py | 74
-rw-r--r-- keystone-moon/keystone/tests/unit/test_hacking_checks.py | 42
-rw-r--r-- keystone-moon/keystone/tests/unit/test_kvs.py | 38
-rw-r--r-- keystone-moon/keystone/tests/unit/test_ldap_livetest.py | 10
-rw-r--r-- keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py | 4
-rw-r--r-- keystone-moon/keystone/tests/unit/test_middleware.py | 620
-rw-r--r-- keystone-moon/keystone/tests/unit/test_policy.py | 41
-rw-r--r-- keystone-moon/keystone/tests/unit/test_revoke.py | 76
-rw-r--r-- keystone-moon/keystone/tests/unit/test_sql_livetest.py | 24
-rw-r--r-- keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py | 353
-rw-r--r-- keystone-moon/keystone/tests/unit/test_sql_upgrade.py | 1234
-rw-r--r-- keystone-moon/keystone/tests/unit/test_token_provider.py | 10
-rw-r--r-- keystone-moon/keystone/tests/unit/test_url_middleware.py | 1
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v2.py | 150
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v2_controller.py | 75
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3.py | 681
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_assignment.py | 2419
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_auth.py | 3237
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_catalog.py | 349
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_credential.py | 242
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_domain_config.py | 259
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py | 58
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_federation.py | 562
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_filters.py | 57
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_identity.py | 461
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_oauth1.py | 66
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_os_revoke.py | 10
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_policy.py | 29
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_protection.py | 739
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_resource.py | 1434
-rw-r--r-- keystone-moon/keystone/tests/unit/test_v3_trust.py | 403
-rw-r--r-- keystone-moon/keystone/tests/unit/test_validation.py | 352
-rw-r--r-- keystone-moon/keystone/tests/unit/test_versions.py | 257
-rw-r--r-- keystone-moon/keystone/tests/unit/test_wsgi.py | 141
-rw-r--r-- keystone-moon/keystone/tests/unit/tests/test_core.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/token/test_backends.py | 551
-rw-r--r-- keystone-moon/keystone/tests/unit/token/test_fernet_provider.py | 428
-rw-r--r-- keystone-moon/keystone/tests/unit/token/test_provider.py | 4
-rw-r--r-- keystone-moon/keystone/tests/unit/token/test_token_data_helper.py | 3
-rw-r--r-- keystone-moon/keystone/tests/unit/token/test_token_model.py | 2
-rw-r--r-- keystone-moon/keystone/tests/unit/trust/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/tests/unit/trust/test_backends.py | 172
-rw-r--r-- keystone-moon/keystone/tests/unit/utils.py | 4
-rw-r--r-- keystone-moon/keystone/token/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/token/_simple_cert.py | 91
-rw-r--r-- keystone-moon/keystone/token/controllers.py | 22
-rw-r--r-- keystone-moon/keystone/token/persistence/__init__.py | 2
-rw-r--r-- keystone-moon/keystone/token/persistence/backends/kvs.py | 23
-rw-r--r-- keystone-moon/keystone/token/persistence/backends/memcache.py | 6
-rw-r--r-- keystone-moon/keystone/token/persistence/backends/memcache_pool.py | 6
-rw-r--r-- keystone-moon/keystone/token/persistence/backends/sql.py | 141
-rw-r--r-- keystone-moon/keystone/token/persistence/core.py | 25
-rw-r--r-- keystone-moon/keystone/token/provider.py | 128
-rw-r--r-- keystone-moon/keystone/token/providers/common.py | 248
-rw-r--r-- keystone-moon/keystone/token/providers/fernet/core.py | 239
-rw-r--r-- keystone-moon/keystone/token/providers/fernet/token_formatters.py | 543
-rw-r--r-- keystone-moon/keystone/token/providers/fernet/utils.py | 41
-rw-r--r-- keystone-moon/keystone/token/providers/pki.py | 5
-rw-r--r-- keystone-moon/keystone/token/providers/pkiz.py | 5
-rw-r--r-- keystone-moon/keystone/trust/__init__.py | 1
-rw-r--r-- keystone-moon/keystone/trust/backends/sql.py | 78
-rw-r--r-- keystone-moon/keystone/trust/controllers.py | 42
-rw-r--r-- keystone-moon/keystone/trust/core.py | 29
-rw-r--r-- keystone-moon/keystone/v2_crud/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/v2_crud/admin_crud.py | 240
-rw-r--r-- keystone-moon/keystone/v2_crud/user_crud.py | 134
-rw-r--r-- keystone-moon/keystone/version/__init__.py | 0
-rw-r--r-- keystone-moon/keystone/version/controllers.py | 215
-rw-r--r-- keystone-moon/keystone/version/routers.py | 80
-rw-r--r-- keystone-moon/keystone/version/service.py | 161
-rw-r--r-- keystone-moon/rally-jobs/keystone.yaml | 14
-rw-r--r-- keystone-moon/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml | 13
-rw-r--r-- keystone-moon/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml | 11
-rw-r--r-- keystone-moon/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml | 5
-rw-r--r-- keystone-moon/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml | 17
-rw-r--r-- keystone-moon/releasenotes/notes/admin_token-a5678d712783c145.yaml | 14
-rw-r--r-- keystone-moon/releasenotes/notes/admin_token-c634ec12fc714255.yaml | 11
-rw-r--r-- keystone-moon/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml | 13
-rw-r--r-- keystone-moon/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml | 8
-rw-r--r-- keystone-moon/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml | 21
-rw-r--r-- keystone-moon/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml | 9
-rw-r--r-- keystone-moon/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml | 8
-rw-r--r-- keystone-moon/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml | 26
-rw-r--r-- keystone-moon/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml | 10
-rw-r--r-- keystone-moon/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml | 10
-rw-r--r-- keystone-moon/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml | 25
-rw-r--r-- keystone-moon/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml | 9
-rw-r--r-- keystone-moon/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml | 12
-rw-r--r-- keystone-moon/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/is-admin-24b34238c83b3a82.yaml | 14
-rw-r--r-- keystone-moon/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml | 8
-rw-r--r-- keystone-moon/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml | 8
-rw-r--r-- keystone-moon/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml | 5
-rw-r--r-- keystone-moon/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml | 17
-rw-r--r-- keystone-moon/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml | 4
-rw-r--r-- keystone-moon/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml | 44
-rw-r--r-- keystone-moon/releasenotes/notes/request_context-e143ba9c446a5952.yaml | 7
-rw-r--r-- keystone-moon/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml | 12
-rw-r--r-- keystone-moon/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/totp-40d93231714c6a20.yaml | 9
-rw-r--r-- keystone-moon/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml | 6
-rw-r--r-- keystone-moon/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml | 5
-rw-r--r-- keystone-moon/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml | 6
-rw-r--r-- keystone-moon/requirements.txt | 63
-rw-r--r-- keystone-moon/setup.cfg | 43
-rw-r--r-- keystone-moon/test-requirements.txt | 27
-rw-r--r-- keystone-moon/tests-py3-blacklist.txt | 10
-rwxr-xr-x keystone-moon/tools/pretty_tox_py3.sh | 12
-rwxr-xr-x keystone-moon/tools/sample_data.sh | 49
-rw-r--r-- keystone-moon/tox.ini | 145
514 files changed, 49269 insertions, 18743 deletions
diff --git a/keystone-moon/.gitignore b/keystone-moon/.gitignore
index efa3c29d..9c909712 100644
--- a/keystone-moon/.gitignore
+++ b/keystone-moon/.gitignore
@@ -31,3 +31,7 @@ keystone/tests/tmp/
keystone/locale/*/LC_MESSAGES/*.mo
.testrepository/
*.db
+# Files created by releasenotes build
+releasenotes/build
+# sample config included in docs
+doc/source/_static/keystone.conf.sample
diff --git a/keystone-moon/.gitreview b/keystone-moon/.gitreview
index cd914fe0..19bb5d17 100644
--- a/keystone-moon/.gitreview
+++ b/keystone-moon/.gitreview
@@ -2,3 +2,4 @@
host=review.openstack.org
port=29418
project=openstack/keystone.git
+defaultbranch=stable/mitaka
diff --git a/keystone-moon/.mailmap b/keystone-moon/.mailmap
index ed8b7759..25416f95 100644
--- a/keystone-moon/.mailmap
+++ b/keystone-moon/.mailmap
@@ -26,3 +26,4 @@ Sirish Bitra <sirish.bitra@gmail.com> root <root@bsirish.(none)>
Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
Chmouel Boudjnah <chmouel@enovance.com> <chmouel@chmouel.com>
Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
+<guang.yee@hpe.com> <guang.yee@hp.com>
diff --git a/keystone-moon/.testr.conf b/keystone-moon/.testr.conf
index 74698954..20703b35 100644
--- a/keystone-moon/.testr.conf
+++ b/keystone-moon/.testr.conf
@@ -1,8 +1,5 @@
[DEFAULT]
test_command=
- OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
- OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./keystone/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
diff --git a/keystone-moon/config-generator/keystone.conf b/keystone-moon/config-generator/keystone.conf
index 0e00d489..fc5ef29c 100644
--- a/keystone-moon/config-generator/keystone.conf
+++ b/keystone-moon/config-generator/keystone.conf
@@ -3,6 +3,7 @@ output_file = etc/keystone.conf.sample
wrap_width = 79
namespace = keystone
namespace = keystone.notifications
+namespace = oslo.cache
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
diff --git a/keystone-moon/doc/source/apache-httpd.rst b/keystone-moon/doc/source/apache-httpd.rst
index dbebc86e..1436ddad 100644
--- a/keystone-moon/doc/source/apache-httpd.rst
+++ b/keystone-moon/doc/source/apache-httpd.rst
@@ -19,17 +19,73 @@
Running Keystone in HTTPD
=========================
-.. WARNING::
+mod_proxy_uwsgi
+---------------
+
+The recommended keystone deployment is to have a real web server such as Apache
+HTTPD or nginx handle the HTTP connections and proxy requests to an independent
+keystone server (or servers) running under a wsgi container such as uwsgi or
+gunicorn. The typical deployment will have several applications proxied by the
+web server (for example horizon on /dashboard and keystone on /identity,
+/identity_admin, port :5000, and :35357). Proxying allows the applications to
+be shut down and restarted independently, and a problem in one application
+isn't going to affect the web server or other applications. The servers can
+easily be run in their own virtualenvs.
+
+The httpd/ directory contains sample files for configuring HTTPD to proxy
+requests to keystone servers running under uwsgi.
+
+Copy the `httpd/uwsgi-keystone.conf` sample configuration file to the
+appropriate location for your Apache server; on Debian/Ubuntu systems it is::
+
+ /etc/apache2/sites-available/uwsgi-keystone.conf
+
+On Red Hat based systems it is::
+
+ /etc/httpd/conf.d/uwsgi-keystone.conf
+
+Update the file to match your system configuration. Enable TLS by supplying the
+correct certificates.
+
+Enable mod_proxy_uwsgi.
+
+* On Ubuntu the required package is libapache2-mod-proxy-uwsgi; enable using
+ ``sudo a2enmod proxy``
+* On Fedora the required package is mod_proxy_uwsgi; enable by creating a file
+ ``/etc/httpd/conf.modules.d/11-proxy_uwsgi.conf`` containing
+ ``LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so``
- Running Keystone under HTTPD in the recommended (and tested) configuration
- does not support the use of ``Transfer-Encoding: chunked``. This is due to
- a limitation with the WSGI spec and the implementation used by
- ``mod_wsgi``. It is recommended that all clients assume Keystone will not
- support ``Transfer-Encoding: chunked``.
+Enable the site by creating a symlink from the file in ``sites-available`` to
+``sites-enabled``, for example, on Debian/Ubuntu systems
+(not required on Red Hat based systems)::
+
+ ln -s /etc/apache2/sites-available/uwsgi-keystone.conf /etc/apache2/sites-enabled/
+
+Start or restart HTTPD to pick up the new configuration.
+
+Now configure and start the uwsgi services. Copy the
+`httpd/keystone-uwsgi-admin.ini` and `httpd/keystone-uwsgi-public.ini` files to
+`/etc/keystone`. Update the files to match your system configuration (for
+example, you'll want to set the number of threads for the public and admin
+servers).
+Start up the keystone servers using uwsgi::
+
+ $ sudo pip install uwsgi
+ $ uwsgi /etc/keystone/keystone-uwsgi-admin.ini
+ $ uwsgi /etc/keystone/keystone-uwsgi-public.ini
+
+
+mod_wsgi
+--------
+
+.. WARNING::
-Files
------
+ Running Keystone under HTTPD in this configuration does not support the use
+ of ``Transfer-Encoding: chunked``. This is due to a limitation with the
+ WSGI spec and the implementation used by ``mod_wsgi``. It is recommended
+ that all clients assume Keystone will not support
+ ``Transfer-Encoding: chunked``.
Copy the ``httpd/wsgi-keystone.conf`` sample configuration file to the
appropriate location for your Apache server, on Debian/Ubuntu systems
@@ -55,7 +111,7 @@ Enable the site by creating a symlink from the file in ``sites-available`` to
``sites-enabled``, for example, on Debian/Ubuntu systems
(not required on Red Hat based systems)::
- ln -s /etc/apache2/sites-available/keystone.conf /etc/apache2/sites-enabled/
+ ln -s /etc/apache2/sites-available/wsgi-keystone.conf /etc/apache2/sites-enabled/
Restart Apache to have it start serving keystone.
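Once Apache is serving keystone (under either deployment above), a quick sanity
check is to fetch the version document from the service root. A minimal sketch,
assuming the default public port used elsewhere in this guide:

.. code-block:: python

    import json
    import urllib.error
    import urllib.request

    # Keystone answers '/' with a 300 Multiple Choices version document;
    # urllib surfaces the 300 status as an HTTPError, but the body of either
    # object is readable the same way.
    try:
        resp = urllib.request.urlopen('http://localhost:5000/')
    except urllib.error.HTTPError as exc:
        resp = exc
    body = json.loads(resp.read().decode('utf-8'))
    print([v['id'] for v in body['versions']['values']])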
diff --git a/keystone-moon/doc/source/api_curl_examples.rst b/keystone-moon/doc/source/api_curl_examples.rst
index c88c7fd0..066efe97 100644
--- a/keystone-moon/doc/source/api_curl_examples.rst
+++ b/keystone-moon/doc/source/api_curl_examples.rst
@@ -28,7 +28,7 @@ Tokens
Default scope
-------------
-Get an token with default scope (may be unscoped):
+Get a token with default scope (may be unscoped):
.. code-block:: bash
diff --git a/keystone-moon/doc/source/architecture.rst b/keystone-moon/doc/source/architecture.rst
index c119e2bd..773aa6d4 100644
--- a/keystone-moon/doc/source/architecture.rst
+++ b/keystone-moon/doc/source/architecture.rst
@@ -165,10 +165,18 @@ The corresponding drivers for the services are:
* :mod:`keystone.assignment.core.AssignmentDriverV8`
* :mod:`keystone.assignment.core.RoleDriverV8`
* :mod:`keystone.catalog.core.CatalogDriverV8`
+* :mod:`keystone.credential.core.CredentialDriverV8`
+* :mod:`keystone.endpoint_policy.core.EndpointPolicyDriverV8`
+* :mod:`keystone.federation.core.FederationDriverV8`
* :mod:`keystone.identity.core.IdentityDriverV8`
+* :mod:`keystone.identity.core.MappingDriverV8`
+* :mod:`keystone.oauth1.core.Oauth1DriverV8`
* :mod:`keystone.policy.core.PolicyDriverV8`
+* :mod:`keystone.resource.core.DomainConfigDriverV8`
* :mod:`keystone.resource.core.ResourceDriverV8`
+* :mod:`keystone.revoke.core.RevokeDriverV8`
* :mod:`keystone.token.core.TokenDriverV8`
+* :mod:`keystone.trust.core.TrustDriverV8`
If you implement a backend driver for one of the Keystone services, you're
expected to subclass from these classes.
diff --git a/keystone-moon/doc/source/auth-totp.rst b/keystone-moon/doc/source/auth-totp.rst
new file mode 100644
index 00000000..4e81757f
--- /dev/null
+++ b/keystone-moon/doc/source/auth-totp.rst
@@ -0,0 +1,136 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+===================================
+Time-based One-time Password (TOTP)
+===================================
+
+Configuring TOTP
+================
+
+TOTP is not enabled in Keystone by default. To enable it, add the ``totp``
+authentication method to the ``[auth]`` section in ``keystone.conf``:
+
+.. code-block:: ini
+
+ [auth]
+ methods = external,password,token,oauth1,totp
+
+For a user to have access to TOTP, they must have configured TOTP credentials
+in Keystone and have a TOTP device (e.g. `Google Authenticator`_).
+
+.. _Google Authenticator: http://www.google.com/2step
+
+TOTP uses a base32-encoded string for the secret. The secret must be at least
+128 bits (16 bytes). The following Python code can be used to generate a TOTP
+secret:
+
+.. code-block:: python
+
+ import base64
+ message = b'1234567890123456'
+ print(base64.b32encode(message).decode('ascii').rstrip('='))
+
+Example output::
+
+ GEZDGNBVGY3TQOJQGEZDGNBVGY
+
+This generated secret can then be used to add new 'totp' credentials to a
+specific user.
+
+Create a TOTP credential
+------------------------
+
+Create ``totp`` credentials for user:
+
+.. code-block:: bash
+
+ USER_ID=b7793000f8d84c79af4e215e9da78654
+ SECRET=GEZDGNBVGY3TQOJQGEZDGNBVGY
+
+ curl -i \
+ -H "Content-Type: application/json" \
+ -d '
+ {
+ "credential": {
+ "blob": "'$SECRET'",
+ "type": "totp",
+ "user_id": "'$USER_ID'"
+ }
+ }' \
+ http://localhost:5000/v3/credentials ; echo
+
+Google Authenticator
+--------------------
+
+On a device, install Google Authenticator and inside the app click on 'Set up
+account' and then click on 'Enter provided key'. In the input fields enter
+account name and secret. Optionally a QR code can be generated programmatically
+to avoid having to type the information.
+
+QR code
+-------
+
+Create TOTP QR code for device:
+
+.. code-block:: python
+
+ import qrcode
+
+ secret='GEZDGNBVGY3TQOJQGEZDGNBVGY'
+ uri = 'otpauth://totp/{name}?secret={secret}&issuer={issuer}'.format(
+ name='name',
+ secret=secret,
+ issuer='Keystone')
+
+ img = qrcode.make(uri)
+ img.save('totp.png')
+
+In the Google Authenticator app click on 'Set up account' and then click on
+'Scan a barcode', and then scan the 'totp.png' image. This should create a new
+TOTP entry in the application.
+
+Authenticate with TOTP
+======================
+
+Google Authenticator will generate a 6-digit PIN (passcode) every few seconds.
+Use the passcode and your user ID to authenticate using the ``totp`` method.
+
+Tokens
+------
+
+Get a token with default scope (may be unscoped) using totp:
+
+.. code-block:: bash
+
+ USER_ID=b7793000f8d84c79af4e215e9da78654
+ PASSCODE=012345
+
+ curl -i \
+ -H "Content-Type: application/json" \
+ -d '
+ { "auth": {
+ "identity": {
+ "methods": [
+ "totp"
+ ],
+ "totp": {
+ "user": {
+ "id": "'$USER_ID'",
+ "passcode": "'$PASSCODE'"
+ }
+ }
+ }
+ }
+ }' \
+ http://localhost:5000/v3/auth/tokens ; echo
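For reference, the 6-digit passcode the server verifies is derived
deterministically from the shared secret, as specified by RFC 6238. A minimal
sketch of the derivation, assuming the SHA-1 digest, 30-second step, and 6
digits that Google Authenticator defaults to:

.. code-block:: python

    import base64
    import hmac
    import struct
    import time

    def totp(secret_b32, interval=30, digits=6):
        # Restore the '=' padding that was stripped when the secret was shown.
        key = base64.b32decode(secret_b32 + '=' * (-len(secret_b32) % 8))
        # HMAC-SHA1 over the big-endian count of elapsed time steps.
        counter = struct.pack('>Q', int(time.time() // interval))
        digest = hmac.new(key, counter, 'sha1').digest()
        # Dynamic truncation (RFC 4226): 31 bits at an offset chosen by the
        # low nibble of the last byte, reduced to the requested digit count.
        offset = digest[-1] & 0x0F
        code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
        return str(code % 10 ** digits).zfill(digits)

    print(totp('GEZDGNBVGY3TQOJQGEZDGNBVGY'))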
diff --git a/keystone-moon/doc/source/community.rst b/keystone-moon/doc/source/community.rst
index dfb0870f..f3296efb 100644
--- a/keystone-moon/doc/source/community.rst
+++ b/keystone-moon/doc/source/community.rst
@@ -29,7 +29,6 @@ Our community welcomes all people interested in open source cloud computing,
and there are no formal membership requirements. The best way to join the
community is to talk with others online or at a meetup and offer contributions
through Launchpad_, the wiki_, or blogs. We welcome all types of contributions,
-
from blueprint designs to documentation to testing to deployment scripts.
.. _Launchpad: https://launchpad.net/keystone
@@ -96,6 +95,5 @@ Twitter
-------
Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_.
-Also follow the `#openstack <http://search.twitter.com/search?q=%23openstack>`_
+Also follow the `#openstack <https://twitter.com/search?q=%23openstack>`_
tag for relevant tweets.
-
diff --git a/keystone-moon/doc/source/conf.py b/keystone-moon/doc/source/conf.py
index 7cca2857..1037c39e 100644
--- a/keystone-moon/doc/source/conf.py
+++ b/keystone-moon/doc/source/conf.py
@@ -24,8 +24,7 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import os
-
+import subprocess
# NOTE(dstanek): adds _ to the builtins so keystone modules can be imported
__builtins__['_'] = str
@@ -42,9 +41,13 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
+ 'oslo_config.sphinxconfiggen',
'oslosphinx',
]
+config_generator_config_file = '../../config-generator/keystone.conf'
+sample_config_basename = '_static/keystone'
+
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@@ -148,13 +151,15 @@ man_pages = [
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['images']
+html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
+git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
+ "-n1"]
+html_last_updated_fmt = subprocess.Popen(
+ git_cmd, stdout=subprocess.PIPE).communicate()[0]
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
diff --git a/keystone-moon/doc/source/configuration.rst b/keystone-moon/doc/source/configuration.rst
index 574b26be..e78c0ac6 100644
--- a/keystone-moon/doc/source/configuration.rst
+++ b/keystone-moon/doc/source/configuration.rst
@@ -52,8 +52,8 @@ Starting and Stopping Keystone under Eventlet
Running keystone under eventlet has been deprecated as of the Kilo release.
Support for utilizing eventlet will be removed as of the M-release. The
- recommended deployment is to run keystone in a WSGI server
- (e.g. ``mod_wsgi`` under ``HTTPD``).
+ recommended deployment is to run keystone in a WSGI server such as Apache
+ httpd with ``mod_wsgi``.
Keystone can be run using either its built-in eventlet server or it can be run
embedded in a web server. While the eventlet server is convenient and easy to
@@ -73,7 +73,7 @@ services are configured to run in a single process.
.. NOTE::
- The separation into ``admin`` and ``main`` interfaces is an historical
+ The separation into ``admin`` and ``main`` interfaces is a historical
anomaly. The new V3 API provides the same interface on both the admin and
main interfaces (this can be configured in ``keystone-paste.ini``, but the
default is to have both the same). The V2.0 API provides a limited public
@@ -113,8 +113,8 @@ The primary configuration file is organized into the following sections:
* ``[cache]`` - Caching layer configuration
* ``[catalog]`` - Service catalog driver configuration
* ``[credential]`` - Credential system driver configuration
-* ``[endpoint_filter]`` - Endpoint filtering extension configuration
-* ``[endpoint_policy]`` - Endpoint policy extension configuration
+* ``[endpoint_filter]`` - Endpoint filtering configuration
+* ``[endpoint_policy]`` - Endpoint policy configuration
* ``[eventlet_server]`` - Eventlet server configuration
* ``[eventlet_server_ssl]`` - Eventlet server SSL configuration
* ``[federation]`` - Federation driver configuration
@@ -124,7 +124,7 @@ The primary configuration file is organized into the following sections:
* ``[ldap]`` - LDAP configuration options
* ``[memcache]`` - Memcache configuration options
* ``[oauth1]`` - OAuth 1.0a system driver configuration
-* ``[os_inherit]`` - Inherited role assignment extension
+* ``[os_inherit]`` - Inherited role assignment configuration
* ``[paste_deploy]`` - Pointer to the PasteDeploy configuration file
* ``[policy]`` - Policy system driver configuration for RBAC
* ``[resource]`` - Resource system driver configuration
@@ -134,7 +134,7 @@ The primary configuration file is organized into the following sections:
* ``[signing]`` - Cryptographic signatures for PKI based tokens
* ``[ssl]`` - SSL certificate generation configuration
* ``[token]`` - Token driver & token provider configuration
-* ``[trust]`` - Trust extension configuration
+* ``[trust]`` - Trust configuration
The Keystone primary configuration file is expected to be named
``keystone.conf``. When starting Keystone, you can specify a different
@@ -423,7 +423,7 @@ Token Persistence Driver
Keystone supports customizable token persistence drivers. These can be
specified in the ``[token]`` section of the configuration file. Keystone
provides three non-test persistence backends. These can be set with the
-``[token]\driver`` configuration option.
+``[token] driver`` configuration option.
The drivers Keystone provides are:
@@ -438,7 +438,7 @@ The drivers Keystone provides are:
* ``memcache`` - The memcached based token persistence backend. This backend
relies on ``dogpile.cache`` and stores the token data in a set of memcached
- servers. The servers URLs are specified in the ``[memcache]\servers``
+ servers. The servers URLs are specified in the ``[memcache] servers``
configuration option in the Keystone config. Implemented by
:class:`keystone.token.persistence.backends.memcache.Token`
@@ -446,9 +446,10 @@ The drivers Keystone provides are:
.. WARNING::
It is recommended you use the ``memcache_pool`` backend instead of
``memcache`` as the token persistence driver if you are deploying Keystone
- under eventlet instead of Apache + mod_wsgi. This recommendation is due to
- known issues with the use of ``thread.local`` under eventlet that can allow
- the leaking of memcache client objects and consumption of extra sockets.
+ under eventlet instead of Apache httpd with ``mod_wsgi``. This
+ recommendation is due to known issues with the use of ``thread.local``
+ under eventlet that can allow the leaking of memcache client objects and
+ consumption of extra sockets.
Token Provider
@@ -539,7 +540,8 @@ disabled.
backend will need to be specified. Current functional backends are:
* ``dogpile.cache.memcached`` - Memcached backend using the standard
- `python-memcached`_ library
+ `python-memcached`_ library (recommended for use with Apache httpd with
+ ``mod_wsgi``)
* ``dogpile.cache.pylibmc`` - Memcached backend using the `pylibmc`_
library
* ``dogpile.cache.bmemcached`` - Memcached using `python-binary-memcached`_
@@ -548,7 +550,7 @@ disabled.
* ``dogpile.cache.dbm`` - local DBM file backend
* ``dogpile.cache.memory`` - in-memory cache
* ``keystone.cache.mongo`` - MongoDB as caching backend
- * ``keystone.cache.memcache_pool`` - An eventlet safe implementation of
+ * ``keystone.cache.memcache_pool`` - An eventlet-safe implementation of
``dogpile.cache.memcached``. This implementation also provides client
connection re-use.
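+
+For example, caching could be enabled with a sketch like the following (the
+memcached location is an assumption for illustration):
+
+.. code-block:: ini
+
+    [cache]
+    enabled = True
+    backend = dogpile.cache.memcached
+    backend_argument = url:localhost:11211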
@@ -651,7 +653,6 @@ For more information about the different backends (and configuration options):
.. _`dogpile.cache.backends.redis`: http://dogpilecache.readthedocs.org/en/latest/api.html#redis-backends
.. _`dogpile.cache.backends.file`: http://dogpilecache.readthedocs.org/en/latest/api.html#file-backends
.. _`ProxyBackends`: http://dogpilecache.readthedocs.org/en/latest/api.html#proxy-backends
-.. _`PyMongo API`: http://api.mongodb.org/python/current/api/pymongo/index.html
Certificates for PKI
@@ -897,17 +898,46 @@ Another such example is `available in devstack
(files/default_catalog.templates)
<https://git.openstack.org/cgit/openstack-dev/devstack/tree/files/default_catalog.templates>`_.
+Endpoint Filtering enables creation of ad-hoc catalogs for each project-scoped
+token request.
+
+Configure the endpoint filter catalog driver in the ``[catalog]`` section.
+For example:
+
+.. code-block:: ini
+
+ [catalog]
+ driver = catalog_sql
+
+In the ``[endpoint_filter]`` section, set ``return_all_endpoints_if_no_filter``
+to ``False`` to return an empty catalog if no associations are made.
+For example:
+
+.. code-block:: ini
+
+ [endpoint_filter]
+ return_all_endpoints_if_no_filter = False
+
+See `API Specification for Endpoint Filtering <http://specs.openstack.org/
+openstack/keystone-specs/api/v3/identity-api-v3-os-ep-filter-ext.html>`_ for
+the details of API definition.
+
+.. NOTE:: Support status for Endpoint Filtering
+
+ *Experimental* (Icehouse, Juno)
+ *Stable* (Kilo)
+
Logging
-------
Logging is configured externally to the rest of Keystone. Configure the path to
-your logging configuration file using the ``[DEFAULT] log_config`` option of
-``keystone.conf``. If you wish to route all your logging through syslog, set
-the ``[DEFAULT] use_syslog`` option.
+your logging configuration file using the ``[DEFAULT] log_config_append``
+option of ``keystone.conf``. If you wish to route all your logging through
+syslog, set the ``[DEFAULT] use_syslog`` option.
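+
+For example, a minimal sketch (the file path is an assumption for
+illustration):
+
+.. code-block:: ini
+
+    [DEFAULT]
+    log_config_append = /etc/keystone/logging.conf
+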
-A sample ``log_config`` file is included with the project at
+A sample ``log_config_append`` file is included with the project at
``etc/logging.conf.sample``. Like other OpenStack projects, Keystone uses the
-`Python logging module`, which includes extensive configuration options for
+`Python logging module`_, which includes extensive configuration options for
choosing the output levels and formats.
.. _Paste: http://pythonpaste.org/
@@ -926,7 +956,7 @@ this section. Here is the description of each of them and their purpose:
The SSL configuration options available to the eventlet server
(``keystone-all``) described here are severely limited. A secure
deployment should have Keystone running in a web server (such as Apache
- HTTPd), or behind an SSL terminator. When running Keystone in a web server
+ httpd), or behind an SSL terminator. When running Keystone in a web server
or behind an SSL terminator the options described in this section have no
effect and SSL is configured in the web server or SSL terminator.
@@ -1005,28 +1035,11 @@ and is only recommended for development environments. We do not recommend using
``ssl_setup`` for production environments.
-User CRUD extension for the V2.0 API
+User CRUD additions for the V2.0 API
------------------------------------
-.. NOTE::
-
- The core V3 API includes user operations so no extension needs to be
- enabled for the V3 API.
-
-For the V2.0 API, Keystone provides a user CRUD filter that can be added to the
-public_api pipeline. This user crud filter allows users to use a HTTP PATCH to
-change their own password. To enable this extension you should define a
-user_crud_extension filter, insert it after the ``*_body`` middleware and
-before the ``public_service`` app in the public_api WSGI pipeline in
-``keystone-paste.ini`` e.g.:
-
-.. code-block:: ini
-
- [filter:user_crud_extension]
- paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
-
- [pipeline:public_api]
- pipeline = url_normalize token_auth admin_token_auth json_body debug ec2_extension user_crud_extension public_service
+For the V2.0 API, Keystone provides an additional capability that allows users
+to use an HTTP PATCH to change their own password.
Each user can then change their own password with an HTTP PATCH:
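+
+A sketch of such a request (the endpoint, user ID, and token values are
+placeholders):
+
+.. code-block:: bash
+
+    $ curl -X PATCH http://localhost:5000/v2.0/OS-KSCRUD/users/<user_id> \
+        -H "X-Auth-Token: <auth_token>" \
+        -H "Content-Type: application/json" \
+        -d '{"user": {"password": "new_pass", "original_password": "old_pass"}}'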
@@ -1039,19 +1052,112 @@ In addition to changing their password all of the user's current tokens will be
revoked.
-Inherited Role Assignment Extension
------------------------------------
+Inherited Role Assignments
+--------------------------
-Keystone provides an optional extension that adds the capability to assign
-roles on a project or domain that, rather than affect the project or domain
-itself, are instead inherited to the project subtree or to all projects owned
-by that domain. This extension is disabled by default, but can be enabled by
-including the following in ``keystone.conf``:
+Keystone provides an optional capability to assign roles on a project or domain
+that, rather than affect the project or domain itself, are instead inherited to
+the project subtree or to all projects owned by that domain. This capability is
+enabled by default, but can be disabled by including the following in
+``keystone.conf``:
.. code-block:: ini
[os_inherit]
- enabled = True
+ enabled = False
+
+
+Endpoint Policy
+---------------
+
+The Endpoint Policy feature provides associations between service endpoints
+and policies that are already stored in the Identity server and referenced
+by a policy ID.
+
+Configure the endpoint policy backend driver in the ``[endpoint_policy]``
+section. For example:
+
+.. code-block:: ini
+
+ [endpoint_policy]
+ driver = sql
+
+See `API Specification for Endpoint Policy <http://specs.openstack.org/
+openstack/keystone-specs/api/v3/identity-api-v3-os-endpoint-policy.html>`_
+for the details of API definition.
+
+.. NOTE:: Support status for Endpoint Policy
+
+ *Experimental* (Juno)
+ *Stable* (Kilo)
+
+
+OAuth 1.0a
+-----------
+
+The OAuth 1.0a feature provides the ability for Identity users to delegate
+roles to third party consumers via the OAuth 1.0a specification.
+
+To enable OAuth1:
+
+1. Add the oauth1 driver to the ``[oauth1]`` section in ``keystone.conf``. For
+ example:
+
+.. code-block:: ini
+
+ [oauth1]
+ driver = sql
+
+2. Add the ``oauth1`` authentication method to the ``[auth]`` section in
+ ``keystone.conf``:
+
+.. code-block:: ini
+
+ [auth]
+ methods = external,password,token,oauth1
+
+3. If deploying under Apache httpd with ``mod_wsgi``, set the
+   ``WSGIPassAuthorization`` directive to allow the OAuth Authorization
+   headers to pass through ``mod_wsgi``. For example, add the following to
+   the keystone virtual host file:
+
+.. code-block:: ini
+
+ WSGIPassAuthorization On
+
+See `API Specification for OAuth 1.0a <http://specs.openstack.org/openstack/
+keystone-specs/api/v3/identity-api-v3-os-oauth1-ext.html>`_ for the details of
+API definition.
+
+.. NOTE:: Support status for OAuth 1.0a
+
+ *Experimental* (Havana, Icehouse)
+ *Stable* (Juno)
+
+
+Revocation Events
+-----------------
+
+The Revocation Events feature provides a list of token revocations. Each event
+expresses a set of criteria which describes a set of tokens that are
+no longer valid.
+
+Add the revoke backend driver to the ``[revoke]`` section in
+``keystone.conf``. For example:
+
+.. code-block:: ini
+
+ [revoke]
+ driver = sql
+
+See `API Specification for Revocation Events <https://specs.openstack.org/
+openstack/keystone-specs/api/v3/identity-api-v3-os-revoke-ext.html>`_ for
+the details of API definition.
+
+.. NOTE:: Support status for Revocation Events
+
+ *Experimental* (Juno)
+ *Stable* (Kilo)
Token Binding
@@ -1114,6 +1220,39 @@ If a response to ``list_{entity}`` call has been truncated, then the response
status code will still be 200 (OK), but the ``truncated`` attribute in the
collection will be set to ``true``.
+
+URL safe naming of projects and domains
+---------------------------------------
+
+In the future, keystone may offer the ability to identify a project in a
+hierarchy via a URL style of naming from the root of the hierarchy (for example
+specifying 'projectA/projectB/projectC' as the project name in an
+authentication request). In order to prepare for this, keystone supports the
+optional ability to ensure both projects and domains are named without
+including any of the reserved characters specified in section 2.2 of
+`rfc3986 <http://tools.ietf.org/html/rfc3986>`_.
+
+The safety of the names of projects and domains can be controlled via two
+configuration options:
+
+.. code-block:: ini
+
+ [resource]
+ project_name_url_safe = off
+ domain_name_url_safe = off
+
+When set to ``off`` (which is the default), no checking is done on the URL
+safeness of names. When set to ``new``, an attempt to create a new project or
+domain with an unsafe name (or update the name of a project or domain to be
+unsafe) will cause a status code of 400 (Bad Request) to be returned. Setting
+the configuration option to ``strict`` will, in addition to preventing the
+creation and updating of entities with unsafe names, cause an authentication
+attempt which specifies a project or domain name that is unsafe to return a
+status code of 401 (Unauthorized).
+
+It is recommended that installations take the steps necessary to run with
+both options set to ``strict`` as soon as is practical.
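+
+For example, a deployment that has verified its existing project and domain
+names are URL safe could enforce strict checking with:
+
+.. code-block:: ini
+
+    [resource]
+    project_name_url_safe = strict
+    domain_name_url_safe = strict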
+
Sample Configuration Files
--------------------------
@@ -1124,6 +1263,7 @@ files for each Server application.
* ``etc/keystone-paste.ini``
* ``etc/logging.conf.sample``
* ``etc/default_catalog.templates``
+* ``etc/sso_callback_template.html``
.. _`API protection with RBAC`:
@@ -1189,6 +1329,7 @@ are filtered out (e.g. user passwords).
List of object attributes:
* role:
+ * target.role.domain_id
* target.role.id
* target.role.name
@@ -1441,14 +1582,6 @@ as follows:
$ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 user list
$ openstack --os-username=admin --os-password=secrete --os-project-name=admin --os-auth-url=http://localhost:35357/v2.0 project create demo
-For additional examples using ``python-keystoneclient`` refer to
-`python-keystoneclient examples`_, likewise, for additional examples using
-``python-openstackclient``, refer to `python-openstackclient examples`_.
-
-.. _`python-keystoneclient examples`: cli_examples.html#using-python-keystoneclient-v2-0
-.. _`python-openstackclient examples`: cli_examples.html#using-python-openstackclient-v3
-
-
Removing Expired Tokens
=======================
@@ -1506,12 +1639,6 @@ The corresponding entries in the Keystone configuration file are:
user_tree_dn = ou=Users,dc=openstack,dc=org
user_objectclass = inetOrgPerson
- project_tree_dn = ou=Projects,dc=openstack,dc=org
- project_objectclass = groupOfNames
-
- role_tree_dn = ou=Roles,dc=openstack,dc=org
- role_objectclass = organizationalRole
-
The default object classes and attributes are intentionally simplistic. They
reflect the common standard objects according to the LDAP RFCs. However, in a
live deployment, the correct attributes can be overridden to support a
@@ -1539,14 +1666,6 @@ and you have only read access, in such case the configuration is:
user_allow_update = False
user_allow_delete = False
- project_allow_create = True
- project_allow_update = True
- project_allow_delete = True
-
- role_allow_create = True
- role_allow_update = True
- role_allow_delete = True
-
There are some configuration options for filtering users, tenants and roles. If
the backend is providing too much output, the configuration will look like:
@@ -1555,8 +1674,6 @@ look like:
[ldap]
user_filter = (memberof=CN=openstack-users,OU=workgroups,DC=openstack,DC=org)
- project_filter =
- role_filter =
If the directory server does not have an enabled attribute of type boolean
for the user, there are several configuration parameters that can be
@@ -1588,26 +1705,15 @@ specified classes in the LDAP module so you can configure them like:
.. code-block:: ini
[ldap]
- user_objectclass = person
- user_id_attribute = cn
- user_name_attribute = cn
- user_mail_attribute = mail
- user_enabled_attribute = userAccountControl
- user_enabled_mask = 2
- user_enabled_default = 512
- user_attribute_ignore = tenant_id,tenants
- project_objectclass = groupOfNames
- project_id_attribute = cn
- project_member_attribute = member
- project_name_attribute = ou
- project_desc_attribute = description
- project_enabled_attribute = extensionName
- project_attribute_ignore =
- role_objectclass = organizationalRole
- role_id_attribute = cn
- role_name_attribute = ou
- role_member_attribute = roleOccupant
- role_attribute_ignore =
+ user_objectclass = person
+ user_id_attribute = cn
+ user_name_attribute = cn
+ user_description_attribute = displayName
+ user_mail_attribute = mail
+ user_enabled_attribute = userAccountControl
+ user_enabled_mask = 2
+ user_enabled_default = 512
+ user_attribute_ignore = tenant_id,tenants
Debugging LDAP
--------------
@@ -1632,14 +1738,13 @@ Enabled Emulation
-----------------
Some directory servers do not provide any enabled attribute. For these servers,
-the ``user_enabled_emulation`` and ``project_enabled_emulation`` attributes
-have been created. They are enabled by setting their respective flags to True.
-Then the attributes ``user_enabled_emulation_dn`` and
-``project_enabled_emulation_dn`` may be set to specify how the enabled users
-and projects (tenants) are selected. These attributes work by using a
-``groupOfNames`` entry and adding whichever users or projects (tenants) that
-you want enabled to the respective group with the ``member`` attribute. For
-example, this will mark any user who is a member of ``enabled_users`` as enabled:
+the ``user_enabled_emulation`` attribute has been created. It is enabled by
+setting it to True. Then the attribute ``user_enabled_emulation_dn`` may be
+set to specify how the enabled users are selected. This attribute works by
+using a ``groupOfNames`` entry and adding whichever users you want enabled to
+that group with the ``member`` attribute. For example, this will mark any
+user who is a member of ``enabled_users`` as enabled:
.. code-block:: ini
@@ -1647,15 +1752,14 @@ example, this will mark any user who is a member of ``enabled_users`` as enabled
user_enabled_emulation = True
user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org
-The default values for user and project (tenant) enabled emulation DN is
-``cn=enabled_users,$user_tree_dn`` and ``cn=enabled_tenants,$project_tree_dn``
-respectively.
+The default value for the user enabled emulation DN is
+``cn=enabled_users,$user_tree_dn``.
+
If a different LDAP schema is used for group membership, it is possible to use
the ``group_objectclass`` and ``group_member_attribute`` attributes to
determine membership in the enabled emulation group by setting the
-``user_enabled_emulation_use_group_config`` and
-``project_enabled_emulation_use_group_config`` attributes to True.
+``user_enabled_emulation_use_group_config`` attribute to True.
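+
+A sketch of such a configuration (the schema values shown are illustrative):
+
+.. code-block:: ini
+
+    [ldap]
+    group_objectclass = groupOfUniqueNames
+    group_member_attribute = uniqueMember
+    user_enabled_emulation = True
+    user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org
+    user_enabled_emulation_use_group_config = True
+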
Secure Connection
-----------------
@@ -1760,7 +1864,7 @@ with the user's DN and provided password. This kind of authentication bind
can fill up the pool pretty quickly, so a separate pool is provided for end
user authentication bind calls. If a deployment does not want to use a pool for
those binds, then it can disable pooling selectively by setting
-``use_auth_pool`` to false. If a deployment wants to use a pool for those
+``use_auth_pool`` to false. If a deployment wants to use a pool for those
authentication binds, then ``use_auth_pool`` needs to be set to true. For the
authentication pool, a different pool size (``auth_pool_size``) and connection
lifetime (``auth_pool_connection_lifetime``) can be specified. With an enabled
@@ -1805,3 +1909,16 @@ Connection pool configuration is part of the ``[ldap]`` configuration section:
# End user auth connection lifetime in seconds. (integer value)
auth_pool_connection_lifetime=60
+Specifying Multiple LDAP servers
+--------------------------------
+
+Multiple LDAP server URLs can be supplied to keystone to provide
+high-availability support for a single LDAP backend. To specify multiple LDAP
+servers, simply change the ``url`` option in the ``[ldap]`` section to list
+the different servers, each separated by a comma. For
+example:
+
+.. code-block:: ini
+
+ [ldap]
+ url = "ldap://localhost,ldap://backup.localhost"
diff --git a/keystone-moon/doc/source/configure_federation.rst b/keystone-moon/doc/source/configure_federation.rst
index 09d0984d..644d3175 100644
--- a/keystone-moon/doc/source/configure_federation.rst
+++ b/keystone-moon/doc/source/configure_federation.rst
@@ -90,14 +90,19 @@ configure ``federation``.
Configure authentication drivers in ``keystone.conf``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. NOTE::
+ ``saml2`` has been deprecated as of the Mitaka release. Support for the
+   ``saml2`` wrapper will be removed as of the "O" release. The recommended
+   authentication method is ``mapped``, which supports ``saml2``.
+
Add the authentication methods to the ``[auth]`` section in ``keystone.conf``.
Names should be equal to protocol names added via Identity API v3. Here we use
-examples ``saml2`` and ``openid``.
+examples ``mapped`` and ``openid``.
.. code-block:: bash
[auth]
- methods = external,password,token,saml2,openid
+ methods = external,password,token,mapped,openid
Create keystone groups and assign roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -182,7 +187,7 @@ issue an HTTP POST request with authentication data in the request body. To
start federated authentication a user must access the dedicated URL with
Identity Provider's and Protocol's identifiers stored within a protected URL.
The URL has a format of:
-``/v3/OS-FEDERATION/identity_providers/{identity_provider}/protocols/{protocol}/auth``.
+``/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/auth``.
In this instance we follow a standard SAML2 authentication procedure, that is,
the user will be redirected to the Identity Provider's authentication webpage
@@ -207,7 +212,7 @@ SAML authentication procedure.
.. code-block:: bash
- $ curl -X GET -D - http://localhost:5000/v3/OS-FEDERATION/identity_providers/{identity_provider}/protocols/{protocol}/auth
+ $ curl -X GET -D - http://localhost:5000/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/auth
Determine accessible resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -251,7 +256,7 @@ Example cURL
.. code-block:: bash
- $ curl -X POST -H "Content-Type: application/json" -d '{"auth":{"identity":{"methods":["saml2"],"saml2":{"id":"<unscoped_token_id>"}},"scope":{"project":{"domain": {"name": "Default"},"name":"service"}}}}' -D - http://localhost:5000/v3/auth/tokens
+ $ curl -X POST -H "Content-Type: application/json" -d '{"auth":{"identity":{"methods":["mapped"],"saml2":{"id":"<unscoped_token_id>"}},"scope":{"project":{"domain": {"name": "Default"},"name":"service"}}}}' -D - http://localhost:5000/v3/auth/tokens
--------------------------------------
Keystone as an Identity Provider (IdP)
@@ -332,7 +337,7 @@ Create a Service Provider (SP)
------------------------------
In this example we are creating a new Service Provider with an ID of ``BETA``,
-a ``sp_url`` of ``http://beta.example.com/Shibboleth.sso/POST/ECP`` and a
+a ``sp_url`` of ``http://beta.example.com/Shibboleth.sso/SAML2/ECP`` and a
``auth_url`` of ``http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth``
. The ``sp_url`` will be used when creating a SAML assertion for ``BETA`` and
signed by the current keystone IdP. The ``auth_url`` is used to retrieve the
@@ -345,8 +350,8 @@ field is optional we are passing it set to ``true`` otherwise it will be set to
$ curl -s -X PUT \
-H "X-Auth-Token: $OS_TOKEN" \
-H "Content-Type: application/json" \
- -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP", "enabled": true}' \
- http://localhost:5000/v3/service_providers/BETA | python -mjson.tool
+ -d '{"service_provider": {"auth_url": "http://beta.example.com:5000/v3/OS-FEDERATION/identity_providers/beta/protocols/saml2/auth", "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP", "enabled": true}}' \
+ http://localhost:5000/v3/OS-FEDERATION/service_providers/BETA | python -mjson.tool
Testing it all out
------------------
diff --git a/keystone-moon/doc/source/configure_tokenless_x509.rst b/keystone-moon/doc/source/configure_tokenless_x509.rst
index 40b9fd20..614b1179 100644
--- a/keystone-moon/doc/source/configure_tokenless_x509.rst
+++ b/keystone-moon/doc/source/configure_tokenless_x509.rst
@@ -87,7 +87,7 @@ The following options can be defined in `keystone.conf`:
* ``issuer_attribute`` - The issuer attribute that is served as an IdP ID for
the X.509 tokenless authorization along with the protocol to look up its
corresponding mapping. It is the environment variable in the WSGI
- enviornment that references to the Issuer of the client certificate. It
+ environment that references to the Issuer of the client certificate. It
defaults to ``SSL_CLIENT_I_DN``.
This is a sample configuration for two `trusted_issuer` and a `protocol` set
@@ -127,7 +127,7 @@ like in a certificate.
C=US
The issuer DN should be constructed as a string that contains no spaces
-and have the right order seperated by commas like the example below.
+and has the right order, separated by commas, like the example below.
Please be aware that ``emailAddress`` and ``ST`` should be used instead
of ``E`` and ``S`` that are shown in the above example. The following is
the sample Python code used to create the IdP ID.
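+
+A sketch of that code (assuming the IdP ID is derived as a SHA-256 hex digest
+of the issuer DN; the DN value here is illustrative):
+
+.. code-block:: python
+
+    import hashlib
+
+    issuer_dn = ('emailAddress=admin@example.com,CN=admin,OU=keystone,'
+                 'O=openstack,ST=California,C=US')
+    # Hash the issuer DN; the hex digest serves as the IdP ID.
+    idp_id = hashlib.sha256(issuer_dn.encode('utf-8')).hexdigest()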
diff --git a/keystone-moon/doc/source/configuringservices.rst b/keystone-moon/doc/source/configuringservices.rst
index 3ffa13e7..40fe03a2 100644
--- a/keystone-moon/doc/source/configuringservices.rst
+++ b/keystone-moon/doc/source/configuringservices.rst
@@ -38,8 +38,80 @@ The middleware will pass those data down to the service as headers. More
details on the architecture of that setup is described in the
`authentication middleware documentation`_.
-Setting up credentials
-======================
+Setting up credentials with ``keystone-manage bootstrap``
+=========================================================
+
+Setting up projects, users, and roles
+-------------------------------------
+
+The ``keystone-manage bootstrap`` command will create a user, project and role,
+and will assign the newly created role to the newly created user on the newly
+created project. By default, each of these new resources will be named
+``admin``.
+
+The defaults may be overridden by passing ``--bootstrap-username``,
+``--bootstrap-project-name`` and ``--bootstrap-role-name``. Each of these has
+an environment variable equivalent: ``OS_BOOTSTRAP_USERNAME``,
+``OS_BOOTSTRAP_PROJECT_NAME`` and ``OS_BOOTSTRAP_ROLE_NAME``.
+
+A user password must also be supplied. This can be passed in as either
+``--bootstrap-password``, or set as an environment variable using
+``OS_BOOTSTRAP_PASSWORD``.
+
+Optionally, if specified by ``--bootstrap-public-url``,
+``--bootstrap-admin-url`` and/or ``--bootstrap-internal-url`` or the equivalent
+environment variables, the command will create an identity service with the
+specified endpoint information. You may also configure
+``--bootstrap-region-id`` and ``--bootstrap-service-name`` for the endpoints
+to suit your deployment's requirements.
+
+.. NOTE::
+
+ It is strongly encouraged to configure the identity service and its
+ endpoints while bootstrapping keystone.
+
+Minimally, keystone can be bootstrapped with:
+
+.. code-block:: bash
+
+ $ keystone-manage bootstrap --bootstrap-password s3cr3t
+
+Verbosely, keystone can be bootstrapped with:
+
+.. code-block:: bash
+
+    $ keystone-manage bootstrap --bootstrap-password s3cr3t \
+ --bootstrap-username admin \
+ --bootstrap-project-name admin \
+ --bootstrap-role-name admin \
+ --bootstrap-service-name keystone \
+ --bootstrap-region-id RegionOne \
+ --bootstrap-admin-url http://localhost:35357 \
+ --bootstrap-public-url http://localhost:5000 \
+ --bootstrap-internal-url http://localhost:5000
+
+This will create an ``admin`` user with the ``admin`` role on the ``admin``
+project. The user will have the password specified in the command. Note that
+both the user and the project will be created in the ``default`` domain. If an
+endpoint is not created in the catalog, users will need to provide endpoint
+overrides to perform additional identity operations.
+
+By creating an ``admin`` user and an identity endpoint, deployers may
+authenticate to keystone and perform identity operations like creating
+additional services and endpoints using that ``admin`` user. This will preclude
+the need to ever use or configure the ``admin_token`` (described below).
+
+To test a proper configuration, a user can use the OpenStackClient CLI:
+
+.. code-block:: bash
+
+ $ openstack project list --os-username admin --os-project-name admin \
+ --os-user-domain-id default --os-project-domain-id default \
+ --os-identity-api-version 3 --os-auth-url http://localhost:5000 \
+ --os-password s3cr3t
+
+Setting up credentials with Admin Token
+=======================================
Admin Token
-----------
diff --git a/keystone-moon/doc/source/developing.rst b/keystone-moon/doc/source/developing.rst
index 50fed9e5..d49d1e14 100644
--- a/keystone-moon/doc/source/developing.rst
+++ b/keystone-moon/doc/source/developing.rst
@@ -21,10 +21,9 @@ Developing with Keystone
Setup
-----
-Get your development environment set up according to :doc:`setup`. The
-instructions from here will assume that you have installed Keystone into a
-virtualenv. If you chose not to, simply exclude "tools/with_venv.sh" from the
-example commands below.
+Get your development environment set up according to
+:doc:`devref/development.environment`. It is recommended that you install
+Keystone into a virtualenv.
Configuring Keystone
@@ -48,7 +47,7 @@ To run the Keystone Admin and API server instances, use:
.. code-block:: bash
- $ tools/with_venv.sh keystone-all
+ $ keystone-all
This runs Keystone with the configuration in the etc/ directory of the project.
See :doc:`configuration` for details on how Keystone is configured. By default,
@@ -72,7 +71,7 @@ place:
.. code-block:: bash
- $ bin/keystone-manage db_sync
+ $ keystone-manage db_sync
.. _`python-keystoneclient`: https://git.openstack.org/cgit/openstack/python-keystoneclient
.. _`openstackclient`: https://git.openstack.org/cgit/openstack/python-openstackclient
the script, the version is the number before the underscore.
For example, if the script is named ``001_add_X_table.py`` then the
version of the SQL migration is ``1``.
-.. _SQLAlchemy-migrate: http://code.google.com/p/sqlalchemy-migrate/
-
-Extensions should be created as directories under ``keystone/contrib``. An
-extension that requires SQL migrations should not change the common repository,
-but should instead have its own repository. This repository must be in the
-extension's directory in ``keystone/contrib/<extension>/migrate_repo``. In
-addition, it needs a subdirectory named ``versions``. For example, if the
-extension name is ``my_extension`` then the directory structure would be
-``keystone/contrib/my_extension/migrate_repo/versions/``.
+.. _SQLAlchemy-migrate: https://git.openstack.org/cgit/openstack/sqlalchemy-migrate
For the migration to work, both the ``migrate_repo`` and ``versions``
subdirectories must have ``__init__.py`` files. SQLAlchemy-migrate will look
@@ -121,10 +112,7 @@ the minimal set of values is::
version_table=migrate_version
required_dbs=[]
-The directory ``keystone/contrib/example`` contains a sample extension
-migration.
-
-For core components, to run a migration for upgrade, simply run:
+To run a migration for upgrade, simply run:
.. code-block:: bash
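
   $ keystone-manage db_sync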
@@ -134,22 +122,73 @@ For core components, to run a migration for upgrade, simply run:
If no version is specified, then the most recent migration will be used.
-For extensions, migrations must be explicitly run for each extension individually.
-To run a migration for a specific extension, simply run:
+.. NOTE::
-.. code-block:: bash
+ Schema downgrades are not supported.
- $ keystone-manage db_sync --extension <name>
+.. _online-migration:
-.. NOTE::
+Beginning with the Mitaka release, we write the migration scripts in a
+backward compatible way to support `online schema migration`_. The following
+guidelines for schema and data migrations should be followed:
- The meaning of "extension" here has been changed since all of the
- "extension" are loaded and the migrations are run by default, but
- the source is maintained in a separate directory.
+* Additive schema migrations - In general, almost all schema migrations should
+ be additive. Put simply, they should only create elements like columns,
+ indices, and tables.
-.. NOTE::
+* Subtractive schema migrations - To remove an element like a column or table:
+
+ #. Expand phase: The element must be deprecated and retained for backward
+ compatibility. This allows for graceful upgrade from X release to X+1.
+
+ #. Migrate phase: Data migration must completely migrate data from the old
+ version of the schema to the new version. Data migrations should have the
+ ability to run online, while the service is operating normally, so the
+ keystone service implementation (typically the SQLAlchemy model) has to
+ be aware that data should be retrieved and/or written from/to more than
+ one place and format, to maintain consistency (see examples below).
+
+ #. Contract phase: The column can then be removed with a schema migration at
+ the start of X+2. Contract phase can't happen if the data migration isn't
+ finished (see last point in this section).
+
+* Release notes - There should be a release note in case an operation is
+ "blocking", "expensive", or both. You can find information on which DDL
+ operations are expensive in `MySQL docs`_. Other supported SQL DBs support
+  `transactional DDL`_, and experienced DBAs know to take advantage of this
+ feature.
+
+* Constraints - When adding a foreign or unique key constraint, the schema
+ migration code needs to handle possible problems with data before applying
+ the constraint. For example, a unique constraint must clean up duplicate
+ records before applying said constraint.
+
+* Data migrations - should be done in an online fashion by custom code in the
+ SQLAlchemy layer that handles moving data between the old and new portions
+ of the schema. In addition, for each type of data migration performed,
+ a keystone-manage command can be added for the operator to manually request
+ that rows be migrated (see examples below, like the nova flavor migration).
+
+* All schema migrations should be idempotent. For example, a migration
+ should check if an element exists in the schema before attempting to add
+  it (see the sketch after this list). This logic comes for free in the
+  autogenerated workflow of the online migrations.
+
+* Before running `contract` in the expand/migrate/contract schema migration
+ workflow, the remaining data migrations should be performed by the contract
+ script. Alternatively, running a relevant keystone-manage migration should
+ be enforced, to ensure that all remaining data migrations are completed.
+ It is a good practice to move data out of the old columns, and ensure they
+ are filled with null values before removing them.
+
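+As an illustration of the idempotency guideline above, a sketch of an
+additive migration in the SQLAlchemy-migrate style keystone uses (the table
+and column names are hypothetical, not a real keystone script):
+
+.. code-block:: python
+
+    import migrate  # noqa: importing migrate patches Table with create_column
+    import sqlalchemy as sql
+
+
+    def upgrade(migrate_engine):
+        meta = sql.MetaData()
+        meta.bind = migrate_engine
+        user_table = sql.Table('user', meta, autoload=True)
+        # Only add the column if it is not already present, so re-running
+        # the migration is harmless.
+        if 'last_active_at' not in user_table.c:
+            user_table.create_column(
+                sql.Column('last_active_at', sql.DateTime(), nullable=True))
+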
+A good example of an online schema migration is documented in a `cinder spec`_.
+See more examples in :doc:`online_schema_migration_examples`.
+
+.. _`online schema migration`: https://specs.openstack.org/openstack/keystone-specs/specs/mitaka/online-schema-migration.html
+.. _`MySQL docs`: https://dev.mysql.com/doc/refman/5.7/en/innodb-create-index-overview.html
+.. _`transactional DDL`: https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis
+.. _`cinder spec`: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/online-schema-upgrades.html
- Schema downgrades are not supported for both core components and extensions.
Initial Sample Data
-------------------
@@ -159,7 +198,7 @@ data for use with keystone:
.. code-block:: bash
- $ OS_TOKEN=ADMIN tools/with_venv.sh tools/sample_data.sh
+ $ OS_TOKEN=ADMIN tools/sample_data.sh
Notice it requires a service token read from an environment variable for
authentication. The default value "ADMIN" is from the ``admin_token``
@@ -170,13 +209,13 @@ Once run, you can see the sample data that has been created by using the
.. code-block:: bash
- $ tools/with_venv.sh openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ user list
+ $ openstack --os-token ADMIN --os-url http://127.0.0.1:35357/v2.0/ user list
The `openstackclient`_ can be installed using the following:
.. code-block:: bash
- $ tools/with_venv.sh pip install python-openstackclient
+ $ pip install python-openstackclient
Filtering responsibilities between controllers and drivers
----------------------------------------------------------
@@ -247,7 +286,8 @@ Running Tests
=============
Before running tests, you should have ``tox`` installed and available in your
-environment (in addition to the other external dependencies in :doc:`setup`):
+environment (in addition to the other external dependencies in
+:doc:`devref/development.environment`):
.. code-block:: bash
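
   $ pip install tox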
@@ -328,7 +368,7 @@ Not all of the tests in the keystone/tests/unit directory are strictly unit
tests. Keystone intentionally includes tests that run the service locally and
drives the entire configuration to achieve basic functional testing.
-For the functional tests, an in-memory key-value store or in-memory sqlite
+For the functional tests, an in-memory key-value store or in-memory SQLite
database is used to keep the tests fast.
Within the tests directory, the general structure of the backend tests is a
@@ -819,3 +859,44 @@ The documentation is generated with Sphinx using the tox command. To create HTM
$ tox -e docs
The results are in the doc/build/html and doc/build/man directories respectively.
+
+
+Release Notes
+-------------
+
+The release notes for a patch should be included in the patch. If not, the
+release notes should be in a follow-on review.
+
+If the following applies to the patch, a release note is required:
+
+* The deployer needs to take an action when upgrading
+* The backend driver interface changes
+* A new feature is implemented
+* Function was removed (hopefully it was deprecated)
+* Current behavior is changed
+* A new config option is added that the deployer should consider changing from
+ the default
+* A security bug is fixed
+
+A release note is suggested if a long-standing or important bug is fixed.
+Otherwise, a release note is not required.
+
+Keystone uses `reno <http://docs.openstack.org/developer/reno/usage.html>`_ to
+generate release notes. Please read the docs for details. In summary, use
+
+.. code-block:: bash
+
+ $ tox -e venv -- reno new <bug-,bp-,whatever>
+
+Then edit the sample file that was created and push it with your change.
+
+To see the results:
+
+.. code-block:: bash
+
+ $ git commit # Commit the change because reno scans git log.
+
+ $ tox -e releasenotes
+
+Then look at the generated release notes files in releasenotes/build/html in
+your favorite browser.
diff --git a/keystone-moon/doc/source/developing_drivers.rst b/keystone-moon/doc/source/developing_drivers.rst
index 1e3996de..38cd7319 100644
--- a/keystone-moon/doc/source/developing_drivers.rst
+++ b/keystone-moon/doc/source/developing_drivers.rst
@@ -25,7 +25,7 @@ Each major subsystem (that has data access needs) implements the data access
by using drivers. Some examples of Keystone's drivers:
- :class:`keystone.identity.backends.ldap.Identity`
-- :class:`keystone.token.providers.fernet.Provider`
+- :class:`keystone.token.providers.fernet.core.Provider`
- :class:`keystone.contrib.federation.backends.sql.Federation`
In/Out of Tree
diff --git a/keystone-moon/doc/source/devref/development.environment.rst b/keystone-moon/doc/source/devref/development.environment.rst
new file mode 100644
index 00000000..2718966a
--- /dev/null
+++ b/keystone-moon/doc/source/devref/development.environment.rst
@@ -0,0 +1,175 @@
+..
+ Copyright 2011-2012 OpenStack Foundation
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+=============================================
+Setting up a Keystone development environment
+=============================================
+
+This document describes getting the source from keystone's `Git repository`_
+for development purposes.
+
+To install Keystone from packaging, refer instead to Keystone's `User
+Documentation`_.
+
+.. _`Git Repository`: http://git.openstack.org/cgit/openstack/keystone
+.. _`User Documentation`: http://docs.openstack.org/
+
+Prerequisites
+=============
+
+This document assumes you are using Ubuntu, Fedora or openSUSE (SLE), and
+that you have the following tools available on your system:
+
+- Python_ 2.7 and 3.4
+- git_
+- setuptools_
+- pip_
+- msgfmt (part of the gettext package)
+- virtualenv_
+- tox_
+
+**Reminder**: If you're successfully using a different platform, or a
+different version of the above, please document your configuration here!
+
+.. _Python: http://www.python.org/
+.. _git: http://git-scm.com/
+.. _setuptools: http://pypi.python.org/pypi/setuptools
+.. _tox: https://pypi.python.org/pypi/tox
+
+Getting the latest code
+=======================
+
+Make a clone of the code from our `Git repository`_:
+
+.. code-block:: bash
+
+ $ git clone https://git.openstack.org/openstack/keystone.git
+
+When that is complete, you can:
+
+.. code-block:: bash
+
+ $ cd keystone
+
+Installing dependencies
+=======================
+
+Keystone maintains two lists of dependencies::
+
+ requirements.txt
+ test-requirements.txt
+
+The first is the list of dependencies needed for running keystone; the second
+includes dependencies used for active development and testing of Keystone
+itself.
+
+These dependencies can be installed from PyPi_ using the Python tool pip_.
+
+.. _PyPi: http://pypi.python.org/
+.. _pip: http://pypi.python.org/pypi/pip
+
+However, your system *may* need additional dependencies that `pip` (and by
+extension, PyPi) cannot satisfy. These dependencies should be installed
+prior to using `pip`, and the installation method may vary depending on
+your platform.
+
+Ubuntu 14.04, 15.10:
+
+.. code-block:: bash
+
+ $ sudo apt-get install python-dev python3-dev libxml2-dev libxslt1-dev \
+ libsasl2-dev libsqlite3-dev libssl-dev libldap2-dev libffi-dev
+
+
+Fedora 19+:
+
+.. code-block:: bash
+
+ $ sudo yum install python-lxml python-greenlet-devel python-ldap \
+ sqlite-devel openldap-devel python-devel libxslt-devel \
+ openssl-devel libffi-devel
+
+openSUSE 13.2 (SLE 12):
+
+.. code-block:: bash
+
+ $ sudo zypper install libxslt-devel openldap2-devel libopenssl-devel \
+ python-devel python-greenlet-devel python-ldap python-lxml \
+ python-pysqlite sqlite3-devel
+
+PyPi Packages and VirtualEnv
+----------------------------
+
+We recommend establishing a virtualenv to run Keystone within. virtualenv
+limits the Python environment to just what you're installing as dependencies,
+which is useful for keeping a clean environment while working on Keystone.
+
+.. code-block:: bash
+
+ $ tox -e venv --notest
+
+This will create a local virtual environment in the directory ``.tox``.
+Once created, you can activate this virtualenv for your current shell using:
+
+.. code-block:: bash
+
+ $ source .tox/venv/bin/activate
+
+The virtual environment can be disabled using the command:
+
+.. code-block:: bash
+
+ $ deactivate
+
+You can also use ``tox -e venv`` to prefix commands so that they run
+within the virtual environment. For more information on virtual environments,
+see virtualenv_.
+
+.. _virtualenv: http://www.virtualenv.org/
+
+If you want to run Keystone outside of a virtualenv, you can install the
+dependencies directly into your system from the requirements files:
+
+.. code-block:: bash
+
+ # Install the dependencies for running keystone
+ $ pip install -r requirements.txt
+
+ # Install the dependencies for developing, testing, and running keystone
+ $ pip install -r test-requirements.txt
+
+ # Use 'python setup.py' to link Keystone into Python's site-packages
+ $ python setup.py develop
+
+
+Verifying Keystone is set up
+============================
+
+Once set up, either directly or within a virtualenv, you should be able to
+invoke Python and import the libraries. If you're using a virtualenv, don't
+forget to activate it:
+
+.. code-block:: bash
+
+ $ source .tox/venv/bin/activate
+
+You should then be able to `import keystone` using Python without issue:
+
+.. code-block:: bash
+
+ $ python -c "import keystone"
+
+If you can import Keystone without a traceback, you should be ready to move on
+to :doc:`../developing`.
diff --git a/keystone-moon/doc/source/event_notifications.rst b/keystone-moon/doc/source/event_notifications.rst
index 740986b1..d9225c56 100644
--- a/keystone-moon/doc/source/event_notifications.rst
+++ b/keystone-moon/doc/source/event_notifications.rst
@@ -28,7 +28,7 @@ The supported operations between the two types of notification formats are
documented below.
Common Notification Structure
-==============================
+=============================
Notifications generated by Keystone are generated in JSON format. An external
application can format them into ATOM format and publish them as a feed.
@@ -365,7 +365,7 @@ the unique identifier of the resource type.
.. code-block:: javascript
{
- "event_type": "identity.created.role_assignment",
+ "event_type": "identity.role_assignment.created",
"message_id": "a5901371-d5fd-b3bb-448f-a14dead6f4cb",
"payload": {
"typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
@@ -414,3 +414,26 @@ ensures this has an immediate impact on the accessibility of the project's
resources by revoking tokens with authorization on the project, but should
**not** have a direct impact on the projects resources (in other words, virtual
machines should **not** be deleted).
+
+Opting out of certain notifications
+===================================
+
+There are many notifications that Keystone emits and some deployers may only
+care about certain events. Keystone provides a way to opt out of certain
+notifications. In ``/etc/keystone/keystone.conf`` you can set
+``notification_opt_out`` to the event you wish to opt out of. It is possible
+to opt out of multiple events.
+
+Example:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ notification_opt_out = identity.user.created
+ notification_opt_out = identity.role_assignment.created
+ notification_opt_out = identity.authenticate.pending
+
+This will opt out of notifications for user creation, role assignment creation
+and successful authentications. For a list of event types that can be used,
+refer to: `Telemetry Measurements`_.
+
+.. _Telemetry Measurements: http://docs.openstack.org/admin-guide-cloud/telemetry-measurements.html#openstack-identity
diff --git a/keystone-moon/doc/source/extensions.rst b/keystone-moon/doc/source/extensions.rst
index 0a6b34a2..4d171f05 100644
--- a/keystone-moon/doc/source/extensions.rst
+++ b/keystone-moon/doc/source/extensions.rst
@@ -43,99 +43,3 @@ Removal Process
It is not intended that functionality should stay in experimental for a long
period, functionality that stays `experimental` for more than **two** releases
would be expected to make a transition to either `stable` or `out-of-tree`.
-
-Current Extensions
-==================
-
-------------------
-Endpoint Filtering
-------------------
-
-The Endpoint Filtering extension enables creation of ad-hoc catalogs for each
-project-scoped token request.
-
-.. NOTE:: Support status for Endpoint Filtering
-
- *Experimental* (Icehouse, Juno)
- *Stable* (Kilo)
-
-.. toctree::
- :maxdepth: 1
-
- extensions/endpoint_filter.rst
-
-* `API Specification for Endpoint Filtering <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-ep-filter-ext.html>`__
-
----------------
-Endpoint Policy
----------------
-
-The Endpoint Policy extension provides associations between service endpoints
-and policies that are already stored in the Identity server and referenced by
-a policy ID.
-
-.. NOTE:: Support status for Endpoint Policy
-
- *Experimental* (Juno)
- *Stable* (Kilo)
-
-.. toctree::
- :maxdepth: 1
-
- extensions/endpoint_policy.rst
-
-* `API Specification for Endpoint Policy <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-endpoint-policy.html>`__
-
--------
-Inherit
--------
-
-The Inherit extension provides the ability for projects to inherit role
-assignments from their owning domain, or from projects higher in the
-hierarchy.
-
-.. NOTE:: Support status for Inherit
-
- *Experimental* (Havava, Icehouse)
- *Stable* (Juno)
-
-* `API Specification for Inherit <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-inherit-ext.html>`__
-
-----------
-OAuth 1.0a
-----------
-
-The OAuth 1.0a extension provides the ability for Identity users to delegate
-roles to third party consumers via the OAuth 1.0a specification.
-
-.. NOTE:: Support status for OAuth 1.0a
-
- *Experimental* (Havana, Icehouse)
- *Stable* (Juno)
-
-.. toctree::
- :maxdepth: 1
-
- extensions/oauth1.rst
-
-* `API Specification for OAuth 1.0a <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-oauth1-ext.html>`__
-
------------------
-Revocation Events
------------------
-
-The Revocation Events extension provides a list of token revocations. Each
-event expresses a set of criteria which describes a set of tokens that are
-no longer valid.
-
-.. NOTE:: Support status for Revocation Events
-
- *Experimental* (Juno)
- *Stable* (Kilo)
-
-.. toctree::
- :maxdepth: 1
-
- extensions/revoke.rst
-
-* `API Specification for Revocation Events <http://specs.openstack.org/openstack/keystone-specs/api/v3/identity-api-v3-os-revoke-ext.html>`__
diff --git a/keystone-moon/doc/source/federation/shibboleth.rst b/keystone-moon/doc/source/federation/shibboleth.rst
index d67cfa1a..b82bd703 100644
--- a/keystone-moon/doc/source/federation/shibboleth.rst
+++ b/keystone-moon/doc/source/federation/shibboleth.rst
@@ -48,10 +48,13 @@ a *<Location>* directive for each identity provider::
ShibRequestSetting requireSession 1
ShibRequestSetting applicationId idp_1
AuthType shibboleth
- ShibRequireAll On
- ShibRequireSession On
ShibExportAssertion Off
Require valid-user
+
+ <IfVersion < 2.4>
+ ShibRequireSession On
+ ShibRequireAll On
+ </IfVersion>
</Location>
.. NOTE::
@@ -61,7 +64,7 @@ a *<Location>* directive for each identity provider::
The same name is used inside the shibboleth2.xml configuration file but they could
be different.
* The ``ShibRequireSession`` and ``ShibRequireAll`` rules are invalid in
- Apache 2.4+ and should be dropped in that specific setup.
+ Apache 2.4+.
* You are advised to carefully examine `Shibboleth Apache configuration
documentation
<https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPApacheConfig>`_
@@ -265,7 +268,7 @@ environment variable is present so make sure Shibboleth doesn't set the
``/etc/shibboleth/shibboleth2.xml`` configuration file and remove the
``REMOTE_USER`` directives.
-Examine your attributes map file ``/etc/shibboleth/attributes-map.xml`` and adjust
+Examine your attributes map file ``/etc/shibboleth/attribute-map.xml`` and adjust
your requirements if needed. For more information see
`attributes documentation <https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPAddAttribute>`_
diff --git a/keystone-moon/doc/source/http-api.rst b/keystone-moon/doc/source/http-api.rst
index a31b5e69..3b915881 100644
--- a/keystone-moon/doc/source/http-api.rst
+++ b/keystone-moon/doc/source/http-api.rst
@@ -73,14 +73,14 @@ method:
.. code-block:: ini
[app:service_v3]
- paste.app_factory = keystone.service:v3_app_factory
+ use = egg:keystone#service_v3
Then define a v3 pipeline, which terminates with the v3 application you defined
above:
.. code-block:: ini
- [app:app_v3]
+ [pipeline:api_v3]
pipeline = ... service_v3
Replace "..." with whatever middleware you'd like to run in front of the API
diff --git a/keystone-moon/doc/source/index.rst b/keystone-moon/doc/source/index.rst
index 511bc89f..00e55176 100644
--- a/keystone-moon/doc/source/index.rst
+++ b/keystone-moon/doc/source/index.rst
@@ -47,7 +47,7 @@ Getting Started
.. toctree::
:maxdepth: 1
- setup
+ devref/development.environment
installing
configuration
policy_mapping
@@ -55,6 +55,7 @@ Getting Started
mapping_combinations
mapping_schema
configure_tokenless_x509
+ auth-totp
configuringservices
extensions
key_terms
@@ -80,11 +81,20 @@ Developers Documentation
middlewarearchitecture
http-api
api_curl_examples
- cli_examples
apache-httpd
external-auth
event_notifications
- extension_development
+ services
+ online_schema_migration_examples
+
+
+Sample Configuration File
+=========================
+
+.. toctree::
+ :maxdepth: 1
+
+ sample_config
Code Documentation
==================
diff --git a/keystone-moon/doc/source/installing.rst b/keystone-moon/doc/source/installing.rst
index e38663b7..b00ac6d2 100644
--- a/keystone-moon/doc/source/installing.rst
+++ b/keystone-moon/doc/source/installing.rst
@@ -21,7 +21,7 @@ Installing Keystone
This document describes how to install Keystone in order to use it. If you are
intending to develop on or with Keystone, please read :doc:`developing` and
-:doc:`setup`.
+:doc:`devref/development.environment`.
Installing from Source
----------------------
@@ -49,7 +49,14 @@ Install the Keystone web service:
.. code-block:: bash
- $ python setup.py install
+ $ pip install .
+
+.. NOTE::
+
+ This step is guaranteed to fail if you do not have the proper binary
+ dependencies already installed on your development system. Maintaining a
+ list of platform-specific dependencies is outside the scope of this
+ documentation, but is within scope of DEVSTACK_.
You should have all the pieces you need to run Keystone installed on your
system. The following commands should be available on the command-line path:
@@ -92,8 +99,7 @@ bash script called `lib/keystone`_
Installing from packages: Ubuntu
--------------------------------
-Ubuntu is providing packages for Keystone for Precise. To install keystone
-on Ubuntu:
+To install keystone on Ubuntu:
.. code-block:: bash
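
   $ sudo apt-get install keystone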
@@ -101,9 +107,8 @@ on Ubuntu:
In using Ubuntu's packages, the packages will set up a user account for
the Keystone service (`keystone`), and place default configurations in
-``/etc/keystone``. The Debian installer will also ask you about configuration
-options for setting up and running Keystone. As of this writing, the defaults
-for Keystone backends are all SQL based, stored locally in a SQLite.
+``/etc/keystone``. As of this writing, the defaults for Keystone backends are
+all SQL based, stored locally in SQLite.
Once installed, you still need to initialize data in Keystone, which you can
find described in :doc:`configuringservices`.
@@ -123,4 +128,4 @@ To install the packages:
Once installed, you still need to initialize data in Keystone, which you can
find described in :doc:`configuringservices`.
-.. _`OpenStack Install Guide`: http://docs.openstack.org/juno/install-guide/install/yum/content/keystone-install.html
+.. _`OpenStack Install Guide`: http://docs.openstack.org/liberty/install-guide-rdo/keystone-install.html
diff --git a/keystone-moon/doc/source/key_terms.rst b/keystone-moon/doc/source/key_terms.rst
index 93aec532..11ae576e 100644
--- a/keystone-moon/doc/source/key_terms.rst
+++ b/keystone-moon/doc/source/key_terms.rst
@@ -28,7 +28,7 @@ globally unique, but only unique to their domain.
Resources
=========
-The Identity portion of keystone includes ``Projects`` and ``Domains``, and
+The Resources portion of keystone includes ``Projects`` and ``Domains``, and
are commonly stored in an SQL backend.
Projects (Tenants)
diff --git a/keystone-moon/doc/source/man/keystone-manage.rst b/keystone-moon/doc/source/man/keystone-manage.rst
index 2ef2d51a..a69cf374 100644
--- a/keystone-moon/doc/source/man/keystone-manage.rst
+++ b/keystone-moon/doc/source/man/keystone-manage.rst
@@ -7,9 +7,9 @@ Keystone Management Utility
---------------------------
:Author: openstack@lists.openstack.org
-:Date: 2015-10-15
+:Date: 2016-04-07
:Copyright: OpenStack Foundation
-:Version: 8.0.0
+:Version: 9.0.0
:Manual section: 1
:Manual group: cloud computing
@@ -40,6 +40,7 @@ Invoking ``keystone-manage`` by itself will give you some usage information.
Available commands:
+* ``bootstrap``: Perform the basic bootstrap process.
* ``db_sync``: Sync the database.
* ``db_version``: Print the current migration version of the database.
* ``domain_config_upload``: Upload domain configuration file.
@@ -47,7 +48,7 @@ Available commands:
* ``fernet_setup``: Setup a Fernet key repository.
* ``mapping_purge``: Purge the identity mapping table.
* ``mapping_engine``: Test your federation mapping rules.
-* ``pki_setup``: Initialize the certificates used to sign tokens.
+* ``pki_setup``: Initialize the certificates used to sign tokens. **deprecated**
* ``saml_idp_metadata``: Generate identity provider metadata.
* ``ssl_setup``: Generate certificates for SSL.
* ``token_flush``: Purge expired tokens.
diff --git a/keystone-moon/doc/source/mapping_combinations.rst b/keystone-moon/doc/source/mapping_combinations.rst
index 9aa411ad..1b275a4a 100644
--- a/keystone-moon/doc/source/mapping_combinations.rst
+++ b/keystone-moon/doc/source/mapping_combinations.rst
@@ -206,12 +206,12 @@ In ``<other_condition>`` shown below, please supply one of the following:
{
"user": {
"name": "{0}"
- },
- "groups": {
- "name": "{1}",
- "domain": {
- "id": "0cd5e9"
- }
+ }
+ },
+ {
+ "groups": "{1}",
+ "domain": {
+ "id": "0cd5e9"
}
}
],
@@ -594,4 +594,57 @@ global username mapping.
]
}
+Keystone to Keystone
+--------------------
+
+Keystone to Keystone federation also utilizes mappings, but has some
+differences.
+
+An attribute file (``/etc/shibboleth/attribute-map.xml``) is used to add
+attributes to the Keystone Identity Provider. Attributes look as follows:
+
+.. code-block:: xml
+
+ <Attribute name="openstack_user" id="openstack_user"/>
+ <Attribute name="openstack_user_domain" id="openstack_user_domain"/>
+
+The Keystone Service Provider must contain a mapping as shown below.
+``openstack_user`` and ``openstack_user_domain`` match the attribute names
+we have in the Identity Provider. It will map any user with the name
+``user1`` or ``admin`` in the ``openstack_user`` attribute, and with
+``Default`` in the ``openstack_user_domain`` attribute, to a group with id
+``abc1234``.
+
+.. code-block:: javascript
+
+ {
+      "rules": [
+ {
+ "local": [
+ {
+ "group": {
+ "id": "abc1234"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "openstack_user",
+ "any_one_of": [
+ "user1",
+ "admin"
+ ]
+ },
+ {
+ "type":"openstack_user_domain",
+ "any_one_of": [
+ "Default"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+The possible attributes that can be used in a mapping are ``openstack_user``,
+``openstack_user_domain``, ``openstack_roles``, ``openstack_project``, and
+``openstack_project_domain``.
diff --git a/keystone-moon/doc/source/online_schema_migration_examples.rst b/keystone-moon/doc/source/online_schema_migration_examples.rst
new file mode 100644
index 00000000..0e0fb399
--- /dev/null
+++ b/keystone-moon/doc/source/online_schema_migration_examples.rst
@@ -0,0 +1,24 @@
+
+====================================
+Online SQL schema migration examples
+====================================
+
+This document links to several examples implementing online SQL schema
+migrations to facilitate simultaneously running OpenStack services in
+different versions with the same DB schema.
+
+
+* Nova `data migration example
+ <http://specs.openstack.org/openstack/nova-specs/specs/kilo/implemented/flavor-from-sysmeta-to-blob.html>`_
+* Nova `data migration enforcement example
+ <https://review.openstack.org/#/c/174480/15/nova/db/sqlalchemy/migrate_repo/versions/291_enforce_flavors_migrated.py>`_
+ of sqlalchemy migrate/deprecated scripts
+* Nova `flavor migration spec
+ <http://specs.openstack.org/openstack/nova-specs/specs/kilo/implemented/flavor-from-sysmeta-to-blob.html>`_
+ example of data migrations in the object layer
+* Cinder `online schema upgrades spec <https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/online-schema-upgrades.html>`_
+ example of migrating a column to a many-to-many relation table
+
+
+For documentation on how to write online migrations, see
+:ref:`Database Schema Migrations <online-migration>`.
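+
+As a rough illustration of the expand/migrate/contract pattern these examples
+share, here is a minimal SQLAlchemy sketch; the table and column names are
+invented for illustration:
+
+.. code-block:: python
+
+    # Sketch of expand -> migrate -> contract with plain SQLAlchemy.
+    import sqlalchemy
+
+    engine = sqlalchemy.create_engine("sqlite://")
+
+    with engine.begin() as conn:
+        conn.execute(sqlalchemy.text(
+            "CREATE TABLE flavor (id INTEGER PRIMARY KEY, name TEXT)"))
+        conn.execute(sqlalchemy.text(
+            "INSERT INTO flavor (name) VALUES ('m1.tiny')"))
+
+        # Expand: an additive, backward-compatible change that old code
+        # can safely ignore.
+        conn.execute(sqlalchemy.text(
+            "ALTER TABLE flavor ADD COLUMN extra_specs_json TEXT"))
+
+        # Migrate: backfill existing rows while old and new services run
+        # side by side.
+        conn.execute(sqlalchemy.text(
+            "UPDATE flavor SET extra_specs_json = '{}' "
+            "WHERE extra_specs_json IS NULL"))
+
+        # Contract: dropping the old representation happens in a later
+        # migration, once no running service still reads it.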
diff --git a/keystone-moon/doc/source/policy_mapping.rst b/keystone-moon/doc/source/policy_mapping.rst
index 9b11efd6..2d3cd60a 100644
--- a/keystone-moon/doc/source/policy_mapping.rst
+++ b/keystone-moon/doc/source/policy_mapping.rst
@@ -73,12 +73,26 @@ identity:create_role POST /v3/roles
identity:update_role PATCH /v3/roles/{role_id}
identity:delete_role DELETE /v3/roles/{role_id}
+identity:get_domain_role GET /v3/roles/{role_id} where role.domain_id is not null
+identity:list_domain_roles GET /v3/roles?domain_id where role.domain_id is not null
+identity:create_domain_role POST /v3/roles where role.domain_id is not null
+identity:update_domain_role PATCH /v3/roles/{role_id} where role.domain_id is not null
+identity:delete_domain_role DELETE /v3/roles/{role_id} where role.domain_id is not null
+
+identity:get_implied_role GET /v3/roles/{prior_role_id}/implies/{implied_role_id}
+identity:list_implied_roles GET /v3/roles/{prior_role_id}/implies
+identity:create_implied_role PUT /v3/roles/{prior_role_id}/implies/{implied_role_id}
+identity:delete_implied_role DELETE /v3/roles/{prior_role_id}/implies/{implied_role_id}
+identity:list_role_inference_rules GET /v3/role_inferences
+identity:check_implied_role HEAD /v3/roles/{prior_role_id}/implies/{implied_role_id}
+
identity:check_grant GET `grant_resources`_
identity:list_grants GET `grant_collections`_
identity:create_grant PUT `grant_resources`_
identity:revoke_grant DELETE `grant_resources`_
identity:list_role_assignments GET /v3/role_assignments
+identity:list_role_assignments_for_tree GET /v3/role_assignments?include_subtree
identity:get_policy GET /v3/policy/{policy_id}
identity:list_policies GET /v3/policy
@@ -185,7 +199,9 @@ identity:update_domain_config - PATCH /v3/domains/{
identity:delete_domain_config - DELETE /v3/domains/{domain_id}/config
- DELETE /v3/domains/{domain_id}/config/{group}
- DELETE /v3/domains/{domain_id}/config/{group}/{option}
-
+identity:get_domain_config_default - GET /v3/domains/config/default
+ - GET /v3/domains/config/{group}/default
+ - GET /v3/domains/config/{group}/{option}/default
========================================================= ===
.. _grant_resources:
diff --git a/keystone-moon/doc/source/sample_config.rst b/keystone-moon/doc/source/sample_config.rst
new file mode 100644
index 00000000..b170f848
--- /dev/null
+++ b/keystone-moon/doc/source/sample_config.rst
@@ -0,0 +1,12 @@
+==============================
+Keystone Configuration Options
+==============================
+
+The following is a sample keystone configuration for adaptation and use. It is
+auto-generated from keystone when this documentation is built, so if you are
+having issues with an option, please compare your version of keystone with the
+version of this documentation.
+
+The sample configuration can also be viewed in `file form <_static/keystone.conf.sample>`_.
+
+.. literalinclude:: _static/keystone.conf.sample
diff --git a/keystone-moon/doc/source/services.rst b/keystone-moon/doc/source/services.rst
new file mode 100644
index 00000000..2c71e450
--- /dev/null
+++ b/keystone-moon/doc/source/services.rst
@@ -0,0 +1,200 @@
+===========================
+Keystone for other services
+===========================
+
+This document provides a summary of some things that other services need to
+know about how keystone works, and specifically about how they can take
+advantage of the v3 API.
+
+The v3 API was introduced as a stable API in the Grizzly release and has been
+included in the default pipeline ever since. Until recently, its use was
+hidden from other services because the ``auth_token`` middleware translated
+the token format so that both versions look the same. Once services need to
+make use of v3 features, they need to know how it works.
+
+
+Glossary
+========
+
+Service
+ OpenStack services like identity, compute, image, etc.
+
+Project
+ A project provides namespace and resource isolation for groups of OpenStack
+ entities. Users must be assigned a role on a project in order to interact
+ with it. Prior to the introduction of the v3 API, projects were referred to
+ as tenants and the term is still used in reference to the v2.0 API.
+
+
+Domains
+=======
+
+A major new feature in v3 is domains. Every project, user, and user group is
+owned by a domain (reflected by their ``domain_id`` value) which provides them
+their own namespace. For example, unlike in v2.0, usernames are no longer
+unique across the deployment. You can have two users with the same name, but
+they must be in different domains. However, user IDs are assigned to users by
+keystone and are expected to be unique across the deployment. All of this logic
+applies to both projects and user groups as well. Note that roles are *not*
+namespaced by domains.
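+
+For example, here is a minimal sketch using ``keystoneauth1`` and the v3
+``python-keystoneclient`` (the URL, credentials, and names below are
+placeholders; admin rights are required):
+
+.. code-block:: python
+
+    # Two users may share a name as long as their domains differ.
+    from keystoneauth1.identity import v3
+    from keystoneauth1 import session
+    from keystoneclient.v3 import client
+
+    auth = v3.Password(
+        auth_url="http://keystone.example.com:5000/v3",
+        username="admin", password="secret", user_domain_id="default",
+        project_name="admin", project_domain_id="default",
+    )
+    ks = client.Client(session=session.Session(auth=auth))
+
+    other = ks.domains.create(name="engineering")
+    u1 = ks.users.create(name="alice", domain="default", password="pw1")
+    u2 = ks.users.create(name="alice", domain=other, password="pw2")
+    print(u1.id != u2.id)  # True: same name, different domains, unique IDs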
+
+One of the great things about domains is that you can have one domain backed
+by SQL (for service users) and another backed by LDAP (for example, when the
+cloud is deployed into existing infrastructure).
+
+The "default" domain
+====================
+
+Conventionally the "default" domain has a domain ID of ``default`` and a domain
+name of ``Default``. It is created by ``keystone-manage db_sync`` and thus
+should always exist, although the domain ID is configurable in
+``keystone.conf`` and the domain name is mutable through the v3 API.
+
+Because only the v3 API is domain-aware, we must work to avoid perceived
+namespace conflicts to v2.0 clients. The solution to this is to have a single
+domain serve as the implied namespace for all user and tenant references in
+v2.0. Thus, v2.0 clients can continue to be domain-unaware and avoid the
+security risk posed by potential namespace conflicts. *This is the only purpose
+of the default domain.*
+
+For example, I could otherwise create a domain in v3, create a user in that
+domain called "admin", authenticate using v2.0, and a domain-unaware v2.0
+client might assume I'm the same "admin" user it has seen before and grant me
+escalated privileges. Instead, users outside of the default domain simply
+cannot authenticate against v2.0, nor can such tokens with references to users
+and projects outside the default domain be validated on the v2.0 API.
+
+From a v2.0 client's perspective, there's no way to specify the domain, so v2.0
+operations implicitly work against the default domain. So if your client is
+only capable of using v2.0 and you need to get a token, then you can only get
+tokens for users and tenants (projects) in the default domain. In the real
+world, this means that if your default domain is backed by SQL and you have a
+separate domain for LDAP users, then you can't authenticate as an LDAP user
+using v2.0. Conversely, if your default domain is backed by a read-only LDAP
+driver, then you won't be able to create the service users using v2.0 clients
+because any SQL-backed domain is unreachable.
+
+From a v3 client's perspective, the default domain is not special, other than
+the fact that such a domain can generally be assumed to exist (assuming the
+deployment is also running the v2.0 API). It would be reasonable for a v3
+client to assume a default user domain ID of ``default`` and a default project
+domain ID of ``default`` unless overridden by more specific configuration.
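+
+For instance, a v3 client built on ``keystoneauth1`` might assume both
+defaults as follows (the URL and credentials are placeholders):
+
+.. code-block:: python
+
+    # Sketch of a v3 client assuming the conventional "default" domain IDs.
+    from keystoneauth1.identity import v3
+    from keystoneauth1 import session
+
+    auth = v3.Password(
+        auth_url="http://keystone.example.com:5000/v3",
+        username="demo",
+        password="secret",
+        user_domain_id="default",     # assumed unless configured otherwise
+        project_name="demo",
+        project_domain_id="default",  # assumed unless configured otherwise
+    )
+    sess = session.Session(auth=auth)
+    print(sess.get_token())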
+
+To summarize, avoiding namespace conflicts in the v2.0 API is achieved by
+limiting the v2.0 API and its clients to working with users and projects which
+are namespaced by a single, arbitrary domain in v3.
+
+Token differences
+=================
+
+The keystone service runs both v2.0 and v3, where v2.0 requests go to the
+``/v2.0`` endpoint and v3 requests go to the ``/v3`` endpoint. If you're using
+the default pipeline that ships with keystone, then you don't need to "enable"
+the v3 API in keystone, as it runs by default as a parallel pipeline to the
+v2.0 API.
+
+If you get a token using the v2.0 API, then you can use it to do v3 operations
+(such as list users). The reverse, using a v3 token against v2.0, is possible
+only in certain circumstances. For example, if you're using a project-scoped
+token wherein the user and project are both owned by the "default" domain,
+everything will work. Otherwise, token validation against the v2.0 API will
+fail.
+
+You can get a v2.0 token using ``POST /v2.0/tokens``. You can get a v3 token
+using ``POST /v3/auth/tokens``. Note that the responses are significantly
+different. For example, the service catalog is in a different format, and the
+v3 token conveys additional context (such as the user's domain and the
+project's domain).
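+
+As an illustration, here is a raw v3 token request using python-requests
+(endpoint and credentials are placeholders); note that the token itself is
+returned in the ``X-Subject-Token`` response header, not in the body:
+
+.. code-block:: python
+
+    # Sketch of POST /v3/auth/tokens for a project-scoped token.
+    import requests
+
+    body = {
+        "auth": {
+            "identity": {
+                "methods": ["password"],
+                "password": {
+                    "user": {
+                        "name": "demo",
+                        "domain": {"id": "default"},
+                        "password": "secret",
+                    }
+                },
+            },
+            "scope": {
+                "project": {"name": "demo", "domain": {"id": "default"}},
+            },
+        }
+    }
+
+    resp = requests.post(
+        "http://keystone.example.com:5000/v3/auth/tokens", json=body)
+    print(resp.headers["X-Subject-Token"])   # the token itself
+    print(resp.json()["token"]["catalog"])   # the v3-format catalog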
+
+Domain-scoped tokens
+--------------------
+
+Domain-scoped tokens are scoped to a domain rather than a project. These are
+useful for operating against keystone but are generally useless in other
+services that don't have use cases for domain-level operations. Unless a
+service has a real case for handling such authorization, it doesn't need to
+concern itself with domain-scoped tokens.
+
+
+Auth Token middleware
+=====================
+
+The ``auth_token`` middleware handles token validation for the different
+services. Conceptually, what happens is that ``auth_token`` pulls the token out
+of the ``X-Auth-Token`` request header, validates the token using keystone,
+produces information about the identity (the API user) and authorization
+context (the project, roles, etc) of the token, and sets environment variables
+with that data. The services typically take the environment variables, put them
+in the service's "context", and use the context for policy enforcement via
+``oslo.policy``.
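+
+Conceptually, a service behind ``auth_token`` sees something like the
+following simplified WSGI sketch (real services build richer context
+objects; the header names below are among those ``auth_token`` sets):
+
+.. code-block:: python
+
+    # Simplified sketch of consuming the data auth_token provides.
+    def application(environ, start_response):
+        # auth_token validates X-Auth-Token and sets these environ keys.
+        if environ.get("HTTP_X_IDENTITY_STATUS") != "Confirmed":
+            start_response("401 Unauthorized",
+                           [("Content-Type", "text/plain")])
+            return [b"invalid token"]
+
+        context = {
+            "user_id": environ.get("HTTP_X_USER_ID"),
+            "project_id": environ.get("HTTP_X_PROJECT_ID"),
+            "roles": (environ.get("HTTP_X_ROLES") or "").split(","),
+        }
+        # The context would normally feed oslo.policy enforcement here.
+        start_response("200 OK", [("Content-Type", "text/plain")])
+        return [("hello %s" % context["user_id"]).encode("utf-8")]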
+
+Service tokens
+--------------
+
+Service tokens are a feature where the ``auth_token`` middleware will also
+accept a service token in the ``X-Service-Token`` header. It validates the
+service token just like the user token, but the results are passed separately
+in environment variables for the service token (the service user, project,
+and roles). If the service knows about these, then it can put this info in
+its "context" and use it for policy checks. For example, assuming there's a
+special policy rule called ``service_role`` that works like the ``role`` rule
+except that it checks the service roles, you could have an ``oslo.policy``
+rule like ``service_role:service and user_id:%(user_id)s`` such that a
+service token is required along with the user owning the object.
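+
+A minimal ``oslo.policy`` sketch of a rule in that spirit follows. Note that
+``service_role`` is hypothetical; here the service role is checked with the
+standard ``role`` check against a creds dict that a real service would build
+from the service-token headers:
+
+.. code-block:: python
+
+    # Sketch: require a "service" role plus ownership of the object.
+    from oslo_config import cfg
+    from oslo_policy import policy
+
+    enforcer = policy.Enforcer(
+        cfg.CONF,
+        rules=policy.Rules.from_dict(
+            {"update_thing": "role:service and user_id:%(user_id)s"}),
+        use_conf=False,
+    )
+
+    target = {"user_id": "u123"}                       # the object's owner
+    creds = {"user_id": "u123", "roles": ["service"]}  # from service token
+
+    print(enforcer.enforce("update_thing", target, creds))  # True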
+
+v2.0 or v3?
+-----------
+
+By default, the ``auth_token`` middleware will use discovery to determine the
+best available API to use, or can be explicitly configured to use either v2.0
+or v3. When discovery is used, ``auth_token`` will use v3 if keystone reports
+that v3 is available. If ``auth_token`` is configured to use v2.0, then it will
+fail when it receives a v3 token wherein the user is not in the default domain
+(for example, the domain that heat creates users in). So if at all possible,
+the ``auth_token`` middleware should be allowed to use v3.
+
+Additionally, as other services begin to utilize features which are only found
+in the v3 API, you'll need to use the v3 API in order to utilize those
+services. For example, heat creates users in an isolated domain, and thus
+requires the v3 API.
+
+Do this, not that
+=================
+
+Config options for authentication
+---------------------------------
+
+If you need to get a token, don't define your own options for username and
+password and then get a token using v2.0. We've got an interface for using
+authentication plugins that supports v2.0 or v3 and potentially other
+authentication mechanisms (X.509 client certs!), as sketched below.
+
+If your config file doesn't include the user's domain, the service isn't
+going to be able to use v3 for authentication.
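+
+With ``keystoneauth1``, the plugin and all of its options (including the
+user's domain) can be loaded straight from a config section instead; the
+group name ``service_auth`` and the config path below are illustrative:
+
+.. code-block:: python
+
+    # Sketch of loading an auth plugin from oslo.config options.
+    from keystoneauth1 import loading
+    from oslo_config import cfg
+
+    CONF = cfg.CONF
+    loading.register_auth_conf_options(CONF, "service_auth")
+    loading.register_session_conf_options(CONF, "service_auth")
+    CONF(["--config-file", "/etc/myservice/myservice.conf"])
+
+    auth = loading.load_auth_from_conf_options(CONF, "service_auth")
+    sess = loading.load_session_from_conf_options(
+        CONF, "service_auth", auth=auth)
+    print(sess.get_token())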
+
+Picking the version
+-------------------
+
+Use version discovery to figure out what version the identity server supports
+rather than configuring the version.
+
+Use OpenStack CLI not keystone CLI
+----------------------------------
+
+The keystone CLI is deprecated and will be removed soon. The `OpenStack CLI
+<http://docs.openstack.org/developer/python-openstackclient/>`_ has all the
+keystone CLI commands and even supports v3.
+
+
+Hierarchical Multitenancy
+=========================
+
+This feature allows maintenance of a hierarchy of projects with "parent"
+projects operating as domains.
+
+The token format is the same (the token doesn't contain any info about the
+hierarchy). If the service needs to know the hierarchy, it will have to use
+the v3 API to fetch it (see the sketch below).
+
+While you can't use v2.0 to set up the hierarchy, you can get a v2.0 token
+scoped to a project that's part of a hierarchy.
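+
+For example, a service can ask the v3 API for a project's ancestors and
+descendants directly (endpoint, token, and project ID are placeholders):
+
+.. code-block:: python
+
+    # Sketch of fetching a project's hierarchy via the v3 API.
+    import requests
+
+    resp = requests.get(
+        "http://keystone.example.com:5000/v3/projects/PROJECT_ID"
+        "?parents_as_list&subtree_as_list",
+        headers={"X-Auth-Token": "TOKEN"},
+    )
+    project = resp.json()["project"]
+    print(project.get("parents"))  # ancestor projects
+    print(project.get("subtree"))  # descendant projects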
diff --git a/keystone-moon/etc/default_catalog.templates b/keystone-moon/etc/default_catalog.templates
index a69b7f06..e885b52e 100644
--- a/keystone-moon/etc/default_catalog.templates
+++ b/keystone-moon/etc/default_catalog.templates
@@ -6,22 +6,37 @@ catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.name = Identity Service
# fake compute service for now to help novaclient tests work
-catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
-catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
-catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
-catalog.RegionOne.compute.name = Compute Service
+catalog.RegionOne.computev21.publicURL = http://localhost:8774/v2.1/$(tenant_id)s
+catalog.RegionOne.computev21.adminURL = http://localhost:8774/v2.1/$(tenant_id)s
+catalog.RegionOne.computev21.internalURL = http://localhost:8774/v2.1/$(tenant_id)s
+catalog.RegionOne.computev21.name = Compute Service V2.1
-catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.name = Volume Service
+catalog.RegionOne.volumev2.publicURL = http://localhost:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.adminURL = http://localhost:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.internalURL = http://localhost:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.name = Volume Service V2
catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.name = EC2 Service
-catalog.RegionOne.image.publicURL = http://localhost:9292/v1
-catalog.RegionOne.image.adminURL = http://localhost:9292/v1
-catalog.RegionOne.image.internalURL = http://localhost:9292/v1
+catalog.RegionOne.image.publicURL = http://localhost:9292
+catalog.RegionOne.image.adminURL = http://localhost:9292
+catalog.RegionOne.image.internalURL = http://localhost:9292
catalog.RegionOne.image.name = Image Service
+
+catalog.RegionOne.network.publicURL = http://localhost:9696
+catalog.RegionOne.network.adminURL = http://localhost:9696
+catalog.RegionOne.network.internalURL = http://localhost:9696
+catalog.RegionOne.network.name = Network Service
+
+catalog.RegionOne.orchestration.publicURL = http://localhost:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.adminURL = http://localhost:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.internalURL = http://localhost:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.name = Orchestration Service
+
+catalog.RegionOne.metering.publicURL = http://localhost:8777
+catalog.RegionOne.metering.adminURL = http://localhost:8777
+catalog.RegionOne.metering.internalURL = http://localhost:8777
+catalog.RegionOne.metering.name = Telemetry Service
diff --git a/keystone-moon/etc/keystone-paste.ini b/keystone-moon/etc/keystone-paste.ini
index 70db3823..4f3b0a28 100644
--- a/keystone-moon/etc/keystone-paste.ini
+++ b/keystone-moon/etc/keystone-paste.ini
@@ -1,10 +1,10 @@
# Keystone PasteDeploy configuration file.
[filter:debug]
-use = egg:keystone#debug
+use = egg:oslo.middleware#debug
[filter:request_id]
-use = egg:keystone#request_id
+use = egg:oslo.middleware#request_id
[filter:build_auth_context]
use = egg:keystone#build_auth_context
@@ -13,16 +13,16 @@ use = egg:keystone#build_auth_context
use = egg:keystone#token_auth
[filter:admin_token_auth]
+# This is deprecated in the M release and will be removed in the O release.
+# Use `keystone-manage bootstrap` and remove this from the pipelines below.
use = egg:keystone#admin_token_auth
[filter:json_body]
use = egg:keystone#json_body
-[filter:user_crud_extension]
-use = egg:keystone#user_crud_extension
-
-[filter:crud_extension]
-use = egg:keystone#crud_extension
+[filter:cors]
+use = egg:oslo.middleware#cors
+oslo_config_project = keystone
[filter:ec2_extension]
use = egg:keystone#ec2_extension
@@ -30,29 +30,14 @@ use = egg:keystone#ec2_extension
[filter:ec2_extension_v3]
use = egg:keystone#ec2_extension_v3
-[filter:federation_extension]
-use = egg:keystone#federation_extension
-
-[filter:oauth1_extension]
-use = egg:keystone#oauth1_extension
-
[filter:s3_extension]
use = egg:keystone#s3_extension
-[filter:endpoint_filter_extension]
-use = egg:keystone#endpoint_filter_extension
-
-[filter:simple_cert_extension]
-use = egg:keystone#simple_cert_extension
-
-[filter:revoke_extension]
-use = egg:keystone#revoke_extension
-
[filter:url_normalize]
use = egg:keystone#url_normalize
[filter:sizelimit]
-use = egg:keystone#sizelimit
+use = egg:oslo.middleware#sizelimit
[app:public_service]
use = egg:keystone#public_service
@@ -66,17 +51,17 @@ use = egg:keystone#admin_service
[pipeline:public_api]
# The last item in this pipeline must be public_service or an equivalent
# application. It cannot be a filter.
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension user_crud_extension public_service
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension public_service
[pipeline:admin_api]
# The last item in this pipeline must be admin_service or an equivalent
# application. It cannot be a filter.
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension s3_extension crud_extension admin_service
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension s3_extension admin_service
[pipeline:api_v3]
# The last item in this pipeline must be service_v3 or an equivalent
# application. It cannot be a filter.
-pipeline = sizelimit url_normalize request_id build_auth_context token_auth admin_token_auth json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension federation_extension oauth1_extension endpoint_filter_extension service_v3
+pipeline = cors sizelimit url_normalize request_id admin_token_auth build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
[app:public_version_service]
use = egg:keystone#public_version_service
@@ -85,10 +70,10 @@ use = egg:keystone#public_version_service
use = egg:keystone#admin_version_service
[pipeline:public_version_api]
-pipeline = sizelimit url_normalize public_version_service
+pipeline = cors sizelimit url_normalize public_version_service
[pipeline:admin_version_api]
-pipeline = sizelimit url_normalize admin_version_service
+pipeline = cors sizelimit url_normalize admin_version_service
[composite:main]
use = egg:Paste#urlmap
diff --git a/keystone-moon/etc/keystone.conf.sample b/keystone-moon/etc/keystone.conf.sample
index 8e5ea13b..cce0876a 100644
--- a/keystone-moon/etc/keystone.conf.sample
+++ b/keystone-moon/etc/keystone.conf.sample
@@ -5,11 +5,12 @@
#
# A "shared secret" that can be used to bootstrap Keystone. This "token" does
-# not represent a user, and carries no explicit authorization. To disable in
-# production (highly recommended), remove AdminTokenAuthMiddleware from your
-# paste application pipelines (for example, in keystone-paste.ini). (string
-# value)
-#admin_token = ADMIN
+# not represent a user, and carries no explicit authorization. If set to
+# `None`, the value is ignored and the `admin_token` log in mechanism is
+# effectively disabled. To completely disable `admin_token` in production
+# (highly recommended), remove AdminTokenAuthMiddleware from your paste
+# application pipelines (for example, in keystone-paste.ini). (string value)
+#admin_token = <None>
# The base public endpoint URL for Keystone that is advertised to clients
# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
@@ -27,8 +28,9 @@
# found on a different server. (string value)
#admin_endpoint = <None>
-# Maximum depth of the project hierarchy. WARNING: setting it to a large value
-# may adversely impact performance. (integer value)
+# Maximum depth of the project hierarchy, excluding the project acting as a
+# domain at the top of the hierarchy. WARNING: setting it to a large value may
+# adversely impact performance. (integer value)
#max_project_tree_depth = 5
# Limit the sizes of user & project ID/names. (integer value)
@@ -64,7 +66,10 @@
# project entities to be moved between domains by updating their domain_id.
# Allowing such movement is not recommended if the scope of a domain admin is
# being restricted by use of an appropriate policy file (see
-# policy.v3cloudsample as an example). (boolean value)
+# policy.v3cloudsample as an example). This ability is deprecated and will be
+# removed in a future release. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#domain_id_immutable = true
# If set to true, strict password length checking is performed for password
@@ -74,9 +79,14 @@
#strict_password_check = false
# The HTTP header used to determine the scheme for the original request, even
-# if it was removed by an SSL terminating proxy. Typical value is
-# "HTTP_X_FORWARDED_PROTO". (string value)
-#secure_proxy_ssl_header = <None>
+# if it was removed by an SSL terminating proxy. (string value)
+#secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
+
+# If set to true the server will return information in the response that may
+# allow an unauthenticated or authenticated user to get more information than
+# normal, such as why authentication failed. This may be useful for debugging
+# but is insecure. (boolean value)
+#insecure_debug = false
#
# From keystone.notifications
@@ -92,78 +102,93 @@
# Allowed values: basic, cadf
#notification_format = basic
+# Define the notification options to opt-out from. The value expected is:
+# identity.<resource_type>.<operation>. This field can be set multiple times in
+# order to add more notifications to opt-out from. For example:
+# notification_opt_out=identity.user.created
+# notification_opt_out=identity.authenticate.success (multi valued)
+#notification_opt_out =
+
#
# From oslo.log
#
-# Print debugging output (set logging level to DEBUG instead of default INFO
-# level). (boolean value)
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
#debug = false
-# If set to false, will disable INFO logging level, making WARNING the default.
-# (boolean value)
+# If set to false, the logging level will be set to WARNING instead of the
+# default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
-# files, see the Python logging module documentation. (string value)
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
-# DEPRECATED. A logging.Formatter log message format string which may use any
-# of the available logging.LogRecord attributes. This option is deprecated.
-# Please use logging_context_format_string and logging_default_format_string
-# instead. (string value)
-#log_format = <None>
-
-# Format string for %%(asctime)s in log records. Default: %(default)s . (string
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
-# (Optional) Name of log file to output to. If no default is set, logging will
-# go to stdout. (string value)
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
-# (Optional) The base directory used for relative --log-file paths. (string
-# value)
+# (Optional) The base directory used for relative log_file paths. This option
+# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
-# changed later to honor RFC5424. (boolean value)
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
#use_syslog = false
-# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled,
-# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The
-# format without the APP-NAME is deprecated in Kilo, and will be removed in
-# Mitaka, along with this option. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#use_syslog_rfc_format = true
-
-# Syslog facility to receive log lines. (string value)
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
-# Log output to standard error. (boolean value)
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-# Format string to use for log messages without context. (string value)
+# Format string to use for log messages when context is undefined. (string
+# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-# Data to append to log format when level is DEBUG. (string value)
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
-# List of logger=LEVEL pairs. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -192,10 +217,11 @@
#rpc_zmq_bind_address = *
# MatchMaker driver. (string value)
-#rpc_zmq_matchmaker = local
+# Allowed values: redis, dummy
+#rpc_zmq_matchmaker = redis
-# ZeroMQ receiver listening port. (integer value)
-#rpc_zmq_port = 9501
+# Type of concurrency used. Either "native" or "eventlet". (string value)
+#rpc_zmq_concurrency = eventlet
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts = 1
@@ -211,28 +237,42 @@
# "host" option, if running Nova. (string value)
#rpc_zmq_host = localhost
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# (integer value)
-#rpc_cast_timeout = 30
+# Seconds to wait before a cast expires (TTL). The default value of -1
+# specifies an infinite linger period. The value of 0 specifies no linger
+# period. Pending messages shall be discarded immediately when the socket is
+# closed. Only supported by impl_zmq. (integer value)
+#rpc_cast_timeout = -1
-# Heartbeat frequency. (integer value)
-#matchmaker_heartbeat_freq = 300
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
-# Heartbeat time-to-live. (integer value)
-#matchmaker_heartbeat_ttl = 600
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 120
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = true
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49152
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
# Size of executor thread pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
#executor_thread_pool_size = 64
-# The Drivers(s) to handle sending notifications. Possible values are
-# messaging, messagingv2, routing, log, test, noop (multi valued)
-#notification_driver =
-
-# AMQP topic used for OpenStack notifications. (list value)
-# Deprecated group/name - [rpc_notifier2]/topics
-#notification_topics = notifications
-
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout = 60
@@ -241,7 +281,7 @@
# configuration. (string value)
#transport_url = <None>
-# The messaging driver to use, defaults to rabbit. Other drivers include qpid
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
# and zmq. (string value)
#rpc_backend = rabbit
@@ -261,10 +301,20 @@
# The chosen port is displayed in the service's log file. (string value)
#backdoor_port = <None>
+# Enable eventlet backdoor, using the provided path as a unix socket that can
+# receive connections. This option is mutually exclusive with 'backdoor_port'
+# in that only one should be provided. If both are provided then the existence
+# of this option overrides the usage of that option. (string value)
+#backdoor_socket = <None>
+
# Enables or disables logging values of all registered options when starting a
# service (at DEBUG level). (boolean value)
#log_options = true
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
[assignment]
@@ -273,11 +323,16 @@
#
# Entrypoint for the assignment backend driver in the keystone.assignment
-# namespace. Supplied drivers are ldap and sql. If an assignment driver is not
-# specified, the identity driver will choose the assignment driver. (string
-# value)
+# namespace. Only an SQL driver is supplied. If an assignment driver is not
+# specified, the identity driver will choose the assignment driver (driver
+# selection based on `[identity]/driver` option is deprecated and will be
+# removed in the "O" release). (string value)
#driver = <None>
+# A list of role names which are prohibited from being an implied role. (list
+# value)
+#prohibited_implied_role = admin
+
[auth]
@@ -309,13 +364,13 @@
[cache]
#
-# From keystone
+# From oslo.cache
#
# Prefix for building the configuration dictionary for the cache region. This
# should not need to be changed unless there is another dogpile.cache region
# with the same configuration name. (string value)
-#config_prefix = cache.keystone
+#config_prefix = cache.oslo
# Default TTL, in seconds, for any cached item in the dogpile.cache region.
# This applies to any cached method that doesn't have an explicit cache
@@ -323,10 +378,10 @@
#expiration_time = 600
# Dogpile.cache backend module. It is recommended that Memcache with pooling
-# (keystone.cache.memcache_pool) or Redis (dogpile.cache.redis) be used in
+# (oslo_cache.memcache_pool) or Redis (dogpile.cache.redis) be used in
# production deployments. Small workloads (single process) like devstack can
# use the dogpile.cache.memory backend. (string value)
-#backend = keystone.common.cache.noop
+#backend = dogpile.cache.null
# Arguments supplied to the backend module. Specify this option once per
# argument to be passed to the dogpile.cache backend. Example format:
@@ -338,8 +393,7 @@
# (list value)
#proxies =
-# Global toggle for all caching using the should_cache_fn mechanism. (boolean
-# value)
+# Global toggle for caching. (boolean value)
#enabled = false
# Extra debugging from the cache backend (cache keys, get/set/delete/etc
@@ -349,24 +403,24 @@
#debug_cache_backend = false
# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
-# keystone.cache.memcache_pool backends only). (list value)
+# oslo_cache.memcache_pool backends only). (list value)
#memcache_servers = localhost:11211
# Number of seconds memcached server is considered dead before it is tried
-# again. (dogpile.cache.memcache and keystone.cache.memcache_pool backends
-# only). (integer value)
+# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
#memcache_dead_retry = 300
# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
-# keystone.cache.memcache_pool backends only). (integer value)
+# oslo_cache.memcache_pool backends only). (integer value)
#memcache_socket_timeout = 3
# Max total number of open connections to every memcached server.
-# (keystone.cache.memcache_pool backend only). (integer value)
+# (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_maxsize = 10
# Number of seconds a connection to memcached is held unused in the pool before
-# it is closed. (keystone.cache.memcache_pool backend only). (integer value)
+# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_unused_timeout = 60
# Number of seconds that an operation will wait to get a memcache client
@@ -409,7 +463,7 @@
#
# Indicate whether this resource may be shared with the domain received in the
-# requests "origin" header. (string value)
+# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
@@ -417,17 +471,17 @@
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
-#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
-#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
-#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
[cors.subdomain]
@@ -437,7 +491,7 @@
#
# Indicate whether this resource may be shared with the domain received in the
-# requests "origin" header. (string value)
+# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
@@ -445,17 +499,17 @@
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
-#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
-#allow_methods = GET,POST,PUT,DELETE,OPTIONS
+#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
-#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name
[credential]
@@ -534,7 +588,7 @@
# If set, use this value for max_overflow with SQLAlchemy. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow = <None>
+#max_overflow = 50
# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
# value)
@@ -609,6 +663,11 @@
#
# Enable endpoint_policy functionality. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-ENDPOINT-POLICY extension has been
+# deprecated in the M release and will be removed in the O release. The OS-
+# ENDPOINT-POLICY extension will be enabled by default.
#enabled = true
# Entrypoint for the endpoint policy backend driver in the
@@ -644,8 +703,8 @@
# Its value may be silently ignored in the future.
#public_bind_host = 0.0.0.0
-# The port number which the public service listens on. (integer value)
-# Minimum value: 1
+# The port number which the public service listens on. (port value)
+# Minimum value: 0
# Maximum value: 65535
# Deprecated group/name - [DEFAULT]/public_port
# This option is deprecated for removal.
@@ -660,8 +719,8 @@
# Its value may be silently ignored in the future.
#admin_bind_host = 0.0.0.0
-# The port number which the admin service listens on. (integer value)
-# Minimum value: 1
+# The port number which the admin service listens on. (port value)
+# Minimum value: 0
# Maximum value: 65535
# Deprecated group/name - [DEFAULT]/admin_port
# This option is deprecated for removal.
@@ -674,7 +733,7 @@
# Timeout for socket operations on a client connection. If an incoming
# connection is idle for this number of seconds it will be closed. A value of
-# '0' means wait forever. (integer value)
+# "0" means wait forever. (integer value)
#client_socket_timeout = 900
# Set this to true if you want to enable TCP_KEEPALIVE on server sockets, i.e.
@@ -686,7 +745,8 @@
#tcp_keepalive = false
# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Only
-# applies if tcp_keepalive is true. (integer value)
+# applies if tcp_keepalive is true. Ignored if system does not support it.
+# (integer value)
# Deprecated group/name - [DEFAULT]/tcp_keepidle
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
@@ -760,8 +820,8 @@
# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
# to return a token, the origin host must be a member of the trusted_dashboard
# list. This configuration option may be repeated for multiple values. For
-# example: trusted_dashboard=http://acme.com trusted_dashboard=http://beta.com
-# (multi valued)
+# example: trusted_dashboard=http://acme.com/auth/websso
+# trusted_dashboard=http://beta.com/auth/websso (multi valued)
#trusted_dashboard =
# Location of Single Sign-On callback handler, will return a token to a trusted
@@ -866,7 +926,7 @@
# mapping for even the default LDAP driver. It is only safe to do this if you
# do not already have assignments for users and groups from the default LDAP
# domain, and it is acceptable for Keystone to provide the different IDs to
-# clients than it did previously. Typically this means that the only time you
+# clients than it did previously. Typically this means that the only time you
# can set this value to False is when configuring a fresh installation.
# (boolean value)
#backward_compatible_ids = true
@@ -902,7 +962,9 @@
# From keystone
#
-# URL for connecting to the LDAP server. (string value)
+# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified
+# as a comma separated string. The first URL to successfully bind is used for
+# the connection. (string value)
#url = ldap://localhost
# User BindDN to query the LDAP server. (string value)
@@ -965,6 +1027,9 @@
# LDAP attribute mapped to user name. (string value)
#user_name_attribute = sn
+# LDAP attribute mapped to user description. (string value)
+#user_description_attribute = description
+
# LDAP attribute mapped to user email. (string value)
#user_mail_attribute = mail
@@ -1002,12 +1067,24 @@
#user_default_project_id_attribute = <None>
# Allow user creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#user_allow_create = true
# Allow user updates in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#user_allow_update = true
# Allow user deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#user_allow_delete = true
# If true, Keystone uses an alternative method to determine if a user is
@@ -1029,168 +1106,6 @@
# Identity API attribute. (list value)
#user_additional_attribute_mapping =
-# Search base for projects. Defaults to the suffix value. (string value)
-# Deprecated group/name - [ldap]/tenant_tree_dn
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_tree_dn = <None>
-
-# LDAP search filter for projects. (string value)
-# Deprecated group/name - [ldap]/tenant_filter
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_filter = <None>
-
-# LDAP objectclass for projects. (string value)
-# Deprecated group/name - [ldap]/tenant_objectclass
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_objectclass = groupOfNames
-
-# LDAP attribute mapped to project id. (string value)
-# Deprecated group/name - [ldap]/tenant_id_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_id_attribute = cn
-
-# LDAP attribute mapped to project membership for user. (string value)
-# Deprecated group/name - [ldap]/tenant_member_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_member_attribute = member
-
-# LDAP attribute mapped to project name. (string value)
-# Deprecated group/name - [ldap]/tenant_name_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_name_attribute = ou
-
-# LDAP attribute mapped to project description. (string value)
-# Deprecated group/name - [ldap]/tenant_desc_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_desc_attribute = description
-
-# LDAP attribute mapped to project enabled. (string value)
-# Deprecated group/name - [ldap]/tenant_enabled_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_enabled_attribute = enabled
-
-# LDAP attribute mapped to project domain_id. (string value)
-# Deprecated group/name - [ldap]/tenant_domain_id_attribute
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_domain_id_attribute = businessCategory
-
-# List of attributes stripped off the project on update. (list value)
-# Deprecated group/name - [ldap]/tenant_attribute_ignore
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_attribute_ignore =
-
-# Allow project creation in LDAP backend. (boolean value)
-# Deprecated group/name - [ldap]/tenant_allow_create
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_allow_create = true
-
-# Allow project update in LDAP backend. (boolean value)
-# Deprecated group/name - [ldap]/tenant_allow_update
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_allow_update = true
-
-# Allow project deletion in LDAP backend. (boolean value)
-# Deprecated group/name - [ldap]/tenant_allow_delete
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_allow_delete = true
-
-# If true, Keystone uses an alternative method to determine if a project is
-# enabled or not by checking if they are a member of the
-# "project_enabled_emulation_dn" group. (boolean value)
-# Deprecated group/name - [ldap]/tenant_enabled_emulation
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_enabled_emulation = false
-
-# DN of the group entry to hold enabled projects when using enabled emulation.
-# (string value)
-# Deprecated group/name - [ldap]/tenant_enabled_emulation_dn
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_enabled_emulation_dn = <None>
-
-# Use the "group_member_attribute" and "group_objectclass" settings to
-# determine membership in the emulated enabled group. (boolean value)
-#project_enabled_emulation_use_group_config = false
-
-# Additional attribute mappings for projects. Attribute mapping format is
-# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
-# and user_attr is the Identity API attribute. (list value)
-# Deprecated group/name - [ldap]/tenant_additional_attribute_mapping
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#project_additional_attribute_mapping =
-
-# Search base for roles. Defaults to the suffix value. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_tree_dn = <None>
-
-# LDAP search filter for roles. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_filter = <None>
-
-# LDAP objectclass for roles. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_objectclass = organizationalRole
-
-# LDAP attribute mapped to role id. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_id_attribute = cn
-
-# LDAP attribute mapped to role name. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_name_attribute = ou
-
-# LDAP attribute mapped to role membership. (string value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_member_attribute = roleOccupant
-
-# List of attributes stripped off the role on update. (list value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_attribute_ignore =
-
-# Allow role creation in LDAP backend. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_allow_create = true
-
-# Allow role update in LDAP backend. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_allow_update = true
-
-# Allow role deletion in LDAP backend. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_allow_delete = true
-
-# Additional attribute mappings for roles. Attribute mapping format is
-# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the LDAP entry
-# and user_attr is the Identity API attribute. (list value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#role_additional_attribute_mapping =
-
# Search base for groups. Defaults to the suffix value. (string value)
#group_tree_dn = <None>
@@ -1216,12 +1131,24 @@
#group_attribute_ignore =
# Allow group creation in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#group_allow_create = true
# Allow group update in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#group_allow_update = true
# Allow group deletion in LDAP backend. (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Write support for Identity LDAP backends has been deprecated in the M
+# release and will be removed in the O release.
#group_allow_delete = true
# Additional attribute mappings for groups. Attribute mapping format is
@@ -1245,7 +1172,7 @@
#tls_req_cert = demand
# Enable LDAP connection pooling. (boolean value)
-#use_pool = false
+#use_pool = true
# Connection pool size. (integer value)
#pool_size = 10
@@ -1267,7 +1194,7 @@
# Enable LDAP connection pooling for end user authentication. If use_pool is
# disabled, then this setting is meaningless and is not used at all. (boolean
# value)
-#use_auth_pool = false
+#use_auth_pool = true
# End user auth connection pool size. (integer value)
#auth_pool_size = 100
@@ -1275,6 +1202,11 @@
# End user auth connection lifetime in seconds. (integer value)
#auth_pool_connection_lifetime = 60
+# If the members of the group objectclass are user IDs rather than DNs, set
+# this to true. This is the case when using posixGroup as the group objectclass
+# and OpenDirectory. (boolean value)
+#group_members_are_ids = false
+
[matchmaker_redis]
@@ -1285,22 +1217,29 @@
# Host to locate redis. (string value)
#host = 127.0.0.1
-# Use this port to connect to redis host. (integer value)
+# Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
#port = 6379
# Password for Redis server (optional). (string value)
-#password = <None>
+#password =
+# List of Redis Sentinel hosts (fault tolerance mode) e.g.
+# [host:port, host1:port ... ] (list value)
+#sentinel_hosts =
-[matchmaker_ring]
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
-#
-# From oslo.messaging
-#
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 500
-# Matchmaker ring file (JSON). (string value)
-# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
-#ringfile = /etc/oslo/matchmaker_ring.json
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations (integer value)
+#socket_timeout = 1000
[memcache]
@@ -1344,7 +1283,7 @@
# From keystone
#
-# Entrypoint for hte OAuth backend driver in the keystone.oauth1 namespace.
+# Entrypoint for the OAuth backend driver in the keystone.oauth1 namespace.
# (string value)
#driver = sql
@@ -1362,8 +1301,15 @@
#
# role-assignment inheritance to projects from owning domain or from projects
-# higher in the hierarchy can be optionally enabled. (boolean value)
-#enabled = false
+# higher in the hierarchy can be optionally disabled. In the future, this
+# option will be removed and the hierarchy will be always enabled. (boolean
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: The option to enable the OS-INHERIT extension has been deprecated in
+# the M release and will be removed in the O release. The OS-INHERIT extension
+# will be enabled by default.
+#enabled = true
[oslo_messaging_amqp]
@@ -1416,78 +1362,47 @@
# Deprecated group/name - [amqp1]/allow_insecure_clients
#allow_insecure_clients = false
+# Space separated list of acceptable SASL mechanisms (string value)
+# Deprecated group/name - [amqp1]/sasl_mechanisms
+#sasl_mechanisms =
-[oslo_messaging_qpid]
-
-#
-# From oslo.messaging
-#
-
-# Use durable queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/amqp_durable_queues
-# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
-#amqp_durable_queues = false
-
-# Auto-delete queues in AMQP. (boolean value)
-# Deprecated group/name - [DEFAULT]/amqp_auto_delete
-#amqp_auto_delete = false
-
-# Send a single AMQP reply to call message. The current behaviour since oslo-
-# incubator is to send two AMQP replies - first one with the payload, a second
-# one to ensure the other have finish to send the payload. We are going to
-# remove it in the N release, but we must keep backward compatible at the same
-# time. This option provides such compatibility - it defaults to False in
-# Liberty and can be turned on for early adopters with a new installations or
-# for testing. Please note, that this option will be removed in the Mitaka
-# release. (boolean value)
-#send_single_reply = false
-
-# Qpid broker hostname. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_hostname
-#qpid_hostname = localhost
+# Path to directory that contains the SASL configuration (string value)
+# Deprecated group/name - [amqp1]/sasl_config_dir
+#sasl_config_dir =
-# Qpid broker port. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_port
-#qpid_port = 5672
+# Name of configuration file (without .conf suffix) (string value)
+# Deprecated group/name - [amqp1]/sasl_config_name
+#sasl_config_name =
-# Qpid HA cluster host:port pairs. (list value)
-# Deprecated group/name - [DEFAULT]/qpid_hosts
-#qpid_hosts = $qpid_hostname:$qpid_port
+# User name for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/username
+#username =
-# Username for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_username
-#qpid_username =
+# Password for message broker authentication (string value)
+# Deprecated group/name - [amqp1]/password
+#password =
-# Password for Qpid connection. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_password
-#qpid_password =
-# Space separated list of SASL mechanisms to use for auth. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
-#qpid_sasl_mechanisms =
+[oslo_messaging_notifications]
-# Seconds between connection keepalive heartbeats. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_heartbeat
-#qpid_heartbeat = 60
-
-# Transport to use, either 'tcp' or 'ssl'. (string value)
-# Deprecated group/name - [DEFAULT]/qpid_protocol
-#qpid_protocol = tcp
+#
+# From oslo.messaging
+#
-# Whether to disable the Nagle algorithm. (boolean value)
-# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
-#qpid_tcp_nodelay = true
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
-# The number of prefetched messages held by receiver. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
-#qpid_receiver_capacity = 1
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
-# The qpid topology version to use. Version 1 is what was originally used by
-# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
-# broker federation to work. Users should update to version 2 when they are
-# able to take everything down, as it requires a clean break. (integer value)
-# Deprecated group/name - [DEFAULT]/qpid_topology_version
-#qpid_topology_version = 1
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
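These options are consumed by oslo.messaging's notifier rather than by keystone directly. A minimal sketch of emitting a notification against this section (publisher id, topic and payload are illustrative):

    # Sketch: emitting a notification via oslo.messaging using the
    # [oslo_messaging_notifications] options; values below are illustrative.
    from oslo_config import cfg
    import oslo_messaging

    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    notifier = oslo_messaging.Notifier(transport,
                                       publisher_id='identity.host1',
                                       driver='messagingv2',
                                       topics=['notifications'])
    notifier.info({}, 'identity.user.created', {'user_id': 'abc123'})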
[oslo_messaging_rabbit]
@@ -1505,16 +1420,6 @@
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
#amqp_auto_delete = false
-# Send a single AMQP reply to call message. The current behaviour since oslo-
-# incubator is to send two AMQP replies - first one with the payload, a second
-# one to ensure the other have finish to send the payload. We are going to
-# remove it in the N release, but we must keep backward compatible at the same
-# time. This option provides such compatibility - it defaults to False in
-# Liberty and can be turned on for early adopters with a new installations or
-# for testing. Please note, that this option will be removed in the Mitaka
-# release. (boolean value)
-#send_single_reply = false
-
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
@@ -1538,15 +1443,28 @@
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
#kombu_reconnect_delay = 1.0
-# How long to wait before considering a reconnect attempt to have failed. This
-# value should not be longer than rpc_response_timeout. (integer value)
-#kombu_reconnect_timeout = 60
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set, compression will
+# not be used. This option may not be available in future versions. (string
+# value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning the attempt to send
+# it its replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Allowed values: round-robin, shuffle
+#kombu_failover_strategy = round-robin
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
#rabbit_host = localhost
-# The RabbitMQ broker port where a single node is used. (integer value)
+# The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
# Deprecated group/name - [DEFAULT]/rabbit_port
#rabbit_port = 5672
@@ -1582,16 +1500,34 @@
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
#rabbit_retry_backoff = 2
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
# count). (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
#rabbit_max_retries = 0
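Together with rabbit_retry_interval (defined earlier in this section), rabbit_retry_backoff and the new rabbit_interval_max describe a capped, linearly growing reconnect delay. A rough standalone model of the resulting schedule (the exact formula inside oslo.messaging is an assumption here):

    # Rough model of the reconnect delays implied by the retry options above;
    # the precise formula used by the driver may differ.
    def retry_delays(interval=1, backoff=2, interval_max=30, attempts=6):
        return [min(interval + n * backoff, interval_max)
                for n in range(attempts)]

    print(retry_delays())  # [1, 3, 5, 7, 9, 11]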
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
-# must wipe the RabbitMQ database. (boolean value)
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
#rabbit_ha_queues = false
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
# Number of seconds after which the Rabbit broker is considered down if
# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL
# (integer value)
@@ -1605,6 +1541,104 @@
# Deprecated group/name - [DEFAULT]/fake_rabbit
#fake_rabbit = false
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 1
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to a host which has a connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 10
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connection to become available
+# (integer value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of unacknowledged messages which RabbitMQ can send to the
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of unacknowledged messages which RabbitMQ can send to the rpc
+# reply listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If the actual number of retry attempts is
+# not 0, the rpc request could be processed more than once. (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
[oslo_middleware]
@@ -1617,13 +1651,11 @@
# Deprecated group/name - [DEFAULT]/max_request_body_size
#max_request_body_size = 114688
-#
-# From oslo.middleware
-#
-
# The HTTP Header that will be used to determine what the original request
# protocol scheme was, even if it was hidden by an SSL termination proxy.
# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
#secure_proxy_ssl_header = X-Forwarded-Proto
@@ -1647,8 +1679,6 @@
# directories to be searched. Missing or empty directories are ignored. (multi
# valued)
# Deprecated group/name - [DEFAULT]/policy_dirs
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
#policy_dirs = policy.d
@@ -1685,7 +1715,7 @@
#
# Entrypoint for the resource backend driver in the keystone.resource
-# namespace. Supplied drivers are ldap and sql. If a resource driver is not
+# namespace. Only an SQL driver is supplied. If a resource driver is not
# specified, the assignment driver will choose the resource driver. (string
# value)
#driver = <None>
@@ -1705,6 +1735,31 @@
# Deprecated group/name - [assignment]/list_limit
#list_limit = <None>
+# Name of the domain that owns the `admin_project_name`. Defaults to None.
+# (string value)
+#admin_project_domain_name = <None>
+
+# Special project for performing administrative operations on remote services.
+# Tokens scoped to this project will contain the key/value
+# `is_admin_project=true`. Defaults to None. (string value)
+#admin_project_name = <None>
+
+# Whether the names of projects are restricted from containing URL-reserved
+# characters. If set to new, attempts to create or update a project with a
+# URL-unsafe name will return an error. In addition, if set to strict,
+# attempts to scope a token using an unsafe project name will return an error.
+# (string value)
+# Allowed values: off, new, strict
+#project_name_url_safe = off
+
+# Whether the names of domains are restricted from containing URL-reserved
+# characters. If set to new, attempts to create or update a domain with a
+# URL-unsafe name will return an error. In addition, if set to strict,
+# attempts to scope a token using a domain name which is unsafe will return an
+# error. (string value)
+# Allowed values: off, new, strict
+#domain_name_url_safe = off
+
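Both *_name_url_safe options hinge on the same notion: a name is URL-safe if it contains no characters that are reserved in URLs. A self-contained approximation of such a check (keystone's actual validation helper may differ in detail):

    # Illustrative check only; keystone's real helper may differ in detail.
    import urllib.parse

    def is_url_safe(name):
        # URL-encoding a safe name leaves it unchanged
        return urllib.parse.quote(name, safe='') == name

    print(is_url_safe('team-a'))      # True
    print(is_url_safe('team/a?x=1'))  # False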
[revoke]
@@ -1831,6 +1886,17 @@
#relay_state_prefix = ss:mem:
+[shadow_users]
+
+#
+# From keystone
+#
+
+# Entrypoint for the shadow users backend driver in the
+# keystone.identity.shadow_users namespace. (string value)
+#driver = sql
+
+
[signing]
#
@@ -1840,28 +1906,56 @@
# Path of the certfile for token signing. For non-production environments, you
# may be interested in using `keystone-manage pki_setup` to generate self-
# signed certificates. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
# Path of the keyfile for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#keyfile = /etc/keystone/ssl/private/signing_key.pem
# Path of the CA for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#ca_certs = /etc/keystone/ssl/certs/ca.pem
# Path of the CA key for token signing. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#ca_key = /etc/keystone/ssl/private/cakey.pem
# Key size (in bits) for token signing cert (auto generated certificate).
# (integer value)
# Minimum value: 1024
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#key_size = 2048
# Days the token signing cert is valid for (auto generated certificate).
# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#valid_days = 3650
# Certificate subject (auto generated certificate) for token signing. (string
# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
@@ -1939,8 +2033,16 @@
# that hashlib supports. WARNING: Before changing this value, the auth_token
# middleware must be configured with the hash_algorithms, otherwise token
# revocation will not be processed correctly. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: PKI token support has been deprecated in the M release and will be
+# removed in the O release. Fernet or UUID tokens are recommended.
#hash_algorithm = md5
+# Add roles to a token that are not explicitly added, but that are implicitly
+# linked to other roles. (boolean value)
+#infer_roles = true
+
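Implied roles form a directed graph in which one role carries others with it, and infer_roles controls whether a token's role list is the transitive closure of what was explicitly assigned. A small standalone sketch of that expansion (the implication map is illustrative):

    # Sketch: expanding explicit roles through an implication graph, as
    # infer_roles does for tokens. The implication data is illustrative.
    implications = {'admin': ['member'], 'member': ['reader']}

    def expand(roles):
        seen, stack = set(), list(roles)
        while stack:
            role = stack.pop()
            if role not in seen:
                seen.add(role)
                stack.extend(implications.get(role, []))
        return seen

    print(expand(['admin']))  # {'admin', 'member', 'reader'}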
[tokenless_auth]
diff --git a/keystone-moon/etc/policy.json b/keystone-moon/etc/policy.json
index ebb94b02..797af24d 100644
--- a/keystone-moon/etc/policy.json
+++ b/keystone-moon/etc/policy.json
@@ -34,7 +34,7 @@
"identity:update_domain": "rule:admin_required",
"identity:delete_domain": "rule:admin_required",
- "identity:get_project": "rule:admin_required",
+ "identity:get_project": "rule:admin_required or project_id:%(target.project.id)s",
"identity:list_projects": "rule:admin_required",
"identity:list_user_projects": "rule:admin_or_owner",
"identity:create_project": "rule:admin_required",
@@ -75,6 +75,18 @@
"identity:create_role": "rule:admin_required",
"identity:update_role": "rule:admin_required",
"identity:delete_role": "rule:admin_required",
+ "identity:get_domain_role": "rule:admin_required",
+ "identity:list_domain_roles": "rule:admin_required",
+ "identity:create_domain_role": "rule:admin_required",
+ "identity:update_domain_role": "rule:admin_required",
+ "identity:delete_domain_role": "rule:admin_required",
+
+    "identity:get_implied_role": "rule:admin_required",
+ "identity:list_implied_roles": "rule:admin_required",
+ "identity:create_implied_role": "rule:admin_required",
+ "identity:delete_implied_role": "rule:admin_required",
+ "identity:list_role_inference_rules": "rule:admin_required",
+ "identity:check_implied_role": "rule:admin_required",
"identity:check_grant": "rule:admin_required",
"identity:list_grants": "rule:admin_required",
@@ -82,6 +94,7 @@
"identity:revoke_grant": "rule:admin_required",
"identity:list_role_assignments": "rule:admin_required",
+ "identity:list_role_assignments_for_tree": "rule:admin_required",
"identity:get_policy": "rule:admin_required",
"identity:list_policies": "rule:admin_required",
@@ -180,5 +193,6 @@
"identity:create_domain_config": "rule:admin_required",
"identity:get_domain_config": "rule:admin_required",
"identity:update_domain_config": "rule:admin_required",
- "identity:delete_domain_config": "rule:admin_required"
+ "identity:delete_domain_config": "rule:admin_required",
+ "identity:get_domain_config_default": "rule:admin_required"
}
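The relaxed identity:get_project rule can be exercised with oslo.policy directly; note that %(target.project.id)s is substituted from a flat dotted key in the target dict. A sketch (the policy file location and credential values are illustrative):

    # Sketch: evaluating the updated identity:get_project rule with
    # oslo.policy. Paths and credential values are illustrative.
    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF, policy_file='policy.json')
    creds = {'roles': ['member'], 'project_id': 'p1'}  # not an admin
    target = {'target.project.id': 'p1'}               # flat dotted key
    print(enforcer.enforce('identity:get_project', target, creds))  # True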
diff --git a/keystone-moon/etc/policy.v3cloudsample.json b/keystone-moon/etc/policy.v3cloudsample.json
index a96996c6..4ec1aa95 100644
--- a/keystone-moon/etc/policy.v3cloudsample.json
+++ b/keystone-moon/etc/policy.v3cloudsample.json
@@ -1,11 +1,10 @@
{
"admin_required": "role:admin",
- "cloud_admin": "rule:admin_required and domain_id:admin_domain_id",
+ "cloud_admin": "role:admin and (token.is_admin_project:True or domain_id:admin_domain_id)",
"service_role": "role:service",
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s",
"admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner",
- "admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin",
"admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s",
"service_admin_or_owner": "rule:service_or_admin or rule:owner",
@@ -17,14 +16,14 @@
"identity:update_region": "rule:cloud_admin",
"identity:delete_region": "rule:cloud_admin",
- "identity:get_service": "rule:admin_or_cloud_admin",
- "identity:list_services": "rule:admin_or_cloud_admin",
+ "identity:get_service": "rule:admin_required",
+ "identity:list_services": "rule:admin_required",
"identity:create_service": "rule:cloud_admin",
"identity:update_service": "rule:cloud_admin",
"identity:delete_service": "rule:cloud_admin",
- "identity:get_endpoint": "rule:admin_or_cloud_admin",
- "identity:list_endpoints": "rule:admin_or_cloud_admin",
+ "identity:get_endpoint": "rule:admin_required",
+ "identity:list_endpoints": "rule:admin_required",
"identity:create_endpoint": "rule:cloud_admin",
"identity:update_endpoint": "rule:cloud_admin",
"identity:delete_endpoint": "rule:cloud_admin",
@@ -37,7 +36,7 @@
"admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s",
"admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s",
- "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
+ "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s",
"identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id",
"identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id",
@@ -71,28 +70,56 @@
"identity:update_credential": "rule:admin_required",
"identity:delete_credential": "rule:admin_required",
- "identity:ec2_get_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)",
- "identity:ec2_list_credentials": "rule:admin_or_cloud_admin or rule:owner",
- "identity:ec2_create_credential": "rule:admin_or_cloud_admin or rule:owner",
- "identity:ec2_delete_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)",
+ "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
+ "identity:ec2_list_credentials": "rule:admin_required or rule:owner",
+ "identity:ec2_create_credential": "rule:admin_required or rule:owner",
+ "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
- "identity:get_role": "rule:admin_or_cloud_admin",
- "identity:list_roles": "rule:admin_or_cloud_admin",
+ "identity:get_role": "rule:admin_required",
+ "identity:list_roles": "rule:admin_required",
"identity:create_role": "rule:cloud_admin",
"identity:update_role": "rule:cloud_admin",
"identity:delete_role": "rule:cloud_admin",
- "domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)",
- "project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s",
+ "identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles",
+ "identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles",
+ "identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role",
+ "identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
+ "identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
+ "domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s",
+ "get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role",
+ "domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s",
+ "project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s",
+ "list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles",
+ "domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s",
+ "project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s",
+
+ "identity:get_implied_role": "rule:cloud_admin",
+ "identity:list_implied_roles": "rule:cloud_admin",
+ "identity:create_implied_role": "rule:cloud_admin",
+ "identity:delete_implied_role": "rule:cloud_admin",
+ "identity:list_role_inference_rules": "rule:cloud_admin",
+ "identity:check_implied_role": "rule:cloud_admin",
+
"identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
- "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
+ "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_list_grants or rule:project_admin_for_list_grants",
"identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
-
+ "domain_admin_for_grants": "rule:domain_admin_for_global_role_grants or rule:domain_admin_for_domain_role_grants",
+ "domain_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and rule:domain_admin_grant_match",
+ "domain_admin_for_domain_role_grants": "rule:admin_required and domain_id:%(target.role.domain_id)s and rule:domain_admin_grant_match",
+ "domain_admin_grant_match": "domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s",
+ "project_admin_for_grants": "rule:project_admin_for_global_role_grants or rule:project_admin_for_domain_role_grants",
+ "project_admin_for_global_role_grants": "rule:admin_required and None:%(target.role.domain_id)s and project_id:%(project_id)s",
+ "project_admin_for_domain_role_grants": "rule:admin_required and project_domain_id:%(target.role.domain_id)s and project_id:%(project_id)s",
+ "domain_admin_for_list_grants": "rule:admin_required and rule:domain_admin_grant_match",
+ "project_admin_for_list_grants": "rule:admin_required and project_id:%(project_id)s",
+
"admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s",
"admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s",
+ "admin_on_domain_of_project_filter" : "rule:admin_required and domain_id:%(target.project.domain_id)s",
"identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter",
-
+ "identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter",
"identity:get_policy": "rule:cloud_admin",
"identity:list_policies": "rule:cloud_admin",
"identity:create_policy": "rule:cloud_admin",
@@ -191,5 +218,6 @@
"identity:create_domain_config": "rule:cloud_admin",
"identity:get_domain_config": "rule:cloud_admin",
"identity:update_domain_config": "rule:cloud_admin",
- "identity:delete_domain_config": "rule:cloud_admin"
+ "identity:delete_domain_config": "rule:cloud_admin",
+ "identity:get_domain_config_default": "rule:cloud_admin"
}
diff --git a/keystone-moon/examples/pki/gen_pki.sh b/keystone-moon/examples/pki/gen_pki.sh
index 65550265..da800413 100755
--- a/keystone-moon/examples/pki/gen_pki.sh
+++ b/keystone-moon/examples/pki/gen_pki.sh
@@ -157,14 +157,16 @@ function check_error {
function generate_ca {
echo 'Generating New CA Certificate ...'
- openssl req -x509 -newkey rsa:2048 -days 21360 -out $CERTS_DIR/cacert.pem -keyout $PRIVATE_DIR/cakey.pem -outform PEM -config ca.conf -nodes
+ openssl req -x509 -newkey rsa:2048 -days 21360 -out $CERTS_DIR/cacert.pem \
+ -keyout $PRIVATE_DIR/cakey.pem -outform PEM -config ca.conf -nodes
check_error $?
}
function ssl_cert_req {
echo 'Generating SSL Certificate Request ...'
generate_ssl_req_conf
- openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/ssl_key.pem -keyform PEM -out ssl_req.pem -outform PEM -config ssl_req.conf -nodes
+ openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/ssl_key.pem \
+ -keyform PEM -out ssl_req.pem -outform PEM -config ssl_req.conf -nodes
check_error $?
#openssl req -in req.pem -text -noout
}
@@ -172,7 +174,9 @@ function ssl_cert_req {
function cms_signing_cert_req {
echo 'Generating CMS Signing Certificate Request ...'
generate_cms_signing_req_conf
- openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/signing_key.pem -keyform PEM -out cms_signing_req.pem -outform PEM -config cms_signing_req.conf -nodes
+ openssl req -newkey rsa:2048 -keyout $PRIVATE_DIR/signing_key.pem \
+ -keyform PEM -out cms_signing_req.pem -outform PEM \
+ -config cms_signing_req.conf -nodes
check_error $?
#openssl req -in req.pem -text -noout
}
@@ -187,7 +191,8 @@ function issue_certs {
echo 'Issuing CMS Signing Certificate ...'
openssl ca -in cms_signing_req.pem -config signing.conf -batch
check_error $?
- openssl x509 -in $CURRENT_DIR/newcerts/11.pem -out $CERTS_DIR/signing_cert.pem
+ openssl x509 -in $CURRENT_DIR/newcerts/11.pem \
+ -out $CERTS_DIR/signing_cert.pem
check_error $?
}
@@ -203,8 +208,15 @@ function check_openssl {
}
function gen_sample_cms {
- for json_file in "${CMS_DIR}/auth_token_revoked.json" "${CMS_DIR}/auth_token_unscoped.json" "${CMS_DIR}/auth_token_scoped.json" "${CMS_DIR}/revocation_list.json"; do
- openssl cms -sign -in $json_file -nosmimecap -signer $CERTS_DIR/signing_cert.pem -inkey $PRIVATE_DIR/signing_key.pem -outform PEM -nodetach -nocerts -noattr -out ${json_file/.json/.pem}
+ FILES="${CMS_DIR}/auth_token_revoked.json"
+ FILES+=" ${CMS_DIR}/auth_token_unscoped.json"
+ FILES+=" ${CMS_DIR}/auth_token_scoped.json"
+ FILES+=" ${CMS_DIR}/revocation_list.json"
+ for json_file in $FILES; do
+ openssl cms -sign -in $json_file -nosmimecap \
+ -signer $CERTS_DIR/signing_cert.pem \
+ -inkey $PRIVATE_DIR/signing_key.pem -outform PEM -nodetach \
+ -nocerts -noattr -out ${json_file/.json/.pem}
done
}
diff --git a/keystone-moon/httpd/keystone-uwsgi-admin.ini b/keystone-moon/httpd/keystone-uwsgi-admin.ini
new file mode 100644
index 00000000..8624ee3e
--- /dev/null
+++ b/keystone-moon/httpd/keystone-uwsgi-admin.ini
@@ -0,0 +1,22 @@
+[uwsgi]
+wsgi-file = /usr/local/bin/keystone-wsgi-admin
+
+# Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see
+# http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi
+uwsgi-socket = 127.0.0.1:35358
+
+# Override the default size for headers from the 4k default.
+buffer-size = 65535
+
+# This is running standalone
+master = true
+
+enable-threads = true
+
+# Tune this to your environment.
+threads = 4
+
+# uwsgi recommends this to prevent thundering herd on accept.
+thunder-lock = true
+
+plugins = python
diff --git a/keystone-moon/httpd/keystone-uwsgi-public.ini b/keystone-moon/httpd/keystone-uwsgi-public.ini
new file mode 100644
index 00000000..c9521163
--- /dev/null
+++ b/keystone-moon/httpd/keystone-uwsgi-public.ini
@@ -0,0 +1,22 @@
+[uwsgi]
+wsgi-file = /usr/local/bin/keystone-wsgi-public
+
+# Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see
+# http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi
+uwsgi-socket = 127.0.0.1:5001
+
+# Override the default size for headers from the 4k default.
+buffer-size = 65535
+
+# This is running standalone
+master = true
+
+enable-threads = true
+
+# Tune this to your environment.
+threads = 4
+
+# uwsgi recommends this to prevent thundering herd on accept.
+thunder-lock = true
+
+plugins = python
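Both ini files are meant to be launched directly, e.g. uwsgi --ini keystone-uwsgi-public.ini (invocation illustrative). The plugins = python line assumes a distribution build of uwsgi that ships its Python plugin separately; it is typically unnecessary for a monolithic pip-installed uwsgi.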
diff --git a/keystone-moon/httpd/keystone.py b/keystone-moon/httpd/keystone.py
index 0c7018ff..05683a91 100644
--- a/keystone-moon/httpd/keystone.py
+++ b/keystone-moon/httpd/keystone.py
@@ -14,12 +14,28 @@
import os
+from oslo_log import log
+from oslo_log import versionutils
+
+from keystone.i18n import _LW
from keystone.server import wsgi as wsgi_server
name = os.path.basename(__file__)
+LOG = log.getLogger(__name__)
+
+
+def deprecation_warning():
+ versionutils.report_deprecated_feature(
+ LOG,
+ _LW('httpd/keystone.py is deprecated as of Mitaka'
+ ' in favor of keystone-wsgi-admin and keystone-wsgi-public'
+ ' and may be removed in O.')
+ )
# NOTE(ldbragst): 'application' is required in this context by WSGI spec.
# The following is a reference to Python Paste Deploy documentation
# http://pythonpaste.org/deploy/
-application = wsgi_server.initialize_application(name)
+application = wsgi_server.initialize_application(
+ name,
+ post_log_configured_function=deprecation_warning)
diff --git a/keystone-moon/httpd/uwsgi-keystone.conf b/keystone-moon/httpd/uwsgi-keystone.conf
new file mode 100644
index 00000000..2d61a5bf
--- /dev/null
+++ b/keystone-moon/httpd/uwsgi-keystone.conf
@@ -0,0 +1,13 @@
+Listen 5000
+Listen 35357
+
+<VirtualHost *:5000>
+ ProxyPass / uwsgi://127.0.0.1:5001/
+</VirtualHost>
+
+<VirtualHost *:35357>
+ ProxyPass / uwsgi://127.0.0.1:35358/
+</VirtualHost>
+
+ProxyPass /identity uwsgi://127.0.0.1:5001/
+ProxyPass /identity_admin uwsgi://127.0.0.1:35358/
diff --git a/keystone-moon/httpd/wsgi-keystone.conf b/keystone-moon/httpd/wsgi-keystone.conf
index c2224d42..52a46370 100644
--- a/keystone-moon/httpd/wsgi-keystone.conf
+++ b/keystone-moon/httpd/wsgi-keystone.conf
@@ -7,6 +7,7 @@ Listen 35357
WSGIScriptAlias / /usr/local/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
+ LimitRequestBody 114688
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
@@ -30,6 +31,7 @@ Listen 35357
WSGIScriptAlias / /usr/local/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
+ LimitRequestBody 114688
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
@@ -46,3 +48,23 @@ Listen 35357
</IfVersion>
</Directory>
</VirtualHost>
+
+Alias /identity /usr/local/bin/keystone-wsgi-public
+<Location /identity>
+ SetHandler wsgi-script
+ Options +ExecCGI
+
+ WSGIProcessGroup keystone-public
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+</Location>
+
+Alias /identity_admin /usr/local/bin/keystone-wsgi-admin
+<Location /identity_admin>
+ SetHandler wsgi-script
+ Options +ExecCGI
+
+ WSGIProcessGroup keystone-admin
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+</Location>
diff --git a/keystone-moon/keystone/assignment/V8_backends/__init__.py b/keystone-moon/keystone/assignment/V8_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/assignment/V8_backends/__init__.py
diff --git a/keystone-moon/keystone/assignment/V8_backends/sql.py b/keystone-moon/keystone/assignment/V8_backends/sql.py
new file mode 100644
index 00000000..88c10a6a
--- /dev/null
+++ b/keystone-moon/keystone/assignment/V8_backends/sql.py
@@ -0,0 +1,452 @@
+# Copyright 2012-13 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import sqlalchemy
+from sqlalchemy.sql.expression import false
+
+from keystone import assignment as keystone_assignment
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _
+
+
+CONF = cfg.CONF
+
+
+class AssignmentType(object):
+ USER_PROJECT = 'UserProject'
+ GROUP_PROJECT = 'GroupProject'
+ USER_DOMAIN = 'UserDomain'
+ GROUP_DOMAIN = 'GroupDomain'
+
+ @classmethod
+ def calculate_type(cls, user_id, group_id, project_id, domain_id):
+ if user_id:
+ if project_id:
+ return cls.USER_PROJECT
+ if domain_id:
+ return cls.USER_DOMAIN
+ if group_id:
+ if project_id:
+ return cls.GROUP_PROJECT
+ if domain_id:
+ return cls.GROUP_DOMAIN
+ # Invalid parameters combination
+ raise exception.AssignmentTypeCalculationError(**locals())
+
+
+class Assignment(keystone_assignment.AssignmentDriverV8):
+
+ def default_role_driver(self):
+ return 'sql'
+
+ def default_resource_driver(self):
+ return 'sql'
+
+ def list_user_ids_for_project(self, tenant_id):
+ with sql.session_for_read() as session:
+ query = session.query(RoleAssignment.actor_id)
+ query = query.filter_by(type=AssignmentType.USER_PROJECT)
+ query = query.filter_by(target_id=tenant_id)
+ query = query.distinct('actor_id')
+ assignments = query.all()
+ return [assignment.actor_id for assignment in assignments]
+
+ def create_grant(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+
+ assignment_type = AssignmentType.calculate_type(
+ user_id, group_id, project_id, domain_id)
+ try:
+ with sql.session_for_write() as session:
+ session.add(RoleAssignment(
+ type=assignment_type,
+ actor_id=user_id or group_id,
+ target_id=project_id or domain_id,
+ role_id=role_id,
+ inherited=inherited_to_projects))
+ except sql.DBDuplicateEntry: # nosec : The v3 grant APIs are silent if
+ # the assignment already exists
+ pass
+
+ def list_grant_role_ids(self, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ with sql.session_for_read() as session:
+ q = session.query(RoleAssignment.role_id)
+ q = q.filter(RoleAssignment.actor_id == (user_id or group_id))
+ q = q.filter(RoleAssignment.target_id == (project_id or domain_id))
+ q = q.filter(RoleAssignment.inherited == inherited_to_projects)
+ return [x.role_id for x in q.all()]
+
+ def _build_grant_filter(self, session, role_id, user_id, group_id,
+ domain_id, project_id, inherited_to_projects):
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id or group_id)
+ q = q.filter_by(target_id=project_id or domain_id)
+ q = q.filter_by(role_id=role_id)
+ q = q.filter_by(inherited=inherited_to_projects)
+ return q
+
+ def check_grant_role_id(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ with sql.session_for_read() as session:
+ try:
+ q = self._build_grant_filter(
+ session, role_id, user_id, group_id, domain_id, project_id,
+ inherited_to_projects)
+ q.one()
+ except sql.NotFound:
+ actor_id = user_id or group_id
+ target_id = domain_id or project_id
+ raise exception.RoleAssignmentNotFound(role_id=role_id,
+ actor_id=actor_id,
+ target_id=target_id)
+
+ def delete_grant(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ with sql.session_for_write() as session:
+ q = self._build_grant_filter(
+ session, role_id, user_id, group_id, domain_id, project_id,
+ inherited_to_projects)
+ if not q.delete(False):
+ actor_id = user_id or group_id
+ target_id = domain_id or project_id
+ raise exception.RoleAssignmentNotFound(role_id=role_id,
+ actor_id=actor_id,
+ target_id=target_id)
+
+ def _list_project_ids_for_actor(self, actors, hints, inherited,
+ group_only=False):
+ # TODO(henry-nash): Now that we have a single assignment table, we
+ # should be able to honor the hints list that is provided.
+
+ assignment_type = [AssignmentType.GROUP_PROJECT]
+ if not group_only:
+ assignment_type.append(AssignmentType.USER_PROJECT)
+
+ sql_constraints = sqlalchemy.and_(
+ RoleAssignment.type.in_(assignment_type),
+ RoleAssignment.inherited == inherited,
+ RoleAssignment.actor_id.in_(actors))
+
+ with sql.session_for_read() as session:
+ query = session.query(RoleAssignment.target_id).filter(
+ sql_constraints).distinct()
+
+ return [x.target_id for x in query.all()]
+
+ def list_project_ids_for_user(self, user_id, group_ids, hints,
+ inherited=False):
+ actor_list = [user_id]
+ if group_ids:
+ actor_list = actor_list + group_ids
+
+ return self._list_project_ids_for_actor(actor_list, hints, inherited)
+
+ def list_domain_ids_for_user(self, user_id, group_ids, hints,
+ inherited=False):
+ with sql.session_for_read() as session:
+ query = session.query(RoleAssignment.target_id)
+ filters = []
+
+ if user_id:
+ sql_constraints = sqlalchemy.and_(
+ RoleAssignment.actor_id == user_id,
+ RoleAssignment.inherited == inherited,
+ RoleAssignment.type == AssignmentType.USER_DOMAIN)
+ filters.append(sql_constraints)
+
+ if group_ids:
+ sql_constraints = sqlalchemy.and_(
+ RoleAssignment.actor_id.in_(group_ids),
+ RoleAssignment.inherited == inherited,
+ RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
+ filters.append(sql_constraints)
+
+ if not filters:
+ return []
+
+ query = query.filter(sqlalchemy.or_(*filters)).distinct()
+
+ return [assignment.target_id for assignment in query.all()]
+
+ def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
+ if not group_ids:
+ # If there's no groups then there will be no domain roles.
+ return []
+
+ sql_constraints = sqlalchemy.and_(
+ RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+ RoleAssignment.target_id == domain_id,
+ RoleAssignment.inherited == false(),
+ RoleAssignment.actor_id.in_(group_ids))
+
+ with sql.session_for_read() as session:
+ query = session.query(RoleAssignment.role_id).filter(
+ sql_constraints).distinct()
+ return [role.role_id for role in query.all()]
+
+ def list_role_ids_for_groups_on_project(
+ self, group_ids, project_id, project_domain_id, project_parents):
+
+ if not group_ids:
+ # If there's no groups then there will be no project roles.
+ return []
+
+ # NOTE(rodrigods): First, we always include projects with
+ # non-inherited assignments
+ sql_constraints = sqlalchemy.and_(
+ RoleAssignment.type == AssignmentType.GROUP_PROJECT,
+ RoleAssignment.inherited == false(),
+ RoleAssignment.target_id == project_id)
+
+ if CONF.os_inherit.enabled:
+ # Inherited roles from domains
+ sql_constraints = sqlalchemy.or_(
+ sql_constraints,
+ sqlalchemy.and_(
+ RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+ RoleAssignment.inherited,
+ RoleAssignment.target_id == project_domain_id))
+
+ # Inherited roles from projects
+ if project_parents:
+ sql_constraints = sqlalchemy.or_(
+ sql_constraints,
+ sqlalchemy.and_(
+ RoleAssignment.type == AssignmentType.GROUP_PROJECT,
+ RoleAssignment.inherited,
+ RoleAssignment.target_id.in_(project_parents)))
+
+ sql_constraints = sqlalchemy.and_(
+ sql_constraints, RoleAssignment.actor_id.in_(group_ids))
+
+ with sql.session_for_read() as session:
+ # NOTE(morganfainberg): Only select the columns we actually care
+ # about here, in this case role_id.
+ query = session.query(RoleAssignment.role_id).filter(
+ sql_constraints).distinct()
+
+ return [result.role_id for result in query.all()]
+
+ def list_project_ids_for_groups(self, group_ids, hints,
+ inherited=False):
+ return self._list_project_ids_for_actor(
+ group_ids, hints, inherited, group_only=True)
+
+ def list_domain_ids_for_groups(self, group_ids, inherited=False):
+ if not group_ids:
+ # If there's no groups then there will be no domains.
+ return []
+
+ group_sql_conditions = sqlalchemy.and_(
+ RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
+ RoleAssignment.inherited == inherited,
+ RoleAssignment.actor_id.in_(group_ids))
+
+ with sql.session_for_read() as session:
+ query = session.query(RoleAssignment.target_id).filter(
+ group_sql_conditions).distinct()
+ return [x.target_id for x in query.all()]
+
+ def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+ try:
+ with sql.session_for_write() as session:
+ session.add(RoleAssignment(
+ type=AssignmentType.USER_PROJECT,
+ actor_id=user_id, target_id=tenant_id,
+ role_id=role_id, inherited=False))
+ except sql.DBDuplicateEntry:
+ msg = ('User %s already has role %s in tenant %s'
+ % (user_id, role_id, tenant_id))
+ raise exception.Conflict(type='role grant', details=msg)
+
+ def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id)
+ q = q.filter_by(target_id=tenant_id)
+ q = q.filter_by(role_id=role_id)
+ if q.delete() == 0:
+ raise exception.RoleNotFound(message=_(
+ 'Cannot remove role that has not been granted, %s') %
+ role_id)
+
+ def _get_user_assignment_types(self):
+ return [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]
+
+ def _get_group_assignment_types(self):
+ return [AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN]
+
+ def _get_project_assignment_types(self):
+ return [AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT]
+
+ def _get_domain_assignment_types(self):
+ return [AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN]
+
+ def _get_assignment_types(self, user, group, project, domain):
+ """Returns a list of role assignment types based on provided entities
+
+ If one of user or group (the "actor") as well as one of project or
+ domain (the "target") are provided, the list will contain the role
+ assignment type for that specific pair of actor and target.
+
+ If only an actor or target is provided, the list will contain the
+ role assignment types that satisfy the specified entity.
+
+ For example, if user and project are provided, the return will be:
+
+ [AssignmentType.USER_PROJECT]
+
+ However, if only user was provided, the return would be:
+
+ [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]
+
+ It is not expected that user and group (or project and domain) are
+ specified - but if they are, the most fine-grained value will be
+ chosen (i.e. user over group, project over domain).
+
+ """
+ actor_types = []
+ if user:
+ actor_types = self._get_user_assignment_types()
+ elif group:
+ actor_types = self._get_group_assignment_types()
+
+ target_types = []
+ if project:
+ target_types = self._get_project_assignment_types()
+ elif domain:
+ target_types = self._get_domain_assignment_types()
+
+ if actor_types and target_types:
+ return list(set(actor_types).intersection(target_types))
+
+ return actor_types or target_types
+
+ def list_role_assignments(self, role_id=None,
+ user_id=None, group_ids=None,
+ domain_id=None, project_ids=None,
+ inherited_to_projects=None):
+
+ def denormalize_role(ref):
+ assignment = {}
+ if ref.type == AssignmentType.USER_PROJECT:
+ assignment['user_id'] = ref.actor_id
+ assignment['project_id'] = ref.target_id
+ elif ref.type == AssignmentType.USER_DOMAIN:
+ assignment['user_id'] = ref.actor_id
+ assignment['domain_id'] = ref.target_id
+ elif ref.type == AssignmentType.GROUP_PROJECT:
+ assignment['group_id'] = ref.actor_id
+ assignment['project_id'] = ref.target_id
+ elif ref.type == AssignmentType.GROUP_DOMAIN:
+ assignment['group_id'] = ref.actor_id
+ assignment['domain_id'] = ref.target_id
+ else:
+ raise exception.Error(message=_(
+ 'Unexpected assignment type encountered, %s') %
+ ref.type)
+ assignment['role_id'] = ref.role_id
+ if ref.inherited:
+ assignment['inherited_to_projects'] = 'projects'
+ return assignment
+
+ with sql.session_for_read() as session:
+ assignment_types = self._get_assignment_types(
+ user_id, group_ids, project_ids, domain_id)
+
+ targets = None
+ if project_ids:
+ targets = project_ids
+ elif domain_id:
+ targets = [domain_id]
+
+ actors = None
+ if group_ids:
+ actors = group_ids
+ elif user_id:
+ actors = [user_id]
+
+ query = session.query(RoleAssignment)
+
+ if role_id:
+ query = query.filter_by(role_id=role_id)
+ if actors:
+ query = query.filter(RoleAssignment.actor_id.in_(actors))
+ if targets:
+ query = query.filter(RoleAssignment.target_id.in_(targets))
+ if assignment_types:
+ query = query.filter(RoleAssignment.type.in_(assignment_types))
+ if inherited_to_projects is not None:
+ query = query.filter_by(inherited=inherited_to_projects)
+
+ return [denormalize_role(ref) for ref in query.all()]
+
+ def delete_project_assignments(self, project_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(target_id=project_id)
+ q.delete(False)
+
+ def delete_role_assignments(self, role_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(role_id=role_id)
+ q.delete(False)
+
+ def delete_user_assignments(self, user_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id)
+ q.delete(False)
+
+ def delete_group_assignments(self, group_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=group_id)
+ q.delete(False)
+
+
+class RoleAssignment(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'assignment'
+ attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
+    # NOTE(henry-nash): Postgres requires a name to be defined for an Enum
+ type = sql.Column(
+ sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT,
+ AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN,
+ name='type'),
+ nullable=False)
+ actor_id = sql.Column(sql.String(64), nullable=False)
+ target_id = sql.Column(sql.String(64), nullable=False)
+ role_id = sql.Column(sql.String(64), nullable=False)
+ inherited = sql.Column(sql.Boolean, default=False, nullable=False)
+ __table_args__ = (
+ sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id',
+ 'inherited'),
+ sql.Index('ix_actor_id', 'actor_id'),
+ )
+
+ def to_dict(self):
+ """Override parent method with a simpler implementation.
+
+ RoleAssignment doesn't have non-indexed 'extra' attributes, so the
+ parent implementation is not applicable.
+ """
+ return dict(self.items())
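The denormalize_role helper above is a pure mapping from assignment rows to the API representation. A standalone restatement of that mapping for one row type (the Ref tuple stands in for a RoleAssignment row):

    # Standalone restatement of the row -> API mapping in denormalize_role;
    # Ref is an illustrative stand-in for a RoleAssignment row.
    from collections import namedtuple

    Ref = namedtuple('Ref', 'type actor_id target_id role_id inherited')

    ref = Ref('UserProject', 'user-1', 'proj-1', 'role-1', True)
    assignment = {'user_id': ref.actor_id, 'project_id': ref.target_id,
                  'role_id': ref.role_id}
    if ref.inherited:
        assignment['inherited_to_projects'] = 'projects'
    print(assignment)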
diff --git a/keystone-moon/keystone/assignment/V8_role_backends/__init__.py b/keystone-moon/keystone/assignment/V8_role_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/assignment/V8_role_backends/__init__.py
diff --git a/keystone-moon/keystone/assignment/V8_role_backends/sql.py b/keystone-moon/keystone/assignment/V8_role_backends/sql.py
new file mode 100644
index 00000000..2e2e119a
--- /dev/null
+++ b/keystone-moon/keystone/assignment/V8_role_backends/sql.py
@@ -0,0 +1,80 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone.common import sql
+from keystone import exception
+
+
+class Role(assignment.RoleDriverV8):
+
+ @sql.handle_conflicts(conflict_type='role')
+ def create_role(self, role_id, role):
+ with sql.session_for_write() as session:
+ ref = RoleTable.from_dict(role)
+ session.add(ref)
+ return ref.to_dict()
+
+ @sql.truncated
+ def list_roles(self, hints):
+ with sql.session_for_read() as session:
+ query = session.query(RoleTable)
+ refs = sql.filter_limit_query(RoleTable, query, hints)
+ return [ref.to_dict() for ref in refs]
+
+ def list_roles_from_ids(self, ids):
+ if not ids:
+ return []
+ else:
+ with sql.session_for_read() as session:
+ query = session.query(RoleTable)
+ query = query.filter(RoleTable.id.in_(ids))
+ role_refs = query.all()
+ return [role_ref.to_dict() for role_ref in role_refs]
+
+ def _get_role(self, session, role_id):
+ ref = session.query(RoleTable).get(role_id)
+ if ref is None:
+ raise exception.RoleNotFound(role_id=role_id)
+ return ref
+
+ def get_role(self, role_id):
+ with sql.session_for_read() as session:
+ return self._get_role(session, role_id).to_dict()
+
+ @sql.handle_conflicts(conflict_type='role')
+ def update_role(self, role_id, role):
+ with sql.session_for_write() as session:
+ ref = self._get_role(session, role_id)
+ old_dict = ref.to_dict()
+ for k in role:
+ old_dict[k] = role[k]
+ new_role = RoleTable.from_dict(old_dict)
+ for attr in RoleTable.attributes:
+ if attr != 'id':
+ setattr(ref, attr, getattr(new_role, attr))
+ ref.extra = new_role.extra
+ return ref.to_dict()
+
+ def delete_role(self, role_id):
+ with sql.session_for_write() as session:
+ ref = self._get_role(session, role_id)
+ session.delete(ref)
+
+
+class RoleTable(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'role'
+ attributes = ['id', 'name']
+ id = sql.Column(sql.String(64), primary_key=True)
+ name = sql.Column(sql.String(255), unique=True, nullable=False)
+ extra = sql.Column(sql.JsonBlob())
+ __table_args__ = (sql.UniqueConstraint('name'),)
diff --git a/keystone-moon/keystone/assignment/__init__.py b/keystone-moon/keystone/assignment/__init__.py
index 49ad7594..4aa04ee6 100644
--- a/keystone-moon/keystone/assignment/__init__.py
+++ b/keystone-moon/keystone/assignment/__init__.py
@@ -14,4 +14,3 @@
from keystone.assignment import controllers # noqa
from keystone.assignment.core import * # noqa
-from keystone.assignment import routers # noqa
diff --git a/keystone-moon/keystone/assignment/backends/sql.py b/keystone-moon/keystone/assignment/backends/sql.py
index e249ba34..e089726a 100644
--- a/keystone-moon/keystone/assignment/backends/sql.py
+++ b/keystone-moon/keystone/assignment/backends/sql.py
@@ -12,21 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
-from oslo_log import log
-import sqlalchemy
-from sqlalchemy.sql.expression import false
-
from keystone import assignment as keystone_assignment
from keystone.common import sql
from keystone import exception
from keystone.i18n import _
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-
class AssignmentType(object):
USER_PROJECT = 'UserProject'
GROUP_PROJECT = 'GroupProject'
@@ -49,7 +40,7 @@ class AssignmentType(object):
raise exception.AssignmentTypeCalculationError(**locals())
-class Assignment(keystone_assignment.AssignmentDriverV8):
+class Assignment(keystone_assignment.AssignmentDriverV9):
def default_role_driver(self):
return 'sql'
@@ -57,60 +48,6 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
def default_resource_driver(self):
return 'sql'
- def list_user_ids_for_project(self, tenant_id):
- with sql.transaction() as session:
- query = session.query(RoleAssignment.actor_id)
- query = query.filter_by(type=AssignmentType.USER_PROJECT)
- query = query.filter_by(target_id=tenant_id)
- query = query.distinct('actor_id')
- assignments = query.all()
- return [assignment.actor_id for assignment in assignments]
-
- def _get_metadata(self, user_id=None, tenant_id=None,
- domain_id=None, group_id=None, session=None):
- # TODO(henry-nash): This method represents the last vestiges of the old
- # metadata concept in this driver. Although we no longer need it here,
- # since the Manager layer uses the metadata concept across all
- # assignment drivers, we need to remove it from all of them in order to
- # finally remove this method.
-
- # We aren't given a session when called by the manager directly.
- if session is None:
- session = sql.get_session()
-
- q = session.query(RoleAssignment)
-
- def _calc_assignment_type():
- # Figure out the assignment type we're checking for from the args.
- if user_id:
- if tenant_id:
- return AssignmentType.USER_PROJECT
- else:
- return AssignmentType.USER_DOMAIN
- else:
- if tenant_id:
- return AssignmentType.GROUP_PROJECT
- else:
- return AssignmentType.GROUP_DOMAIN
-
- q = q.filter_by(type=_calc_assignment_type())
- q = q.filter_by(actor_id=user_id or group_id)
- q = q.filter_by(target_id=tenant_id or domain_id)
- refs = q.all()
- if not refs:
- raise exception.MetadataNotFound()
-
- metadata_ref = {}
- metadata_ref['roles'] = []
- for assignment in refs:
- role_ref = {}
- role_ref['id'] = assignment.role_id
- if assignment.inherited:
- role_ref['inherited_to'] = 'projects'
- metadata_ref['roles'].append(role_ref)
-
- return metadata_ref
-
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
@@ -118,21 +55,21 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
assignment_type = AssignmentType.calculate_type(
user_id, group_id, project_id, domain_id)
try:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
session.add(RoleAssignment(
type=assignment_type,
actor_id=user_id or group_id,
target_id=project_id or domain_id,
role_id=role_id,
inherited=inherited_to_projects))
- except sql.DBDuplicateEntry:
- # The v3 grant APIs are silent if the assignment already exists
+ except sql.DBDuplicateEntry: # nosec : The v3 grant APIs are silent if
+ # the assignment already exists
pass
def list_grant_role_ids(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
q = session.query(RoleAssignment.role_id)
q = q.filter(RoleAssignment.actor_id == (user_id or group_id))
q = q.filter(RoleAssignment.target_id == (project_id or domain_id))
@@ -151,7 +88,7 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
def check_grant_role_id(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
try:
q = self._build_grant_filter(
session, role_id, user_id, group_id, domain_id, project_id,
@@ -167,7 +104,7 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = self._build_grant_filter(
session, role_id, user_id, group_id, domain_id, project_id,
inherited_to_projects)
@@ -178,143 +115,9 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
actor_id=actor_id,
target_id=target_id)
- def _list_project_ids_for_actor(self, actors, hints, inherited,
- group_only=False):
- # TODO(henry-nash): Now that we have a single assignment table, we
- # should be able to honor the hints list that is provided.
-
- assignment_type = [AssignmentType.GROUP_PROJECT]
- if not group_only:
- assignment_type.append(AssignmentType.USER_PROJECT)
-
- sql_constraints = sqlalchemy.and_(
- RoleAssignment.type.in_(assignment_type),
- RoleAssignment.inherited == inherited,
- RoleAssignment.actor_id.in_(actors))
-
- with sql.transaction() as session:
- query = session.query(RoleAssignment.target_id).filter(
- sql_constraints).distinct()
-
- return [x.target_id for x in query.all()]
-
- def list_project_ids_for_user(self, user_id, group_ids, hints,
- inherited=False):
- actor_list = [user_id]
- if group_ids:
- actor_list = actor_list + group_ids
-
- return self._list_project_ids_for_actor(actor_list, hints, inherited)
-
- def list_domain_ids_for_user(self, user_id, group_ids, hints,
- inherited=False):
- with sql.transaction() as session:
- query = session.query(RoleAssignment.target_id)
- filters = []
-
- if user_id:
- sql_constraints = sqlalchemy.and_(
- RoleAssignment.actor_id == user_id,
- RoleAssignment.inherited == inherited,
- RoleAssignment.type == AssignmentType.USER_DOMAIN)
- filters.append(sql_constraints)
-
- if group_ids:
- sql_constraints = sqlalchemy.and_(
- RoleAssignment.actor_id.in_(group_ids),
- RoleAssignment.inherited == inherited,
- RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
- filters.append(sql_constraints)
-
- if not filters:
- return []
-
- query = query.filter(sqlalchemy.or_(*filters)).distinct()
-
- return [assignment.target_id for assignment in query.all()]
-
- def list_role_ids_for_groups_on_domain(self, group_ids, domain_id):
- if not group_ids:
- # If there's no groups then there will be no domain roles.
- return []
-
- sql_constraints = sqlalchemy.and_(
- RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
- RoleAssignment.target_id == domain_id,
- RoleAssignment.inherited == false(),
- RoleAssignment.actor_id.in_(group_ids))
-
- with sql.transaction() as session:
- query = session.query(RoleAssignment.role_id).filter(
- sql_constraints).distinct()
- return [role.role_id for role in query.all()]
-
- def list_role_ids_for_groups_on_project(
- self, group_ids, project_id, project_domain_id, project_parents):
-
- if not group_ids:
- # If there's no groups then there will be no project roles.
- return []
-
- # NOTE(rodrigods): First, we always include projects with
- # non-inherited assignments
- sql_constraints = sqlalchemy.and_(
- RoleAssignment.type == AssignmentType.GROUP_PROJECT,
- RoleAssignment.inherited == false(),
- RoleAssignment.target_id == project_id)
-
- if CONF.os_inherit.enabled:
- # Inherited roles from domains
- sql_constraints = sqlalchemy.or_(
- sql_constraints,
- sqlalchemy.and_(
- RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
- RoleAssignment.inherited,
- RoleAssignment.target_id == project_domain_id))
-
- # Inherited roles from projects
- if project_parents:
- sql_constraints = sqlalchemy.or_(
- sql_constraints,
- sqlalchemy.and_(
- RoleAssignment.type == AssignmentType.GROUP_PROJECT,
- RoleAssignment.inherited,
- RoleAssignment.target_id.in_(project_parents)))
-
- sql_constraints = sqlalchemy.and_(
- sql_constraints, RoleAssignment.actor_id.in_(group_ids))
-
- with sql.transaction() as session:
- # NOTE(morganfainberg): Only select the columns we actually care
- # about here, in this case role_id.
- query = session.query(RoleAssignment.role_id).filter(
- sql_constraints).distinct()
-
- return [result.role_id for result in query.all()]
-
- def list_project_ids_for_groups(self, group_ids, hints,
- inherited=False):
- return self._list_project_ids_for_actor(
- group_ids, hints, inherited, group_only=True)
-
- def list_domain_ids_for_groups(self, group_ids, inherited=False):
- if not group_ids:
- # If there's no groups then there will be no domains.
- return []
-
- group_sql_conditions = sqlalchemy.and_(
- RoleAssignment.type == AssignmentType.GROUP_DOMAIN,
- RoleAssignment.inherited == inherited,
- RoleAssignment.actor_id.in_(group_ids))
-
- with sql.transaction() as session:
- query = session.query(RoleAssignment.target_id).filter(
- group_sql_conditions).distinct()
- return [x.target_id for x in query.all()]
-
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
try:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
session.add(RoleAssignment(
type=AssignmentType.USER_PROJECT,
actor_id=user_id, target_id=tenant_id,
@@ -325,7 +128,7 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
raise exception.Conflict(type='role grant', details=msg)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = session.query(RoleAssignment)
q = q.filter_by(actor_id=user_id)
q = q.filter_by(target_id=tenant_id)
@@ -415,7 +218,7 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
assignment['inherited_to_projects'] = 'projects'
return assignment
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
assignment_types = self._get_assignment_types(
user_id, group_ids, project_ids, domain_id)
@@ -447,34 +250,51 @@ class Assignment(keystone_assignment.AssignmentDriverV8):
return [denormalize_role(ref) for ref in query.all()]
def delete_project_assignments(self, project_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = session.query(RoleAssignment)
- q = q.filter_by(target_id=project_id)
+ q = q.filter_by(target_id=project_id).filter(
+ RoleAssignment.type.in_((AssignmentType.USER_PROJECT,
+ AssignmentType.GROUP_PROJECT))
+ )
q.delete(False)
def delete_role_assignments(self, role_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = session.query(RoleAssignment)
q = q.filter_by(role_id=role_id)
q.delete(False)
+ def delete_domain_assignments(self, domain_id):
+ with sql.session_for_write() as session:
+ q = session.query(RoleAssignment)
+ q = q.filter(RoleAssignment.target_id == domain_id).filter(
+ (RoleAssignment.type == AssignmentType.USER_DOMAIN) |
+ (RoleAssignment.type == AssignmentType.GROUP_DOMAIN))
+ q.delete(False)
+
def delete_user_assignments(self, user_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = session.query(RoleAssignment)
- q = q.filter_by(actor_id=user_id)
+ q = q.filter_by(actor_id=user_id).filter(
+ RoleAssignment.type.in_((AssignmentType.USER_PROJECT,
+ AssignmentType.USER_DOMAIN))
+ )
q.delete(False)
def delete_group_assignments(self, group_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
q = session.query(RoleAssignment)
- q = q.filter_by(actor_id=group_id)
+ q = q.filter_by(actor_id=group_id).filter(
+ RoleAssignment.type.in_((AssignmentType.GROUP_PROJECT,
+ AssignmentType.GROUP_DOMAIN))
+ )
q.delete(False)
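
All four deletes above now pair the actor/target filter with an explicit assignment-type filter, so deleting a user's assignments can no longer sweep away rows whose actor_id happens to belong to a group (and vice versa). The query shape, as a self-contained sketch (table and type names illustrative):

import sqlalchemy as sa
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Assignment(Base):
    __tablename__ = 'assignment'
    id = sa.Column(sa.Integer, primary_key=True)
    type = sa.Column(sa.String(64))       # e.g. 'UserProject'
    actor_id = sa.Column(sa.String(64))
    target_id = sa.Column(sa.String(64))
    role_id = sa.Column(sa.String(64))

engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
Session = sessionmaker(engine)

USER_TYPES = ('UserProject', 'UserDomain')

def delete_user_assignments(user_id):
    with Session.begin() as session:
        q = session.query(Assignment).filter_by(actor_id=user_id)
        # Restrict the bulk delete to user-typed rows only.
        q = q.filter(Assignment.type.in_(USER_TYPES))
        q.delete(synchronize_session=False)
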
class RoleAssignment(sql.ModelBase, sql.DictBase):
__tablename__ = 'assignment'
attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
- # NOTE(henry-nash); Postgres requires a name to be defined for an Enum
+ # NOTE(henry-nash): Postgres requires a name to be defined for an Enum
type = sql.Column(
sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT,
AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN,
@@ -491,7 +311,7 @@ class RoleAssignment(sql.ModelBase, sql.DictBase):
)
def to_dict(self):
- """Override parent to_dict() method with a simpler implementation.
+ """Override parent method with a simpler implementation.
RoleAssignment doesn't have non-indexed 'extra' attributes, so the
parent implementation is not applicable.
diff --git a/keystone-moon/keystone/assignment/controllers.py b/keystone-moon/keystone/assignment/controllers.py
index bbaf9437..1b163013 100644
--- a/keystone-moon/keystone/assignment/controllers.py
+++ b/keystone-moon/keystone/assignment/controllers.py
@@ -27,6 +27,7 @@ from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import validation
+from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
from keystone import notifications
@@ -40,7 +41,7 @@ LOG = log.getLogger(__name__)
class TenantAssignment(controller.V2Controller):
"""The V2 Project APIs that are processing assignments."""
- @controller.v2_deprecated
+ @controller.v2_auth_deprecated
def get_projects_for_token(self, context, **kw):
"""Get valid tenants for token based on token used to authenticate.
@@ -138,6 +139,11 @@ class RoleAssignmentV2(controller.V2Controller):
"""
self.assert_admin(context)
+ # NOTE(davechen): A router entry without a project id is defined,
+ # but we don't plan on implementing it.
+ if tenant_id is None:
+ raise exception.NotImplemented(
+ message=_('User roles not supported: tenant_id required'))
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, tenant_id)
return {'roles': [self.role_api.get_role(x)
@@ -261,7 +267,7 @@ class ProjectAssignmentV3(controller.V3Controller):
super(ProjectAssignmentV3, self).__init__()
self.get_member_from_driver = self.resource_api.get_project
- @controller.filterprotected('enabled', 'name')
+ @controller.filterprotected('domain_id', 'enabled', 'name')
def list_user_projects(self, context, filters, user_id):
hints = ProjectAssignmentV3.build_driver_hints(context, filters)
refs = self.assignment_api.list_projects_for_user(user_id,
@@ -271,7 +277,19 @@ class ProjectAssignmentV3(controller.V3Controller):
@dependency.requires('role_api')
class RoleV3(controller.V3Controller):
- """The V3 Role CRUD APIs."""
+ """The V3 Role CRUD APIs.
+
+ To ease complexity (and hence risk) in writing the policy rules for the
+ role APIs, we create separate policy actions for roles that are domain
+ specific, as opposed to those that are global. To achieve this, each of
+ the role API methods has a wrapper method that checks whether the
+ role is global or domain specific.
+
+ NOTE (henry-nash): If this separate global vs scoped policy action pattern
+ becomes repeated for other entities, we should consider encapsulating this
+ into a specialized router class.
+
+ """
collection_name = 'roles'
member_name = 'role'
@@ -280,9 +298,104 @@ class RoleV3(controller.V3Controller):
super(RoleV3, self).__init__()
self.get_member_from_driver = self.role_api.get_role
+ def _is_domain_role(self, role):
+ return role.get('domain_id') is not None
+
+ def _is_domain_role_target(self, role_id):
+ try:
+ role = self.role_api.get_role(role_id)
+ except exception.RoleNotFound:
+ # We hide this error since we have not yet carried out a policy
+ # check - and it maybe that the caller isn't authorized to make
+ # this call. If so, we want that error to be raised instead.
+ return False
+ return self._is_domain_role(role)
+
+ def create_role_wrapper(self, context, role):
+ if self._is_domain_role(role):
+ return self.create_domain_role(context, role=role)
+ else:
+ return self.create_role(context, role=role)
+
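
The wrapper methods in this hunk all follow one shape: an unprotected entry point inspects the role (or its stored counterpart) once, then dispatches to one of two identically-implemented methods so each can sit behind its own policy rule. A runnable sketch of that dispatch, with the policy machinery reduced to a stub (the action names and context layout below are assumptions, not keystone's real policy engine):

POLICY = {  # policy action -> predicate over a request context (stub)
    'identity:create_role': lambda ctx: 'admin' in ctx['roles'],
    'identity:create_domain_role': lambda ctx: 'domain_admin' in ctx['roles'],
}

def protected(action):
    # Stand-in for @controller.protected(): enforce the named policy
    # action, then call the real implementation.
    def deco(fn):
        def inner(self, context, *args, **kwargs):
            if not POLICY[action](context):
                raise PermissionError(action)
            return fn(self, context, *args, **kwargs)
        return inner
    return deco

class Roles:
    def create_role_wrapper(self, context, role):
        # The wrapper itself is unprotected; each target method
        # enforces its own rule, which is the whole point of the split.
        if role.get('domain_id') is not None:
            return self.create_domain_role(context, role)
        return self.create_role(context, role)

    @protected('identity:create_role')
    def create_role(self, context, role):
        return {'role': role}

    @protected('identity:create_domain_role')
    def create_domain_role(self, context, role):
        return {'role': role}

print(Roles().create_role_wrapper({'roles': ['admin']}, {'name': 'ops'}))
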
@controller.protected()
@validation.validated(schema.role_create, 'role')
def create_role(self, context, role):
+ return self._create_role(context, role)
+
+ @controller.protected()
+ @validation.validated(schema.role_create, 'role')
+ def create_domain_role(self, context, role):
+ return self._create_role(context, role)
+
+ def list_roles_wrapper(self, context):
+ # If there is no domain_id filter defined, then we only want to return
+ # global roles, so we set the domain_id filter to None.
+ params = context['query_string']
+ if 'domain_id' not in params:
+ context['query_string']['domain_id'] = None
+
+ if context['query_string']['domain_id'] is not None:
+ return self.list_domain_roles(context)
+ else:
+ return self.list_roles(context)
+
+ @controller.filterprotected('name', 'domain_id')
+ def list_roles(self, context, filters):
+ return self._list_roles(context, filters)
+
+ @controller.filterprotected('name', 'domain_id')
+ def list_domain_roles(self, context, filters):
+ return self._list_roles(context, filters)
+
+ def get_role_wrapper(self, context, role_id):
+ if self._is_domain_role_target(role_id):
+ return self.get_domain_role(context, role_id=role_id)
+ else:
+ return self.get_role(context, role_id=role_id)
+
+ @controller.protected()
+ def get_role(self, context, role_id):
+ return self._get_role(context, role_id)
+
+ @controller.protected()
+ def get_domain_role(self, context, role_id):
+ return self._get_role(context, role_id)
+
+ def update_role_wrapper(self, context, role_id, role):
+ # Since we don't allow you to change whether a role is global or domain
+ # specific, we can ignore the new update attributes and just look at
+ # the existing role.
+ if self._is_domain_role_target(role_id):
+ return self.update_domain_role(
+ context, role_id=role_id, role=role)
+ else:
+ return self.update_role(context, role_id=role_id, role=role)
+
+ @controller.protected()
+ @validation.validated(schema.role_update, 'role')
+ def update_role(self, context, role_id, role):
+ return self._update_role(context, role_id, role)
+
+ @controller.protected()
+ @validation.validated(schema.role_update, 'role')
+ def update_domain_role(self, context, role_id, role):
+ return self._update_role(context, role_id, role)
+
+ def delete_role_wrapper(self, context, role_id):
+ if self._is_domain_role_target(role_id):
+ return self.delete_domain_role(context, role_id=role_id)
+ else:
+ return self.delete_role(context, role_id=role_id)
+
+ @controller.protected()
+ def delete_role(self, context, role_id):
+ return self._delete_role(context, role_id)
+
+ @controller.protected()
+ def delete_domain_role(self, context, role_id):
+ return self._delete_role(context, role_id)
+
+ def _create_role(self, context, role):
if role['name'] == CONF.member_role_name:
# Use the configured member role ID when creating the configured
# member role name. This avoids the potential of creating a
@@ -297,32 +410,146 @@ class RoleV3(controller.V3Controller):
ref = self.role_api.create_role(ref['id'], ref, initiator)
return RoleV3.wrap_member(context, ref)
- @controller.filterprotected('name')
- def list_roles(self, context, filters):
+ def _list_roles(self, context, filters):
hints = RoleV3.build_driver_hints(context, filters)
refs = self.role_api.list_roles(
hints=hints)
return RoleV3.wrap_collection(context, refs, hints=hints)
- @controller.protected()
- def get_role(self, context, role_id):
+ def _get_role(self, context, role_id):
ref = self.role_api.get_role(role_id)
return RoleV3.wrap_member(context, ref)
- @controller.protected()
- @validation.validated(schema.role_update, 'role')
- def update_role(self, context, role_id, role):
+ def _update_role(self, context, role_id, role):
self._require_matching_id(role_id, role)
initiator = notifications._get_request_audit_info(context)
ref = self.role_api.update_role(role_id, role, initiator)
return RoleV3.wrap_member(context, ref)
- @controller.protected()
- def delete_role(self, context, role_id):
+ def _delete_role(self, context, role_id):
initiator = notifications._get_request_audit_info(context)
self.role_api.delete_role(role_id, initiator)
+@dependency.requires('role_api')
+class ImpliedRolesV3(controller.V3Controller):
+ """The V3 ImpliedRoles CRD APIs. There is no Update."""
+
+ def _prior_role_stanza(self, endpoint, prior_role_id, prior_role_name):
+ return {
+ "id": prior_role_id,
+ "links": {
+ "self": endpoint + "/v3/roles/" + prior_role_id
+ },
+ "name": prior_role_name
+ }
+
+ def _implied_role_stanza(self, endpoint, implied_role):
+ implied_id = implied_role['id']
+ implied_response = {
+ "id": implied_id,
+ "links": {
+ "self": endpoint + "/v3/roles/" + implied_id
+ },
+ "name": implied_role['name']
+ }
+ return implied_response
+
+ def _populate_prior_role_response(self, endpoint, prior_id):
+ prior_role = self.role_api.get_role(prior_id)
+ response = {
+ "role_inference": {
+ "prior_role": self._prior_role_stanza(
+ endpoint, prior_id, prior_role['name'])
+ }
+ }
+ return response
+
+ def _populate_implied_roles_response(self, endpoint,
+ prior_id, implied_ids):
+ response = self._populate_prior_role_response(endpoint, prior_id)
+ response["role_inference"]['implies'] = []
+ for implied_id in implied_ids:
+ implied_role = self.role_api.get_role(implied_id)
+ implied_response = self._implied_role_stanza(
+ endpoint, implied_role)
+ response["role_inference"]['implies'].append(implied_response)
+ return response
+
+ def _populate_implied_role_response(self, endpoint, prior_id, implied_id):
+ response = self._populate_prior_role_response(endpoint, prior_id)
+ implied_role = self.role_api.get_role(implied_id)
+ stanza = self._implied_role_stanza(endpoint, implied_role)
+ response["role_inference"]['implies'] = stanza
+ return response
+
+ @controller.protected()
+ def get_implied_role(self, context, prior_role_id, implied_role_id):
+ ref = self.role_api.get_implied_role(prior_role_id, implied_role_id)
+
+ prior_id = ref['prior_role_id']
+ implied_id = ref['implied_role_id']
+ endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url(
+ context, 'public')
+ response = self._populate_implied_role_response(
+ endpoint, prior_id, implied_id)
+ return response
+
+ @controller.protected()
+ def check_implied_role(self, context, prior_role_id, implied_role_id):
+ self.role_api.get_implied_role(prior_role_id, implied_role_id)
+
+ @controller.protected()
+ def create_implied_role(self, context, prior_role_id, implied_role_id):
+ self.role_api.create_implied_role(prior_role_id, implied_role_id)
+ return wsgi.render_response(
+ self.get_implied_role(context, prior_role_id, implied_role_id),
+ status=(201, 'Created'))
+
+ @controller.protected()
+ def delete_implied_role(self, context, prior_role_id, implied_role_id):
+ self.role_api.delete_implied_role(prior_role_id, implied_role_id)
+
+ @controller.protected()
+ def list_implied_roles(self, context, prior_role_id):
+ ref = self.role_api.list_implied_roles(prior_role_id)
+ implied_ids = [r['implied_role_id'] for r in ref]
+ endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url(
+ context, 'public')
+
+ results = self._populate_implied_roles_response(
+ endpoint, prior_role_id, implied_ids)
+
+ return results
+
+ @controller.protected()
+ def list_role_inference_rules(self, context):
+ refs = self.role_api.list_role_inference_rules()
+ role_dict = {role_ref['id']: role_ref
+ for role_ref in self.role_api.list_roles()}
+
+ rules = dict()
+ endpoint = super(controller.V3Controller, ImpliedRolesV3).base_url(
+ context, 'public')
+
+ for ref in refs:
+ implied_role_id = ref['implied_role_id']
+ prior_role_id = ref['prior_role_id']
+ implied = rules.get(prior_role_id, [])
+ implied.append(self._implied_role_stanza(
+ endpoint, role_dict[implied_role_id]))
+ rules[prior_role_id] = implied
+
+ inferences = []
+ for prior_id, implied in rules.items():
+ prior_response = self._prior_role_stanza(
+ endpoint, prior_id, role_dict[prior_id]['name'])
+ inferences.append({'prior_role': prior_response,
+ 'implies': implied})
+ results = {'role_inferences': inferences}
+ return results
+
+
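
list_role_inference_rules above folds the flat list of (prior_role_id, implied_role_id) rows into one stanza per prior role. The grouping step on its own, with illustrative data:

rules = [
    {'prior_role_id': 'admin', 'implied_role_id': 'member'},
    {'prior_role_id': 'admin', 'implied_role_id': 'auditor'},
    {'prior_role_id': 'member', 'implied_role_id': 'reader'},
]

by_prior = {}
for rule in rules:
    by_prior.setdefault(rule['prior_role_id'], []).append(
        rule['implied_role_id'])

role_inferences = [{'prior_role': prior, 'implies': implied}
                   for prior, implied in by_prior.items()]
# -> [{'prior_role': 'admin', 'implies': ['member', 'auditor']},
#     {'prior_role': 'member', 'implies': ['reader']}]
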
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'role_api')
class GrantAssignmentV3(controller.V3Controller):
@@ -475,6 +702,13 @@ class RoleAssignmentV3(controller.V3Controller):
'role_id': role_id,
'indirect': {'project_id': parent_id}}
+ or, for a role that was implied by a prior role:
+
+ {'user_id': user_id,
+ 'project_id': project_id,
+ 'role_id': role_id,
+ 'indirect': {'role_id': prior role_id}}
+
It is possible to deduce if a role assignment came from group
membership if it has both 'user_id' in the main body of the dict and
'group_id' in the 'indirect' subdict, as well as it is possible to
@@ -505,13 +739,19 @@ class RoleAssignmentV3(controller.V3Controller):
}
"""
-
formatted_entity = {'links': {}}
inherited_assignment = entity.get('inherited_to_projects')
if 'project_id' in entity:
- formatted_entity['scope'] = (
- {'project': {'id': entity['project_id']}})
+ if 'project_name' in entity:
+ formatted_entity['scope'] = {'project': {
+ 'id': entity['project_id'],
+ 'name': entity['project_name'],
+ 'domain': {'id': entity['project_domain_id'],
+ 'name': entity['project_domain_name']}}}
+ else:
+ formatted_entity['scope'] = {
+ 'project': {'id': entity['project_id']}}
if 'domain_id' in entity.get('indirect', {}):
inherited_assignment = True
@@ -524,12 +764,24 @@ class RoleAssignmentV3(controller.V3Controller):
else:
formatted_link = '/projects/%s' % entity['project_id']
elif 'domain_id' in entity:
- formatted_entity['scope'] = {'domain': {'id': entity['domain_id']}}
+ if 'domain_name' in entity:
+ formatted_entity['scope'] = {
+ 'domain': {'id': entity['domain_id'],
+ 'name': entity['domain_name']}}
+ else:
+ formatted_entity['scope'] = {
+ 'domain': {'id': entity['domain_id']}}
formatted_link = '/domains/%s' % entity['domain_id']
if 'user_id' in entity:
- formatted_entity['user'] = {'id': entity['user_id']}
-
+ if 'user_name' in entity:
+ formatted_entity['user'] = {
+ 'id': entity['user_id'],
+ 'name': entity['user_name'],
+ 'domain': {'id': entity['user_domain_id'],
+ 'name': entity['user_domain_name']}}
+ else:
+ formatted_entity['user'] = {'id': entity['user_id']}
if 'group_id' in entity.get('indirect', {}):
membership_url = (
self.base_url(context, '/groups/%s/users/%s' % (
@@ -539,11 +791,31 @@ class RoleAssignmentV3(controller.V3Controller):
else:
formatted_link += '/users/%s' % entity['user_id']
elif 'group_id' in entity:
- formatted_entity['group'] = {'id': entity['group_id']}
+ if 'group_name' in entity:
+ formatted_entity['group'] = {
+ 'id': entity['group_id'],
+ 'name': entity['group_name'],
+ 'domain': {'id': entity['group_domain_id'],
+ 'name': entity['group_domain_name']}}
+ else:
+ formatted_entity['group'] = {'id': entity['group_id']}
formatted_link += '/groups/%s' % entity['group_id']
- formatted_entity['role'] = {'id': entity['role_id']}
- formatted_link += '/roles/%s' % entity['role_id']
+ if 'role_name' in entity:
+ formatted_entity['role'] = {'id': entity['role_id'],
+ 'name': entity['role_name']}
+ else:
+ formatted_entity['role'] = {'id': entity['role_id']}
+ prior_role_link = ''
+ if 'role_id' in entity.get('indirect', {}):
+ formatted_link += '/roles/%s' % entity['indirect']['role_id']
+ prior_role_link = (
+ '/prior_role/%(prior)s/implies/%(implied)s' % {
+ 'prior': entity['role_id'],
+ 'implied': entity['indirect']['role_id']
+ })
+ else:
+ formatted_link += '/roles/%s' % entity['role_id']
if inherited_assignment:
formatted_entity['scope']['OS-INHERIT:inherited_to'] = (
@@ -553,6 +825,9 @@ class RoleAssignmentV3(controller.V3Controller):
formatted_entity['links']['assignment'] = self.base_url(context,
formatted_link)
+ if prior_role_link:
+ formatted_entity['links']['prior_role'] = (
+ self.base_url(context, prior_role_link))
return formatted_entity
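
When include_names is requested, each stanza _format_entity emits is upgraded from a bare id to an id-plus-name form. For a direct user-on-project assignment, the input entity and the formatted result might look like this (all ids, names, and the URL host are illustrative):

entity = {
    'user_id': 'u1', 'user_name': 'alice',
    'user_domain_id': 'd1', 'user_domain_name': 'Default',
    'project_id': 'p1', 'project_name': 'demo',
    'project_domain_id': 'd1', 'project_domain_name': 'Default',
    'role_id': 'r1', 'role_name': 'member',
}

formatted = {
    'scope': {'project': {'id': 'p1', 'name': 'demo',
                          'domain': {'id': 'd1', 'name': 'Default'}}},
    'user': {'id': 'u1', 'name': 'alice',
             'domain': {'id': 'd1', 'name': 'Default'}},
    'role': {'id': 'r1', 'name': 'member'},
    'links': {'assignment':
              'https://keystone.example/v3/projects/p1/users/u1/roles/r1'},
}
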
@@ -586,10 +861,7 @@ class RoleAssignmentV3(controller.V3Controller):
msg = _('Specify a user or group, not both')
raise exception.ValidationError(msg)
- @controller.filterprotected('group.id', 'role.id',
- 'scope.domain.id', 'scope.project.id',
- 'scope.OS-INHERIT:inherited_to', 'user.id')
- def list_role_assignments(self, context, filters):
+ def _list_role_assignments(self, context, filters, include_subtree=False):
"""List role assignments to user and groups on domains and projects.
Return a list of all existing role assignments in the system, filtered
@@ -615,6 +887,8 @@ class RoleAssignmentV3(controller.V3Controller):
params = context['query_string']
effective = 'effective' in params and (
self.query_filter_is_true(params['effective']))
+ include_names = ('include_names' in params and
+ self.query_filter_is_true(params['include_names']))
if 'scope.OS-INHERIT:inherited_to' in params:
inherited = (
@@ -640,20 +914,59 @@ class RoleAssignmentV3(controller.V3Controller):
group_id=params.get('group.id'),
domain_id=params.get('scope.domain.id'),
project_id=params.get('scope.project.id'),
- inherited=inherited, effective=effective)
+ include_subtree=include_subtree,
+ inherited=inherited, effective=effective,
+ include_names=include_names)
formatted_refs = [self._format_entity(context, ref) for ref in refs]
return self.wrap_collection(context, formatted_refs)
- @controller.protected()
- def get_role_assignment(self, context):
- raise exception.NotImplemented()
+ @controller.filterprotected('group.id', 'role.id',
+ 'scope.domain.id', 'scope.project.id',
+ 'scope.OS-INHERIT:inherited_to', 'user.id')
+ def list_role_assignments(self, context, filters):
+ return self._list_role_assignments(context, filters)
- @controller.protected()
- def update_role_assignment(self, context):
- raise exception.NotImplemented()
+ def _check_list_tree_protection(self, context, protection_info):
+ """Check protection for list assignment for tree API.
- @controller.protected()
- def delete_role_assignment(self, context):
- raise exception.NotImplemented()
+ The policy rule might want to inspect the domain of any project filter,
+ so if one is defined, we load the project ref and pass it to the
+ check_protection method.
+
+ """
+ ref = {}
+ for filter, value in protection_info['filter_attr'].items():
+ if filter == 'scope.project.id' and value:
+ ref['project'] = self.resource_api.get_project(value)
+
+ self.check_protection(context, protection_info, ref)
+
+ @controller.filterprotected('group.id', 'role.id',
+ 'scope.domain.id', 'scope.project.id',
+ 'scope.OS-INHERIT:inherited_to', 'user.id',
+ callback=_check_list_tree_protection)
+ def list_role_assignments_for_tree(self, context, filters):
+ if not context['query_string'].get('scope.project.id'):
+ msg = _('scope.project.id must be specified if include_subtree '
+ 'is also specified')
+ raise exception.ValidationError(message=msg)
+ return self._list_role_assignments(context, filters,
+ include_subtree=True)
+
+ def list_role_assignments_wrapper(self, context):
+ """Main entry point from router for list role assignments.
+
+ Since we want different policy file rules to be applicable based on
+ whether the include_subtree query parameter is part of the API
+ call, this method checks for this and then calls the appropriate
+ protected entry point.
+
+ """
+ params = context['query_string']
+ if 'include_subtree' in params and (
+ self.query_filter_is_true(params['include_subtree'])):
+ return self.list_role_assignments_for_tree(context)
+ else:
+ return self.list_role_assignments(context)
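
The dispatch here is again policy-driven: a truthy include_subtree flag in the query string routes to the separately-protected tree entry point, which additionally insists on a scope.project.id filter. Reduced to its control flow (the truthiness helper below is an assumption about keystone's query_filter_is_true, not a copy of it):

def query_filter_is_true(value):
    # Assumed semantics: '0' and 'false' (any case) are false,
    # anything else supplied for the filter counts as true.
    return value.lower() not in ('0', 'false')

def list_role_assignments_wrapper(params):
    if query_filter_is_true(params.get('include_subtree', '0')):
        if not params.get('scope.project.id'):
            raise ValueError('scope.project.id must be specified if '
                             'include_subtree is also specified')
        return 'list_role_assignments_for_tree'  # subtree policy rule
    return 'list_role_assignments'               # plain policy rule

print(list_role_assignments_wrapper(
    {'include_subtree': 'true', 'scope.project.id': 'p1'}))
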
diff --git a/keystone-moon/keystone/assignment/core.py b/keystone-moon/keystone/assignment/core.py
index a510c3c1..05368fbf 100644
--- a/keystone-moon/keystone/assignment/core.py
+++ b/keystone-moon/keystone/assignment/core.py
@@ -17,8 +17,10 @@
import abc
import copy
+from oslo_cache import core as oslo_cache
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
import six
from keystone.common import cache
@@ -27,22 +29,34 @@ from keystone.common import driver_hints
from keystone.common import manager
from keystone import exception
from keystone.i18n import _
-from keystone.i18n import _LI
+from keystone.i18n import _LI, _LE, _LW
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='role')
+# This is a general cache region for assignment administration (CRUD
+# operations).
+MEMOIZE = cache.get_memoization_decorator(group='role')
+# This builds a discrete cache region dedicated to role assignments computed
+# for a given user + project/domain pair. Any write operation to add or remove
+# any role assignment should invalidate this entire cache region.
+COMPUTED_ASSIGNMENTS_REGION = oslo_cache.create_region()
+MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
+ group='role',
+ region=COMPUTED_ASSIGNMENTS_REGION)
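
The second region gives computed assignments their own invalidation domain: any grant write calls COMPUTED_ASSIGNMENTS_REGION.invalidate() (visible further down in this file) and wipes only these entries, while the general role CRUD cache is untouched. oslo_cache regions wrap dogpile.cache, so the behaviour can be sketched directly against dogpile (the in-memory backend is chosen purely for illustration):

from dogpile.cache import make_region

computed_region = make_region().configure('dogpile.cache.memory')

@computed_region.cache_on_arguments()
def get_roles_for_user_and_project(user_id, project_id):
    print('computing for', user_id, project_id)  # only on a cache miss
    return ['member']  # stand-in for the real effective-role lookup

get_roles_for_user_and_project('u1', 'p1')  # computes
get_roles_for_user_and_project('u1', 'p1')  # served from the region

# Any assignment write invalidates every computed entry in one call:
computed_region.invalidate()
get_roles_for_user_and_project('u1', 'p1')  # computes again
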
+
+
+@notifications.listener
@dependency.provider('assignment_api')
@dependency.requires('credential_api', 'identity_api', 'resource_api',
'revoke_api', 'role_api')
class Manager(manager.Manager):
"""Default pivot point for the Assignment backend.
- See :mod:`keystone.common.manager.Manager` for more details on how this
+ See :class:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
@@ -55,17 +69,48 @@ class Manager(manager.Manager):
def __init__(self):
assignment_driver = CONF.assignment.driver
-
# If there is no explicit assignment driver specified, we let the
# identity driver tell us what to use. This is for backward
# compatibility reasons from the time when identity, resource and
# assignment were all part of identity.
if assignment_driver is None:
- identity_driver = dependency.get_provider('identity_api').driver
- assignment_driver = identity_driver.default_assignment_driver()
-
+ msg = _('Use of the identity driver config to automatically '
+ 'configure the same assignment driver has been '
+ 'deprecated; in the "O" release, the assignment driver '
+ 'will need to be explicitly configured if different '
+ 'than the default (SQL).')
+ versionutils.report_deprecated_feature(LOG, msg)
+ try:
+ identity_driver = dependency.get_provider(
+ 'identity_api').driver
+ assignment_driver = identity_driver.default_assignment_driver()
+ except ValueError:
+ msg = _('Attempted automatic driver selection for assignment '
+ 'based upon the [identity]/driver option failed since '
+ 'driver %s is not found. Set [assignment]/driver to '
+ 'a valid driver in keystone config.')
+ LOG.critical(msg)
+ raise exception.KeystoneConfigurationError(msg)
super(Manager, self).__init__(assignment_driver)
+ # Make sure it is a driver version we support, and if it is a legacy
+ # driver, then wrap it.
+ if isinstance(self.driver, AssignmentDriverV8):
+ self.driver = V9AssignmentWrapperForV8Driver(self.driver)
+ elif not isinstance(self.driver, AssignmentDriverV9):
+ raise exception.UnsupportedDriverVersion(driver=assignment_driver)
+
+ self.event_callbacks = {
+ notifications.ACTIONS.deleted: {
+ 'domain': [self._delete_domain_assignments],
+ },
+ }
+
+ def _delete_domain_assignments(self, service, resource_type, operations,
+ payload):
+ domain_id = payload['resource_info']
+ self.driver.delete_domain_assignments(domain_id)
+
def _get_group_ids_for_user_id(self, user_id):
# TODO(morganfainberg): Implement a way to get only group_ids
# instead of the more expensive to_dict() call for each record.
@@ -74,7 +119,10 @@ class Manager(manager.Manager):
def list_user_ids_for_project(self, tenant_id):
self.resource_api.get_project(tenant_id)
- return self.driver.list_user_ids_for_project(tenant_id)
+ assignment_list = self.list_role_assignments(
+ project_id=tenant_id, effective=True)
+ # Use set() to process the list to remove any duplicates
+ return list(set([x['user_id'] for x in assignment_list]))
def _list_parent_ids_of_project(self, project_id):
if CONF.os_inherit.enabled:
@@ -83,127 +131,62 @@ class Manager(manager.Manager):
else:
return []
+ @MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_project(self, user_id, tenant_id):
"""Get the roles associated with a user within given project.
This includes roles directly assigned to the user on the
- project, as well as those by virtue of group membership. If
- the OS-INHERIT extension is enabled, then this will also
- include roles inherited from the domain.
+ project, as well as those by virtue of group membership or
+ inheritance.
:returns: a list of role ids.
- :raises: keystone.exception.UserNotFound,
- keystone.exception.ProjectNotFound
+ :raises keystone.exception.ProjectNotFound: If the project doesn't
+ exist.
"""
- def _get_group_project_roles(user_id, project_ref):
- group_ids = self._get_group_ids_for_user_id(user_id)
- return self.list_role_ids_for_groups_on_project(
- group_ids,
- project_ref['id'],
- project_ref['domain_id'],
- self._list_parent_ids_of_project(project_ref['id']))
-
- def _get_user_project_roles(user_id, project_ref):
- role_list = []
- try:
- metadata_ref = self._get_metadata(user_id=user_id,
- tenant_id=project_ref['id'])
- role_list = self._roles_from_role_dicts(
- metadata_ref.get('roles', {}), False)
- except exception.MetadataNotFound:
- pass
-
- if CONF.os_inherit.enabled:
- # Now get any inherited roles for the owning domain
- try:
- metadata_ref = self._get_metadata(
- user_id=user_id, domain_id=project_ref['domain_id'])
- role_list += self._roles_from_role_dicts(
- metadata_ref.get('roles', {}), True)
- except (exception.MetadataNotFound, exception.NotImplemented):
- pass
- # As well inherited roles from parent projects
- for p in self.resource_api.list_project_parents(
- project_ref['id']):
- p_roles = self.list_grants(
- user_id=user_id, project_id=p['id'],
- inherited_to_projects=True)
- role_list += [x['id'] for x in p_roles]
-
- return role_list
-
- project_ref = self.resource_api.get_project(tenant_id)
- user_role_list = _get_user_project_roles(user_id, project_ref)
- group_role_list = _get_group_project_roles(user_id, project_ref)
+ self.resource_api.get_project(tenant_id)
+ assignment_list = self.list_role_assignments(
+ user_id=user_id, project_id=tenant_id, effective=True)
# Use set() to process the list to remove any duplicates
- return list(set(user_role_list + group_role_list))
+ return list(set([x['role_id'] for x in assignment_list]))
+ @MEMOIZE_COMPUTED_ASSIGNMENTS
def get_roles_for_user_and_domain(self, user_id, domain_id):
"""Get the roles associated with a user within given domain.
:returns: a list of role ids.
- :raises: keystone.exception.UserNotFound,
- keystone.exception.DomainNotFound
+ :raises keystone.exception.DomainNotFound: If the domain doesn't exist.
"""
-
- def _get_group_domain_roles(user_id, domain_id):
- role_list = []
- group_ids = self._get_group_ids_for_user_id(user_id)
- for group_id in group_ids:
- try:
- metadata_ref = self._get_metadata(group_id=group_id,
- domain_id=domain_id)
- role_list += self._roles_from_role_dicts(
- metadata_ref.get('roles', {}), False)
- except (exception.MetadataNotFound, exception.NotImplemented):
- # MetadataNotFound implies no group grant, so skip.
- # Ignore NotImplemented since not all backends support
- # domains.
- pass
- return role_list
-
- def _get_user_domain_roles(user_id, domain_id):
- metadata_ref = {}
- try:
- metadata_ref = self._get_metadata(user_id=user_id,
- domain_id=domain_id)
- except (exception.MetadataNotFound, exception.NotImplemented):
- # MetadataNotFound implies no user grants.
- # Ignore NotImplemented since not all backends support
- # domains
- pass
- return self._roles_from_role_dicts(
- metadata_ref.get('roles', {}), False)
-
self.resource_api.get_domain(domain_id)
- user_role_list = _get_user_domain_roles(user_id, domain_id)
- group_role_list = _get_group_domain_roles(user_id, domain_id)
+ assignment_list = self.list_role_assignments(
+ user_id=user_id, domain_id=domain_id, effective=True)
# Use set() to process the list to remove any duplicates
- return list(set(user_role_list + group_role_list))
+ return list(set([x['role_id'] for x in assignment_list]))
def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None):
"""Get a list of roles for this group on domain and/or project."""
-
if project_id is not None:
- project = self.resource_api.get_project(project_id)
- role_ids = self.list_role_ids_for_groups_on_project(
- group_ids, project_id, project['domain_id'],
- self._list_parent_ids_of_project(project_id))
+ self.resource_api.get_project(project_id)
+ assignment_list = self.list_role_assignments(
+ source_from_group_ids=group_ids, project_id=project_id,
+ effective=True)
elif domain_id is not None:
- role_ids = self.list_role_ids_for_groups_on_domain(
- group_ids, domain_id)
+ assignment_list = self.list_role_assignments(
+ source_from_group_ids=group_ids, domain_id=domain_id,
+ effective=True)
else:
raise AttributeError(_("Must specify either domain or project"))
+ role_ids = list(set([x['role_id'] for x in assignment_list]))
return self.role_api.list_roles_from_ids(role_ids)
def add_user_to_project(self, tenant_id, user_id):
"""Add user to a tenant by creating a default role relationship.
- :raises: keystone.exception.ProjectNotFound,
- keystone.exception.UserNotFound
+ :raises keystone.exception.ProjectNotFound: If the project doesn't
+ exist.
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
"""
self.resource_api.get_project(tenant_id)
@@ -230,6 +213,7 @@ class Manager(manager.Manager):
user_id,
tenant_id,
CONF.member_role_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
@notifications.role_assignment('created')
def _add_role_to_user_and_project_adapter(self, role_id, user_id=None,
@@ -249,12 +233,14 @@ class Manager(manager.Manager):
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self._add_role_to_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
def remove_user_from_project(self, tenant_id, user_id):
"""Remove user from a tenant
- :raises: keystone.exception.ProjectNotFound,
- keystone.exception.UserNotFound
+ :raises keystone.exception.ProjectNotFound: If the project doesn't
+ exist.
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
"""
roles = self.get_roles_for_user_and_project(user_id, tenant_id)
@@ -271,101 +257,41 @@ class Manager(manager.Manager):
except exception.RoleNotFound:
LOG.debug("Removing role %s failed because it does not exist.",
role_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
def list_projects_for_user(self, user_id, hints=None):
- # NOTE(henry-nash): In order to get a complete list of user projects,
- # the driver will need to look at group assignments. To avoid cross
- # calling between the assignment and identity driver we get the group
- # list here and pass it in. The rest of the detailed logic of listing
- # projects for a user is pushed down into the driver to enable
- # optimization with the various backend technologies (SQL, LDAP etc.).
-
- group_ids = self._get_group_ids_for_user_id(user_id)
- project_ids = self.list_project_ids_for_user(
- user_id, group_ids, hints or driver_hints.Hints())
-
- if not CONF.os_inherit.enabled:
- return self.resource_api.list_projects_from_ids(project_ids)
-
- # Inherited roles are enabled, so check to see if this user has any
- # inherited role (direct or group) on any parent project, in which
- # case we must add in all the projects in that parent's subtree.
- project_ids = set(project_ids)
- project_ids_inherited = self.list_project_ids_for_user(
- user_id, group_ids, hints or driver_hints.Hints(), inherited=True)
- for proj_id in project_ids_inherited:
- project_ids.update(
- (x['id'] for x in
- self.resource_api.list_projects_in_subtree(proj_id)))
-
- # Now do the same for any domain inherited roles
- domain_ids = self.list_domain_ids_for_user(
- user_id, group_ids, hints or driver_hints.Hints(),
- inherited=True)
- project_ids.update(
- self.resource_api.list_project_ids_from_domain_ids(domain_ids))
-
+ assignment_list = self.list_role_assignments(
+ user_id=user_id, effective=True)
+ # Use set() to process the list to remove any duplicates
+ project_ids = list(set([x['project_id'] for x in assignment_list
+ if x.get('project_id')]))
return self.resource_api.list_projects_from_ids(list(project_ids))
# TODO(henry-nash): We might want to consider list limiting this at some
# point in the future.
def list_domains_for_user(self, user_id, hints=None):
- # NOTE(henry-nash): In order to get a complete list of user domains,
- # the driver will need to look at group assignments. To avoid cross
- # calling between the assignment and identity driver we get the group
- # list here and pass it in. The rest of the detailed logic of listing
- # projects for a user is pushed down into the driver to enable
- # optimization with the various backend technologies (SQL, LDAP etc.).
- group_ids = self._get_group_ids_for_user_id(user_id)
- domain_ids = self.list_domain_ids_for_user(
- user_id, group_ids, hints or driver_hints.Hints())
+ assignment_list = self.list_role_assignments(
+ user_id=user_id, effective=True)
+ # Use set() to process the list to remove any duplicates
+ domain_ids = list(set([x['domain_id'] for x in assignment_list
+ if x.get('domain_id')]))
return self.resource_api.list_domains_from_ids(domain_ids)
def list_domains_for_groups(self, group_ids):
- domain_ids = self.list_domain_ids_for_groups(group_ids)
+ assignment_list = self.list_role_assignments(
+ source_from_group_ids=group_ids, effective=True)
+ domain_ids = list(set([x['domain_id'] for x in assignment_list
+ if x.get('domain_id')]))
return self.resource_api.list_domains_from_ids(domain_ids)
def list_projects_for_groups(self, group_ids):
- project_ids = (
- self.list_project_ids_for_groups(group_ids, driver_hints.Hints()))
- if not CONF.os_inherit.enabled:
- return self.resource_api.list_projects_from_ids(project_ids)
-
- # os_inherit extension is enabled, so check to see if these groups have
- # any inherited role assignment on: i) any domain, in which case we
- # must add in all the projects in that domain; ii) any project, in
- # which case we must add in all the subprojects under that project in
- # the hierarchy.
-
- domain_ids = self.list_domain_ids_for_groups(group_ids, inherited=True)
-
- project_ids_from_domains = (
- self.resource_api.list_project_ids_from_domain_ids(domain_ids))
-
- parents_ids = self.list_project_ids_for_groups(group_ids,
- driver_hints.Hints(),
- inherited=True)
-
- subproject_ids = []
- for parent_id in parents_ids:
- subtree = self.resource_api.list_projects_in_subtree(parent_id)
- subproject_ids += [subproject['id'] for subproject in subtree]
-
- return self.resource_api.list_projects_from_ids(
- list(set(project_ids + project_ids_from_domains + subproject_ids)))
-
- def list_role_assignments_for_role(self, role_id=None):
- # NOTE(henry-nash): Currently the efficiency of the key driver
- # implementation (SQL) of list_role_assignments is severely hampered by
- # the existence of the multiple grant tables - hence there is little
- # advantage in pushing the logic of this method down into the driver.
- # Once the single assignment table is implemented, then this situation
- # will be different, and this method should have its own driver
- # implementation.
- return [r for r in self.driver.list_role_assignments()
- if r['role_id'] == role_id]
+ assignment_list = self.list_role_assignments(
+ source_from_group_ids=group_ids, effective=True)
+ project_ids = list(set([x['project_id'] for x in assignment_list
+ if x.get('project_id')]))
+ return self.resource_api.list_projects_from_ids(project_ids)
@notifications.role_assignment('deleted')
def _remove_role_from_user_and_project_adapter(self, role_id, user_id=None,
@@ -391,11 +317,19 @@ class Manager(manager.Manager):
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
self._remove_role_from_user_and_project_adapter(
role_id, user_id=user_id, project_id=tenant_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
- @notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
def _emit_invalidate_user_token_persistence(self, user_id):
self.identity_api.emit_invalidate_user_token_persistence(user_id)
+ # NOTE(lbragstad): The previous notification decorator behavior didn't
+ # send the notification unless the operation was successful. We
+ # maintain that behavior here by calling to the notification module
+ # after the call to emit invalid user tokens.
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, user_id
+ )
+
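
The NOTE above pins down an ordering contract: the audit notification may only fire after token persistence was successfully invalidated, which the old decorator form could not guarantee on failure. The shape of the pattern, with both collaborators stubbed (names hypothetical):

class StubIdentityApi:
    def emit_invalidate_user_token_persistence(self, user_id):
        print('token persistence invalidated for', user_id)

class StubNotifier:
    def internal(self, event, payload):
        print('audit notification:', event, payload)

def invalidate_user_tokens(identity_api, notifier, user_id):
    # If the invalidation call raises, we never reach the notification
    # below - preserving the only-notify-on-success behaviour.
    identity_api.emit_invalidate_user_token_persistence(user_id)
    notifier.internal('INVALIDATE_USER_TOKEN_PERSISTENCE', user_id)

invalidate_user_tokens(StubIdentityApi(), StubNotifier(), 'u1')
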
def _emit_invalidate_grant_token_persistence(self, user_id, project_id):
self.identity_api.emit_invalidate_grant_token_persistence(
{'user_id': user_id, 'project_id': project_id}
@@ -412,6 +346,7 @@ class Manager(manager.Manager):
self.resource_api.get_project(project_id)
self.driver.create_grant(role_id, user_id, group_id, domain_id,
project_id, inherited_to_projects)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
@@ -489,6 +424,7 @@ class Manager(manager.Manager):
self.resource_api.get_project(project_id)
self.driver.delete_grant(role_id, user_id, group_id, domain_id,
project_id, inherited_to_projects)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
# The methods _expand_indirect_assignment, _list_direct_role_assignments
# and _list_effective_role_assignments below are only used on
@@ -497,8 +433,8 @@ class Manager(manager.Manager):
# kept as it is in order to detect unnecessarily complex code, which is not
# this case.
- def _expand_indirect_assignment(self, ref, user_id=None,
- project_id=None):
+ def _expand_indirect_assignment(self, ref, user_id=None, project_id=None,
+ subtree_ids=None, expand_groups=True):
"""Returns a list of expanded role assignments.
This method is called for each discovered assignment that either needs
@@ -508,11 +444,20 @@ class Manager(manager.Manager):
In all cases, if either user_id and/or project_id is specified, then we
filter the result on those values.
- """
+ If project_id is specified and subtree_ids is None, then this
+ indicates that we are only interested in that one project. If
+ subtree_ids is not None, then this is an indicator that any
+ inherited assignments need to be expanded down the tree. The
+ actual subtree_ids don't need to be used as a filter here, since we
+ already ensured only those assignments that could affect them
+ were passed to this method.
+ If expand_groups is True then we expand groups out to a list of
+ assignments, one for each member of that group.
+
+ """
def create_group_assignment(base_ref, user_id):
"""Creates a group assignment from the provided ref."""
-
ref = copy.deepcopy(base_ref)
ref['user_id'] = user_id
@@ -529,7 +474,7 @@ class Manager(manager.Manager):
of role assignments containing one for each user of that group on
that target.
- An example of accepted ref is:
+ An example of accepted ref is::
{
'group_id': group_id,
@@ -540,6 +485,8 @@ class Manager(manager.Manager):
Once expanded, it should be returned as a list of entities like the
one below, one for each user_id in the provided group_id.
+ ::
+
{
'user_id': user_id,
'project_id': project_id,
@@ -562,18 +509,25 @@ class Manager(manager.Manager):
for m in self.identity_api.list_users_in_group(
ref['group_id'])]
- def expand_inherited_assignment(ref, user_id, project_id=None):
+ def expand_inherited_assignment(ref, user_id, project_id, subtree_ids,
+ expand_groups):
"""Expands inherited role assignments.
- If this is a group role assignment on a target, replace it by a
- list of role assignments containing one for each user of that
- group, on every project under that target.
-
- If this is a user role assignment on a target, replace it by a
+ If expand_groups is True and this is a group role assignment on a
+ target, replace it by a list of role assignments containing one for
+ each user of that group, on every project under that target. If
+ expand_groups is False, then return a group assignment on an
+ inherited target.
+
+ If this is a user role assignment on a specific target (i.e.
+ project_id is specified, but subtree_ids is None) then simply
+ format this as a single assignment (since we are effectively
+ filtering on project_id). If, however, project_id is None or
+ subtree_ids is not None, then replace this one assignment with a
list of role assignments for that user on every project under
that target.
- An example of accepted ref is:
+ An example of accepted ref is::
{
'group_id': group_id,
@@ -586,6 +540,8 @@ class Manager(manager.Manager):
one below, one for each user_id in the provided group_id and
for each subproject_id in the project_id subtree.
+ ::
+
{
'user_id': user_id,
'project_id': subproject_id,
@@ -626,10 +582,25 @@ class Manager(manager.Manager):
# Define expanded project list to which to apply this assignment
if project_id:
- # Since ref is an inherited assignment, it must have come from
- # the domain or a parent. We only need apply it to the project
- # requested.
+ # Since ref is an inherited assignment and we are filtering by
+ # project(s), we are only going to apply the assignment to the
+ # relevant project(s)
project_ids = [project_id]
+ if subtree_ids:
+ project_ids += subtree_ids
+ # If this is a domain inherited assignment, then we know
+ # that all the project_ids will get this assignment. If
+ # it's a project inherited assignment, and the assignment
+ # point is an ancestor of project_id, then we know that
+ # again all the project_ids will get the assignment. If,
+ # however, the assignment point is within the subtree,
+ # then only a partial tree will get the assignment.
+ if ref.get('project_id'):
+ if ref['project_id'] in project_ids:
+ project_ids = (
+ [x['id'] for x in
+ self.resource_api.list_projects_in_subtree(
+ ref['project_id'])])
elif ref.get('domain_id'):
# A domain inherited assignment, so apply it to all projects
# in this domain
@@ -638,7 +609,7 @@ class Manager(manager.Manager):
self.resource_api.list_projects_in_domain(
ref['domain_id'])])
else:
- # It must be a project assignment, so apply it to the subtree
+ # It must be a project assignment, so apply it to its subtree
project_ids = (
[x['id'] for x in
self.resource_api.list_projects_in_subtree(
@@ -646,8 +617,15 @@ class Manager(manager.Manager):
new_refs = []
if 'group_id' in ref:
- # Expand role assignment for all members and for all projects
- for ref in expand_group_assignment(ref, user_id):
+ if expand_groups:
+ # Expand role assignment to all group members on any
+ # inherited target of any of the projects
+ for ref in expand_group_assignment(ref, user_id):
+ new_refs += [create_inherited_assignment(ref, proj_id)
+ for proj_id in project_ids]
+ else:
+ # Just place the group assignment on any inherited target
+ # of any of the projects
new_refs += [create_inherited_assignment(ref, proj_id)
for proj_id in project_ids]
else:
@@ -658,13 +636,96 @@ class Manager(manager.Manager):
return new_refs
if ref.get('inherited_to_projects') == 'projects':
- return expand_inherited_assignment(ref, user_id, project_id)
- elif 'group_id' in ref:
+ return expand_inherited_assignment(
+ ref, user_id, project_id, subtree_ids, expand_groups)
+ elif 'group_id' in ref and expand_groups:
return expand_group_assignment(ref, user_id)
return [ref]
+ def add_implied_roles(self, role_refs):
+ """Expand out implied roles.
+
+ The role_refs passed in have had all inheritance and group assignments
+ expanded out. We now need to look at the role_id in each ref and see
+ if it is a prior role for some implied roles. If it is, then we need to
+ duplicate that ref, one for each implied role. We store the prior role
+ in the indirect dict that is part of such a duplicated ref, so that a
+ caller can determine where the assignment came from.
+
+ """
+ def _make_implied_ref_copy(prior_ref, implied_role_id):
+ # Create a ref for an implied role from the ref of a prior role,
+ # setting the new role_id to be the implied role and the indirect
+ # role_id to be the prior role
+ implied_ref = copy.deepcopy(prior_ref)
+ implied_ref['role_id'] = implied_role_id
+ indirect = implied_ref.setdefault('indirect', {})
+ indirect['role_id'] = prior_ref['role_id']
+ return implied_ref
+
+ if not CONF.token.infer_roles:
+ return role_refs
+ try:
+ implied_roles_cache = {}
+ role_refs_to_check = list(role_refs)
+ ref_results = list(role_refs)
+ checked_role_refs = list()
+ while role_refs_to_check:
+ next_ref = role_refs_to_check.pop()
+ checked_role_refs.append(next_ref)
+ next_role_id = next_ref['role_id']
+ if next_role_id in implied_roles_cache:
+ implied_roles = implied_roles_cache[next_role_id]
+ else:
+ implied_roles = (
+ self.role_api.list_implied_roles(next_role_id))
+ implied_roles_cache[next_role_id] = implied_roles
+ for implied_role in implied_roles:
+ implied_ref = (
+ _make_implied_ref_copy(
+ next_ref, implied_role['implied_role_id']))
+ if implied_ref in checked_role_refs:
+ msg = _LE('Circular reference found in '
+ 'role inference rules - %(prior_role_id)s.')
+ LOG.error(msg, {'prior_role_id': next_ref['role_id']})
+ else:
+ ref_results.append(implied_ref)
+ role_refs_to_check.append(implied_ref)
+ except exception.NotImplemented:
+ LOG.error('Role driver does not support implied roles.')
+
+ return ref_results
+
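
add_implied_roles is a worklist expansion over the inference graph: pop a ref, append a duplicate for every role it implies, and never re-enqueue something already seen, so a rule cycle such as admin -> member -> admin terminates (the real code also logs the circular reference and works on whole refs, not bare ids). The traversal reduced to role ids, with an illustrative rule table:

IMPLIED = {                       # prior role id -> implied role ids
    'admin': ['member'],
    'member': ['reader'],
    'reader': ['admin'],          # deliberate cycle
}

def expand_implied(role_ids):
    results = list(role_ids)
    to_check = list(role_ids)
    seen = set(role_ids)
    while to_check:
        role = to_check.pop()
        for implied in IMPLIED.get(role, []):
            if implied in seen:
                continue          # already expanded: cycle or shared path
            seen.add(implied)
            results.append(implied)
            to_check.append(implied)
    return results

print(expand_implied(['admin']))  # ['admin', 'member', 'reader']
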
+ def _filter_by_role_id(self, role_id, ref_results):
+ # If we arrive here, we need to filter by role_id.
+ filter_results = []
+ for ref in ref_results:
+ if ref['role_id'] == role_id:
+ filter_results.append(ref)
+ return filter_results
+
+ def _strip_domain_roles(self, role_refs):
+ """Post process assignment list for domain roles.
+
+ Domain roles are only designed to do the job of inferring other roles
+ and since that has been done before this method is called, we need to
+ remove any assignments that include a domain role.
+
+ """
+ def _role_is_global(role_id):
+ ref = self.role_api.get_role(role_id)
+ return (ref['domain_id'] is None)
+
+ filter_results = []
+ for ref in role_refs:
+ if _role_is_global(ref['role_id']):
+ filter_results.append(ref)
+ return filter_results
+
def _list_effective_role_assignments(self, role_id, user_id, group_id,
- domain_id, project_id, inherited):
+ domain_id, project_id, subtree_ids,
+ inherited, source_from_group_ids,
+ strip_domain_roles):
"""List role assignments in effective mode.
When using effective mode, besides the direct assignments, the indirect
@@ -672,24 +733,24 @@ class Manager(manager.Manager):
be expanded.
The resulting list of assignments will be filtered by the provided
- parameters, although since we are in effective mode, group can never
- act as a filter (since group assignments are expanded into user roles)
- and domain can only be filter if we want non-inherited assignments,
- since domains can't inherit assignments.
+ parameters. If subtree_ids is not None, then we also want to include
+ all subtree_ids in the filter as well. Since we are in effective mode,
+ group can never act as a filter (since group assignments are expanded
+ into user roles) and domain can only be a filter if we want non-inherited
+ assignments, since domains can't inherit assignments.
The goal of this method is to only ask the driver for those
assignments as could affect the result based on the parameter filters
specified, hence avoiding retrieving a huge list.
"""
-
def list_role_assignments_for_actor(
- role_id, inherited, user_id=None,
- group_ids=None, project_id=None, domain_id=None):
+ role_id, inherited, user_id=None, group_ids=None,
+ project_id=None, subtree_ids=None, domain_id=None):
"""List role assignments for actor on target.
List direct and indirect assignments for an actor, optionally
- for a given target (i.e. project or domain).
+ for a given target (i.e. projects or domain).
:param role_id: List for a specific role, can be None meaning all
roles
@@ -701,7 +762,16 @@ class Manager(manager.Manager):
:param group_ids: A list of groups required. Only one of user_id
and group_ids can be specified
:param project_id: If specified, only include those assignments
- that affect this project
+ that affect at least this project, with
+ additionally any projects specified in
+ subtree_ids
+ :param subtree_ids: The list of projects in the subtree. If
+ specified, also include those assignments that
+ affect these projects. These projects are
+ guaranteed to be in the same domain as the
+ project specified in project_id. subtree_ids
+ can only be specified if project_id has also
+ been specified.
:param domain_id: If specified, only include those assignments
that affect this domain - by definition this will
not include any inherited assignments
@@ -711,25 +781,31 @@ class Manager(manager.Manager):
response are included.
"""
+ project_ids_of_interest = None
+ if project_id:
+ if subtree_ids:
+ project_ids_of_interest = subtree_ids + [project_id]
+ else:
+ project_ids_of_interest = [project_id]
# List direct project role assignments
- project_ids = [project_id] if project_id else None
-
non_inherited_refs = []
if inherited is False or inherited is None:
# Get non inherited assignments
non_inherited_refs = self.driver.list_role_assignments(
role_id=role_id, domain_id=domain_id,
- project_ids=project_ids, user_id=user_id,
+ project_ids=project_ids_of_interest, user_id=user_id,
group_ids=group_ids, inherited_to_projects=False)
inherited_refs = []
if inherited is True or inherited is None:
# Get inherited assignments
if project_id:
- # If we are filtering by a specific project, then we can
- # only get inherited assignments from its domain or from
- # any of its parents.
+ # The project and any subtree are guaranteed to be owned by
+ # the same domain, so since we are filtering by these
+ # specific projects, then we can only get inherited
+ # assignments from their common domain or from any of
+ # their parent projects.
# List inherited assignments from the project's domain
proj_domain_id = self.resource_api.get_project(
@@ -739,14 +815,18 @@ class Manager(manager.Manager):
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
- # And those assignments that could be inherited from the
- # project's parents.
- parent_ids = [project['id'] for project in
+ # For inherited assignments from projects, since we know
+ they are from the same tree, the only places these can
+ # come from are from parents of the main project or
+ # inherited assignments on the project or subtree itself.
+ source_ids = [project['id'] for project in
self.resource_api.list_project_parents(
project_id)]
- if parent_ids:
+ if subtree_ids:
+ source_ids += project_ids_of_interest
+ if source_ids:
inherited_refs += self.driver.list_role_assignments(
- role_id=role_id, project_ids=parent_ids,
+ role_id=role_id, project_ids=source_ids,
user_id=user_id, group_ids=group_ids,
inherited_to_projects=True)
else:
@@ -758,61 +838,93 @@ class Manager(manager.Manager):
return non_inherited_refs + inherited_refs
# If filtering by group or inherited domain assignment the list is
- # guranteed to be empty
+ # guaranteed to be empty
if group_id or (domain_id and inherited):
return []
+ if user_id and source_from_group_ids:
+ # You can't do both - and since source_from_group_ids is only used
+ # internally, this must be a coding error by the caller.
+ msg = _('Cannot list assignments sourced from groups and filtered '
+ 'by user ID.')
+ raise exception.UnexpectedError(msg)
+
# If filtering by domain, then only non-inherited assignments are
# relevant, since domains don't inherit assignments
inherited = False if domain_id else inherited
- # List user assignments
+ # List user or explicit group assignments.
+ # Due to the need to expand implied roles, this call will skip
+ # filtering by role_id and instead return the whole set of roles.
+ # Matching on the specified role is performed at the end.
direct_refs = list_role_assignments_for_actor(
- role_id=role_id, user_id=user_id, project_id=project_id,
+ role_id=None, user_id=user_id, group_ids=source_from_group_ids,
+ project_id=project_id, subtree_ids=subtree_ids,
domain_id=domain_id, inherited=inherited)
- # And those from the user's groups
+ # And those from the user's groups, so long as we are not restricting
+ # to a set of source groups (in which case we already got those
+ # assignments in the direct listing above).
group_refs = []
- if user_id:
+ if not source_from_group_ids and user_id:
group_ids = self._get_group_ids_for_user_id(user_id)
if group_ids:
group_refs = list_role_assignments_for_actor(
- role_id=role_id, project_id=project_id,
- group_ids=group_ids, domain_id=domain_id,
- inherited=inherited)
+ role_id=None, project_id=project_id,
+ subtree_ids=subtree_ids, group_ids=group_ids,
+ domain_id=domain_id, inherited=inherited)
# Expand grouping and inheritance on retrieved role assignments
refs = []
+ expand_groups = (source_from_group_ids is None)
for ref in (direct_refs + group_refs):
- refs += self._expand_indirect_assignment(ref=ref, user_id=user_id,
- project_id=project_id)
+ refs += self._expand_indirect_assignment(
+ ref, user_id, project_id, subtree_ids, expand_groups)
+
+ refs = self.add_implied_roles(refs)
+ if strip_domain_roles:
+ refs = self._strip_domain_roles(refs)
+ if role_id:
+ refs = self._filter_by_role_id(role_id, refs)
return refs
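To make the expansion pipeline above concrete, a hedged illustration (role names invented): with an inference rule admin -> member, an effective listing expands

    # {'role_id': ADMIN_ID, 'user_id': u, 'project_id': p}
    # into itself plus
    # {'role_id': MEMBER_ID, 'user_id': u, 'project_id': p}

before the requested role_id filter is finally applied - which is why the inner listings deliberately pass role_id=None.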
def _list_direct_role_assignments(self, role_id, user_id, group_id,
- domain_id, project_id, inherited):
+ domain_id, project_id, subtree_ids,
+ inherited):
"""List role assignments without applying expansion.
Returns a list of direct role assignments, where their attributes match
- the provided filters.
+ the provided filters. If subtree_ids is not None, we also include
+ all subtree_ids in the filter.
"""
group_ids = [group_id] if group_id else None
- project_ids = [project_id] if project_id else None
+ project_ids_of_interest = None
+ if project_id:
+ if subtree_ids:
+ project_ids_of_interest = subtree_ids + [project_id]
+ else:
+ project_ids_of_interest = [project_id]
return self.driver.list_role_assignments(
role_id=role_id, user_id=user_id, group_ids=group_ids,
- domain_id=domain_id, project_ids=project_ids,
+ domain_id=domain_id, project_ids=project_ids_of_interest,
inherited_to_projects=inherited)
def list_role_assignments(self, role_id=None, user_id=None, group_id=None,
- domain_id=None, project_id=None, inherited=None,
- effective=None):
+ domain_id=None, project_id=None,
+ include_subtree=False, inherited=None,
+ effective=None, include_names=False,
+ source_from_group_ids=None,
+ strip_domain_roles=True):
"""List role assignments, honoring effective mode and provided filters.
Returns a list of role assignments, where their attributes match the
provided filters (role_id, user_id, group_id, domain_id, project_id and
- inherited). The inherited filter defaults to None, meaning to get both
+ inherited). If include_subtree is True, then assignments on all
+ descendants of the project specified by project_id are also included.
+ The inherited filter defaults to None, meaning to get both
non-inherited and inherited role assignments.
If effective mode is specified, this means that rather than simply
@@ -823,25 +935,98 @@ class Manager(manager.Manager):
Think of effective mode as being the list of assignments that actually
affect a user, for example the roles that would be placed in a token.
+ If include_names is set to True the entities' names are returned
+ in addition to their IDs.
+
+ source_from_group_ids is a list of group IDs and, if specified, then
+ only those assignments that are derived from membership of these groups
+ are considered, and any such assignments will not be expanded into
+ their user membership assignments. This is different to a group filter
+ of the resulting list, instead being a restriction on which assignments
+ should be considered before expansion of inheritance. This option is
+ only used internally (i.e. it is not exposed at the API level) and is
+ only supported in effective mode (since in regular mode there is no
+ difference between this and a group filter, other than it is a list of
+ groups).
+
+ In effective mode, any domain specific roles are usually stripped from
+ the returned assignments (since such roles are not placed in tokens).
+ This stripping can be disabled by specifying strip_domain_roles=False,
+ which is useful for internal calls like trusts which need to examine
+ the full set of roles.
+
If OS-INHERIT extension is disabled or the used driver does not support
inherited roles retrieval, inherited role assignments will be ignored.
"""
-
if not CONF.os_inherit.enabled:
if inherited:
return []
inherited = False
+ subtree_ids = None
+ if project_id and include_subtree:
+ subtree_ids = (
+ [x['id'] for x in
+ self.resource_api.list_projects_in_subtree(project_id)])
+
if effective:
- return self._list_effective_role_assignments(
- role_id, user_id, group_id, domain_id, project_id, inherited)
+ role_assignments = self._list_effective_role_assignments(
+ role_id, user_id, group_id, domain_id, project_id,
+ subtree_ids, inherited, source_from_group_ids,
+ strip_domain_roles)
else:
- return self._list_direct_role_assignments(
- role_id, user_id, group_id, domain_id, project_id, inherited)
+ role_assignments = self._list_direct_role_assignments(
+ role_id, user_id, group_id, domain_id, project_id,
+ subtree_ids, inherited)
+
+ if include_names:
+ return self._get_names_from_role_assignments(role_assignments)
+ return role_assignments
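For illustration, a hypothetical caller with a configured assignment manager (names invented) could combine the new filters like this:

    refs = assignment_api.list_role_assignments(
        user_id=user['id'],
        project_id=project['id'],
        include_subtree=True,   # also cover all descendants of the project
        effective=True,         # expand groups, inheritance, implied roles
        include_names=True)     # resolve user/project/role names as well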
+
+ def _get_names_from_role_assignments(self, role_assignments):
+ role_assign_list = []
+
+ for role_asgmt in role_assignments:
+ new_assign = {}
+ for id_type, id_ in role_asgmt.items():
+ if id_type == 'domain_id':
+ _domain = self.resource_api.get_domain(id_)
+ new_assign['domain_id'] = _domain['id']
+ new_assign['domain_name'] = _domain['name']
+ elif id_type == 'user_id':
+ _user = self.identity_api.get_user(id_)
+ new_assign['user_id'] = _user['id']
+ new_assign['user_name'] = _user['name']
+ new_assign['user_domain_id'] = _user['domain_id']
+ new_assign['user_domain_name'] = (
+ self.resource_api.get_domain(_user['domain_id'])
+ ['name'])
+ elif id_type == 'group_id':
+ _group = self.identity_api.get_group(id_)
+ new_assign['group_id'] = _group['id']
+ new_assign['group_name'] = _group['name']
+ new_assign['group_domain_id'] = _group['domain_id']
+ new_assign['group_domain_name'] = (
+ self.resource_api.get_domain(_group['domain_id'])
+ ['name'])
+ elif id_type == 'project_id':
+ _project = self.resource_api.get_project(id_)
+ new_assign['project_id'] = _project['id']
+ new_assign['project_name'] = _project['name']
+ new_assign['project_domain_id'] = _project['domain_id']
+ new_assign['project_domain_name'] = (
+ self.resource_api.get_domain(_project['domain_id'])
+ ['name'])
+ elif id_type == 'role_id':
+ _role = self.role_api.get_role(id_)
+ new_assign['role_id'] = _role['id']
+ new_assign['role_name'] = _role['name']
+ role_assign_list.append(new_assign)
+ return role_assign_list
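As a sketch of the name expansion (IDs and names invented), each assignment record is rebuilt roughly as follows:

    # before: {'user_id': 'u1', 'project_id': 'p1', 'role_id': 'r1'}
    # after:  {'user_id': 'u1', 'user_name': 'alice',
    #          'user_domain_id': 'd1', 'user_domain_name': 'Default',
    #          'project_id': 'p1', 'project_name': 'demo',
    #          'project_domain_id': 'd1', 'project_domain_name': 'Default',
    #          'role_id': 'r1', 'role_name': 'admin'}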
def delete_tokens_for_role_assignments(self, role_id):
- assignments = self.list_role_assignments_for_role(role_id=role_id)
+ assignments = self.list_role_assignments(role_id=role_id)
# Iterate over the assignments for this role and build the list of
# user or user+project IDs for the tokens we need to delete
@@ -900,54 +1085,32 @@ class Manager(manager.Manager):
user_and_project_ids_to_action.append(user_and_project_id)
for user_id, project_id in user_and_project_ids_to_action:
- self._emit_invalidate_user_project_tokens_notification(
- {'user_id': user_id,
- 'project_id': project_id})
-
- @notifications.internal(
- notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
- def _emit_invalidate_user_project_tokens_notification(self, payload):
- # This notification's payload is a dict of user_id and
- # project_id so the token provider can invalidate the tokens
- # from persistence if persistence is enabled.
- pass
-
-
+ payload = {'user_id': user_id, 'project_id': project_id}
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
+ payload
+ )
+
+
+# The AssignmentDriverBase class is the set of driver methods from earlier
+# drivers that we still support and have not been removed or modified. This
+# class is then used to create the augmented V8 and V9 version abstract driver
+# classes, without having to duplicate a lot of abstract method signatures.
+# If you remove a method from V9, then move the abstract methods from this Base
+# class to the V8 class. Do not modify any of the method signatures in the Base
+# class - changes should only be made in the V8 and subsequent classes.
@six.add_metaclass(abc.ABCMeta)
-class AssignmentDriverV8(object):
-
- def _role_to_dict(self, role_id, inherited):
- role_dict = {'id': role_id}
- if inherited:
- role_dict['inherited_to'] = 'projects'
- return role_dict
-
- def _roles_from_role_dicts(self, dict_list, inherited):
- role_list = []
- for d in dict_list:
- if ((not d.get('inherited_to') and not inherited) or
- (d.get('inherited_to') == 'projects' and inherited)):
- role_list.append(d['id'])
- return role_list
+class AssignmentDriverBase(object):
def _get_list_limit(self):
return CONF.assignment.list_limit or CONF.list_limit
@abc.abstractmethod
- def list_user_ids_for_project(self, tenant_id):
- """Lists all user IDs with a role assignment in the specified project.
-
- :returns: a list of user_ids or an empty set.
-
- """
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
"""Add a role to a user within given tenant.
- :raises: keystone.exception.Conflict
-
+ :raises keystone.exception.Conflict: If a duplicate role assignment
+ exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -956,7 +1119,7 @@ class AssignmentDriverV8(object):
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
"""Remove a role from a user within given tenant.
- :raises: keystone.exception.RoleNotFound
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -981,7 +1144,6 @@ class AssignmentDriverV8(object):
domain_id=None, project_id=None,
inherited_to_projects=False):
"""Lists role ids for assignments/grants."""
-
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
@@ -990,7 +1152,8 @@ class AssignmentDriverV8(object):
inherited_to_projects=False):
"""Checks an assignment/grant role id.
- :raises: keystone.exception.RoleAssignmentNotFound
+ :raises keystone.exception.RoleAssignmentNotFound: If the role
+ assignment doesn't exist.
:returns: None or raises an exception if grant not found
"""
@@ -1002,7 +1165,8 @@ class AssignmentDriverV8(object):
inherited_to_projects=False):
"""Deletes assignments/grants.
- :raises: keystone.exception.RoleAssignmentNotFound
+ :raises keystone.exception.RoleAssignmentNotFound: If the role
+ assignment doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1021,6 +1185,59 @@ class AssignmentDriverV8(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
+ def delete_project_assignments(self, project_id):
+ """Deletes all assignments for a project.
+
+ :raises keystone.exception.ProjectNotFound: If the project doesn't
+ exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_role_assignments(self, role_id):
+ """Deletes all assignments for a role."""
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_user_assignments(self, user_id):
+ """Deletes all assignments for a user.
+
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_group_assignments(self, group_id):
+ """Deletes all assignments for a group.
+
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class AssignmentDriverV8(AssignmentDriverBase):
+ """Removed or redefined methods from V8.
+
+ Move the abstract methods of any methods removed or modified in later
+ versions of the driver from AssignmentDriverBase to here. We maintain this
+ so that legacy drivers, which will be a subclass of AssignmentDriverV8, can
+ still reference them.
+
+ """
+
+ @abc.abstractmethod
+ def list_user_ids_for_project(self, tenant_id):
+ """Lists all user IDs with a role assignment in the specified project.
+
+ :returns: a list of user_ids or an empty list.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
def list_project_ids_for_user(self, user_id, group_ids, hints,
inherited=False):
"""List all project ids associated with a given user.
@@ -1044,25 +1261,6 @@ class AssignmentDriverV8(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_project_ids_for_groups(self, group_ids, hints,
- inherited=False):
- """List project ids accessible to specified groups.
-
- :param group_ids: List of group ids.
- :param hints: filter hints which the driver should
- implement if at all possible.
- :param inherited: whether assignments marked as inherited should
- be included.
- :returns: List of project ids accessible to specified groups.
-
- This method should not try and expand any inherited assignments,
- just report the projects that have the role for this group. The manager
- method is responsible for expanding out inherited assignments.
-
- """
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
def list_domain_ids_for_user(self, user_id, group_ids, hints,
inherited=False):
"""List all domain ids associated with a given user.
@@ -1082,6 +1280,25 @@ class AssignmentDriverV8(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
+ def list_project_ids_for_groups(self, group_ids, hints,
+ inherited=False):
+ """List project ids accessible to specified groups.
+
+ :param group_ids: List of group ids.
+ :param hints: filter hints which the driver should
+ implement if at all possible.
+ :param inherited: whether assignments marked as inherited should
+ be included.
+ :returns: List of project ids accessible to specified groups.
+
+ This method should not try to expand any inherited assignments,
+ just report the projects that have the role for this group. The manager
+ method is responsible for expanding out inherited assignments.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
def list_domain_ids_for_groups(self, group_ids, inherited=False):
"""List domain ids accessible to specified groups.
@@ -1127,38 +1344,125 @@ class AssignmentDriverV8(object):
"""
raise exception.NotImplemented()
- @abc.abstractmethod
- def delete_project_assignments(self, project_id):
- """Deletes all assignments for a project.
- :raises: keystone.exception.ProjectNotFound
+class AssignmentDriverV9(AssignmentDriverBase):
+ """New or redefined methods from V8.
- """
- raise exception.NotImplemented() # pragma: no cover
+ Add any new V9 abstract methods (or those with modified signatures) to
+ this class.
+
+ """
@abc.abstractmethod
- def delete_role_assignments(self, role_id):
- """Deletes all assignments for a role."""
+ def delete_domain_assignments(self, domain_id):
+ """Deletes all assignments for a domain."""
+ raise exception.NotImplemented()
- raise exception.NotImplemented() # pragma: no cover
- @abc.abstractmethod
- def delete_user_assignments(self, user_id):
- """Deletes all assignments for a user.
+class V9AssignmentWrapperForV8Driver(AssignmentDriverV9):
+ """Wrapper class to supported a V8 legacy driver.
- :raises: keystone.exception.RoleNotFound
+ In order to support legacy drivers without having to make the manager code
+ driver-version aware, we wrap legacy drivers so that they look like the
+ latest version. For the various changes made in a new driver, here are the
+ actions needed in this wrapper:
- """
- raise exception.NotImplemented() # pragma: no cover
+ Method removed from new driver - remove the call-through method from this
+ class, since the manager will no longer be
+ calling it.
+ Method signature (or meaning) changed - wrap the old method in a new
+ signature here, and munge the input
+ and output parameters accordingly.
+ New method added to new driver - add a method to implement the new
+ functionality here if possible. If that is
+ not possible, then return NotImplemented,
+ since we do not guarantee to support new
+ functionality with legacy drivers.
- @abc.abstractmethod
- def delete_group_assignments(self, group_id):
- """Deletes all assignments for a group.
+ """
- :raises: keystone.exception.RoleNotFound
+ @versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='keystone.assignment.AssignmentDriverV8',
+ in_favor_of='keystone.assignment.AssignmentDriverV9',
+ remove_in=+2)
+ def __init__(self, wrapped_driver):
+ self.driver = wrapped_driver
- """
- raise exception.NotImplemented() # pragma: no cover
+ def delete_domain_assignments(self, domain_id):
+ """Deletes all assignments for a domain."""
+ msg = _LW('delete_domain_assignments method not found in custom '
+ 'assignment driver. Domain assignments for domain (%s) to '
+ 'users from other domains will not be removed. This was '
+ 'added in V9 of the assignment driver.')
+ LOG.warning(msg, domain_id)
+
+ def default_role_driver(self):
+ return self.driver.default_role_driver()
+
+ def default_resource_driver(self):
+ return self.driver.default_resource_driver()
+
+ def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
+ self.driver.add_role_to_user_and_project(user_id, tenant_id, role_id)
+
+ def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
+ self.driver.remove_role_from_user_and_project(
+ user_id, tenant_id, role_id)
+
+ def create_grant(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ self.driver.create_grant(
+ role_id, user_id=user_id, group_id=group_id,
+ domain_id=domain_id, project_id=project_id,
+ inherited_to_projects=inherited_to_projects)
+
+ def list_grant_role_ids(self, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ return self.driver.list_grant_role_ids(
+ user_id=user_id, group_id=group_id,
+ domain_id=domain_id, project_id=project_id,
+ inherited_to_projects=inherited_to_projects)
+
+ def check_grant_role_id(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ self.driver.check_grant_role_id(
+ role_id, user_id=user_id, group_id=group_id,
+ domain_id=domain_id, project_id=project_id,
+ inherited_to_projects=inherited_to_projects)
+
+ def delete_grant(self, role_id, user_id=None, group_id=None,
+ domain_id=None, project_id=None,
+ inherited_to_projects=False):
+ self.driver.delete_grant(
+ role_id, user_id=user_id, group_id=group_id,
+ domain_id=domain_id, project_id=project_id,
+ inherited_to_projects=inherited_to_projects)
+
+ def list_role_assignments(self, role_id=None,
+ user_id=None, group_ids=None,
+ domain_id=None, project_ids=None,
+ inherited_to_projects=None):
+ return self.driver.list_role_assignments(
+ role_id=role_id,
+ user_id=user_id, group_ids=group_ids,
+ domain_id=domain_id, project_ids=project_ids,
+ inherited_to_projects=inherited_to_projects)
+
+ def delete_project_assignments(self, project_id):
+ self.driver.delete_project_assignments(project_id)
+
+ def delete_role_assignments(self, role_id):
+ self.driver.delete_role_assignments(role_id)
+
+ def delete_user_assignments(self, user_id):
+ self.driver.delete_user_assignments(user_id)
+
+ def delete_group_assignments(self, group_id):
+ self.driver.delete_group_assignments(group_id)
Driver = manager.create_legacy_driver(AssignmentDriverV8)
@@ -1184,6 +1488,13 @@ class RoleManager(manager.Manager):
super(RoleManager, self).__init__(role_driver)
+ # Make sure it is a driver version we support, and if it is a legacy
+ # driver, then wrap it.
+ if isinstance(self.driver, RoleDriverV8):
+ self.driver = V9RoleWrapperForV8Driver(self.driver)
+ elif not isinstance(self.driver, RoleDriverV9):
+ raise exception.UnsupportedDriverVersion(driver=role_driver)
+
@MEMOIZE
def get_role(self, role_id):
return self.driver.get_role(role_id)
@@ -1200,32 +1511,50 @@ class RoleManager(manager.Manager):
return self.driver.list_roles(hints or driver_hints.Hints())
def update_role(self, role_id, role, initiator=None):
+ original_role = self.driver.get_role(role_id)
+ if ('domain_id' in role and
+ role['domain_id'] != original_role['domain_id']):
+ raise exception.ValidationError(
+ message=_('Update of `domain_id` is not allowed.'))
+
ret = self.driver.update_role(role_id, role)
notifications.Audit.updated(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
return ret
def delete_role(self, role_id, initiator=None):
- try:
- self.assignment_api.delete_tokens_for_role_assignments(role_id)
- except exception.NotImplemented:
- # FIXME(morganfainberg): Not all backends (ldap) implement
- # `list_role_assignments_for_role` which would have previously
- # caused a NotImplmented error to be raised when called through
- # the controller. Now error or proper action will always come from
- # the `delete_role` method logic. Work needs to be done to make
- # the behavior between drivers consistent (capable of revoking
- # tokens for the same circumstances). This is related to the bug
- # https://bugs.launchpad.net/keystone/+bug/1221805
- pass
+ self.assignment_api.delete_tokens_for_role_assignments(role_id)
self.assignment_api.delete_role_assignments(role_id)
self.driver.delete_role(role_id)
notifications.Audit.deleted(self._ROLE, role_id, initiator)
self.get_role.invalidate(self, role_id)
-
-
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
+ # TODO(ayoung): Add notification
+ def create_implied_role(self, prior_role_id, implied_role_id):
+ implied_role = self.driver.get_role(implied_role_id)
+ self.driver.get_role(prior_role_id)
+ if implied_role['name'] in CONF.assignment.prohibited_implied_role:
+ raise exception.InvalidImpliedRole(role_id=implied_role_id)
+ response = self.driver.create_implied_role(
+ prior_role_id, implied_role_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
+ return response
+
+ def delete_implied_role(self, prior_role_id, implied_role_id):
+ self.driver.delete_implied_role(prior_role_id, implied_role_id)
+ COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
+
+# The RoleDriverBase class is the set of driver methods from earlier
+# drivers that we still support and have not been removed or modified. This
+# class is then used to create the augmented V8 and V9 version abstract driver
+# classes, without having to duplicate a lot of abstract method signatures.
+# If you remove a method from V9, then move the abstract methods from this Base
+# class to the V8 class. Do not modify any of the method signatures in the Base
+# class - changes should only be made in the V8 and subsequent classes.
@six.add_metaclass(abc.ABCMeta)
-class RoleDriverV8(object):
+class RoleDriverBase(object):
def _get_list_limit(self):
return CONF.role.list_limit or CONF.list_limit
@@ -1234,7 +1563,7 @@ class RoleDriverV8(object):
def create_role(self, role_id, role):
"""Creates a new role.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate role exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1270,7 +1599,7 @@ class RoleDriverV8(object):
"""Get a role by ID.
:returns: role_ref
- :raises: keystone.exception.RoleNotFound
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1279,8 +1608,8 @@ class RoleDriverV8(object):
def update_role(self, role_id, role):
"""Updates an existing role.
- :raises: keystone.exception.RoleNotFound,
- keystone.exception.Conflict
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
+ :raises keystone.exception.Conflict: If a duplicate role exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1289,10 +1618,173 @@ class RoleDriverV8(object):
def delete_role(self, role_id):
"""Deletes an existing role.
- :raises: keystone.exception.RoleNotFound
+ :raises keystone.exception.RoleNotFound: If the role doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class RoleDriverV8(RoleDriverBase):
+ """Removed or redefined methods from V8.
+
+ Move the abstract methods of any methods removed or modified in later
+ versions of the driver from RoleDriverBase to here. We maintain this
+ so that legacy drivers, which will be a subclass of RoleDriverV8, can
+ still reference them.
+
+ """
+
+ pass
+
+
+class RoleDriverV9(RoleDriverBase):
+ """New or redefined methods from V8.
+
+ Add any new V9 abstract methods (or those with modified signatures) to
+ this class.
+
+ """
+
+ @abc.abstractmethod
+ def get_implied_role(self, prior_role_id, implied_role_id):
+ """Fetches a role inference rule
+
+ :raises keystone.exception.ImpliedRoleNotFound: If the implied role
+ doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_implied_role(self, prior_role_id, implied_role_id):
+ """Creates a role inference rule
+
+ :raises: keystone.exception.RoleNotFound: If the role doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_implied_role(self, prior_role_id, implied_role_id):
+ """Deletes a role inference rule
+
+ :raises keystone.exception.ImpliedRoleNotFound: If the implied role
+ doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
+ @abc.abstractmethod
+ def list_role_inference_rules(self):
+ """Lists all the rules used to imply one role from another"""
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_implied_roles(self, prior_role_id):
+ """Lists roles implied from the prior role ID"""
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class V9RoleWrapperForV8Driver(RoleDriverV9):
+ """Wrapper class to supported a V8 legacy driver.
+
+ In order to support legacy drivers without having to make the manager code
+ driver-version aware, we wrap legacy drivers so that they look like the
+ latest version. For the various changes made in a new driver, here are the
+ actions needed in this wrapper:
+
+ Method removed from new driver - remove the call-through method from this
+ class, since the manager will no longer be
+ calling it.
+ Method signature (or meaning) changed - wrap the old method in a new
+ signature here, and munge the input
+ and output parameters accordingly.
+ New method added to new driver - add a method to implement the new
+ functionality here if possible. If that is
+ not possible, then return NotImplemented,
+ since we do not guarantee to support new
+ functionality with legacy drivers.
+
+ This V8 wrapper contains the following support for newer manager code:
+
+ - The current manager code expects a role entity to have a domain_id
+ attribute, with a non-None value indicating a domain specific role. V8
+ drivers will only understand global roles, hence if a non-None domain_id
+ is passed to this wrapper, it will raise a NotImplemented exception.
+ If a None-valued domain_id is passed in, it will be trimmed off before
+ the underlying driver is called (and a None-valued domain_id attribute
+ is added in for any entities returned to the manager).
+
+ """
+
+ @versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='keystone.assignment.RoleDriverV8',
+ in_favor_of='keystone.assignment.RoleDriverV9',
+ remove_in=+2)
+ def __init__(self, wrapped_driver):
+ self.driver = wrapped_driver
+
+ def _append_null_domain_id(self, role_or_list):
+ def _append_null_domain_id_to_dict(role):
+ if 'domain_id' not in role:
+ role['domain_id'] = None
+ return role
+
+ if isinstance(role_or_list, list):
+ return [_append_null_domain_id_to_dict(x) for x in role_or_list]
+ else:
+ return _append_null_domain_id_to_dict(role_or_list)
+
+ def _trim_and_assert_null_domain_id(self, role):
+ if 'domain_id' in role:
+ if role['domain_id'] is not None:
+ raise exception.NotImplemented(
+ _('Domain specific roles are not supported in the V8 '
+ 'role driver'))
+ else:
+ new_role = role.copy()
+ new_role.pop('domain_id')
+ return new_role
+ else:
+ return role
+
+ def create_role(self, role_id, role):
+ new_role = self._trim_and_assert_null_domain_id(role)
+ return self._append_null_domain_id(
+ self.driver.create_role(role_id, new_role))
+
+ def list_roles(self, hints):
+ return self._append_null_domain_id(self.driver.list_roles(hints))
+
+ def list_roles_from_ids(self, role_ids):
+ return self._append_null_domain_id(
+ self.driver.list_roles_from_ids(role_ids))
+
+ def get_role(self, role_id):
+ return self._append_null_domain_id(self.driver.get_role(role_id))
+
+ def update_role(self, role_id, role):
+ update_role = self._trim_and_assert_null_domain_id(role)
+ return self._append_null_domain_id(
+ self.driver.update_role(role_id, update_role))
+
+ def delete_role(self, role_id):
+ self.driver.delete_role(role_id)
+
+ def get_implied_role(self, prior_role_id, implied_role_id):
+ raise exception.NotImplemented() # pragma: no cover
+
+ def create_implied_role(self, prior_role_id, implied_role_id):
+ raise exception.NotImplemented() # pragma: no cover
+
+ def delete_implied_role(self, prior_role_id, implied_role_id):
+ raise exception.NotImplemented() # pragma: no cover
+
+ def list_implied_roles(self, prior_role_id):
+ raise exception.NotImplemented() # pragma: no cover
+
+ def list_role_inference_rules(self):
+ raise exception.NotImplemented() # pragma: no cover
RoleDriver = manager.create_legacy_driver(RoleDriverV8)
diff --git a/keystone-moon/keystone/assignment/role_backends/sql.py b/keystone-moon/keystone/assignment/role_backends/sql.py
index 3c707aa8..1045f23a 100644
--- a/keystone-moon/keystone/assignment/role_backends/sql.py
+++ b/keystone-moon/keystone/assignment/role_backends/sql.py
@@ -9,24 +9,44 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exception
from keystone import assignment
+from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
+# NOTE(henry-nash): From the manager and above perspective, the domain_id
+# attribute of a role is nullable. However, to ensure uniqueness in
+# multi-process configurations, it is better to still use a sql uniqueness
+# constraint. Since the support for a nullable component of a uniqueness
+# constraint across different sql databases is mixed, we instead store a
+# special value to represent null, as defined in NULL_DOMAIN_ID below.
+NULL_DOMAIN_ID = '<<null>>'
-class Role(assignment.RoleDriverV8):
+
+class Role(assignment.RoleDriverV9):
@sql.handle_conflicts(conflict_type='role')
def create_role(self, role_id, role):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = RoleTable.from_dict(role)
session.add(ref)
return ref.to_dict()
- @sql.truncated
+ @driver_hints.truncated
def list_roles(self, hints):
- with sql.transaction() as session:
+ # If there is a filter on domain_id and the value is None, then to
+ # ensure that the sql filtering works correctly, we need to patch
+ # the value to be NULL_DOMAIN_ID. This is safe to do here since we
+ # know we are able to satisfy any filter of this type in the call to
+ # filter_limit_query() below, which will remove the filter from the
+ # hints (hence ensuring our substitution is not exposed to the caller).
+ for f in hints.filters:
+ if (f['name'] == 'domain_id' and f['value'] is None):
+ f['value'] = NULL_DOMAIN_ID
+
+ with sql.session_for_read() as session:
query = session.query(RoleTable)
refs = sql.filter_limit_query(RoleTable, query, hints)
return [ref.to_dict() for ref in refs]
@@ -35,7 +55,7 @@ class Role(assignment.RoleDriverV8):
if not ids:
return []
else:
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(RoleTable)
query = query.filter(RoleTable.id.in_(ids))
role_refs = query.all()
@@ -48,12 +68,12 @@ class Role(assignment.RoleDriverV8):
return ref
def get_role(self, role_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
return self._get_role(session, role_id).to_dict()
@sql.handle_conflicts(conflict_type='role')
def update_role(self, role_id, role):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = self._get_role(session, role_id)
old_dict = ref.to_dict()
for k in role:
@@ -66,15 +86,117 @@ class Role(assignment.RoleDriverV8):
return ref.to_dict()
def delete_role(self, role_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = self._get_role(session, role_id)
session.delete(ref)
+ def _get_implied_role(self, session, prior_role_id, implied_role_id):
+ query = session.query(
+ ImpliedRoleTable).filter(
+ ImpliedRoleTable.prior_role_id == prior_role_id).filter(
+ ImpliedRoleTable.implied_role_id == implied_role_id)
+ try:
+ ref = query.one()
+ except sql.NotFound:
+ raise exception.ImpliedRoleNotFound(
+ prior_role_id=prior_role_id,
+ implied_role_id=implied_role_id)
+ return ref
+
+ @sql.handle_conflicts(conflict_type='implied_role')
+ def create_implied_role(self, prior_role_id, implied_role_id):
+ with sql.session_for_write() as session:
+ inference = {'prior_role_id': prior_role_id,
+ 'implied_role_id': implied_role_id}
+ ref = ImpliedRoleTable.from_dict(inference)
+ try:
+ session.add(ref)
+ except db_exception.DBReferenceError:
+ # We don't know which role threw this.
+ # Query each to trigger the exception.
+ self._get_role(session, prior_role_id)
+ self._get_role(session, implied_role_id)
+ return ref.to_dict()
+
+ def delete_implied_role(self, prior_role_id, implied_role_id):
+ with sql.session_for_write() as session:
+ ref = self._get_implied_role(session, prior_role_id,
+ implied_role_id)
+ session.delete(ref)
+
+ def list_implied_roles(self, prior_role_id):
+ with sql.session_for_read() as session:
+ query = session.query(
+ ImpliedRoleTable).filter(
+ ImpliedRoleTable.prior_role_id == prior_role_id)
+ refs = query.all()
+ return [ref.to_dict() for ref in refs]
+
+ def list_role_inference_rules(self):
+ with sql.session_for_read() as session:
+ query = session.query(ImpliedRoleTable)
+ refs = query.all()
+ return [ref.to_dict() for ref in refs]
+
+ def get_implied_role(self, prior_role_id, implied_role_id):
+ with sql.session_for_read() as session:
+ ref = self._get_implied_role(session, prior_role_id,
+ implied_role_id)
+ return ref.to_dict()
+
+
+class ImpliedRoleTable(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'implied_role'
+ attributes = ['prior_role_id', 'implied_role_id']
+ prior_role_id = sql.Column(
+ sql.String(64),
+ sql.ForeignKey('role.id', ondelete="CASCADE"),
+ primary_key=True)
+ implied_role_id = sql.Column(
+ sql.String(64),
+ sql.ForeignKey('role.id', ondelete="CASCADE"),
+ primary_key=True)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes.
+
+ Overrides the `to_dict` function from the base class
+ to avoid having an `extra` field.
+ """
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
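A minimal sketch (role IDs invented) of how an inference rule row round-trips through this model, with no `extra` field in the result:

    rule = ImpliedRoleTable.from_dict(
        {'prior_role_id': 'admin-id', 'implied_role_id': 'member-id'})
    rule.to_dict()
    # {'prior_role_id': 'admin-id', 'implied_role_id': 'member-id'}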
class RoleTable(sql.ModelBase, sql.DictBase):
+
+ def to_dict(self, include_extra_dict=False):
+ d = super(RoleTable, self).to_dict(
+ include_extra_dict=include_extra_dict)
+ if d['domain_id'] == NULL_DOMAIN_ID:
+ d['domain_id'] = None
+ return d
+
+ @classmethod
+ def from_dict(cls, role_dict):
+ if 'domain_id' in role_dict and role_dict['domain_id'] is None:
+ new_dict = role_dict.copy()
+ new_dict['domain_id'] = NULL_DOMAIN_ID
+ else:
+ new_dict = role_dict
+ return super(RoleTable, cls).from_dict(new_dict)
+
__tablename__ = 'role'
- attributes = ['id', 'name']
+ attributes = ['id', 'name', 'domain_id']
id = sql.Column(sql.String(64), primary_key=True)
- name = sql.Column(sql.String(255), unique=True, nullable=False)
+ name = sql.Column(sql.String(255), nullable=False)
+ domain_id = sql.Column(sql.String(64), nullable=False,
+ server_default=NULL_DOMAIN_ID)
extra = sql.Column(sql.JsonBlob())
- __table_args__ = (sql.UniqueConstraint('name'), {})
+ __table_args__ = (sql.UniqueConstraint('name', 'domain_id'),)
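A minimal sketch of the NULL_DOMAIN_ID round trip performed by from_dict/to_dict, which lets a None domain_id still take part in the (name, domain_id) uniqueness constraint:

    ref = RoleTable.from_dict({'id': 'r1', 'name': 'admin', 'domain_id': None})
    assert ref.domain_id == NULL_DOMAIN_ID     # stored as '<<null>>'
    assert ref.to_dict()['domain_id'] is None  # exposed to callers as None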
diff --git a/keystone-moon/keystone/assignment/routers.py b/keystone-moon/keystone/assignment/routers.py
index 49549a0b..9bef401e 100644
--- a/keystone-moon/keystone/assignment/routers.py
+++ b/keystone-moon/keystone/assignment/routers.py
@@ -71,7 +71,43 @@ class Routers(wsgi.RoutersBase):
routers.append(
router.Router(controllers.RoleV3(), 'roles', 'role',
- resource_descriptions=self.v3_resources))
+ resource_descriptions=self.v3_resources,
+ method_template='%s_wrapper'))
+
+ implied_roles_controller = controllers.ImpliedRolesV3()
+ self._add_resource(
+ mapper, implied_roles_controller,
+ path='/roles/{prior_role_id}/implies',
+ rel=json_home.build_v3_resource_relation('implied_roles'),
+ get_action='list_implied_roles',
+ status=json_home.Status.EXPERIMENTAL,
+ path_vars={
+ 'prior_role_id': json_home.Parameters.ROLE_ID,
+ }
+ )
+
+ self._add_resource(
+ mapper, implied_roles_controller,
+ path='/roles/{prior_role_id}/implies/{implied_role_id}',
+ put_action='create_implied_role',
+ delete_action='delete_implied_role',
+ head_action='check_implied_role',
+ get_action='get_implied_role',
+ rel=json_home.build_v3_resource_relation('implied_role'),
+ status=json_home.Status.EXPERIMENTAL,
+ path_vars={
+ 'prior_role_id': json_home.Parameters.ROLE_ID,
+ 'implied_role_id': json_home.Parameters.ROLE_ID
+ }
+ )
+ self._add_resource(
+ mapper, implied_roles_controller,
+ path='/role_inferences',
+ get_action='list_role_inference_rules',
+ rel=json_home.build_v3_resource_relation('role_inferences'),
+ status=json_home.Status.EXPERIMENTAL,
+ path_vars={}
+ )
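In HTTP terms, these experimental resources correspond to GET /v3/roles/{prior_role_id}/implies for listing, PUT/GET/HEAD/DELETE on /v3/roles/{prior_role_id}/implies/{implied_role_id} for individual rules, and GET /v3/role_inferences for the full rule set (assuming the usual /v3 mount point).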
grant_controller = controllers.GrantAssignmentV3()
self._add_resource(
@@ -159,11 +195,11 @@ class Routers(wsgi.RoutersBase):
'group_id': json_home.Parameters.GROUP_ID,
})
- routers.append(
- router.Router(controllers.RoleAssignmentV3(),
- 'role_assignments', 'role_assignment',
- resource_descriptions=self.v3_resources,
- is_entity_implemented=False))
+ self._add_resource(
+ mapper, controllers.RoleAssignmentV3(),
+ path='/role_assignments',
+ get_action='list_role_assignments_wrapper',
+ rel=json_home.build_v3_resource_relation('role_assignments'))
if CONF.os_inherit.enabled:
self._add_resource(
diff --git a/keystone-moon/keystone/auth/__init__.py b/keystone-moon/keystone/auth/__init__.py
index b1e4203e..bcbf69fd 100644
--- a/keystone-moon/keystone/auth/__init__.py
+++ b/keystone-moon/keystone/auth/__init__.py
@@ -14,4 +14,3 @@
from keystone.auth import controllers # noqa
from keystone.auth.core import * # noqa
-from keystone.auth import routers # noqa
diff --git a/keystone-moon/keystone/auth/controllers.py b/keystone-moon/keystone/auth/controllers.py
index 133230d6..3e6af80f 100644
--- a/keystone-moon/keystone/auth/controllers.py
+++ b/keystone-moon/keystone/auth/controllers.py
@@ -23,13 +23,13 @@ from oslo_utils import importutils
import six
import stevedore
+from keystone.common import config
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import wsgi
-from keystone import config
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants
from keystone.i18n import _, _LI, _LW
from keystone.resource import controllers as resource_controllers
@@ -45,8 +45,8 @@ AUTH_PLUGINS_LOADED = False
def load_auth_method(method):
plugin_name = CONF.auth.get(method) or 'default'
+ namespace = 'keystone.auth.%s' % method
try:
- namespace = 'keystone.auth.%s' % method
driver_manager = stevedore.DriverManager(namespace, plugin_name,
invoke_on_load=True)
return driver_manager.driver
@@ -55,13 +55,16 @@ def load_auth_method(method):
'attempt to load using import_object instead.',
method, plugin_name)
- @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
- in_favor_of='entrypoints',
- what='direct import of driver')
- def _load_using_import(plugin_name):
- return importutils.import_object(plugin_name)
+ driver = importutils.import_object(plugin_name)
- return _load_using_import(plugin_name)
+ msg = (_(
+ 'Direct import of auth plugin %(name)r is deprecated as of Liberty in '
+ 'favor of its entrypoint from %(namespace)r and may be removed in '
+ 'N.') %
+ {'name': plugin_name, 'namespace': namespace})
+ versionutils.report_deprecated_feature(LOG, msg)
+
+ return driver
def load_auth_methods():
@@ -174,6 +177,10 @@ class AuthInfo(object):
target='domain')
try:
if domain_name:
+ if (CONF.resource.domain_name_url_safe == 'strict' and
+ utils.is_not_url_safe(domain_name)):
+ msg = _('Domain name cannot contain reserved characters.')
+ raise exception.Unauthorized(message=msg)
domain_ref = self.resource_api.get_domain_by_name(
domain_name)
else:
@@ -193,6 +200,10 @@ class AuthInfo(object):
target='project')
try:
if project_name:
+ if (CONF.resource.project_name_url_safe == 'strict' and
+ utils.is_not_url_safe(project_name)):
+ msg = _('Project name cannot contain reserved characters.')
+ raise exception.Unauthorized(message=msg)
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
@@ -423,7 +434,7 @@ class Auth(controller.V3Controller):
return
# Skip scoping when unscoped federated token is being issued
- if federation_constants.IDENTITY_PROVIDER in auth_context:
+ if constants.IDENTITY_PROVIDER in auth_context:
return
# Do not scope if request is for explicitly unscoped token
@@ -479,7 +490,6 @@ class Auth(controller.V3Controller):
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
-
# The 'external' method allows any 'REMOTE_USER' based authentication
# In some cases the server can set REMOTE_USER as '' instead of
# dropping it, so this must be filtered out
@@ -549,13 +559,23 @@ class Auth(controller.V3Controller):
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
+
+ audit_id_only = ('audit_id_only' in context['query_string'])
+
tokens = self.token_provider_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = utils.isotime(expires)
+ if audit_id_only:
+ t.pop('id', None)
data = {'revoked': tokens}
+
+ if audit_id_only:
+ # No need to obfuscate if no token IDs.
+ return data
+
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
@@ -580,7 +600,7 @@ class Auth(controller.V3Controller):
if user_id:
try:
user_refs = self.assignment_api.list_projects_for_user(user_id)
- except exception.UserNotFound:
+ except exception.UserNotFound: # nosec
# federated users have an id but they don't link to anything
pass
@@ -601,7 +621,7 @@ class Auth(controller.V3Controller):
if user_id:
try:
user_refs = self.assignment_api.list_domains_for_user(user_id)
- except exception.UserNotFound:
+ except exception.UserNotFound: # nosec
# federated users have an id but they don't link to anything
pass
diff --git a/keystone-moon/keystone/auth/core.py b/keystone-moon/keystone/auth/core.py
index 9da2c123..b865d82b 100644
--- a/keystone-moon/keystone/auth/core.py
+++ b/keystone-moon/keystone/auth/core.py
@@ -89,6 +89,6 @@ class AuthMethodHandler(object):
Authentication payload in the form of a dictionary for the
next authentication step if this is a multi step
authentication.
- :raises: exception.Unauthorized for authentication failure
+ :raises keystone.exception.Unauthorized: for authentication failure
"""
raise exception.Unauthorized()
diff --git a/keystone-moon/keystone/auth/plugins/core.py b/keystone-moon/keystone/auth/plugins/core.py
index bcad27e5..c513f815 100644
--- a/keystone-moon/keystone/auth/plugins/core.py
+++ b/keystone-moon/keystone/auth/plugins/core.py
@@ -99,18 +99,17 @@ def convert_integer_to_method_list(method_int):
@dependency.requires('identity_api', 'resource_api')
-class UserAuthInfo(object):
+class BaseUserInfo(object):
- @staticmethod
- def create(auth_payload, method_name):
- user_auth_info = UserAuthInfo()
+ @classmethod
+ def create(cls, auth_payload, method_name):
+ user_auth_info = cls()
user_auth_info._validate_and_normalize_auth_data(auth_payload)
user_auth_info.METHOD_NAME = method_name
return user_auth_info
def __init__(self):
self.user_id = None
- self.password = None
self.user_ref = None
self.METHOD_NAME = None
@@ -164,7 +163,6 @@ class UserAuthInfo(object):
if not user_id and not user_name:
raise exception.ValidationError(attribute='id or name',
target='user')
- self.password = user_info.get('password')
try:
if user_name:
if 'domain' not in user_info:
@@ -185,3 +183,29 @@ class UserAuthInfo(object):
self.user_ref = user_ref
self.user_id = user_ref['id']
self.domain_id = domain_ref['id']
+
+
+class UserAuthInfo(BaseUserInfo):
+
+ def __init__(self):
+ super(UserAuthInfo, self).__init__()
+ self.password = None
+
+ def _validate_and_normalize_auth_data(self, auth_payload):
+ super(UserAuthInfo, self)._validate_and_normalize_auth_data(
+ auth_payload)
+ user_info = auth_payload['user']
+ self.password = user_info.get('password')
+
+
+class TOTPUserInfo(BaseUserInfo):
+
+ def __init__(self):
+ super(TOTPUserInfo, self).__init__()
+ self.passcode = None
+
+ def _validate_and_normalize_auth_data(self, auth_payload):
+ super(TOTPUserInfo, self)._validate_and_normalize_auth_data(
+ auth_payload)
+ user_info = auth_payload['user']
+ self.passcode = user_info.get('passcode')
diff --git a/keystone-moon/keystone/auth/plugins/external.py b/keystone-moon/keystone/auth/plugins/external.py
index cabe6282..b00b808a 100644
--- a/keystone-moon/keystone/auth/plugins/external.py
+++ b/keystone-moon/keystone/auth/plugins/external.py
@@ -78,7 +78,6 @@ class Domain(Base):
The domain will be extracted from the REMOTE_DOMAIN environment
variable if present. If not, the default domain will be used.
"""
-
username = remote_user
try:
domain_name = context['environment']['REMOTE_DOMAIN']
@@ -94,6 +93,7 @@ class Domain(Base):
class KerberosDomain(Domain):
"""Allows `kerberos` as a method."""
+
def _authenticate(self, remote_user, context):
auth_type = context['environment'].get('AUTH_TYPE')
if auth_type != 'Negotiate':
diff --git a/keystone-moon/keystone/auth/plugins/mapped.py b/keystone-moon/keystone/auth/plugins/mapped.py
index 220ff013..e9716201 100644
--- a/keystone-moon/keystone/auth/plugins/mapped.py
+++ b/keystone-moon/keystone/auth/plugins/mapped.py
@@ -12,23 +12,20 @@
import functools
-from oslo_log import log
from pycadf import cadftaxonomy as taxonomy
from six.moves.urllib import parse
from keystone import auth
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
-from keystone.contrib.federation import constants as federation_constants
-from keystone.contrib.federation import utils
from keystone import exception
+from keystone.federation import constants as federation_constants
+from keystone.federation import utils
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
-LOG = log.getLogger(__name__)
-
METHOD_NAME = 'mapped'
@@ -56,7 +53,6 @@ class Mapped(auth.AuthMethodHandler):
``OS-FEDERATION:protocol``
"""
-
if 'id' in auth_payload:
token_ref = self._get_token_ref(auth_payload)
handle_scoped_token(context, auth_payload, auth_context, token_ref,
@@ -139,12 +135,22 @@ def handle_unscoped_token(context, auth_payload, auth_context,
user_id = None
try:
- mapped_properties, mapping_id = apply_mapping_filter(
- identity_provider, protocol, assertion, resource_api,
- federation_api, identity_api)
+ try:
+ mapped_properties, mapping_id = apply_mapping_filter(
+ identity_provider, protocol, assertion, resource_api,
+ federation_api, identity_api)
+ except exception.ValidationError as e:
+ # If the mapping is invalid or yields no valid identity,
+ # it is considered a failed authentication.
+ raise exception.Unauthorized(e)
if is_ephemeral_user(mapped_properties):
- user = setup_username(context, mapped_properties)
+ unique_id, display_name = (
+ get_user_unique_id_and_display_name(context, mapped_properties)
+ )
+ user = identity_api.shadow_federated_user(identity_provider,
+ protocol, unique_id,
+ display_name)
user_id = user['id']
group_ids = mapped_properties['group_ids']
utils.validate_groups_cardinality(group_ids, mapping_id)
@@ -205,7 +211,7 @@ def apply_mapping_filter(identity_provider, protocol, assertion,
return mapped_properties, mapping_id
-def setup_username(context, mapped_properties):
+def get_user_unique_id_and_display_name(context, mapped_properties):
"""Setup federated username.
Function covers all the cases for properly setting user id, a primary
@@ -225,9 +231,10 @@ def setup_username(context, mapped_properties):
:param mapped_properties: Properties issued by a RuleProcessor.
:type: dictionary
- :raises: exception.Unauthorized
- :returns: dictionary with user identification
- :rtype: dict
+ :raises keystone.exception.Unauthorized: If neither `user_name` nor
+ `user_id` is set.
+ :returns: tuple with user identification
+ :rtype: tuple
"""
user = mapped_properties['user']
@@ -248,5 +255,4 @@ def setup_username(context, mapped_properties):
user_id = user_name
user['id'] = parse.quote(user_id)
-
- return user
+ return (user['id'], user['name'])
diff --git a/keystone-moon/keystone/auth/plugins/oauth1.py b/keystone-moon/keystone/auth/plugins/oauth1.py
index e081cd62..bf60f91c 100644
--- a/keystone-moon/keystone/auth/plugins/oauth1.py
+++ b/keystone-moon/keystone/auth/plugins/oauth1.py
@@ -12,26 +12,21 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log
from oslo_utils import timeutils
from keystone import auth
from keystone.common import controller
from keystone.common import dependency
-from keystone.contrib.oauth1 import core as oauth
-from keystone.contrib.oauth1 import validator
from keystone import exception
from keystone.i18n import _
-
-
-LOG = log.getLogger(__name__)
+from keystone.oauth1 import core as oauth
+from keystone.oauth1 import validator
@dependency.requires('oauth_api')
class OAuth(auth.AuthMethodHandler):
def authenticate(self, context, auth_info, auth_context):
"""Turn a signed request with an access key into a keystone token."""
-
headers = context['headers']
oauth_headers = oauth.get_oauth_headers(headers)
access_token_id = oauth_headers.get('oauth_token')
diff --git a/keystone-moon/keystone/auth/plugins/password.py b/keystone-moon/keystone/auth/plugins/password.py
index 16492a32..a16887b4 100644
--- a/keystone-moon/keystone/auth/plugins/password.py
+++ b/keystone-moon/keystone/auth/plugins/password.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log
-
from keystone import auth
from keystone.auth import plugins as auth_plugins
from keystone.common import dependency
@@ -23,8 +21,6 @@ from keystone.i18n import _
METHOD_NAME = 'password'
-LOG = log.getLogger(__name__)
-
@dependency.requires('identity_api')
class Password(auth.AuthMethodHandler):
@@ -33,8 +29,6 @@ class Password(auth.AuthMethodHandler):
"""Try to authenticate against the identity backend."""
user_info = auth_plugins.UserAuthInfo.create(auth_payload, METHOD_NAME)
- # FIXME(gyee): identity.authenticate() can use some refactoring since
- # all we care is password matches
try:
self.identity_api.authenticate(
context,
diff --git a/keystone-moon/keystone/auth/plugins/saml2.py b/keystone-moon/keystone/auth/plugins/saml2.py
index cf7a8a50..0e7ec6bc 100644
--- a/keystone-moon/keystone/auth/plugins/saml2.py
+++ b/keystone-moon/keystone/auth/plugins/saml2.py
@@ -10,17 +10,26 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
+
from keystone.auth.plugins import mapped
-""" Provide an entry point to authenticate with SAML2
-This plugin subclasses mapped.Mapped, and may be specified in keystone.conf:
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.auth.plugins.saml2.Saml2',
+ in_favor_of='keystone.auth.plugins.mapped.Mapped',
+ remove_in=+2)
+class Saml2(mapped.Mapped):
+ """Provide an entry point to authenticate with SAML2.
+
+ This plugin subclasses ``mapped.Mapped``, and may be specified in
+ keystone.conf::
- [auth]
- methods = external,password,token,saml2
- saml2 = keystone.auth.plugins.mapped.Mapped
-"""
+ [auth]
+ methods = external,password,token,saml2
+ saml2 = keystone.auth.plugins.mapped.Mapped
+ """
-class Saml2(mapped.Mapped):
pass
diff --git a/keystone-moon/keystone/auth/plugins/totp.py b/keystone-moon/keystone/auth/plugins/totp.py
new file mode 100644
index 00000000..d0b61b3b
--- /dev/null
+++ b/keystone-moon/keystone/auth/plugins/totp.py
@@ -0,0 +1,99 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Time-based One-time Password Algorithm (TOTP) auth plugin
+
+TOTP is an algorithm that computes a one-time password from a shared secret
+key and the current time.
+
+TOTP is built on a hash-based message authentication code (HMAC).
+It combines a secret key with the current timestamp using a cryptographic hash
+function to generate a one-time password. The timestamp typically increases in
+30-second intervals, so passwords generated close together in time from the
+same secret key will be equal.
+"""
+
+import base64
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.twofactor import totp as crypto_totp
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone import auth
+from keystone.auth import plugins
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _
+
+
+METHOD_NAME = 'totp'
+
+LOG = log.getLogger(__name__)
+
+
+def _generate_totp_passcode(secret):
+ """Generate TOTP passcode.
+
+ :param bytes secret: A base32 encoded secret for the TOTP authentication
+ :returns: totp passcode as bytes
+ """
+ if isinstance(secret, six.text_type):
+ # NOTE(dstanek): since this may be coming from the JSON stored in the
+ # database it may be UTF-8 encoded
+ secret = secret.encode('utf-8')
+
+ # NOTE(nonameentername): cryptography takes a non base32 encoded value for
+ # TOTP. Add the correct padding to be able to base32 decode
+ while len(secret) % 8 != 0:
+ secret = secret + b'='
+
+ decoded = base64.b32decode(secret)
+ totp = crypto_totp.TOTP(
+ decoded, 6, hashes.SHA1(), 30, backend=default_backend())
+ return totp.generate(timeutils.utcnow_ts(microsecond=True))
+
+
+@dependency.requires('credential_api')
+class TOTP(auth.AuthMethodHandler):
+
+ def authenticate(self, context, auth_payload, auth_context):
+ """Try to authenticate using TOTP"""
+ user_info = plugins.TOTPUserInfo.create(auth_payload, METHOD_NAME)
+ auth_passcode = auth_payload.get('user').get('passcode')
+
+ credentials = self.credential_api.list_credentials_for_user(
+ user_info.user_id, type='totp')
+
+ valid_passcode = False
+ for credential in credentials:
+ try:
+ generated_passcode = _generate_totp_passcode(
+ credential['blob'])
+ if auth_passcode == generated_passcode:
+ valid_passcode = True
+ break
+ except (ValueError, KeyError):
+ LOG.debug('No TOTP match; credential id: %s, user_id: %s',
+ credential['id'], user_info.user_id)
+ except TypeError:
+ LOG.debug('Base32 decode failed for TOTP credential %s',
+ credential['id'])
+
+ if not valid_passcode:
+ # authentication failed because of invalid username or passcode
+ msg = _('Invalid username or TOTP passcode')
+ raise exception.Unauthorized(msg)
+
+ auth_context['user_id'] = user_info.user_id
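
For reference, a minimal standalone sketch of the passcode derivation the
plugin performs, assuming only the ``cryptography`` package; the secret value
and variable names are illustrative, not part of the keystone API::

    import base64
    import time

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.twofactor import totp as crypto_totp

    # Hypothetical base32-encoded 160-bit shared secret (example value only).
    secret = b'GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQ'

    # Pad to a multiple of 8 characters so base32 decoding succeeds,
    # mirroring the padding loop in _generate_totp_passcode() above.
    while len(secret) % 8 != 0:
        secret += b'='

    generator = crypto_totp.TOTP(
        base64.b32decode(secret), 6, hashes.SHA1(), 30,
        backend=default_backend())
    passcode = generator.generate(time.time())
    print(passcode)  # e.g. b'123456'; a new value every 30 seconds

In a v3 authentication request the resulting six-digit code is carried in the
``passcode`` field of the ``totp`` method's user object, which is what
``auth_payload.get('user').get('passcode')`` reads above.
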
diff --git a/keystone-moon/keystone/catalog/__init__.py b/keystone-moon/keystone/catalog/__init__.py
index 8d4d1567..29f297d6 100644
--- a/keystone-moon/keystone/catalog/__init__.py
+++ b/keystone-moon/keystone/catalog/__init__.py
@@ -14,4 +14,3 @@
from keystone.catalog import controllers # noqa
from keystone.catalog.core import * # noqa
-from keystone.catalog import routers # noqa
diff --git a/keystone-moon/keystone/catalog/backends/sql.py b/keystone-moon/keystone/catalog/backends/sql.py
index fe69db58..bd92f107 100644
--- a/keystone-moon/keystone/catalog/backends/sql.py
+++ b/keystone-moon/keystone/catalog/backends/sql.py
@@ -21,8 +21,10 @@ from sqlalchemy.sql import true
from keystone import catalog
from keystone.catalog import core
+from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
+from keystone.i18n import _
CONF = cfg.CONF
@@ -43,13 +45,6 @@ class Region(sql.ModelBase, sql.DictBase):
# "left" and "right" and provide support for a nested set
# model.
parent_region_id = sql.Column(sql.String(255), nullable=True)
-
- # TODO(jaypipes): I think it's absolutely stupid that every single model
- # is required to have an "extra" column because of the
- # DictBase in the keystone.common.sql.core module. Forcing
- # tables to have pointless columns in the database is just
- # bad. Remove all of this extra JSON blob stuff.
- # See: https://bugs.launchpad.net/keystone/+bug/1265071
extra = sql.Column(sql.JsonBlob())
endpoints = sqlalchemy.orm.relationship("Endpoint", backref="region")
@@ -89,10 +84,10 @@ class Endpoint(sql.ModelBase, sql.DictBase):
class Catalog(catalog.CatalogDriverV8):
# Regions
def list_regions(self, hints):
- session = sql.get_session()
- regions = session.query(Region)
- regions = sql.filter_limit_query(Region, regions, hints)
- return [s.to_dict() for s in list(regions)]
+ with sql.session_for_read() as session:
+ regions = session.query(Region)
+ regions = sql.filter_limit_query(Region, regions, hints)
+ return [s.to_dict() for s in list(regions)]
def _get_region(self, session, region_id):
ref = session.query(Region).get(region_id)
@@ -141,12 +136,11 @@ class Catalog(catalog.CatalogDriverV8):
return False
def get_region(self, region_id):
- session = sql.get_session()
- return self._get_region(session, region_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_region(session, region_id).to_dict()
def delete_region(self, region_id):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_region(session, region_id)
if self._has_endpoints(session, ref, ref):
raise exception.RegionDeletionError(region_id=region_id)
@@ -155,16 +149,14 @@ class Catalog(catalog.CatalogDriverV8):
@sql.handle_conflicts(conflict_type='region')
def create_region(self, region_ref):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
self._check_parent_region(session, region_ref)
region = Region.from_dict(region_ref)
session.add(region)
- return region.to_dict()
+ return region.to_dict()
def update_region(self, region_id, region_ref):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
self._check_parent_region(session, region_ref)
ref = self._get_region(session, region_id)
old_dict = ref.to_dict()
@@ -174,15 +166,15 @@ class Catalog(catalog.CatalogDriverV8):
for attr in Region.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_region, attr))
- return ref.to_dict()
+ return ref.to_dict()
# Services
- @sql.truncated
+ @driver_hints.truncated
def list_services(self, hints):
- session = sql.get_session()
- services = session.query(Service)
- services = sql.filter_limit_query(Service, services, hints)
- return [s.to_dict() for s in list(services)]
+ with sql.session_for_read() as session:
+ services = session.query(Service)
+ services = sql.filter_limit_query(Service, services, hints)
+ return [s.to_dict() for s in list(services)]
def _get_service(self, session, service_id):
ref = session.query(Service).get(service_id)
@@ -191,26 +183,23 @@ class Catalog(catalog.CatalogDriverV8):
return ref
def get_service(self, service_id):
- session = sql.get_session()
- return self._get_service(session, service_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_service(session, service_id).to_dict()
def delete_service(self, service_id):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_service(session, service_id)
session.query(Endpoint).filter_by(service_id=service_id).delete()
session.delete(ref)
def create_service(self, service_id, service_ref):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
service = Service.from_dict(service_ref)
session.add(service)
- return service.to_dict()
+ return service.to_dict()
def update_service(self, service_id, service_ref):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_service(session, service_id)
old_dict = ref.to_dict()
old_dict.update(service_ref)
@@ -219,20 +208,17 @@ class Catalog(catalog.CatalogDriverV8):
if attr != 'id':
setattr(ref, attr, getattr(new_service, attr))
ref.extra = new_service.extra
- return ref.to_dict()
+ return ref.to_dict()
# Endpoints
def create_endpoint(self, endpoint_id, endpoint_ref):
- session = sql.get_session()
new_endpoint = Endpoint.from_dict(endpoint_ref)
-
- with session.begin():
+ with sql.session_for_write() as session:
session.add(new_endpoint)
return new_endpoint.to_dict()
def delete_endpoint(self, endpoint_id):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_endpoint(session, endpoint_id)
session.delete(ref)
@@ -243,20 +229,18 @@ class Catalog(catalog.CatalogDriverV8):
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
def get_endpoint(self, endpoint_id):
- session = sql.get_session()
- return self._get_endpoint(session, endpoint_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_endpoint(session, endpoint_id).to_dict()
- @sql.truncated
+ @driver_hints.truncated
def list_endpoints(self, hints):
- session = sql.get_session()
- endpoints = session.query(Endpoint)
- endpoints = sql.filter_limit_query(Endpoint, endpoints, hints)
- return [e.to_dict() for e in list(endpoints)]
+ with sql.session_for_read() as session:
+ endpoints = session.query(Endpoint)
+ endpoints = sql.filter_limit_query(Endpoint, endpoints, hints)
+ return [e.to_dict() for e in list(endpoints)]
def update_endpoint(self, endpoint_id, endpoint_ref):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_endpoint(session, endpoint_id)
old_dict = ref.to_dict()
old_dict.update(endpoint_ref)
@@ -265,7 +249,7 @@ class Catalog(catalog.CatalogDriverV8):
if attr != 'id':
setattr(ref, attr, getattr(new_endpoint, attr))
ref.extra = new_endpoint.extra
- return ref.to_dict()
+ return ref.to_dict()
def get_catalog(self, user_id, tenant_id):
"""Retrieve and format the V2 service catalog.
@@ -287,44 +271,47 @@ class Catalog(catalog.CatalogDriverV8):
substitutions.update({'user_id': user_id})
silent_keyerror_failures = []
if tenant_id:
- substitutions.update({'tenant_id': tenant_id})
+ substitutions.update({
+ 'tenant_id': tenant_id,
+ 'project_id': tenant_id
+ })
else:
- silent_keyerror_failures = ['tenant_id']
-
- session = sql.get_session()
- endpoints = (session.query(Endpoint).
- options(sql.joinedload(Endpoint.service)).
- filter(Endpoint.enabled == true()).all())
-
- catalog = {}
-
- for endpoint in endpoints:
- if not endpoint.service['enabled']:
- continue
- try:
- formatted_url = core.format_url(
- endpoint['url'], substitutions,
- silent_keyerror_failures=silent_keyerror_failures)
- if formatted_url is not None:
- url = formatted_url
- else:
+ silent_keyerror_failures = ['tenant_id', 'project_id', ]
+
+ with sql.session_for_read() as session:
+ endpoints = (session.query(Endpoint).
+ options(sql.joinedload(Endpoint.service)).
+ filter(Endpoint.enabled == true()).all())
+
+ catalog = {}
+
+ for endpoint in endpoints:
+ if not endpoint.service['enabled']:
continue
- except exception.MalformedEndpoint:
- continue # this failure is already logged in format_url()
-
- region = endpoint['region_id']
- service_type = endpoint.service['type']
- default_service = {
- 'id': endpoint['id'],
- 'name': endpoint.service.extra.get('name', ''),
- 'publicURL': ''
- }
- catalog.setdefault(region, {})
- catalog[region].setdefault(service_type, default_service)
- interface_url = '%sURL' % endpoint['interface']
- catalog[region][service_type][interface_url] = url
-
- return catalog
+ try:
+ formatted_url = core.format_url(
+ endpoint['url'], substitutions,
+ silent_keyerror_failures=silent_keyerror_failures)
+ if formatted_url is not None:
+ url = formatted_url
+ else:
+ continue
+ except exception.MalformedEndpoint:
+ continue # this failure is already logged in format_url()
+
+ region = endpoint['region_id']
+ service_type = endpoint.service['type']
+ default_service = {
+ 'id': endpoint['id'],
+ 'name': endpoint.service.extra.get('name', ''),
+ 'publicURL': ''
+ }
+ catalog.setdefault(region, {})
+ catalog[region].setdefault(service_type, default_service)
+ interface_url = '%sURL' % endpoint['interface']
+ catalog[region][service_type][interface_url] = url
+
+ return catalog
def get_v3_catalog(self, user_id, tenant_id):
"""Retrieve and format the current V3 service catalog.
@@ -344,40 +331,242 @@ class Catalog(catalog.CatalogDriverV8):
d.update({'user_id': user_id})
silent_keyerror_failures = []
if tenant_id:
- d.update({'tenant_id': tenant_id})
+ d.update({
+ 'tenant_id': tenant_id,
+ 'project_id': tenant_id,
+ })
else:
- silent_keyerror_failures = ['tenant_id']
-
- session = sql.get_session()
- services = (session.query(Service).filter(Service.enabled == true()).
- options(sql.joinedload(Service.endpoints)).
- all())
-
- def make_v3_endpoints(endpoints):
- for endpoint in (ep.to_dict() for ep in endpoints if ep.enabled):
- del endpoint['service_id']
- del endpoint['legacy_endpoint_id']
- del endpoint['enabled']
- endpoint['region'] = endpoint['region_id']
- try:
- formatted_url = core.format_url(
- endpoint['url'], d,
- silent_keyerror_failures=silent_keyerror_failures)
- if formatted_url:
- endpoint['url'] = formatted_url
- else:
+ silent_keyerror_failures = ['tenant_id', 'project_id', ]
+
+ with sql.session_for_read() as session:
+ services = (session.query(Service).filter(
+ Service.enabled == true()).options(
+ sql.joinedload(Service.endpoints)).all())
+
+ def make_v3_endpoints(endpoints):
+ for endpoint in (ep.to_dict()
+ for ep in endpoints if ep.enabled):
+ del endpoint['service_id']
+ del endpoint['legacy_endpoint_id']
+ del endpoint['enabled']
+ endpoint['region'] = endpoint['region_id']
+ try:
+ formatted_url = core.format_url(
+ endpoint['url'], d,
+ silent_keyerror_failures=silent_keyerror_failures)
+ if formatted_url:
+ endpoint['url'] = formatted_url
+ else:
+ continue
+ except exception.MalformedEndpoint:
+ # this failure is already logged in format_url()
continue
- except exception.MalformedEndpoint:
- continue # this failure is already logged in format_url()
- yield endpoint
+ yield endpoint
+
+ # TODO(davechen): If there is service with no endpoints, we should
+ # skip the service instead of keeping it in the catalog,
+ # see bug #1436704.
+ def make_v3_service(svc):
+ eps = list(make_v3_endpoints(svc.endpoints))
+ service = {'endpoints': eps, 'id': svc.id, 'type': svc.type}
+ service['name'] = svc.extra.get('name', '')
+ return service
+
+ return [make_v3_service(svc) for svc in services]
+
+ @sql.handle_conflicts(conflict_type='project_endpoint')
+ def add_endpoint_to_project(self, endpoint_id, project_id):
+ with sql.session_for_write() as session:
+ endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id,
+ project_id=project_id)
+ session.add(endpoint_filter_ref)
+
+ def _get_project_endpoint_ref(self, session, endpoint_id, project_id):
+ endpoint_filter_ref = session.query(ProjectEndpoint).get(
+ (endpoint_id, project_id))
+ if endpoint_filter_ref is None:
+ msg = _('Endpoint %(endpoint_id)s not found in project '
+ '%(project_id)s') % {'endpoint_id': endpoint_id,
+ 'project_id': project_id}
+ raise exception.NotFound(msg)
+ return endpoint_filter_ref
+
+ def check_endpoint_in_project(self, endpoint_id, project_id):
+ with sql.session_for_read() as session:
+ self._get_project_endpoint_ref(session, endpoint_id, project_id)
+
+ def remove_endpoint_from_project(self, endpoint_id, project_id):
+ with sql.session_for_write() as session:
+ endpoint_filter_ref = self._get_project_endpoint_ref(
+ session, endpoint_id, project_id)
+ session.delete(endpoint_filter_ref)
+
+ def list_endpoints_for_project(self, project_id):
+ with sql.session_for_read() as session:
+ query = session.query(ProjectEndpoint)
+ query = query.filter_by(project_id=project_id)
+ endpoint_filter_refs = query.all()
+ return [ref.to_dict() for ref in endpoint_filter_refs]
+
+ def list_projects_for_endpoint(self, endpoint_id):
+ with sql.session_for_read() as session:
+ query = session.query(ProjectEndpoint)
+ query = query.filter_by(endpoint_id=endpoint_id)
+ endpoint_filter_refs = query.all()
+ return [ref.to_dict() for ref in endpoint_filter_refs]
+
+ def delete_association_by_endpoint(self, endpoint_id):
+ with sql.session_for_write() as session:
+ query = session.query(ProjectEndpoint)
+ query = query.filter_by(endpoint_id=endpoint_id)
+ query.delete(synchronize_session=False)
+
+ def delete_association_by_project(self, project_id):
+ with sql.session_for_write() as session:
+ query = session.query(ProjectEndpoint)
+ query = query.filter_by(project_id=project_id)
+ query.delete(synchronize_session=False)
+
+ def create_endpoint_group(self, endpoint_group_id, endpoint_group):
+ with sql.session_for_write() as session:
+ endpoint_group_ref = EndpointGroup.from_dict(endpoint_group)
+ session.add(endpoint_group_ref)
+ return endpoint_group_ref.to_dict()
+
+ def _get_endpoint_group(self, session, endpoint_group_id):
+ endpoint_group_ref = session.query(EndpointGroup).get(
+ endpoint_group_id)
+ if endpoint_group_ref is None:
+ raise exception.EndpointGroupNotFound(
+ endpoint_group_id=endpoint_group_id)
+ return endpoint_group_ref
+
+ def get_endpoint_group(self, endpoint_group_id):
+ with sql.session_for_read() as session:
+ endpoint_group_ref = self._get_endpoint_group(session,
+ endpoint_group_id)
+ return endpoint_group_ref.to_dict()
+
+ def update_endpoint_group(self, endpoint_group_id, endpoint_group):
+ with sql.session_for_write() as session:
+ endpoint_group_ref = self._get_endpoint_group(session,
+ endpoint_group_id)
+ old_endpoint_group = endpoint_group_ref.to_dict()
+ old_endpoint_group.update(endpoint_group)
+ new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group)
+ for attr in EndpointGroup.mutable_attributes:
+ setattr(endpoint_group_ref, attr,
+ getattr(new_endpoint_group, attr))
+ return endpoint_group_ref.to_dict()
+
+ def delete_endpoint_group(self, endpoint_group_id):
+ with sql.session_for_write() as session:
+ endpoint_group_ref = self._get_endpoint_group(session,
+ endpoint_group_id)
+ self._delete_endpoint_group_association_by_endpoint_group(
+ session, endpoint_group_id)
+ session.delete(endpoint_group_ref)
+
+ def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
+ with sql.session_for_read() as session:
+ ref = self._get_endpoint_group_in_project(session,
+ endpoint_group_id,
+ project_id)
+ return ref.to_dict()
+
+ @sql.handle_conflicts(conflict_type='project_endpoint_group')
+ def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+ with sql.session_for_write() as session:
+ # Create a new Project Endpoint group entity
+ endpoint_group_project_ref = ProjectEndpointGroupMembership(
+ endpoint_group_id=endpoint_group_id, project_id=project_id)
+ session.add(endpoint_group_project_ref)
+
+ def _get_endpoint_group_in_project(self, session,
+ endpoint_group_id, project_id):
+ endpoint_group_project_ref = session.query(
+ ProjectEndpointGroupMembership).get((endpoint_group_id,
+ project_id))
+ if endpoint_group_project_ref is None:
+ msg = _('Endpoint Group Project Association not found')
+ raise exception.NotFound(msg)
+ else:
+ return endpoint_group_project_ref
+
+ def list_endpoint_groups(self):
+ with sql.session_for_read() as session:
+ query = session.query(EndpointGroup)
+ endpoint_group_refs = query.all()
+ return [e.to_dict() for e in endpoint_group_refs]
+
+ def list_endpoint_groups_for_project(self, project_id):
+ with sql.session_for_read() as session:
+ query = session.query(ProjectEndpointGroupMembership)
+ query = query.filter_by(project_id=project_id)
+ endpoint_group_refs = query.all()
+ return [ref.to_dict() for ref in endpoint_group_refs]
+
+ def remove_endpoint_group_from_project(self, endpoint_group_id,
+ project_id):
+ with sql.session_for_write() as session:
+ endpoint_group_project_ref = self._get_endpoint_group_in_project(
+ session, endpoint_group_id, project_id)
+ session.delete(endpoint_group_project_ref)
+
+ def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
+ with sql.session_for_read() as session:
+ query = session.query(ProjectEndpointGroupMembership)
+ query = query.filter_by(endpoint_group_id=endpoint_group_id)
+ endpoint_group_refs = query.all()
+ return [ref.to_dict() for ref in endpoint_group_refs]
+
+ def _delete_endpoint_group_association_by_endpoint_group(
+ self, session, endpoint_group_id):
+ query = session.query(ProjectEndpointGroupMembership)
+ query = query.filter_by(endpoint_group_id=endpoint_group_id)
+ query.delete()
+
+ def delete_endpoint_group_association_by_project(self, project_id):
+ with sql.session_for_write() as session:
+ query = session.query(ProjectEndpointGroupMembership)
+ query = query.filter_by(project_id=project_id)
+ query.delete()
+
+
+class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin):
+ """project-endpoint relationship table."""
+
+ __tablename__ = 'project_endpoint'
+ attributes = ['endpoint_id', 'project_id']
+ endpoint_id = sql.Column(sql.String(64),
+ primary_key=True,
+ nullable=False)
+ project_id = sql.Column(sql.String(64),
+ primary_key=True,
+ nullable=False)
+
- # TODO(davechen): If there is service with no endpoints, we should skip
- # the service instead of keeping it in the catalog, see bug #1436704.
- def make_v3_service(svc):
- eps = list(make_v3_endpoints(svc.endpoints))
- service = {'endpoints': eps, 'id': svc.id, 'type': svc.type}
- service['name'] = svc.extra.get('name', '')
- return service
+class EndpointGroup(sql.ModelBase, sql.ModelDictMixin):
+ """Endpoint Groups table."""
- return [make_v3_service(svc) for svc in services]
+ __tablename__ = 'endpoint_group'
+ attributes = ['id', 'name', 'description', 'filters']
+ mutable_attributes = frozenset(['name', 'description', 'filters'])
+ id = sql.Column(sql.String(64), primary_key=True)
+ name = sql.Column(sql.String(255), nullable=False)
+ description = sql.Column(sql.Text, nullable=True)
+ filters = sql.Column(sql.JsonBlob(), nullable=False)
+
+
+class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin):
+ """Project to Endpoint group relationship table."""
+
+ __tablename__ = 'project_endpoint_group'
+ attributes = ['endpoint_group_id', 'project_id']
+ endpoint_group_id = sql.Column(sql.String(64),
+ sql.ForeignKey('endpoint_group.id'),
+ nullable=False)
+ project_id = sql.Column(sql.String(64), nullable=False)
+ __table_args__ = (sql.PrimaryKeyConstraint('endpoint_group_id',
+ 'project_id'),)
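
The recurring change in this file replaces module-level ``sql.get_session()``
calls with the ``session_for_read()``/``session_for_write()`` context
managers, which scope a transaction to the ``with`` block and commit or roll
back when it exits. A minimal behavioral sketch of that pattern (the session
class below is a stand-in, not keystone's enginefacade-backed
implementation)::

    import contextlib


    class FakeSession(object):
        """Stand-in session used only to illustrate the control flow."""

        def commit(self):
            print('commit')

        def rollback(self):
            print('rollback')

        def close(self):
            print('close')


    @contextlib.contextmanager
    def session_for_write():
        session = FakeSession()
        try:
            yield session
            session.commit()    # commit once the whole block succeeded
        except Exception:
            session.rollback()  # undo everything the block did
            raise
        finally:
            session.close()


    with session_for_write() as session:
        pass  # add or delete model objects here, as in create_region() above

Note that methods such as ``create_region()`` return ``region.to_dict()``
inside the ``with`` block, so the dict is built from the pending objects
before the commit happens on exit.
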
diff --git a/keystone-moon/keystone/catalog/backends/templated.py b/keystone-moon/keystone/catalog/backends/templated.py
index 31d8b9e0..2e80fd32 100644
--- a/keystone-moon/keystone/catalog/backends/templated.py
+++ b/keystone-moon/keystone/catalog/backends/templated.py
@@ -1,4 +1,4 @@
-# Copyright 2012 OpenStack Foundationc
+# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -17,8 +17,8 @@ import os.path
from oslo_config import cfg
from oslo_log import log
+import six
-from keystone.catalog.backends import kvs
from keystone.catalog import core
from keystone import exception
from keystone.i18n import _LC
@@ -56,7 +56,7 @@ def parse_templates(template_lines):
return o
-class Catalog(kvs.Catalog):
+class Catalog(core.Driver):
"""A backend that generates endpoints for the Catalog based on templates.
It is usually configured via config entries that look like:
@@ -100,11 +100,101 @@ class Catalog(kvs.Catalog):
def _load_templates(self, template_file):
try:
- self.templates = parse_templates(open(template_file))
+ with open(template_file) as f:
+ self.templates = parse_templates(f)
except IOError:
LOG.critical(_LC('Unable to open template file %s'), template_file)
raise
+ # region crud
+
+ def create_region(self, region_ref):
+ raise exception.NotImplemented()
+
+ def list_regions(self, hints):
+ return [{'id': region_id, 'description': '', 'parent_region_id': ''}
+ for region_id in self.templates]
+
+ def get_region(self, region_id):
+ if region_id in self.templates:
+ return {'id': region_id, 'description': '', 'parent_region_id': ''}
+ raise exception.RegionNotFound(region_id=region_id)
+
+ def update_region(self, region_id, region_ref):
+ raise exception.NotImplemented()
+
+ def delete_region(self, region_id):
+ raise exception.NotImplemented()
+
+ # service crud
+
+ def create_service(self, service_id, service_ref):
+ raise exception.NotImplemented()
+
+ def _list_services(self, hints):
+ for region_ref in six.itervalues(self.templates):
+ for service_type, service_ref in six.iteritems(region_ref):
+ yield {
+ 'id': service_type,
+ 'enabled': True,
+ 'name': service_ref.get('name', ''),
+ 'description': service_ref.get('description', ''),
+ 'type': service_type,
+ }
+
+ def list_services(self, hints):
+ return list(self._list_services(hints=None))
+
+ def get_service(self, service_id):
+ for service in self._list_services(hints=None):
+ if service['id'] == service_id:
+ return service
+ raise exception.ServiceNotFound(service_id=service_id)
+
+ def update_service(self, service_id, service_ref):
+ raise exception.NotImplemented()
+
+ def delete_service(self, service_id):
+ raise exception.NotImplemented()
+
+ # endpoint crud
+
+ def create_endpoint(self, endpoint_id, endpoint_ref):
+ raise exception.NotImplemented()
+
+ def _list_endpoints(self):
+ for region_id, region_ref in six.iteritems(self.templates):
+ for service_type, service_ref in six.iteritems(region_ref):
+ for key in service_ref:
+ if key.endswith('URL'):
+ interface = key[:-3]
+ endpoint_id = ('%s-%s-%s' %
+ (region_id, service_type, interface))
+ yield {
+ 'id': endpoint_id,
+ 'service_id': service_type,
+ 'interface': interface,
+ 'url': service_ref[key],
+ 'legacy_endpoint_id': None,
+ 'region_id': region_id,
+ 'enabled': True,
+ }
+
+ def list_endpoints(self, hints):
+ return list(self._list_endpoints())
+
+ def get_endpoint(self, endpoint_id):
+ for endpoint in self._list_endpoints():
+ if endpoint['id'] == endpoint_id:
+ return endpoint
+ raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+
+ def update_endpoint(self, endpoint_id, endpoint_ref):
+ raise exception.NotImplemented()
+
+ def delete_endpoint(self, endpoint_id):
+ raise exception.NotImplemented()
+
def get_catalog(self, user_id, tenant_id):
"""Retrieve and format the V2 service catalog.
@@ -124,9 +214,12 @@ class Catalog(kvs.Catalog):
substitutions.update({'user_id': user_id})
silent_keyerror_failures = []
if tenant_id:
- substitutions.update({'tenant_id': tenant_id})
+ substitutions.update({
+ 'tenant_id': tenant_id,
+ 'project_id': tenant_id,
+ })
else:
- silent_keyerror_failures = ['tenant_id']
+ silent_keyerror_failures = ['tenant_id', 'project_id', ]
catalog = {}
# TODO(davechen): If there is service with no endpoints, we should
@@ -148,3 +241,58 @@ class Catalog(kvs.Catalog):
catalog[region][service] = service_data
return catalog
+
+ def add_endpoint_to_project(self, endpoint_id, project_id):
+ raise exception.NotImplemented()
+
+ def remove_endpoint_from_project(self, endpoint_id, project_id):
+ raise exception.NotImplemented()
+
+ def check_endpoint_in_project(self, endpoint_id, project_id):
+ raise exception.NotImplemented()
+
+ def list_endpoints_for_project(self, project_id):
+ raise exception.NotImplemented()
+
+ def list_projects_for_endpoint(self, endpoint_id):
+ raise exception.NotImplemented()
+
+ def delete_association_by_endpoint(self, endpoint_id):
+ raise exception.NotImplemented()
+
+ def delete_association_by_project(self, project_id):
+ raise exception.NotImplemented()
+
+ def create_endpoint_group(self, endpoint_group):
+ raise exception.NotImplemented()
+
+ def get_endpoint_group(self, endpoint_group_id):
+ raise exception.NotImplemented()
+
+ def update_endpoint_group(self, endpoint_group_id, endpoint_group):
+ raise exception.NotImplemented()
+
+ def delete_endpoint_group(self, endpoint_group_id):
+ raise exception.NotImplemented()
+
+ def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+ raise exception.NotImplemented()
+
+ def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
+ raise exception.NotImplemented()
+
+ def list_endpoint_groups(self):
+ raise exception.NotImplemented()
+
+ def list_endpoint_groups_for_project(self, project_id):
+ raise exception.NotImplemented()
+
+ def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
+ raise exception.NotImplemented()
+
+ def remove_endpoint_group_from_project(self, endpoint_group_id,
+ project_id):
+ raise exception.NotImplemented()
+
+ def delete_endpoint_group_association_by_project(self, project_id):
+ raise exception.NotImplemented()
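
The template file consumed by ``_load_templates()`` is a flat list of
``catalog.$Region.$service.$key = value`` assignments. The lines below are
illustrative, modeled on keystone's sample ``default_catalog.templates``; the
hostnames and ports are placeholders::

    catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
    catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
    catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
    catalog.RegionOne.identity.name = Identity Service

Given such a file, ``_list_endpoints()`` above derives one endpoint per
``*URL`` key, with ids such as ``RegionOne-identity-public`` built from the
region, the service type, and the interface prefix of the key.
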
diff --git a/keystone-moon/keystone/catalog/controllers.py b/keystone-moon/keystone/catalog/controllers.py
index e14b268a..fc64c922 100644
--- a/keystone-moon/keystone/catalog/controllers.py
+++ b/keystone-moon/keystone/catalog/controllers.py
@@ -15,6 +15,8 @@
import uuid
+import six
+
from keystone.catalog import core
from keystone.catalog import schema
from keystone.common import controller
@@ -24,6 +26,7 @@ from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
from keystone import notifications
+from keystone import resource
INTERFACES = ['public', 'internal', 'admin']
@@ -379,3 +382,234 @@ class EndpointV3(controller.V3Controller):
def delete_endpoint(self, context, endpoint_id):
initiator = notifications._get_request_audit_info(context)
return self.catalog_api.delete_endpoint(endpoint_id, initiator)
+
+
+@dependency.requires('catalog_api', 'resource_api')
+class EndpointFilterV3Controller(controller.V3Controller):
+
+ def __init__(self):
+ super(EndpointFilterV3Controller, self).__init__()
+ notifications.register_event_callback(
+ notifications.ACTIONS.deleted, 'project',
+ self._on_project_or_endpoint_delete)
+ notifications.register_event_callback(
+ notifications.ACTIONS.deleted, 'endpoint',
+ self._on_project_or_endpoint_delete)
+
+ def _on_project_or_endpoint_delete(self, service, resource_type, operation,
+ payload):
+ project_or_endpoint_id = payload['resource_info']
+ if resource_type == 'project':
+ self.catalog_api.delete_association_by_project(
+ project_or_endpoint_id)
+ else:
+ self.catalog_api.delete_association_by_endpoint(
+ project_or_endpoint_id)
+
+ @controller.protected()
+ def add_endpoint_to_project(self, context, project_id, endpoint_id):
+ """Establishes an association between an endpoint and a project."""
+ # NOTE(gyee): we just need to make sure the endpoint and project exist
+ # first. We don't really care whether the project is disabled.
+ # The relationship can still be established even with a disabled
+ # project as there are no security implications.
+ self.catalog_api.get_endpoint(endpoint_id)
+ self.resource_api.get_project(project_id)
+ self.catalog_api.add_endpoint_to_project(endpoint_id,
+ project_id)
+
+ @controller.protected()
+ def check_endpoint_in_project(self, context, project_id, endpoint_id):
+ """Verifies endpoint is currently associated with given project."""
+ self.catalog_api.get_endpoint(endpoint_id)
+ self.resource_api.get_project(project_id)
+ self.catalog_api.check_endpoint_in_project(endpoint_id,
+ project_id)
+
+ @controller.protected()
+ def list_endpoints_for_project(self, context, project_id):
+ """List all endpoints currently associated with a given project."""
+ self.resource_api.get_project(project_id)
+ filtered_endpoints = self.catalog_api.list_endpoints_for_project(
+ project_id)
+
+ return EndpointV3.wrap_collection(
+ context, [v for v in six.itervalues(filtered_endpoints)])
+
+ @controller.protected()
+ def remove_endpoint_from_project(self, context, project_id, endpoint_id):
+ """Remove the endpoint from the association with given project."""
+ self.catalog_api.remove_endpoint_from_project(endpoint_id,
+ project_id)
+
+ @controller.protected()
+ def list_projects_for_endpoint(self, context, endpoint_id):
+ """Return a list of projects associated with the endpoint."""
+ self.catalog_api.get_endpoint(endpoint_id)
+ refs = self.catalog_api.list_projects_for_endpoint(endpoint_id)
+
+ projects = [self.resource_api.get_project(
+ ref['project_id']) for ref in refs]
+ return resource.controllers.ProjectV3.wrap_collection(context,
+ projects)
+
+
+@dependency.requires('catalog_api', 'resource_api')
+class EndpointGroupV3Controller(controller.V3Controller):
+ collection_name = 'endpoint_groups'
+ member_name = 'endpoint_group'
+
+ VALID_FILTER_KEYS = ['service_id', 'region_id', 'interface']
+
+ def __init__(self):
+ super(EndpointGroupV3Controller, self).__init__()
+
+ @classmethod
+ def base_url(cls, context, path=None):
+ """Construct a path and pass it to V3Controller.base_url method."""
+ path = '/OS-EP-FILTER/' + cls.collection_name
+ return super(EndpointGroupV3Controller, cls).base_url(context,
+ path=path)
+
+ @controller.protected()
+ @validation.validated(schema.endpoint_group_create, 'endpoint_group')
+ def create_endpoint_group(self, context, endpoint_group):
+ """Creates an Endpoint Group with the associated filters."""
+ ref = self._assign_unique_id(self._normalize_dict(endpoint_group))
+ self._require_attribute(ref, 'filters')
+ self._require_valid_filter(ref)
+ ref = self.catalog_api.create_endpoint_group(ref['id'], ref)
+ return EndpointGroupV3Controller.wrap_member(context, ref)
+
+ def _require_valid_filter(self, endpoint_group):
+ filters = endpoint_group.get('filters')
+ for key in six.iterkeys(filters):
+ if key not in self.VALID_FILTER_KEYS:
+ raise exception.ValidationError(
+ attribute=self._valid_filter_keys(),
+ target='endpoint_group')
+
+ def _valid_filter_keys(self):
+ return ' or '.join(self.VALID_FILTER_KEYS)
+
+ @controller.protected()
+ def get_endpoint_group(self, context, endpoint_group_id):
+ """Retrieve the endpoint group associated with the id if exists."""
+ ref = self.catalog_api.get_endpoint_group(endpoint_group_id)
+ return EndpointGroupV3Controller.wrap_member(
+ context, ref)
+
+ @controller.protected()
+ @validation.validated(schema.endpoint_group_update, 'endpoint_group')
+ def update_endpoint_group(self, context, endpoint_group_id,
+ endpoint_group):
+ """Update fixed values and/or extend the filters."""
+ if 'filters' in endpoint_group:
+ self._require_valid_filter(endpoint_group)
+ ref = self.catalog_api.update_endpoint_group(endpoint_group_id,
+ endpoint_group)
+ return EndpointGroupV3Controller.wrap_member(
+ context, ref)
+
+ @controller.protected()
+ def delete_endpoint_group(self, context, endpoint_group_id):
+ """Delete endpoint_group."""
+ self.catalog_api.delete_endpoint_group(endpoint_group_id)
+
+ @controller.protected()
+ def list_endpoint_groups(self, context):
+ """List all endpoint groups."""
+ refs = self.catalog_api.list_endpoint_groups()
+ return EndpointGroupV3Controller.wrap_collection(
+ context, refs)
+
+ @controller.protected()
+ def list_endpoint_groups_for_project(self, context, project_id):
+ """List all endpoint groups associated with a given project."""
+ return EndpointGroupV3Controller.wrap_collection(
+ context,
+ self.catalog_api.get_endpoint_groups_for_project(project_id))
+
+ @controller.protected()
+ def list_projects_associated_with_endpoint_group(self,
+ context,
+ endpoint_group_id):
+ """List all projects associated with endpoint group."""
+ endpoint_group_refs = (self.catalog_api.
+ list_projects_associated_with_endpoint_group(
+ endpoint_group_id))
+ projects = []
+ for endpoint_group_ref in endpoint_group_refs:
+ project = self.resource_api.get_project(
+ endpoint_group_ref['project_id'])
+ if project:
+ projects.append(project)
+ return resource.controllers.ProjectV3.wrap_collection(context,
+ projects)
+
+ @controller.protected()
+ def list_endpoints_associated_with_endpoint_group(self,
+ context,
+ endpoint_group_id):
+ """List all the endpoints filtered by a specific endpoint group."""
+ filtered_endpoints = (self.catalog_api.
+ get_endpoints_filtered_by_endpoint_group(
+ endpoint_group_id))
+ return EndpointV3.wrap_collection(context, filtered_endpoints)
+
+
+@dependency.requires('catalog_api', 'resource_api')
+class ProjectEndpointGroupV3Controller(controller.V3Controller):
+ collection_name = 'project_endpoint_groups'
+ member_name = 'project_endpoint_group'
+
+ def __init__(self):
+ super(ProjectEndpointGroupV3Controller, self).__init__()
+ notifications.register_event_callback(
+ notifications.ACTIONS.deleted, 'project',
+ self._on_project_delete)
+
+ def _on_project_delete(self, service, resource_type,
+ operation, payload):
+ project_id = payload['resource_info']
+ (self.catalog_api.
+ delete_endpoint_group_association_by_project(
+ project_id))
+
+ @controller.protected()
+ def get_endpoint_group_in_project(self, context, endpoint_group_id,
+ project_id):
+ """Retrieve the endpoint group associated with the id if exists."""
+ self.resource_api.get_project(project_id)
+ self.catalog_api.get_endpoint_group(endpoint_group_id)
+ ref = self.catalog_api.get_endpoint_group_in_project(
+ endpoint_group_id, project_id)
+ return ProjectEndpointGroupV3Controller.wrap_member(
+ context, ref)
+
+ @controller.protected()
+ def add_endpoint_group_to_project(self, context, endpoint_group_id,
+ project_id):
+ """Creates an association between an endpoint group and project."""
+ self.resource_api.get_project(project_id)
+ self.catalog_api.get_endpoint_group(endpoint_group_id)
+ self.catalog_api.add_endpoint_group_to_project(
+ endpoint_group_id, project_id)
+
+ @controller.protected()
+ def remove_endpoint_group_from_project(self, context, endpoint_group_id,
+ project_id):
+ """Remove the endpoint group from associated project."""
+ self.resource_api.get_project(project_id)
+ self.catalog_api.get_endpoint_group(endpoint_group_id)
+ self.catalog_api.remove_endpoint_group_from_project(
+ endpoint_group_id, project_id)
+
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ url = ('/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
+ '/projects/%(project_id)s' % {
+ 'endpoint_group_id': ref['endpoint_group_id'],
+ 'project_id': ref['project_id']})
+ ref.setdefault('links', {})
+ ref['links']['self'] = url
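
Together these controllers implement the OS-EP-FILTER API. A sketch of
driving the association calls over HTTP with ``python-requests``; the
keystone URL, token, and ids are placeholders, and the routes themselves are
registered in ``routers.py`` below::

    import requests

    KEYSTONE = 'http://localhost:5000/v3'      # placeholder endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # placeholder admin token
    project_id, endpoint_id = 'p123', 'e456'   # placeholder ids

    # Associate an endpoint with a project (add_endpoint_to_project above).
    r = requests.put(
        '%s/OS-EP-FILTER/projects/%s/endpoints/%s'
        % (KEYSTONE, project_id, endpoint_id),
        headers=HEADERS)
    print(r.status_code)  # typically 204 on success

    # List the endpoints now associated with the project
    # (list_endpoints_for_project above).
    r = requests.get(
        '%s/OS-EP-FILTER/projects/%s/endpoints' % (KEYSTONE, project_id),
        headers=HEADERS)
    print(r.json().get('endpoints'))

    # Create an endpoint group whose filters use the keys accepted by
    # _require_valid_filter(): service_id, region_id or interface.
    body = {'endpoint_group': {'name': 'public-endpoints',
                               'filters': {'interface': 'public'}}}
    r = requests.post('%s/OS-EP-FILTER/endpoint_groups' % KEYSTONE,
                      headers=HEADERS, json=body)
    print(r.status_code)  # typically 201 on success
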
diff --git a/keystone-moon/keystone/catalog/core.py b/keystone-moon/keystone/catalog/core.py
index 8bb72619..384a9b2b 100644
--- a/keystone-moon/keystone/catalog/core.py
+++ b/keystone-moon/keystone/catalog/core.py
@@ -18,6 +18,7 @@
import abc
import itertools
+from oslo_cache import core as oslo_cache
from oslo_config import cfg
from oslo_log import log
import six
@@ -35,12 +36,24 @@ from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='catalog')
WHITELISTED_PROPERTIES = [
- 'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
+ 'tenant_id', 'project_id', 'user_id',
+ 'public_bind_host', 'admin_bind_host',
'compute_host', 'admin_port', 'public_port',
'public_endpoint', 'admin_endpoint', ]
+# This is a general cache region for catalog administration (CRUD operations).
+MEMOIZE = cache.get_memoization_decorator(group='catalog')
+
+# This builds a discrete cache region dedicated to complete service catalogs
+# computed for a given user + project pair. Any write operation to create,
+# modify or delete elements of the service catalog should invalidate this
+# entire cache region.
+COMPUTED_CATALOG_REGION = oslo_cache.create_region()
+MEMOIZE_COMPUTED_CATALOG = cache.get_memoization_decorator(
+ group='catalog',
+ region=COMPUTED_CATALOG_REGION)
+
def format_url(url, substitutions, silent_keyerror_failures=None):
"""Formats a user-defined URL with the given substitutions.
@@ -52,7 +65,6 @@ def format_url(url, substitutions, silent_keyerror_failures=None):
:returns: a formatted URL
"""
-
substitutions = utils.WhiteListedItemFilter(
WHITELISTED_PROPERTIES,
substitutions)
@@ -108,6 +120,7 @@ def check_endpoint_url(url):
@dependency.provider('catalog_api')
+@dependency.requires('resource_api')
class Manager(manager.Manager):
"""Default pivot point for the Catalog backend.
@@ -129,7 +142,8 @@ class Manager(manager.Manager):
# Check duplicate ID
try:
self.get_region(region_ref['id'])
- except exception.RegionNotFound:
+ except exception.RegionNotFound: # nosec
+ # A region with the same id doesn't exist already, good.
pass
else:
msg = _('Duplicate ID, %s.') % region_ref['id']
@@ -148,6 +162,7 @@ class Manager(manager.Manager):
raise exception.RegionNotFound(region_id=parent_region_id)
notifications.Audit.created(self._REGION, ret['id'], initiator)
+ COMPUTED_CATALOG_REGION.invalidate()
return ret
@MEMOIZE
@@ -166,6 +181,7 @@ class Manager(manager.Manager):
ref = self.driver.update_region(region_id, region_ref)
notifications.Audit.updated(self._REGION, region_id, initiator)
self.get_region.invalidate(self, region_id)
+ COMPUTED_CATALOG_REGION.invalidate()
return ref
def delete_region(self, region_id, initiator=None):
@@ -173,6 +189,7 @@ class Manager(manager.Manager):
ret = self.driver.delete_region(region_id)
notifications.Audit.deleted(self._REGION, region_id, initiator)
self.get_region.invalidate(self, region_id)
+ COMPUTED_CATALOG_REGION.invalidate()
return ret
except exception.NotFound:
raise exception.RegionNotFound(region_id=region_id)
@@ -186,6 +203,7 @@ class Manager(manager.Manager):
service_ref.setdefault('name', '')
ref = self.driver.create_service(service_id, service_ref)
notifications.Audit.created(self._SERVICE, service_id, initiator)
+ COMPUTED_CATALOG_REGION.invalidate()
return ref
@MEMOIZE
@@ -199,6 +217,7 @@ class Manager(manager.Manager):
ref = self.driver.update_service(service_id, service_ref)
notifications.Audit.updated(self._SERVICE, service_id, initiator)
self.get_service.invalidate(self, service_id)
+ COMPUTED_CATALOG_REGION.invalidate()
return ref
def delete_service(self, service_id, initiator=None):
@@ -210,6 +229,7 @@ class Manager(manager.Manager):
for endpoint in endpoints:
if endpoint['service_id'] == service_id:
self.get_endpoint.invalidate(self, endpoint['id'])
+ COMPUTED_CATALOG_REGION.invalidate()
return ret
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
@@ -240,6 +260,7 @@ class Manager(manager.Manager):
ref = self.driver.create_endpoint(endpoint_id, endpoint_ref)
notifications.Audit.created(self._ENDPOINT, endpoint_id, initiator)
+ COMPUTED_CATALOG_REGION.invalidate()
return ref
def update_endpoint(self, endpoint_id, endpoint_ref, initiator=None):
@@ -248,6 +269,7 @@ class Manager(manager.Manager):
ref = self.driver.update_endpoint(endpoint_id, endpoint_ref)
notifications.Audit.updated(self._ENDPOINT, endpoint_id, initiator)
self.get_endpoint.invalidate(self, endpoint_id)
+ COMPUTED_CATALOG_REGION.invalidate()
return ref
def delete_endpoint(self, endpoint_id, initiator=None):
@@ -255,6 +277,7 @@ class Manager(manager.Manager):
ret = self.driver.delete_endpoint(endpoint_id)
notifications.Audit.deleted(self._ENDPOINT, endpoint_id, initiator)
self.get_endpoint.invalidate(self, endpoint_id)
+ COMPUTED_CATALOG_REGION.invalidate()
return ret
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
@@ -270,12 +293,96 @@ class Manager(manager.Manager):
def list_endpoints(self, hints=None):
return self.driver.list_endpoints(hints or driver_hints.Hints())
+ @MEMOIZE_COMPUTED_CATALOG
def get_catalog(self, user_id, tenant_id):
try:
return self.driver.get_catalog(user_id, tenant_id)
except exception.NotFound:
raise exception.NotFound('Catalog not found for user and tenant')
+ @MEMOIZE_COMPUTED_CATALOG
+ def get_v3_catalog(self, user_id, tenant_id):
+ return self.driver.get_v3_catalog(user_id, tenant_id)
+
+ def add_endpoint_to_project(self, endpoint_id, project_id):
+ self.driver.add_endpoint_to_project(endpoint_id, project_id)
+ COMPUTED_CATALOG_REGION.invalidate()
+
+ def remove_endpoint_from_project(self, endpoint_id, project_id):
+ self.driver.remove_endpoint_from_project(endpoint_id, project_id)
+ COMPUTED_CATALOG_REGION.invalidate()
+
+ def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+ self.driver.add_endpoint_group_to_project(
+ endpoint_group_id, project_id)
+ COMPUTED_CATALOG_REGION.invalidate()
+
+ def remove_endpoint_group_from_project(self, endpoint_group_id,
+ project_id):
+ self.driver.remove_endpoint_group_from_project(
+ endpoint_group_id, project_id)
+ COMPUTED_CATALOG_REGION.invalidate()
+
+ def get_endpoint_groups_for_project(self, project_id):
+ # recover the project endpoint group memberships and for each
+ # membership recover the endpoint group
+ self.resource_api.get_project(project_id)
+ try:
+ refs = self.list_endpoint_groups_for_project(project_id)
+ endpoint_groups = [self.get_endpoint_group(
+ ref['endpoint_group_id']) for ref in refs]
+ return endpoint_groups
+ except exception.EndpointGroupNotFound:
+ return []
+
+ def get_endpoints_filtered_by_endpoint_group(self, endpoint_group_id):
+ endpoints = self.list_endpoints()
+ filters = self.get_endpoint_group(endpoint_group_id)['filters']
+ filtered_endpoints = []
+
+ for endpoint in endpoints:
+ is_candidate = True
+ for key, value in filters.items():
+ if endpoint[key] != value:
+ is_candidate = False
+ break
+ if is_candidate:
+ filtered_endpoints.append(endpoint)
+ return filtered_endpoints
+
+ def list_endpoints_for_project(self, project_id):
+ """List all endpoints associated with a project.
+
+ :param project_id: project identifier to check
+ :type project_id: string
+ :returns: a dict of endpoints keyed by endpoint id, or an empty dict.
+
+ """
+ refs = self.driver.list_endpoints_for_project(project_id)
+ filtered_endpoints = {}
+ for ref in refs:
+ try:
+ endpoint = self.get_endpoint(ref['endpoint_id'])
+ filtered_endpoints.update({ref['endpoint_id']: endpoint})
+ except exception.EndpointNotFound:
+ # remove bad reference from association
+ self.remove_endpoint_from_project(ref['endpoint_id'],
+ project_id)
+
+ # need to recover endpoint_groups associated with project
+ # then for each endpoint group return the endpoints.
+ endpoint_groups = self.get_endpoint_groups_for_project(project_id)
+ for endpoint_group in endpoint_groups:
+ endpoint_refs = self.get_endpoints_filtered_by_endpoint_group(
+ endpoint_group['id'])
+ # now check if any endpoints for current endpoint group are not
+ # contained in the list of filtered endpoints
+ for endpoint_ref in endpoint_refs:
+ if endpoint_ref['id'] not in filtered_endpoints:
+ filtered_endpoints[endpoint_ref['id']] = endpoint_ref
+
+ return filtered_endpoints
+
@six.add_metaclass(abc.ABCMeta)
class CatalogDriverV8(object):
@@ -304,8 +411,9 @@ class CatalogDriverV8(object):
def create_region(self, region_ref):
"""Creates a new region.
- :raises: keystone.exception.Conflict
- :raises: keystone.exception.RegionNotFound (if parent region invalid)
+ :raises keystone.exception.Conflict: If the region already exists.
+ :raises keystone.exception.RegionNotFound: If the parent region
+ is invalid.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -328,7 +436,7 @@ class CatalogDriverV8(object):
"""Get region by id.
:returns: region_ref dict
- :raises: keystone.exception.RegionNotFound
+ :raises keystone.exception.RegionNotFound: If the region doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -338,7 +446,7 @@ class CatalogDriverV8(object):
"""Update region by id.
:returns: region_ref dict
- :raises: keystone.exception.RegionNotFound
+ :raises keystone.exception.RegionNotFound: If the region doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -347,7 +455,7 @@ class CatalogDriverV8(object):
def delete_region(self, region_id):
"""Deletes an existing region.
- :raises: keystone.exception.RegionNotFound
+ :raises keystone.exception.RegionNotFound: If the region doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -356,7 +464,7 @@ class CatalogDriverV8(object):
def create_service(self, service_id, service_ref):
"""Creates a new service.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate service exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -379,7 +487,8 @@ class CatalogDriverV8(object):
"""Get service by id.
:returns: service_ref dict
- :raises: keystone.exception.ServiceNotFound
+ :raises keystone.exception.ServiceNotFound: If the service doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -389,7 +498,8 @@ class CatalogDriverV8(object):
"""Update service by id.
:returns: service_ref dict
- :raises: keystone.exception.ServiceNotFound
+ :raises keystone.exception.ServiceNotFound: If the service doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -398,7 +508,8 @@ class CatalogDriverV8(object):
def delete_service(self, service_id):
"""Deletes an existing service.
- :raises: keystone.exception.ServiceNotFound
+ :raises keystone.exception.ServiceNotFound: If the service doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -407,8 +518,9 @@ class CatalogDriverV8(object):
def create_endpoint(self, endpoint_id, endpoint_ref):
"""Creates a new endpoint for a service.
- :raises: keystone.exception.Conflict,
- keystone.exception.ServiceNotFound
+ :raises keystone.exception.Conflict: If a duplicate endpoint exists.
+ :raises keystone.exception.ServiceNotFound: If the service doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -418,7 +530,8 @@ class CatalogDriverV8(object):
"""Get endpoint by id.
:returns: endpoint_ref dict
- :raises: keystone.exception.EndpointNotFound
+ :raises keystone.exception.EndpointNotFound: If the endpoint doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -441,8 +554,10 @@ class CatalogDriverV8(object):
"""Get endpoint by id.
:returns: endpoint_ref dict
- :raises: keystone.exception.EndpointNotFound
- keystone.exception.ServiceNotFound
+ :raises keystone.exception.EndpointNotFound: If the endpoint doesn't
+ exist.
+ :raises keystone.exception.ServiceNotFound: If the service doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -451,7 +566,8 @@ class CatalogDriverV8(object):
def delete_endpoint(self, endpoint_id):
"""Deletes an endpoint for a service.
- :raises: keystone.exception.EndpointNotFound
+ :raises keystone.exception.EndpointNotFound: If the endpoint doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -476,7 +592,7 @@ class CatalogDriverV8(object):
:returns: A nested dict representing the service catalog or an
empty dict.
- :raises: keystone.exception.NotFound
+ :raises keystone.exception.NotFound: If the endpoint doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -508,7 +624,7 @@ class CatalogDriverV8(object):
}]
:returns: A list representing the service catalog or an empty list
- :raises: keystone.exception.NotFound
+ :raises keystone.exception.NotFound: If the endpoint doesn't exist.
"""
v2_catalog = self.get_catalog(user_id, tenant_id)
@@ -544,5 +660,235 @@ class CatalogDriverV8(object):
return v3_catalog
+ @abc.abstractmethod
+ def add_endpoint_to_project(self, endpoint_id, project_id):
+ """Create an endpoint to project association.
+
+ :param endpoint_id: identity of endpoint to associate
+ :type endpoint_id: string
+ :param project_id: identity of the project to be associated with
+ :type project_id: string
+ :raises keystone.exception.Conflict: If the endpoint was already
+ added to the project.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def remove_endpoint_from_project(self, endpoint_id, project_id):
+ """Removes an endpoint to project association.
+
+ :param endpoint_id: identity of endpoint to remove
+ :type endpoint_id: string
+ :param project_id: identity of the project associated with
+ :type project_id: string
+ :raises keystone.exception.NotFound: If the endpoint was not found
+ in the project.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def check_endpoint_in_project(self, endpoint_id, project_id):
+ """Checks if an endpoint is associated with a project.
+
+ :param endpoint_id: identity of endpoint to check
+ :type endpoint_id: string
+ :param project_id: identity of the project associated with
+ :type project_id: string
+ :raises keystone.exception.NotFound: If the endpoint was not found
+ in the project.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_endpoints_for_project(self, project_id):
+ """List all endpoints associated with a project.
+
+ :param project_id: identity of the project to check
+ :type project_id: string
+ :returns: a list of endpoint ids or an empty list.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_projects_for_endpoint(self, endpoint_id):
+ """List all projects associated with an endpoint.
+
+ :param endpoint_id: identity of endpoint to check
+ :type endpoint_id: string
+ :returns: a list of projects or an empty list.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_association_by_endpoint(self, endpoint_id):
+ """Removes all the endpoints to project association with endpoint.
+
+ :param endpoint_id: identity of endpoint to check
+ :type endpoint_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def delete_association_by_project(self, project_id):
+ """Removes all the endpoints to project association with project.
+
+ :param project_id: identity of the project to check
+ :type project_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def create_endpoint_group(self, endpoint_group):
+ """Create an endpoint group.
+
+ :param endpoint_group: endpoint group to create
+ :type endpoint_group: dictionary
+ :raises keystone.exception.Conflict: If a duplicate endpoint group
+ already exists.
+ :returns: an endpoint group representation.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_endpoint_group(self, endpoint_group_id):
+ """Get an endpoint group.
+
+ :param endpoint_group_id: identity of endpoint group to retrieve
+ :type endpoint_group_id: string
+ :raises keystone.exception.NotFound: If the endpoint group was not
+ found.
+ :returns: an endpoint group representation.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_endpoint_group(self, endpoint_group_id, endpoint_group):
+ """Update an endpoint group.
+
+ :param endpoint_group_id: identity of endpoint group to retrieve
+ :type endpoint_group_id: string
+ :param endpoint_group: A full or partial endpoint_group
+ :type endpoint_group: dictionary
+ :raises keystone.exception.NotFound: If the endpoint group was not
+ found.
+ :returns: an endpoint group representation.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_endpoint_group(self, endpoint_group_id):
+ """Delete an endpoint group.
+
+ :param endpoint_group_id: identity of endpoint group to delete
+ :type endpoint_group_id: string
+ :raises keystone.exception.NotFound: If the endpoint group was not
+ found.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
+ """Adds an endpoint group to project association.
+
+ :param endpoint_group_id: identity of the endpoint group to associate
+ :type endpoint_group_id: string
+ :param project_id: identity of project to associate
+ :type project_id: string
+ :raises keystone.exception.Conflict: If the endpoint group was already
+ added to the project.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
+ """Get endpoint group to project association.
+
+ :param endpoint_group_id: identity of endpoint group to retrieve
+ :type endpoint_group_id: string
+ :param project_id: identity of project to associate
+ :type project_id: string
+ :raises keystone.exception.NotFound: If the endpoint group to the
+ project association was not found.
+ :returns: a project endpoint group representation.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_endpoint_groups(self):
+ """List all endpoint groups.
+
+ :returns: a list of endpoint group representations.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_endpoint_groups_for_project(self, project_id):
+ """List all endpoint group to project associations for a project.
+
+ :param project_id: identity of the project to check
+ :type project_id: string
+ :returns: a list of endpoint group to project associations.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
+ """List all projects associated with endpoint group.
+
+ :param endpoint_group_id: identity of the endpoint group
+ :type endpoint_group_id: string
+ :returns: a list of endpoint group to project associations.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def remove_endpoint_group_from_project(self, endpoint_group_id,
+ project_id):
+ """Remove an endpoint to project association.
+
+ :param endpoint_group_id: identity of the endpoint group to remove
+ :type endpoint_group_id: string
+ :param project_id: identity of the project
+ :type project_id: string
+ :raises keystone.exception.NotFound: If endpoint group project
+ association was not found.
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_endpoint_group_association_by_project(self, project_id):
+ """Remove endpoint group to project associations.
+
+ :param project_id: identity of the project whose associations are removed
+ :type project_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
Driver = manager.create_legacy_driver(CatalogDriverV8)
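
The abstract methods above only fix the contract; as an illustration, a minimal in-memory implementation of two of them might look like the following (hypothetical class, not a real keystone driver; real backends raise the same keystone exceptions rather than returning error values)::

    from keystone import exception


    class InMemoryEndpointGroups(object):
        """Illustrative sketch of the endpoint-group driver contract."""

        def __init__(self):
            self._groups = {}           # endpoint_group_id -> dict
            self._associations = set()  # (endpoint_group_id, project_id)

        def get_endpoint_group(self, endpoint_group_id):
            try:
                return self._groups[endpoint_group_id]
            except KeyError:
                raise exception.NotFound(target=endpoint_group_id)

        def add_endpoint_group_to_project(self, endpoint_group_id,
                                          project_id):
            key = (endpoint_group_id, project_id)
            if key in self._associations:
                raise exception.Conflict(type='project endpoint group',
                                         details=str(key))
            self._associations.add(key)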
diff --git a/keystone-moon/keystone/catalog/routers.py b/keystone-moon/keystone/catalog/routers.py
index f3bd988b..8c6e96f0 100644
--- a/keystone-moon/keystone/catalog/routers.py
+++ b/keystone-moon/keystone/catalog/routers.py
@@ -12,15 +12,72 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
from keystone.catalog import controllers
+from keystone.common import json_home
from keystone.common import router
from keystone.common import wsgi
+build_resource_relation = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-EP-FILTER', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+ json_home.build_v3_extension_parameter_relation,
+ extension_name='OS-EP-FILTER', extension_version='1.0')
+
+ENDPOINT_GROUP_PARAMETER_RELATION = build_parameter_relation(
+ parameter_name='endpoint_group_id')
+
+
class Routers(wsgi.RoutersBase):
+ """API for the keystone catalog.
+
+ The Endpoint Filter API looks like::
+
+ PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
+ GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
+ GET /OS-EP-FILTER/projects/{project_id}/endpoints
+ GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups
+
+ GET /OS-EP-FILTER/endpoint_groups
+ POST /OS-EP-FILTER/endpoint_groups
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+ DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
+
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints
+
+ PUT /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/
+ {project_id}
+ GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/
+ {project_id}
+ HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/
+ {project_id}
+ DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/
+ {project_id}
+
+ """
+
+ PATH_PREFIX = '/OS-EP-FILTER'
+ PATH_PROJECT_ENDPOINT = '/projects/{project_id}/endpoints/{endpoint_id}'
+ PATH_ENDPOINT_GROUPS = '/endpoint_groups/{endpoint_group_id}'
+ PATH_ENDPOINT_GROUP_PROJECTS = PATH_ENDPOINT_GROUPS + (
+ '/projects/{project_id}')
def append_v3_routers(self, mapper, routers):
regions_controller = controllers.RegionV3()
+ endpoint_filter_controller = controllers.EndpointFilterV3Controller()
+ endpoint_group_controller = controllers.EndpointGroupV3Controller()
+ project_endpoint_group_controller = (
+ controllers.ProjectEndpointGroupV3Controller())
routers.append(router.Router(regions_controller,
'regions', 'region',
resource_descriptions=self.v3_resources))
@@ -38,3 +95,88 @@ class Routers(wsgi.RoutersBase):
routers.append(router.Router(controllers.EndpointV3(),
'endpoints', 'endpoint',
resource_descriptions=self.v3_resources))
+
+ self._add_resource(
+ mapper, endpoint_filter_controller,
+ path=self.PATH_PREFIX + '/endpoints/{endpoint_id}/projects',
+ get_action='list_projects_for_endpoint',
+ rel=build_resource_relation(resource_name='endpoint_projects'),
+ path_vars={
+ 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_filter_controller,
+ path=self.PATH_PREFIX + self.PATH_PROJECT_ENDPOINT,
+ get_head_action='check_endpoint_in_project',
+ put_action='add_endpoint_to_project',
+ delete_action='remove_endpoint_from_project',
+ rel=build_resource_relation(resource_name='project_endpoint'),
+ path_vars={
+ 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_filter_controller,
+ path=self.PATH_PREFIX + '/projects/{project_id}/endpoints',
+ get_action='list_endpoints_for_project',
+ rel=build_resource_relation(resource_name='project_endpoints'),
+ path_vars={
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + '/projects/{project_id}/endpoint_groups',
+ get_action='list_endpoint_groups_for_project',
+ rel=build_resource_relation(
+ resource_name='project_endpoint_groups'),
+ path_vars={
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ })
+ self._add_resource(
+ mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + '/endpoint_groups',
+ get_action='list_endpoint_groups',
+ post_action='create_endpoint_group',
+ rel=build_resource_relation(resource_name='endpoint_groups'))
+ self._add_resource(
+ mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS,
+ get_head_action='get_endpoint_group',
+ patch_action='update_endpoint_group',
+ delete_action='delete_endpoint_group',
+ rel=build_resource_relation(resource_name='endpoint_group'),
+ path_vars={
+ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+ })
+ self._add_resource(
+ mapper, project_endpoint_group_controller,
+ path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUP_PROJECTS,
+ get_head_action='get_endpoint_group_in_project',
+ put_action='add_endpoint_group_to_project',
+ delete_action='remove_endpoint_group_from_project',
+ rel=build_resource_relation(
+ resource_name='endpoint_group_to_project_association'),
+ path_vars={
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+ })
+ self._add_resource(
+ mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
+ '/projects'),
+ get_action='list_projects_associated_with_endpoint_group',
+ rel=build_resource_relation(
+ resource_name='projects_associated_with_endpoint_group'),
+ path_vars={
+ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+ })
+ self._add_resource(
+ mapper, endpoint_group_controller,
+ path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
+ '/endpoints'),
+ get_action='list_endpoints_associated_with_endpoint_group',
+ rel=build_resource_relation(
+ resource_name='endpoints_in_endpoint_group'),
+ path_vars={
+ 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
+ })
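
The routes registered above map directly onto the HTTP API sketched in the class docstring. A rough client-side walkthrough using the ``requests`` library (endpoint URL, token, and IDs below are placeholders, not real values)::

    import requests

    KEYSTONE = 'http://127.0.0.1:35357/v3'     # placeholder endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # placeholder admin token

    # Create an endpoint group that matches all public endpoints.
    body = {'endpoint_group': {'name': 'public-endpoints',
                               'filters': {'interface': 'public'}}}
    resp = requests.post(KEYSTONE + '/OS-EP-FILTER/endpoint_groups',
                         json=body, headers=HEADERS)
    group_id = resp.json()['endpoint_group']['id']

    # Associate the group with a project, then list that project's groups.
    assoc = '%s/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (
        KEYSTONE, group_id, 'PROJECT_ID')
    requests.put(assoc, headers=HEADERS)
    groups = requests.get(
        KEYSTONE + '/OS-EP-FILTER/projects/PROJECT_ID/endpoint_groups',
        headers=HEADERS).json()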
diff --git a/keystone-moon/keystone/catalog/schema.py b/keystone-moon/keystone/catalog/schema.py
index 671f1233..b9643131 100644
--- a/keystone-moon/keystone/catalog/schema.py
+++ b/keystone-moon/keystone/catalog/schema.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from keystone.common import validation
from keystone.common.validation import parameter_types
@@ -96,3 +97,23 @@ endpoint_update = {
'minProperties': 1,
'additionalProperties': True
}
+
+_endpoint_group_properties = {
+ 'description': validation.nullable(parameter_types.description),
+ 'filters': {
+ 'type': 'object'
+ },
+ 'name': parameter_types.name
+}
+
+endpoint_group_create = {
+ 'type': 'object',
+ 'properties': _endpoint_group_properties,
+ 'required': ['name', 'filters']
+}
+
+endpoint_group_update = {
+ 'type': 'object',
+ 'properties': _endpoint_group_properties,
+ 'minProperties': 1
+}
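
These schemas are plain JSON Schema dictionaries, so their effect can be checked directly with the ``jsonschema`` library (keystone itself wraps this in its own validation layer). A quick sanity check, assuming the module above is importable as ``schema``::

    import jsonschema

    from keystone.catalog import schema

    ok = {'name': 'public-endpoints', 'filters': {'interface': 'public'}}
    jsonschema.validate(ok, schema.endpoint_group_create)  # passes

    try:
        jsonschema.validate({'filters': {}}, schema.endpoint_group_create)
    except jsonschema.ValidationError as exc:
        print(exc.message)  # "'name' is a required property"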
diff --git a/keystone-moon/keystone/cmd/cli.py b/keystone-moon/keystone/cmd/cli.py
index d993d71c..f95007e0 100644
--- a/keystone-moon/keystone/cmd/cli.py
+++ b/keystone-moon/keystone/cmd/cli.py
@@ -16,20 +16,25 @@ from __future__ import absolute_import
from __future__ import print_function
import os
+import sys
+import uuid
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_serialization import jsonutils
import pbr.version
+from keystone.common import config
from keystone.common import driver_hints
from keystone.common import openssl
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone.common import utils
-from keystone import config
from keystone import exception
-from keystone.i18n import _, _LW
+from keystone.federation import idp
+from keystone.federation import utils as mapping_engine
+from keystone.i18n import _, _LW, _LI
from keystone.server import backends
from keystone import token
@@ -49,6 +54,295 @@ class BaseApp(object):
return parser
+class BootStrap(BaseApp):
+ """Perform the basic bootstrap process"""
+
+ name = "bootstrap"
+
+ def __init__(self):
+ self.load_backends()
+ self.project_id = uuid.uuid4().hex
+ self.role_id = uuid.uuid4().hex
+ self.service_id = None
+ self.service_name = None
+ self.username = None
+ self.project_name = None
+ self.role_name = None
+ self.password = None
+ self.public_url = None
+ self.internal_url = None
+ self.admin_url = None
+ self.region_id = None
+ self.endpoints = {}
+
+ @classmethod
+ def add_argument_parser(cls, subparsers):
+ parser = super(BootStrap, cls).add_argument_parser(subparsers)
+ parser.add_argument('--bootstrap-username', default='admin',
+ metavar='OS_BOOTSTRAP_USERNAME',
+ help=('The username of the initial keystone '
+ 'user during bootstrap process.'))
+ # NOTE(morganfainberg): See below for ENV Variable that can be used
+ # in lieu of the command-line arguments.
+ parser.add_argument('--bootstrap-password', default=None,
+ metavar='OS_BOOTSTRAP_PASSWORD',
+ help='The bootstrap user password')
+ parser.add_argument('--bootstrap-project-name', default='admin',
+ metavar='OS_BOOTSTRAP_PROJECT_NAME',
+ help=('The initial project created during the '
+ 'keystone bootstrap process.'))
+ parser.add_argument('--bootstrap-role-name', default='admin',
+ metavar='OS_BOOTSTRAP_ROLE_NAME',
+ help=('The initial role name created during the '
+ 'keystone bootstrap process.'))
+ parser.add_argument('--bootstrap-service-name', default='keystone',
+ metavar='OS_BOOTSTRAP_SERVICE_NAME',
+ help=('The initial name for the initial identity '
+ 'service created during the keystone '
+ 'bootstrap process.'))
+ parser.add_argument('--bootstrap-admin-url',
+ metavar='OS_BOOTSTRAP_ADMIN_URL',
+ help=('The initial identity admin url created '
+ 'during the keystone bootstrap process. '
+ 'e.g. http://127.0.0.1:35357/v2.0'))
+ parser.add_argument('--bootstrap-public-url',
+ metavar='OS_BOOTSTRAP_PUBLIC_URL',
+ help=('The initial identity public url created '
+ 'during the keystone bootstrap process. '
+ 'e.g. http://127.0.0.1:5000/v2.0'))
+ parser.add_argument('--bootstrap-internal-url',
+ metavar='OS_BOOTSTRAP_INTERNAL_URL',
+ help=('The initial identity internal url created '
+ 'during the keystone bootstrap process. '
+ 'e.g. http://127.0.0.1:5000/v2.0'))
+ parser.add_argument('--bootstrap-region-id',
+ metavar='OS_BOOTSTRAP_REGION_ID',
+ help=('The initial region_id endpoints will be '
+ 'placed in during the keystone bootstrap '
+ 'process.'))
+ return parser
+
+ def load_backends(self):
+ drivers = backends.load_backends()
+ self.resource_manager = drivers['resource_api']
+ self.identity_manager = drivers['identity_api']
+ self.assignment_manager = drivers['assignment_api']
+ self.catalog_manager = drivers['catalog_api']
+ self.role_manager = drivers['role_api']
+
+ def _get_config(self):
+ self.username = (
+ os.environ.get('OS_BOOTSTRAP_USERNAME') or
+ CONF.command.bootstrap_username)
+ self.project_name = (
+ os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or
+ CONF.command.bootstrap_project_name)
+ self.role_name = (
+ os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or
+ CONF.command.bootstrap_role_name)
+ self.password = (
+ os.environ.get('OS_BOOTSTRAP_PASSWORD') or
+ CONF.command.bootstrap_password)
+ self.service_name = (
+ os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or
+ CONF.command.bootstrap_service_name)
+ self.admin_url = (
+ os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or
+ CONF.command.bootstrap_admin_url)
+ self.public_url = (
+ os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or
+ CONF.command.bootstrap_public_url)
+ self.internal_url = (
+ os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or
+ CONF.command.bootstrap_internal_url)
+ self.region_id = (
+ os.environ.get('OS_BOOTSTRAP_REGION_ID') or
+ CONF.command.bootstrap_region_id)
+
+ def do_bootstrap(self):
+ """Perform the bootstrap actions.
+
+ Create bootstrap user, project, and role so that CMS, humans, or
+ scripts can continue to perform initial setup (domains, projects,
+ services, endpoints, etc.) of Keystone when standing up a new
+ deployment.
+ """
+ self._get_config()
+
+ if self.password is None:
+ print(_('Either --bootstrap-password argument or '
+ 'OS_BOOTSTRAP_PASSWORD must be set.'))
+ raise ValueError
+
+ # NOTE(morganfainberg): Ensure the default domain is in-fact created
+ default_domain = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': 'Default',
+ 'enabled': True,
+ 'description': 'The default domain'
+ }
+ try:
+ self.resource_manager.create_domain(
+ domain_id=default_domain['id'],
+ domain=default_domain)
+ LOG.info(_LI('Created domain %s'), default_domain['id'])
+ except exception.Conflict:
+ # NOTE(morganfainberg): Domain already exists, continue on.
+ LOG.info(_LI('Domain %s already exists, skipping creation.'),
+ default_domain['id'])
+
+ try:
+ self.resource_manager.create_project(
+ project_id=self.project_id,
+ project={'enabled': True,
+ 'id': self.project_id,
+ 'domain_id': default_domain['id'],
+ 'description': 'Bootstrap project for initializing '
+ 'the cloud.',
+ 'name': self.project_name}
+ )
+ LOG.info(_LI('Created project %s'), self.project_name)
+ except exception.Conflict:
+ LOG.info(_LI('Project %s already exists, skipping creation.'),
+ self.project_name)
+ project = self.resource_manager.get_project_by_name(
+ self.project_name, default_domain['id'])
+ self.project_id = project['id']
+
+ # NOTE(morganfainberg): Do not create the user if it already exists.
+ try:
+ user = self.identity_manager.get_user_by_name(self.username,
+ default_domain['id'])
+ LOG.info(_LI('User %s already exists, skipping creation.'),
+ self.username)
+ except exception.UserNotFound:
+ user = self.identity_manager.create_user(
+ user_ref={'name': self.username,
+ 'enabled': True,
+ 'domain_id': default_domain['id'],
+ 'password': self.password
+ }
+ )
+ LOG.info(_LI('Created user %s'), self.username)
+
+ # NOTE(morganfainberg): Do not create the role if it already exists.
+ try:
+ self.role_manager.create_role(
+ role_id=self.role_id,
+ role={'name': self.role_name,
+ 'id': self.role_id},
+ )
+ LOG.info(_LI('Created Role %s'), self.role_name)
+ except exception.Conflict:
+ LOG.info(_LI('Role %s exists, skipping creation.'), self.role_name)
+ # NOTE(davechen): There is no backend method to get the role
+ # by name, so build the hints to list the roles and filter by
+ # name instead.
+ hints = driver_hints.Hints()
+ hints.add_filter('name', self.role_name)
+ role = self.role_manager.list_roles(hints)
+ self.role_id = role[0]['id']
+
+ # NOTE(morganfainberg): Handle the case that the role assignment has
+ # already occurred.
+ try:
+ self.assignment_manager.add_role_to_user_and_project(
+ user_id=user['id'],
+ tenant_id=self.project_id,
+ role_id=self.role_id
+ )
+ LOG.info(_LI('Granted %(role)s on %(project)s to user'
+ ' %(username)s.'),
+ {'role': self.role_name,
+ 'project': self.project_name,
+ 'username': self.username})
+ except exception.Conflict:
+ LOG.info(_LI('User %(username)s already has %(role)s on '
+ '%(project)s.'),
+ {'username': self.username,
+ 'role': self.role_name,
+ 'project': self.project_name})
+
+ if self.region_id:
+ try:
+ self.catalog_manager.create_region(
+ region_ref={'id': self.region_id}
+ )
+ LOG.info(_LI('Created Region %s'), self.region_id)
+ except exception.Conflict:
+ LOG.info(_LI('Region %s exists, skipping creation.'),
+ self.region_id)
+
+ if self.public_url or self.admin_url or self.internal_url:
+ hints = driver_hints.Hints()
+ hints.add_filter('type', 'identity')
+ services = self.catalog_manager.list_services(hints)
+
+ if services:
+ service_ref = services[0]
+
+ hints = driver_hints.Hints()
+ hints.add_filter('service_id', service_ref['id'])
+ if self.region_id:
+ hints.add_filter('region_id', self.region_id)
+
+ endpoints = self.catalog_manager.list_endpoints(hints)
+ else:
+ service_ref = {'id': uuid.uuid4().hex,
+ 'name': self.service_name,
+ 'type': 'identity',
+ 'enabled': True}
+
+ self.catalog_manager.create_service(
+ service_id=service_ref['id'],
+ service_ref=service_ref)
+
+ endpoints = []
+
+ self.service_id = service_ref['id']
+
+ available_interfaces = {e['interface']: e for e in endpoints}
+ expected_endpoints = {'public': self.public_url,
+ 'internal': self.internal_url,
+ 'admin': self.admin_url}
+
+ for interface, url in expected_endpoints.items():
+ if not url:
+ # not specified to bootstrap command
+ continue
+
+ try:
+ endpoint_ref = available_interfaces[interface]
+ except KeyError:
+ endpoint_ref = {'id': uuid.uuid4().hex,
+ 'interface': interface,
+ 'url': url,
+ 'service_id': self.service_id,
+ 'enabled': True}
+
+ if self.region_id:
+ endpoint_ref['region_id'] = self.region_id
+
+ self.catalog_manager.create_endpoint(
+ endpoint_id=endpoint_ref['id'],
+ endpoint_ref=endpoint_ref)
+
+ LOG.info(_LI('Created %(interface)s endpoint %(url)s'),
+ {'interface': interface, 'url': url})
+ else:
+ # NOTE(jamielennox): electing not to update existing
+ # endpoints here. There may be a call to do so in the future.
+ LOG.info(_LI('Skipping %s endpoint as already created'),
+ interface)
+
+ self.endpoints[interface] = endpoint_ref['id']
+
+ @classmethod
+ def main(cls):
+ klass = cls()
+ klass.do_bootstrap()
+
+
class DbSync(BaseApp):
"""Sync the database."""
@@ -148,15 +442,21 @@ class PKISetup(BaseCertificateSetup):
"""Set up Key pairs and certificates for token signing and verification.
This is NOT intended for production use, see Keystone Configuration
- documentation for details.
+ documentation for details. As of the Mitaka release, this command has
+ been DEPRECATED and may be removed in the 'O' release.
"""
name = 'pki_setup'
@classmethod
def main(cls):
- LOG.warn(_LW('keystone-manage pki_setup is not recommended for '
- 'production use.'))
+ versionutils.report_deprecated_feature(
+ LOG,
+ _LW("keystone-manage pki_setup is deprecated as of Mitaka in "
+ "favor of not using PKI tokens and may be removed in 'O' "
+ "release."))
+ LOG.warning(_LW('keystone-manage pki_setup is not recommended for '
+ 'production use.'))
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
@@ -174,8 +474,8 @@ class SSLSetup(BaseCertificateSetup):
@classmethod
def main(cls):
- LOG.warn(_LW('keystone-manage ssl_setup is not recommended for '
- 'production use.'))
+ LOG.warning(_LW('keystone-manage ssl_setup is not recommended for '
+ 'production use.'))
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
@@ -199,7 +499,7 @@ class FernetSetup(BasePermissionsSetup):
keystone_user_id, keystone_group_id = cls.get_user_group()
fernet.create_key_directory(keystone_user_id, keystone_group_id)
- if fernet.validate_key_repository():
+ if fernet.validate_key_repository(requires_write=True):
fernet.initialize_key_repository(
keystone_user_id, keystone_group_id)
@@ -229,7 +529,7 @@ class FernetRotate(BasePermissionsSetup):
from keystone.token.providers.fernet import utils as fernet
keystone_user_id, keystone_group_id = cls.get_user_group()
- if fernet.validate_key_repository():
+ if fernet.validate_key_repository(requires_write=True):
fernet.rotate_keys(keystone_user_id, keystone_group_id)
@@ -271,7 +571,7 @@ class MappingPurge(BaseApp):
@staticmethod
def main():
def validate_options():
- # NOTE(henry-nash); It would be nice to use the argparse automated
+ # NOTE(henry-nash): It would be nice to use the argparse automated
# checking for this validation, but the only way I can see doing
# that is to make the default (i.e. if no optional parameters
# are specified) to purge all mappings - and that sounds too
@@ -328,11 +628,35 @@ DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
+def _domain_config_finder(conf_dir):
+ """Return a generator of all domain config files found in a directory.
+
+ Domain configs match the filename pattern of
+ 'keystone.<domain_name>.conf'.
+
+ :returns: generator yielding (filename, domain_name) tuples
+ """
+ LOG.info(_LI('Scanning %r for domain config files'), conf_dir)
+ for r, d, f in os.walk(conf_dir):
+ for fname in f:
+ if (fname.startswith(DOMAIN_CONF_FHEAD) and
+ fname.endswith(DOMAIN_CONF_FTAIL)):
+ if fname.count('.') >= 2:
+ domain_name = fname[len(DOMAIN_CONF_FHEAD):
+ -len(DOMAIN_CONF_FTAIL)]
+ yield (os.path.join(r, fname), domain_name)
+ continue
+
+ LOG.warning(_LW('Ignoring file (%s) while scanning '
+ 'domain config directory'), fname)
+
+
class DomainConfigUploadFiles(object):
- def __init__(self):
+ def __init__(self, domain_config_finder=_domain_config_finder):
super(DomainConfigUploadFiles, self).__init__()
self.load_backends()
+ self._domain_config_finder = domain_config_finder
def load_backends(self):
drivers = backends.load_backends()
@@ -368,11 +692,10 @@ class DomainConfigUploadFiles(object):
:param file_name: the file containing the config options
:param domain_name: the domain name
- :raises: ValueError: the domain does not exist or already has domain
- specific configurations defined
- :raises: Exceptions from oslo config: there is an issue with options
- defined in the config file or its
- format
+ :raises ValueError: the domain does not exist or already has domain
+ specific configurations defined.
+ :raises Exceptions from oslo config: there is an issue with options
+ defined in the config file or its format.
The caller of this method should catch the errors raised and handle
appropriately in order that the best UX experience can be provided for
@@ -428,7 +751,7 @@ class DomainConfigUploadFiles(object):
"""
try:
self.upload_config_to_database(file_name, domain_name)
- except ValueError:
+ except ValueError: # nosec
# We've already given all the info we can in a message, so carry
# on to the next one
pass
@@ -467,21 +790,8 @@ class DomainConfigUploadFiles(object):
os.path.join(conf_dir, fname), domain_name)
return
- # Request is to transfer all config files, so let's read all the
- # files in the config directory, and transfer those that match the
- # filename pattern of 'keystone.<domain_name>.conf'
- for r, d, f in os.walk(conf_dir):
- for fname in f:
- if (fname.startswith(DOMAIN_CONF_FHEAD) and
- fname.endswith(DOMAIN_CONF_FTAIL)):
- if fname.count('.') >= 2:
- self.upload_configs_to_database(
- os.path.join(r, fname),
- fname[len(DOMAIN_CONF_FHEAD):
- -len(DOMAIN_CONF_FTAIL)])
- else:
- LOG.warn(_LW('Ignoring file (%s) while scanning '
- 'domain config directory'), fname)
+ for filename, domain_name in self._domain_config_finder(conf_dir):
+ self.upload_configs_to_database(filename, domain_name)
def run(self):
# First off, let's just check we can talk to the domain database
@@ -528,7 +838,7 @@ class DomainConfigUpload(BaseApp):
dcu = DomainConfigUploadFiles()
status = dcu.run()
if status is not None:
- exit(status)
+ sys.exit(status)
class SamlIdentityProviderMetadata(BaseApp):
@@ -538,9 +848,6 @@ class SamlIdentityProviderMetadata(BaseApp):
@staticmethod
def main():
- # NOTE(marek-denis): Since federation is currently an extension import
- # corresponding modules only when they are really going to be used.
- from keystone.contrib.federation import idp
metadata = idp.MetadataGenerator().generate_metadata()
print(metadata.to_string())
@@ -598,7 +905,6 @@ class MappingEngineTester(BaseApp):
@classmethod
def main(cls):
- from keystone.contrib.federation import utils as mapping_engine
if not CONF.command.engine_debug:
mapping_engine.LOG.logger.setLevel('WARN')
@@ -644,6 +950,7 @@ class MappingEngineTester(BaseApp):
CMDS = [
+ BootStrap,
DbSync,
DbVersion,
DomainConfigUpload,
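
Everything ``bootstrap`` creates is wrapped in a try/except ``Conflict`` so the command stays safe to re-run; the role handling above is the most involved case because roles cannot be fetched by name directly. A condensed sketch of that pattern (illustrative helper, not part of the patch)::

    from keystone.common import driver_hints
    from keystone import exception


    def ensure_role(role_manager, role_id, role_name):
        """Create the role if missing; return the effective role id."""
        try:
            role_manager.create_role(role_id=role_id,
                                     role={'id': role_id, 'name': role_name})
            return role_id
        except exception.Conflict:
            # Already bootstrapped: look the existing role up by name.
            hints = driver_hints.Hints()
            hints.add_filter('name', role_name)
            return role_manager.list_roles(hints)[0]['id']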
diff --git a/keystone-moon/keystone/cmd/manage.py b/keystone-moon/keystone/cmd/manage.py
index da38278e..707c9933 100644
--- a/keystone-moon/keystone/cmd/manage.py
+++ b/keystone-moon/keystone/cmd/manage.py
@@ -20,7 +20,7 @@ import sys
# If ../../keystone/__init__.py exists, add ../../ to Python search path, so
# that it will override what happens to be installed in
# /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir,
os.pardir,
os.pardir))
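
Switching from ``sys.argv[0]`` to ``__file__`` matters when the script is launched through an installed console-script wrapper: ``argv[0]`` then points at the wrapper (e.g. ``/usr/bin/keystone-manage``) while ``__file__`` still points at the module in the source tree, which is what the top-dir detection needs. A small sketch of the computation::

    import os

    # __file__ resolves to the module's own location, e.g.
    # '<checkout>/keystone/cmd/manage.py'; three os.pardir hops up
    # yield the repository top-dir regardless of how the command
    # itself was invoked.
    possible_topdir = os.path.normpath(os.path.join(
        os.path.abspath(__file__), os.pardir, os.pardir, os.pardir))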
diff --git a/keystone-moon/keystone/common/authorization.py b/keystone-moon/keystone/common/authorization.py
index 2c578dfd..414b9525 100644
--- a/keystone-moon/keystone/common/authorization.py
+++ b/keystone-moon/keystone/common/authorization.py
@@ -31,8 +31,12 @@ It is a dictionary with the following attributes:
* ``token``: Token from the request
* ``user_id``: user ID of the principal
+* ``user_domain_id`` (optional): Domain ID of the principal if the principal
+ has a domain.
* ``project_id`` (optional): project ID of the scoped project if auth is
project-scoped
+* ``project_domain_id`` (optional): Domain ID of the scoped project if auth is
+ project-scoped.
* ``domain_id`` (optional): domain ID of the scoped domain if auth is
domain-scoped
* ``domain_name`` (optional): domain name of the scoped domain if auth is
@@ -64,9 +68,11 @@ def token_to_auth_context(token):
except KeyError:
LOG.warning(_LW('RBAC: Invalid user data in token'))
raise exception.Unauthorized()
+ auth_context['user_domain_id'] = token.user_domain_id
if token.project_scoped:
auth_context['project_id'] = token.project_id
+ auth_context['project_domain_id'] = token.project_domain_id
elif token.domain_scoped:
auth_context['domain_id'] = token.domain_id
auth_context['domain_name'] = token.domain_name
@@ -79,6 +85,8 @@ def token_to_auth_context(token):
auth_context['trustor_id'] = token.trustor_user_id
auth_context['trustee_id'] = token.trustee_user_id
else:
+ # NOTE(lbragstad): These variables will already be set to None but we
+ # add the else statement here for readability.
auth_context['trust_id'] = None
auth_context['trustor_id'] = None
auth_context['trustee_id'] = None
@@ -89,8 +97,13 @@ def token_to_auth_context(token):
if token.oauth_scoped:
auth_context['is_delegated_auth'] = True
- auth_context['consumer_id'] = token.oauth_consumer_id
- auth_context['access_token_id'] = token.oauth_access_token_id
+ auth_context['consumer_id'] = token.oauth_consumer_id
+ auth_context['access_token_id'] = token.oauth_access_token_id
+ else:
+ # NOTE(lbragstad): These variables will already be set to None but we
+ # add the else statement here for readability.
+ auth_context['consumer_id'] = None
+ auth_context['access_token_id'] = None
if token.is_federated_user:
auth_context['group_ids'] = token.federation_group_ids
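
With the new domain, trust, and OAuth fields always populated, a project-scoped token now produces an auth context shaped roughly like this (a sketch; IDs are placeholders)::

    auth_context = {
        'token': token,              # the validated token object
        'user_id': 'USER_ID',
        'user_domain_id': 'default',
        'project_id': 'PROJECT_ID',
        'project_domain_id': 'default',
        'is_delegated_auth': False,
        'trust_id': None,
        'trustor_id': None,
        'trustee_id': None,
        'consumer_id': None,
        'access_token_id': None,
    }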
diff --git a/keystone-moon/keystone/common/cache/_context_cache.py b/keystone-moon/keystone/common/cache/_context_cache.py
new file mode 100644
index 00000000..3895ca1f
--- /dev/null
+++ b/keystone-moon/keystone/common/cache/_context_cache.py
@@ -0,0 +1,129 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""A dogpile.cache proxy that caches objects in the request local cache."""
+from dogpile.cache import api
+from dogpile.cache import proxy
+from oslo_context import context as oslo_context
+from oslo_serialization import msgpackutils
+
+from keystone.models import revoke_model
+
+
+class _RevokeModelHandler(object):
+ # NOTE(morganfainberg): There needs to be reserved "registry" entries set
+ # in oslo_serialization for application-specific handlers. We picked 127
+ # here since it's waaaaaay far out before oslo_serialization will use it.
+ identity = 127
+ handles = (revoke_model.RevokeTree,)
+
+ def __init__(self, registry):
+ self._registry = registry
+
+ def serialize(self, obj):
+ return msgpackutils.dumps(obj.revoke_map,
+ registry=self._registry)
+
+ def deserialize(self, data):
+ revoke_map = msgpackutils.loads(data, registry=self._registry)
+ revoke_tree = revoke_model.RevokeTree()
+ revoke_tree.revoke_map = revoke_map
+ return revoke_tree
+
+
+# Register our new handler.
+_registry = msgpackutils.default_registry
+_registry.frozen = False
+_registry.register(_RevokeModelHandler(registry=_registry))
+_registry.frozen = True
+
+
+class _ResponseCacheProxy(proxy.ProxyBackend):
+
+ __key_pfx = '_request_cache_%s'
+
+ def _get_request_context(self):
+ # Return the current context or a new/empty context.
+ return oslo_context.get_current() or oslo_context.RequestContext()
+
+ def _get_request_key(self, key):
+ return self.__key_pfx % key
+
+ def _set_local_cache(self, key, value, ctx=None):
+ # Set a serialized version of the returned value in local cache for
+ # subsequent calls to the memoized method.
+ if not ctx:
+ ctx = self._get_request_context()
+ serialize = {'payload': value.payload, 'metadata': value.metadata}
+ setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
+ ctx.update_store()
+
+ def _get_local_cache(self, key):
+ # Return the version from our local request cache if it exists.
+ ctx = self._get_request_context()
+ try:
+ value = getattr(ctx, self._get_request_key(key))
+ except AttributeError:
+ return api.NO_VALUE
+
+ value = msgpackutils.loads(value)
+ return api.CachedValue(payload=value['payload'],
+ metadata=value['metadata'])
+
+ def _delete_local_cache(self, key):
+ # On invalidate/delete remove the value from the local request cache
+ ctx = self._get_request_context()
+ try:
+ delattr(ctx, self._get_request_key(key))
+ ctx.update_store()
+ except AttributeError: # nosec
+ # NOTE(morganfainberg): We will simply pass here, this value has
+ # not been cached locally in the request.
+ pass
+
+ def get(self, key):
+ value = self._get_local_cache(key)
+ if value is api.NO_VALUE:
+ value = self.proxied.get(key)
+ if value is not api.NO_VALUE:
+ self._set_local_cache(key, value)
+ return value
+
+ def set(self, key, value):
+ self._set_local_cache(key, value)
+ self.proxied.set(key, value)
+
+ def delete(self, key):
+ self._delete_local_cache(key)
+ self.proxied.delete(key)
+
+ def get_multi(self, keys):
+ values = {}
+ for key in keys:
+ v = self._get_local_cache(key)
+ if v is not api.NO_VALUE:
+ values[key] = v
+ query_keys = set(keys).difference(set(values.keys()))
+ values.update(dict(
+ zip(query_keys, self.proxied.get_multi(query_keys))))
+ return [values[k] for k in keys]
+
+ def set_multi(self, mapping):
+ ctx = self._get_request_context()
+ for k, v in mapping.items():
+ self._set_local_cache(k, v, ctx)
+ self.proxied.set_multi(mapping)
+
+ def delete_multi(self, keys):
+ for k in keys:
+ self._delete_local_cache(k)
+ self.proxied.delete_multi(keys)
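
A rough way to see the proxy in action is to wrap an ordinary dogpile region with it; once an ``oslo.context`` request context exists, reads are answered from the per-request copy instead of the real backend. A sketch only, since ``_ResponseCacheProxy`` is a private class::

    from dogpile.cache import make_region
    from oslo_context import context

    from keystone.common.cache import _context_cache

    region = make_region().configure('dogpile.cache.memory')
    region.wrap(_context_cache._ResponseCacheProxy)

    context.RequestContext()    # becomes the current request context
    region.set('answer', 42)    # lands in the backend and the context
    assert region.get('answer') == 42  # served from the request-local copy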
diff --git a/keystone-moon/keystone/common/cache/backends/memcache_pool.py b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
index f3990b12..bbe4785a 100644
--- a/keystone-moon/keystone/common/cache/backends/memcache_pool.py
+++ b/keystone-moon/keystone/common/cache/backends/memcache_pool.py
@@ -13,49 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""dogpile.cache backend that uses Memcached connection pool"""
+"""This module is deprecated."""
-import functools
-import logging
+from oslo_cache.backends import memcache_pool
+from oslo_log import versionutils
-from dogpile.cache.backends import memcached as memcached_backend
-from keystone.common.cache import _memcache_pool
-
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper to ease backend refactoring
-class ClientProxy(object):
- def __init__(self, client_pool):
- self.client_pool = client_pool
-
- def _run_method(self, __name, *args, **kwargs):
- with self.client_pool.acquire() as client:
- return getattr(client, __name)(*args, **kwargs)
-
- def __getattr__(self, name):
- return functools.partial(self._run_method, name)
-
-
-class PooledMemcachedBackend(memcached_backend.MemcachedBackend):
- # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__
- def __init__(self, arguments):
- super(PooledMemcachedBackend, self).__init__(arguments)
- self.client_pool = _memcache_pool.MemcacheClientPool(
- self.url,
- arguments={
- 'dead_retry': arguments.get('dead_retry', 5 * 60),
- 'socket_timeout': arguments.get('socket_timeout', 3),
- },
- maxsize=arguments.get('pool_maxsize', 10),
- unused_timeout=arguments.get('pool_unused_timeout', 60),
- conn_get_timeout=arguments.get('pool_connection_get_timeout', 10),
- )
-
- # Since all methods in backend just call one of methods of client, this
- # lets us avoid need to hack it too much
- @property
- def client(self):
- return ClientProxy(self.client_pool)
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.memcache_pool backend',
+ in_favor_of='oslo_cache.memcache_pool backend',
+ remove_in=+1)
+class PooledMemcachedBackend(memcache_pool.PooledMemcachedBackend):
+ pass
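
The backend is reduced to a shim whose only job is to log on use; the same ``versionutils`` decorator works for any class that needs a deprecation window (illustrative names, not from the patch)::

    from oslo_log import versionutils


    class NewBackend(object):
        pass


    @versionutils.deprecated(
        versionutils.deprecated.MITAKA,
        what='old backend',
        in_favor_of='NewBackend',
        remove_in=+1)
    class OldBackend(NewBackend):
        pass


    OldBackend()  # logs a deprecation warning, then behaves as NewBackend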
diff --git a/keystone-moon/keystone/common/cache/backends/mongo.py b/keystone-moon/keystone/common/cache/backends/mongo.py
index cb5ad833..861aefed 100644
--- a/keystone-moon/keystone/common/cache/backends/mongo.py
+++ b/keystone-moon/keystone/common/cache/backends/mongo.py
@@ -12,550 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import abc
-import datetime
+from oslo_cache.backends import mongo
+from oslo_log import versionutils
-from dogpile.cache import api
-from dogpile.cache import util as dp_util
-from oslo_log import log
-from oslo_utils import importutils
-from oslo_utils import timeutils
-import six
-from keystone import exception
-from keystone.i18n import _, _LW
-
-
-NO_VALUE = api.NO_VALUE
-LOG = log.getLogger(__name__)
-
-
-class MongoCacheBackend(api.CacheBackend):
- """A MongoDB based caching backend implementing dogpile backend APIs.
-
- Arguments accepted in the arguments dictionary:
-
- :param db_hosts: string (required), hostname or IP address of the
- MongoDB server instance. This can be a single MongoDB connection URI,
- or a list of MongoDB connection URIs.
-
- :param db_name: string (required), the name of the database to be used.
-
- :param cache_collection: string (required), the name of collection to store
- cached data.
- *Note:* Different collection name can be provided if there is need to
- create separate container (i.e. collection) for cache data. So region
- configuration is done per collection.
-
- Following are optional parameters for MongoDB backend configuration,
-
- :param username: string, the name of the user to authenticate.
-
- :param password: string, the password of the user to authenticate.
-
- :param max_pool_size: integer, the maximum number of connections that the
- pool will open simultaneously. By default the pool size is 10.
-
- :param w: integer, write acknowledgement for MongoDB client
-
- If not provided, then no default is set on MongoDB and then write
- acknowledgement behavior occurs as per MongoDB default. This parameter
- name is same as what is used in MongoDB docs. This value is specified
- at collection level so its applicable to `cache_collection` db write
- operations.
-
- If this is a replica set, write operations will block until they have
- been replicated to the specified number or tagged set of servers.
- Setting w=0 disables write acknowledgement and all other write concern
- options.
-
- :param read_preference: string, the read preference mode for MongoDB client
- Expected value is ``primary``, ``primaryPreferred``, ``secondary``,
- ``secondaryPreferred``, or ``nearest``. This read_preference is
- specified at collection level so its applicable to `cache_collection`
- db read operations.
-
- :param use_replica: boolean, flag to indicate if replica client to be
- used. Default is `False`. `replicaset_name` value is required if
- `True`.
-
- :param replicaset_name: string, name of replica set.
- Becomes required if `use_replica` is `True`
-
- :param son_manipulator: string, name of class with module name which
- implements MongoDB SONManipulator.
- Default manipulator used is :class:`.BaseTransform`.
-
- This manipulator is added per database. In multiple cache
- configurations, the manipulator name should be same if same
- database name ``db_name`` is used in those configurations.
-
- SONManipulator is used to manipulate custom data types as they are
- saved or retrieved from MongoDB. Custom impl is only needed if cached
- data is custom class and needs transformations when saving or reading
- from db. If dogpile cached value contains built-in data types, then
- BaseTransform class is sufficient as it already handles dogpile
- CachedValue class transformation.
-
- :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
- time-to-live value.
- If value is greater than 0, then its assumed that cache_collection
- needs to be TTL type (has index at 'doc_date' field).
- By default, the value is -1 and its disabled.
- Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
-
- .. NOTE::
-
- This parameter is different from Dogpile own
- expiration_time, which is the number of seconds after which Dogpile
- will consider the value to be expired. When Dogpile considers a
- value to be expired, it continues to use the value until generation
- of a new value is complete, when using CacheRegion.get_or_create().
- Therefore, if you are setting `mongo_ttl_seconds`, you will want to
- make sure it is greater than expiration_time by at least enough
- seconds for new values to be generated, else the value would not
- be available during a regeneration, forcing all threads to wait for
- a regeneration each time a value expires.
-
- :param ssl: boolean, If True, create the connection to the server
- using SSL. Default is `False`. Client SSL connection parameters depends
- on server side SSL setup. For further reference on SSL configuration:
- <http://docs.mongodb.org/manual/tutorial/configure-ssl/>
-
- :param ssl_keyfile: string, the private keyfile used to identify the
- local connection against mongod. If included with the certfile then
- only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
-
- :param ssl_certfile: string, the certificate file used to identify the
- local connection against mongod. Used only when `ssl` is `True`.
-
- :param ssl_ca_certs: string, the ca_certs file contains a set of
- concatenated 'certification authority' certificates, which are used to
- validate certificates passed from the other end of the connection.
- Used only when `ssl` is `True`.
-
- :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
- a certificate is required from the other side of the connection, and
- whether it will be validated if provided. It must be one of the three
- values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
- (not required, but validated if provided), or
- ``ssl.CERT_REQUIRED`` (required and validated). If the value of this
- parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
- must point to a file of CA certificates. Used only when `ssl`
- is `True`.
-
- Rest of arguments are passed to mongo calls for read, write and remove.
- So related options can be specified to pass to these operations.
-
- Further details of various supported arguments can be referred from
- <http://api.mongodb.org/python/current/api/pymongo/>
-
- """
-
- def __init__(self, arguments):
- self.api = MongoApi(arguments)
-
- @dp_util.memoized_property
- def client(self):
- """Initializes MongoDB connection and collection defaults.
-
- This initialization is done only once and performed as part of lazy
- inclusion of MongoDB dependency i.e. add imports only if related
- backend is used.
-
- :return: :class:`.MongoApi` instance
- """
- self.api.get_cache_collection()
- return self.api
-
- def get(self, key):
- value = self.client.get(key)
- if value is None:
- return NO_VALUE
- else:
- return value
-
- def get_multi(self, keys):
- values = self.client.get_multi(keys)
- return [
- NO_VALUE if key not in values
- else values[key] for key in keys
- ]
-
- def set(self, key, value):
- self.client.set(key, value)
-
- def set_multi(self, mapping):
- self.client.set_multi(mapping)
-
- def delete(self, key):
- self.client.delete(key)
-
- def delete_multi(self, keys):
- self.client.delete_multi(keys)
-
-
-class MongoApi(object):
- """Class handling MongoDB specific functionality.
-
- This class uses PyMongo APIs internally to create database connection
- with configured pool size, ensures unique index on key, does database
- authentication and ensure TTL collection index if configured so.
- This class also serves as handle to cache collection for dogpile cache
- APIs.
-
- In a single deployment, multiple cache configuration can be defined. In
- that case of multiple cache collections usage, db client connection pool
- is shared when cache collections are within same database.
- """
-
- # class level attributes for re-use of db client connection and collection
- _DB = {} # dict of db_name: db connection reference
- _MONGO_COLLS = {} # dict of cache_collection : db collection reference
-
- def __init__(self, arguments):
- self._init_args(arguments)
- self._data_manipulator = None
-
- def _init_args(self, arguments):
- """Helper logic for collecting and parsing MongoDB specific arguments.
-
- The arguments passed in are separated out in connection specific
- setting and rest of arguments are passed to create/update/delete
- db operations.
- """
- self.conn_kwargs = {} # connection specific arguments
-
- self.hosts = arguments.pop('db_hosts', None)
- if self.hosts is None:
- msg = _('db_hosts value is required')
- raise exception.ValidationError(message=msg)
-
- self.db_name = arguments.pop('db_name', None)
- if self.db_name is None:
- msg = _('database db_name is required')
- raise exception.ValidationError(message=msg)
-
- self.cache_collection = arguments.pop('cache_collection', None)
- if self.cache_collection is None:
- msg = _('cache_collection name is required')
- raise exception.ValidationError(message=msg)
-
- self.username = arguments.pop('username', None)
- self.password = arguments.pop('password', None)
- self.max_pool_size = arguments.pop('max_pool_size', 10)
-
- self.w = arguments.pop('w', -1)
- try:
- self.w = int(self.w)
- except ValueError:
- msg = _('integer value expected for w (write concern attribute)')
- raise exception.ValidationError(message=msg)
-
- self.read_preference = arguments.pop('read_preference', None)
-
- self.use_replica = arguments.pop('use_replica', False)
- if self.use_replica:
- if arguments.get('replicaset_name') is None:
- msg = _('replicaset_name required when use_replica is True')
- raise exception.ValidationError(message=msg)
- self.replicaset_name = arguments.get('replicaset_name')
-
- self.son_manipulator = arguments.pop('son_manipulator', None)
-
- # set if mongo collection needs to be TTL type.
- # This needs to be max ttl for any cache entry.
- # By default, -1 means don't use TTL collection.
- # With ttl set, it creates related index and have doc_date field with
- # needed expiration interval
- self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
- try:
- self.ttl_seconds = int(self.ttl_seconds)
- except ValueError:
- msg = _('integer value expected for mongo_ttl_seconds')
- raise exception.ValidationError(message=msg)
-
- self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
- if self.conn_kwargs['ssl']:
- ssl_keyfile = arguments.pop('ssl_keyfile', None)
- ssl_certfile = arguments.pop('ssl_certfile', None)
- ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
- ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
- if ssl_keyfile:
- self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
- if ssl_certfile:
- self.conn_kwargs['ssl_certfile'] = ssl_certfile
- if ssl_ca_certs:
- self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
- if ssl_cert_reqs:
- self.conn_kwargs['ssl_cert_reqs'] = (
- self._ssl_cert_req_type(ssl_cert_reqs))
-
- # rest of arguments are passed to mongo crud calls
- self.meth_kwargs = arguments
-
- def _ssl_cert_req_type(self, req_type):
- try:
- import ssl
- except ImportError:
- raise exception.ValidationError(_('no ssl support available'))
- req_type = req_type.upper()
- try:
- return {
- 'NONE': ssl.CERT_NONE,
- 'OPTIONAL': ssl.CERT_OPTIONAL,
- 'REQUIRED': ssl.CERT_REQUIRED
- }[req_type]
- except KeyError:
- msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
- '"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
- raise exception.ValidationError(message=msg)
-
- def _get_db(self):
- # defer imports until backend is used
- global pymongo
- import pymongo
- if self.use_replica:
- connection = pymongo.MongoReplicaSetClient(
- host=self.hosts, replicaSet=self.replicaset_name,
- max_pool_size=self.max_pool_size, **self.conn_kwargs)
- else: # used for standalone node or mongos in sharded setup
- connection = pymongo.MongoClient(
- host=self.hosts, max_pool_size=self.max_pool_size,
- **self.conn_kwargs)
-
- database = getattr(connection, self.db_name)
-
- self._assign_data_mainpulator()
- database.add_son_manipulator(self._data_manipulator)
- if self.username and self.password:
- database.authenticate(self.username, self.password)
- return database
-
- def _assign_data_mainpulator(self):
- if self._data_manipulator is None:
- if self.son_manipulator:
- self._data_manipulator = importutils.import_object(
- self.son_manipulator)
- else:
- self._data_manipulator = BaseTransform()
-
- def _get_doc_date(self):
- if self.ttl_seconds > 0:
- expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
- doc_date = timeutils.utcnow() + expire_delta
- else:
- doc_date = timeutils.utcnow()
- return doc_date
-
- def get_cache_collection(self):
- if self.cache_collection not in self._MONGO_COLLS:
- global pymongo
- import pymongo
- # re-use db client connection if already defined as part of
- # earlier dogpile cache configuration
- if self.db_name not in self._DB:
- self._DB[self.db_name] = self._get_db()
- coll = getattr(self._DB[self.db_name], self.cache_collection)
-
- self._assign_data_mainpulator()
- if self.read_preference:
- # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name
- f = getattr(pymongo.read_preferences,
- 'read_pref_mode_from_name', None)
- if not f:
- f = pymongo.read_preferences.mongos_enum
- self.read_preference = f(self.read_preference)
- coll.read_preference = self.read_preference
- if self.w > -1:
- coll.write_concern['w'] = self.w
- if self.ttl_seconds > 0:
- kwargs = {'expireAfterSeconds': self.ttl_seconds}
- coll.ensure_index('doc_date', cache_for=5, **kwargs)
- else:
- self._validate_ttl_index(coll, self.cache_collection,
- self.ttl_seconds)
- self._MONGO_COLLS[self.cache_collection] = coll
-
- return self._MONGO_COLLS[self.cache_collection]
-
- def _get_cache_entry(self, key, value, meta, doc_date):
- """MongoDB cache data representation.
-
- Storing cache key as ``_id`` field as MongoDB by default creates
- unique index on this field. So no need to create separate field and
- index for storing cache key. Cache data has additional ``doc_date``
- field for MongoDB TTL collection support.
- """
- return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
-
- def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
- """Checks if existing TTL index is removed on a collection.
-
- This logs warning when existing collection has TTL index defined and
- new cache configuration tries to disable index with
- ``mongo_ttl_seconds < 0``. In that case, existing index needs
- to be addressed first to make new configuration effective.
- Refer to MongoDB documentation around TTL index for further details.
- """
- indexes = collection.index_information()
- for indx_name, index_data in indexes.items():
- if all(k in index_data for k in ('key', 'expireAfterSeconds')):
- existing_value = index_data['expireAfterSeconds']
- fld_present = 'doc_date' in index_data['key'][0]
- if fld_present and existing_value > -1 and ttl_seconds < 1:
- msg = _LW('TTL index already exists on db collection '
- '<%(c_name)s>, remove index <%(indx_name)s> '
- 'first to make updated mongo_ttl_seconds value '
- 'to be effective')
- LOG.warn(msg, {'c_name': coll_name,
- 'indx_name': indx_name})
-
- def get(self, key):
- critieria = {'_id': key}
- result = self.get_cache_collection().find_one(spec_or_id=critieria,
- **self.meth_kwargs)
- if result:
- return result['value']
- else:
- return None
-
- def get_multi(self, keys):
- db_results = self._get_results_as_dict(keys)
- return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)}
-
- def _get_results_as_dict(self, keys):
- critieria = {'_id': {'$in': keys}}
- db_results = self.get_cache_collection().find(spec=critieria,
- **self.meth_kwargs)
- return {doc['_id']: doc for doc in db_results}
-
- def set(self, key, value):
- doc_date = self._get_doc_date()
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- spec = {'_id': key}
- # find and modify does not have manipulator support
- # so need to do conversion as part of input document
- ref = self._data_manipulator.transform_incoming(ref, self)
- self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
- **self.meth_kwargs)
-
- def set_multi(self, mapping):
- """Insert multiple documents specified as key, value pairs.
-
- In this case, multiple documents can be added via insert provided they
- do not exist.
- Update of multiple existing documents is done one by one
- """
- doc_date = self._get_doc_date()
- insert_refs = []
- update_refs = []
- existing_docs = self._get_results_as_dict(list(mapping.keys()))
- for key, value in mapping.items():
- ref = self._get_cache_entry(key, value.payload, value.metadata,
- doc_date)
- if key in existing_docs:
- ref['_id'] = existing_docs[key]['_id']
- update_refs.append(ref)
- else:
- insert_refs.append(ref)
- if insert_refs:
- self.get_cache_collection().insert(insert_refs, manipulate=True,
- **self.meth_kwargs)
- for upd_doc in update_refs:
- self.get_cache_collection().save(upd_doc, manipulate=True,
- **self.meth_kwargs)
-
- def delete(self, key):
- critieria = {'_id': key}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
- def delete_multi(self, keys):
- critieria = {'_id': {'$in': keys}}
- self.get_cache_collection().remove(spec_or_id=critieria,
- **self.meth_kwargs)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class AbstractManipulator(object):
- """Abstract class with methods which need to be implemented for custom
- manipulation.
-
- Adding this as a base class for :class:`.BaseTransform` instead of adding
- import dependency of pymongo specific class i.e.
- `pymongo.son_manipulator.SONManipulator` and using that as base class.
- This is done to avoid pymongo dependency if MongoDB backend is not used.
- """
- @abc.abstractmethod
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB.
-
- :param son: the SON object to be inserted into the database
- :param collection: the collection the object is being inserted into
-
- :returns: transformed SON object
-
- """
- raise exception.NotImplemented() # pragma: no cover
-
- @abc.abstractmethod
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB.
-
- :param son: the SON object being retrieved from the database
- :param collection: the collection this object was stored in
-
- :returns: transformed SON object
- """
- raise exception.NotImplemented() # pragma: no cover
-
- def will_copy(self):
- """Will this SON manipulator make a copy of the incoming document?
-
- Derived classes that do need to make a copy should override this
- method, returning `True` instead of `False`.
-
- :returns: boolean
- """
- return False
-
-
-class BaseTransform(AbstractManipulator):
- """Base transformation class to store and read dogpile cached data
- from MongoDB.
-
- This is needed as dogpile internally stores data as a custom class
- i.e. dogpile.cache.api.CachedValue
-
- Note: Custom manipulator needs to always override ``transform_incoming``
- and ``transform_outgoing`` methods. MongoDB manipulator logic specifically
- checks that overridden method in instance and its super are different.
- """
-
- def transform_incoming(self, son, collection):
- """Used while saving data to MongoDB."""
- for (key, value) in list(son.items()):
- if isinstance(value, api.CachedValue):
- son[key] = value.payload # key is 'value' field here
- son['meta'] = value.metadata
- elif isinstance(value, dict): # Make sure we recurse into sub-docs
- son[key] = self.transform_incoming(value, collection)
- return son
-
- def transform_outgoing(self, son, collection):
- """Used while reading data from MongoDB."""
- metadata = None
- # make sure its top level dictionary with all expected fields names
- # present
- if isinstance(son, dict) and all(k in son for k in
- ('_id', 'value', 'meta', 'doc_date')):
- payload = son.pop('value', None)
- metadata = son.pop('meta', None)
- for (key, value) in list(son.items()):
- if isinstance(value, dict):
- son[key] = self.transform_outgoing(value, collection)
- if metadata is not None:
- son['value'] = api.CachedValue(payload, metadata)
- return son
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.cache.mongo backend',
+ in_favor_of='oslo_cache.mongo backend',
+ remove_in=+1)
+class MongoCacheBackend(mongo.MongoCacheBackend):
+ pass
diff --git a/keystone-moon/keystone/common/cache/backends/noop.py b/keystone-moon/keystone/common/cache/backends/noop.py
index 38329c94..eda06ec8 100644
--- a/keystone-moon/keystone/common/cache/backends/noop.py
+++ b/keystone-moon/keystone/common/cache/backends/noop.py
@@ -13,11 +13,17 @@
# under the License.
from dogpile.cache import api
+from oslo_log import versionutils
NO_VALUE = api.NO_VALUE
+@versionutils.deprecated(
+ versionutils.deprecated.MITAKA,
+ what='keystone.common.cache.noop backend',
+ in_favor_of="dogpile.cache's Null backend",
+ remove_in=+1)
class NoopCacheBackend(api.CacheBackend):
"""A no op backend as a default caching backend.
@@ -27,6 +33,7 @@ class NoopCacheBackend(api.CacheBackend):
mechanism to clean up its internal dict and therefore could cause run-away
memory utilization.
"""
+
def __init__(self, *args):
return
diff --git a/keystone-moon/keystone/common/cache/core.py b/keystone-moon/keystone/common/cache/core.py
index 306587b3..6bb0af51 100644
--- a/keystone-moon/keystone/common/cache/core.py
+++ b/keystone-moon/keystone/common/cache/core.py
@@ -13,23 +13,41 @@
# under the License.
"""Keystone Caching Layer Implementation."""
-
import dogpile.cache
-from dogpile.cache import proxy
-from dogpile.cache import util
+from dogpile.cache import api
+from oslo_cache import core as cache
from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import importutils
-from keystone import exception
-from keystone.i18n import _, _LE
+from keystone.common.cache import _context_cache
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
+CACHE_REGION = cache.create_region()
+
+
+def configure_cache(region=None):
+ if region is None:
+ region = CACHE_REGION
+ # NOTE(morganfainberg): running cache.configure_cache_region()
+ # sets region.is_configured, this must be captured before
+ # cache.configure_cache_region is called.
+ configured = region.is_configured
+ cache.configure_cache_region(CONF, region)
+ # Only wrap the region if it was not configured. This should be pushed
+ # to oslo_cache lib somehow.
+ if not configured:
+ region.wrap(_context_cache._ResponseCacheProxy)
+
+
+def get_memoization_decorator(group, expiration_group=None, region=None):
+ if region is None:
+ region = CACHE_REGION
+ return cache.get_memoization_decorator(CONF, region, group,
+ expiration_group=expiration_group)
-make_region = dogpile.cache.make_region
+# NOTE(stevemar): When the memcache_pool, mongo and noop backends are
+# removed, we will no longer need to register them here.
dogpile.cache.register_backend(
'keystone.common.cache.noop',
'keystone.common.cache.backends.noop',
@@ -46,263 +64,61 @@ dogpile.cache.register_backend(
'PooledMemcachedBackend')
-class DebugProxy(proxy.ProxyBackend):
- """Extra Logging ProxyBackend."""
- # NOTE(morganfainberg): Pass all key/values through repr to ensure we have
- # a clean description of the information. Without use of repr, it might
- # be possible to run into encode/decode error(s). For logging/debugging
- # purposes encode/decode is irrelevant and we should be looking at the
- # data exactly as it stands.
-
- def get(self, key):
- value = self.proxied.get(key)
- LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return value
-
- def get_multi(self, keys):
- values = self.proxied.get_multi(keys)
- LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"',
- {'keys': keys, 'values': values})
- return values
-
- def set(self, key, value):
- LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"',
- {'key': key, 'value': value})
- return self.proxied.set(key, value)
-
- def set_multi(self, keys):
- LOG.debug('CACHE_SET_MULTI: "%r"', keys)
- self.proxied.set_multi(keys)
-
- def delete(self, key):
- self.proxied.delete(key)
- LOG.debug('CACHE_DELETE: "%r"', key)
-
- def delete_multi(self, keys):
- LOG.debug('CACHE_DELETE_MULTI: "%r"', keys)
- self.proxied.delete_multi(keys)
-
-
-def build_cache_config():
- """Build the cache region dictionary configuration.
-
- :returns: dict
+# TODO(morganfainberg): Move this logic up into oslo.cache directly
+# so we can handle region-wide invalidations, or alternatively propose
+# a fix to dogpile.cache to make region-wide invalidation work across
+# distributed processes.
+class _RegionInvalidator(object):
+
+ def __init__(self, region, region_name):
+ self.region = region
+ self.region_name = region_name
+ region_key = '_RegionExpiration.%(type)s.%(region_name)s'
+ self.soft_region_key = region_key % {'type': 'soft',
+ 'region_name': self.region_name}
+ self.hard_region_key = region_key % {'type': 'hard',
+ 'region_name': self.region_name}
+
+ @property
+ def hard_invalidated(self):
+ invalidated = self.region.backend.get(self.hard_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @hard_invalidated.setter
+ def hard_invalidated(self, value):
+ self.region.set(self.hard_region_key, value)
+
+ @hard_invalidated.deleter
+ def hard_invalidated(self):
+ self.region.delete(self.hard_region_key)
+
+ @property
+ def soft_invalidated(self):
+ invalidated = self.region.backend.get(self.soft_region_key)
+ if invalidated is not api.NO_VALUE:
+ return invalidated.payload
+ return None
+
+ @soft_invalidated.setter
+ def soft_invalidated(self, value):
+ self.region.set(self.soft_region_key, value)
+
+ @soft_invalidated.deleter
+ def soft_invalidated(self):
+ self.region.delete(self.soft_region_key)
+
+
+def apply_invalidation_patch(region, region_name):
+ """Patch the region interfaces to ensure we share the expiration time.
+
+    This method effectively patches region.invalidate by replacing
+    region._hard_invalidated and region._soft_invalidated.
"""
- prefix = CONF.cache.config_prefix
- conf_dict = {}
- conf_dict['%s.backend' % prefix] = CONF.cache.backend
- conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time
- for argument in CONF.cache.backend_argument:
- try:
- (argname, argvalue) = argument.split(':', 1)
- except ValueError:
- msg = _LE('Unable to build cache config-key. Expected format '
- '"<argname>:<value>". Skipping unknown format: %s')
- LOG.error(msg, argument)
- continue
-
- arg_key = '.'.join([prefix, 'arguments', argname])
- conf_dict[arg_key] = argvalue
-
- LOG.debug('Keystone Cache Config: %s', conf_dict)
- # NOTE(yorik-sar): these arguments will be used for memcache-related
- # backends. Use setdefault for url to support old-style setting through
- # backend_argument=url:127.0.0.1:11211
- conf_dict.setdefault('%s.arguments.url' % prefix,
- CONF.cache.memcache_servers)
- for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
- 'pool_unused_timeout', 'pool_connection_get_timeout'):
- value = getattr(CONF.cache, 'memcache_' + arg)
- conf_dict['%s.arguments.%s' % (prefix, arg)] = value
-
- return conf_dict
-
-
-def configure_cache_region(region):
- """Configure a cache region.
-
- :param region: optional CacheRegion object, if not provided a new region
- will be instantiated
- :raises: exception.ValidationError
- :returns: dogpile.cache.CacheRegion
- """
- if not isinstance(region, dogpile.cache.CacheRegion):
- raise exception.ValidationError(
- _('region not type dogpile.cache.CacheRegion'))
-
- if not region.is_configured:
- # NOTE(morganfainberg): this is how you tell if a region is configured.
- # There is a request logged with dogpile.cache upstream to make this
- # easier / less ugly.
-
- config_dict = build_cache_config()
- region.configure_from_config(config_dict,
- '%s.' % CONF.cache.config_prefix)
-
- if CONF.cache.debug_cache_backend:
- region.wrap(DebugProxy)
-
- # NOTE(morganfainberg): if the backend requests the use of a
- # key_mangler, we should respect that key_mangler function. If a
- # key_mangler is not defined by the backend, use the sha1_mangle_key
- # mangler provided by dogpile.cache. This ensures we always use a fixed
- # size cache-key.
- if region.key_mangler is None:
- region.key_mangler = util.sha1_mangle_key
-
- for class_path in CONF.cache.proxies:
- # NOTE(morganfainberg): if we have any proxy wrappers, we should
- # ensure they are added to the cache region's backend. Since
- # configure_from_config doesn't handle the wrap argument, we need
- # to manually add the Proxies. For information on how the
- # ProxyBackends work, see the dogpile.cache documents on
- # "changing-backend-behavior"
- cls = importutils.import_class(class_path)
- LOG.debug("Adding cache-proxy '%s' to backend.", class_path)
- region.wrap(cls)
-
- return region
-
-
-def get_should_cache_fn(section):
- """Build a function that returns a config section's caching status.
-
- For any given driver in keystone that has caching capabilities, a boolean
- config option for that driver's section (e.g. ``token``) should exist and
- default to ``True``. This function will use that value to tell the caching
- decorator if caching for that driver is enabled. To properly use this
- with the decorator, pass this function the configuration section and assign
- the result to a variable. Pass the new variable to the caching decorator
- as the named argument ``should_cache_fn``. e.g.::
-
- from keystone.common import cache
-
- SHOULD_CACHE = cache.get_should_cache_fn('token')
-
- @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :returns: function reference
- """
- def should_cache(value):
- if not CONF.cache.enabled:
- return False
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'caching', True)
- return should_cache
-
-
-def get_expiration_time_fn(section):
- """Build a function that returns a config section's expiration time status.
-
- For any given driver in keystone that has caching capabilities, an int
- config option called ``cache_time`` for that driver's section
- (e.g. ``token``) should exist and typically default to ``None``. This
- function will use that value to tell the caching decorator of the TTL
- override for caching the resulting objects. If the value of the config
- option is ``None`` the default value provided in the
- ``[cache] expiration_time`` option will be used by the decorator. The
- default may be set to something other than ``None`` in cases where the
- caching TTL should not be tied to the global default(s) (e.g.
- revocation_list changes very infrequently and can be cached for >1h by
- default).
-
- To properly use this with the decorator, pass this function the
- configuration section and assign the result to a variable. Pass the new
- variable to the caching decorator as the named argument
- ``expiration_time``. e.g.::
-
- from keystone.common import cache
-
- EXPIRATION_TIME = cache.get_expiration_time_fn('token')
-
- @cache.on_arguments(expiration_time=EXPIRATION_TIME)
- def function(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :rtype: function reference
- """
- def get_expiration_time():
- conf_group = getattr(CONF, section)
- return getattr(conf_group, 'cache_time', None)
- return get_expiration_time
-
-
-def key_generate_to_str(s):
- # NOTE(morganfainberg): Since we need to stringify all arguments, attempt
- # to stringify and handle the Unicode error explicitly as needed.
- try:
- return str(s)
- except UnicodeEncodeError:
- return s.encode('utf-8')
-
-
-def function_key_generator(namespace, fn, to_str=key_generate_to_str):
- # NOTE(morganfainberg): This wraps dogpile.cache's default
- # function_key_generator to change the default to_str mechanism.
- return util.function_key_generator(namespace, fn, to_str=to_str)
-
-
-REGION = dogpile.cache.make_region(
- function_key_generator=function_key_generator)
-on_arguments = REGION.cache_on_arguments
-
-
-def get_memoization_decorator(section, expiration_section=None):
- """Build a function based on the `on_arguments` decorator for the section.
-
- For any given driver in Keystone that has caching capabilities, a
- pair of functions is required to properly determine the status of the
- caching capabilities (a toggle to indicate caching is enabled and any
- override of the default TTL for cached data). This function will return
- an object that has the memoization decorator ``on_arguments``
- pre-configured for the driver.
-
- Example usage::
-
- from keystone.common import cache
-
- MEMOIZE = cache.get_memoization_decorator(section='token')
-
- @MEMOIZE
- def function(arg1, arg2):
- ...
-
-
- ALTERNATE_MEMOIZE = cache.get_memoization_decorator(
- section='token', expiration_section='revoke')
-
- @ALTERNATE_MEMOIZE
- def function2(arg1, arg2):
- ...
-
- :param section: name of the configuration section to examine
- :type section: string
- :param expiration_section: name of the configuration section to examine
- for the expiration option. This will fall back
- to using ``section`` if the value is unspecified
- or ``None``
- :type expiration_section: string
- :rtype: function reference
- """
- if expiration_section is None:
- expiration_section = section
- should_cache = get_should_cache_fn(section)
- expiration_time = get_expiration_time_fn(expiration_section)
-
- memoize = REGION.cache_on_arguments(should_cache_fn=should_cache,
- expiration_time=expiration_time)
-
- # Make sure the actual "should_cache" and "expiration_time" methods are
- # available. This is potentially interesting/useful to pre-seed cache
- # values.
- memoize.should_cache = should_cache
- memoize.get_expiration_time = expiration_time
-
- return memoize
+ # Patch the region object. This logic needs to be moved up into dogpile
+ # itself. Patching the internal interfaces, unfortunately, is the only
+ # way to handle this at the moment.
+ invalidator = _RegionInvalidator(region=region, region_name=region_name)
+ setattr(region, '_hard_invalidated', invalidator.hard_invalidated)
+ setattr(region, '_soft_invalidated', invalidator.soft_invalidated)
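A hedged sketch of wiring a region up with this patch, written against the names in this module (the region name 'catalog' is illustrative); the intent is that the invalidation markers live in the shared cache backend rather than in per-process memory::

    region = cache.create_region()  # oslo_cache.core, imported above
    configure_cache(region=region)
    apply_invalidation_patch(region, region_name='catalog')
    region.invalidate(hard=True)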
diff --git a/keystone-moon/keystone/common/config.py b/keystone-moon/keystone/common/config.py
index b42b29d6..56f419b6 100644
--- a/keystone-moon/keystone/common/config.py
+++ b/keystone-moon/keystone/common/config.py
@@ -12,23 +12,48 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
+import os
+
+from oslo_cache import core as cache
from oslo_config import cfg
+from oslo_log import log
import oslo_messaging
+from oslo_middleware import cors
import passlib.utils
+from keystone import exception
+
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1']
_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
_SSO_CALLBACK = '/etc/keystone/sso_callback_template.html'
+_DEPRECATE_PKI_MSG = ('PKI token support has been deprecated in the M '
+ 'release and will be removed in the O release. Fernet '
+ 'or UUID tokens are recommended.')
+
+_DEPRECATE_INHERIT_MSG = ('The option to enable the OS-INHERIT extension has '
+ 'been deprecated in the M release and will be '
+ 'removed in the O release. The OS-INHERIT extension '
+ 'will be enabled by default.')
+
+_DEPRECATE_EP_MSG = ('The option to enable the OS-ENDPOINT-POLICY extension '
+ 'has been deprecated in the M release and will be '
+ 'removed in the O release. The OS-ENDPOINT-POLICY '
+ 'extension will be enabled by default.')
+
FILE_OPTIONS = {
None: [
- cfg.StrOpt('admin_token', secret=True, default='ADMIN',
+ cfg.StrOpt('admin_token', secret=True, default=None,
help='A "shared secret" that can be used to bootstrap '
'Keystone. This "token" does not represent a user, '
- 'and carries no explicit authorization. To disable '
+ 'and carries no explicit authorization. If set '
+ 'to `None`, the value is ignored and the '
+                        '`admin_token` login mechanism is effectively '
+ 'disabled. To completely disable `admin_token` '
'in production (highly recommended), remove '
'AdminTokenAuthMiddleware from your paste '
'application pipelines (for example, in '
@@ -54,9 +79,10 @@ FILE_OPTIONS = {
'(e.g. /prefix/v3) or the endpoint should be found '
'on a different server.'),
cfg.IntOpt('max_project_tree_depth', default=5,
- help='Maximum depth of the project hierarchy. WARNING: '
- 'setting it to a large value may adversely impact '
- 'performance.'),
+ help='Maximum depth of the project hierarchy, excluding '
+ 'the project acting as a domain at the top of the '
+ 'hierarchy. WARNING: setting it to a large value may '
+ 'adversely impact performance.'),
cfg.IntOpt('max_param_size', default=64,
help='Limit the sizes of user & project ID/names.'),
# we allow tokens to be a bit larger to accommodate PKI
@@ -96,7 +122,10 @@ FILE_OPTIONS = {
'domain_id. Allowing such movement is not '
'recommended if the scope of a domain admin is being '
'restricted by use of an appropriate policy file '
- '(see policy.v3cloudsample as an example).'),
+ '(see policy.v3cloudsample as an example). This '
+ 'ability is deprecated and will be removed in a '
+ 'future release.',
+ deprecated_for_removal=True),
cfg.BoolOpt('strict_password_check', default=False,
help='If set to true, strict password length checking is '
'performed for password manipulation. If a password '
@@ -104,11 +133,16 @@ FILE_OPTIONS = {
'with an HTTP 403 Forbidden error. If set to false, '
'passwords are automatically truncated to the '
'maximum length.'),
- cfg.StrOpt('secure_proxy_ssl_header',
+ cfg.StrOpt('secure_proxy_ssl_header', default='HTTP_X_FORWARDED_PROTO',
help='The HTTP header used to determine the scheme for the '
'original request, even if it was removed by an SSL '
- 'terminating proxy. Typical value is '
- '"HTTP_X_FORWARDED_PROTO".'),
+ 'terminating proxy.'),
+ cfg.BoolOpt('insecure_debug', default=False,
+                    help='If set to true, the server will return information '
+ 'in the response that may allow an unauthenticated '
+ 'or authenticated user to get more information than '
+ 'normal, such as why authentication failed. This may '
+ 'be useful for debugging but is insecure.'),
],
'identity': [
cfg.StrOpt('default_domain_id', default='default',
@@ -197,11 +231,17 @@ FILE_OPTIONS = {
'already have assignments for users and '
'groups from the default LDAP domain, and it is '
'acceptable for Keystone to provide the different '
- 'IDs to clients than it did previously. Typically '
+ 'IDs to clients than it did previously. Typically '
'this means that the only time you can set this '
'value to False is when configuring a fresh '
'installation.'),
],
+ 'shadow_users': [
+ cfg.StrOpt('driver',
+ default='sql',
+ help='Entrypoint for the shadow users backend driver '
+ 'in the keystone.identity.shadow_users namespace.'),
+ ],
'trust': [
cfg.BoolOpt('enabled', default=True,
help='Delegation and impersonation features can be '
@@ -215,10 +255,14 @@ FILE_OPTIONS = {
help='Entrypoint for the trust backend driver in the '
'keystone.trust namespace.')],
'os_inherit': [
- cfg.BoolOpt('enabled', default=False,
+ cfg.BoolOpt('enabled', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_INHERIT_MSG,
help='role-assignment inheritance to projects from '
'owning domain or from projects higher in the '
- 'hierarchy can be optionally enabled.'),
+ 'hierarchy can be optionally disabled. In the '
+ 'future, this option will be removed and the '
+                        'hierarchy will always be enabled.'),
],
'fernet_tokens': [
cfg.StrOpt('key_repository',
@@ -279,12 +323,17 @@ FILE_OPTIONS = {
'allow_rescoped_scoped_token to false prevents a user '
'from exchanging a scoped token for any other token.'),
cfg.StrOpt('hash_algorithm', default='md5',
- help="The hash algorithm to use for PKI tokens. This can "
- "be set to any algorithm that hashlib supports. "
- "WARNING: Before changing this value, the auth_token "
- "middleware must be configured with the "
- "hash_algorithms, otherwise token revocation will "
- "not be processed correctly."),
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
+ help='The hash algorithm to use for PKI tokens. This can '
+ 'be set to any algorithm that hashlib supports. '
+ 'WARNING: Before changing this value, the auth_token '
+ 'middleware must be configured with the '
+ 'hash_algorithms, otherwise token revocation will '
+ 'not be processed correctly.'),
+ cfg.BoolOpt('infer_roles', default=True,
+                    help='Add roles to a token that are not explicitly added, '
+ 'but that are linked implicitly to other roles.'),
],
'revoke': [
cfg.StrOpt('driver',
@@ -306,82 +355,6 @@ FILE_OPTIONS = {
deprecated_opts=[cfg.DeprecatedOpt(
'revocation_cache_time', group='token')]),
],
- 'cache': [
- cfg.StrOpt('config_prefix', default='cache.keystone',
- help='Prefix for building the configuration dictionary '
- 'for the cache region. This should not need to be '
- 'changed unless there is another dogpile.cache '
- 'region with the same configuration name.'),
- cfg.IntOpt('expiration_time', default=600,
- help='Default TTL, in seconds, for any cached item in '
- 'the dogpile.cache region. This applies to any '
- 'cached method that doesn\'t have an explicit '
- 'cache expiration time defined for it.'),
- # NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack
- # and other such single-process/thread deployments. Running
- # dogpile.cache.memory in any other configuration has the same pitfalls
- # as the KVS token backend. It is recommended that either Redis or
- # Memcached are used as the dogpile backend for real workloads. To
- # prevent issues with the memory cache ending up in "production"
- # unintentionally, we register a no-op as the keystone default caching
- # backend.
- cfg.StrOpt('backend', default='keystone.common.cache.noop',
- help='Dogpile.cache backend module. It is recommended '
- 'that Memcache with pooling '
- '(keystone.cache.memcache_pool) or Redis '
- '(dogpile.cache.redis) be used in production '
- 'deployments. Small workloads (single process) '
- 'like devstack can use the dogpile.cache.memory '
- 'backend.'),
- cfg.MultiStrOpt('backend_argument', default=[], secret=True,
- help='Arguments supplied to the backend module. '
- 'Specify this option once per argument to be '
- 'passed to the dogpile.cache backend. Example '
- 'format: "<argname>:<value>".'),
- cfg.ListOpt('proxies', default=[],
- help='Proxy classes to import that will affect the way '
- 'the dogpile.cache backend functions. See the '
- 'dogpile.cache documentation on '
- 'changing-backend-behavior.'),
- cfg.BoolOpt('enabled', default=False,
- help='Global toggle for all caching using the '
- 'should_cache_fn mechanism.'),
- cfg.BoolOpt('debug_cache_backend', default=False,
- help='Extra debugging from the cache backend (cache '
- 'keys, get/set/delete/etc calls). This is only '
- 'really useful if you need to see the specific '
- 'cache-backend get/set/delete calls with the '
- 'keys/values. Typically this should be left set '
- 'to false.'),
- cfg.ListOpt('memcache_servers', default=['localhost:11211'],
- help='Memcache servers in the format of "host:port".'
- ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only).'),
- cfg.IntOpt('memcache_dead_retry',
- default=5 * 60,
- help='Number of seconds memcached server is considered dead'
- ' before it is tried again. (dogpile.cache.memcache and'
- ' keystone.cache.memcache_pool backends only).'),
- cfg.IntOpt('memcache_socket_timeout',
- default=3,
- help='Timeout in seconds for every call to a server.'
- ' (dogpile.cache.memcache and keystone.cache.memcache_pool'
- ' backends only).'),
- cfg.IntOpt('memcache_pool_maxsize',
- default=10,
- help='Max total number of open connections to every'
- ' memcached server. (keystone.cache.memcache_pool backend'
- ' only).'),
- cfg.IntOpt('memcache_pool_unused_timeout',
- default=60,
- help='Number of seconds a connection to memcached is held'
- ' unused in the pool before it is closed.'
- ' (keystone.cache.memcache_pool backend only).'),
- cfg.IntOpt('memcache_pool_connection_get_timeout',
- default=10,
- help='Number of seconds that an operation will wait to get '
- 'a memcache client connection.'),
- ],
'ssl': [
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
@@ -400,26 +373,40 @@ FILE_OPTIONS = {
'signing': [
cfg.StrOpt('certfile',
default=_CERTFILE,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the certfile for token signing. For '
'non-production environments, you may be interested '
'in using `keystone-manage pki_setup` to generate '
'self-signed certificates.'),
cfg.StrOpt('keyfile',
default=_KEYFILE,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the keyfile for token signing.'),
cfg.StrOpt('ca_certs',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
default='/etc/keystone/ssl/certs/ca.pem',
help='Path of the CA for token signing.'),
cfg.StrOpt('ca_key',
default='/etc/keystone/ssl/private/cakey.pem',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Path of the CA key for token signing.'),
cfg.IntOpt('key_size', default=2048, min=1024,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Key size (in bits) for token signing cert '
'(auto generated certificate).'),
cfg.IntOpt('valid_days', default=3650,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
help='Days the token signing cert is valid for '
'(auto generated certificate).'),
cfg.StrOpt('cert_subject',
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_PKI_MSG,
default=('/C=US/ST=Unset/L=Unset/O=Unset/'
'CN=www.example.com'),
help='Certificate subject (auto generated certificate) for '
@@ -428,16 +415,21 @@ FILE_OPTIONS = {
'assignment': [
cfg.StrOpt('driver',
help='Entrypoint for the assignment backend driver in the '
- 'keystone.assignment namespace. Supplied drivers are '
- 'ldap and sql. If an assignment driver is not '
+ 'keystone.assignment namespace. Only an SQL driver is '
+ 'supplied. If an assignment driver is not '
'specified, the identity driver will choose the '
- 'assignment driver.'),
+ 'assignment driver (driver selection based on '
+ '`[identity]/driver` option is deprecated and will be '
+ 'removed in the "O" release).'),
+ cfg.ListOpt('prohibited_implied_role', default=['admin'],
+ help='A list of role names which are prohibited from '
+ 'being an implied role.'),
],
'resource': [
cfg.StrOpt('driver',
help='Entrypoint for the resource backend driver in the '
- 'keystone.resource namespace. Supplied drivers are '
- 'ldap and sql. If a resource driver is not specified, '
+ 'keystone.resource namespace. Only an SQL driver is '
+ 'supplied. If a resource driver is not specified, '
'the assignment driver will choose the resource '
'driver.'),
cfg.BoolOpt('caching', default=True,
@@ -455,6 +447,30 @@ FILE_OPTIONS = {
group='assignment')],
help='Maximum number of entities that will be returned '
'in a resource collection.'),
+ cfg.StrOpt('admin_project_domain_name',
+ help='Name of the domain that owns the '
+ '`admin_project_name`. Defaults to None.'),
+ cfg.StrOpt('admin_project_name',
+ help='Special project for performing administrative '
+ 'operations on remote services. Tokens scoped to '
+ 'this project will contain the key/value '
+ '`is_admin_project=true`. Defaults to None.'),
+ cfg.StrOpt('project_name_url_safe',
+ choices=['off', 'new', 'strict'], default='off',
+ help='Whether the names of projects are restricted from '
+ 'containing url reserved characters. If set to new, '
+ 'attempts to create or update a project with a url '
+ 'unsafe name will return an error. In addition, if '
+ 'set to strict, attempts to scope a token using '
+ 'an unsafe project name will return an error.'),
+ cfg.StrOpt('domain_name_url_safe',
+ choices=['off', 'new', 'strict'], default='off',
+ help='Whether the names of domains are restricted from '
+ 'containing url reserved characters. If set to new, '
+ 'attempts to create or update a domain with a url '
+ 'unsafe name will return an error. In addition, if '
+ 'set to strict, attempts to scope a token using a '
+ 'domain name which is unsafe will return an error.'),
],
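An illustrative keystone.conf fragment exercising the new [resource] options above (values are for demonstration only)::

    [resource]
    admin_project_domain_name = Default
    admin_project_name = admin
    project_name_url_safe = new
    domain_name_url_safe = strict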
'domain_config': [
cfg.StrOpt('driver',
@@ -496,7 +512,7 @@ FILE_OPTIONS = {
'oauth1': [
cfg.StrOpt('driver',
default='sql',
- help='Entrypoint for hte OAuth backend driver in the '
+ help='Entrypoint for the OAuth backend driver in the '
'keystone.oauth1 namespace.'),
cfg.IntOpt('request_token_duration', default=28800,
help='Duration (in seconds) for the OAuth Request Token.'),
@@ -558,6 +574,8 @@ FILE_OPTIONS = {
'endpoint_policy': [
cfg.BoolOpt('enabled',
default=True,
+ deprecated_for_removal=True,
+ deprecated_reason=_DEPRECATE_EP_MSG,
help='Enable endpoint_policy functionality.'),
cfg.StrOpt('driver',
default='sql',
@@ -566,7 +584,10 @@ FILE_OPTIONS = {
],
'ldap': [
cfg.StrOpt('url', default='ldap://localhost',
- help='URL for connecting to the LDAP server.'),
+ help='URL(s) for connecting to the LDAP server. Multiple '
+ 'LDAP URLs may be specified as a comma separated '
+ 'string. The first URL to successfully bind is used '
+ 'for the connection.'),
cfg.StrOpt('user',
help='User BindDN to query the LDAP server.'),
cfg.StrOpt('password', secret=True,
@@ -618,6 +639,8 @@ FILE_OPTIONS = {
'WARNING: must not be a multivalued attribute.'),
cfg.StrOpt('user_name_attribute', default='sn',
help='LDAP attribute mapped to user name.'),
+ cfg.StrOpt('user_description_attribute', default='description',
+ help='LDAP attribute mapped to user description.'),
cfg.StrOpt('user_mail_attribute', default='mail',
help='LDAP attribute mapped to user email.'),
cfg.StrOpt('user_pass_attribute', default='userPassword',
@@ -655,10 +678,25 @@ FILE_OPTIONS = {
help='LDAP attribute mapped to default_project_id for '
'users.'),
cfg.BoolOpt('user_allow_create', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user creation in LDAP backend.'),
cfg.BoolOpt('user_allow_update', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user updates in LDAP backend.'),
cfg.BoolOpt('user_allow_delete', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow user deletion in LDAP backend.'),
cfg.BoolOpt('user_enabled_emulation', default=False,
help='If true, Keystone uses an alternative method to '
@@ -679,146 +717,6 @@ FILE_OPTIONS = {
'mapping format is <ldap_attr>:<user_attr>, where '
'ldap_attr is the attribute in the LDAP entry and '
'user_attr is the Identity API attribute.'),
-
- cfg.StrOpt('project_tree_dn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_tree_dn', group='ldap')],
- deprecated_for_removal=True,
- help='Search base for projects. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('project_filter',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_filter', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP search filter for projects.'),
- cfg.StrOpt('project_objectclass', default='groupOfNames',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_objectclass', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP objectclass for projects.'),
- cfg.StrOpt('project_id_attribute', default='cn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_id_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project id.'),
- cfg.StrOpt('project_member_attribute', default='member',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_member_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project membership for '
- 'user.'),
- cfg.StrOpt('project_name_attribute', default='ou',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_name_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project name.'),
- cfg.StrOpt('project_desc_attribute', default='description',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_desc_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project description.'),
- cfg.StrOpt('project_enabled_attribute', default='enabled',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_attribute', group='ldap')],
- deprecated_for_removal=True,
- help='LDAP attribute mapped to project enabled.'),
- cfg.StrOpt('project_domain_id_attribute',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_domain_id_attribute', group='ldap')],
- deprecated_for_removal=True,
- default='businessCategory',
- help='LDAP attribute mapped to project domain_id.'),
- cfg.ListOpt('project_attribute_ignore', default=[],
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_attribute_ignore', group='ldap')],
- deprecated_for_removal=True,
- help='List of attributes stripped off the project on '
- 'update.'),
- cfg.BoolOpt('project_allow_create', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_create', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project creation in LDAP backend.'),
- cfg.BoolOpt('project_allow_update', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_update', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project update in LDAP backend.'),
- cfg.BoolOpt('project_allow_delete', default=True,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_allow_delete', group='ldap')],
- deprecated_for_removal=True,
- help='Allow project deletion in LDAP backend.'),
- cfg.BoolOpt('project_enabled_emulation', default=False,
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_emulation', group='ldap')],
- deprecated_for_removal=True,
- help='If true, Keystone uses an alternative method to '
- 'determine if a project is enabled or not by '
- 'checking if they are a member of the '
- '"project_enabled_emulation_dn" group.'),
- cfg.StrOpt('project_enabled_emulation_dn',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_enabled_emulation_dn', group='ldap')],
- deprecated_for_removal=True,
- help='DN of the group entry to hold enabled projects when '
- 'using enabled emulation.'),
- cfg.BoolOpt('project_enabled_emulation_use_group_config',
- default=False,
- help='Use the "group_member_attribute" and '
- '"group_objectclass" settings to determine '
- 'membership in the emulated enabled group.'),
- cfg.ListOpt('project_additional_attribute_mapping',
- deprecated_opts=[cfg.DeprecatedOpt(
- 'tenant_additional_attribute_mapping', group='ldap')],
- deprecated_for_removal=True,
- default=[],
- help='Additional attribute mappings for projects. '
- 'Attribute mapping format is '
- '<ldap_attr>:<user_attr>, where ldap_attr is the '
- 'attribute in the LDAP entry and user_attr is the '
- 'Identity API attribute.'),
-
- cfg.StrOpt('role_tree_dn',
- deprecated_for_removal=True,
- help='Search base for roles. '
- 'Defaults to the suffix value.'),
- cfg.StrOpt('role_filter',
- deprecated_for_removal=True,
- help='LDAP search filter for roles.'),
- cfg.StrOpt('role_objectclass', default='organizationalRole',
- deprecated_for_removal=True,
- help='LDAP objectclass for roles.'),
- cfg.StrOpt('role_id_attribute', default='cn',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role id.'),
- cfg.StrOpt('role_name_attribute', default='ou',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role name.'),
- cfg.StrOpt('role_member_attribute', default='roleOccupant',
- deprecated_for_removal=True,
- help='LDAP attribute mapped to role membership.'),
- cfg.ListOpt('role_attribute_ignore', default=[],
- deprecated_for_removal=True,
- help='List of attributes stripped off the role on '
- 'update.'),
- cfg.BoolOpt('role_allow_create', default=True,
- deprecated_for_removal=True,
- help='Allow role creation in LDAP backend.'),
- cfg.BoolOpt('role_allow_update', default=True,
- deprecated_for_removal=True,
- help='Allow role update in LDAP backend.'),
- cfg.BoolOpt('role_allow_delete', default=True,
- deprecated_for_removal=True,
- help='Allow role deletion in LDAP backend.'),
- cfg.ListOpt('role_additional_attribute_mapping',
- deprecated_for_removal=True,
- default=[],
- help='Additional attribute mappings for roles. Attribute '
- 'mapping format is <ldap_attr>:<user_attr>, where '
- 'ldap_attr is the attribute in the LDAP entry and '
- 'user_attr is the Identity API attribute.'),
-
cfg.StrOpt('group_tree_dn',
help='Search base for groups. '
'Defaults to the suffix value.'),
@@ -838,10 +736,25 @@ FILE_OPTIONS = {
help='List of attributes stripped off the group on '
'update.'),
cfg.BoolOpt('group_allow_create', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group creation in LDAP backend.'),
cfg.BoolOpt('group_allow_update', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group update in LDAP backend.'),
cfg.BoolOpt('group_allow_delete', default=True,
+ deprecated_for_removal=True,
+ deprecated_reason="Write support for Identity LDAP "
+ "backends has been deprecated in the M "
+ "release and will be removed in the O "
+ "release.",
help='Allow group deletion in LDAP backend.'),
cfg.ListOpt('group_additional_attribute_mapping',
default=[],
@@ -862,7 +775,7 @@ FILE_OPTIONS = {
choices=['demand', 'never', 'allow'],
help='Specifies what checks to perform on client '
'certificates in an incoming TLS session.'),
- cfg.BoolOpt('use_pool', default=False,
+ cfg.BoolOpt('use_pool', default=True,
help='Enable LDAP connection pooling.'),
cfg.IntOpt('pool_size', default=10,
help='Connection pool size.'),
@@ -876,7 +789,7 @@ FILE_OPTIONS = {
'indefinite wait for response.'),
cfg.IntOpt('pool_connection_lifetime', default=600,
help='Connection lifetime in seconds.'),
- cfg.BoolOpt('use_auth_pool', default=False,
+ cfg.BoolOpt('use_auth_pool', default=True,
help='Enable LDAP connection pooling for end user '
'authentication. If use_pool is disabled, then this '
'setting is meaningless and is not used at all.'),
@@ -884,11 +797,17 @@ FILE_OPTIONS = {
help='End user auth connection pool size.'),
cfg.IntOpt('auth_pool_connection_lifetime', default=60,
help='End user auth connection lifetime in seconds.'),
+ cfg.BoolOpt('group_members_are_ids', default=False,
+ help='If the members of the group objectclass are user '
+ 'IDs rather than DNs, set this to true. This is the '
+                        'case when using posixGroup as the group '
+                        'objectclass, and when using OpenDirectory.'),
],
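Pulling the LDAP changes above together (multiple URLs, pooling now on by default, ID-valued group members), an illustrative keystone.conf fragment with made-up hosts::

    [ldap]
    url = ldap://ldap1.example.com,ldap://ldap2.example.com
    use_pool = true
    use_auth_pool = true
    group_objectclass = posixGroup
    group_members_are_ids = true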
'auth': [
cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
help='Allowed authentication methods.'),
- cfg.StrOpt('password',
+ cfg.StrOpt('password', # nosec : This is the name of the plugin, not
+ # a password that needs to be protected.
help='Entrypoint for the password auth plugin module in '
'the keystone.auth.password namespace.'),
cfg.StrOpt('token',
@@ -1090,7 +1009,8 @@ FILE_OPTIONS = {
'eventlet application. Defaults to the number of CPUs '
'(minimum of 2).'),
cfg.StrOpt('public_bind_host',
- default='0.0.0.0',
+ default='0.0.0.0', # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT'),
cfg.DeprecatedOpt('public_bind_host',
@@ -1098,14 +1018,15 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='The IP address of the network interface for the '
'public service to listen on.'),
- cfg.IntOpt('public_port', default=5000, min=1, max=65535,
- deprecated_name='public_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the public service listens '
- 'on.'),
+ cfg.PortOpt('public_port', default=5000,
+ deprecated_name='public_port',
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ help='The port number which the public service listens '
+ 'on.'),
cfg.StrOpt('admin_bind_host',
- default='0.0.0.0',
+ default='0.0.0.0', # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT'),
cfg.DeprecatedOpt('admin_bind_host',
@@ -1113,21 +1034,21 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='The IP address of the network interface for the '
'admin service to listen on.'),
- cfg.IntOpt('admin_port', default=35357, min=1, max=65535,
- deprecated_name='admin_port',
- deprecated_group='DEFAULT',
- deprecated_for_removal=True,
- help='The port number which the admin service listens '
- 'on.'),
+ cfg.PortOpt('admin_port', default=35357,
+ deprecated_name='admin_port',
+ deprecated_group='DEFAULT',
+ deprecated_for_removal=True,
+ help='The port number which the admin service listens '
+ 'on.'),
cfg.BoolOpt('wsgi_keep_alive', default=True,
- help="If set to false, disables keepalives on the server; "
- "all connections will be closed after serving one "
- "request."),
+ help='If set to false, disables keepalives on the server; '
+ 'all connections will be closed after serving one '
+ 'request.'),
cfg.IntOpt('client_socket_timeout', default=900,
- help="Timeout for socket operations on a client "
- "connection. If an incoming connection is idle for "
- "this number of seconds it will be closed. A value "
- "of '0' means wait forever."),
+ help='Timeout for socket operations on a client '
+ 'connection. If an incoming connection is idle for '
+ 'this number of seconds it will be closed. A value '
+ 'of "0" means wait forever.'),
cfg.BoolOpt('tcp_keepalive', default=False,
deprecated_name='tcp_keepalive',
deprecated_group='DEFAULT',
@@ -1143,7 +1064,7 @@ FILE_OPTIONS = {
deprecated_for_removal=True,
help='Sets the value of TCP_KEEPIDLE in seconds for each '
'server socket. Only applies if tcp_keepalive is '
- 'true.'),
+                        'true. Ignored if the system does not support it.'),
],
'eventlet_server_ssl': [
cfg.BoolOpt('enable', default=False, deprecated_name='enable',
@@ -1152,7 +1073,7 @@ FILE_OPTIONS = {
help='Toggle for SSL support on the Keystone '
'eventlet servers.'),
cfg.StrOpt('certfile',
- default="/etc/keystone/ssl/certs/keystone.pem",
+ default='/etc/keystone/ssl/certs/keystone.pem',
deprecated_name='certfile', deprecated_group='ssl',
deprecated_for_removal=True,
help='Path of the certfile for SSL. For non-production '
@@ -1173,7 +1094,7 @@ FILE_OPTIONS = {
deprecated_name='cert_required', deprecated_group='ssl',
deprecated_for_removal=True,
help='Require client certificate.'),
- ]
+ ],
}
@@ -1195,6 +1116,67 @@ def setup_authentication(conf=None):
_register_auth_plugin_opt(conf, option)
+def set_default_for_default_log_levels():
+ """Set the default for the default_log_levels option for keystone.
+
+    Keystone uses some logging-enabled packages that other OpenStack
+    services don't use. This sets the default_log_levels default level
+    for those packages.
+
+ This function needs to be called before CONF().
+
+ """
+ extra_log_level_defaults = [
+ 'dogpile=INFO',
+ 'routes=INFO',
+ ]
+
+ log.register_options(CONF)
+ log.set_defaults(default_log_levels=log.get_default_log_levels() +
+ extra_log_level_defaults)
+
+
+def setup_logging():
+ """Sets up logging for the keystone package."""
+ log.setup(CONF, 'keystone')
+ logging.captureWarnings(True)
+
+
+def find_paste_config():
+ """Find Keystone's paste.deploy configuration file.
+
+ Keystone's paste.deploy configuration file is specified in the
+ ``[paste_deploy]`` section of the main Keystone configuration file,
+ ``keystone.conf``.
+
+ For example::
+
+ [paste_deploy]
+ config_file = keystone-paste.ini
+
+ :returns: The selected configuration filename
+ :raises: exception.ConfigFileNotFound
+
+ """
+ if CONF.paste_deploy.config_file:
+ paste_config = CONF.paste_deploy.config_file
+ paste_config_value = paste_config
+ if not os.path.isabs(paste_config):
+ paste_config = CONF.find_file(paste_config)
+ elif CONF.config_file:
+ paste_config = CONF.config_file[0]
+ paste_config_value = paste_config
+ else:
+ # this provides backwards compatibility for keystone.conf files that
+ # still have the entire paste configuration included, rather than just
+ # a [paste_deploy] configuration section referring to an external file
+ paste_config = CONF.find_file('keystone.conf')
+ paste_config_value = 'keystone.conf'
+ if not paste_config or not os.path.exists(paste_config):
+ raise exception.ConfigFileNotFound(config_file=paste_config_value)
+ return paste_config
+
+
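The helper only locates the file; loading is left to the WSGI bootstrap. A hedged sketch of a consumer (the 'main' application name is an assumption based on the sample keystone-paste.ini, which also defines 'admin')::

    from paste import deploy

    from keystone.common import config

    paste_config = config.find_paste_config()
    # 'main' is assumed; substitute 'admin' for the admin application
    application = deploy.loadapp('config:%s' % paste_config, name='main')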
def configure(conf=None):
if conf is None:
conf = CONF
@@ -1206,8 +1188,8 @@ def configure(conf=None):
cfg.StrOpt('pydev-debug-host',
help='Host to connect to for remote debugger.'))
conf.register_cli_opt(
- cfg.IntOpt('pydev-debug-port', min=1, max=65535,
- help='Port to connect to for remote debugger.'))
+ cfg.PortOpt('pydev-debug-port',
+ help='Port to connect to for remote debugger.'))
for section in FILE_OPTIONS:
for option in FILE_OPTIONS[section]:
@@ -1218,6 +1200,8 @@ def configure(conf=None):
# register any non-default auth methods here (used by extensions, etc)
setup_authentication(conf)
+ # add oslo.cache related config options
+ cache.configure(conf)
def list_opts():
@@ -1242,3 +1226,34 @@ def list_opts():
:returns: a list of (group_name, opts) tuples
"""
return list(FILE_OPTIONS.items())
+
+
+def set_middleware_defaults():
+ """Update default configuration options for oslo.middleware."""
+ # CORS Defaults
+ # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
+ cfg.set_defaults(cors.CORS_OPTS,
+ allow_headers=['X-Auth-Token',
+ 'X-Openstack-Request-Id',
+ 'X-Subject-Token',
+ 'X-Project-Id',
+ 'X-Project-Name',
+ 'X-Project-Domain-Id',
+ 'X-Project-Domain-Name',
+ 'X-Domain-Id',
+ 'X-Domain-Name'],
+ expose_headers=['X-Auth-Token',
+ 'X-Openstack-Request-Id',
+ 'X-Subject-Token'],
+ allow_methods=['GET',
+ 'PUT',
+ 'POST',
+ 'DELETE',
+ 'PATCH']
+ )
+
+
+def set_config_defaults():
+ """Override all configuration default values for keystone."""
+ set_default_for_default_log_levels()
+ set_middleware_defaults()
diff --git a/keystone-moon/keystone/common/controller.py b/keystone-moon/keystone/common/controller.py
index 56bc211a..8672525f 100644
--- a/keystone-moon/keystone/common/controller.py
+++ b/keystone-moon/keystone/common/controller.py
@@ -36,21 +36,39 @@ CONF = cfg.CONF
def v2_deprecated(f):
- """No-op decorator in preparation for deprecating Identity API v2.
-
- This is a placeholder for the pending deprecation of v2. The implementation
- of this decorator can be replaced with::
-
- from oslo_log import versionutils
-
-
- v2_deprecated = versionutils.deprecated(
- what='v2 API',
- as_of=versionutils.deprecated.JUNO,
- in_favor_of='v3 API')
-
- """
- return f
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 API',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of='a similar function in the v3 API',
+ remove_in=+4)
+ return deprecated(f)
+ return wrapper()
+
+
+def v2_ec2_deprecated(f):
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 EC2 APIs',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of=('a similar function in the v3 Credential APIs'),
+ remove_in=0)
+ return deprecated(f)
+ return wrapper()
+
+
+def v2_auth_deprecated(f):
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ deprecated = versionutils.deprecated(
+ what=f.__name__ + ' of the v2 Authentication APIs',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of=('a similar function in the v3 Authentication APIs'),
+ remove_in=0)
+ return deprecated(f)
+ return wrapper()
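Each of these wraps the controller method so that calls log a Mitaka deprecation warning via oslo.log. A hedged sketch of how one is applied (controller and method names are illustrative)::

    class Tenant(V2Controller):

        @v2_deprecated
        def get_project(self, context, tenant_id):
            ...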
def _build_policy_check_credentials(self, action, context, kwargs):
@@ -165,24 +183,32 @@ def protected(callback=None):
return wrapper
-def filterprotected(*filters):
- """Wraps filtered API calls with role based access controls (RBAC)."""
+def filterprotected(*filters, **callback):
+ """Wraps API list calls with role based access controls (RBAC).
+ This handles both the protection of the API parameters as well as any
+ filters supplied.
+
+    More complex API list calls (for example, those that need to examine
+    the contents of an entity referenced by one of the filters) should
+    pass in a callback function that will subsequently be called to check
+    protection for these multiple entities. This callback function should
+    gather the entities it needs and then call check_protection() in the
+    V3Controller class.
+
+ """
def _filterprotected(f):
@functools.wraps(f)
def wrapper(self, context, **kwargs):
if not context['is_admin']:
- action = 'identity:%s' % f.__name__
- creds = _build_policy_check_credentials(self, action,
- context, kwargs)
- # Now, build the target dict for policy check. We include:
+ # The target dict for the policy check will include:
#
# - Any query filter parameters
# - Data from the main url (which will be in the kwargs
- # parameter) and would typically include the prime key
- # of a get/update/delete call
+            #   parameter), which most of our APIs do not use but which
+            #   could, in theory, be present.
#
- # First any query filter parameters
+
+ # First build the dict of filter parameters
target = dict()
if filters:
for item in filters:
@@ -193,15 +219,29 @@ def filterprotected(*filters):
', '.join(['%s=%s' % (item, target[item])
for item in target])))
- # Now any formal url parameters
- for key in kwargs:
- target[key] = kwargs[key]
-
- self.policy_api.enforce(creds,
- action,
- utils.flatten_dict(target))
-
- LOG.debug('RBAC: Authorization granted')
+ if 'callback' in callback and callback['callback'] is not None:
+ # A callback has been specified to load additional target
+ # data, so pass it the formal url params as well as the
+ # list of filters, so it can augment these and then call
+ # the check_protection() method.
+ prep_info = {'f_name': f.__name__,
+ 'input_attr': kwargs,
+ 'filter_attr': target}
+ callback['callback'](self, context, prep_info, **kwargs)
+ else:
+ # No callback, so we are going to check the protection here
+ action = 'identity:%s' % f.__name__
+ creds = _build_policy_check_credentials(self, action,
+ context, kwargs)
+ # Add in any formal url parameters
+ for key in kwargs:
+ target[key] = kwargs[key]
+
+ self.policy_api.enforce(creds,
+ action,
+ utils.flatten_dict(target))
+
+ LOG.debug('RBAC: Authorization granted')
else:
LOG.warning(_LW('RBAC: Bypassing authorization'))
return f(self, context, filters, **kwargs)
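A hedged usage sketch of the extended decorator (the filter names and the callback are illustrative); the callback receives the prep_info dict built above and is expected to gather its target entities and call check_protection() itself::

    @controller.filterprotected('domain_id', 'name',
                                callback=_check_list_protection)
    def list_users(self, context, filters):
        ...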
@@ -211,6 +251,7 @@ def filterprotected(*filters):
class V2Controller(wsgi.Application):
"""Base controller class for Identity API v2."""
+
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id since v2 calls are not domain-aware.
@@ -224,27 +265,13 @@ class V2Controller(wsgi.Application):
@staticmethod
def filter_domain_id(ref):
"""Remove domain_id since v2 calls are not domain-aware."""
- if 'domain_id' in ref:
- if ref['domain_id'] != CONF.identity.default_domain_id:
- raise exception.Unauthorized(
- _('Non-default domain is not supported'))
- del ref['domain_id']
+ ref.pop('domain_id', None)
return ref
@staticmethod
def filter_domain(ref):
- """Remove domain since v2 calls are not domain-aware.
-
- V3 Fernet tokens builds the users with a domain in the token data.
- This method will ensure that users create in v3 belong to the default
- domain.
-
- """
- if 'domain' in ref:
- if ref['domain'].get('id') != CONF.identity.default_domain_id:
- raise exception.Unauthorized(
- _('Non-default domain is not supported'))
- del ref['domain']
+ """Remove domain since v2 calls are not domain-aware."""
+ ref.pop('domain', None)
return ref
@staticmethod
@@ -287,20 +314,13 @@ class V2Controller(wsgi.Application):
def v3_to_v2_user(ref):
"""Convert a user_ref from v3 to v2 compatible.
- - v2.0 users are not domain aware, and should have domain_id validated
- to be the default domain, and then removed.
-
- - v2.0 users expect the use of tenantId instead of default_project_id.
-
- - v2.0 users have a username attribute.
-
- This method should only be applied to user_refs being returned from the
- v2.0 controller(s).
+ * v2.0 users are not domain aware, and should have domain_id removed
+ * v2.0 users expect the use of tenantId instead of default_project_id
+ * v2.0 users have a username attribute
If ref is a list type, we will iterate through each element and do the
conversion.
"""
-
def _format_default_project_id(ref):
"""Convert default_project_id to tenantId for v2 calls."""
default_project_id = ref.pop('default_project_id', None)
@@ -342,7 +362,6 @@ class V2Controller(wsgi.Application):
If ref is a list type, we will iterate through each element and do the
conversion.
"""
-
def _filter_project_properties(ref):
"""Run through the various filter methods."""
V2Controller.filter_domain_id(ref)
@@ -404,8 +423,6 @@ class V3Controller(wsgi.Application):
Class parameters:
- * `_mutable_parameters` - set of parameters that can be changed by users.
- Usually used by cls.check_immutable_params()
* `_public_parameters` - set of parameters that are exposed to the user.
Usually used by cls.filter_params()
@@ -450,7 +467,6 @@ class V3Controller(wsgi.Application):
True, including the absence of a value
"""
-
if (isinstance(filter_value, six.string_types) and
filter_value == '0'):
val = False
@@ -545,7 +561,6 @@ class V3Controller(wsgi.Application):
@classmethod
def filter_by_attributes(cls, refs, hints):
"""Filters a list of references by filter values."""
-
def _attr_match(ref_attr, val_attr):
"""Matches attributes allowing for booleans as strings.
@@ -565,7 +580,7 @@ class V3Controller(wsgi.Application):
:param filter: the filter in question
:param ref: the dict to check
- :returns True if there is a match
+ :returns: True if there is a match
"""
comparator = filter['comparator']
@@ -713,6 +728,8 @@ class V3Controller(wsgi.Application):
if token_ref.domain_scoped:
return token_ref.domain_id
+ elif token_ref.project_scoped:
+ return token_ref.project_domain_id
else:
LOG.warning(
_LW('No domain information specified as part of list request'))
@@ -726,7 +743,16 @@ class V3Controller(wsgi.Application):
being used.
"""
- token_ref = utils.get_token_ref(context)
+ try:
+ token_ref = utils.get_token_ref(context)
+ except exception.Unauthorized:
+ if context.get('is_admin'):
+ raise exception.ValidationError(
+ _('You have tried to create a resource using the admin '
+ 'token. As this token is not within a domain you must '
+                      'token. As this token is not within a domain, you must '
+ 'belong to.'))
+ raise
if token_ref.domain_scoped:
return token_ref.domain_id
@@ -751,7 +777,7 @@ class V3Controller(wsgi.Application):
def _normalize_domain_id(self, context, ref):
"""Fill in domain_id if not specified in a v3 call."""
- if 'domain_id' not in ref:
+ if not ref.get('domain_id'):
ref['domain_id'] = self._get_domain_id_from_token(context)
return ref
@@ -768,7 +794,7 @@ class V3Controller(wsgi.Application):
additional entities or attributes (passed in target_attr), so that
they can be referenced by policy rules.
- """
+ """
if 'is_admin' in context and context['is_admin']:
LOG.warning(_LW('RBAC: Bypassing authorization'))
else:
@@ -785,43 +811,19 @@ class V3Controller(wsgi.Application):
if target_attr:
policy_dict = {'target': target_attr}
policy_dict.update(prep_info['input_attr'])
+ if 'filter_attr' in prep_info:
+ policy_dict.update(prep_info['filter_attr'])
self.policy_api.enforce(creds,
action,
utils.flatten_dict(policy_dict))
LOG.debug('RBAC: Authorization granted')
@classmethod
- def check_immutable_params(cls, ref):
- """Raise exception when disallowed parameter is in ref.
-
- Check whether the ref dictionary representing a request has only
- mutable parameters included. If not, raise an exception. This method
- checks only root-level keys from a ref dictionary.
-
- :param ref: a dictionary representing deserialized request to be
- stored
- :raises: :class:`keystone.exception.ImmutableAttributeError`
-
- """
- ref_keys = set(ref.keys())
- blocked_keys = ref_keys.difference(cls._mutable_parameters)
-
- if not blocked_keys:
- # No immutable parameters changed
- return
-
- exception_args = {'target': cls.__name__,
- 'attributes': ', '.join(blocked_keys)}
- raise exception.ImmutableAttributeError(**exception_args)
-
- @classmethod
def filter_params(cls, ref):
"""Remove unspecified parameters from the dictionary.
- This function removes unspecified parameters from the dictionary. See
- check_immutable_parameters for corresponding function that raises
- exceptions. This method checks only root-level keys from a ref
- dictionary.
+        This method checks only root-level keys of a ref dictionary.
:param ref: a dictionary representing deserialized response to be
serialized
diff --git a/keystone-moon/keystone/common/dependency.py b/keystone-moon/keystone/common/dependency.py
index e19f705f..d52a1ec5 100644
--- a/keystone-moon/keystone/common/dependency.py
+++ b/keystone-moon/keystone/common/dependency.py
@@ -60,6 +60,7 @@ class UnresolvableDependencyException(Exception):
See ``resolve_future_dependencies()`` for more details.
"""
+
def __init__(self, name, targets):
msg = _('Unregistered dependency: %(name)s for %(targets)s') % {
'name': name, 'targets': targets}
@@ -225,6 +226,5 @@ def reset():
This is useful for unit testing to ensure that tests don't use providers
from previous tests.
"""
-
_REGISTRY.clear()
_future_dependencies.clear()
diff --git a/keystone-moon/keystone/common/driver_hints.py b/keystone-moon/keystone/common/driver_hints.py
index ff0a774c..e7c2f2ef 100644
--- a/keystone-moon/keystone/common/driver_hints.py
+++ b/keystone-moon/keystone/common/driver_hints.py
@@ -13,6 +13,50 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
+from keystone import exception
+from keystone.i18n import _
+
+
+def truncated(f):
+ """Ensure list truncation is detected in Driver list entity methods.
+
+ This is designed to wrap Driver list_{entity} methods in order to
+ calculate if the resultant list has been truncated. Provided a limit dict
+ is found in the hints list, we increment the limit by one so as to ask the
+    wrapped function for one more entity than the limit. Once the list
+    has been generated, we check whether the original limit has been
+    exceeded; if so, we truncate back to that limit and set the
+    'truncated' boolean to 'true' in the hints limit dict.
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, hints, *args, **kwargs):
+ if not hasattr(hints, 'limit'):
+ raise exception.UnexpectedError(
+                _('Cannot truncate a driver call without a hints list as '
+                  'the first parameter after self.'))
+
+ if hints.limit is None:
+ return f(self, hints, *args, **kwargs)
+
+ # A limit is set, so ask for one more entry than we need
+ list_limit = hints.limit['limit']
+ hints.set_limit(list_limit + 1)
+ ref_list = f(self, hints, *args, **kwargs)
+
+ # If we got more than the original limit then trim back the list and
+ # mark it truncated. In both cases, make sure we set the limit back
+ # to its original value.
+ if len(ref_list) > list_limit:
+ hints.set_limit(list_limit, truncated=True)
+ return ref_list[:list_limit]
+ else:
+ hints.set_limit(list_limit)
+ return ref_list
+ return wrapper
+
class Hints(object):
"""Encapsulate driver hints for listing entities.
@@ -39,12 +83,13 @@ class Hints(object):
* ``name``: the name of the attribute being matched
* ``value``: the value against which it is being matched
* ``comparator``: the operation, which can be one of ``equals``,
- ``startswith`` or ``endswith``
+ ``contains``, ``startswith`` or ``endswith``
* ``case_sensitive``: whether any comparison should take account of
case
* ``type``: will always be 'filter'
"""
+
def __init__(self):
self.limit = None
self.filters = list()
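
A minimal usage sketch of the relocated @truncated decorator (not part
of the patch itself): FakeDriver and its rows are hypothetical, and
Hints.set_limit() is assumed to store {'limit': n, 'truncated': bool}
exactly as the wrapper above expects.

    from keystone.common import driver_hints

    class FakeDriver(object):
        @driver_hints.truncated
        def list_entities(self, hints):
            # The wrapper has already raised the limit by one, so a
            # backend honouring hints.limit may return limit + 1 rows.
            return [{'id': str(i)} for i in range(hints.limit['limit'])]

    hints = driver_hints.Hints()
    hints.set_limit(3)
    rows = FakeDriver().list_entities(hints)
    # len(rows) <= 3; hints.limit['truncated'] is True only when the
    # backend produced more rows than the caller's original limit.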
diff --git a/keystone-moon/keystone/common/environment/__init__.py b/keystone-moon/keystone/common/environment/__init__.py
index 3edf6b0b..6748f115 100644
--- a/keystone-moon/keystone/common/environment/__init__.py
+++ b/keystone-moon/keystone/common/environment/__init__.py
@@ -21,7 +21,7 @@ from oslo_log import log
LOG = log.getLogger(__name__)
-__all__ = ['Server', 'httplib', 'subprocess']
+__all__ = ('Server', 'httplib', 'subprocess')
_configured = False
@@ -95,7 +95,8 @@ def use_stdlib():
global httplib, subprocess
import six.moves.http_client as _httplib
- import subprocess as _subprocess
+ import subprocess as _subprocess # nosec : This is used in .federation.idp
+ # and .common.openssl. See there.
httplib = _httplib
subprocess = _subprocess
diff --git a/keystone-moon/keystone/common/environment/eventlet_server.py b/keystone-moon/keystone/common/environment/eventlet_server.py
index 398952e1..430ca3e4 100644
--- a/keystone-moon/keystone/common/environment/eventlet_server.py
+++ b/keystone-moon/keystone/common/environment/eventlet_server.py
@@ -27,7 +27,6 @@ import eventlet.wsgi
import greenlet
from oslo_config import cfg
from oslo_log import log
-from oslo_log import loggers
from oslo_service import service
from keystone.i18n import _LE, _LI
@@ -46,15 +45,16 @@ LOG = log.getLogger(__name__)
POOL_SIZE = 1
-class EventletFilteringLogger(loggers.WritableLogger):
+class EventletFilteringLogger(object):
# NOTE(morganfainberg): This logger is designed to filter out specific
# Tracebacks to limit the amount of data that eventlet can log. In the
# case of broken sockets (EPIPE and ECONNRESET), we are seeing a huge
# volume of data being written to the logs due to ~14 lines+ per traceback.
# The traceback in these cases are, at best, useful for limited debugging
# cases.
- def __init__(self, *args, **kwargs):
- super(EventletFilteringLogger, self).__init__(*args, **kwargs)
+ def __init__(self, logger, level=log.INFO):
+ self.logger = logger
+ self.level = level
self.regex = re.compile(r'errno (%d|%d)' %
(errno.EPIPE, errno.ECONNRESET), re.IGNORECASE)
@@ -73,7 +73,8 @@ class Server(service.ServiceBase):
def __init__(self, application, host=None, port=None, keepalive=False,
keepidle=None):
self.application = application
- self.host = host or '0.0.0.0'
+ self.host = host or '0.0.0.0' # nosec : Bind to all interfaces by
+ # default for backwards compatibility.
self.port = port or 0
# Pool for a green thread in which wsgi server will be running
self.pool = eventlet.GreenPool(POOL_SIZE)
@@ -92,7 +93,6 @@ class Server(service.ServiceBase):
Raises Exception if this has already been called.
"""
-
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix.
@@ -120,7 +120,6 @@ class Server(service.ServiceBase):
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
-
if self.socket is None:
self.listen(key=key, backlog=backlog)
@@ -145,8 +144,13 @@ class Server(service.ServiceBase):
dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if self.keepidle is not None:
- dup_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
- self.keepidle)
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ dup_socket.setsockopt(socket.IPPROTO_TCP,
+ socket.TCP_KEEPIDLE,
+ self.keepidle)
+ else:
+ LOG.warning("System does not support TCP_KEEPIDLE but "
+ "tcp_keepidle has been set. Ignoring.")
self.greenthread = self.pool.spawn(self._run,
self.application,
@@ -168,9 +172,11 @@ class Server(service.ServiceBase):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
- except KeyboardInterrupt:
+ except KeyboardInterrupt: # nosec
+ # If CTRL-C, just break out of the loop.
pass
- except greenlet.GreenletExit:
+ except greenlet.GreenletExit: # nosec
+ # If exiting, break out of the loop.
pass
def reset(self):
@@ -198,7 +204,7 @@ class Server(service.ServiceBase):
socket, application, log=EventletFilteringLogger(logger),
debug=False, keepalive=CONF.eventlet_server.wsgi_keep_alive,
socket_timeout=socket_timeout)
- except greenlet.GreenletExit:
+ except greenlet.GreenletExit: # nosec
# Wait until all servers have completed running
pass
except Exception:
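
The filtering behaviour relied on here lives in write(), outside the
hunks shown; reconstructed as a hedged sketch, the contract is that
messages matching the EPIPE/ECONNRESET regex are demoted to DEBUG and
everything else is logged at the configured level:

    import logging

    def write(self, msg):  # file-like hook that eventlet.wsgi calls
        if self.regex.search(msg):
            # Broken-socket tracebacks (EPIPE/ECONNRESET) are demoted.
            self.logger.log(logging.DEBUG, msg.rstrip())
        else:
            self.logger.log(self.level, msg.rstrip())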
diff --git a/keystone-moon/keystone/common/extension.py b/keystone-moon/keystone/common/extension.py
index b2ea80bc..be5de631 100644
--- a/keystone-moon/keystone/common/extension.py
+++ b/keystone-moon/keystone/common/extension.py
@@ -41,5 +41,4 @@ def register_admin_extension(url_prefix, extension_data):
def register_public_extension(url_prefix, extension_data):
"""Same as register_admin_extension but for public extensions."""
-
PUBLIC_EXTENSIONS[url_prefix] = extension_data
diff --git a/keystone-moon/keystone/common/json_home.py b/keystone-moon/keystone/common/json_home.py
index c048a356..6876f8af 100644
--- a/keystone-moon/keystone/common/json_home.py
+++ b/keystone-moon/keystone/common/json_home.py
@@ -79,7 +79,6 @@ class Status(object):
def translate_urls(json_home, new_prefix):
"""Given a JSON Home document, sticks new_prefix on each of the urls."""
-
for dummy_rel, resource in json_home['resources'].items():
if 'href' in resource:
resource['href'] = new_prefix + resource['href']
diff --git a/keystone-moon/keystone/common/kvs/__init__.py b/keystone-moon/keystone/common/kvs/__init__.py
index 9a406a85..354bbd8a 100644
--- a/keystone-moon/keystone/common/kvs/__init__.py
+++ b/keystone-moon/keystone/common/kvs/__init__.py
@@ -15,7 +15,6 @@
from dogpile.cache import region
from keystone.common.kvs.core import * # noqa
-from keystone.common.kvs.legacy import Base, DictKvs, INMEMDB # noqa
# NOTE(morganfainberg): Provided backends are registered here in the __init__
diff --git a/keystone-moon/keystone/common/kvs/backends/inmemdb.py b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
index 68072ef4..379b54bf 100644
--- a/keystone-moon/keystone/common/kvs/backends/inmemdb.py
+++ b/keystone-moon/keystone/common/kvs/backends/inmemdb.py
@@ -12,9 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Keystone In-Memory Dogpile.cache backend implementation.
-"""
+"""Keystone In-Memory Dogpile.cache backend implementation."""
import copy
@@ -40,6 +38,7 @@ class MemoryBackend(api.CacheBackend):
'keystone.common.kvs.Memory'
)
"""
+
def __init__(self, arguments):
self._db = {}
diff --git a/keystone-moon/keystone/common/kvs/backends/memcached.py b/keystone-moon/keystone/common/kvs/backends/memcached.py
index f54c1a01..a65cf877 100644
--- a/keystone-moon/keystone/common/kvs/backends/memcached.py
+++ b/keystone-moon/keystone/common/kvs/backends/memcached.py
@@ -12,26 +12,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Keystone Memcached dogpile.cache backend implementation.
-"""
+"""Keystone Memcached dogpile.cache backend implementation."""
import random as _random
import time
from dogpile.cache import api
from dogpile.cache.backends import memcached
+from oslo_cache.backends import memcache_pool
from oslo_config import cfg
-from oslo_log import log
from six.moves import range
-from keystone.common.cache.backends import memcache_pool
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
NO_VALUE = api.NO_VALUE
random = _random.SystemRandom()
@@ -49,6 +45,7 @@ class MemcachedLock(object):
http://amix.dk/blog/post/19386
"""
+
def __init__(self, client_fn, key, lock_timeout, max_lock_attempts):
self.client_fn = client_fn
self.key = "_lock" + key
@@ -63,7 +60,9 @@ class MemcachedLock(object):
elif not wait:
return False
else:
- sleep_time = random.random()
+ sleep_time = random.random() # nosec : random is not used for
+ # crypto or security, it's just the time to delay between
+ # retries.
time.sleep(sleep_time)
raise exception.UnexpectedError(
_('Maximum lock attempts on %s occurred.') % self.key)
@@ -81,6 +80,7 @@ class MemcachedBackend(object):
time `memcached`, `bmemcached`, `pylibmc` and `pooled_memcached` are
valid).
"""
+
def __init__(self, arguments):
self._key_mangler = None
self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
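
The acquire loop shown in fragments above boils down to an atomic
memcached add() with jittered retries. A standalone sketch (names
hypothetical; assumes a client whose add(key, value, ttl) returns a
falsy value when the key already exists):

    import random
    import time

    def acquire_lock(client, key, lock_timeout, max_attempts, wait=True):
        for _attempt in range(max_attempts):
            if client.add(key, 1, lock_timeout):
                return True  # add() is atomic: the key was absent
            elif not wait:
                return False
            time.sleep(random.random())  # jittered delay, not crypto
        raise RuntimeError('Maximum lock attempts on %s occurred.' % key)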
diff --git a/keystone-moon/keystone/common/kvs/core.py b/keystone-moon/keystone/common/kvs/core.py
index 6ce7b318..064825f8 100644
--- a/keystone-moon/keystone/common/kvs/core.py
+++ b/keystone-moon/keystone/common/kvs/core.py
@@ -25,6 +25,7 @@ from dogpile.core import nameregistry
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
+from oslo_utils import reflection
from keystone import exception
from keystone.i18n import _
@@ -32,8 +33,8 @@ from keystone.i18n import _LI
from keystone.i18n import _LW
-__all__ = ['KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
- 'get_key_value_store']
+__all__ = ('KeyValueStore', 'KeyValueStoreLock', 'LockTimeout',
+ 'get_key_value_store')
BACKENDS_REGISTERED = False
@@ -66,6 +67,23 @@ def _register_backends():
BACKENDS_REGISTERED = True
+def sha1_mangle_key(key):
+ """Wrapper for dogpile's sha1_mangle_key.
+
+ Taken from oslo_cache.core._sha1_mangle_key
+
+    dogpile's sha1_mangle_key function expects an encoded string, so we
+    handle the different input types (text or bytes) before passing
+    the key through.
+ """
+ try:
+ key = key.encode('utf-8', errors='xmlcharrefreplace')
+ except (UnicodeError, AttributeError): # nosec
+ # NOTE(stevemar): if encoding fails just continue anyway.
+ pass
+ return dogpile_util.sha1_mangle_key(key)
+
+
class LockTimeout(exception.UnexpectedError):
debug_message_format = _('Lock Timeout occurred for key, %(target)s')
@@ -76,6 +94,7 @@ class KeyValueStore(object):
This manager also supports the concept of locking a given key resource to
allow for a guaranteed atomic transaction to the backend.
"""
+
def __init__(self, kvs_region):
self.locking = True
self._lock_timeout = 0
@@ -95,7 +114,6 @@ class KeyValueStore(object):
this instantiation
:param region_config_args: key-word args passed to the dogpile.cache
backend for configuration
- :return:
"""
if self.is_configured:
# NOTE(morganfainberg): It is a bad idea to reconfigure a backend,
@@ -130,12 +148,16 @@ class KeyValueStore(object):
if issubclass(pxy, proxy.ProxyBackend):
proxies.append(pxy)
else:
+ pxy_cls_name = reflection.get_class_name(
+ pxy, fully_qualified=False)
LOG.warning(_LW('%s is not a dogpile.proxy.ProxyBackend'),
- pxy.__name__)
+ pxy_cls_name)
for proxy_cls in reversed(proxies):
+ proxy_cls_name = reflection.get_class_name(
+ proxy_cls, fully_qualified=False)
LOG.info(_LI('Adding proxy \'%(proxy)s\' to KVS %(name)s.'),
- {'proxy': proxy_cls.__name__,
+ {'proxy': proxy_cls_name,
'name': self._region.name})
self._region.wrap(proxy_cls)
@@ -196,14 +218,14 @@ class KeyValueStore(object):
raise exception.ValidationError(
_('`key_mangler` option must be a function reference'))
else:
- LOG.info(_LI('Using default dogpile sha1_mangle_key as KVS '
- 'region %s key_mangler'), self._region.name)
- # NOTE(morganfainberg): Sane 'default' keymangler is the
- # dogpile sha1_mangle_key function. This ensures that unless
- # explicitly changed, we mangle keys. This helps to limit
- # unintended cases of exceeding cache-key in backends such
- # as memcache.
- self._region.key_mangler = dogpile_util.sha1_mangle_key
+ msg = _LI('Using default keystone.common.kvs.sha1_mangle_key '
+ 'as KVS region %s key_mangler')
+ LOG.info(msg, self._region.name)
+ # NOTE(morganfainberg): Use 'default' keymangler to ensure
+ # that unless explicitly changed, we mangle keys. This helps
+ # to limit unintended cases of exceeding cache-key in backends
+ # such as memcache.
+ self._region.key_mangler = sha1_mangle_key
self._set_keymangler_on_backend(self._region.key_mangler)
else:
LOG.info(_LI('KVS region %s key_mangler disabled.'),
@@ -251,6 +273,7 @@ class KeyValueStore(object):
class _LockWrapper(object):
"""weakref-capable threading.Lock wrapper."""
+
def __init__(self, lock_timeout):
self.lock = threading.Lock()
self.lock_timeout = lock_timeout
@@ -339,8 +362,9 @@ class KeyValueStore(object):
@contextlib.contextmanager
def _action_with_lock(self, key, lock=None):
- """Wrapper context manager to validate and handle the lock and lock
- timeout if passed in.
+ """Wrapper context manager.
+
+ Validates and handles the lock and lock timeout if passed in.
"""
if not isinstance(lock, KeyValueStoreLock):
# NOTE(morganfainberg): Locking only matters if a lock is passed in
@@ -362,11 +386,13 @@ class KeyValueStore(object):
class KeyValueStoreLock(object):
- """Basic KeyValueStoreLock context manager that hooks into the
- dogpile.cache backend mutex allowing for distributed locking on resources.
+ """Basic KeyValueStoreLock context manager.
- This is only a write lock, and will not prevent reads from occurring.
+ Hooks into the dogpile.cache backend mutex allowing for distributed locking
+ on resources. This is only a write lock, and will not prevent reads from
+ occurring.
"""
+
def __init__(self, mutex, key, locking_enabled=True, lock_timeout=0):
self.mutex = mutex
self.key = key
@@ -407,7 +433,9 @@ class KeyValueStoreLock(object):
def get_key_value_store(name, kvs_region=None):
- """Instantiate a new :class:`.KeyValueStore` or return a previous
+ """Retrieve key value store.
+
+ Instantiate a new :class:`.KeyValueStore` or return a previous
instantiation that has the same name.
"""
global KEY_VALUE_STORE_REGISTRY
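
To make the encoding guard concrete: an equivalent standalone version
of sha1_mangle_key (illustrative only, using hashlib directly in place
of dogpile's helper, which likewise returns a sha1 hexdigest) accepts
text or bytes and mangles both to the same short digest, keeping cache
keys within memcached's length limits:

    import hashlib

    def sha1_mangle_key(key):
        try:
            key = key.encode('utf-8', errors='xmlcharrefreplace')
        except (UnicodeError, AttributeError):
            pass  # already bytes, hash as-is
        return hashlib.sha1(key).hexdigest()

    assert (sha1_mangle_key(u'kvs:token:abc') ==
            sha1_mangle_key(u'kvs:token:abc'.encode('utf-8')))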
diff --git a/keystone-moon/keystone/common/ldap/core.py b/keystone-moon/keystone/common/ldap/core.py
index 6386ae2a..d94aa04c 100644
--- a/keystone-moon/keystone/common/ldap/core.py
+++ b/keystone-moon/keystone/common/ldap/core.py
@@ -20,12 +20,15 @@ import re
import sys
import weakref
+import ldap.controls
import ldap.filter
import ldappool
from oslo_log import log
+from oslo_utils import reflection
import six
from six.moves import map, zip
+from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
@@ -62,15 +65,17 @@ def utf8_encode(value):
:param value: A basestring
:returns: UTF-8 encoded version of value
- :raises: TypeError if value is not basestring
+ :raises TypeError: If value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
+ value_cls_name = reflection.get_class_name(
+ value, fully_qualified=False)
raise TypeError("value must be basestring, "
- "not %s" % value.__class__.__name__)
+ "not %s" % value_cls_name)
_utf8_decoder = codecs.getdecoder('utf-8')
@@ -84,7 +89,7 @@ def utf8_decode(value):
:param value: value to be returned as unicode
:returns: value as unicode
- :raises: UnicodeDecodeError for invalid UTF-8 encoding
+ :raises UnicodeDecodeError: for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
@@ -110,14 +115,15 @@ def py2ldap(val):
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
-
try:
return LDAP_VALUES[val]
- except KeyError:
+ except KeyError: # nosec
+ # It wasn't a boolean value, will try as an int instead.
pass
try:
return int(val)
- except ValueError:
+ except ValueError: # nosec
+ # It wasn't an int either, will try as utf8 instead.
pass
return utf8_decode(val)
@@ -239,7 +245,6 @@ def is_ava_value_equal(attribute_type, val1, val2):
that function apply here.
"""
-
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
@@ -259,7 +264,6 @@ def is_rdn_equal(rdn1, rdn2):
limitations of that function apply here.
"""
-
if len(rdn1) != len(rdn2):
return False
@@ -292,7 +296,6 @@ def is_dn_equal(dn1, dn2):
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
-
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
@@ -314,7 +317,6 @@ def dn_startswith(descendant_dn, dn):
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
-
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
@@ -419,6 +421,7 @@ class LDAPHandler(object):
derived classes.
"""
+
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@@ -625,6 +628,7 @@ def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
class MsgId(list):
"""Wrapper class to hold connection and msgid."""
+
pass
@@ -665,6 +669,7 @@ class PooledLDAPHandler(LDAPHandler):
the methods in this class.
"""
+
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
@@ -815,7 +820,6 @@ class PooledLDAPHandler(LDAPHandler):
which requested msgId and used it in result3 exits.
"""
-
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@@ -957,7 +961,7 @@ class KeystoneLDAPHandler(LDAPHandler):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
- 'attrs=%s attrsonly=%s'
+ 'attrs=%s attrsonly=%s '
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
@@ -1041,7 +1045,11 @@ class KeystoneLDAPHandler(LDAPHandler):
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
- py_result = convert_ldap_result(ldap_result)
+ # ldap_result returned from result3 is a tuple of
+        # (rtype, rdata, rmsgid, serverctrls). Only rdata is needed
+        # here.
+ rtype, rdata, rmsgid, serverctrls = ldap_result
+ py_result = convert_ldap_result(rdata)
return py_result
def modify_s(self, dn, modlist):
@@ -1221,7 +1229,7 @@ class BaseLdap(object):
try:
ldap_attr, attr_map = item.split(':')
except Exception:
- LOG.warn(_LW(
+ LOG.warning(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
@@ -1337,7 +1345,7 @@ class BaseLdap(object):
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
- LOG.warn(message)
+ LOG.warning(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
@@ -1354,7 +1362,8 @@ class BaseLdap(object):
continue
v = lower_res[map_attr.lower()]
- except KeyError:
+ except KeyError: # nosec
+ # Didn't find the attr, so don't add it.
pass
else:
try:
@@ -1383,7 +1392,8 @@ class BaseLdap(object):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
- except exception.NotFound:
+ except exception.NotFound: # nosec
+ # Didn't find it so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
@@ -1393,7 +1403,8 @@ class BaseLdap(object):
if values.get('id') is not None:
try:
self.get(values['id'])
- except exception.NotFound:
+ except exception.NotFound: # nosec
+ # Didn't find it, so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
@@ -1452,16 +1463,39 @@ class BaseLdap(object):
except IndexError:
return None
- def _ldap_get_all(self, ldap_filter=None):
+ def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
+ with self.get_connection() as conn:
+ try:
+ control = ldap.controls.libldap.SimplePagedResultsControl(
+ criticality=True,
+ size=sizelimit,
+ cookie='')
+ msgid = conn.search_ext(base, scope, filterstr, attrlist,
+ serverctrls=[control])
+ rdata = conn.result3(msgid)
+ return rdata
+ except ldap.NO_SUCH_OBJECT:
+ return []
+
+ @driver_hints.truncated
+ def _ldap_get_all(self, hints, ldap_filter=None):
query = u'(&%s(objectClass=%s)(%s=*))' % (
ldap_filter or self.ldap_filter or '',
self.object_class,
self.id_attr)
+ sizelimit = 0
+ attrs = list(set(([self.id_attr] +
+ list(self.attribute_mapping.values()) +
+ list(self.extra_attr_mapping.keys()))))
+ if hints.limit:
+ sizelimit = hints.limit['limit']
+ return self._ldap_get_limited(self.tree_dn,
+ self.LDAP_SCOPE,
+ query,
+ attrs,
+ sizelimit)
with self.get_connection() as conn:
try:
- attrs = list(set(([self.id_attr] +
- list(self.attribute_mapping.values()) +
- list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
@@ -1501,9 +1535,10 @@ class BaseLdap(object):
except IndexError:
raise self._not_found(name)
- def get_all(self, ldap_filter=None):
+ def get_all(self, ldap_filter=None, hints=None):
+ hints = hints or driver_hints.Hints()
return [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(ldap_filter)]
+ for x in self._ldap_get_all(hints, ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
@@ -1565,7 +1600,7 @@ class BaseLdap(object):
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
- def deleteTree(self, object_id):
+ def delete_tree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
@@ -1609,8 +1644,8 @@ class BaseLdap(object):
:param member_list_dn: DN of group to which the
member will be added.
- :raises: exception.Conflict: If the user was already a member.
- self.NotFound: If the group entry didn't exist.
+ :raises keystone.exception.Conflict: If the user was already a member.
+ :raises self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
@@ -1632,8 +1667,8 @@ class BaseLdap(object):
:param member_list_dn: DN of group from which the
member will be removed.
- :raises: self.NotFound: If the group entry didn't exist.
- ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
+ :raises self.NotFound: If the group entry didn't exist.
+ :raises ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
@@ -1666,11 +1701,12 @@ class BaseLdap(object):
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
- LOG.warn(_LW("When deleting entries for %(search_base)s, could not"
- " delete nonexistent entries %(entries)s%(dots)s"),
- {'search_base': search_base,
- 'entries': not_deleted_nodes[:3],
- 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
+ LOG.warning(_LW("When deleting entries for %(search_base)s, "
+ "could not delete nonexistent entries "
+ "%(entries)s%(dots)s"),
+ {'search_base': search_base,
+ 'entries': not_deleted_nodes[:3],
+ 'dots': '...' if len(not_deleted_nodes) > 3 else ''})
def filter_query(self, hints, query=None):
"""Applies filtering to a query.
@@ -1823,7 +1859,8 @@ class EnabledEmuMixIn(BaseLdap):
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
- query = '(%s=%s)' % (self.member_attribute, dn)
+ query = '(%s=%s)' % (self.member_attribute,
+ ldap.filter.escape_filter_chars(dn))
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
@@ -1857,7 +1894,8 @@ class EnabledEmuMixIn(BaseLdap):
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
- except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
+ except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): # nosec
+ # It's already gone, good.
pass
def create(self, values):
@@ -1880,11 +1918,12 @@ class EnabledEmuMixIn(BaseLdap):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
- def get_all(self, ldap_filter=None):
+ def get_all(self, ldap_filter=None, hints=None):
+ hints = hints or driver_hints.Hints()
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
# had to copy BaseLdap.get_all here to ldap_filter by DN
tenant_list = [self._ldap_res_to_model(x)
- for x in self._ldap_get_all(ldap_filter)
+ for x in self._ldap_get_all(hints, ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for tenant_ref in tenant_list:
@@ -1892,7 +1931,7 @@ class EnabledEmuMixIn(BaseLdap):
tenant_ref['id'], conn)
return tenant_list
else:
- return super(EnabledEmuMixIn, self).get_all(ldap_filter)
+ return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
@@ -1914,23 +1953,3 @@ class EnabledEmuMixIn(BaseLdap):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
-
-
-class ProjectLdapStructureMixin(object):
- """Project LDAP Structure shared between LDAP backends.
-
- This is shared between the resource and assignment LDAP backends.
-
- """
- DEFAULT_OU = 'ou=Groups'
- DEFAULT_STRUCTURAL_CLASSES = []
- DEFAULT_OBJECTCLASS = 'groupOfNames'
- DEFAULT_ID_ATTR = 'cn'
- NotFound = exception.ProjectNotFound
- notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
- options_name = 'project'
- attribute_options_names = {'name': 'name',
- 'description': 'desc',
- 'enabled': 'enabled',
- 'domain_id': 'domain_id'}
- immutable_attrs = ['name']
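
The new size-limited path in _ldap_get_limited() relies on python-ldap's
simple paged results control. A self-contained sketch (server URL, base
DN and filter are hypothetical):

    import ldap
    from ldap.controls.libldap import SimplePagedResultsControl

    conn = ldap.initialize('ldap://ldap.example.org')
    control = SimplePagedResultsControl(criticality=True, size=10,
                                        cookie='')
    msgid = conn.search_ext('ou=users,dc=example,dc=org',
                            ldap.SCOPE_SUBTREE, '(objectClass=person)',
                            ['cn'], serverctrls=[control])
    # result3() returns (rtype, rdata, rmsgid, serverctrls); only rdata,
    # the entry list capped at `size`, matters for truncation.
    rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)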
diff --git a/keystone-moon/keystone/common/manager.py b/keystone-moon/keystone/common/manager.py
index f98a1763..4ce9f2a6 100644
--- a/keystone-moon/keystone/common/manager.py
+++ b/keystone-moon/keystone/common/manager.py
@@ -13,12 +13,19 @@
# under the License.
import functools
+import inspect
+import time
+import types
from oslo_log import log
from oslo_log import versionutils
from oslo_utils import importutils
+from oslo_utils import reflection
+import six
import stevedore
+from keystone.i18n import _
+
LOG = log.getLogger(__name__)
@@ -70,17 +77,93 @@ def load_driver(namespace, driver_name, *args):
LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
# Ignore failure and continue on.
- @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
- in_favor_of='entrypoints',
- what='direct import of driver')
- def _load_using_import(driver_name, *args):
- return importutils.import_object(driver_name, *args)
+ driver = importutils.import_object(driver_name, *args)
+
+ msg = (_(
+ 'Direct import of driver %(name)r is deprecated as of Liberty in '
+ 'favor of its entrypoint from %(namespace)r and may be removed in '
+ 'N.') %
+ {'name': driver_name, 'namespace': namespace})
+ versionutils.report_deprecated_feature(LOG, msg)
+
+ return driver
- # For backwards-compatibility, an unregistered class reference can
- # still be used.
- return _load_using_import(driver_name, *args)
+class _TraceMeta(type):
+ """A metaclass that, in trace mode, will log entry and exit of methods.
+ This metaclass automatically wraps all methods on the class when
+ instantiated with a decorator that will log entry/exit from a method
+ when keystone is run in Trace log level.
+ """
+
+ @staticmethod
+ def wrapper(__f, __classname):
+ __argspec = inspect.getargspec(__f)
+ __fn_info = '%(module)s.%(classname)s.%(funcname)s' % {
+ 'module': inspect.getmodule(__f).__name__,
+ 'classname': __classname,
+ 'funcname': __f.__name__
+ }
+        # NOTE(morganfainberg): Omit "cls" and "self" when printing trace
+        # logs; the index can be calculated at wrap time rather than at
+        # runtime.
+ if __argspec.args and __argspec.args[0] in ('self', 'cls'):
+ __arg_idx = 1
+ else:
+ __arg_idx = 0
+
+ @functools.wraps(__f)
+ def wrapped(*args, **kwargs):
+ __exc = None
+ __t = time.time()
+ __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE
+ __ret_val = None
+ try:
+ if __do_trace:
+ LOG.trace('CALL => %s', __fn_info)
+ __ret_val = __f(*args, **kwargs)
+ except Exception as e: # nosec
+ __exc = e
+ raise
+ finally:
+ if __do_trace:
+ __subst = {
+ 'run_time': (time.time() - __t),
+ 'passed_args': ', '.join([
+ ', '.join([repr(a)
+ for a in args[__arg_idx:]]),
+ ', '.join(['%(k)s=%(v)r' % {'k': k, 'v': v}
+ for k, v in kwargs.items()]),
+ ]),
+ 'function': __fn_info,
+ 'exception': __exc,
+ 'ret_val': __ret_val,
+ }
+ if __exc is not None:
+ __msg = ('[%(run_time)ss] %(function)s '
+ '(%(passed_args)s) => raised '
+ '%(exception)r')
+ else:
+ # TODO(morganfainberg): find a way to indicate if this
+ # was a cache hit or cache miss.
+ __msg = ('[%(run_time)ss] %(function)s'
+ '(%(passed_args)s) => %(ret_val)r')
+ LOG.trace(__msg, __subst)
+ return __ret_val
+ return wrapped
+
+ def __new__(meta, classname, bases, class_dict):
+ final_cls_dict = {}
+ for attr_name, attr in class_dict.items():
+ # NOTE(morganfainberg): only wrap public instances and methods.
+ if (isinstance(attr, types.FunctionType) and
+ not attr_name.startswith('_')):
+ attr = _TraceMeta.wrapper(attr, classname)
+ final_cls_dict[attr_name] = attr
+ return type.__new__(meta, classname, bases, final_cls_dict)
+
+
+@six.add_metaclass(_TraceMeta)
class Manager(object):
"""Base class for intermediary request layer.
@@ -121,16 +204,15 @@ def create_legacy_driver(driver_class):
Driver = create_legacy_driver(CatalogDriverV8)
"""
-
module_name = driver_class.__module__
- class_name = driver_class.__name__
+ class_name = reflection.get_class_name(driver_class)
class Driver(driver_class):
@versionutils.deprecated(
as_of=versionutils.deprecated.LIBERTY,
what='%s.Driver' % module_name,
- in_favor_of='%s.%s' % (module_name, class_name),
+ in_favor_of=class_name,
remove_in=+2)
def __init__(self, *args, **kwargs):
super(Driver, self).__init__(*args, **kwargs)
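
Illustratively, any class built with the new metaclass gets its public
methods wrapped automatically; ExampleManager below is hypothetical:

    import six

    @six.add_metaclass(_TraceMeta)
    class ExampleManager(object):
        def get_thing(self, thing_id):  # public: wrapped for tracing
            return {'id': thing_id}

        def _internal(self):  # leading underscore: left untouched
            pass

    # With the effective log level at TRACE, each call emits
    # 'CALL => ...ExampleManager.get_thing' plus a closing line with the
    # runtime, passed arguments and return value (or raised exception).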
diff --git a/keystone-moon/keystone/common/models.py b/keystone-moon/keystone/common/models.py
index 0bb37319..de996522 100644
--- a/keystone-moon/keystone/common/models.py
+++ b/keystone-moon/keystone/common/models.py
@@ -21,6 +21,7 @@ Unless marked otherwise, all fields are strings.
class Model(dict):
"""Base model class."""
+
def __hash__(self):
return self['id'].__hash__()
@@ -151,6 +152,18 @@ class Role(Model):
optional_keys = tuple()
+class ImpliedRole(Model):
+ """ImpliedRole object.
+
+ Required keys:
+ prior_role_id
+ implied_role_id
+ """
+
+ required_keys = ('prior_role_id', 'implied_role_id')
+ optional_keys = tuple()
+
+
class Trust(Model):
"""Trust object.
diff --git a/keystone-moon/keystone/common/openssl.py b/keystone-moon/keystone/common/openssl.py
index be56b9cc..0bea6d8e 100644
--- a/keystone-moon/keystone/common/openssl.py
+++ b/keystone-moon/keystone/common/openssl.py
@@ -63,42 +63,35 @@ class BaseCertificateConfigure(object):
'cert_subject': conf_obj.cert_subject}
try:
- # OpenSSL 1.0 and newer support default_md = default, olders do not
- openssl_ver = environment.subprocess.Popen(
- ['openssl', 'version'],
- stdout=environment.subprocess.PIPE).stdout.read()
- if "OpenSSL 0." in openssl_ver:
+ # OpenSSL 1.0 and newer support default_md = default,
+ # older versions do not
+ openssl_ver = environment.subprocess.check_output( # the arguments
+ # are hardcoded and just check the openssl version
+ ['openssl', 'version'])
+ if b'OpenSSL 0.' in openssl_ver:
self.ssl_dictionary['default_md'] = 'sha1'
- except OSError:
- LOG.warn(_LW('Failed to invoke ``openssl version``, '
- 'assuming is v1.0 or newer'))
+ except environment.subprocess.CalledProcessError:
+ LOG.warning(_LW('Failed to invoke ``openssl version``, '
+                            'assuming it is v1.0 or newer'))
self.ssl_dictionary.update(kwargs)
def exec_command(self, command):
- to_exec = []
- for cmd_part in command:
- to_exec.append(cmd_part % self.ssl_dictionary)
+ to_exec = [part % self.ssl_dictionary for part in command]
LOG.info(_LI('Running command - %s'), ' '.join(to_exec))
- # NOTE(Jeffrey4l): Redirect both stdout and stderr to pipe, so the
- # output can be captured.
- # NOTE(Jeffrey4l): check_output is not compatible with Python 2.6.
- # So use Popen instead.
- process = environment.subprocess.Popen(
- to_exec,
- stdout=environment.subprocess.PIPE,
- stderr=environment.subprocess.STDOUT)
- output = process.communicate()[0]
- retcode = process.poll()
- if retcode:
- LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s'
+ try:
+ # NOTE(shaleh): use check_output instead of the simpler
+ # `check_call()` in order to log any output from an error.
+ environment.subprocess.check_output( # the arguments being passed
+ # in are defined in this file and trusted to build CAs, keys
+ # and certs
+ to_exec,
+ stderr=environment.subprocess.STDOUT)
+ except environment.subprocess.CalledProcessError as e:
+ LOG.error(_LE('Command %(to_exec)s exited with %(retcode)s '
'- %(output)s'),
{'to_exec': to_exec,
- 'retcode': retcode,
- 'output': output})
- e = environment.subprocess.CalledProcessError(retcode, to_exec[0])
- # NOTE(Jeffrey4l): Python 2.6 compatibility:
- # CalledProcessError did not have output keyword argument
- e.output = output
+ 'retcode': e.returncode,
+ 'output': e.output})
raise e
def clean_up_existing_files(self):
@@ -134,9 +127,8 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(self.ssl_config_file_name):
- ssl_config_file = open(self.ssl_config_file_name, 'w')
- ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
- ssl_config_file.close()
+ with open(self.ssl_config_file_name, 'w') as ssl_config_file:
+ ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
utils.set_permissions(self.ssl_config_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -144,9 +136,8 @@ class BaseCertificateConfigure(object):
index_file_name = os.path.join(self.conf_dir, 'index.txt')
if not file_exists(index_file_name):
- index_file = open(index_file_name, 'w')
- index_file.write('')
- index_file.close()
+ with open(index_file_name, 'w') as index_file:
+ index_file.write('')
utils.set_permissions(index_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -154,9 +145,8 @@ class BaseCertificateConfigure(object):
serial_file_name = os.path.join(self.conf_dir, 'serial')
if not file_exists(serial_file_name):
- index_file = open(serial_file_name, 'w')
- index_file.write('01')
- index_file.close()
+ with open(serial_file_name, 'w') as index_file:
+ index_file.write('01')
utils.set_permissions(serial_file_name,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
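
The error handling the rewrite adopts is stock standard-library
behaviour: with stderr redirected to STDOUT, CalledProcessError.output
carries the combined output. A minimal standalone illustration:

    import subprocess

    try:
        out = subprocess.check_output(['openssl', 'version'],
                                      stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # e.returncode and e.output replace the manual Popen/poll dance.
        print(e.returncode, e.output)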
diff --git a/keystone-moon/keystone/common/router.py b/keystone-moon/keystone/common/router.py
index ce4e834d..74e03ad2 100644
--- a/keystone-moon/keystone/common/router.py
+++ b/keystone-moon/keystone/common/router.py
@@ -19,12 +19,14 @@ from keystone.common import wsgi
class Router(wsgi.ComposableRouter):
def __init__(self, controller, collection_key, key,
resource_descriptions=None,
- is_entity_implemented=True):
+ is_entity_implemented=True,
+ method_template=None):
self.controller = controller
self.key = key
self.collection_key = collection_key
self._resource_descriptions = resource_descriptions
self._is_entity_implemented = is_entity_implemented
+ self.method_template = method_template or '%s'
def add_routes(self, mapper):
collection_path = '/%(collection_key)s' % {
@@ -36,27 +38,27 @@ class Router(wsgi.ComposableRouter):
mapper.connect(
collection_path,
controller=self.controller,
- action='create_%s' % self.key,
+ action=self.method_template % 'create_%s' % self.key,
conditions=dict(method=['POST']))
mapper.connect(
collection_path,
controller=self.controller,
- action='list_%s' % self.collection_key,
+ action=self.method_template % 'list_%s' % self.collection_key,
conditions=dict(method=['GET']))
mapper.connect(
entity_path,
controller=self.controller,
- action='get_%s' % self.key,
+ action=self.method_template % 'get_%s' % self.key,
conditions=dict(method=['GET']))
mapper.connect(
entity_path,
controller=self.controller,
- action='update_%s' % self.key,
+ action=self.method_template % 'update_%s' % self.key,
conditions=dict(method=['PATCH']))
mapper.connect(
entity_path,
controller=self.controller,
- action='delete_%s' % self.key,
+ action=self.method_template % 'delete_%s' % self.key,
conditions=dict(method=['DELETE']))
# Add the collection resource and entity resource to the resource
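
The new method_template hook only reshapes the mapped action names; for
example (controller and names hypothetical), a template of '%s_wrapped'
maps the routes like so:

    router = Router(controller, 'things', 'thing',
                    method_template='%s_wrapped')
    # POST  /things            -> controller.create_thing_wrapped
    # GET   /things            -> controller.list_things_wrapped
    # PATCH /things/{thing_id} -> controller.update_thing_wrapped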
diff --git a/keystone-moon/keystone/common/sql/core.py b/keystone-moon/keystone/common/sql/core.py
index ebd61bb7..cb026356 100644
--- a/keystone-moon/keystone/common/sql/core.py
+++ b/keystone-moon/keystone/common/sql/core.py
@@ -18,14 +18,13 @@ Before using this module, call initialize(). This has to be done before
CONF() because it sets up configuration options.
"""
-import contextlib
import functools
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db import options as db_options
+from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import models
-from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_serialization import jsonutils
import six
@@ -34,6 +33,7 @@ from sqlalchemy.ext import declarative
from sqlalchemy.orm.attributes import flag_modified, InstrumentedAttribute
from sqlalchemy import types as sql_types
+from keystone.common import driver_hints
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
@@ -68,7 +68,6 @@ flag_modified = flag_modified
def initialize():
"""Initialize the module."""
-
db_options.set_defaults(
CONF,
connection="sqlite:///keystone.db")
@@ -166,77 +165,47 @@ class ModelDictMixin(object):
return {name: getattr(self, name) for name in names}
-_engine_facade = None
+_main_context_manager = None
-def _get_engine_facade():
- global _engine_facade
+def _get_main_context_manager():
+ global _main_context_manager
- if not _engine_facade:
- _engine_facade = db_session.EngineFacade.from_config(CONF)
+ if not _main_context_manager:
+ _main_context_manager = enginefacade.transaction_context()
- return _engine_facade
+ return _main_context_manager
def cleanup():
- global _engine_facade
+ global _main_context_manager
- _engine_facade = None
+ _main_context_manager = None
-def get_engine():
- return _get_engine_facade().get_engine()
+_CONTEXT = None
-def get_session(expire_on_commit=False):
- return _get_engine_facade().get_session(expire_on_commit=expire_on_commit)
+def _get_context():
+ global _CONTEXT
+ if _CONTEXT is None:
+ # NOTE(dims): Delay the `threading.local` import to allow for
+ # eventlet/gevent monkeypatching to happen
+ import threading
+ _CONTEXT = threading.local()
+ return _CONTEXT
-@contextlib.contextmanager
-def transaction(expire_on_commit=False):
- """Return a SQLAlchemy session in a scoped transaction."""
- session = get_session(expire_on_commit=expire_on_commit)
- with session.begin():
- yield session
+def session_for_read():
+ return _get_main_context_manager().reader.using(_get_context())
-def truncated(f):
- """Ensure list truncation is detected in Driver list entity methods.
+def session_for_write():
+ return _get_main_context_manager().writer.using(_get_context())
- This is designed to wrap and sql Driver list_{entity} methods in order to
- calculate if the resultant list has been truncated. Provided a limit dict
- is found in the hints list, we increment the limit by one so as to ask the
- wrapped function for one more entity than the limit, and then once the list
- has been generated, we check to see if the original limit has been
- exceeded, in which case we truncate back to that limit and set the
- 'truncated' boolean to 'true' in the hints limit dict.
- """
- @functools.wraps(f)
- def wrapper(self, hints, *args, **kwargs):
- if not hasattr(hints, 'limit'):
- raise exception.UnexpectedError(
- _('Cannot truncate a driver call without hints list as '
- 'first parameter after self '))
-
- if hints.limit is None:
- return f(self, hints, *args, **kwargs)
-
- # A limit is set, so ask for one more entry than we need
- list_limit = hints.limit['limit']
- hints.set_limit(list_limit + 1)
- ref_list = f(self, hints, *args, **kwargs)
-
- # If we got more than the original limit then trim back the list and
- # mark it truncated. In both cases, make sure we set the limit back
- # to its original value.
- if len(ref_list) > list_limit:
- hints.set_limit(list_limit, truncated=True)
- return ref_list[:list_limit]
- else:
- hints.set_limit(list_limit)
- return ref_list
- return wrapper
+def truncated(f):
+ return driver_hints.truncated(f)
class _WontMatch(Exception):
@@ -325,42 +294,41 @@ def _filter(model, query, hints):
satisfied_filters.append(filter_)
return query.filter(query_term)
- def exact_filter(model, filter_, cumulative_filter_dict):
+ def exact_filter(model, query, filter_, satisfied_filters):
"""Applies an exact filter to a query.
:param model: the table model in question
+ :param query: query to apply filters to
:param dict filter_: describes this filter
- :param dict cumulative_filter_dict: describes the set of exact filters
- built up so far
-
+ :param list satisfied_filters: filter_ will be added if it is
+ satisfied.
+        :returns: query updated to add any exact filters we could
+            satisfy
"""
key = filter_['name']
col = getattr(model, key)
if isinstance(col.property.columns[0].type, sql.types.Boolean):
- cumulative_filter_dict[key] = (
- utils.attr_as_boolean(filter_['value']))
+ filter_val = utils.attr_as_boolean(filter_['value'])
else:
_WontMatch.check(filter_['value'], col)
- cumulative_filter_dict[key] = filter_['value']
+ filter_val = filter_['value']
+
+ satisfied_filters.append(filter_)
+ return query.filter(col == filter_val)
try:
- filter_dict = {}
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in model.attributes:
continue
if filter_['comparator'] == 'equals':
- exact_filter(model, filter_, filter_dict)
- satisfied_filters.append(filter_)
+ query = exact_filter(model, query, filter_,
+ satisfied_filters)
else:
query = inexact_filter(model, query, filter_,
satisfied_filters)
- # Apply any exact filters we built up
- if filter_dict:
- query = query.filter_by(**filter_dict)
-
# Remove satisfied filters, then the caller will know remaining filters
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
@@ -377,7 +345,7 @@ def _limit(query, hints):
:param query: query to apply filters to
:param hints: contains the list of filters and limit details.
- :returns updated query
+ :returns: updated query
"""
# NOTE(henry-nash): If we were to implement pagination, then we
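
Callers migrate from get_session()/transaction() to the new
reader/writer context managers; a hedged sketch (UserModel is a
hypothetical mapped class):

    with session_for_read() as session:
        users = session.query(UserModel).all()

    with session_for_write() as session:
        session.add(UserModel(id='abc'))
        # commit on success / rollback on exception is handled by the
        # enginefacade context manager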
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/README b/keystone-moon/keystone/common/sql/migrate_repo/README
index 6218f8ca..4ea8dd4f 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/README
+++ b/keystone-moon/keystone/common/sql/migrate_repo/README
@@ -1,4 +1,4 @@
This is a database migration repository.
More information at
-http://code.google.com/p/sqlalchemy-migrate/
+https://git.openstack.org/cgit/openstack/sqlalchemy-migrate
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
index f73dfc12..e69de29b 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/__init__.py
@@ -1,17 +0,0 @@
-# Copyright 2014 Mirantis.inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-DB_INIT_VERSION = 43
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/045_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/046_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/047_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
deleted file mode 100644
index 2a98fb90..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/049_placeholder.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
deleted file mode 100644
index c4b41580..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2014 Mirantis.inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-
-def upgrade(migrate_engine):
-
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- endpoint = sa.Table('endpoint', meta, autoload=True)
-
- # NOTE(i159): MySQL requires indexes on referencing columns, and those
- # indexes create automatically. That those indexes will have different
- # names, depending on version of MySQL used. We shoud make this naming
- # consistent, by reverting index name to a consistent condition.
- if any(i for i in endpoint.indexes if
- list(i.columns.keys()) == ['service_id']
- and i.name != 'service_id'):
- # NOTE(i159): by this action will be made re-creation of an index
- # with the new name. This can be considered as renaming under the
- # MySQL rules.
- sa.Index('service_id', endpoint.c.service_id).create()
-
- user_group_membership = sa.Table('user_group_membership',
- meta, autoload=True)
-
- if any(i for i in user_group_membership.indexes if
- list(i.columns.keys()) == ['group_id']
- and i.name != 'group_id'):
- sa.Index('group_id', user_group_membership.c.group_id).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
deleted file mode 100644
index 59720f6e..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/051_add_id_mapping.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.identity.mapping_backends import mapping
-
-
-MAPPING_TABLE = 'id_mapping'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- mapping_table = sql.Table(
- MAPPING_TABLE,
- meta,
- sql.Column('public_id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('local_id', sql.String(64), nullable=False),
- sql.Column('entity_type', sql.Enum(
- mapping.EntityType.USER,
- mapping.EntityType.GROUP,
- name='entity_type'),
- nullable=False),
- sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- mapping_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
deleted file mode 100644
index c2be48f4..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/053_endpoint_to_region_association.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Migrated the endpoint 'region' column to 'region_id.
-
-In addition to the rename, the new column is made a foreign key to the
-respective 'region' in the region table, ensuring that we auto-create
-any regions that are missing. Further, since the old region column
-was 255 chars, and the id column in the region table is 64 chars, the size
-of the id column in the region table is increased to match.
-
-To Upgrade:
-
-
-Region Table
-
-Increase the size of the if column in the region table
-
-Endpoint Table
-
-a. Add the endpoint region_id column, that is a foreign key to the region table
-b. For each endpoint
- i. Ensure there is matching region in region table, and if not, create it
- ii. Assign the id to the region_id column
-c. Remove the column region
-
-"""
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-
-def _migrate_to_region_id(migrate_engine, region_table, endpoint_table):
- endpoints = list(endpoint_table.select().execute())
-
- for endpoint in endpoints:
- if endpoint.region is None:
- continue
-
- region = list(region_table.select(
- whereclause=region_table.c.id == endpoint.region).execute())
- if len(region) == 1:
- region_id = region[0].id
- else:
- region_id = endpoint.region
- region = {'id': region_id,
- 'description': '',
- 'extra': '{}'}
- session = sessionmaker(bind=migrate_engine)()
- region_table.insert(region).execute()
- session.commit()
-
- new_values = {'region_id': region_id}
- f = endpoint_table.c.id == endpoint.id
- update = endpoint_table.update().where(f).values(new_values)
- migrate_engine.execute(update)
-
- migrate.ForeignKeyConstraint(
- columns=[endpoint_table.c.region_id],
- refcolumns=[region_table.c.id],
- name='fk_endpoint_region_id').create()
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table('region', meta, autoload=True)
- region_table.c.id.alter(type=sql.String(length=255))
- region_table.c.parent_region_id.alter(type=sql.String(length=255))
- endpoint_table = sql.Table('endpoint', meta, autoload=True)
- region_id_column = sql.Column('region_id',
- sql.String(length=255), nullable=True)
- region_id_column.create(endpoint_table)
-
- _migrate_to_region_id(migrate_engine, region_table, endpoint_table)
-
- endpoint_table.c.region.drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
deleted file mode 100644
index a7f327ea..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/055_add_indexes_to_token_table.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Add indexes to `user_id` and `trust_id` columns for the `token` table."""
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- token = sql.Table('token', meta, autoload=True)
-
- sql.Index('ix_token_user_id', token.c.user_id).create()
- sql.Index('ix_token_trust_id', token.c.trust_id).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
deleted file mode 100644
index 8bb40490..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/060_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
deleted file mode 100644
index ca9b3ce2..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/061_add_parent_project.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
-
-
-_PROJECT_TABLE_NAME = 'project'
-_PARENT_ID_COLUMN_NAME = 'parent_id'
-
-
-def list_constraints(project_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id}]
-
- return constraints
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- parent_id = sql.Column(_PARENT_ID_COLUMN_NAME, sql.String(64),
- nullable=True)
- project_table.create_column(parent_id)
-
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.add_constraints(list_constraints(project_table))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
deleted file mode 100644
index f7a69bb6..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/062_drop_assignment_role_fk.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-from keystone.common.sql import migration_helpers
-
-
-def list_constraints(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
- assignment_table = sqlalchemy.Table('assignment', meta, autoload=True)
- role_table = sqlalchemy.Table('role', meta, autoload=True)
-
- constraints = [{'table': assignment_table,
- 'fk_column': 'role_id',
- 'ref_column': role_table.c.id}]
- return constraints
-
-
-def upgrade(migrate_engine):
- # SQLite does not support dropping constraints, and querying the
- # constraints raises an exception
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.remove_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
deleted file mode 100644
index 637f2151..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/064_drop_user_and_group_fk.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-from keystone.common.sql import migration_helpers
-
-
-def list_constraints(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
- user_table = sqlalchemy.Table('user', meta, autoload=True)
- group_table = sqlalchemy.Table('group', meta, autoload=True)
- domain_table = sqlalchemy.Table('domain', meta, autoload=True)
-
- constraints = [{'table': user_table,
- 'fk_column': 'domain_id',
- 'ref_column': domain_table.c.id},
- {'table': group_table,
- 'fk_column': 'domain_id',
- 'ref_column': domain_table.c.id}]
- return constraints
-
-
-def upgrade(migrate_engine):
- # SQLite does not support dropping constraints, and querying the
- # constraints raises an exception
- if migrate_engine.name == 'sqlite':
- return
- migration_helpers.remove_constraints(list_constraints(migrate_engine))
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
deleted file mode 100644
index 63a86c11..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/065_add_domain_config.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-WHITELIST_TABLE = 'whitelisted_config'
-SENSITIVE_TABLE = 'sensitive_config'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- whitelist_table = sql.Table(
- WHITELIST_TABLE,
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- whitelist_table.create(migrate_engine, checkfirst=True)
-
- sensitive_table = sql.Table(
- SENSITIVE_TABLE,
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- sensitive_table.create(migrate_engine, checkfirst=True)
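
The two tables above back per-domain configuration overrides: one (domain_id, group, option) row per override, with the value stored as a JSON blob, and a separate table for sensitive options so secrets can be policed apart from whitelisted ones. A minimal sketch of storing one whitelisted override (the domain id, group, and option values are illustrative; the value is pre-encoded as JSON because reflection sees the column as plain text):

    import sqlalchemy as sql


    def set_whitelisted_option(migrate_engine, domain_id):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        whitelist = sql.Table('whitelisted_config', meta, autoload=True)

        # One row per (domain, group, option); a plain string value is
        # stored in its JSON-encoded form.
        whitelist.insert().values(
            domain_id=domain_id,
            group='identity',
            option='driver',
            value='"ldap"').execute()
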
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
deleted file mode 100644
index fe0cee88..00000000
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/066_fixup_service_name_value.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- service_table = sql.Table('service', meta, autoload=True)
- services = list(service_table.select().execute())
-
- for service in services:
- if service.extra is not None:
- extra_dict = jsonutils.loads(service.extra)
- else:
- extra_dict = {}
-
- # Skip records that already have a name set in 'extra'
- if extra_dict.get('name') is not None:
- continue
- # Default the name to empty string
- extra_dict['name'] = ''
- new_values = {
- 'extra': jsonutils.dumps(extra_dict),
- }
- f = service_table.c.id == service.id
- update = service_table.update().where(f).values(new_values)
- migrate_engine.execute(update)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
index 6f326ecf..a6dbed67 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/044_icehouse.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/067_kilo.py
@@ -12,18 +12,15 @@
import migrate
-from oslo_config import cfg
from oslo_log import log
import sqlalchemy as sql
-from sqlalchemy import orm
from keystone.assignment.backends import sql as assignment_sql
from keystone.common import sql as ks_sql
-from keystone.common.sql import migration_helpers
+from keystone.identity.mapping_backends import mapping as mapping_backend
LOG = log.getLogger(__name__)
-CONF = cfg.CONF
def upgrade(migrate_engine):
@@ -64,12 +61,12 @@ def upgrade(migrate_engine):
sql.Column('id', sql.String(length=64), primary_key=True),
sql.Column('legacy_endpoint_id', sql.String(length=64)),
sql.Column('interface', sql.String(length=8), nullable=False),
- sql.Column('region', sql.String(length=255)),
sql.Column('service_id', sql.String(length=64), nullable=False),
sql.Column('url', sql.Text, nullable=False),
sql.Column('extra', ks_sql.JsonBlob.impl),
sql.Column('enabled', sql.Boolean, nullable=False, default=True,
server_default='1'),
+ sql.Column('region_id', sql.String(length=255), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -100,6 +97,7 @@ def upgrade(migrate_engine):
sql.Column('description', sql.Text),
sql.Column('enabled', sql.Boolean),
sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('parent_id', sql.String(64), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -177,9 +175,9 @@ def upgrade(migrate_engine):
region = sql.Table(
'region',
meta,
- sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('id', sql.String(255), primary_key=True),
sql.Column('description', sql.String(255), nullable=False),
- sql.Column('parent_region_id', sql.String(64), nullable=True),
+ sql.Column('parent_region_id', sql.String(255), nullable=True),
sql.Column('extra', sql.Text()),
mysql_engine='InnoDB',
mysql_charset='utf8')
@@ -202,11 +200,45 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
+ mapping = sql.Table(
+ 'id_mapping',
+ meta,
+ sql.Column('public_id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('local_id', sql.String(64), nullable=False),
+ sql.Column('entity_type', sql.Enum(
+ mapping_backend.EntityType.USER,
+ mapping_backend.EntityType.GROUP,
+ name='entity_type'),
+ nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ domain_config_whitelist = sql.Table(
+ 'whitelisted_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ domain_config_sensitive = sql.Table(
+ 'sensitive_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
# create all tables
- tables = [credential, domain, endpoint, group,
- policy, project, role, service,
- token, trust, trust_role, user,
- user_group_membership, region, assignment]
+ tables = [credential, domain, endpoint, group, policy, project, role,
+ service, token, trust, trust_role, user, user_group_membership,
+ region, assignment, mapping, domain_config_whitelist,
+ domain_config_sensitive]
for table in tables:
try:
@@ -229,11 +261,22 @@ def upgrade(migrate_engine):
name='ixu_project_name_domain_id').create()
migrate.UniqueConstraint(domain.c.name,
name='ixu_domain_name').create()
+ migrate.UniqueConstraint(mapping.c.domain_id,
+ mapping.c.local_id,
+ mapping.c.entity_type,
+ name='domain_id').create()
# Indexes
sql.Index('ix_token_expires', token.c.expires).create()
sql.Index('ix_token_expires_valid', token.c.expires,
token.c.valid).create()
+ sql.Index('ix_actor_id', assignment.c.actor_id).create()
+ sql.Index('ix_token_user_id', token.c.user_id).create()
+ sql.Index('ix_token_trust_id', token.c.trust_id).create()
+ # NOTE(stevemar): The two indexes below were named 'service_id' and
+ # 'group_id' in 050_fk_consistent_indexes.py, and need to be preserved
+ sql.Index('service_id', endpoint.c.service_id).create()
+ sql.Index('group_id', user_group_membership.c.group_id).create()
fkeys = [
{'columns': [endpoint.c.service_id],
@@ -247,33 +290,28 @@ def upgrade(migrate_engine):
'references':[user.c.id],
'name': 'fk_user_group_membership_user_id'},
- {'columns': [user.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_user_domain_id'},
-
- {'columns': [group.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_group_domain_id'},
-
{'columns': [project.c.domain_id],
'references': [domain.c.id],
'name': 'fk_project_domain_id'},
- {'columns': [assignment.c.role_id],
- 'references': [role.c.id]}
+ {'columns': [endpoint.c.region_id],
+ 'references': [region.c.id],
+ 'name': 'fk_endpoint_region_id'},
+
+ {'columns': [project.c.parent_id],
+ 'references': [project.c.id],
+ 'name': 'project_parent_id_fkey'},
]
+ if migrate_engine.name == 'sqlite':
+ # NOTE(stevemar): We need to keep this FK constraint due to 073, but
+ # only for sqlite; once we collapse 073 we can remove this constraint
+ fkeys.append(
+ {'columns': [assignment.c.role_id],
+ 'references': [role.c.id],
+ 'name': 'fk_assignment_role_id'})
+
for fkey in fkeys:
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],
name=fkey.get('name')).create()
-
- # Create the default domain.
- session = orm.sessionmaker(bind=migrate_engine)()
- domain.insert(migration_helpers.get_default_domain()).execute()
- session.commit()
-
-
-def downgrade(migrate_engine):
- raise NotImplementedError('Downgrade to pre-Icehouse release db schema is '
- 'unsupported.')
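
The consolidated 067 baseline above, together with every later script in this repository, is normally applied via keystone-manage db_sync. A minimal sketch of driving the same repository directly through sqlalchemy-migrate's versioning API (the database URL and repository path are assumptions; a fresh database may additionally need to be stamped at the repository's base version):

    from migrate import exceptions as migrate_exceptions
    from migrate.versioning import api as versioning_api


    def sync_to_latest(db_url, repo_path):
        # Put the database under version control if it is not already,
        # then run every upgrade script in the repository in order.
        try:
            versioning_api.version_control(db_url, repo_path)
        except migrate_exceptions.DatabaseAlreadyControlledError:
            pass
        versioning_api.upgrade(db_url, repo_path)
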
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
index ffa210c4..205f809e 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
@@ -18,7 +18,7 @@ from keystone.assignment.backends import sql as assignment_sql
def upgrade(migrate_engine):
- """Inserts inherited column to assignment table PK contraints.
+ """Inserts inherited column to assignment table PK constraints.
For non-SQLite databases, it changes the constraint in the existing table.
@@ -26,7 +26,6 @@ def upgrade(migrate_engine):
assignment table with the new PK constraint and migrates the existing data.
"""
-
ASSIGNMENT_TABLE_NAME = 'assignment'
metadata = sql.MetaData()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/056_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/076_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/057_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/077_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/058_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/078_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
index 8bb40490..9f6e8415 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/059_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/079_placeholder.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Juno backports. Do not use this number for new
-# Kilo work. New Kilo work starts after all the placeholders.
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
def upgrade(migrate_engine):
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
new file mode 100644
index 00000000..9f6e8415
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/080_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Liberty backports. Do not use this number for new
+# Mitaka work. New Mitaka work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
new file mode 100644
index 00000000..a0c307d0
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
@@ -0,0 +1,54 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='endpoint_policy',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to endpoint_policy extension migration 1. Only
+ # update if it has not been run.
+ if extension_version >= 1:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ endpoint_policy_table = sql.Table(
+ 'policy_association',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('policy_id', sql.String(64),
+ nullable=False),
+ sql.Column('endpoint_id', sql.String(64),
+ nullable=True),
+ sql.Column('service_id', sql.String(64),
+ nullable=True),
+ sql.Column('region_id', sql.String(64),
+ nullable=True),
+ sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+
+ endpoint_policy_table.create(migrate_engine, checkfirst=True)
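
This extension-version guard recurs in migrations 082 through 085 below: each collapsed migration corresponds to the final migration of a legacy extension repository and must no-op when that extension already ran. A sketch of the shared shape as a helper (the helper name is illustrative; get_db_version is the same call used above):

    from keystone.common.sql import migration_helpers


    def extension_already_migrated(migrate_engine, extension, version):
        # True when the legacy extension repository already applied the
        # equivalent schema change, so the collapsed migration must no-op.
        try:
            extension_version = migration_helpers.get_db_version(
                extension=extension, engine=migrate_engine)
        except Exception:
            # The extension was never installed on this deployment.
            extension_version = 0
        return extension_version >= version

With it, the body of 081 would begin: if extension_already_migrated(migrate_engine, 'endpoint_policy', 1): return.
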
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
new file mode 100644
index 00000000..7e426373
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
@@ -0,0 +1,97 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+CONF = cfg.CONF
+_RELAY_STATE_PREFIX = 'relay_state_prefix'
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='federation',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to federation extension migration 8. Only
+ # update if it has not been run.
+ if extension_version >= 8:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ idp_table = sql.Table(
+ 'identity_provider',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ idp_table.create(migrate_engine, checkfirst=True)
+
+ federation_protocol_table = sql.Table(
+ 'federation_protocol',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ primary_key=True),
+ sql.Column('mapping_id', sql.String(64), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ federation_protocol_table.create(migrate_engine, checkfirst=True)
+
+ mapping_table = sql.Table(
+ 'mapping',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('rules', sql.Text(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ mapping_table.create(migrate_engine, checkfirst=True)
+
+ relay_state_prefix_default = CONF.saml.relay_state_prefix
+ sp_table = sql.Table(
+ 'service_provider',
+ meta,
+ sql.Column('auth_url', sql.String(256), nullable=False),
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('sp_url', sql.String(256), nullable=False),
+ sql.Column(_RELAY_STATE_PREFIX, sql.String(256), nullable=False,
+ server_default=relay_state_prefix_default),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ sp_table.create(migrate_engine, checkfirst=True)
+
+ idp_table = sql.Table('identity_provider', meta, autoload=True)
+ remote_id_table = sql.Table(
+ 'idp_remote_ids',
+ meta,
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE')),
+ sql.Column('remote_id', sql.String(255), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ remote_id_table.create(migrate_engine, checkfirst=True)
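
A sketch of how the federation tables above relate (all ids are illustrative): a federation_protocol row is keyed by the composite (id, idp_id), so the same protocol name can be registered once per identity provider, and it refers to a mapping by plain id.

    import sqlalchemy as sql


    def register_sample_idp(migrate_engine):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        mapping = sql.Table('mapping', meta, autoload=True)
        idp = sql.Table('identity_provider', meta, autoload=True)
        protocol = sql.Table('federation_protocol', meta, autoload=True)

        mapping.insert().values(id='saml_mapping', rules='[]').execute()
        idp.insert().values(id='acme_idp', enabled=True,
                            description='sample IdP').execute()
        # Composite PK (id, idp_id): 'saml2' can exist once per IdP.
        protocol.insert().values(id='saml2', idp_id='acme_idp',
                                 mapping_id='saml_mapping').execute()
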
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
new file mode 100644
index 00000000..5a859b4b
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
@@ -0,0 +1,75 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='oauth1',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to oauth extension migration 5. Only
+ # update if it has not been run.
+ if extension_version >= 5:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ consumer_table = sql.Table(
+ 'consumer',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('description', sql.String(64), nullable=True),
+ sql.Column('secret', sql.String(64), nullable=False),
+ sql.Column('extra', sql.Text(), nullable=False))
+ consumer_table.create(migrate_engine, checkfirst=True)
+
+ request_token_table = sql.Table(
+ 'request_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('request_secret', sql.String(64), nullable=False),
+ sql.Column('verifier', sql.String(64), nullable=True),
+ sql.Column('authorizing_user_id', sql.String(64), nullable=True),
+ sql.Column('requested_project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=True),
+ sql.Column('consumer_id', sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False, index=True),
+ sql.Column('expires_at', sql.String(64), nullable=True))
+ request_token_table.create(migrate_engine, checkfirst=True)
+
+ access_token_table = sql.Table(
+ 'access_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('access_secret', sql.String(64), nullable=False),
+ sql.Column('authorizing_user_id', sql.String(64),
+ nullable=False, index=True),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=False),
+ sql.Column('consumer_id', sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False, index=True),
+ sql.Column('expires_at', sql.String(64), nullable=True))
+ access_token_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
new file mode 100644
index 00000000..1a28a53c
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
@@ -0,0 +1,55 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='revoke',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to revoke extension migration 2. Only
+ # update if it has not been run.
+ if extension_version >= 2:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ service_table = sql.Table(
+ 'revocation_event',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True))
+
+ service_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
new file mode 100644
index 00000000..5790bd98
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
@@ -0,0 +1,70 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+def upgrade(migrate_engine):
+ try:
+ extension_version = migration_helpers.get_db_version(
+ extension='endpoint_filter',
+ engine=migrate_engine)
+ except Exception:
+ extension_version = 0
+
+ # This migration corresponds to endpoint_filter extension migration 2. Only
+ # update if it has not been run.
+ if extension_version >= 2:
+ return
+
+ # Upgrade operations go here. Don't create your own engine; bind
+ # migrate_engine to your metadata
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ EP_GROUP_ID = 'endpoint_group_id'
+ PROJECT_ID = 'project_id'
+
+ endpoint_filtering_table = sql.Table(
+ 'project_endpoint',
+ meta,
+ sql.Column(
+ 'endpoint_id',
+ sql.String(64),
+ primary_key=True,
+ nullable=False),
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ primary_key=True,
+ nullable=False))
+ endpoint_filtering_table.create(migrate_engine, checkfirst=True)
+
+ endpoint_group_table = sql.Table(
+ 'endpoint_group',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('description', sql.Text, nullable=True),
+ sql.Column('filters', sql.Text(), nullable=False))
+ endpoint_group_table.create(migrate_engine, checkfirst=True)
+
+ project_endpoint_group_table = sql.Table(
+ 'project_endpoint_group',
+ meta,
+ sql.Column(EP_GROUP_ID, sql.String(64),
+ sql.ForeignKey('endpoint_group.id'), nullable=False),
+ sql.Column(PROJECT_ID, sql.String(64), nullable=False),
+ sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID))
+ project_endpoint_group_table.create(migrate_engine, checkfirst=True)
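
A sketch of the association rows these endpoint-filter tables hold (ids are illustrative): a project can be tied to an individual endpoint via project_endpoint, or to an endpoint_group whose filters select endpoints dynamically via project_endpoint_group.

    import sqlalchemy as sql


    def associate_project_endpoint(migrate_engine, project_id, endpoint_id):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        project_endpoint = sql.Table('project_endpoint', meta, autoload=True)

        # The composite primary key makes each (endpoint, project) pair
        # unique.
        project_endpoint.insert().values(
            endpoint_id=endpoint_id, project_id=project_id).execute()
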
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
index 2a98fb90..2b115ea4 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/048_placeholder.py
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
@@ -1,3 +1,6 @@
+# Copyright 2015 Intel Corporation
+# All Rights Reserved
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,12 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Icehouse backports. Do not use this number for new
-# Juno work. New Juno work starts after all the placeholders.
-#
-# See blueprint reserved-db-migrations-icehouse and the related discussion:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+from migrate import UniqueConstraint
+from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
- pass
+ meta = MetaData(bind=migrate_engine)
+ trusts = Table('trust', meta, autoload=True)
+
+ UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id',
+ 'impersonation', 'expires_at', table=trusts,
+ name='duplicate_trust_constraint').create()
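
Creating this unique constraint fails if duplicate trusts already exist, so a pre-flight check can be useful before upgrading. A sketch that lists (trustor, trustee, project, impersonation, expires_at) tuples occurring more than once:

    import sqlalchemy as sql


    def find_duplicate_trusts(migrate_engine):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        trust = sql.Table('trust', meta, autoload=True)

        cols = [trust.c.trustor_user_id, trust.c.trustee_user_id,
                trust.c.project_id, trust.c.impersonation,
                trust.c.expires_at]
        # Any group with more than one row would violate the new
        # 'duplicate_trust_constraint'.
        query = (sql.select(cols + [sql.func.count(trust.c.id)])
                 .group_by(*cols)
                 .having(sql.func.count(trust.c.id) > 1))
        return list(query.execute())
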
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
new file mode 100644
index 00000000..7713ce8f
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+import sqlalchemy as sql
+
+
+ROLE_TABLE = 'role'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ implied_role = sql.Table(
+ 'implied_role', meta,
+ sql.Column('prior_role_id', sql.String(length=64), primary_key=True),
+ sql.Column(
+ 'implied_role_id', sql.String(length=64), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8')
+ implied_role.create()
+ role = sql.Table(ROLE_TABLE, meta, autoload=True)
+ fkeys = [
+ {'columns': [implied_role.c.prior_role_id],
+ 'references': [role.c.id]},
+ {'columns': [implied_role.c.implied_role_id],
+ 'references': [role.c.id]},
+ ]
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name')).create()
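
Each implied_role row encodes one inference rule: holding the prior role grants the implied role. A read-only sketch of expanding one level of inference for a given role id (the manager layer applies this transitively):

    import sqlalchemy as sql


    def roles_implied_by(migrate_engine, prior_role_id):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        implied = sql.Table('implied_role', meta, autoload=True)

        query = sql.select([implied.c.implied_role_id]).where(
            implied.c.prior_role_id == prior_role_id)
        return [row.implied_role_id for row in query.execute()]
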
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
new file mode 100644
index 00000000..8b792dfa
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+
+_ROLE_NAME_NEW_CONSTRAINT = 'ixu_role_name_domain_id'
+_ROLE_TABLE_NAME = 'role'
+_ROLE_NAME_COLUMN_NAME = 'name'
+_DOMAIN_ID_COLUMN_NAME = 'domain_id'
+_NULL_DOMAIN_ID = '<<null>>'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
+ domain_id = sql.Column(_DOMAIN_ID_COLUMN_NAME, sql.String(64),
+ nullable=False, server_default=_NULL_DOMAIN_ID)
+
+ # NOTE(morganfainberg): the `role_name` unique constraint is not
+ # guaranteed to have a fixed name, such as 'ixu_role_name', so we need to
+ # search for the correct constraint that only affects role_table.c.name
+ # and drop that constraint.
+ to_drop = None
+ if migrate_engine.name == 'mysql':
+ for c in role_table.indexes:
+ if (c.unique and len(c.columns) == 1 and
+ _ROLE_NAME_COLUMN_NAME in c.columns):
+ to_drop = c
+ break
+ else:
+ for c in role_table.constraints:
+ if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
+ to_drop = c
+ break
+
+ if to_drop is not None:
+ migrate.UniqueConstraint(role_table.c.name,
+ name=to_drop.name).drop()
+
+ # perform changes after constraint is dropped.
+ if 'domain_id' not in role_table.columns:
+ # Only create the column if it doesn't already exist.
+ role_table.create_column(domain_id)
+
+ migrate.UniqueConstraint(role_table.c.name,
+ role_table.c.domain_id,
+ name=_ROLE_NAME_NEW_CONSTRAINT).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
new file mode 100644
index 00000000..477c719a
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
@@ -0,0 +1,76 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+_PROJECT_TABLE_NAME = 'project'
+_DOMAIN_TABLE_NAME = 'domain'
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+
+
+def upgrade(migrate_engine):
+
+ def _generate_root_domain_project():
+ # Generate a project that will act as a root for all domains, in order
+ # for us to be able to use a FK constraint on domain_id. Projects
+ # acting as a domain will not reference this row as their parent_id,
+ # only as their domain_id.
+ #
+ # This special project is filtered out by the driver, so is never
+ # visible to the manager or API.
+
+ project_ref = {
+ 'id': NULL_DOMAIN_ID,
+ 'name': NULL_DOMAIN_ID,
+ 'enabled': False,
+ 'description': '',
+ 'domain_id': NULL_DOMAIN_ID,
+ 'is_domain': True,
+ 'parent_id': None,
+ 'extra': '{}'
+ }
+ return project_ref
+
+ def _generate_root_domain():
+ # Generate a similar root row for the domain table; this is an interim
+ # step that allows the current project domain_id FK to keep working.
+ #
+ # This special domain is filtered out by the driver, so is never
+ # visible to the manager or API.
+
+ domain_ref = {
+ 'id': NULL_DOMAIN_ID,
+ 'name': NULL_DOMAIN_ID,
+ 'enabled': False,
+ 'extra': '{}'
+ }
+ return domain_ref
+
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
+
+ root_domain = _generate_root_domain()
+ new_entry = domain_table.insert().values(**root_domain)
+ session.execute(new_entry)
+ session.commit()
+
+ root_domain_project = _generate_root_domain_project()
+ new_entry = project_table.insert().values(**root_domain_project)
+ session.execute(new_entry)
+ session.commit()
+
+ session.close()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
new file mode 100644
index 00000000..800ba47e
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user = sql.Table('user', meta, autoload=True)
+
+ local_user = sql.Table(
+ 'local_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('user_id', sql.String(64),
+ sql.ForeignKey(user.c.id, ondelete='CASCADE'),
+ nullable=False, unique=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.UniqueConstraint('domain_id', 'name'))
+ local_user.create(migrate_engine, checkfirst=True)
+
+ password = sql.Table(
+ 'password',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('local_user_id', sql.Integer,
+ sql.ForeignKey(local_user.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('password', sql.String(128), nullable=False))
+ password.create(migrate_engine, checkfirst=True)
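
Once migration 091 (below) backfills these tables, the lookup that previously read name and password straight off `user` becomes a join through local_user. A sketch of fetching a password hash by (domain_id, name) under the new layout:

    import sqlalchemy as sql


    def lookup_password_hash(migrate_engine, domain_id, name):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        local_user = sql.Table('local_user', meta, autoload=True)
        password = sql.Table('password', meta, autoload=True)

        query = sql.select([password.c.password]).select_from(
            local_user.join(
                password, password.c.local_user_id == local_user.c.id)
        ).where(sql.and_(local_user.c.domain_id == domain_id,
                         local_user.c.name == name))
        row = query.execute().fetchone()
        return row.password if row else None
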
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
new file mode 100644
index 00000000..1f41fd89
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
@@ -0,0 +1,66 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+from sqlalchemy import func
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user_table = sql.Table('user', meta, autoload=True)
+ local_user_table = sql.Table('local_user', meta, autoload=True)
+ password_table = sql.Table('password', meta, autoload=True)
+
+ # migrate data to local_user table
+ local_user_values = []
+ for row in user_table.select().execute():
+ # Skip rows that already exist in `local_user`; this can happen
+ # if we run into a partially migrated table due to
+ # bug #1549705.
+ filter_by = local_user_table.c.user_id == row['id']
+ user_count = sql.select([func.count()]).select_from(
+ local_user_table).where(filter_by).execute().fetchone()[0]
+ if user_count == 0:
+ local_user_values.append({'user_id': row['id'],
+ 'domain_id': row['domain_id'],
+ 'name': row['name']})
+ if local_user_values:
+ local_user_table.insert().values(local_user_values).execute()
+
+ # migrate data to password table
+ sel = (
+ sql.select([user_table, local_user_table], use_labels=True)
+ .select_from(user_table.join(local_user_table, user_table.c.id ==
+ local_user_table.c.user_id))
+ )
+ user_rows = sel.execute()
+ password_values = []
+ for row in user_rows:
+ if row['user_password']:
+ password_values.append({'local_user_id': row['local_user_id'],
+ 'password': row['user_password']})
+ if password_values:
+ password_table.insert().values(password_values).execute()
+
+ # remove domain_id and name unique constraint
+ if migrate_engine.name != 'sqlite':
+ migrate.UniqueConstraint(user_table.c.domain_id,
+ user_table.c.name,
+ name='ixu_user_name_domain_id').drop()
+
+ # drop user columns
+ user_table.c.domain_id.drop()
+ user_table.c.name.drop()
+ user_table.c.password.drop()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
new file mode 100644
index 00000000..5e841899
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import migrate
+import sqlalchemy as sql
+
+
+ROLE_TABLE = 'role'
+IMPLIED_ROLE_TABLE = 'implied_role'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role = sql.Table(ROLE_TABLE, meta, autoload=True)
+ implied_role = sql.Table(IMPLIED_ROLE_TABLE, meta, autoload=True)
+
+ fkeys = [
+ {'columns': [implied_role.c.prior_role_id],
+ 'references': [role.c.id]},
+ {'columns': [implied_role.c.implied_role_id],
+ 'references': [role.c.id]},
+ ]
+
+ # NOTE(stevemar): We need to divide these into two separate loops;
+ # otherwise they may clobber each other and we end up with only one
+ # foreign key.
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name')).drop()
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name'),
+ ondelete="CASCADE").create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
new file mode 100644
index 00000000..f6bba7d9
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
@@ -0,0 +1,125 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import sqlalchemy as sql
+
+from keystone.common.sql import migration_helpers
+
+
+_PROJECT_TABLE_NAME = 'project'
+_DOMAIN_TABLE_NAME = 'domain'
+_PARENT_ID_COLUMN_NAME = 'parent_id'
+_DOMAIN_ID_COLUMN_NAME = 'domain_id'
+
+# Above the driver level, the domain_id of a project acting as a domain is
+# None. However, in order to enable sql integrity constraints to still operate
+# on this column, we create a special "root of all domains" row, with an ID of
+# NULL_DOMAIN_ID, which all projects acting as a domain reference in their
+# domain_id attribute. This special row, as well as NULL_DOMAIN_ID, are never
+# exposed outside of sql driver layer.
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+
+
+def list_existing_project_constraints(project_table, domain_table):
+ constraints = [{'table': project_table,
+ 'fk_column': _PARENT_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id},
+ {'table': project_table,
+ 'fk_column': _DOMAIN_ID_COLUMN_NAME,
+ 'ref_column': domain_table.c.id}]
+
+ return constraints
+
+
+def list_new_project_constraints(project_table):
+ constraints = [{'table': project_table,
+ 'fk_column': _PARENT_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id},
+ {'table': project_table,
+ 'fk_column': _DOMAIN_ID_COLUMN_NAME,
+ 'ref_column': project_table.c.id}]
+
+ return constraints
+
+
+def upgrade(migrate_engine):
+
+ def _project_from_domain(domain):
+ # Creates a project dict with is_domain=True from the provided
+ # domain.
+
+ description = None
+ extra = {}
+ if domain.extra is not None:
+ # 'description' property is an extra attribute in domains but a
+ # first class attribute in projects
+ extra = json.loads(domain.extra)
+ description = extra.pop('description', None)
+
+ return {
+ 'id': domain.id,
+ 'name': domain.name,
+ 'enabled': domain.enabled,
+ 'description': description,
+ 'domain_id': NULL_DOMAIN_ID,
+ 'is_domain': True,
+ 'parent_id': None,
+ 'extra': json.dumps(extra)
+ }
+
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+
+ project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
+ domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
+
+ # NOTE(htruta): Remove the parent_id constraint during the migration,
+ # because for every root project inside a domain we will set the
+ # project's parent_id to its domain_id. We re-add the constraint at
+ # the end of this method. We also remove the domain_id constraint,
+ # which will be recreated as a FK to the project id at the end.
+ migration_helpers.remove_constraints(
+ list_existing_project_constraints(project_table, domain_table))
+
+ # For each domain, create a project acting as a domain. We ignore the
+ # "root of all domains" row, since we already have one of these in the
+ # project table.
+ domains = list(domain_table.select().execute())
+ for domain in domains:
+ if domain.id == NULL_DOMAIN_ID:
+ continue
+ is_domain_project = _project_from_domain(domain)
+ new_entry = project_table.insert().values(**is_domain_project)
+ session.execute(new_entry)
+ session.commit()
+
+ # For each project that has no parent (i.e. a top-level project), update
+ # its parent_id to point at the project acting as its domain. We ignore
+ # the "root of all domains" row, since its parent_id must always be None.
+ projects = list(project_table.select().execute())
+ for project in projects:
+ if (project.parent_id is not None or project.is_domain or
+ project.id == NULL_DOMAIN_ID):
+ continue
+ values = {'parent_id': project.domain_id}
+ update = project_table.update().where(
+ project_table.c.id == project.id).values(values)
+ session.execute(update)
+ session.commit()
+
+ migration_helpers.add_constraints(
+ list_new_project_constraints(project_table))
+
+ session.close()
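
A post-upgrade sanity check sketch for the conversion above: every row in domain, except the root sentinel, should now have a twin project row with the same id and is_domain set.

    import sqlalchemy as sql

    NULL_DOMAIN_ID = '<<keystone.domain.root>>'


    def domains_missing_projects(migrate_engine):
        meta = sql.MetaData()
        meta.bind = migrate_engine
        project = sql.Table('project', meta, autoload=True)
        domain = sql.Table('domain', meta, autoload=True)

        # Domains with no corresponding project acting as a domain.
        twin = sql.exists().where(sql.and_(
            project.c.id == domain.c.id,
            project.c.is_domain == sql.true()))
        query = sql.select([domain.c.id]).where(sql.and_(
            domain.c.id != NULL_DOMAIN_ID, sql.not_(twin)))
        return [row.id for row in query.execute()]
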
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
new file mode 100644
index 00000000..6fd3f051
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
@@ -0,0 +1,43 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ user_table = sql.Table('user', meta, autoload=True)
+ idp_table = sql.Table('identity_provider', meta, autoload=True)
+ protocol_table = sql.Table('federation_protocol', meta, autoload=True)
+
+ federated_table = sql.Table(
+ 'federated_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('user_id', sql.String(64),
+ sql.ForeignKey(user_table.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('idp_id', sql.String(64),
+ sql.ForeignKey(idp_table.c.id, ondelete='CASCADE'),
+ nullable=False),
+ sql.Column('protocol_id', sql.String(64), nullable=False),
+ sql.Column('unique_id', sql.String(255), nullable=False),
+ sql.Column('display_name', sql.String(255), nullable=True),
+ sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'))
+ federated_table.create(migrate_engine, checkfirst=True)
+
+ migrate.ForeignKeyConstraint(
+ columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
+ refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).create()
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
new file mode 100644
index 00000000..7a75f7b1
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+    # You can specify primary keys when creating tables; however, adding an
+    # auto-increment integer primary key to an existing table is not
+    # supported across all engines. Thus, the approach is to:
+    # (1) create a new revocation_event table with an integer pkey,
+    # (2) migrate data from the old table to the new table,
+    # (3) drop the old revocation_event table, and
+    # (4) rename the new table to revocation_event.
+ revocation_table = sql.Table('revocation_event', meta, autoload=True)
+
+ revocation_table_new = sql.Table(
+ 'revocation_event_new',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True))
+ revocation_table_new.create(migrate_engine, checkfirst=True)
+
+    # Copy the existing rows into the new table. The column list is given
+    # explicitly so the old string `id` column is dropped in favor of the
+    # new auto-increment integer key. Note that building the
+    # INSERT ... SELECT statement does not run it; it must be executed
+    # explicitly.
+    copied_columns = ['domain_id', 'project_id', 'user_id', 'role_id',
+                      'trust_id', 'consumer_id', 'access_token_id',
+                      'issued_before', 'expires_at', 'revoked_at',
+                      'audit_id', 'audit_chain_id']
+    ins = revocation_table_new.insert().from_select(
+        copied_columns,
+        sql.select([revocation_table.c[name] for name in copied_columns]))
+    migrate_engine.execute(ins)
+
+ revocation_table.drop()
+ revocation_table_new.rename('revocation_event')
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py b/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
new file mode 100644
index 00000000..0156de21
--- /dev/null
+++ b/keystone-moon/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import sqlalchemy as sql
+
+_ROLE_TABLE_NAME = 'role'
+_ROLE_NAME_COLUMN_NAME = 'name'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
+
+ # NOTE(morganfainberg): the `role_name` unique constraint is not
+ # guaranteed to be named 'ixu_role_name', so we need to search for the
+ # correct constraint that only affects role_table.c.name and drop
+ # that constraint.
+ #
+ # This is an idempotent change that reflects the fix to migration
+ # 88 if the role_name unique constraint was not named consistently and
+ # someone manually fixed the migrations / db without dropping the
+ # old constraint.
+ to_drop = None
+ if migrate_engine.name == 'mysql':
+ for c in role_table.indexes:
+ if (c.unique and len(c.columns) == 1 and
+ _ROLE_NAME_COLUMN_NAME in c.columns):
+ to_drop = c
+ break
+ else:
+ for c in role_table.constraints:
+ if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
+ to_drop = c
+ break
+
+ if to_drop is not None:
+ migrate.UniqueConstraint(role_table.c.name,
+ name=to_drop.name).drop()
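
Note: the MySQL branch exists because MySQL implements UNIQUE constraints as
unique indexes, so they surface through `Table.indexes` rather than
`Table.constraints`. A quick way to see what a given engine reports, sketched
here for illustration (works where the dialect supports reflecting
constraints):

    import sqlalchemy as sql

    def show_role_name_uniques(engine):
        # On MySQL the unique constraint on role.name appears as a unique
        # index; on PostgreSQL it appears as a reflected unique constraint.
        inspector = sql.inspect(engine)
        print(inspector.get_indexes('role'))
        print(inspector.get_unique_constraints('role'))
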
diff --git a/keystone-moon/keystone/common/sql/migration_helpers.py b/keystone-moon/keystone/common/sql/migration_helpers.py
index aaa59f70..40c1fbb5 100644
--- a/keystone-moon/keystone/common/sql/migration_helpers.py
+++ b/keystone-moon/keystone/common/sql/migration_helpers.py
@@ -21,37 +21,25 @@ import migrate
from migrate import exceptions
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
-from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import sqlalchemy
from keystone.common import sql
-from keystone.common.sql import migrate_repo
from keystone import contrib
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
-DEFAULT_EXTENSIONS = ['endpoint_filter',
- 'endpoint_policy',
- 'federation',
- 'oauth1',
- 'revoke',
- ]
-
-
-def get_default_domain():
- # Return the reference used for the default domain structure during
- # sql migrations.
- return {
- 'id': CONF.identity.default_domain_id,
- 'name': 'Default',
- 'enabled': True,
- 'extra': jsonutils.dumps({'description': 'Owns users and tenants '
- '(i.e. projects) available '
- 'on Identity API v2.'})}
+DEFAULT_EXTENSIONS = []
+
+MIGRATED_EXTENSIONS = ['endpoint_policy',
+ 'federation',
+ 'oauth1',
+ 'revoke',
+ 'endpoint_filter'
+ ]
# Different RDBMSs use different schemes for naming the Foreign Key
@@ -117,9 +105,8 @@ def rename_tables_with_constraints(renames, constraints, engine):
`renames` is a dict, mapping {'to_table_name': from_table, ...}
"""
-
if engine.name != 'sqlite':
- # Sqlite doesn't support constraints, so nothing to remove.
+ # SQLite doesn't support constraints, so nothing to remove.
remove_constraints(constraints)
for to_table_name in renames:
@@ -141,11 +128,34 @@ def find_migrate_repo(package=None, repo_name='migrate_repo'):
def _sync_common_repo(version):
abs_path = find_migrate_repo()
- init_version = migrate_repo.DB_INIT_VERSION
- engine = sql.get_engine()
- _assert_not_schema_downgrade(version=version)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
+ init_version = get_init_version()
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
+ _assert_not_schema_downgrade(version=version)
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version, sanity_check=False)
+
+
+def get_init_version(abs_path=None):
+ """Get the initial version of a migrate repository
+
+ :param abs_path: Absolute path to migrate repository.
+ :return: initial version number or None, if DB is empty.
+ """
+ if abs_path is None:
+ abs_path = find_migrate_repo()
+
+ repo = migrate.versioning.repository.Repository(abs_path)
+
+ # Sadly, Repository has a `latest` but not an `oldest`.
+ # The value is a VerNum object which needs to be converted into an int.
+ oldest = int(min(repo.versions.versions))
+
+ if oldest < 1:
+ return None
+
+    # The initial version is one less than the oldest migration script.
+ return oldest - 1
def _assert_not_schema_downgrade(extension=None, version=None):
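
Note: concretely, `get_init_version` derives the init version from the
oldest migration script on disk, so a repository whose lowest-numbered
script is, say, `067_xxx.py` yields 66, and a repository whose scripts start
at 0 yields None. A sketch of the underlying calls (the repository path is
hypothetical):

    import migrate.versioning.repository

    repo = migrate.versioning.repository.Repository(
        '/path/to/keystone/common/sql/migrate_repo')
    oldest = int(min(repo.versions.versions))  # VerNum converted to int
    init_version = oldest - 1                  # e.g. 67 -> 66
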
@@ -153,40 +163,46 @@ def _assert_not_schema_downgrade(extension=None, version=None):
try:
current_ver = int(six.text_type(get_db_version(extension)))
if int(version) < current_ver:
- raise migration.exception.DbMigrationError()
- except exceptions.DatabaseNotControlledError:
+ raise migration.exception.DbMigrationError(
+ _("Unable to downgrade schema"))
+ except exceptions.DatabaseNotControlledError: # nosec
# NOTE(morganfainberg): The database is not controlled, this action
# cannot be a downgrade.
pass
def _sync_extension_repo(extension, version):
- init_version = 0
- engine = sql.get_engine()
+ if extension in MIGRATED_EXTENSIONS:
+ raise exception.MigrationMovedFailure(extension=extension)
+
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
- try:
- package_name = '.'.join((contrib.__name__, extension))
- package = importutils.import_module(package_name)
- except ImportError:
- raise ImportError(_("%s extension does not exist.")
- % package_name)
- try:
- abs_path = find_migrate_repo(package)
try:
- migration.db_version_control(sql.get_engine(), abs_path)
- # Register the repo with the version control API
- # If it already knows about the repo, it will throw
- # an exception that we can safely ignore
- except exceptions.DatabaseAlreadyControlledError:
- pass
- except exception.MigrationNotProvided as e:
- print(e)
- sys.exit(1)
+ package_name = '.'.join((contrib.__name__, extension))
+ package = importutils.import_module(package_name)
+ except ImportError:
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
+ try:
+ abs_path = find_migrate_repo(package)
+ try:
+ migration.db_version_control(engine, abs_path)
+ # Register the repo with the version control API
+ # If it already knows about the repo, it will throw
+ # an exception that we can safely ignore
+ except exceptions.DatabaseAlreadyControlledError: # nosec
+ pass
+ except exception.MigrationNotProvided as e:
+ print(e)
+ sys.exit(1)
+
+ _assert_not_schema_downgrade(extension=extension, version=version)
- _assert_not_schema_downgrade(extension=extension, version=version)
+ init_version = get_init_version(abs_path=abs_path)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
+ migration.db_sync(engine, abs_path, version=version,
+ init_version=init_version, sanity_check=False)
def sync_database_to_version(extension=None, version=None):
@@ -203,8 +219,10 @@ def sync_database_to_version(extension=None, version=None):
def get_db_version(extension=None):
if not extension:
- return migration.db_version(sql.get_engine(), find_migrate_repo(),
- migrate_repo.DB_INIT_VERSION)
+ with sql.session_for_write() as session:
+ return migration.db_version(session.get_bind(),
+ find_migrate_repo(),
+ get_init_version())
try:
package_name = '.'.join((contrib.__name__, extension))
@@ -213,8 +231,9 @@ def get_db_version(extension=None):
raise ImportError(_("%s extension does not exist.")
% package_name)
- return migration.db_version(
- sql.get_engine(), find_migrate_repo(package), 0)
+ with sql.session_for_write() as session:
+ return migration.db_version(
+ session.get_bind(), find_migrate_repo(package), 0)
def print_db_version(extension=None):
diff --git a/keystone-moon/keystone/common/tokenless_auth.py b/keystone-moon/keystone/common/tokenless_auth.py
index 7388b83c..fd9c1592 100644
--- a/keystone-moon/keystone/common/tokenless_auth.py
+++ b/keystone-moon/keystone/common/tokenless_auth.py
@@ -20,9 +20,9 @@ from oslo_log import log
from keystone.auth import controllers
from keystone.common import dependency
-from keystone.contrib.federation import constants as federation_constants
-from keystone.contrib.federation import utils
from keystone import exception
+from keystone.federation import constants as federation_constants
+from keystone.federation import utils
from keystone.i18n import _
@@ -45,7 +45,6 @@ class TokenlessAuthHelper(object):
SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
:type env: dict
"""
-
self.env = env
def _build_scope_info(self):
@@ -86,13 +85,13 @@ class TokenlessAuthHelper(object):
def get_scope(self):
auth = {}
- # NOTE(chioleong): auth methods here are insignificant because
+ # NOTE(chioleong): Auth methods here are insignificant because
# we only care about using auth.controllers.AuthInfo
# to validate the scope information. Therefore,
# we don't provide any identity.
auth['scope'] = self._build_scope_info()
- # NOTE(chioleong): we'll let AuthInfo validate the scope for us
+ # NOTE(chioleong): We'll let AuthInfo validate the scope for us
auth_info = controllers.AuthInfo.create({}, auth, scope_only=True)
return auth_info.get_scope()
@@ -189,5 +188,5 @@ class TokenlessAuthHelper(object):
raise exception.TokenlessAuthConfigError(
issuer_attribute=CONF.tokenless_auth.issuer_attribute)
- hashed_idp = hashlib.sha256(idp)
+ hashed_idp = hashlib.sha256(idp.encode('utf-8'))
return hashed_idp.hexdigest()
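
Note: the `.encode('utf-8')` fix above matters because `hashlib` on Python 3
accepts only bytes; passing a unicode string raises TypeError. A small
illustration (the IdP value is made up):

    import hashlib

    idp = 'my-identity-provider'  # illustrative issuer value
    # hashlib.sha256(idp) raises TypeError on Python 3; encoding first
    # works on both Python 2 and Python 3.
    hashed_idp = hashlib.sha256(idp.encode('utf-8'))
    print(hashed_idp.hexdigest())
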
diff --git a/keystone-moon/keystone/common/utils.py b/keystone-moon/keystone/common/utils.py
index 48336af7..5438ad43 100644
--- a/keystone-moon/keystone/common/utils.py
+++ b/keystone-moon/keystone/common/utils.py
@@ -22,10 +22,12 @@ import grp
import hashlib
import os
import pwd
+import uuid
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
+from oslo_utils import reflection
from oslo_utils import strutils
from oslo_utils import timeutils
import passlib.hash
@@ -42,6 +44,26 @@ CONF = cfg.CONF
LOG = log.getLogger(__name__)
+# NOTE(stevermar): This UUID must stay the same, forever, across
+# all of keystone to preserve its value as a URN namespace, which is
+# used for ID transformation.
+RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153')
+
+
+def resource_uuid(value):
+ """Converts input to valid UUID hex digits."""
+ try:
+ uuid.UUID(value)
+ return value
+ except ValueError:
+ if len(value) <= 64:
+ if six.PY2 and isinstance(value, six.text_type):
+ value = value.encode('utf-8')
+ return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex
+        raise ValueError(_('Length of transformable resource id > 64, '
+                           'which is the maximum number of characters '
+                           'allowed'))
+
+
def flatten_dict(d, parent_key=''):
"""Flatten a nested dictionary
@@ -81,6 +103,7 @@ def read_cached_file(filename, cache_info, reload_func=None):
class SmarterEncoder(jsonutils.json.JSONEncoder):
"""Help for JSON encoding dict-like objects."""
+
def default(self, obj):
if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
return dict(obj.iteritems())
@@ -89,6 +112,7 @@ class SmarterEncoder(jsonutils.json.JSONEncoder):
class PKIEncoder(SmarterEncoder):
"""Special encoder to make token JSON a bit shorter."""
+
item_separator = ','
key_separator = ':'
@@ -113,6 +137,8 @@ def verify_length_and_trunc_password(password):
def hash_access_key(access):
hash_ = hashlib.sha256()
+ if not isinstance(access, six.binary_type):
+ access = access.encode('utf-8')
hash_.update(access)
return hash_.hexdigest()
@@ -206,7 +232,7 @@ def auth_str_equal(provided, known):
:params provided: the first string
:params known: the second string
- :return: True if the strings are equal.
+ :returns: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
@@ -271,10 +297,9 @@ def get_unix_user(user=None):
:param object user: string, int or None specifying the user to
lookup.
- :return: tuple of (uid, name)
+ :returns: tuple of (uid, name)
"""
-
if isinstance(user, six.string_types):
try:
user_info = pwd.getpwnam(user)
@@ -295,8 +320,10 @@ def get_unix_user(user=None):
elif user is None:
user_info = pwd.getpwuid(os.geteuid())
else:
+ user_cls_name = reflection.get_class_name(user,
+ fully_qualified=False)
raise TypeError('user must be string, int or None; not %s (%r)' %
- (user.__class__.__name__, user))
+ (user_cls_name, user))
return user_info.pw_uid, user_info.pw_name
@@ -328,10 +355,9 @@ def get_unix_group(group=None):
:param object group: string, int or None specifying the group to
lookup.
- :return: tuple of (gid, name)
+ :returns: tuple of (gid, name)
"""
-
if isinstance(group, six.string_types):
try:
group_info = grp.getgrnam(group)
@@ -354,8 +380,10 @@ def get_unix_group(group=None):
elif group is None:
group_info = grp.getgrgid(os.getegid())
else:
+ group_cls_name = reflection.get_class_name(group,
+ fully_qualified=False)
raise TypeError('group must be string, int or None; not %s (%r)' %
- (group.__class__.__name__, group))
+ (group_cls_name, group))
return group_info.gr_gid, group_info.gr_name
@@ -380,7 +408,6 @@ def set_permissions(path, mode=None, user=None, group=None, log=None):
if None no logging is performed.
"""
-
if user is None:
user_uid, user_name = None, None
else:
@@ -447,7 +474,6 @@ def make_dirs(path, mode=None, user=None, group=None, log=None):
if None no logging is performed.
"""
-
if log:
if mode is None:
mode_string = str(mode)
@@ -483,7 +509,6 @@ _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
-
# Python provides a similar instance method for datetime.datetime objects
# called isoformat(). The format of the strings generated by isoformat()
# have a couple of problems:
@@ -515,7 +540,7 @@ def get_token_ref(context):
"""Retrieves KeystoneToken object from the auth context and returns it.
:param dict context: The request context.
- :raises: exception.Unauthorized if auth context cannot be found.
+ :raises keystone.exception.Unauthorized: If auth context cannot be found.
:returns: The KeystoneToken object.
"""
try:
@@ -526,3 +551,48 @@ def get_token_ref(context):
except KeyError:
LOG.warning(_LW("Couldn't find the auth context."))
raise exception.Unauthorized()
+
+
+URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="
+
+
+def is_not_url_safe(name):
+ """Check if a string contains any url reserved characters."""
+ return len(list_url_unsafe_chars(name)) > 0
+
+
+def list_url_unsafe_chars(name):
+ """Return a list of the reserved characters."""
+ reserved_chars = ''
+ for i in name:
+ if i in URL_RESERVED_CHARS:
+ reserved_chars += i
+ return reserved_chars
+
+
+def lower_case_hostname(url):
+ """Change the URL's hostname to lowercase"""
+ # NOTE(gyee): according to
+ # https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion
+ # of the URL is case-insensitive
+ parsed = moves.urllib.parse.urlparse(url)
+ # Note: _replace method for named tuples is public and defined in docs
+ replaced = parsed._replace(netloc=parsed.netloc.lower())
+ return moves.urllib.parse.urlunparse(replaced)
+
+
+def remove_standard_port(url):
+    """Remove the default ports specified in RFC 2616 and RFC 2818."""
+    o = moves.urllib.parse.urlparse(url)
+    separator = ':'
+    (host, separator, port) = o.netloc.partition(separator)
+ if o.scheme.lower() == 'http' and port == '80':
+        # NOTE(gyee): _replace() is not a private method. It has an
+        # underscore prefix to prevent conflicts with field names.
+        # See https://docs.python.org/2/library/collections.html#
+        # collections.namedtuple
+ o = o._replace(netloc=host)
+ if o.scheme.lower() == 'https' and port == '443':
+ o = o._replace(netloc=host)
+
+ return moves.urllib.parse.urlunparse(o)
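
Note: expected behavior of the new helpers above, assuming
`from six import moves` is already imported in this module; the example
values are illustrative:

    from keystone.common import utils

    # resource_uuid() is deterministic: a valid UUID passes through, and
    # any other string (up to 64 chars) maps to a stable uuid5(...).hex
    # value under RESOURCE_ID_NAMESPACE.
    utils.resource_uuid('default') == utils.resource_uuid('default')  # True

    # The netloc is case-insensitive, so only the hostname is lowered.
    utils.lower_case_hostname('http://Example.COM/v3')
    # -> 'http://example.com/v3'

    # Default ports from RFC 2616/2818 are dropped; all others are kept.
    utils.remove_standard_port('http://example.com:80/identity')
    # -> 'http://example.com/identity'
    utils.remove_standard_port('https://example.com:8443/identity')
    # -> 'https://example.com:8443/identity'
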
diff --git a/keystone-moon/keystone/common/validation/__init__.py b/keystone-moon/keystone/common/validation/__init__.py
index 1e5cc6a5..9d812f40 100644
--- a/keystone-moon/keystone/common/validation/__init__.py
+++ b/keystone-moon/keystone/common/validation/__init__.py
@@ -28,8 +28,7 @@ def validated(request_body_schema, resource_to_validate):
:param request_body_schema: a schema to validate the resource reference
:param resource_to_validate: the reference to validate
:raises keystone.exception.ValidationError: if `resource_to_validate` is
- not passed by or passed with an empty value (see wrapper method
- below).
+        None (see the wrapper method below).
:raises TypeError: at decoration time when the expected resource to
validate isn't found in the decorated method's
signature
@@ -49,15 +48,15 @@ def validated(request_body_schema, resource_to_validate):
@functools.wraps(func)
def wrapper(*args, **kwargs):
- if kwargs.get(resource_to_validate):
+ if (resource_to_validate in kwargs and
+ kwargs[resource_to_validate] is not None):
schema_validator.validate(kwargs[resource_to_validate])
else:
try:
resource = args[arg_index]
- # If resource to be validated is empty, no need to do
- # validation since the message given by jsonschema doesn't
- # help in this case.
- if resource:
+                # If the resource to be validated is not None but is empty,
+                # jsonschema can still validate it and report what is wrong.
+ if resource is not None:
schema_validator.validate(resource)
else:
raise exception.ValidationError(
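
Note: the behavioral change in this hunk is that only None short-circuits to
keystone's generic ValidationError; an empty dict now reaches jsonschema,
whose error message names the missing properties. A sketch of the difference
(the schema is illustrative):

    import jsonschema

    schema = {'type': 'object', 'required': ['name']}
    validator = jsonschema.Draft4Validator(schema)

    # An empty dict is now validated and fails with a specific message:
    # "'name' is a required property".
    validator.validate({})
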
diff --git a/keystone-moon/keystone/common/validation/parameter_types.py b/keystone-moon/keystone/common/validation/parameter_types.py
index 1bc81383..c0753827 100644
--- a/keystone-moon/keystone/common/validation/parameter_types.py
+++ b/keystone-moon/keystone/common/validation/parameter_types.py
@@ -43,6 +43,13 @@ id_string = {
'pattern': '^[a-zA-Z0-9-]+$'
}
+mapping_id_string = {
+ 'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 64,
+ 'pattern': '^[a-zA-Z0-9-_]+$'
+}
+
description = {
'type': 'string'
}
@@ -54,7 +61,7 @@ url = {
# NOTE(edmondsw): we could do more to validate per various RFCs, but
# decision was made to err on the side of leniency. The following is based
# on rfc1738 section 2.1
- 'pattern': '[a-zA-Z0-9+.-]+:.+'
+ 'pattern': '^[a-zA-Z0-9+.-]+:.+'
}
email = {
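
Note: jsonschema applies `pattern` with search semantics, so without the
leading `^` the old URL expression could match a scheme anywhere in the
string. An illustration:

    import re

    old = '[a-zA-Z0-9+.-]+:.+'
    new = '^[a-zA-Z0-9+.-]+:.+'

    value = 'junk before http://example.com'
    bool(re.search(old, value))  # True: the scheme matched mid-string
    bool(re.search(new, value))  # False: the value must start with scheme:
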
diff --git a/keystone-moon/keystone/common/validation/validators.py b/keystone-moon/keystone/common/validation/validators.py
index a4574176..c6d52e9a 100644
--- a/keystone-moon/keystone/common/validation/validators.py
+++ b/keystone-moon/keystone/common/validation/validators.py
@@ -20,7 +20,6 @@ from keystone.i18n import _
class SchemaValidator(object):
"""Resource reference validator class."""
- validator = None
validator_org = jsonschema.Draft4Validator
def __init__(self, schema):
@@ -43,7 +42,7 @@ class SchemaValidator(object):
except jsonschema.ValidationError as ex:
# NOTE: For whole OpenStack message consistency, this error
# message has been written in a format consistent with WSME.
- if len(ex.path) > 0:
+ if ex.path:
# NOTE(lbragstad): Here we could think about using iter_errors
# as a method of providing invalid parameters back to the
# user.
diff --git a/keystone-moon/keystone/common/wsgi.py b/keystone-moon/keystone/common/wsgi.py
index 8b99c87d..04528a0c 100644
--- a/keystone-moon/keystone/common/wsgi.py
+++ b/keystone-moon/keystone/common/wsgi.py
@@ -20,6 +20,7 @@
import copy
import itertools
+import re
import wsgiref.util
from oslo_config import cfg
@@ -71,9 +72,6 @@ def validate_token_bind(context, token_ref):
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
- # get the named mode if bind_mode is not one of the known
- name = None if permissive or bind_mode == 'required' else bind_mode
-
if not bind:
if permissive:
# no bind provided and none required
@@ -82,6 +80,9 @@ def validate_token_bind(context, token_ref):
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
+ # get the named mode if bind_mode is not one of the known
+ name = None if permissive or bind_mode == 'required' else bind_mode
+
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
@@ -112,10 +113,11 @@ def validate_token_bind(context, token_ref):
def best_match_language(req):
- """Determines the best available locale from the Accept-Language
- HTTP header passed in the request.
- """
+ """Determines the best available locale.
+    This returns the best available locale based on the Accept-Language HTTP
+ header passed in the request.
+ """
if not req.accept_language:
return None
return req.accept_language.best_match(
@@ -208,8 +210,7 @@ class Application(BaseApplication):
context['headers'] = dict(req.headers.items())
context['path'] = req.environ['PATH_INFO']
- scheme = (None if not CONF.secure_proxy_ssl_header
- else req.environ.get(CONF.secure_proxy_ssl_header))
+ scheme = req.environ.get(CONF.secure_proxy_ssl_header)
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
@@ -305,7 +306,6 @@ class Application(BaseApplication):
does not have the admin role
"""
-
if not context['is_admin']:
user_token_ref = utils.get_token_ref(context)
@@ -329,9 +329,7 @@ class Application(BaseApplication):
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
- """Returns true if the attribute in the given ref (which is a
- dict) is empty or None.
- """
+ """Determine if the attribute in ref is empty or None."""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
@@ -378,13 +376,19 @@ class Application(BaseApplication):
itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
+ elif 'environment' in context:
+ url = wsgiref.util.application_uri(context['environment'])
+ # remove version from the URL as it may be part of SCRIPT_NAME but
+ # it should not be part of base URL
+ url = re.sub(r'/v(3|(2\.0))/*$', '', url)
+
+ # now remove the standard port
+ url = utils.remove_standard_port(url)
else:
- # NOTE(jamielennox): if url is not set via the config file we
- # should set it relative to the url that the user used to get here
- # so as not to mess with version discovery. This is not perfect.
- # host_url omits the path prefix, but there isn't another good
- # solution that will work for all urls.
- url = context['host_url']
+        # If we don't have enough information to come up with a base URL,
+        # then fall back to localhost. This should never happen in a
+        # production environment.
+ url = 'http://localhost:%d' % CONF.eventlet_server.public_port
return url.rstrip('/')
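
Note: with the new branch above, when `public_endpoint`/`admin_endpoint` is
not configured the base URL is rebuilt from the WSGI environment instead of
`host_url`. A worked sketch with a hypothetical environment:

    import re
    import wsgiref.util

    from keystone.common import utils

    environ = {'wsgi.url_scheme': 'https',
               'HTTP_HOST': 'id.example.org:443',
               'SCRIPT_NAME': '/identity/v3'}

    url = wsgiref.util.application_uri(environ)
    # -> 'https://id.example.org:443/identity/v3'
    url = re.sub(r'/v(3|(2\.0))/*$', '', url)
    # -> 'https://id.example.org:443/identity'
    url = utils.remove_standard_port(url)
    # -> 'https://id.example.org/identity'
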
@@ -400,32 +404,10 @@ class Middleware(Application):
"""
@classmethod
- def factory(cls, global_config, **local_config):
- """Used for paste app factories in paste.deploy config files.
-
- Any local configuration (that is, values under the [filter:APPNAME]
- section of the paste config) will be passed into the `__init__` method
- as kwargs.
-
- A hypothetical configuration would look like:
-
- [filter:analytics]
- redis_host = 127.0.0.1
- paste.filter_factory = keystone.analytics:Analytics.factory
-
- which would result in a call to the `Analytics` class as
-
- import keystone.analytics
- keystone.analytics.Analytics(app, redis_host='127.0.0.1')
-
- You could of course re-implement the `factory` method in subclasses,
- but using the kwarg passing it shouldn't be necessary.
-
- """
+ def factory(cls, global_config):
+ """Used for paste app factories in paste.deploy config files."""
def _factory(app):
- conf = global_config.copy()
- conf.update(local_config)
- return cls(app, **local_config)
+ return cls(app)
return _factory
def __init__(self, application):
@@ -601,6 +583,7 @@ class ExtensionRouter(Router):
Expects to be subclassed.
"""
+
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
@@ -737,8 +720,8 @@ class V3ExtensionRouter(ExtensionRouter, RoutersBase):
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
- response.body = jsonutils.dumps(response_data,
- cls=utils.SmarterEncoder)
+ response.body = jsonutils.dump_as_bytes(response_data,
+ cls=utils.SmarterEncoder)
return response
@@ -751,7 +734,7 @@ def render_response(body=None, status=None, headers=None, method=None):
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
- body = ''
+ body = b''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
@@ -761,11 +744,41 @@ def render_response(body=None, status=None, headers=None, method=None):
content_type = None
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
- body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
+ body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
+    # NOTE(davechen): `mod_wsgi` follows the standards of PEP 3333,
+    # which require response header values to be of the native `str`
+    # type: byte strings on Python 2 and unicode strings on Python 3.
+    # Otherwise keystone will not work under Apache with `mod_wsgi`.
+    # Keystone therefore checks the data type of each header and
+    # converts it if needed.
+    # See bug:
+    # https://bugs.launchpad.net/keystone/+bug/1528981
+    # See PEP 3333:
+    # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
+    # See the mod_wsgi source (methods wsgi_convert_headers_to_bytes(...),
+    # wsgi_convert_string_to_bytes(...) and
+    # wsgi_validate_header_value(...)):
+    # https://github.com/GrahamDumpleton/mod_wsgi
+ def _convert_to_str(headers):
+ str_headers = []
+ for header in headers:
+ str_header = []
+ for value in header:
+ if not isinstance(value, str):
+ str_header.append(str(value))
+ else:
+ str_header.append(value)
+            # Convert the list to an immutable tuple to build the headers;
+            # each header's key/value is now guaranteed to be of str type.
+ str_headers.append(tuple(str_header))
+ return str_headers
+
+ headers = _convert_to_str(headers)
+
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
@@ -789,7 +802,6 @@ def render_response(body=None, status=None, headers=None, method=None):
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
-
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
@@ -806,18 +818,15 @@ def render_exception(error, context=None, request=None, user_locale=None):
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
- url = CONF.public_endpoint
- if not url:
- if request:
- context = {'host_url': request.host_url}
- if context:
- url = Application.base_url(context, 'public')
- else:
- url = 'http://localhost:%d' % CONF.eventlet_server.public_port
- else:
- substitutions = dict(
- itertools.chain(CONF.items(), CONF.eventlet_server.items()))
- url = url % substitutions
+        # NOTE(gyee): we only care about the request environment in the
+        # context. Also, it's OK to pass the environment as it is read-only
+        # in Application.base_url().
+ local_context = {}
+ if request:
+ local_context = {'environment': request.environ}
+ elif context and 'environment' in context:
+ local_context = {'environment': context['environment']}
+ url = Application.base_url(local_context, 'public')
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),
diff --git a/keystone-moon/keystone/contrib/admin_crud/core.py b/keystone-moon/keystone/contrib/admin_crud/core.py
index 5d69d249..739cc0ff 100644
--- a/keystone-moon/keystone/contrib/admin_crud/core.py
+++ b/keystone-moon/keystone/contrib/admin_crud/core.py
@@ -12,230 +12,21 @@
# License for the specific language governing permissions and limitations
# under the License.
-from keystone import assignment
-from keystone import catalog
-from keystone.common import extension
-from keystone.common import wsgi
-from keystone import identity
-from keystone import resource
-
-
-extension.register_admin_extension(
- 'OS-KSADM', {
- 'name': 'OpenStack Keystone Admin',
- 'namespace': 'http://docs.openstack.org/identity/api/ext/'
- 'OS-KSADM/v1.0',
- 'alias': 'OS-KSADM',
- 'updated': '2013-07-11T17:14:00-00:00',
- 'description': 'OpenStack extensions to Keystone v2.0 API '
- 'enabling Administrative Operations.',
- 'links': [
- {
- 'rel': 'describedby',
- # TODO(dolph): link needs to be revised after
- # bug 928059 merges
- 'type': 'text/html',
- 'href': 'https://github.com/openstack/identity-api',
- }
- ]})
-
-
-class CrudExtension(wsgi.ExtensionRouter):
- """Previously known as the OS-KSADM extension.
-
- Provides a bunch of CRUD operations for internal data types.
-
- """
-
- def add_routes(self, mapper):
- tenant_controller = resource.controllers.Tenant()
- assignment_tenant_controller = (
- assignment.controllers.TenantAssignment())
- user_controller = identity.controllers.User()
- role_controller = assignment.controllers.Role()
- assignment_role_controller = assignment.controllers.RoleAssignmentV2()
- service_controller = catalog.controllers.Service()
- endpoint_controller = catalog.controllers.Endpoint()
+from oslo_log import log
+from oslo_log import versionutils
- # Tenant Operations
- mapper.connect(
- '/tenants',
- controller=tenant_controller,
- action='create_project',
- conditions=dict(method=['POST']))
- mapper.connect(
- '/tenants/{tenant_id}',
- controller=tenant_controller,
- action='update_project',
- conditions=dict(method=['PUT', 'POST']))
- mapper.connect(
- '/tenants/{tenant_id}',
- controller=tenant_controller,
- action='delete_project',
- conditions=dict(method=['DELETE']))
- mapper.connect(
- '/tenants/{tenant_id}/users',
- controller=assignment_tenant_controller,
- action='get_project_users',
- conditions=dict(method=['GET']))
-
- # User Operations
- mapper.connect(
- '/users',
- controller=user_controller,
- action='get_users',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/users',
- controller=user_controller,
- action='create_user',
- conditions=dict(method=['POST']))
- # NOTE(termie): not in diablo
- mapper.connect(
- '/users/{user_id}',
- controller=user_controller,
- action='update_user',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/users/{user_id}',
- controller=user_controller,
- action='delete_user',
- conditions=dict(method=['DELETE']))
-
- # COMPAT(diablo): the copy with no OS-KSADM is from diablo
- mapper.connect(
- '/users/{user_id}/password',
- controller=user_controller,
- action='set_user_password',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/users/{user_id}/OS-KSADM/password',
- controller=user_controller,
- action='set_user_password',
- conditions=dict(method=['PUT']))
-
- # COMPAT(diablo): the copy with no OS-KSADM is from diablo
- mapper.connect(
- '/users/{user_id}/tenant',
- controller=user_controller,
- action='update_user',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/users/{user_id}/OS-KSADM/tenant',
- controller=user_controller,
- action='update_user',
- conditions=dict(method=['PUT']))
-
- # COMPAT(diablo): the copy with no OS-KSADM is from diablo
- mapper.connect(
- '/users/{user_id}/enabled',
- controller=user_controller,
- action='set_user_enabled',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/users/{user_id}/OS-KSADM/enabled',
- controller=user_controller,
- action='set_user_enabled',
- conditions=dict(method=['PUT']))
-
- # User Roles
- mapper.connect(
- '/users/{user_id}/roles/OS-KSADM/{role_id}',
- controller=assignment_role_controller,
- action='add_role_to_user',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/users/{user_id}/roles/OS-KSADM/{role_id}',
- controller=assignment_role_controller,
- action='remove_role_from_user',
- conditions=dict(method=['DELETE']))
-
- # COMPAT(diablo): User Roles
- mapper.connect(
- '/users/{user_id}/roleRefs',
- controller=assignment_role_controller,
- action='get_role_refs',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/users/{user_id}/roleRefs',
- controller=assignment_role_controller,
- action='create_role_ref',
- conditions=dict(method=['POST']))
- mapper.connect(
- '/users/{user_id}/roleRefs/{role_ref_id}',
- controller=assignment_role_controller,
- action='delete_role_ref',
- conditions=dict(method=['DELETE']))
+from keystone.common import wsgi
+from keystone.i18n import _
- # User-Tenant Roles
- mapper.connect(
- '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
- controller=assignment_role_controller,
- action='add_role_to_user',
- conditions=dict(method=['PUT']))
- mapper.connect(
- '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
- controller=assignment_role_controller,
- action='remove_role_from_user',
- conditions=dict(method=['DELETE']))
- # Service Operations
- mapper.connect(
- '/OS-KSADM/services',
- controller=service_controller,
- action='get_services',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/OS-KSADM/services',
- controller=service_controller,
- action='create_service',
- conditions=dict(method=['POST']))
- mapper.connect(
- '/OS-KSADM/services/{service_id}',
- controller=service_controller,
- action='delete_service',
- conditions=dict(method=['DELETE']))
- mapper.connect(
- '/OS-KSADM/services/{service_id}',
- controller=service_controller,
- action='get_service',
- conditions=dict(method=['GET']))
+LOG = log.getLogger(__name__)
- # Endpoint Templates
- mapper.connect(
- '/endpoints',
- controller=endpoint_controller,
- action='get_endpoints',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/endpoints',
- controller=endpoint_controller,
- action='create_endpoint',
- conditions=dict(method=['POST']))
- mapper.connect(
- '/endpoints/{endpoint_id}',
- controller=endpoint_controller,
- action='delete_endpoint',
- conditions=dict(method=['DELETE']))
- # Role Operations
- mapper.connect(
- '/OS-KSADM/roles',
- controller=role_controller,
- action='create_role',
- conditions=dict(method=['POST']))
- mapper.connect(
- '/OS-KSADM/roles',
- controller=role_controller,
- action='get_roles',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/OS-KSADM/roles/{role_id}',
- controller=role_controller,
- action='get_role',
- conditions=dict(method=['GET']))
- mapper.connect(
- '/OS-KSADM/roles/{role_id}',
- controller=role_controller,
- action='delete_role',
- conditions=dict(method=['DELETE']))
+class CrudExtension(wsgi.Middleware):
+ def __init__(self, application):
+ super(CrudExtension, self).__init__(application)
+ msg = _("Remove admin_crud_extension from the paste pipeline, the "
+ "admin_crud extension is now always available. Update"
+ "the [pipeline:admin_api] section in keystone-paste.ini "
+ "accordingly, as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
diff --git a/keystone-moon/keystone/contrib/ec2/controllers.py b/keystone-moon/keystone/contrib/ec2/controllers.py
index 78172ec9..c0f6067e 100644
--- a/keystone-moon/keystone/contrib/ec2/controllers.py
+++ b/keystone-moon/keystone/contrib/ec2/controllers.py
@@ -17,7 +17,7 @@
This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.
-A user can create as many access/secret pairs, each of which map to a
+A user can create any number of access/secret pairs, each mapped to a
specific project. This is required because OpenStack supports a user
belonging to multiple projects, whereas the signatures created on ec2-style
requests don't allow specification of which project the user wishes to act
@@ -47,6 +47,8 @@ from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
+CRED_TYPE_EC2 = 'ec2'
+
@dependency.requires('assignment_api', 'catalog_api', 'credential_api',
'identity_api', 'resource_api', 'role_api',
@@ -75,13 +77,14 @@ class Ec2ControllerCommon(object):
signature):
return True
raise exception.Unauthorized(
- message='Invalid EC2 signature.')
+ message=_('Invalid EC2 signature.'))
else:
raise exception.Unauthorized(
- message='EC2 signature not supplied.')
+ message=_('EC2 signature not supplied.'))
# Raise the exception when credentials.get('signature') is None
else:
- raise exception.Unauthorized(message='EC2 signature not supplied.')
+ raise exception.Unauthorized(
+ message=_('EC2 signature not supplied.'))
@abc.abstractmethod
def authenticate(self, context, credentials=None, ec2Credentials=None):
@@ -111,7 +114,6 @@ class Ec2ControllerCommon(object):
:returns: user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref
"""
-
# FIXME(ja): validate that a service token was used!
# NOTE(termie): backwards compat hack
@@ -119,7 +121,8 @@ class Ec2ControllerCommon(object):
credentials = ec2credentials
if 'access' not in credentials:
- raise exception.Unauthorized(message='EC2 signature not supplied.')
+ raise exception.Unauthorized(
+ message=_('EC2 signature not supplied.'))
creds_ref = self._get_credentials(credentials['access'])
self.check_signature(creds_ref, credentials)
@@ -152,7 +155,8 @@ class Ec2ControllerCommon(object):
roles = metadata_ref.get('roles', [])
if not roles:
- raise exception.Unauthorized(message='User not valid for tenant.')
+ raise exception.Unauthorized(
+ message=_('User not valid for tenant.'))
roles_ref = [self.role_api.get_role(role_id) for role_id in roles]
catalog_ref = self.catalog_api.get_catalog(
@@ -171,7 +175,6 @@ class Ec2ControllerCommon(object):
:param tenant_id: id of tenant
:returns: credential: dict of ec2 credential
"""
-
self.identity_api.get_user(user_id)
self.resource_api.get_project(tenant_id)
trust_id = self._get_trust_id_for_request(context)
@@ -183,7 +186,7 @@ class Ec2ControllerCommon(object):
'project_id': tenant_id,
'blob': jsonutils.dumps(blob),
'id': credential_id,
- 'type': 'ec2'}
+ 'type': CRED_TYPE_EC2}
self.credential_api.create_credential(credential_id, cred_ref)
return {'credential': self._convert_v3_to_ec2_credential(cred_ref)}
@@ -193,10 +196,9 @@ class Ec2ControllerCommon(object):
:param user_id: id of user
:returns: credentials: list of ec2 credential dicts
"""
-
self.identity_api.get_user(user_id)
credential_refs = self.credential_api.list_credentials_for_user(
- user_id)
+ user_id, type=CRED_TYPE_EC2)
return {'credentials':
[self._convert_v3_to_ec2_credential(credential)
for credential in credential_refs]}
@@ -210,7 +212,6 @@ class Ec2ControllerCommon(object):
:param credential_id: access key for credentials
:returns: credential: dict of ec2 credential
"""
-
self.identity_api.get_user(user_id)
return {'credential': self._get_credentials(credential_id)}
@@ -223,7 +224,6 @@ class Ec2ControllerCommon(object):
:param credential_id: access key for credentials
:returns: bool: success
"""
-
self.identity_api.get_user(user_id)
self._get_credentials(credential_id)
ec2_credential_id = utils.hash_access_key(credential_id)
@@ -249,20 +249,22 @@ class Ec2ControllerCommon(object):
"""Return credentials from an ID.
:param credential_id: id of credential
- :raises exception.Unauthorized: when credential id is invalid
+ :raises keystone.exception.Unauthorized: when credential id is invalid
+ or when the credential type is not ec2
:returns: credential: dict of ec2 credential.
"""
ec2_credential_id = utils.hash_access_key(credential_id)
- creds = self.credential_api.get_credential(ec2_credential_id)
- if not creds:
- raise exception.Unauthorized(message='EC2 access key not found.')
- return self._convert_v3_to_ec2_credential(creds)
+ cred = self.credential_api.get_credential(ec2_credential_id)
+ if not cred or cred['type'] != CRED_TYPE_EC2:
+ raise exception.Unauthorized(
+ message=_('EC2 access key not found.'))
+ return self._convert_v3_to_ec2_credential(cred)
@dependency.requires('policy_api', 'token_provider_api')
class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
- @controller.v2_deprecated
+ @controller.v2_ec2_deprecated
def authenticate(self, context, credentials=None, ec2Credentials=None):
(user_ref, tenant_ref, metadata_ref, roles_ref,
catalog_ref) = self._authenticate(credentials=credentials,
@@ -282,27 +284,27 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
auth_token_data, roles_ref, catalog_ref)
return token_data
- @controller.v2_deprecated
+ @controller.v2_ec2_deprecated
def get_credential(self, context, user_id, credential_id):
if not self._is_admin(context):
self._assert_identity(context, user_id)
return super(Ec2Controller, self).get_credential(user_id,
credential_id)
- @controller.v2_deprecated
+ @controller.v2_ec2_deprecated
def get_credentials(self, context, user_id):
if not self._is_admin(context):
self._assert_identity(context, user_id)
return super(Ec2Controller, self).get_credentials(user_id)
- @controller.v2_deprecated
+ @controller.v2_ec2_deprecated
def create_credential(self, context, user_id, tenant_id):
if not self._is_admin(context):
self._assert_identity(context, user_id)
return super(Ec2Controller, self).create_credential(context, user_id,
tenant_id)
- @controller.v2_deprecated
+ @controller.v2_ec2_deprecated
def delete_credential(self, context, user_id, credential_id):
if not self._is_admin(context):
self._assert_identity(context, user_id)
@@ -315,7 +317,7 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
:param context: standard context
:param user_id: id of user
- :raises exception.Forbidden: when token is invalid
+ :raises keystone.exception.Forbidden: when token is invalid
"""
token_ref = utils.get_token_ref(context)
@@ -343,7 +345,7 @@ class Ec2Controller(Ec2ControllerCommon, controller.V2Controller):
:param user_id: expected credential owner
:param credential_id: id of credential object
- :raises exception.Forbidden: on failure
+ :raises keystone.exception.Forbidden: on failure
"""
ec2_credential_id = utils.hash_access_key(credential_id)
diff --git a/keystone-moon/keystone/contrib/ec2/core.py b/keystone-moon/keystone/contrib/ec2/core.py
index 77857af8..7bba8cab 100644
--- a/keystone-moon/keystone/contrib/ec2/core.py
+++ b/keystone-moon/keystone/contrib/ec2/core.py
@@ -25,9 +25,9 @@ EXTENSION_DATA = {
'links': [
{
'rel': 'describedby',
- # TODO(ayoung): needs a description
'type': 'text/html',
- 'href': 'https://github.com/openstack/identity-api',
+ 'href': 'http://developer.openstack.org/'
+ 'api-ref-identity-v2-ext.html',
}
]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
diff --git a/keystone-moon/keystone/contrib/ec2/routers.py b/keystone-moon/keystone/contrib/ec2/routers.py
index 7b6bf115..97c68cf7 100644
--- a/keystone-moon/keystone/contrib/ec2/routers.py
+++ b/keystone-moon/keystone/contrib/ec2/routers.py
@@ -23,10 +23,6 @@ build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation, extension_name='OS-EC2',
extension_version='1.0')
-build_parameter_relation = functools.partial(
- json_home.build_v3_extension_parameter_relation, extension_name='OS-EC2',
- extension_version='1.0')
-
class Ec2Extension(wsgi.ExtensionRouter):
def add_routes(self, mapper):
@@ -90,6 +86,6 @@ class Ec2ExtensionV3(wsgi.V3ExtensionRouter):
rel=build_resource_relation(resource_name='user_credential'),
path_vars={
'credential_id':
- build_parameter_relation(parameter_name='credential_id'),
+ json_home.build_v3_parameter_relation('credential_id'),
'user_id': json_home.Parameters.USER_ID,
})
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/__init__.py b/keystone-moon/keystone/contrib/endpoint_filter/__init__.py
index 72508c3e..e69de29b 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/__init__.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/__init__.py
@@ -1,15 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.contrib.endpoint_filter.core import * # noqa
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
index 22d5796a..ad39d045 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/backends/catalog_sql.py
@@ -17,52 +17,52 @@ from oslo_config import cfg
from keystone.catalog.backends import sql
from keystone.catalog import core as catalog_core
from keystone.common import dependency
-from keystone import exception
CONF = cfg.CONF
-@dependency.requires('endpoint_filter_api')
+@dependency.requires('catalog_api')
class EndpointFilterCatalog(sql.Catalog):
def get_v3_catalog(self, user_id, project_id):
substitutions = dict(CONF.items())
- substitutions.update({'tenant_id': project_id, 'user_id': user_id})
+ substitutions.update({
+ 'tenant_id': project_id,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ })
services = {}
- refs = self.endpoint_filter_api.list_endpoints_for_project(project_id)
+ dict_of_endpoint_refs = (self.catalog_api.
+ list_endpoints_for_project(project_id))
- if (not refs and
+ if (not dict_of_endpoint_refs and
CONF.endpoint_filter.return_all_endpoints_if_no_filter):
return super(EndpointFilterCatalog, self).get_v3_catalog(
user_id, project_id)
- for entry in refs:
- try:
- endpoint = self.get_endpoint(entry['endpoint_id'])
- if not endpoint['enabled']:
- # Skip disabled endpoints.
- continue
- service_id = endpoint['service_id']
- services.setdefault(
- service_id,
- self.get_service(service_id))
- service = services[service_id]
- del endpoint['service_id']
- del endpoint['enabled']
- del endpoint['legacy_endpoint_id']
- endpoint['url'] = catalog_core.format_url(
- endpoint['url'], substitutions)
- # populate filtered endpoints
- if 'endpoints' in services[service_id]:
- service['endpoints'].append(endpoint)
- else:
- service['endpoints'] = [endpoint]
- except exception.EndpointNotFound:
- # remove bad reference from association
- self.endpoint_filter_api.remove_endpoint_from_project(
- entry['endpoint_id'], project_id)
+ for endpoint_id, endpoint in dict_of_endpoint_refs.items():
+ if not endpoint['enabled']:
+ # Skip disabled endpoints.
+ continue
+ service_id = endpoint['service_id']
+ services.setdefault(
+ service_id,
+ self.get_service(service_id))
+ service = services[service_id]
+ del endpoint['service_id']
+ del endpoint['enabled']
+ del endpoint['legacy_endpoint_id']
+ # Include deprecated region for backwards compatibility
+ endpoint['region'] = endpoint['region_id']
+ endpoint['url'] = catalog_core.format_url(
+ endpoint['url'], substitutions)
+ # populate filtered endpoints
+ if 'endpoints' in services[service_id]:
+ service['endpoints'].append(endpoint)
+ else:
+ service['endpoints'] = [endpoint]
# format catalog
catalog = []
@@ -70,6 +70,7 @@ class EndpointFilterCatalog(sql.Catalog):
formatted_service = {}
formatted_service['id'] = service['id']
formatted_service['type'] = service['type']
+ formatted_service['name'] = service['name']
formatted_service['endpoints'] = service['endpoints']
catalog.append(formatted_service)
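
Note: `catalog_api.list_endpoints_for_project` returns a dict keyed by
endpoint ID rather than the old list of association rows, which is why the
loop above iterates `.items()`. An inferred, non-authoritative sketch of the
shape, with made-up values:

    dict_of_endpoint_refs = {
        'endpoint-id-1': {
            'id': 'endpoint-id-1',
            'service_id': 'service-id-1',
            'region_id': 'RegionOne',
            'interface': 'public',
            'url': 'http://controller:5000/v3',
            'enabled': True,
            'legacy_endpoint_id': None,
        },
    }
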
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
index cf904268..484934bb 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/backends/sql.py
@@ -12,214 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-from keystone.common import sql
-from keystone.contrib import endpoint_filter
-from keystone import exception
-from keystone.i18n import _
+from oslo_log import versionutils
+from keystone.catalog.backends import sql
-class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin):
- """project-endpoint relationship table."""
- __tablename__ = 'project_endpoint'
- attributes = ['endpoint_id', 'project_id']
- endpoint_id = sql.Column(sql.String(64),
- primary_key=True,
- nullable=False)
- project_id = sql.Column(sql.String(64),
- primary_key=True,
- nullable=False)
+_OLD = 'keystone.contrib.endpoint_filter.backends.sql.EndpointFilter'
+_NEW = 'sql'
-class EndpointGroup(sql.ModelBase, sql.ModelDictMixin):
- """Endpoint Groups table."""
- __tablename__ = 'endpoint_group'
- attributes = ['id', 'name', 'description', 'filters']
- mutable_attributes = frozenset(['name', 'description', 'filters'])
- id = sql.Column(sql.String(64), primary_key=True)
- name = sql.Column(sql.String(255), nullable=False)
- description = sql.Column(sql.Text, nullable=True)
- filters = sql.Column(sql.JsonBlob(), nullable=False)
-
-
-class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin):
- """Project to Endpoint group relationship table."""
- __tablename__ = 'project_endpoint_group'
- attributes = ['endpoint_group_id', 'project_id']
- endpoint_group_id = sql.Column(sql.String(64),
- sql.ForeignKey('endpoint_group.id'),
- nullable=False)
- project_id = sql.Column(sql.String(64), nullable=False)
- __table_args__ = (sql.PrimaryKeyConstraint('endpoint_group_id',
- 'project_id'), {})
-
-
-class EndpointFilter(endpoint_filter.EndpointFilterDriverV8):
-
- @sql.handle_conflicts(conflict_type='project_endpoint')
- def add_endpoint_to_project(self, endpoint_id, project_id):
- session = sql.get_session()
- with session.begin():
- endpoint_filter_ref = ProjectEndpoint(endpoint_id=endpoint_id,
- project_id=project_id)
- session.add(endpoint_filter_ref)
-
- def _get_project_endpoint_ref(self, session, endpoint_id, project_id):
- endpoint_filter_ref = session.query(ProjectEndpoint).get(
- (endpoint_id, project_id))
- if endpoint_filter_ref is None:
- msg = _('Endpoint %(endpoint_id)s not found in project '
- '%(project_id)s') % {'endpoint_id': endpoint_id,
- 'project_id': project_id}
- raise exception.NotFound(msg)
- return endpoint_filter_ref
-
- def check_endpoint_in_project(self, endpoint_id, project_id):
- session = sql.get_session()
- self._get_project_endpoint_ref(session, endpoint_id, project_id)
-
- def remove_endpoint_from_project(self, endpoint_id, project_id):
- session = sql.get_session()
- endpoint_filter_ref = self._get_project_endpoint_ref(
- session, endpoint_id, project_id)
- with session.begin():
- session.delete(endpoint_filter_ref)
-
- def list_endpoints_for_project(self, project_id):
- session = sql.get_session()
- query = session.query(ProjectEndpoint)
- query = query.filter_by(project_id=project_id)
- endpoint_filter_refs = query.all()
- return [ref.to_dict() for ref in endpoint_filter_refs]
-
- def list_projects_for_endpoint(self, endpoint_id):
- session = sql.get_session()
- query = session.query(ProjectEndpoint)
- query = query.filter_by(endpoint_id=endpoint_id)
- endpoint_filter_refs = query.all()
- return [ref.to_dict() for ref in endpoint_filter_refs]
-
- def delete_association_by_endpoint(self, endpoint_id):
- session = sql.get_session()
- with session.begin():
- query = session.query(ProjectEndpoint)
- query = query.filter_by(endpoint_id=endpoint_id)
- query.delete(synchronize_session=False)
-
- def delete_association_by_project(self, project_id):
- session = sql.get_session()
- with session.begin():
- query = session.query(ProjectEndpoint)
- query = query.filter_by(project_id=project_id)
- query.delete(synchronize_session=False)
-
- def create_endpoint_group(self, endpoint_group_id, endpoint_group):
- session = sql.get_session()
- with session.begin():
- endpoint_group_ref = EndpointGroup.from_dict(endpoint_group)
- session.add(endpoint_group_ref)
- return endpoint_group_ref.to_dict()
-
- def _get_endpoint_group(self, session, endpoint_group_id):
- endpoint_group_ref = session.query(EndpointGroup).get(
- endpoint_group_id)
- if endpoint_group_ref is None:
- raise exception.EndpointGroupNotFound(
- endpoint_group_id=endpoint_group_id)
- return endpoint_group_ref
-
- def get_endpoint_group(self, endpoint_group_id):
- session = sql.get_session()
- endpoint_group_ref = self._get_endpoint_group(session,
- endpoint_group_id)
- return endpoint_group_ref.to_dict()
-
- def update_endpoint_group(self, endpoint_group_id, endpoint_group):
- session = sql.get_session()
- with session.begin():
- endpoint_group_ref = self._get_endpoint_group(session,
- endpoint_group_id)
- old_endpoint_group = endpoint_group_ref.to_dict()
- old_endpoint_group.update(endpoint_group)
- new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group)
- for attr in EndpointGroup.mutable_attributes:
- setattr(endpoint_group_ref, attr,
- getattr(new_endpoint_group, attr))
- return endpoint_group_ref.to_dict()
-
- def delete_endpoint_group(self, endpoint_group_id):
- session = sql.get_session()
- endpoint_group_ref = self._get_endpoint_group(session,
- endpoint_group_id)
- with session.begin():
- self._delete_endpoint_group_association_by_endpoint_group(
- session, endpoint_group_id)
- session.delete(endpoint_group_ref)
-
- def get_endpoint_group_in_project(self, endpoint_group_id, project_id):
- session = sql.get_session()
- ref = self._get_endpoint_group_in_project(session,
- endpoint_group_id,
- project_id)
- return ref.to_dict()
-
- @sql.handle_conflicts(conflict_type='project_endpoint_group')
- def add_endpoint_group_to_project(self, endpoint_group_id, project_id):
- session = sql.get_session()
-
- with session.begin():
- # Create a new Project Endpoint group entity
- endpoint_group_project_ref = ProjectEndpointGroupMembership(
- endpoint_group_id=endpoint_group_id, project_id=project_id)
- session.add(endpoint_group_project_ref)
-
- def _get_endpoint_group_in_project(self, session,
- endpoint_group_id, project_id):
- endpoint_group_project_ref = session.query(
- ProjectEndpointGroupMembership).get((endpoint_group_id,
- project_id))
- if endpoint_group_project_ref is None:
- msg = _('Endpoint Group Project Association not found')
- raise exception.NotFound(msg)
- else:
- return endpoint_group_project_ref
-
- def list_endpoint_groups(self):
- session = sql.get_session()
- query = session.query(EndpointGroup)
- endpoint_group_refs = query.all()
- return [e.to_dict() for e in endpoint_group_refs]
-
- def list_endpoint_groups_for_project(self, project_id):
- session = sql.get_session()
- query = session.query(ProjectEndpointGroupMembership)
- query = query.filter_by(project_id=project_id)
- endpoint_group_refs = query.all()
- return [ref.to_dict() for ref in endpoint_group_refs]
-
- def remove_endpoint_group_from_project(self, endpoint_group_id,
- project_id):
- session = sql.get_session()
- endpoint_group_project_ref = self._get_endpoint_group_in_project(
- session, endpoint_group_id, project_id)
- with session.begin():
- session.delete(endpoint_group_project_ref)
-
- def list_projects_associated_with_endpoint_group(self, endpoint_group_id):
- session = sql.get_session()
- query = session.query(ProjectEndpointGroupMembership)
- query = query.filter_by(endpoint_group_id=endpoint_group_id)
- endpoint_group_refs = query.all()
- return [ref.to_dict() for ref in endpoint_group_refs]
-
- def _delete_endpoint_group_association_by_endpoint_group(
- self, session, endpoint_group_id):
- query = session.query(ProjectEndpointGroupMembership)
- query = query.filter_by(endpoint_group_id=endpoint_group_id)
- query.delete()
-
- def delete_endpoint_group_association_by_project(self, project_id):
- session = sql.get_session()
- with session.begin():
- query = session.query(ProjectEndpointGroupMembership)
- query = query.filter_by(project_id=project_id)
- query.delete()
+class EndpointFilter(sql.Catalog):
+ @versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of=_NEW,
+ what=_OLD,
+ remove_in=2)
+ def __init__(self, *args, **kwargs):
+ super(EndpointFilter, self).__init__(*args, **kwargs)
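The hunk above reduces the legacy driver to a deprecation shim: the class now inherits the relocated implementation and only overrides __init__ so that constructing it through the old path logs a warning. A minimal sketch of the same pattern, with illustrative names (NewDriver, _OLD, and _NEW are placeholders, not Keystone's):

    # Sketch of the deprecated-shim pattern; names are illustrative.
    from oslo_log import versionutils


    class NewDriver(object):
        """Stand-in for the relocated implementation."""


    _OLD = 'oldpkg.backends.sql.Driver'
    _NEW = 'newpkg.backends.sql.Driver'


    class Driver(NewDriver):
        @versionutils.deprecated(
            as_of=versionutils.deprecated.MITAKA,
            in_favor_of=_NEW,
            what=_OLD,
            remove_in=2)  # drop two cycles after Mitaka
        def __init__(self, *args, **kwargs):
            # Emits the deprecation warning, then behaves as NewDriver.
            super(Driver, self).__init__(*args, **kwargs)

Instantiating Driver() once logs the warning; all behaviour comes from NewDriver.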
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
index 2aa93a86..ac0a30cc 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
@@ -12,27 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- endpoint_filtering_table = sql.Table(
- 'project_endpoint',
- meta,
- sql.Column(
- 'endpoint_id',
- sql.String(64),
- primary_key=True,
- nullable=False),
- sql.Column(
- 'project_id',
- sql.String(64),
- primary_key=True,
- nullable=False))
-
- endpoint_filtering_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='endpoint_filter')
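As elsewhere in this patch, the migration body is replaced by an unconditional failure: these tables are created by Keystone's common migration repo in Mitaka, so running the per-extension repo fails fast instead of re-creating tables. A hedged sketch of the stub pattern (the real MigrationMovedFailure lives in keystone.exception; the message below is illustrative):

    # Illustrative stand-in for keystone.exception.MigrationMovedFailure.
    class MigrationMovedFailure(Exception):
        def __init__(self, extension):
            super(MigrationMovedFailure, self).__init__(
                "Migrations for '%s' now live in the main keystone "
                "migration repo; run the common db_sync instead."
                % extension)


    def upgrade(migrate_engine):
        # Every version script in the legacy repo now refuses to run.
        raise MigrationMovedFailure(extension='endpoint_filter')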
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
index 2c218b0d..ac5aa5b3 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py
@@ -12,30 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- endpoint_group_table = sql.Table(
- 'endpoint_group',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('name', sql.String(255), nullable=False),
- sql.Column('description', sql.Text, nullable=True),
- sql.Column('filters', sql.Text(), nullable=False))
- endpoint_group_table.create(migrate_engine, checkfirst=True)
-
- project_endpoint_group_table = sql.Table(
- 'project_endpoint_group',
- meta,
- sql.Column('endpoint_group_id', sql.String(64),
- sql.ForeignKey('endpoint_group.id'), nullable=False),
- sql.Column('project_id', sql.String(64), nullable=False),
- sql.PrimaryKeyConstraint('endpoint_group_id',
- 'project_id'))
- project_endpoint_group_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='endpoint_filter')
diff --git a/keystone-moon/keystone/contrib/endpoint_filter/routers.py b/keystone-moon/keystone/contrib/endpoint_filter/routers.py
index 285b9df2..f75110f9 100644
--- a/keystone-moon/keystone/contrib/endpoint_filter/routers.py
+++ b/keystone-moon/keystone/contrib/endpoint_filter/routers.py
@@ -12,151 +12,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
+from oslo_log import log
+from oslo_log import versionutils
-from keystone.common import json_home
from keystone.common import wsgi
-from keystone.contrib.endpoint_filter import controllers
+from keystone.i18n import _
-build_resource_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-EP-FILTER', extension_version='1.0')
+LOG = log.getLogger(__name__)
-build_parameter_relation = functools.partial(
- json_home.build_v3_extension_parameter_relation,
- extension_name='OS-EP-FILTER', extension_version='1.0')
-ENDPOINT_GROUP_PARAMETER_RELATION = build_parameter_relation(
- parameter_name='endpoint_group_id')
+class EndpointFilterExtension(wsgi.Middleware):
-
-class EndpointFilterExtension(wsgi.V3ExtensionRouter):
- """API Endpoints for the Endpoint Filter extension.
-
- The API looks like::
-
- PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
- GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
- HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
- DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
- GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
- GET /OS-EP-FILTER/projects/{project_id}/endpoints
- GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups
-
- GET /OS-EP-FILTER/endpoint_groups
- POST /OS-EP-FILTER/endpoint_groups
- GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
- HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
- PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
- DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
-
- GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects
- GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints
-
- PUT /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
- {project_id}
- GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
- {project_id}
- HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
- {project_id}
- DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
- {project_id}
-
- """
- PATH_PREFIX = '/OS-EP-FILTER'
- PATH_PROJECT_ENDPOINT = '/projects/{project_id}/endpoints/{endpoint_id}'
- PATH_ENDPOINT_GROUPS = '/endpoint_groups/{endpoint_group_id}'
- PATH_ENDPOINT_GROUP_PROJECTS = PATH_ENDPOINT_GROUPS + (
- '/projects/{project_id}')
-
- def add_routes(self, mapper):
- endpoint_filter_controller = controllers.EndpointFilterV3Controller()
- endpoint_group_controller = controllers.EndpointGroupV3Controller()
- project_endpoint_group_controller = (
- controllers.ProjectEndpointGroupV3Controller())
-
- self._add_resource(
- mapper, endpoint_filter_controller,
- path=self.PATH_PREFIX + '/endpoints/{endpoint_id}/projects',
- get_action='list_projects_for_endpoint',
- rel=build_resource_relation(resource_name='endpoint_projects'),
- path_vars={
- 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
- })
- self._add_resource(
- mapper, endpoint_filter_controller,
- path=self.PATH_PREFIX + self.PATH_PROJECT_ENDPOINT,
- get_head_action='check_endpoint_in_project',
- put_action='add_endpoint_to_project',
- delete_action='remove_endpoint_from_project',
- rel=build_resource_relation(resource_name='project_endpoint'),
- path_vars={
- 'endpoint_id': json_home.Parameters.ENDPOINT_ID,
- 'project_id': json_home.Parameters.PROJECT_ID,
- })
- self._add_resource(
- mapper, endpoint_filter_controller,
- path=self.PATH_PREFIX + '/projects/{project_id}/endpoints',
- get_action='list_endpoints_for_project',
- rel=build_resource_relation(resource_name='project_endpoints'),
- path_vars={
- 'project_id': json_home.Parameters.PROJECT_ID,
- })
- self._add_resource(
- mapper, endpoint_group_controller,
- path=self.PATH_PREFIX + '/projects/{project_id}/endpoint_groups',
- get_action='list_endpoint_groups_for_project',
- rel=build_resource_relation(
- resource_name='project_endpoint_groups'),
- path_vars={
- 'project_id': json_home.Parameters.PROJECT_ID,
- })
- self._add_resource(
- mapper, endpoint_group_controller,
- path=self.PATH_PREFIX + '/endpoint_groups',
- get_action='list_endpoint_groups',
- post_action='create_endpoint_group',
- rel=build_resource_relation(resource_name='endpoint_groups'))
- self._add_resource(
- mapper, endpoint_group_controller,
- path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS,
- get_head_action='get_endpoint_group',
- patch_action='update_endpoint_group',
- delete_action='delete_endpoint_group',
- rel=build_resource_relation(resource_name='endpoint_group'),
- path_vars={
- 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
- })
- self._add_resource(
- mapper, project_endpoint_group_controller,
- path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUP_PROJECTS,
- get_head_action='get_endpoint_group_in_project',
- put_action='add_endpoint_group_to_project',
- delete_action='remove_endpoint_group_from_project',
- rel=build_resource_relation(
- resource_name='endpoint_group_to_project_association'),
- path_vars={
- 'project_id': json_home.Parameters.PROJECT_ID,
- 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
- })
- self._add_resource(
- mapper, endpoint_group_controller,
- path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
- '/projects'),
- get_action='list_projects_associated_with_endpoint_group',
- rel=build_resource_relation(
- resource_name='projects_associated_with_endpoint_group'),
- path_vars={
- 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
- })
- self._add_resource(
- mapper, endpoint_group_controller,
- path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
- '/endpoints'),
- get_action='list_endpoints_associated_with_endpoint_group',
- rel=build_resource_relation(
- resource_name='endpoints_in_endpoint_group'),
- path_vars={
- 'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
- })
+ def __init__(self, *args, **kwargs):
+ super(EndpointFilterExtension, self).__init__(*args, **kwargs)
+ msg = _("Remove endpoint_filter_extension from the paste pipeline, "
+ "the endpoint filter extension is now always available. "
+ "Update the [pipeline:api_v3] section in keystone-paste.ini "
+ "accordingly as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
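With the routes gone, what remains is a pass-through paste filter whose only job is to warn that endpoint_filter_extension can be dropped from [pipeline:api_v3]. Instantiating it once is enough to trigger the warning, roughly like this (a sketch, assuming a deployed Keystone and a placeholder downstream WSGI app):

    # Sketch: loading the deprecated filter only logs and passes through.
    from keystone.contrib.endpoint_filter import routers


    def fake_app(environ, start_response):  # placeholder WSGI app
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'ok']


    # Logs the "Remove endpoint_filter_extension ..." warning, then
    # delegates every request straight to fake_app.
    middleware = routers.EndpointFilterExtension(fake_app)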
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py b/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
index 54792f30..93331779 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/backends/sql.py
@@ -10,14 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from oslo_log import versionutils
from keystone.endpoint_policy.backends import sql
-LOG = logging.getLogger(__name__)
-
_OLD = 'keystone.contrib.endpoint_policy.backends.sql.EndpointPolicy'
_NEW = 'keystone.endpoint_policy.backends.sql.EndpointPolicy'
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
index 5c22f169..32bdabdd 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/migrate_repo/versions/001_add_endpoint_policy_table.py
@@ -12,29 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- endpoint_policy_table = sql.Table(
- 'policy_association',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('policy_id', sql.String(64),
- nullable=False),
- sql.Column('endpoint_id', sql.String(64),
- nullable=True),
- sql.Column('service_id', sql.String(64),
- nullable=True),
- sql.Column('region_id', sql.String(64),
- nullable=True),
- sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- endpoint_policy_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='endpoint_policy')
diff --git a/keystone-moon/keystone/contrib/endpoint_policy/routers.py b/keystone-moon/keystone/contrib/endpoint_policy/routers.py
index 714d1663..c8f7f154 100644
--- a/keystone-moon/keystone/contrib/endpoint_policy/routers.py
+++ b/keystone-moon/keystone/contrib/endpoint_policy/routers.py
@@ -10,14 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from oslo_log import versionutils
from keystone.common import wsgi
-LOG = logging.getLogger(__name__)
-
_OLD = 'keystone.contrib.endpoint_policy.routers.EndpointPolicyExtension'
_NEW = 'keystone.endpoint_policy.routers.Routers'
diff --git a/keystone-moon/keystone/contrib/federation/__init__.py b/keystone-moon/keystone/contrib/federation/__init__.py
index 57c9e42c..e69de29b 100644
--- a/keystone-moon/keystone/contrib/federation/__init__.py
+++ b/keystone-moon/keystone/contrib/federation/__init__.py
@@ -1,15 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.contrib.federation.core import * # noqa
diff --git a/keystone-moon/keystone/contrib/federation/backends/sql.py b/keystone-moon/keystone/contrib/federation/backends/sql.py
index dbd17025..3c24d9c0 100644
--- a/keystone-moon/keystone/contrib/federation/backends/sql.py
+++ b/keystone-moon/keystone/contrib/federation/backends/sql.py
@@ -12,355 +12,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_serialization import jsonutils
+from oslo_log import versionutils
-from keystone.common import sql
-from keystone.contrib.federation import core
-from keystone import exception
-from sqlalchemy import orm
+from keystone.federation.backends import sql
+_OLD = "keystone.contrib.federation.backends.sql.Federation"
+_NEW = "sql"
-class FederationProtocolModel(sql.ModelBase, sql.DictBase):
- __tablename__ = 'federation_protocol'
- attributes = ['id', 'idp_id', 'mapping_id']
- mutable_attributes = frozenset(['mapping_id'])
- id = sql.Column(sql.String(64), primary_key=True)
- idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
- ondelete='CASCADE'), primary_key=True)
- mapping_id = sql.Column(sql.String(64), nullable=False)
+class Federation(sql.Federation):
- @classmethod
- def from_dict(cls, dictionary):
- new_dictionary = dictionary.copy()
- return cls(**new_dictionary)
-
- def to_dict(self):
- """Return a dictionary with model's attributes."""
- d = dict()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- return d
-
-
-class IdentityProviderModel(sql.ModelBase, sql.DictBase):
- __tablename__ = 'identity_provider'
- attributes = ['id', 'enabled', 'description', 'remote_ids']
- mutable_attributes = frozenset(['description', 'enabled', 'remote_ids'])
-
- id = sql.Column(sql.String(64), primary_key=True)
- enabled = sql.Column(sql.Boolean, nullable=False)
- description = sql.Column(sql.Text(), nullable=True)
- remote_ids = orm.relationship('IdPRemoteIdsModel',
- order_by='IdPRemoteIdsModel.remote_id',
- cascade='all, delete-orphan')
-
- @classmethod
- def from_dict(cls, dictionary):
- new_dictionary = dictionary.copy()
- remote_ids_list = new_dictionary.pop('remote_ids', None)
- if not remote_ids_list:
- remote_ids_list = []
- identity_provider = cls(**new_dictionary)
- remote_ids = []
- # NOTE(fmarco76): the remote_ids_list contains only remote ids
- # associated with the IdP because of the "relationship" established in
- # sqlalchemy and corresponding to the FK in the idp_remote_ids table
- for remote in remote_ids_list:
- remote_ids.append(IdPRemoteIdsModel(remote_id=remote))
- identity_provider.remote_ids = remote_ids
- return identity_provider
-
- def to_dict(self):
- """Return a dictionary with model's attributes."""
- d = dict()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- d['remote_ids'] = []
- for remote in self.remote_ids:
- d['remote_ids'].append(remote.remote_id)
- return d
-
-
-class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase):
- __tablename__ = 'idp_remote_ids'
- attributes = ['idp_id', 'remote_id']
- mutable_attributes = frozenset(['idp_id', 'remote_id'])
-
- idp_id = sql.Column(sql.String(64),
- sql.ForeignKey('identity_provider.id',
- ondelete='CASCADE'))
- remote_id = sql.Column(sql.String(255),
- primary_key=True)
-
- @classmethod
- def from_dict(cls, dictionary):
- new_dictionary = dictionary.copy()
- return cls(**new_dictionary)
-
- def to_dict(self):
- """Return a dictionary with model's attributes."""
- d = dict()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- return d
-
-
-class MappingModel(sql.ModelBase, sql.DictBase):
- __tablename__ = 'mapping'
- attributes = ['id', 'rules']
-
- id = sql.Column(sql.String(64), primary_key=True)
- rules = sql.Column(sql.JsonBlob(), nullable=False)
-
- @classmethod
- def from_dict(cls, dictionary):
- new_dictionary = dictionary.copy()
- new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules'])
- return cls(**new_dictionary)
-
- def to_dict(self):
- """Return a dictionary with model's attributes."""
- d = dict()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- d['rules'] = jsonutils.loads(d['rules'])
- return d
-
-
-class ServiceProviderModel(sql.ModelBase, sql.DictBase):
- __tablename__ = 'service_provider'
- attributes = ['auth_url', 'id', 'enabled', 'description',
- 'relay_state_prefix', 'sp_url']
- mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
- 'relay_state_prefix', 'sp_url'])
-
- id = sql.Column(sql.String(64), primary_key=True)
- enabled = sql.Column(sql.Boolean, nullable=False)
- description = sql.Column(sql.Text(), nullable=True)
- auth_url = sql.Column(sql.String(256), nullable=False)
- sp_url = sql.Column(sql.String(256), nullable=False)
- relay_state_prefix = sql.Column(sql.String(256), nullable=False)
-
- @classmethod
- def from_dict(cls, dictionary):
- new_dictionary = dictionary.copy()
- return cls(**new_dictionary)
-
- def to_dict(self):
- """Return a dictionary with model's attributes."""
- d = dict()
- for attr in self.__class__.attributes:
- d[attr] = getattr(self, attr)
- return d
-
-
-class Federation(core.FederationDriverV8):
-
- # Identity Provider CRUD
- @sql.handle_conflicts(conflict_type='identity_provider')
- def create_idp(self, idp_id, idp):
- idp['id'] = idp_id
- with sql.transaction() as session:
- idp_ref = IdentityProviderModel.from_dict(idp)
- session.add(idp_ref)
- return idp_ref.to_dict()
-
- def delete_idp(self, idp_id):
- with sql.transaction() as session:
- self._delete_assigned_protocols(session, idp_id)
- idp_ref = self._get_idp(session, idp_id)
- session.delete(idp_ref)
-
- def _get_idp(self, session, idp_id):
- idp_ref = session.query(IdentityProviderModel).get(idp_id)
- if not idp_ref:
- raise exception.IdentityProviderNotFound(idp_id=idp_id)
- return idp_ref
-
- def _get_idp_from_remote_id(self, session, remote_id):
- q = session.query(IdPRemoteIdsModel)
- q = q.filter_by(remote_id=remote_id)
- try:
- return q.one()
- except sql.NotFound:
- raise exception.IdentityProviderNotFound(idp_id=remote_id)
-
- def list_idps(self):
- with sql.transaction() as session:
- idps = session.query(IdentityProviderModel)
- idps_list = [idp.to_dict() for idp in idps]
- return idps_list
-
- def get_idp(self, idp_id):
- with sql.transaction() as session:
- idp_ref = self._get_idp(session, idp_id)
- return idp_ref.to_dict()
-
- def get_idp_from_remote_id(self, remote_id):
- with sql.transaction() as session:
- ref = self._get_idp_from_remote_id(session, remote_id)
- return ref.to_dict()
-
- def update_idp(self, idp_id, idp):
- with sql.transaction() as session:
- idp_ref = self._get_idp(session, idp_id)
- old_idp = idp_ref.to_dict()
- old_idp.update(idp)
- new_idp = IdentityProviderModel.from_dict(old_idp)
- for attr in IdentityProviderModel.mutable_attributes:
- setattr(idp_ref, attr, getattr(new_idp, attr))
- return idp_ref.to_dict()
-
- # Protocol CRUD
- def _get_protocol(self, session, idp_id, protocol_id):
- q = session.query(FederationProtocolModel)
- q = q.filter_by(id=protocol_id, idp_id=idp_id)
- try:
- return q.one()
- except sql.NotFound:
- kwargs = {'protocol_id': protocol_id,
- 'idp_id': idp_id}
- raise exception.FederatedProtocolNotFound(**kwargs)
-
- @sql.handle_conflicts(conflict_type='federation_protocol')
- def create_protocol(self, idp_id, protocol_id, protocol):
- protocol['id'] = protocol_id
- protocol['idp_id'] = idp_id
- with sql.transaction() as session:
- self._get_idp(session, idp_id)
- protocol_ref = FederationProtocolModel.from_dict(protocol)
- session.add(protocol_ref)
- return protocol_ref.to_dict()
-
- def update_protocol(self, idp_id, protocol_id, protocol):
- with sql.transaction() as session:
- proto_ref = self._get_protocol(session, idp_id, protocol_id)
- old_proto = proto_ref.to_dict()
- old_proto.update(protocol)
- new_proto = FederationProtocolModel.from_dict(old_proto)
- for attr in FederationProtocolModel.mutable_attributes:
- setattr(proto_ref, attr, getattr(new_proto, attr))
- return proto_ref.to_dict()
-
- def get_protocol(self, idp_id, protocol_id):
- with sql.transaction() as session:
- protocol_ref = self._get_protocol(session, idp_id, protocol_id)
- return protocol_ref.to_dict()
-
- def list_protocols(self, idp_id):
- with sql.transaction() as session:
- q = session.query(FederationProtocolModel)
- q = q.filter_by(idp_id=idp_id)
- protocols = [protocol.to_dict() for protocol in q]
- return protocols
-
- def delete_protocol(self, idp_id, protocol_id):
- with sql.transaction() as session:
- key_ref = self._get_protocol(session, idp_id, protocol_id)
- session.delete(key_ref)
-
- def _delete_assigned_protocols(self, session, idp_id):
- query = session.query(FederationProtocolModel)
- query = query.filter_by(idp_id=idp_id)
- query.delete()
-
- # Mapping CRUD
- def _get_mapping(self, session, mapping_id):
- mapping_ref = session.query(MappingModel).get(mapping_id)
- if not mapping_ref:
- raise exception.MappingNotFound(mapping_id=mapping_id)
- return mapping_ref
-
- @sql.handle_conflicts(conflict_type='mapping')
- def create_mapping(self, mapping_id, mapping):
- ref = {}
- ref['id'] = mapping_id
- ref['rules'] = mapping.get('rules')
- with sql.transaction() as session:
- mapping_ref = MappingModel.from_dict(ref)
- session.add(mapping_ref)
- return mapping_ref.to_dict()
-
- def delete_mapping(self, mapping_id):
- with sql.transaction() as session:
- mapping_ref = self._get_mapping(session, mapping_id)
- session.delete(mapping_ref)
-
- def list_mappings(self):
- with sql.transaction() as session:
- mappings = session.query(MappingModel)
- return [x.to_dict() for x in mappings]
-
- def get_mapping(self, mapping_id):
- with sql.transaction() as session:
- mapping_ref = self._get_mapping(session, mapping_id)
- return mapping_ref.to_dict()
-
- @sql.handle_conflicts(conflict_type='mapping')
- def update_mapping(self, mapping_id, mapping):
- ref = {}
- ref['id'] = mapping_id
- ref['rules'] = mapping.get('rules')
- with sql.transaction() as session:
- mapping_ref = self._get_mapping(session, mapping_id)
- old_mapping = mapping_ref.to_dict()
- old_mapping.update(ref)
- new_mapping = MappingModel.from_dict(old_mapping)
- for attr in MappingModel.attributes:
- setattr(mapping_ref, attr, getattr(new_mapping, attr))
- return mapping_ref.to_dict()
-
- def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
- with sql.transaction() as session:
- protocol_ref = self._get_protocol(session, idp_id, protocol_id)
- mapping_id = protocol_ref.mapping_id
- mapping_ref = self._get_mapping(session, mapping_id)
- return mapping_ref.to_dict()
-
- # Service Provider CRUD
- @sql.handle_conflicts(conflict_type='service_provider')
- def create_sp(self, sp_id, sp):
- sp['id'] = sp_id
- with sql.transaction() as session:
- sp_ref = ServiceProviderModel.from_dict(sp)
- session.add(sp_ref)
- return sp_ref.to_dict()
-
- def delete_sp(self, sp_id):
- with sql.transaction() as session:
- sp_ref = self._get_sp(session, sp_id)
- session.delete(sp_ref)
-
- def _get_sp(self, session, sp_id):
- sp_ref = session.query(ServiceProviderModel).get(sp_id)
- if not sp_ref:
- raise exception.ServiceProviderNotFound(sp_id=sp_id)
- return sp_ref
-
- def list_sps(self):
- with sql.transaction() as session:
- sps = session.query(ServiceProviderModel)
- sps_list = [sp.to_dict() for sp in sps]
- return sps_list
-
- def get_sp(self, sp_id):
- with sql.transaction() as session:
- sp_ref = self._get_sp(session, sp_id)
- return sp_ref.to_dict()
-
- def update_sp(self, sp_id, sp):
- with sql.transaction() as session:
- sp_ref = self._get_sp(session, sp_id)
- old_sp = sp_ref.to_dict()
- old_sp.update(sp)
- new_sp = ServiceProviderModel.from_dict(old_sp)
- for attr in ServiceProviderModel.mutable_attributes:
- setattr(sp_ref, attr, getattr(new_sp, attr))
- return sp_ref.to_dict()
-
- def get_enabled_service_providers(self):
- with sql.transaction() as session:
- service_providers = session.query(ServiceProviderModel)
- service_providers = service_providers.filter_by(enabled=True)
- return service_providers
+ @versionutils.deprecated(versionutils.deprecated.MITAKA,
+ in_favor_of=_NEW,
+ what=_OLD)
+ def __init__(self, *args, **kwargs):
+ super(Federation, self).__init__(*args, **kwargs)
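All of the removed models shared the same DictBase contract: from_dict() builds a row from the API payload and to_dict() flattens the declared attributes back out, with JSON-blob columns such as mapping rules serialized on the way in and parsed on the way out. A generic sketch of that round trip, kept outside SQLAlchemy for brevity (MappingLike is illustrative):

    import json


    class MappingLike(object):
        """Illustrative stand-in for the removed MappingModel contract."""
        attributes = ['id', 'rules']

        def __init__(self, id, rules):
            self.id = id
            self.rules = rules  # stored serialized, like sql.JsonBlob

        @classmethod
        def from_dict(cls, dictionary):
            new = dictionary.copy()
            new['rules'] = json.dumps(new['rules'])  # serialize inbound
            return cls(**new)

        def to_dict(self):
            d = {attr: getattr(self, attr) for attr in self.attributes}
            d['rules'] = json.loads(d['rules'])  # parse outbound
            return d


    ref = MappingLike.from_dict({'id': 'm1', 'rules': [{'local': []}]})
    assert ref.to_dict()['rules'] == [{'local': []}]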
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
index 9a4d574b..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
@@ -10,33 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = sql.Table(
- 'identity_provider',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- idp_table.create(migrate_engine, checkfirst=True)
-
- federation_protocol_table = sql.Table(
- 'federation_protocol',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
- primary_key=True),
- sql.Column('mapping_id', sql.String(64), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- federation_protocol_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
index 9a155f5c..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
@@ -10,18 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- mapping_table = sql.Table(
- 'mapping',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('rules', sql.Text(), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- mapping_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
index 1731b0d3..8ce8c6fa 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/003_mapping_id_nullable_false.py
@@ -13,17 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sa
+from keystone import exception
def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- federation_protocol = sa.Table('federation_protocol', meta, autoload=True)
- # NOTE(i159): The column is changed to non-nullable. To prevent
- # database errors when the column will be altered, all the existing
- # null-records should be filled with not null values.
- stmt = (federation_protocol.update().
- where(federation_protocol.c.mapping_id.is_(None)).
- values(mapping_id=''))
- migrate_engine.execute(stmt)
- federation_protocol.c.mapping_id.alter(nullable=False)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
index 2e0aaf93..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/004_add_remote_id_column.py
@@ -10,14 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_db.sqlalchemy import utils
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = utils.get_table(migrate_engine, 'identity_provider')
- remote_id = sql.Column('remote_id', sql.String(256), nullable=True)
- idp_table.create_column(remote_id)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
index 1594f893..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/005_add_service_provider_table.py
@@ -10,22 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- sp_table = sql.Table(
- 'service_provider',
- meta,
- sql.Column('auth_url', sql.String(256), nullable=True),
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- sql.Column('sp_url', sql.String(256), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- sp_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
index dc18f548..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/006_fixup_service_provider_attributes.py
@@ -10,31 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-_SP_TABLE_NAME = 'service_provider'
-
-
-def _update_null_columns(migrate_engine, sp_table):
- stmt = (sp_table.update().
- where(sp_table.c.auth_url.is_(None)).
- values(auth_url=''))
- migrate_engine.execute(stmt)
-
- stmt = (sp_table.update().
- where(sp_table.c.sp_url.is_(None)).
- values(sp_url=''))
- migrate_engine.execute(stmt)
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- sp_table = sql.Table(_SP_TABLE_NAME, meta, autoload=True)
- # The columns are being changed to non-nullable. To prevent
- # database errors when both are altered, all the existing
- # null-records should be filled with not null values.
- _update_null_columns(migrate_engine, sp_table)
-
- sp_table.c.auth_url.alter(nullable=False)
- sp_table.c.sp_url.alter(nullable=False)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py
index 77012aad..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/007_add_remote_id_table.py
@@ -10,34 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as orm
+from keystone import exception
def upgrade(migrate_engine):
- meta = orm.MetaData()
- meta.bind = migrate_engine
- idp_table = orm.Table('identity_provider', meta, autoload=True)
- remote_id_table = orm.Table(
- 'idp_remote_ids',
- meta,
- orm.Column('idp_id',
- orm.String(64),
- orm.ForeignKey('identity_provider.id',
- ondelete='CASCADE')),
- orm.Column('remote_id',
- orm.String(255),
- primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- remote_id_table.create(migrate_engine, checkfirst=True)
-
- select = orm.sql.select([idp_table.c.id, idp_table.c.remote_id]).where(
- idp_table.c.remote_id.isnot(None))
-
- for identity in migrate_engine.execute(select):
- remote_idp_entry = {'idp_id': identity.id,
- 'remote_id': identity.remote_id}
- remote_id_table.insert(remote_idp_entry).execute()
-
- idp_table.drop_column('remote_id')
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py
index 150dcfed..d9b24a00 100644
--- a/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py
+++ b/keystone-moon/keystone/contrib/federation/migrate_repo/versions/008_add_relay_state_to_sp.py
@@ -10,30 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
-from oslo_db.sqlalchemy import utils
-import sqlalchemy as sql
-
-
-CONF = cfg.CONF
-_SP_TABLE_NAME = 'service_provider'
-_RELAY_STATE_PREFIX = 'relay_state_prefix'
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = utils.get_table(migrate_engine, _SP_TABLE_NAME)
- relay_state_prefix_default = CONF.saml.relay_state_prefix
- relay_state_prefix = sql.Column(_RELAY_STATE_PREFIX, sql.String(256),
- nullable=False,
- server_default=relay_state_prefix_default)
- idp_table.create_column(relay_state_prefix)
-
-
-def downgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- idp_table = utils.get_table(migrate_engine, _SP_TABLE_NAME)
- idp_table.drop_column(_RELAY_STATE_PREFIX)
+ raise exception.MigrationMovedFailure(extension='federation')
diff --git a/keystone-moon/keystone/contrib/federation/routers.py b/keystone-moon/keystone/contrib/federation/routers.py
index ddf2f61f..d5857ca6 100644
--- a/keystone-moon/keystone/contrib/federation/routers.py
+++ b/keystone-moon/keystone/contrib/federation/routers.py
@@ -10,242 +10,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
+from oslo_log import log
+from oslo_log import versionutils
-from keystone.common import json_home
from keystone.common import wsgi
-from keystone.contrib.federation import controllers
+from keystone.i18n import _
-build_resource_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-FEDERATION', extension_version='1.0')
+LOG = log.getLogger(__name__)
-build_parameter_relation = functools.partial(
- json_home.build_v3_extension_parameter_relation,
- extension_name='OS-FEDERATION', extension_version='1.0')
-IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')
-PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
- parameter_name='protocol_id')
-SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
+class FederationExtension(wsgi.Middleware):
-
-class FederationExtension(wsgi.V3ExtensionRouter):
- """API Endpoints for the Federation extension.
-
- The API looks like::
-
- PUT /OS-FEDERATION/identity_providers/{idp_id}
- GET /OS-FEDERATION/identity_providers
- GET /OS-FEDERATION/identity_providers/{idp_id}
- DELETE /OS-FEDERATION/identity_providers/{idp_id}
- PATCH /OS-FEDERATION/identity_providers/{idp_id}
-
- PUT /OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}
- GET /OS-FEDERATION/identity_providers/
- {idp_id}/protocols
- GET /OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}
- PATCH /OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}
- DELETE /OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}
-
- PUT /OS-FEDERATION/mappings
- GET /OS-FEDERATION/mappings
- PATCH /OS-FEDERATION/mappings/{mapping_id}
- GET /OS-FEDERATION/mappings/{mapping_id}
- DELETE /OS-FEDERATION/mappings/{mapping_id}
-
- GET /OS-FEDERATION/projects
- GET /OS-FEDERATION/domains
-
- PUT /OS-FEDERATION/service_providers/{sp_id}
- GET /OS-FEDERATION/service_providers
- GET /OS-FEDERATION/service_providers/{sp_id}
- DELETE /OS-FEDERATION/service_providers/{sp_id}
- PATCH /OS-FEDERATION/service_providers/{sp_id}
-
- GET /OS-FEDERATION/identity_providers/{identity_provider}/
- protocols/{protocol}/auth
- POST /OS-FEDERATION/identity_providers/{identity_provider}/
- protocols/{protocol}/auth
- GET /auth/OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}/websso
- ?origin=https%3A//horizon.example.com
- POST /auth/OS-FEDERATION/identity_providers/
- {idp_id}/protocols/{protocol_id}/websso
- ?origin=https%3A//horizon.example.com
-
-
- POST /auth/OS-FEDERATION/saml2
- POST /auth/OS-FEDERATION/saml2/ecp
- GET /OS-FEDERATION/saml2/metadata
-
- GET /auth/OS-FEDERATION/websso/{protocol_id}
- ?origin=https%3A//horizon.example.com
-
- POST /auth/OS-FEDERATION/websso/{protocol_id}
- ?origin=https%3A//horizon.example.com
-
- """
- def _construct_url(self, suffix):
- return "/OS-FEDERATION/%s" % suffix
-
- def add_routes(self, mapper):
- auth_controller = controllers.Auth()
- idp_controller = controllers.IdentityProvider()
- protocol_controller = controllers.FederationProtocol()
- mapping_controller = controllers.MappingController()
- project_controller = controllers.ProjectAssignmentV3()
- domain_controller = controllers.DomainV3()
- saml_metadata_controller = controllers.SAMLMetadataV3()
- sp_controller = controllers.ServiceProvider()
-
- # Identity Provider CRUD operations
-
- self._add_resource(
- mapper, idp_controller,
- path=self._construct_url('identity_providers/{idp_id}'),
- get_action='get_identity_provider',
- put_action='create_identity_provider',
- patch_action='update_identity_provider',
- delete_action='delete_identity_provider',
- rel=build_resource_relation(resource_name='identity_provider'),
- path_vars={
- 'idp_id': IDP_ID_PARAMETER_RELATION,
- })
- self._add_resource(
- mapper, idp_controller,
- path=self._construct_url('identity_providers'),
- get_action='list_identity_providers',
- rel=build_resource_relation(resource_name='identity_providers'))
-
- # Protocol CRUD operations
-
- self._add_resource(
- mapper, protocol_controller,
- path=self._construct_url('identity_providers/{idp_id}/protocols/'
- '{protocol_id}'),
- get_action='get_protocol',
- put_action='create_protocol',
- patch_action='update_protocol',
- delete_action='delete_protocol',
- rel=build_resource_relation(
- resource_name='identity_provider_protocol'),
- path_vars={
- 'idp_id': IDP_ID_PARAMETER_RELATION,
- 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
- })
- self._add_resource(
- mapper, protocol_controller,
- path=self._construct_url('identity_providers/{idp_id}/protocols'),
- get_action='list_protocols',
- rel=build_resource_relation(
- resource_name='identity_provider_protocols'),
- path_vars={
- 'idp_id': IDP_ID_PARAMETER_RELATION,
- })
-
- # Mapping CRUD operations
-
- self._add_resource(
- mapper, mapping_controller,
- path=self._construct_url('mappings/{mapping_id}'),
- get_action='get_mapping',
- put_action='create_mapping',
- patch_action='update_mapping',
- delete_action='delete_mapping',
- rel=build_resource_relation(resource_name='mapping'),
- path_vars={
- 'mapping_id': build_parameter_relation(
- parameter_name='mapping_id'),
- })
- self._add_resource(
- mapper, mapping_controller,
- path=self._construct_url('mappings'),
- get_action='list_mappings',
- rel=build_resource_relation(resource_name='mappings'))
-
- # Service Providers CRUD operations
-
- self._add_resource(
- mapper, sp_controller,
- path=self._construct_url('service_providers/{sp_id}'),
- get_action='get_service_provider',
- put_action='create_service_provider',
- patch_action='update_service_provider',
- delete_action='delete_service_provider',
- rel=build_resource_relation(resource_name='service_provider'),
- path_vars={
- 'sp_id': SP_ID_PARAMETER_RELATION,
- })
-
- self._add_resource(
- mapper, sp_controller,
- path=self._construct_url('service_providers'),
- get_action='list_service_providers',
- rel=build_resource_relation(resource_name='service_providers'))
-
- self._add_resource(
- mapper, domain_controller,
- path=self._construct_url('domains'),
- new_path='/auth/domains',
- get_action='list_domains_for_groups',
- rel=build_resource_relation(resource_name='domains'))
- self._add_resource(
- mapper, project_controller,
- path=self._construct_url('projects'),
- new_path='/auth/projects',
- get_action='list_projects_for_groups',
- rel=build_resource_relation(resource_name='projects'))
-
- # Auth operations
- self._add_resource(
- mapper, auth_controller,
- path=self._construct_url('identity_providers/{identity_provider}/'
- 'protocols/{protocol}/auth'),
- get_post_action='federated_authentication',
- rel=build_resource_relation(
- resource_name='identity_provider_protocol_auth'),
- path_vars={
- 'identity_provider': IDP_ID_PARAMETER_RELATION,
- 'protocol': PROTOCOL_ID_PARAMETER_RELATION,
- })
- self._add_resource(
- mapper, auth_controller,
- path='/auth' + self._construct_url('saml2'),
- post_action='create_saml_assertion',
- rel=build_resource_relation(resource_name='saml2'))
- self._add_resource(
- mapper, auth_controller,
- path='/auth' + self._construct_url('saml2/ecp'),
- post_action='create_ecp_assertion',
- rel=build_resource_relation(resource_name='ecp'))
- self._add_resource(
- mapper, auth_controller,
- path='/auth' + self._construct_url('websso/{protocol_id}'),
- get_post_action='federated_sso_auth',
- rel=build_resource_relation(resource_name='websso'),
- path_vars={
- 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
- })
- self._add_resource(
- mapper, auth_controller,
- path='/auth' + self._construct_url(
- 'identity_providers/{idp_id}/protocols/{protocol_id}/websso'),
- get_post_action='federated_idp_specific_sso_auth',
- rel=build_resource_relation(resource_name='identity_providers'),
- path_vars={
- 'idp_id': IDP_ID_PARAMETER_RELATION,
- 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
- })
-
- # Keystone-Identity-Provider metadata endpoint
- self._add_resource(
- mapper, saml_metadata_controller,
- path=self._construct_url('saml2/metadata'),
- get_action='get_metadata',
- rel=build_resource_relation(resource_name='metadata'))
+ def __init__(self, *args, **kwargs):
+ super(FederationExtension, self).__init__(*args, **kwargs)
+ msg = _("Remove federation_extension from the paste pipeline, the "
+ "federation extension is now always available. Update the "
+ "[pipeline:api_v3] section in keystone-paste.ini accordingly, "
+ "as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
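The removed docstring doubled as the route table for OS-FEDERATION; those paths now ship in Keystone's core v3 router rather than in this extension. For orientation, listing identity providers against a running deployment looks roughly like this (a sketch; the endpoint URL and token are placeholders):

    # Sketch only: assumes a reachable Keystone and a valid scoped token.
    import requests

    KEYSTONE = 'http://localhost:5000/v3'  # placeholder endpoint
    TOKEN = '<scoped-token>'               # placeholder X-Auth-Token

    resp = requests.get(
        KEYSTONE + '/OS-FEDERATION/identity_providers',
        headers={'X-Auth-Token': TOKEN})
    resp.raise_for_status()
    for idp in resp.json()['identity_providers']:
        print(idp['id'], idp['enabled'])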
diff --git a/keystone-moon/keystone/contrib/moon/backends/sql.py b/keystone-moon/keystone/contrib/moon/backends/sql.py
index 2b7258ea..1ddb474e 100644
--- a/keystone-moon/keystone/contrib/moon/backends/sql.py
+++ b/keystone-moon/keystone/contrib/moon/backends/sql.py
@@ -324,13 +324,13 @@ class TenantConnector(TenantDriver):
base[key] = update[key]
def get_tenants_dict(self):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Tenant)
tenants = query.all()
return {tenant.id: tenant.tenant for tenant in tenants}
def add_tenant_dict(self, tenant_id, tenant_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
new_ref = Tenant.from_dict(
{
"id": tenant_id,
@@ -341,14 +341,14 @@ class TenantConnector(TenantDriver):
return {new_ref.id: new_ref.tenant}
def del_tenant(self, tenant_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Tenant)
query = query.filter_by(id=tenant_id)
tenant = query.first()
session.delete(tenant)
def set_tenant_dict(self, tenant_id, tenant_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Tenant)
query = query.filter_by(id=tenant_id)
ref = query.first()
@@ -363,13 +363,13 @@ class IntraExtensionConnector(IntraExtensionDriver):
# IntraExtension functions
def get_intra_extensions_dict(self):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(IntraExtension)
ref_list = query.all()
return {_ref.id: _ref.intra_extension for _ref in ref_list}
def del_intra_extension(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = session.query(IntraExtension).get(intra_extension_id)
# Must delete all references to that IntraExtension
for _object in __all_objects__:
@@ -378,11 +378,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
_refs = query.all()
for _ref in _refs:
session.delete(_ref)
- session.flush()
+ # session.flush()
session.delete(ref)
def set_intra_extension_dict(self, intra_extension_id, intra_extension_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(IntraExtension)
query = query.filter_by(id=intra_extension_id)
ref = query.first()
@@ -399,19 +399,20 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in IntraExtension.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_intra_extension, attr))
+ # session.flush()
return IntraExtension.to_dict(ref)
# Getter and Setter for subject_category
def get_subject_categories_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(SubjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.subject_category for _ref in ref_list}
def set_subject_category_dict(self, intra_extension_id, subject_category_id, subject_category_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=subject_category_id)
ref = query.first()
@@ -429,11 +430,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in SubjectCategory.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {subject_category_id: SubjectCategory.to_dict(ref)['subject_category']}
def del_subject_category(self, intra_extension_id, subject_category_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=subject_category_id)
ref = query.first()
@@ -443,14 +444,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for object_category
def get_object_categories_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ObjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.object_category for _ref in ref_list}
def set_object_category_dict(self, intra_extension_id, object_category_id, object_category_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ObjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=object_category_id)
ref = query.first()
@@ -468,11 +469,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ObjectCategory.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {object_category_id: ObjectCategory.to_dict(ref)['object_category']}
def del_object_category(self, intra_extension_id, object_category_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ObjectCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=object_category_id)
ref = query.first()
@@ -482,14 +483,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for action_category
def get_action_categories_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ActionCategory)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.action_category for _ref in ref_list}
def set_action_category_dict(self, intra_extension_id, action_category_id, action_category_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ActionCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=action_category_id)
ref = query.first()
@@ -507,11 +508,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ActionCategory.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {action_category_id: ActionCategory.to_dict(ref)['action_category']}
def del_action_category(self, intra_extension_id, action_category_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ActionCategory)
query = query.filter_by(intra_extension_id=intra_extension_id, id=action_category_id)
ref = query.first()
@@ -521,14 +522,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Perimeter
def get_subjects_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Subject)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.subject for _ref in ref_list}
def set_subject_dict(self, intra_extension_id, subject_id, subject_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Subject)
query = query.filter_by(intra_extension_id=intra_extension_id, id=subject_id)
ref = query.first()
@@ -548,25 +549,25 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in Subject.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {subject_id: Subject.to_dict(ref)['subject']}
def del_subject(self, intra_extension_id, subject_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Subject)
query = query.filter_by(intra_extension_id=intra_extension_id, id=subject_id)
ref = query.first()
session.delete(ref)
def get_objects_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Object)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.object for _ref in ref_list}
def set_object_dict(self, intra_extension_id, object_id, object_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Object)
query = query.filter_by(intra_extension_id=intra_extension_id, id=object_id)
ref = query.first()
@@ -584,25 +585,25 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in Object.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {object_id: Object.to_dict(ref)['object']}
def del_object(self, intra_extension_id, object_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Object)
query = query.filter_by(intra_extension_id=intra_extension_id, id=object_id)
ref = query.first()
session.delete(ref)
def get_actions_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Action)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.action for _ref in ref_list}
def set_action_dict(self, intra_extension_id, action_id, action_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Action)
query = query.filter_by(intra_extension_id=intra_extension_id, id=action_id)
ref = query.first()
@@ -620,11 +621,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in Action.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {action_id: Action.to_dict(ref)['action']}
def del_action(self, intra_extension_id, action_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Action)
query = query.filter_by(intra_extension_id=intra_extension_id, id=action_id)
ref = query.first()
@@ -633,14 +634,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for subject_scope
def get_subject_scopes_dict(self, intra_extension_id, subject_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(SubjectScope)
query = query.filter_by(intra_extension_id=intra_extension_id, subject_category_id=subject_category_id)
ref_list = query.all()
return {_ref.id: _ref.subject_scope for _ref in ref_list}
def set_subject_scope_dict(self, intra_extension_id, subject_category_id, subject_scope_id, subject_scope_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubjectScope)
query = query.filter_by(intra_extension_id=intra_extension_id, subject_category_id=subject_category_id, id=subject_scope_id)
ref = query.first()
@@ -659,11 +660,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in SubjectScope.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {subject_scope_id: SubjectScope.to_dict(ref)['subject_scope']}
def del_subject_scope(self, intra_extension_id, subject_category_id, subject_scope_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubjectScope)
if not subject_category_id or not subject_scope_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -677,14 +678,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for object_category_scope
def get_object_scopes_dict(self, intra_extension_id, object_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ObjectScope)
query = query.filter_by(intra_extension_id=intra_extension_id, object_category_id=object_category_id)
ref_list = query.all()
return {_ref.id: _ref.object_scope for _ref in ref_list}
def set_object_scope_dict(self, intra_extension_id, object_category_id, object_scope_id, object_scope_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ObjectScope)
query = query.filter_by(intra_extension_id=intra_extension_id, object_category_id=object_category_id, id=object_scope_id)
ref = query.first()
@@ -703,11 +704,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ObjectScope.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {object_scope_id: ObjectScope.to_dict(ref)['object_scope']}
def del_object_scope(self, intra_extension_id, object_category_id, object_scope_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ObjectScope)
if not object_category_id or not object_scope_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -721,14 +722,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for action_scope
def get_action_scopes_dict(self, intra_extension_id, action_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ActionScope)
query = query.filter_by(intra_extension_id=intra_extension_id, action_category_id=action_category_id)
ref_list = query.all()
return {_ref.id: _ref.action_scope for _ref in ref_list}
def set_action_scope_dict(self, intra_extension_id, action_category_id, action_scope_id, action_scope_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ActionScope)
query = query.filter_by(intra_extension_id=intra_extension_id, action_category_id=action_category_id, id=action_scope_id)
ref = query.first()
@@ -747,11 +748,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ActionScope.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {action_scope_id: ActionScope.to_dict(ref)['action_scope']}
def del_action_scope(self, intra_extension_id, action_category_id, action_scope_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ActionScope)
if not action_category_id or not action_scope_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -765,7 +766,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for subject_category_assignment
def get_subject_assignment_list(self, intra_extension_id, subject_id, subject_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(SubjectAssignment)
if not subject_id or not subject_category_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -779,7 +780,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
return list(ref.subject_assignment)
def set_subject_assignment_list(self, intra_extension_id, subject_id, subject_category_id, subject_assignment_list=[]):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubjectAssignment)
query = query.filter_by(intra_extension_id=intra_extension_id, subject_id=subject_id, subject_category_id=subject_category_id)
ref = query.first()
@@ -799,7 +800,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in SubjectAssignment.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return subject_assignment_list
def add_subject_assignment_list(self, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
@@ -810,9 +811,10 @@ class IntraExtensionConnector(IntraExtensionDriver):
def del_subject_assignment(self, intra_extension_id, subject_id, subject_category_id, subject_scope_id):
if not subject_id or not subject_category_id or not subject_scope_id:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
for ref in self.get_subject_assignment_list(intra_extension_id, None, None):
session.delete(ref)
+ session.flush()
return
new_subject_assignment_list = self.get_subject_assignment_list(intra_extension_id, subject_id, subject_category_id)
new_subject_assignment_list.remove(subject_scope_id)
@@ -821,7 +823,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for object_category_assignment
def get_object_assignment_list(self, intra_extension_id, object_id, object_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ObjectAssignment)
if not object_id or not object_category_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -835,7 +837,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
return list(ref.object_assignment)
def set_object_assignment_list(self, intra_extension_id, object_id, object_category_id, object_assignment_list=[]):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ObjectAssignment)
query = query.filter_by(intra_extension_id=intra_extension_id, object_id=object_id, object_category_id=object_category_id)
ref = query.first()
@@ -854,7 +856,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ObjectAssignment.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return self.get_object_assignment_list(intra_extension_id, object_id, object_category_id)
def add_object_assignment_list(self, intra_extension_id, object_id, object_category_id, object_scope_id):
@@ -865,9 +867,10 @@ class IntraExtensionConnector(IntraExtensionDriver):
def del_object_assignment(self, intra_extension_id, object_id, object_category_id, object_scope_id):
if not object_id or not object_category_id or not object_scope_id:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
for ref in self.get_object_assignment_list(intra_extension_id, None, None):
session.delete(ref)
+ session.flush()
return
new_object_assignment_list = self.get_object_assignment_list(intra_extension_id, object_id, object_category_id)
new_object_assignment_list.remove(object_scope_id)
@@ -876,7 +879,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for action_category_assignment
def get_action_assignment_list(self, intra_extension_id, action_id, action_category_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(ActionAssignment)
if not action_id or not action_category_id:
query = query.filter_by(intra_extension_id=intra_extension_id)
@@ -890,7 +893,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
return list(ref.action_assignment)
def set_action_assignment_list(self, intra_extension_id, action_id, action_category_id, action_assignment_list=[]):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ActionAssignment)
query = query.filter_by(intra_extension_id=intra_extension_id, action_id=action_id, action_category_id=action_category_id)
ref = query.first()
@@ -909,7 +912,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in ActionAssignment.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return self.get_action_assignment_list(intra_extension_id, action_id, action_category_id)
def add_action_assignment_list(self, intra_extension_id, action_id, action_category_id, action_scope_id):
@@ -920,9 +923,10 @@ class IntraExtensionConnector(IntraExtensionDriver):
def del_action_assignment(self, intra_extension_id, action_id, action_category_id, action_scope_id):
if not action_id or not action_category_id or not action_scope_id:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
for ref in self.get_action_assignment_list(intra_extension_id, None, None):
session.delete(ref)
+ session.flush()
return
new_action_assignment_list = self.get_action_assignment_list(intra_extension_id, action_id, action_category_id)
new_action_assignment_list.remove(action_scope_id)
@@ -931,7 +935,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for sub_meta_rule
def get_aggregation_algorithm_id(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(IntraExtension)
query = query.filter_by(id=intra_extension_id)
ref = query.first()
@@ -941,18 +945,18 @@ class IntraExtensionConnector(IntraExtensionDriver):
return ""
def set_aggregation_algorithm_id(self, intra_extension_id, aggregation_algorithm_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(IntraExtension)
query = query.filter_by(id=intra_extension_id)
ref = query.first()
intra_extension_dict = dict(ref.intra_extension)
intra_extension_dict["aggregation_algorithm"] = aggregation_algorithm_id
setattr(ref, "intra_extension", intra_extension_dict)
- session.flush()
+ # session.flush()
return {"aggregation_algorithm": ref.intra_extension["aggregation_algorithm"]}
def del_aggregation_algorithm(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(IntraExtension)
query = query.filter_by(id=intra_extension_id)
ref = query.first()
@@ -964,14 +968,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for sub_meta_rule
def get_sub_meta_rules_dict(self, intra_extension_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(SubMetaRule)
query = query.filter_by(intra_extension_id=intra_extension_id)
ref_list = query.all()
return {_ref.id: _ref.sub_meta_rule for _ref in ref_list}
def set_sub_meta_rule_dict(self, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubMetaRule)
query = query.filter_by(intra_extension_id=intra_extension_id, id=sub_meta_rule_id)
ref = query.first()
@@ -991,11 +995,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in SubMetaRule.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return self.get_sub_meta_rules_dict(intra_extension_id)
def del_sub_meta_rule(self, intra_extension_id, sub_meta_rule_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(SubMetaRule)
query = query.filter_by(intra_extension_id=intra_extension_id, id=sub_meta_rule_id)
ref = query.first()
@@ -1004,14 +1008,14 @@ class IntraExtensionConnector(IntraExtensionDriver):
# Getter and Setter for rules
def get_rules_dict(self, intra_extension_id, sub_meta_rule_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Rule)
query = query.filter_by(intra_extension_id=intra_extension_id, sub_meta_rule_id=sub_meta_rule_id)
ref_list = query.all()
return {_ref.id: _ref.rule for _ref in ref_list}
def set_rule_dict(self, intra_extension_id, sub_meta_rule_id, rule_id, rule_list):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Rule)
query = query.filter_by(intra_extension_id=intra_extension_id, sub_meta_rule_id=sub_meta_rule_id, id=rule_id)
ref = query.first()
@@ -1030,11 +1034,11 @@ class IntraExtensionConnector(IntraExtensionDriver):
for attr in Rule.attributes:
if attr != 'id':
setattr(ref, attr, getattr(new_ref, attr))
- session.flush()
+ # session.flush()
return {rule_id: ref.rule}
def del_rule(self, intra_extension_id, sub_meta_rule_id, rule_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(Rule)
query = query.filter_by(intra_extension_id=intra_extension_id, sub_meta_rule_id=sub_meta_rule_id, id=rule_id)
ref = query.first()
@@ -1074,19 +1078,19 @@ class IntraExtensionConnector(IntraExtensionDriver):
# class InterExtensionConnector(InterExtensionDriver):
#
# def get_inter_extensions(self):
-# with sql.transaction() as session:
+# with sql.session_for_read() as session:
# query = session.query(InterExtension.id)
# interextensions = query.all()
# return [interextension.id for interextension in interextensions]
#
# def create_inter_extensions(self, inter_id, inter_extension):
-# with sql.transaction() as session:
+# with sql.session_for_write() as session:
# ie_ref = InterExtension.from_dict(inter_extension)
# session.add(ie_ref)
# return InterExtension.to_dict(ie_ref)
#
# def get_inter_extension(self, uuid):
-# with sql.transaction() as session:
+# with sql.session_for_read() as session:
# query = session.query(InterExtension)
# query = query.filter_by(id=uuid)
# ref = query.first()
@@ -1095,7 +1099,7 @@ class IntraExtensionConnector(IntraExtensionDriver):
# return ref.to_dict()
#
# def delete_inter_extensions(self, inter_extension_id):
-# with sql.transaction() as session:
+# with sql.session_for_write() as session:
# ref = session.query(InterExtension).get(inter_extension_id)
# session.delete(ref)
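The hunks above replace every sql.transaction() with an intent-specific helper and comment out the explicit session.flush() calls. A minimal sketch of what those helpers provide, assuming keystone.common.sql delegates to oslo.db's enginefacade as the Mitaka implementation does:

from oslo_db.sqlalchemy import enginefacade

@enginefacade.transaction_context_provider
class _Context(object):
    """Carries the transaction state for enginefacade."""

_context = _Context()

def session_for_read():
    # Read-only scope: no commit is issued on exit.
    return enginefacade.reader.using(_context)

def session_for_write():
    # Write scope: pending changes are flushed and committed when the
    # context manager exits, which is why the explicit session.flush()
    # calls above become redundant.
    return enginefacade.writer.using(_context)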
diff --git a/keystone-moon/keystone/contrib/moon/core.py b/keystone-moon/keystone/contrib/moon/core.py
index 53b81574..83657317 100644
--- a/keystone-moon/keystone/contrib/moon/core.py
+++ b/keystone-moon/keystone/contrib/moon/core.py
@@ -325,7 +325,7 @@ class ConfigurationManager(manager.Manager):
@enforce("read", "sub_meta_rule_algorithms")
def get_sub_meta_rule_algorithm_id_from_name(self, sub_meta_rule_algorithm_name):
- sub_meta_rule_algorithms_dict = self.driver.get_sub_meta_rule_algorithms_dict()
+ sub_meta_rule_algorithms_dict = self.configuration_api.get_sub_meta_rule_algorithms_dict()
for sub_meta_rule_algorithm_id in sub_meta_rule_algorithms_dict:
if sub_meta_rule_algorithms_dict[sub_meta_rule_algorithm_id]['name'] == sub_meta_rule_algorithm_name:
return sub_meta_rule_algorithm_id
@@ -1218,6 +1218,7 @@ class IntraExtensionManager(manager.Manager):
ie_dict["genre"] = "admin"
ie_dict["description"] = "policy_root"
ref = self.driver.set_intra_extension_dict(ie_dict['id'], ie_dict)
+ logging.debug("Creation of root IE: {}".format(ref))
self.moonlog_api.debug("Creation of root IE: {}".format(ref))
# read the template given by "model" and populate default variables
@@ -2025,6 +2026,8 @@ class IntraExtensionManager(manager.Manager):
@enforce(("read", "write"), "sub_meta_rules")
@enforce("write", "rules")
def add_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_dict):
+ LOG.info("add_sub_meta_rule_dict = {}".format(self.driver.get_sub_meta_rules_dict(intra_extension_id)))
+ LOG.info("add_sub_meta_rule_dict = {}".format(sub_meta_rule_dict))
sub_meta_rules_dict = self.driver.get_sub_meta_rules_dict(intra_extension_id)
for _sub_meta_rule_id in sub_meta_rules_dict:
if sub_meta_rule_dict['name'] == sub_meta_rules_dict[_sub_meta_rule_id]["name"]:
@@ -2065,6 +2068,8 @@ class IntraExtensionManager(manager.Manager):
@enforce(("read", "write"), "sub_meta_rules")
@enforce("write", "rules")
def set_sub_meta_rule_dict(self, user_id, intra_extension_id, sub_meta_rule_id, sub_meta_rule_dict):
+ LOG.info("set_sub_meta_rule_dict = {}".format(self.driver.get_sub_meta_rules_dict(intra_extension_id)))
+ LOG.info("set_sub_meta_rule_dict = {} {}".format(sub_meta_rule_id, sub_meta_rule_dict))
if sub_meta_rule_id not in self.driver.get_sub_meta_rules_dict(intra_extension_id):
raise SubMetaRuleUnknown()
for attribute in sub_meta_rule_dict.keys():
diff --git a/keystone-moon/keystone/contrib/oauth1/__init__.py b/keystone-moon/keystone/contrib/oauth1/__init__.py
index 8cab2498..e69de29b 100644
--- a/keystone-moon/keystone/contrib/oauth1/__init__.py
+++ b/keystone-moon/keystone/contrib/oauth1/__init__.py
@@ -1,15 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.contrib.oauth1.core import * # noqa
diff --git a/keystone-moon/keystone/contrib/oauth1/backends/sql.py b/keystone-moon/keystone/contrib/oauth1/backends/sql.py
index a7876756..31b6ce3b 100644
--- a/keystone-moon/keystone/contrib/oauth1/backends/sql.py
+++ b/keystone-moon/keystone/contrib/oauth1/backends/sql.py
@@ -12,261 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
-import random as _random
-import uuid
+from oslo_log import versionutils
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
+from keystone.oauth1.backends import sql
-from keystone.common import sql
-from keystone.common import utils
-from keystone.contrib.oauth1 import core
-from keystone import exception
-from keystone.i18n import _
+_OLD = "keystone.contrib.oauth1.backends.sql.OAuth1"
+_NEW = "sql"
-random = _random.SystemRandom()
+class OAuth1(sql.OAuth1):
-class Consumer(sql.ModelBase, sql.DictBase):
- __tablename__ = 'consumer'
- attributes = ['id', 'description', 'secret']
- id = sql.Column(sql.String(64), primary_key=True, nullable=False)
- description = sql.Column(sql.String(64), nullable=True)
- secret = sql.Column(sql.String(64), nullable=False)
- extra = sql.Column(sql.JsonBlob(), nullable=False)
-
-
-class RequestToken(sql.ModelBase, sql.DictBase):
- __tablename__ = 'request_token'
- attributes = ['id', 'request_secret',
- 'verifier', 'authorizing_user_id', 'requested_project_id',
- 'role_ids', 'consumer_id', 'expires_at']
- id = sql.Column(sql.String(64), primary_key=True, nullable=False)
- request_secret = sql.Column(sql.String(64), nullable=False)
- verifier = sql.Column(sql.String(64), nullable=True)
- authorizing_user_id = sql.Column(sql.String(64), nullable=True)
- requested_project_id = sql.Column(sql.String(64), nullable=False)
- role_ids = sql.Column(sql.Text(), nullable=True)
- consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
- nullable=False, index=True)
- expires_at = sql.Column(sql.String(64), nullable=True)
-
- @classmethod
- def from_dict(cls, user_dict):
- return cls(**user_dict)
-
- def to_dict(self):
- return dict(self.items())
-
-
-class AccessToken(sql.ModelBase, sql.DictBase):
- __tablename__ = 'access_token'
- attributes = ['id', 'access_secret', 'authorizing_user_id',
- 'project_id', 'role_ids', 'consumer_id',
- 'expires_at']
- id = sql.Column(sql.String(64), primary_key=True, nullable=False)
- access_secret = sql.Column(sql.String(64), nullable=False)
- authorizing_user_id = sql.Column(sql.String(64), nullable=False,
- index=True)
- project_id = sql.Column(sql.String(64), nullable=False)
- role_ids = sql.Column(sql.Text(), nullable=False)
- consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
- nullable=False)
- expires_at = sql.Column(sql.String(64), nullable=True)
-
- @classmethod
- def from_dict(cls, user_dict):
- return cls(**user_dict)
-
- def to_dict(self):
- return dict(self.items())
-
-
-class OAuth1(object):
- def _get_consumer(self, session, consumer_id):
- consumer_ref = session.query(Consumer).get(consumer_id)
- if consumer_ref is None:
- raise exception.NotFound(_('Consumer not found'))
- return consumer_ref
-
- def get_consumer_with_secret(self, consumer_id):
- session = sql.get_session()
- consumer_ref = self._get_consumer(session, consumer_id)
- return consumer_ref.to_dict()
-
- def get_consumer(self, consumer_id):
- return core.filter_consumer(
- self.get_consumer_with_secret(consumer_id))
-
- def create_consumer(self, consumer):
- consumer['secret'] = uuid.uuid4().hex
- if not consumer.get('description'):
- consumer['description'] = None
- session = sql.get_session()
- with session.begin():
- consumer_ref = Consumer.from_dict(consumer)
- session.add(consumer_ref)
- return consumer_ref.to_dict()
-
- def _delete_consumer(self, session, consumer_id):
- consumer_ref = self._get_consumer(session, consumer_id)
- session.delete(consumer_ref)
-
- def _delete_request_tokens(self, session, consumer_id):
- q = session.query(RequestToken)
- req_tokens = q.filter_by(consumer_id=consumer_id)
- req_tokens_list = set([x.id for x in req_tokens])
- for token_id in req_tokens_list:
- token_ref = self._get_request_token(session, token_id)
- session.delete(token_ref)
-
- def _delete_access_tokens(self, session, consumer_id):
- q = session.query(AccessToken)
- acc_tokens = q.filter_by(consumer_id=consumer_id)
- acc_tokens_list = set([x.id for x in acc_tokens])
- for token_id in acc_tokens_list:
- token_ref = self._get_access_token(session, token_id)
- session.delete(token_ref)
-
- def delete_consumer(self, consumer_id):
- session = sql.get_session()
- with session.begin():
- self._delete_request_tokens(session, consumer_id)
- self._delete_access_tokens(session, consumer_id)
- self._delete_consumer(session, consumer_id)
-
- def list_consumers(self):
- session = sql.get_session()
- cons = session.query(Consumer)
- return [core.filter_consumer(x.to_dict()) for x in cons]
-
- def update_consumer(self, consumer_id, consumer):
- session = sql.get_session()
- with session.begin():
- consumer_ref = self._get_consumer(session, consumer_id)
- old_consumer_dict = consumer_ref.to_dict()
- old_consumer_dict.update(consumer)
- new_consumer = Consumer.from_dict(old_consumer_dict)
- consumer_ref.description = new_consumer.description
- consumer_ref.extra = new_consumer.extra
- return core.filter_consumer(consumer_ref.to_dict())
-
- def create_request_token(self, consumer_id, project_id, token_duration,
- request_token_id=None, request_token_secret=None):
- if request_token_id is None:
- request_token_id = uuid.uuid4().hex
- if request_token_secret is None:
- request_token_secret = uuid.uuid4().hex
- expiry_date = None
- if token_duration:
- now = timeutils.utcnow()
- future = now + datetime.timedelta(seconds=token_duration)
- expiry_date = utils.isotime(future, subsecond=True)
-
- ref = {}
- ref['id'] = request_token_id
- ref['request_secret'] = request_token_secret
- ref['verifier'] = None
- ref['authorizing_user_id'] = None
- ref['requested_project_id'] = project_id
- ref['role_ids'] = None
- ref['consumer_id'] = consumer_id
- ref['expires_at'] = expiry_date
- session = sql.get_session()
- with session.begin():
- token_ref = RequestToken.from_dict(ref)
- session.add(token_ref)
- return token_ref.to_dict()
-
- def _get_request_token(self, session, request_token_id):
- token_ref = session.query(RequestToken).get(request_token_id)
- if token_ref is None:
- raise exception.NotFound(_('Request token not found'))
- return token_ref
-
- def get_request_token(self, request_token_id):
- session = sql.get_session()
- token_ref = self._get_request_token(session, request_token_id)
- return token_ref.to_dict()
-
- def authorize_request_token(self, request_token_id, user_id,
- role_ids):
- session = sql.get_session()
- with session.begin():
- token_ref = self._get_request_token(session, request_token_id)
- token_dict = token_ref.to_dict()
- token_dict['authorizing_user_id'] = user_id
- token_dict['verifier'] = ''.join(random.sample(core.VERIFIER_CHARS,
- 8))
- token_dict['role_ids'] = jsonutils.dumps(role_ids)
-
- new_token = RequestToken.from_dict(token_dict)
- for attr in RequestToken.attributes:
- if (attr == 'authorizing_user_id' or attr == 'verifier'
- or attr == 'role_ids'):
- setattr(token_ref, attr, getattr(new_token, attr))
-
- return token_ref.to_dict()
-
- def create_access_token(self, request_token_id, token_duration,
- access_token_id=None, access_token_secret=None):
- if access_token_id is None:
- access_token_id = uuid.uuid4().hex
- if access_token_secret is None:
- access_token_secret = uuid.uuid4().hex
- session = sql.get_session()
- with session.begin():
- req_token_ref = self._get_request_token(session, request_token_id)
- token_dict = req_token_ref.to_dict()
-
- expiry_date = None
- if token_duration:
- now = timeutils.utcnow()
- future = now + datetime.timedelta(seconds=token_duration)
- expiry_date = utils.isotime(future, subsecond=True)
-
- # add Access Token
- ref = {}
- ref['id'] = access_token_id
- ref['access_secret'] = access_token_secret
- ref['authorizing_user_id'] = token_dict['authorizing_user_id']
- ref['project_id'] = token_dict['requested_project_id']
- ref['role_ids'] = token_dict['role_ids']
- ref['consumer_id'] = token_dict['consumer_id']
- ref['expires_at'] = expiry_date
- token_ref = AccessToken.from_dict(ref)
- session.add(token_ref)
-
- # remove request token, it's been used
- session.delete(req_token_ref)
-
- return token_ref.to_dict()
-
- def _get_access_token(self, session, access_token_id):
- token_ref = session.query(AccessToken).get(access_token_id)
- if token_ref is None:
- raise exception.NotFound(_('Access token not found'))
- return token_ref
-
- def get_access_token(self, access_token_id):
- session = sql.get_session()
- token_ref = self._get_access_token(session, access_token_id)
- return token_ref.to_dict()
-
- def list_access_tokens(self, user_id):
- session = sql.get_session()
- q = session.query(AccessToken)
- user_auths = q.filter_by(authorizing_user_id=user_id)
- return [core.filter_token(x.to_dict()) for x in user_auths]
-
- def delete_access_token(self, user_id, access_token_id):
- session = sql.get_session()
- with session.begin():
- token_ref = self._get_access_token(session, access_token_id)
- token_dict = token_ref.to_dict()
- if token_dict['authorizing_user_id'] != user_id:
- raise exception.Unauthorized(_('User IDs do not match'))
-
- session.delete(token_ref)
+ @versionutils.deprecated(versionutils.deprecated.MITAKA,
+ in_favor_of=_NEW,
+ what=_OLD)
+ def __init__(self, *args, **kwargs):
+ super(OAuth1, self).__init__(*args, **kwargs)
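The legacy oauth1 SQL backend is now a thin alias for the relocated driver, so a deployment whose configuration still names the old dotted path keeps working. Illustrative use (the surrounding driver-loading machinery is assumed):

from keystone.contrib.oauth1.backends import sql as legacy_sql

# Instantiating via the legacy path logs a Mitaka deprecation warning
# through oslo_log.versionutils and otherwise behaves exactly like
# keystone.oauth1.backends.sql.OAuth1.
driver = legacy_sql.OAuth1()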
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
index e0305351..fe0212d7 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
@@ -12,46 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- consumer_table = sql.Table(
- 'consumer',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('description', sql.String(64), nullable=False),
- sql.Column('secret', sql.String(64), nullable=False),
- sql.Column('extra', sql.Text(), nullable=False))
- consumer_table.create(migrate_engine, checkfirst=True)
-
- request_token_table = sql.Table(
- 'request_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('request_secret', sql.String(64), nullable=False),
- sql.Column('verifier', sql.String(64), nullable=True),
- sql.Column('authorizing_user_id', sql.String(64), nullable=True),
- sql.Column('requested_project_id', sql.String(64), nullable=False),
- sql.Column('requested_roles', sql.Text(), nullable=False),
- sql.Column('consumer_id', sql.String(64), nullable=False, index=True),
- sql.Column('expires_at', sql.String(64), nullable=True))
- request_token_table.create(migrate_engine, checkfirst=True)
-
- access_token_table = sql.Table(
- 'access_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('access_secret', sql.String(64), nullable=False),
- sql.Column('authorizing_user_id', sql.String(64),
- nullable=False, index=True),
- sql.Column('project_id', sql.String(64), nullable=False),
- sql.Column('requested_roles', sql.Text(), nullable=False),
- sql.Column('consumer_id', sql.String(64), nullable=False),
- sql.Column('expires_at', sql.String(64), nullable=True))
- access_token_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='oauth1')
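Each migration in the frozen extension repo is reduced to a stub that fails fast, pointing operators at the main migration repo. A sketch of the exception these stubs raise (the exact wording in keystone.exception is assumed):

class MigrationMovedFailure(Exception):
    def __init__(self, extension):
        super(MigrationMovedFailure, self).__init__(
            "The %s extension has moved into keystone core, so its "
            "migrations are maintained by the main keystone database "
            "control. Use: keystone-manage db_sync" % extension)
        self.extension = extension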
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
index 174120e8..fe0212d7 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
@@ -12,26 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-from keystone.common.sql import migration_helpers
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- consumer_table = sql.Table('consumer', meta, autoload=True)
- request_token_table = sql.Table('request_token', meta, autoload=True)
- access_token_table = sql.Table('access_token', meta, autoload=True)
-
- constraints = [{'table': request_token_table,
- 'fk_column': 'consumer_id',
- 'ref_column': consumer_table.c.id},
- {'table': access_token_table,
- 'fk_column': 'consumer_id',
- 'ref_column': consumer_table.c.id}]
- if meta.bind != 'sqlite':
- migration_helpers.add_constraints(constraints)
+ raise exception.MigrationMovedFailure(extension='oauth1')
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
index cf6ffb7c..fe0212d7 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
@@ -12,11 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- user_table = sql.Table('consumer', meta, autoload=True)
- user_table.c.description.alter(nullable=True)
+ raise exception.MigrationMovedFailure(extension='oauth1')
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
index 6934eb6f..fe0212d7 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
@@ -12,14 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- request_token_table = sql.Table('request_token', meta, autoload=True)
- request_token_table.c.requested_roles.alter(nullable=True)
- request_token_table.c.requested_roles.alter(name="role_ids")
- access_token_table = sql.Table('access_token', meta, autoload=True)
- access_token_table.c.requested_roles.alter(name="role_ids")
+ raise exception.MigrationMovedFailure(extension='oauth1')
diff --git a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
index 0627d21c..a4681e16 100644
--- a/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
+++ b/keystone-moon/keystone/contrib/oauth1/migrate_repo/versions/005_consumer_id_index.py
@@ -13,23 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sa
+from keystone import exception
def upgrade(migrate_engine):
-
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- table = sa.Table('access_token', meta, autoload=True)
-
-        # NOTE(i159): MySQL requires indexes on referencing columns and
-        # creates those indexes automatically, but the generated names
-        # differ across MySQL versions. We should make the naming
-        # consistent by re-creating the index under a fixed name.
- if any(i for i in table.indexes if
- list(i.columns.keys()) == ['consumer_id']
- and i.name != 'consumer_id'):
-            # NOTE(i159): this re-creates the index under the new name,
-            # which MySQL treats as a rename.
- sa.Index('consumer_id', table.c.consumer_id).create()
+ raise exception.MigrationMovedFailure(extension='oauth1')
diff --git a/keystone-moon/keystone/contrib/oauth1/routers.py b/keystone-moon/keystone/contrib/oauth1/routers.py
index 4b772eb5..42a26c10 100644
--- a/keystone-moon/keystone/contrib/oauth1/routers.py
+++ b/keystone-moon/keystone/contrib/oauth1/routers.py
@@ -12,143 +12,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
+from oslo_log import log
+from oslo_log import versionutils
-from keystone.common import json_home
from keystone.common import wsgi
-from keystone.contrib.oauth1 import controllers
+from keystone.i18n import _
-build_resource_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-OAUTH1', extension_version='1.0')
+LOG = log.getLogger(__name__)
-build_parameter_relation = functools.partial(
- json_home.build_v3_extension_parameter_relation,
- extension_name='OS-OAUTH1', extension_version='1.0')
-ACCESS_TOKEN_ID_PARAMETER_RELATION = build_parameter_relation(
- parameter_name='access_token_id')
+class OAuth1Extension(wsgi.Middleware):
-
-class OAuth1Extension(wsgi.V3ExtensionRouter):
- """API Endpoints for the OAuth1 extension.
-
- The goal of this extension is to allow third-party service providers
- to acquire tokens with a limited subset of a user's roles for acting
- on behalf of that user. This is done using an oauth-similar flow and
- api.
-
- The API looks like::
-
- # Basic admin-only consumer crud
- POST /OS-OAUTH1/consumers
- GET /OS-OAUTH1/consumers
- PATCH /OS-OAUTH1/consumers/{consumer_id}
- GET /OS-OAUTH1/consumers/{consumer_id}
- DELETE /OS-OAUTH1/consumers/{consumer_id}
-
- # User access token crud
- GET /users/{user_id}/OS-OAUTH1/access_tokens
- GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
- GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles
- GET /users/{user_id}/OS-OAUTH1/access_tokens
- /{access_token_id}/roles/{role_id}
- DELETE /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
-
- # OAuth interfaces
- POST /OS-OAUTH1/request_token # create a request token
- PUT /OS-OAUTH1/authorize # authorize a request token
- POST /OS-OAUTH1/access_token # create an access token
-
- """
-
- def add_routes(self, mapper):
- consumer_controller = controllers.ConsumerCrudV3()
- access_token_controller = controllers.AccessTokenCrudV3()
- access_token_roles_controller = controllers.AccessTokenRolesV3()
- oauth_controller = controllers.OAuthControllerV3()
-
- # basic admin-only consumer crud
- self._add_resource(
- mapper, consumer_controller,
- path='/OS-OAUTH1/consumers',
- get_action='list_consumers',
- post_action='create_consumer',
- rel=build_resource_relation(resource_name='consumers'))
- self._add_resource(
- mapper, consumer_controller,
- path='/OS-OAUTH1/consumers/{consumer_id}',
- get_action='get_consumer',
- patch_action='update_consumer',
- delete_action='delete_consumer',
- rel=build_resource_relation(resource_name='consumer'),
- path_vars={
- 'consumer_id':
- build_parameter_relation(parameter_name='consumer_id'),
- })
-
- # user access token crud
- self._add_resource(
- mapper, access_token_controller,
- path='/users/{user_id}/OS-OAUTH1/access_tokens',
- get_action='list_access_tokens',
- rel=build_resource_relation(resource_name='user_access_tokens'),
- path_vars={
- 'user_id': json_home.Parameters.USER_ID,
- })
- self._add_resource(
- mapper, access_token_controller,
- path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}',
- get_action='get_access_token',
- delete_action='delete_access_token',
- rel=build_resource_relation(resource_name='user_access_token'),
- path_vars={
- 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
- 'user_id': json_home.Parameters.USER_ID,
- })
- self._add_resource(
- mapper, access_token_roles_controller,
- path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
- 'roles',
- get_action='list_access_token_roles',
- rel=build_resource_relation(
- resource_name='user_access_token_roles'),
- path_vars={
- 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
- 'user_id': json_home.Parameters.USER_ID,
- })
- self._add_resource(
- mapper, access_token_roles_controller,
- path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
- 'roles/{role_id}',
- get_action='get_access_token_role',
- rel=build_resource_relation(
- resource_name='user_access_token_role'),
- path_vars={
- 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
- 'role_id': json_home.Parameters.ROLE_ID,
- 'user_id': json_home.Parameters.USER_ID,
- })
-
- # oauth flow calls
- self._add_resource(
- mapper, oauth_controller,
- path='/OS-OAUTH1/request_token',
- post_action='create_request_token',
- rel=build_resource_relation(resource_name='request_tokens'))
- self._add_resource(
- mapper, oauth_controller,
- path='/OS-OAUTH1/access_token',
- post_action='create_access_token',
- rel=build_resource_relation(resource_name='access_tokens'))
- self._add_resource(
- mapper, oauth_controller,
- path='/OS-OAUTH1/authorize/{request_token_id}',
- path_vars={
- 'request_token_id':
- build_parameter_relation(parameter_name='request_token_id')
- },
- put_action='authorize_request_token',
- rel=build_resource_relation(
- resource_name='authorize_request_token'))
+ def __init__(self, *args, **kwargs):
+ super(OAuth1Extension, self).__init__(*args, **kwargs)
+ msg = _("Remove oauth1_extension from the paste pipeline, the "
+ "oauth1 extension is now always available. Update the "
+ "[pipeline:api_v3] section in keystone-paste.ini accordingly, "
+ "as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
diff --git a/keystone-moon/keystone/contrib/revoke/__init__.py b/keystone-moon/keystone/contrib/revoke/__init__.py
index 58ba68db..e69de29b 100644
--- a/keystone-moon/keystone/contrib/revoke/__init__.py
+++ b/keystone-moon/keystone/contrib/revoke/__init__.py
@@ -1,13 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.contrib.revoke.core import * # noqa
diff --git a/keystone-moon/keystone/contrib/revoke/backends/sql.py b/keystone-moon/keystone/contrib/revoke/backends/sql.py
index 82e05194..0bf493ae 100644
--- a/keystone-moon/keystone/contrib/revoke/backends/sql.py
+++ b/keystone-moon/keystone/contrib/revoke/backends/sql.py
@@ -10,95 +10,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import uuid
+from oslo_log import versionutils
-from keystone.common import sql
-from keystone.contrib import revoke
-from keystone.contrib.revoke import model
+from keystone.revoke.backends import sql
-class RevocationEvent(sql.ModelBase, sql.ModelDictMixin):
- __tablename__ = 'revocation_event'
- attributes = model.REVOKE_KEYS
+_OLD = "keystone.contrib.revoke.backends.sql.Revoke"
+_NEW = "sql"
- # The id field is not going to be exposed to the outside world.
- # It is, however, necessary for SQLAlchemy.
- id = sql.Column(sql.String(64), primary_key=True)
- domain_id = sql.Column(sql.String(64))
- project_id = sql.Column(sql.String(64))
- user_id = sql.Column(sql.String(64))
- role_id = sql.Column(sql.String(64))
- trust_id = sql.Column(sql.String(64))
- consumer_id = sql.Column(sql.String(64))
- access_token_id = sql.Column(sql.String(64))
- issued_before = sql.Column(sql.DateTime(), nullable=False)
- expires_at = sql.Column(sql.DateTime())
- revoked_at = sql.Column(sql.DateTime(), nullable=False, index=True)
- audit_id = sql.Column(sql.String(32))
- audit_chain_id = sql.Column(sql.String(32))
+class Revoke(sql.Revoke):
-class Revoke(revoke.RevokeDriverV8):
- def _flush_batch_size(self, dialect):
- batch_size = 0
- if dialect == 'ibm_db_sa':
- # This functionality is limited to DB2, because
- # it is necessary to prevent the transaction log
- # from filling up, whereas at least some of the
- # other supported databases do not support update
- # queries with LIMIT subqueries nor do they appear
- # to require the use of such queries when deleting
- # large numbers of records at once.
- batch_size = 100
- # Limit of 100 is known to not fill a transaction log
- # of default maximum size while not significantly
- # impacting the performance of large token purges on
- # systems where the maximum transaction log size has
- # been increased beyond the default.
- return batch_size
-
- def _prune_expired_events(self):
- oldest = revoke.revoked_before_cutoff_time()
-
- session = sql.get_session()
- dialect = session.bind.dialect.name
- batch_size = self._flush_batch_size(dialect)
- if batch_size > 0:
- query = session.query(RevocationEvent.id)
- query = query.filter(RevocationEvent.revoked_at < oldest)
- query = query.limit(batch_size).subquery()
- delete_query = (session.query(RevocationEvent).
- filter(RevocationEvent.id.in_(query)))
- while True:
- rowcount = delete_query.delete(synchronize_session=False)
- if rowcount == 0:
- break
- else:
- query = session.query(RevocationEvent)
- query = query.filter(RevocationEvent.revoked_at < oldest)
- query.delete(synchronize_session=False)
-
- session.flush()
-
- def list_events(self, last_fetch=None):
- session = sql.get_session()
- query = session.query(RevocationEvent).order_by(
- RevocationEvent.revoked_at)
-
- if last_fetch:
- query = query.filter(RevocationEvent.revoked_at > last_fetch)
-
- events = [model.RevokeEvent(**e.to_dict()) for e in query]
-
- return events
-
- def revoke(self, event):
- kwargs = dict()
- for attr in model.REVOKE_KEYS:
- kwargs[attr] = getattr(event, attr)
- kwargs['id'] = uuid.uuid4().hex
- record = RevocationEvent(**kwargs)
- session = sql.get_session()
- with session.begin():
- session.add(record)
- self._prune_expired_events()
+ @versionutils.deprecated(versionutils.deprecated.MITAKA,
+ in_favor_of=_NEW,
+ what=_OLD)
+ def __init__(self, *args, **kwargs):
+ super(Revoke, self).__init__(*args, **kwargs)
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
index 8b59010e..81c535e1 100644
--- a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
+++ b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/001_revoke_table.py
@@ -10,27 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+from keystone import exception
def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- service_table = sql.Table(
- 'revocation_event',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64)),
- sql.Column('project_id', sql.String(64)),
- sql.Column('user_id', sql.String(64)),
- sql.Column('role_id', sql.String(64)),
- sql.Column('trust_id', sql.String(64)),
- sql.Column('consumer_id', sql.String(64)),
- sql.Column('access_token_id', sql.String(64)),
- sql.Column('issued_before', sql.DateTime(), nullable=False),
- sql.Column('expires_at', sql.DateTime()),
- sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False))
- service_table.create(migrate_engine, checkfirst=True)
+ raise exception.MigrationMovedFailure(extension='revoke')
diff --git a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
index b6d821d7..81c535e1 100644
--- a/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
+++ b/keystone-moon/keystone/contrib/revoke/migrate_repo/versions/002_add_audit_id_and_chain_to_revoke_table.py
@@ -10,19 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-
-_TABLE_NAME = 'revocation_event'
+from keystone import exception
def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- event_table = sql.Table(_TABLE_NAME, meta, autoload=True)
- audit_id_column = sql.Column('audit_id', sql.String(32), nullable=True)
- audit_chain_column = sql.Column('audit_chain_id', sql.String(32),
- nullable=True)
- event_table.create_column(audit_id_column)
- event_table.create_column(audit_chain_column)
+ raise exception.MigrationMovedFailure(extension='revoke')
diff --git a/keystone-moon/keystone/contrib/revoke/routers.py b/keystone-moon/keystone/contrib/revoke/routers.py
index 4d2edfc0..a44c6194 100644
--- a/keystone-moon/keystone/contrib/revoke/routers.py
+++ b/keystone-moon/keystone/contrib/revoke/routers.py
@@ -10,20 +10,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-from keystone.common import json_home
+from oslo_log import log
+from oslo_log import versionutils
+
from keystone.common import wsgi
-from keystone.contrib.revoke import controllers
+from keystone.i18n import _
+
+LOG = log.getLogger(__name__)
-class RevokeExtension(wsgi.V3ExtensionRouter):
- PATH_PREFIX = '/OS-REVOKE'
+class RevokeExtension(wsgi.Middleware):
- def add_routes(self, mapper):
- revoke_controller = controllers.RevokeController()
- self._add_resource(
- mapper, revoke_controller,
- path=self.PATH_PREFIX + '/events',
- get_action='list_revoke_events',
- rel=json_home.build_v3_extension_resource_relation(
- 'OS-REVOKE', '1.0', 'events'))
+ def __init__(self, *args, **kwargs):
+ super(RevokeExtension, self).__init__(*args, **kwargs)
+ msg = _("Remove revoke_extension from the paste pipeline, the "
+ "revoke extension is now always available. Update the "
+ "[pipeline:api_v3] section in keystone-paste.ini accordingly, "
+ "as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
diff --git a/keystone-moon/keystone/contrib/s3/core.py b/keystone-moon/keystone/contrib/s3/core.py
index d3e06acc..c497f5d5 100644
--- a/keystone-moon/keystone/contrib/s3/core.py
+++ b/keystone-moon/keystone/contrib/s3/core.py
@@ -33,6 +33,7 @@ from keystone.common import utils
from keystone.common import wsgi
from keystone.contrib.ec2 import controllers
from keystone import exception
+from keystone.i18n import _
EXTENSION_DATA = {
@@ -45,9 +46,9 @@ EXTENSION_DATA = {
'links': [
{
'rel': 'describedby',
- # TODO(ayoung): needs a description
'type': 'text/html',
- 'href': 'https://github.com/openstack/identity-api',
+ 'href': 'http://developer.openstack.org/'
+ 'api-ref-identity-v2-ext.html',
}
]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
@@ -67,16 +68,58 @@ class S3Extension(wsgi.V3ExtensionRouter):
class S3Controller(controllers.Ec2Controller):
def check_signature(self, creds_ref, credentials):
- msg = base64.urlsafe_b64decode(str(credentials['token']))
- key = str(creds_ref['secret']).encode('utf-8')
+ string_to_sign = base64.urlsafe_b64decode(str(credentials['token']))
+ if string_to_sign[0:4] != b'AWS4':
+ signature = self._calculate_signature_v1(string_to_sign,
+ creds_ref['secret'])
+ else:
+ signature = self._calculate_signature_v4(string_to_sign,
+ creds_ref['secret'])
+
+ if not utils.auth_str_equal(credentials['signature'], signature):
+ raise exception.Unauthorized(
+ message=_('Credential signature mismatch'))
+
+ def _calculate_signature_v1(self, string_to_sign, secret_key):
+ """Calculates a v1 signature.
+
+        :param bytes string_to_sign: String containing the request
+            parameters, used to calculate the request signature
+        :param text secret_key: Secret key of the EC2 credential, used
+            to sign requests
+ """
+ key = str(secret_key).encode('utf-8')
if six.PY2:
b64_encode = base64.encodestring
else:
b64_encode = base64.encodebytes
+ signed = b64_encode(hmac.new(key, string_to_sign, hashlib.sha1)
+ .digest()).decode('utf-8').strip()
+ return signed
+
+ def _calculate_signature_v4(self, string_to_sign, secret_key):
+ """Calculates a v4 signature.
+
+        :param bytes string_to_sign: String containing the request
+            parameters, used to calculate the request signature
+        :param text secret_key: Secret key of the EC2 credential, used
+            to sign requests
+ """
+ parts = string_to_sign.split(b'\n')
+ if len(parts) != 4 or parts[0] != b'AWS4-HMAC-SHA256':
+ raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
+ scope = parts[2].split(b'/')
+ if len(scope) != 4 or scope[2] != b's3' or scope[3] != b'aws4_request':
+ raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
+
+ def _sign(key, msg):
+ return hmac.new(key, msg, hashlib.sha256).digest()
- signed = b64_encode(
- hmac.new(key, msg, hashlib.sha1).digest()).decode('utf-8').strip()
+ signed = _sign(('AWS4' + secret_key).encode('utf-8'), scope[0])
+ signed = _sign(signed, scope[1])
+ signed = _sign(signed, scope[2])
+ signed = _sign(signed, b'aws4_request')
- if not utils.auth_str_equal(credentials['signature'], signed):
- raise exception.Unauthorized('Credential signature mismatch')
+ signature = hmac.new(signed, string_to_sign, hashlib.sha256)
+ return signature.hexdigest()
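check_signature() now dispatches on the AWS4 marker and, for v4, derives the signing key with chained HMAC-SHA256 over the credential scope. A standalone sketch of that derivation (values are illustrative; the scheme follows AWS Signature Version 4):

import hashlib
import hmac

def _sign(key, msg):
    return hmac.new(key, msg, hashlib.sha256).digest()

secret_key = 'example-ec2-secret'             # hypothetical credential
date, region = b'20160609', b'us-east-1'      # scope: date/region/s3/aws4_request
key = _sign(('AWS4' + secret_key).encode('utf-8'), date)
key = _sign(key, region)
key = _sign(key, b's3')
key = _sign(key, b'aws4_request')
# The final key then HMACs the string-to-sign; its hex digest must match
# the signature supplied with the request.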
diff --git a/keystone-moon/keystone/contrib/simple_cert/__init__.py b/keystone-moon/keystone/contrib/simple_cert/__init__.py
index b213192e..2e5f9928 100644
--- a/keystone-moon/keystone/contrib/simple_cert/__init__.py
+++ b/keystone-moon/keystone/contrib/simple_cert/__init__.py
@@ -10,5 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-from keystone.contrib.simple_cert.core import * # noqa
from keystone.contrib.simple_cert.routers import SimpleCertExtension # noqa
diff --git a/keystone-moon/keystone/contrib/simple_cert/routers.py b/keystone-moon/keystone/contrib/simple_cert/routers.py
index 8c36c2a4..b1d509e7 100644
--- a/keystone-moon/keystone/contrib/simple_cert/routers.py
+++ b/keystone-moon/keystone/contrib/simple_cert/routers.py
@@ -10,32 +10,24 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
+from oslo_log import log
+from oslo_log import versionutils
-from keystone.common import json_home
from keystone.common import wsgi
-from keystone.contrib.simple_cert import controllers
+from keystone.i18n import _
-build_resource_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-SIMPLE-CERT', extension_version='1.0')
+LOG = log.getLogger(__name__)
-class SimpleCertExtension(wsgi.V3ExtensionRouter):
+class SimpleCertExtension(wsgi.Middleware):
- PREFIX = 'OS-SIMPLE-CERT'
-
- def add_routes(self, mapper):
- controller = controllers.SimpleCert()
-
- self._add_resource(
- mapper, controller,
- path='/%s/ca' % self.PREFIX,
- get_action='get_ca_certificate',
- rel=build_resource_relation(resource_name='ca_certificate'))
- self._add_resource(
- mapper, controller,
- path='/%s/certificates' % self.PREFIX,
- get_action='list_certificates',
- rel=build_resource_relation(resource_name='certificates'))
+ def __init__(self, application):
+ super(SimpleCertExtension, self).__init__(application)
+ msg = _("Remove simple_cert from the paste pipeline, the "
+ "PKI and PKIz token providers are now deprecated and "
+ "simple_cert was only used insupport of these token "
+ "providers. Update the [pipeline:api_v3] section in "
+ "keystone-paste.ini accordingly, as it will be removed in the "
+ "O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
diff --git a/keystone-moon/keystone/contrib/user_crud/core.py b/keystone-moon/keystone/contrib/user_crud/core.py
index dd16d3a5..b37157ea 100644
--- a/keystone-moon/keystone/contrib/user_crud/core.py
+++ b/keystone-moon/keystone/contrib/user_crud/core.py
@@ -12,123 +12,21 @@
# License for the specific language governing permissions and limitations
# under the License.
-import copy
-import uuid
-
from oslo_log import log
+from oslo_log import versionutils
-from keystone.common import dependency
-from keystone.common import extension
from keystone.common import wsgi
-from keystone import exception
-from keystone import identity
-from keystone.models import token_model
+from keystone.i18n import _
LOG = log.getLogger(__name__)
-extension.register_public_extension(
- 'OS-KSCRUD', {
- 'name': 'OpenStack Keystone User CRUD',
- 'namespace': 'http://docs.openstack.org/identity/api/ext/'
- 'OS-KSCRUD/v1.0',
- 'alias': 'OS-KSCRUD',
- 'updated': '2013-07-07T12:00:0-00:00',
- 'description': 'OpenStack extensions to Keystone v2.0 API '
- 'enabling User Operations.',
- 'links': [
- {
- 'rel': 'describedby',
- # TODO(ayoung): needs a description
- 'type': 'text/html',
- 'href': 'https://github.com/openstack/identity-api',
- }
- ]})
-
-
-@dependency.requires('catalog_api', 'identity_api', 'resource_api',
- 'token_provider_api')
-class UserController(identity.controllers.User):
- def set_user_password(self, context, user_id, user):
- token_id = context.get('token_id')
- original_password = user.get('original_password')
-
- token_data = self.token_provider_api.validate_token(token_id)
- token_ref = token_model.KeystoneToken(token_id=token_id,
- token_data=token_data)
-
- if token_ref.user_id != user_id:
- raise exception.Forbidden('Token belongs to another user')
- if original_password is None:
- raise exception.ValidationError(target='user',
- attribute='original password')
-
- try:
- user_ref = self.identity_api.authenticate(
- context,
- user_id=token_ref.user_id,
- password=original_password)
- if not user_ref.get('enabled', True):
- # NOTE(dolph): why can't you set a disabled user's password?
- raise exception.Unauthorized('User is disabled')
- except AssertionError:
- raise exception.Unauthorized()
-
- update_dict = {'password': user['password'], 'id': user_id}
-
- admin_context = copy.copy(context)
- admin_context['is_admin'] = True
- super(UserController, self).set_user_password(admin_context,
- user_id,
- update_dict)
-
- # Issue a new token based upon the original token data. This will
- # always be a V2.0 token.
-
- # TODO(morganfainberg): Add a mechanism to issue a new token directly
- # from a token model so that this code can go away. This is likely
- # not the norm as most cases do not need to yank apart a token to
- # issue a new one.
- new_token_ref = {}
- metadata_ref = {}
- roles_ref = None
-
- new_token_ref['user'] = user_ref
- if token_ref.bind:
- new_token_ref['bind'] = token_ref.bind
- if token_ref.project_id:
- new_token_ref['tenant'] = self.resource_api.get_project(
- token_ref.project_id)
- if token_ref.role_names:
- roles_ref = [dict(name=value)
- for value in token_ref.role_names]
- if token_ref.role_ids:
- metadata_ref['roles'] = token_ref.role_ids
- if token_ref.trust_id:
- metadata_ref['trust'] = {
- 'id': token_ref.trust_id,
- 'trustee_user_id': token_ref.trustee_user_id}
- new_token_ref['metadata'] = metadata_ref
- new_token_ref['id'] = uuid.uuid4().hex
-
- catalog_ref = self.catalog_api.get_catalog(user_id,
- token_ref.project_id)
-
- new_token_id, new_token_data = self.token_provider_api.issue_v2_token(
- token_ref=new_token_ref, roles_ref=roles_ref,
- catalog_ref=catalog_ref)
- LOG.debug('TOKEN_REF %s', new_token_data)
- return new_token_data
-
-
-class CrudExtension(wsgi.ExtensionRouter):
- """Provides a subset of CRUD operations for internal data types."""
-
- def add_routes(self, mapper):
- user_controller = UserController()
-
- mapper.connect('/OS-KSCRUD/users/{user_id}',
- controller=user_controller,
- action='set_user_password',
- conditions=dict(method=['PATCH']))
+class CrudExtension(wsgi.Middleware):
+ def __init__(self, application):
+ super(CrudExtension, self).__init__(application)
+ msg = _("Remove user_crud_extension from the paste pipeline, the "
+ "user_crud extension is now always available. Update"
+ "the [pipeline:public_api] section in keystone-paste.ini "
+ "accordingly, as it will be removed in the O release.")
+ versionutils.report_deprecated_feature(LOG, msg)
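
The deprecation message asks operators to edit keystone-paste.ini by hand. A sketch of that edit, assuming a public pipeline that still lists the filter (the surrounding filter names stand in for whatever a given deployment actually uses):

    [pipeline:public_api]
    # before: pipeline = ... json_body ec2_extension user_crud_extension public_service
    # after (only user_crud_extension is removed):
    pipeline = ... json_body ec2_extension public_service
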
diff --git a/keystone-moon/keystone/credential/__init__.py b/keystone-moon/keystone/credential/__init__.py
index fc7b6317..ea9d906c 100644
--- a/keystone-moon/keystone/credential/__init__.py
+++ b/keystone-moon/keystone/credential/__init__.py
@@ -14,4 +14,3 @@
from keystone.credential import controllers # noqa
from keystone.credential.core import * # noqa
-from keystone.credential import routers # noqa
diff --git a/keystone-moon/keystone/credential/backends/sql.py b/keystone-moon/keystone/credential/backends/sql.py
index 6dc9cd65..dfb9d20a 100644
--- a/keystone-moon/keystone/credential/backends/sql.py
+++ b/keystone-moon/keystone/credential/backends/sql.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from keystone.common import driver_hints
from keystone.common import sql
from keystone import credential
from keystone import exception
@@ -35,25 +36,27 @@ class Credential(credential.CredentialDriverV8):
@sql.handle_conflicts(conflict_type='credential')
def create_credential(self, credential_id, credential):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = CredentialModel.from_dict(credential)
session.add(ref)
- return ref.to_dict()
+ return ref.to_dict()
- @sql.truncated
+ @driver_hints.truncated
def list_credentials(self, hints):
- session = sql.get_session()
- credentials = session.query(CredentialModel)
- credentials = sql.filter_limit_query(CredentialModel,
- credentials, hints)
- return [s.to_dict() for s in credentials]
-
- def list_credentials_for_user(self, user_id):
- session = sql.get_session()
- query = session.query(CredentialModel)
- refs = query.filter_by(user_id=user_id).all()
- return [ref.to_dict() for ref in refs]
+ with sql.session_for_read() as session:
+ credentials = session.query(CredentialModel)
+ credentials = sql.filter_limit_query(CredentialModel,
+ credentials, hints)
+ return [s.to_dict() for s in credentials]
+
+ def list_credentials_for_user(self, user_id, type=None):
+ with sql.session_for_read() as session:
+ query = session.query(CredentialModel)
+ query = query.filter_by(user_id=user_id)
+ if type:
+ query = query.filter_by(type=type)
+ refs = query.all()
+ return [ref.to_dict() for ref in refs]
def _get_credential(self, session, credential_id):
ref = session.query(CredentialModel).get(credential_id)
@@ -62,13 +65,12 @@ class Credential(credential.CredentialDriverV8):
return ref
def get_credential(self, credential_id):
- session = sql.get_session()
- return self._get_credential(session, credential_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_credential(session, credential_id).to_dict()
@sql.handle_conflicts(conflict_type='credential')
def update_credential(self, credential_id, credential):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_credential(session, credential_id)
old_dict = ref.to_dict()
for k in credential:
@@ -78,27 +80,21 @@ class Credential(credential.CredentialDriverV8):
if attr != 'id':
setattr(ref, attr, getattr(new_credential, attr))
ref.extra = new_credential.extra
- return ref.to_dict()
+ return ref.to_dict()
def delete_credential(self, credential_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_credential(session, credential_id)
session.delete(ref)
def delete_credentials_for_project(self, project_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
query = session.query(CredentialModel)
query = query.filter_by(project_id=project_id)
query.delete()
def delete_credentials_for_user(self, user_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
query = session.query(CredentialModel)
query = query.filter_by(user_id=user_id)
query.delete()
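
Every method in this backend now uses the session_for_read()/session_for_write() context managers instead of a bare sql.get_session(), and the return ref.to_dict() calls move inside the with block so ORM attributes are read while the session is still open. A simplified sketch of the write-side pattern, assuming a plain SQLAlchemy sessionmaker (keystone's real implementation delegates to oslo.db's enginefacade):

    import contextlib


    @contextlib.contextmanager
    def session_for_write(session_factory):
        # session_factory: an assumed sqlalchemy.orm.sessionmaker instance.
        session = session_factory()
        try:
            with session.begin():
                # Commits on normal exit, rolls back if the block raises.
                yield session
        finally:
            session.close()
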
diff --git a/keystone-moon/keystone/credential/core.py b/keystone-moon/keystone/credential/core.py
index d72856df..1550fc99 100644
--- a/keystone-moon/keystone/credential/core.py
+++ b/keystone-moon/keystone/credential/core.py
@@ -58,7 +58,7 @@ class CredentialDriverV8(object):
def create_credential(self, credential_id, credential):
"""Creates a new credential.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate credential exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -77,10 +77,11 @@ class CredentialDriverV8(object):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_credentials_for_user(self, user_id):
+ def list_credentials_for_user(self, user_id, type=None):
"""List credentials for a user.
:param user_id: ID of a user to filter credentials by.
+ :param type: type of credentials to filter on.
:returns: a list of credential_refs or an empty list.
@@ -92,7 +93,8 @@ class CredentialDriverV8(object):
"""Get a credential by ID.
:returns: credential_ref
- :raises: keystone.exception.CredentialNotFound
+ :raises keystone.exception.CredentialNotFound: If credential doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -101,8 +103,9 @@ class CredentialDriverV8(object):
def update_credential(self, credential_id, credential):
"""Updates an existing credential.
- :raises: keystone.exception.CredentialNotFound,
- keystone.exception.Conflict
+ :raises keystone.exception.CredentialNotFound: If credential doesn't
+ exist.
+ :raises keystone.exception.Conflict: If a duplicate credential exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -111,7 +114,8 @@ class CredentialDriverV8(object):
def delete_credential(self, credential_id):
"""Deletes an existing credential.
- :raises: keystone.exception.CredentialNotFound
+ :raises keystone.exception.CredentialNotFound: If credential doesn't
+ exist.
"""
raise exception.NotImplemented() # pragma: no cover
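
Two interface changes drive the docstring edits above: list_credentials_for_user() grows an optional type filter (type=None keeps the old list-everything behavior), and the :raises: fields move to the Sphinx form ":raises ExceptionClass: description" so the exception class is cross-referenced in generated docs. Hypothetical use of the new filter (the driver handle and the 'ec2' type value are illustrative):

    # Only credentials of one type; omitting type lists them all.
    ec2_creds = driver.list_credentials_for_user(user_id, type='ec2')
    all_creds = driver.list_credentials_for_user(user_id)
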
diff --git a/keystone-moon/keystone/endpoint_policy/__init__.py b/keystone-moon/keystone/endpoint_policy/__init__.py
index c8ae5e68..36c016a1 100644
--- a/keystone-moon/keystone/endpoint_policy/__init__.py
+++ b/keystone-moon/keystone/endpoint_policy/__init__.py
@@ -11,4 +11,3 @@
# under the License.
from keystone.endpoint_policy.core import * # noqa
-from keystone.endpoint_policy import routers # noqa
diff --git a/keystone-moon/keystone/endpoint_policy/backends/sql.py b/keystone-moon/keystone/endpoint_policy/backends/sql.py
index 484444f1..aacbb083 100644
--- a/keystone-moon/keystone/endpoint_policy/backends/sql.py
+++ b/keystone-moon/keystone/endpoint_policy/backends/sql.py
@@ -32,7 +32,7 @@ class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
service_id = sql.Column(sql.String(64), nullable=True)
region_id = sql.Column(sql.String(64), nullable=True)
__table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
- 'region_id'), {})
+ 'region_id'),)
def to_dict(self):
"""Returns the model's attributes as a dictionary.
@@ -51,7 +51,7 @@ class EndpointPolicy(object):
def create_policy_association(self, policy_id, endpoint_id=None,
service_id=None, region_id=None):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
try:
# See if there is already a row for this association, and if
# so, update it with the new policy_id
@@ -79,14 +79,14 @@ class EndpointPolicy(object):
# NOTE(henry-nash): Getting a single value to save object
# management overhead.
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
if session.query(PolicyAssociation.id).filter(
sql_constraints).distinct().count() == 0:
raise exception.PolicyAssociationNotFound()
def delete_policy_association(self, policy_id, endpoint_id=None,
service_id=None, region_id=None):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(policy_id=policy_id)
query = query.filter_by(endpoint_id=endpoint_id)
@@ -102,7 +102,7 @@ class EndpointPolicy(object):
PolicyAssociation.region_id == region_id)
try:
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
policy_id = session.query(PolicyAssociation.policy_id).filter(
sql_constraints).distinct().one()
return {'policy_id': policy_id}
@@ -110,31 +110,31 @@ class EndpointPolicy(object):
raise exception.PolicyAssociationNotFound()
def list_associations_for_policy(self, policy_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(policy_id=policy_id)
return [ref.to_dict() for ref in query.all()]
def delete_association_by_endpoint(self, endpoint_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(endpoint_id=endpoint_id)
query.delete()
def delete_association_by_service(self, service_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(service_id=service_id)
query.delete()
def delete_association_by_region(self, region_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(region_id=region_id)
query.delete()
def delete_association_by_policy(self, policy_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(PolicyAssociation)
query = query.filter_by(policy_id=policy_id)
query.delete()
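
Besides the read/write session split, the __table_args__ tweak above drops a needless element: SQLAlchemy accepts either a plain tuple of constraints or a tuple whose last element is a dict of table keyword arguments, and an empty dict contributes nothing. Both spellings below are equivalent for this model (the mysql_engine kwarg is only an illustration of when the dict form earns its place):

    # Dict form, useful only when there are real table kwargs:
    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
                                           'region_id'),
                      {'mysql_engine': 'InnoDB'})

    # Constraint-only form, as adopted above:
    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
                                           'region_id'),)
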
diff --git a/keystone-moon/keystone/endpoint_policy/core.py b/keystone-moon/keystone/endpoint_policy/core.py
index e176ac1c..6243f26b 100644
--- a/keystone-moon/keystone/endpoint_policy/core.py
+++ b/keystone-moon/keystone/endpoint_policy/core.py
@@ -127,7 +127,6 @@ class Manager(manager.Manager):
:returns: list of endpoints that match
"""
-
if region_id in regions_examined:
msg = _LE('Circular reference or a repeated entry found '
'in region tree - %(region_id)s.')
@@ -159,7 +158,7 @@ class Manager(manager.Manager):
matching_endpoints = []
endpoints = self.catalog_api.list_endpoints()
regions = self.catalog_api.list_regions()
- for ref in self.driver.list_associations_for_policy(policy_id):
+ for ref in self.list_associations_for_policy(policy_id):
if ref.get('endpoint_id') is not None:
matching_endpoints.append(
_get_endpoint(ref['endpoint_id'], policy_id))
@@ -213,11 +212,12 @@ class Manager(manager.Manager):
regions_examined = []
while region_id is not None:
try:
- ref = self.driver.get_policy_association(
+ ref = self.get_policy_association(
service_id=endpoint['service_id'],
region_id=region_id)
return ref['policy_id']
- except exception.PolicyAssociationNotFound:
+ except exception.PolicyAssociationNotFound: # nosec
+ # There wasn't one for that region & service, handled below.
pass
# There wasn't one for that region & service, let's
@@ -237,9 +237,11 @@ class Manager(manager.Manager):
# this endpoint.
try:
- ref = self.driver.get_policy_association(endpoint_id=endpoint_id)
+ ref = self.get_policy_association(endpoint_id=endpoint_id)
return _get_policy(ref['policy_id'], endpoint_id)
- except exception.PolicyAssociationNotFound:
+ except exception.PolicyAssociationNotFound: # nosec
+ # There wasn't a policy explicitly defined for this endpoint,
+ # handled below.
pass
# There wasn't a policy explicitly defined for this endpoint, so
@@ -252,10 +254,11 @@ class Manager(manager.Manager):
# Finally, just check if there is one for the service.
try:
- ref = self.driver.get_policy_association(
+ ref = self.get_policy_association(
service_id=endpoint['service_id'])
return _get_policy(ref['policy_id'], endpoint_id)
- except exception.PolicyAssociationNotFound:
+ except exception.PolicyAssociationNotFound: # nosec
+ # No policy is associated with endpoint, handled below.
pass
msg = _('No policy is associated with endpoint '
@@ -304,8 +307,8 @@ class EndpointPolicyDriverV8(object):
:type service_id: string
:param region_id: identity of the region to associate
:type region_id: string
- :raises: keystone.exception.PolicyAssociationNotFound if there is no
- match for the specified association
+ :raises keystone.exception.PolicyAssociationNotFound: If there is no
+ match for the specified association.
:returns: None
"""
@@ -343,8 +346,8 @@ class EndpointPolicyDriverV8(object):
:type service_id: string
:param region_id: identity of the region
:type region_id: string
- :raises: keystone.exception.PolicyAssociationNotFound if there is no
- match for the specified association
+ :raises keystone.exception.PolicyAssociationNotFound: If there is no
+ match for the specified association.
:returns: dict containing policy_id
"""
diff --git a/keystone-moon/keystone/exception.py b/keystone-moon/keystone/exception.py
index f5c0e1cd..e347d345 100644
--- a/keystone-moon/keystone/exception.py
+++ b/keystone-moon/keystone/exception.py
@@ -15,6 +15,7 @@
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
+import six
from keystone.i18n import _, _LW
@@ -26,6 +27,22 @@ LOG = log.getLogger(__name__)
_FATAL_EXCEPTION_FORMAT_ERRORS = False
+def _format_with_unicode_kwargs(msg_format, kwargs):
+ try:
+ return msg_format % kwargs
+ except UnicodeDecodeError:
+ try:
+ kwargs = {k: encodeutils.safe_decode(v)
+ for k, v in kwargs.items()}
+ except UnicodeDecodeError:
+ # NOTE(jamielennox): This is the complete failure case
+ # at least by showing the template we have some idea
+ # of where the error is coming from
+ return msg_format
+
+ return msg_format % kwargs
+
+
class Error(Exception):
"""Base error class.
@@ -33,6 +50,7 @@ class Error(Exception):
message_format.
"""
+
code = None
title = None
message_format = None
@@ -53,25 +71,12 @@ class Error(Exception):
def _build_message(self, message, **kwargs):
"""Builds and returns an exception message.
- :raises: KeyError given insufficient kwargs
+ :raises KeyError: given insufficient kwargs
"""
- if not message:
- try:
- message = self.message_format % kwargs
- except UnicodeDecodeError:
- try:
- kwargs = {k: encodeutils.safe_decode(v)
- for k, v in kwargs.items()}
- except UnicodeDecodeError:
- # NOTE(jamielennox): This is the complete failure case
- # at least by showing the template we have some idea
- # of where the error is coming from
- message = self.message_format
- else:
- message = self.message_format % kwargs
-
- return message
+ if message:
+ return message
+ return _format_with_unicode_kwargs(self.message_format, kwargs)
class ValidationError(Error):
@@ -135,41 +140,57 @@ class CircularRegionHierarchyError(Error):
title = 'Bad Request'
-class PasswordVerificationError(Error):
+class ForbiddenNotSecurity(Error):
+ """When you want to return a 403 Forbidden response but not security.
+
+ Use this for errors where the message is always safe to present to the user
+ and won't give away extra information.
+
+ """
+
+ code = 403
+ title = 'Forbidden'
+
+
+class PasswordVerificationError(ForbiddenNotSecurity):
message_format = _("The password length must be less than or equal "
"to %(size)i. The server could not comply with the "
"request because the password is invalid.")
- code = 403
- title = 'Forbidden'
-class RegionDeletionError(Error):
+class RegionDeletionError(ForbiddenNotSecurity):
message_format = _("Unable to delete region %(region_id)s because it or "
"its child regions have associated endpoints.")
- code = 403
- title = 'Forbidden'
-class PKITokenExpected(Error):
+class PKITokenExpected(ForbiddenNotSecurity):
message_format = _('The certificates you requested are not available. '
'It is likely that this server does not use PKI tokens '
'otherwise this is the result of misconfiguration.')
- code = 403
- title = 'Cannot retrieve certificates'
class SecurityError(Error):
- """Avoids exposing details of security failures, unless in debug mode."""
- amendment = _('(Disable debug mode to suppress these details.)')
+ """Security error exception.
+
+ Avoids exposing details of security errors, unless in insecure_debug mode.
+
+ """
+
+ amendment = _('(Disable insecure_debug mode to suppress these details.)')
def _build_message(self, message, **kwargs):
- """Only returns detailed messages in debug mode."""
- if CONF.debug:
+ """Only returns detailed messages in insecure_debug mode."""
+ if message and CONF.insecure_debug:
+ if isinstance(message, six.string_types):
+ # Only do replacement if message is string. The message is
+ # sometimes a different exception or bytes, which would raise
+ # TypeError.
+ message = _format_with_unicode_kwargs(message, kwargs)
return _('%(message)s %(amendment)s') % {
- 'message': message or self.message_format % kwargs,
+ 'message': message,
'amendment': self.amendment}
- else:
- return self.message_format % kwargs
+
+ return _format_with_unicode_kwargs(self.message_format, kwargs)
class Unauthorized(SecurityError):
@@ -252,9 +273,9 @@ class EndpointNotFound(NotFound):
class MetadataNotFound(NotFound):
- """(dolph): metadata is not a user-facing concept,
- so this exception should not be exposed
- """
+ # NOTE(dolph): metadata is not a user-facing concept,
+ # so this exception should not be exposed.
+
message_format = _("An unhandled exception has occurred:"
" Could not find metadata.")
@@ -271,6 +292,14 @@ class RoleNotFound(NotFound):
message_format = _("Could not find role: %(role_id)s")
+class ImpliedRoleNotFound(NotFound):
+ message_format = _("%(prior_role_id)s does not imply %(implied_role_id)s")
+
+
+class InvalidImpliedRole(Forbidden):
+ message_format = _("%(role_id)s cannot be an implied roles")
+
+
class RoleAssignmentNotFound(NotFound):
message_format = _("Could not find role assignment with role: "
"%(role_id)s, user or group: %(actor_id)s, "
@@ -364,6 +393,12 @@ class ConfigRegistrationNotFound(Exception):
pass
+class KeystoneConfigurationError(Exception):
+ # This is an exception to be used in the case that Keystone config is
+ # invalid and Keystone should not start.
+ pass
+
+
class Conflict(Error):
message_format = _("Conflict occurred attempting to store %(type)s -"
" %(details)s")
@@ -372,27 +407,23 @@ class Conflict(Error):
class UnexpectedError(SecurityError):
- """Avoids exposing details of failures, unless in debug mode."""
- _message_format = _("An unexpected error prevented the server "
- "from fulfilling your request.")
+ """Avoids exposing details of failures, unless in insecure_debug mode."""
+
+ message_format = _("An unexpected error prevented the server "
+ "from fulfilling your request.")
debug_message_format = _("An unexpected error prevented the server "
"from fulfilling your request: %(exception)s")
- @property
- def message_format(self):
- """Return the generic message format string unless debug is enabled."""
- if CONF.debug:
- return self.debug_message_format
- return self._message_format
-
def _build_message(self, message, **kwargs):
- if CONF.debug and 'exception' not in kwargs:
- # Ensure that exception has a value to be extra defensive for
- # substitutions and make sure the exception doesn't raise an
- # exception.
- kwargs['exception'] = ''
- return super(UnexpectedError, self)._build_message(message, **kwargs)
+
+ # Ensure that exception has a value to be extra defensive for
+ # substitutions and make sure the exception doesn't raise an
+ # exception.
+ kwargs.setdefault('exception', '')
+
+ return super(UnexpectedError, self)._build_message(
+ message or self.debug_message_format, **kwargs)
code = 500
title = 'Internal Server Error'
@@ -420,11 +451,17 @@ class MappedGroupNotFound(UnexpectedError):
class MetadataFileError(UnexpectedError):
- message_format = _("Error while reading metadata file, %(reason)s")
+ debug_message_format = _("Error while reading metadata file, %(reason)s")
+
+
+class DirectMappingError(UnexpectedError):
+ message_format = _("Local section in mapping %(mapping_id)s refers to a "
+ "remote match that doesn't exist "
+ "(e.g. {0} in a local section).")
class AssignmentTypeCalculationError(UnexpectedError):
- message_format = _(
+ debug_message_format = _(
'Unexpected combination of grant attributes - '
'User: %(user_id)s, Group: %(group_id)s, Project: %(project_id)s, '
'Domain: %(domain_id)s')
@@ -450,14 +487,14 @@ class ConfigFileNotFound(UnexpectedError):
class KeysNotFound(UnexpectedError):
- message_format = _('No encryption keys found; run keystone-manage '
- 'fernet_setup to bootstrap one.')
+ debug_message_format = _('No encryption keys found; run keystone-manage '
+ 'fernet_setup to bootstrap one.')
class MultipleSQLDriversInConfig(UnexpectedError):
- message_format = _('The Keystone domain-specific configuration has '
- 'specified more than one SQL driver (only one is '
- 'permitted): %(source)s.')
+ debug_message_format = _('The Keystone domain-specific configuration has '
+ 'specified more than one SQL driver (only one is '
+ 'permitted): %(source)s.')
class MigrationNotProvided(Exception):
@@ -469,8 +506,8 @@ class MigrationNotProvided(Exception):
class UnsupportedTokenVersionException(UnexpectedError):
- message_format = _('Token version is unrecognizable or '
- 'unsupported.')
+ debug_message_format = _('Token version is unrecognizable or '
+ 'unsupported.')
class SAMLSigningError(UnexpectedError):
@@ -478,7 +515,6 @@ class SAMLSigningError(UnexpectedError):
'that this server does not have xmlsec1 '
'installed, or this is the result of '
'misconfiguration. Reason %(reason)s')
- title = 'Error signing SAML assertion'
class OAuthHeadersMissingError(UnexpectedError):
@@ -486,10 +522,23 @@ class OAuthHeadersMissingError(UnexpectedError):
'with OAuth related calls, if running under '
'HTTPd or Apache, ensure WSGIPassAuthorization '
'is set to On.')
- title = 'Error retrieving OAuth headers'
class TokenlessAuthConfigError(ValidationError):
message_format = _('Could not determine Identity Provider ID. The '
'configuration option %(issuer_attribute)s '
'was not found in the request environment.')
+
+
+class MigrationMovedFailure(RuntimeError):
+ def __init__(self, extension):
+ self.extension = extension
+ msg = _("The %s extension has been moved into keystone core and as "
+ "such its migrations are maintained by the main keystone "
+ "database control. Use the command: keystone-manage "
+ "db_sync") % self.extension
+ super(MigrationMovedFailure, self).__init__(msg)
+
+
+class UnsupportedDriverVersion(UnexpectedError):
+ debug_message_format = _('%(driver)s is not a supported driver version')
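
_format_with_unicode_kwargs() centralizes the two-stage substitution that Error._build_message() and SecurityError._build_message() now share: try the raw kwargs, retry with safe-decoded values, and fall back to the bare template when even decoding fails. A worked illustration of the three outcomes (most relevant on Python 2, where mixing a unicode template with non-ASCII bytes raises UnicodeDecodeError; a UTF-8 default encoding is assumed):

    template = u'Could not find project: %(project_id)s'

    # ASCII-safe kwargs: the first substitution simply succeeds.
    _format_with_unicode_kwargs(template, {'project_id': 'demo'})

    # Valid UTF-8 bytes: the first attempt raises, safe_decode() recovers
    # u'\xe9', and the retried substitution succeeds.
    _format_with_unicode_kwargs(template, {'project_id': b'\xc3\xa9'})

    # Bytes that are not valid UTF-8 defeat safe_decode() as well, so the
    # unformatted template is returned instead of raising.
    _format_with_unicode_kwargs(template, {'project_id': b'\xe9'})
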
diff --git a/keystone-moon/keystone/federation/V8_backends/__init__.py b/keystone-moon/keystone/federation/V8_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/federation/V8_backends/__init__.py
diff --git a/keystone-moon/keystone/federation/V8_backends/sql.py b/keystone-moon/keystone/federation/V8_backends/sql.py
new file mode 100644
index 00000000..d6b42aa0
--- /dev/null
+++ b/keystone-moon/keystone/federation/V8_backends/sql.py
@@ -0,0 +1,389 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_serialization import jsonutils
+import six
+from sqlalchemy import orm
+
+from keystone.common import sql
+from keystone import exception
+from keystone.federation import core
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+
+class FederationProtocolModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'federation_protocol'
+ attributes = ['id', 'idp_id', 'mapping_id']
+ mutable_attributes = frozenset(['mapping_id'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'), primary_key=True)
+ mapping_id = sql.Column(sql.String(64), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class IdentityProviderModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'identity_provider'
+ attributes = ['id', 'enabled', 'description', 'remote_ids']
+ mutable_attributes = frozenset(['description', 'enabled', 'remote_ids'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ enabled = sql.Column(sql.Boolean, nullable=False)
+ description = sql.Column(sql.Text(), nullable=True)
+ remote_ids = orm.relationship('IdPRemoteIdsModel',
+ order_by='IdPRemoteIdsModel.remote_id',
+ cascade='all, delete-orphan')
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ remote_ids_list = new_dictionary.pop('remote_ids', None)
+ if not remote_ids_list:
+ remote_ids_list = []
+ identity_provider = cls(**new_dictionary)
+ remote_ids = []
+ # NOTE(fmarco76): the remote_ids_list contains only remote ids
+ # associated with the IdP because of the "relationship" established in
+ # sqlalchemy and corresponding to the FK in the idp_remote_ids table
+ for remote in remote_ids_list:
+ remote_ids.append(IdPRemoteIdsModel(remote_id=remote))
+ identity_provider.remote_ids = remote_ids
+ return identity_provider
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ d['remote_ids'] = []
+ for remote in self.remote_ids:
+ d['remote_ids'].append(remote.remote_id)
+ return d
+
+
+class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'idp_remote_ids'
+ attributes = ['idp_id', 'remote_id']
+ mutable_attributes = frozenset(['idp_id', 'remote_id'])
+
+ idp_id = sql.Column(sql.String(64),
+ sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'))
+ remote_id = sql.Column(sql.String(255),
+ primary_key=True)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class MappingModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'mapping'
+ attributes = ['id', 'rules']
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ rules = sql.Column(sql.JsonBlob(), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules'])
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ d['rules'] = jsonutils.loads(d['rules'])
+ return d
+
+
+class ServiceProviderModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'service_provider'
+ attributes = ['auth_url', 'id', 'enabled', 'description',
+ 'relay_state_prefix', 'sp_url']
+ mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
+ 'relay_state_prefix', 'sp_url'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ enabled = sql.Column(sql.Boolean, nullable=False)
+ description = sql.Column(sql.Text(), nullable=True)
+ auth_url = sql.Column(sql.String(256), nullable=False)
+ sp_url = sql.Column(sql.String(256), nullable=False)
+ relay_state_prefix = sql.Column(sql.String(256), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class Federation(core.FederationDriverV8):
+
+ _CONFLICT_LOG_MSG = 'Conflict %(conflict_type)s: %(details)s'
+
+ def _handle_idp_conflict(self, e):
+ conflict_type = 'identity_provider'
+ details = six.text_type(e)
+ LOG.debug(self._CONFLICT_LOG_MSG, {'conflict_type': conflict_type,
+ 'details': details})
+ if 'remote_id' in details:
+ msg = _('Duplicate remote ID: %s')
+ else:
+ msg = _('Duplicate entry: %s')
+ msg = msg % e.value
+ raise exception.Conflict(type=conflict_type, details=msg)
+
+ # Identity Provider CRUD
+ @sql.handle_conflicts(conflict_type='identity_provider')
+ def create_idp(self, idp_id, idp):
+ idp['id'] = idp_id
+ with sql.session_for_write() as session:
+ idp_ref = IdentityProviderModel.from_dict(idp)
+ session.add(idp_ref)
+ return idp_ref.to_dict()
+
+ def delete_idp(self, idp_id):
+ with sql.session_for_write() as session:
+ self._delete_assigned_protocols(session, idp_id)
+ idp_ref = self._get_idp(session, idp_id)
+ session.delete(idp_ref)
+
+ def _get_idp(self, session, idp_id):
+ idp_ref = session.query(IdentityProviderModel).get(idp_id)
+ if not idp_ref:
+ raise exception.IdentityProviderNotFound(idp_id=idp_id)
+ return idp_ref
+
+ def _get_idp_from_remote_id(self, session, remote_id):
+ q = session.query(IdPRemoteIdsModel)
+ q = q.filter_by(remote_id=remote_id)
+ try:
+ return q.one()
+ except sql.NotFound:
+ raise exception.IdentityProviderNotFound(idp_id=remote_id)
+
+ def list_idps(self):
+ with sql.session_for_read() as session:
+ idps = session.query(IdentityProviderModel)
+ idps_list = [idp.to_dict() for idp in idps]
+ return idps_list
+
+ def get_idp(self, idp_id):
+ with sql.session_for_read() as session:
+ idp_ref = self._get_idp(session, idp_id)
+ return idp_ref.to_dict()
+
+ def get_idp_from_remote_id(self, remote_id):
+ with sql.session_for_read() as session:
+ ref = self._get_idp_from_remote_id(session, remote_id)
+ return ref.to_dict()
+
+ def update_idp(self, idp_id, idp):
+ try:
+ with sql.session_for_write() as session:
+ idp_ref = self._get_idp(session, idp_id)
+ old_idp = idp_ref.to_dict()
+ old_idp.update(idp)
+ new_idp = IdentityProviderModel.from_dict(old_idp)
+ for attr in IdentityProviderModel.mutable_attributes:
+ setattr(idp_ref, attr, getattr(new_idp, attr))
+ return idp_ref.to_dict()
+ except sql.DBDuplicateEntry as e:
+ self._handle_idp_conflict(e)
+
+ # Protocol CRUD
+ def _get_protocol(self, session, idp_id, protocol_id):
+ q = session.query(FederationProtocolModel)
+ q = q.filter_by(id=protocol_id, idp_id=idp_id)
+ try:
+ return q.one()
+ except sql.NotFound:
+ kwargs = {'protocol_id': protocol_id,
+ 'idp_id': idp_id}
+ raise exception.FederatedProtocolNotFound(**kwargs)
+
+ @sql.handle_conflicts(conflict_type='federation_protocol')
+ def create_protocol(self, idp_id, protocol_id, protocol):
+ protocol['id'] = protocol_id
+ protocol['idp_id'] = idp_id
+ with sql.session_for_write() as session:
+ self._get_idp(session, idp_id)
+ protocol_ref = FederationProtocolModel.from_dict(protocol)
+ session.add(protocol_ref)
+ return protocol_ref.to_dict()
+
+ def update_protocol(self, idp_id, protocol_id, protocol):
+ with sql.session_for_write() as session:
+ proto_ref = self._get_protocol(session, idp_id, protocol_id)
+ old_proto = proto_ref.to_dict()
+ old_proto.update(protocol)
+ new_proto = FederationProtocolModel.from_dict(old_proto)
+ for attr in FederationProtocolModel.mutable_attributes:
+ setattr(proto_ref, attr, getattr(new_proto, attr))
+ return proto_ref.to_dict()
+
+ def get_protocol(self, idp_id, protocol_id):
+ with sql.session_for_read() as session:
+ protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+ return protocol_ref.to_dict()
+
+ def list_protocols(self, idp_id):
+ with sql.session_for_read() as session:
+ q = session.query(FederationProtocolModel)
+ q = q.filter_by(idp_id=idp_id)
+ protocols = [protocol.to_dict() for protocol in q]
+ return protocols
+
+ def delete_protocol(self, idp_id, protocol_id):
+ with sql.session_for_write() as session:
+ key_ref = self._get_protocol(session, idp_id, protocol_id)
+ session.delete(key_ref)
+
+ def _delete_assigned_protocols(self, session, idp_id):
+ query = session.query(FederationProtocolModel)
+ query = query.filter_by(idp_id=idp_id)
+ query.delete()
+
+ # Mapping CRUD
+ def _get_mapping(self, session, mapping_id):
+ mapping_ref = session.query(MappingModel).get(mapping_id)
+ if not mapping_ref:
+ raise exception.MappingNotFound(mapping_id=mapping_id)
+ return mapping_ref
+
+ @sql.handle_conflicts(conflict_type='mapping')
+ def create_mapping(self, mapping_id, mapping):
+ ref = {}
+ ref['id'] = mapping_id
+ ref['rules'] = mapping.get('rules')
+ with sql.session_for_write() as session:
+ mapping_ref = MappingModel.from_dict(ref)
+ session.add(mapping_ref)
+ return mapping_ref.to_dict()
+
+ def delete_mapping(self, mapping_id):
+ with sql.session_for_write() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ session.delete(mapping_ref)
+
+ def list_mappings(self):
+ with sql.session_for_read() as session:
+ mappings = session.query(MappingModel)
+ return [x.to_dict() for x in mappings]
+
+ def get_mapping(self, mapping_id):
+ with sql.session_for_read() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ return mapping_ref.to_dict()
+
+ @sql.handle_conflicts(conflict_type='mapping')
+ def update_mapping(self, mapping_id, mapping):
+ ref = {}
+ ref['id'] = mapping_id
+ ref['rules'] = mapping.get('rules')
+ with sql.session_for_write() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ old_mapping = mapping_ref.to_dict()
+ old_mapping.update(ref)
+ new_mapping = MappingModel.from_dict(old_mapping)
+ for attr in MappingModel.attributes:
+ setattr(mapping_ref, attr, getattr(new_mapping, attr))
+ return mapping_ref.to_dict()
+
+ def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+ with sql.session_for_read() as session:
+ protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+ mapping_id = protocol_ref.mapping_id
+ mapping_ref = self._get_mapping(session, mapping_id)
+ return mapping_ref.to_dict()
+
+ # Service Provider CRUD
+ @sql.handle_conflicts(conflict_type='service_provider')
+ def create_sp(self, sp_id, sp):
+ sp['id'] = sp_id
+ with sql.session_for_write() as session:
+ sp_ref = ServiceProviderModel.from_dict(sp)
+ session.add(sp_ref)
+ return sp_ref.to_dict()
+
+ def delete_sp(self, sp_id):
+ with sql.session_for_write() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ session.delete(sp_ref)
+
+ def _get_sp(self, session, sp_id):
+ sp_ref = session.query(ServiceProviderModel).get(sp_id)
+ if not sp_ref:
+ raise exception.ServiceProviderNotFound(sp_id=sp_id)
+ return sp_ref
+
+ def list_sps(self):
+ with sql.session_for_read() as session:
+ sps = session.query(ServiceProviderModel)
+ sps_list = [sp.to_dict() for sp in sps]
+ return sps_list
+
+ def get_sp(self, sp_id):
+ with sql.session_for_read() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ return sp_ref.to_dict()
+
+ def update_sp(self, sp_id, sp):
+ with sql.session_for_write() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ old_sp = sp_ref.to_dict()
+ old_sp.update(sp)
+ new_sp = ServiceProviderModel.from_dict(old_sp)
+ for attr in ServiceProviderModel.mutable_attributes:
+ setattr(sp_ref, attr, getattr(new_sp, attr))
+ return sp_ref.to_dict()
+
+ def get_enabled_service_providers(self):
+ with sql.session_for_read() as session:
+ service_providers = session.query(ServiceProviderModel)
+ service_providers = service_providers.filter_by(enabled=True)
+ return service_providers
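
The NOTE(fmarco76) comment explains that remote IDs live in their own idp_remote_ids table but are exposed as a flat list. A hypothetical round-trip showing how from_dict()/to_dict() hide the IdPRemoteIdsModel rows entirely (no database session is needed for the conversion itself):

    idp = IdentityProviderModel.from_dict({
        'id': 'myidp', 'enabled': True, 'description': None,
        'remote_ids': ['https://idp.example.com/saml2'],
    })
    # Child rows are rebuilt from, and flattened back into, a plain list.
    assert idp.to_dict()['remote_ids'] == ['https://idp.example.com/saml2']
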
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py b/keystone-moon/keystone/federation/__init__.py
index e45133ab..b62cfb6f 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/063_drop_region_auth_url.py
+++ b/keystone-moon/keystone/federation/__init__.py
@@ -1,3 +1,5 @@
+# Copyright 2014 OpenStack Foundation
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,15 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-
-_REGION_TABLE_NAME = 'region'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- region_table.drop_column('url')
+from keystone.federation.core import * # noqa
diff --git a/keystone-moon/keystone/federation/backends/__init__.py b/keystone-moon/keystone/federation/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/federation/backends/__init__.py
diff --git a/keystone-moon/keystone/federation/backends/sql.py b/keystone-moon/keystone/federation/backends/sql.py
new file mode 100644
index 00000000..add409e6
--- /dev/null
+++ b/keystone-moon/keystone/federation/backends/sql.py
@@ -0,0 +1,393 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_serialization import jsonutils
+import six
+from sqlalchemy import orm
+
+from keystone.common import sql
+from keystone import exception
+from keystone.federation import core
+from keystone.i18n import _
+
+
+LOG = log.getLogger(__name__)
+
+
+class FederationProtocolModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'federation_protocol'
+ attributes = ['id', 'idp_id', 'mapping_id']
+ mutable_attributes = frozenset(['mapping_id'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'), primary_key=True)
+ mapping_id = sql.Column(sql.String(64), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class IdentityProviderModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'identity_provider'
+ attributes = ['id', 'enabled', 'description', 'remote_ids']
+ mutable_attributes = frozenset(['description', 'enabled', 'remote_ids'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ enabled = sql.Column(sql.Boolean, nullable=False)
+ description = sql.Column(sql.Text(), nullable=True)
+ remote_ids = orm.relationship('IdPRemoteIdsModel',
+ order_by='IdPRemoteIdsModel.remote_id',
+ cascade='all, delete-orphan')
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ remote_ids_list = new_dictionary.pop('remote_ids', None)
+ if not remote_ids_list:
+ remote_ids_list = []
+ identity_provider = cls(**new_dictionary)
+ remote_ids = []
+ # NOTE(fmarco76): the remote_ids_list contains only remote ids
+ # associated with the IdP because of the "relationship" established in
+ # sqlalchemy and corresponding to the FK in the idp_remote_ids table
+ for remote in remote_ids_list:
+ remote_ids.append(IdPRemoteIdsModel(remote_id=remote))
+ identity_provider.remote_ids = remote_ids
+ return identity_provider
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ d['remote_ids'] = []
+ for remote in self.remote_ids:
+ d['remote_ids'].append(remote.remote_id)
+ return d
+
+
+class IdPRemoteIdsModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'idp_remote_ids'
+ attributes = ['idp_id', 'remote_id']
+ mutable_attributes = frozenset(['idp_id', 'remote_id'])
+
+ idp_id = sql.Column(sql.String(64),
+ sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'))
+ remote_id = sql.Column(sql.String(255),
+ primary_key=True)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class MappingModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'mapping'
+ attributes = ['id', 'rules']
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ rules = sql.Column(sql.JsonBlob(), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules'])
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ d['rules'] = jsonutils.loads(d['rules'])
+ return d
+
+
+class ServiceProviderModel(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'service_provider'
+ attributes = ['auth_url', 'id', 'enabled', 'description',
+ 'relay_state_prefix', 'sp_url']
+ mutable_attributes = frozenset(['auth_url', 'description', 'enabled',
+ 'relay_state_prefix', 'sp_url'])
+
+ id = sql.Column(sql.String(64), primary_key=True)
+ enabled = sql.Column(sql.Boolean, nullable=False)
+ description = sql.Column(sql.Text(), nullable=True)
+ auth_url = sql.Column(sql.String(256), nullable=False)
+ sp_url = sql.Column(sql.String(256), nullable=False)
+ relay_state_prefix = sql.Column(sql.String(256), nullable=False)
+
+ @classmethod
+ def from_dict(cls, dictionary):
+ new_dictionary = dictionary.copy()
+ return cls(**new_dictionary)
+
+ def to_dict(self):
+ """Return a dictionary with model's attributes."""
+ d = dict()
+ for attr in self.__class__.attributes:
+ d[attr] = getattr(self, attr)
+ return d
+
+
+class Federation(core.FederationDriverV9):
+
+ _CONFLICT_LOG_MSG = 'Conflict %(conflict_type)s: %(details)s'
+
+ def _handle_idp_conflict(self, e):
+ conflict_type = 'identity_provider'
+ details = six.text_type(e)
+ LOG.debug(self._CONFLICT_LOG_MSG, {'conflict_type': conflict_type,
+ 'details': details})
+ if 'remote_id' in details:
+ msg = _('Duplicate remote ID: %s')
+ else:
+ msg = _('Duplicate entry: %s')
+ msg = msg % e.value
+ raise exception.Conflict(type=conflict_type, details=msg)
+
+ # Identity Provider CRUD
+ def create_idp(self, idp_id, idp):
+ idp['id'] = idp_id
+ try:
+ with sql.session_for_write() as session:
+ idp_ref = IdentityProviderModel.from_dict(idp)
+ session.add(idp_ref)
+ return idp_ref.to_dict()
+ except sql.DBDuplicateEntry as e:
+ self._handle_idp_conflict(e)
+
+ def delete_idp(self, idp_id):
+ with sql.session_for_write() as session:
+ self._delete_assigned_protocols(session, idp_id)
+ idp_ref = self._get_idp(session, idp_id)
+ session.delete(idp_ref)
+
+ def _get_idp(self, session, idp_id):
+ idp_ref = session.query(IdentityProviderModel).get(idp_id)
+ if not idp_ref:
+ raise exception.IdentityProviderNotFound(idp_id=idp_id)
+ return idp_ref
+
+ def _get_idp_from_remote_id(self, session, remote_id):
+ q = session.query(IdPRemoteIdsModel)
+ q = q.filter_by(remote_id=remote_id)
+ try:
+ return q.one()
+ except sql.NotFound:
+ raise exception.IdentityProviderNotFound(idp_id=remote_id)
+
+ def list_idps(self, hints=None):
+ with sql.session_for_read() as session:
+ query = session.query(IdentityProviderModel)
+ idps = sql.filter_limit_query(IdentityProviderModel, query, hints)
+ idps_list = [idp.to_dict() for idp in idps]
+ return idps_list
+
+ def get_idp(self, idp_id):
+ with sql.session_for_read() as session:
+ idp_ref = self._get_idp(session, idp_id)
+ return idp_ref.to_dict()
+
+ def get_idp_from_remote_id(self, remote_id):
+ with sql.session_for_read() as session:
+ ref = self._get_idp_from_remote_id(session, remote_id)
+ return ref.to_dict()
+
+ def update_idp(self, idp_id, idp):
+ try:
+ with sql.session_for_write() as session:
+ idp_ref = self._get_idp(session, idp_id)
+ old_idp = idp_ref.to_dict()
+ old_idp.update(idp)
+ new_idp = IdentityProviderModel.from_dict(old_idp)
+ for attr in IdentityProviderModel.mutable_attributes:
+ setattr(idp_ref, attr, getattr(new_idp, attr))
+ return idp_ref.to_dict()
+ except sql.DBDuplicateEntry as e:
+ self._handle_idp_conflict(e)
+
+ # Protocol CRUD
+ def _get_protocol(self, session, idp_id, protocol_id):
+ q = session.query(FederationProtocolModel)
+ q = q.filter_by(id=protocol_id, idp_id=idp_id)
+ try:
+ return q.one()
+ except sql.NotFound:
+ kwargs = {'protocol_id': protocol_id,
+ 'idp_id': idp_id}
+ raise exception.FederatedProtocolNotFound(**kwargs)
+
+ @sql.handle_conflicts(conflict_type='federation_protocol')
+ def create_protocol(self, idp_id, protocol_id, protocol):
+ protocol['id'] = protocol_id
+ protocol['idp_id'] = idp_id
+ with sql.session_for_write() as session:
+ self._get_idp(session, idp_id)
+ protocol_ref = FederationProtocolModel.from_dict(protocol)
+ session.add(protocol_ref)
+ return protocol_ref.to_dict()
+
+ def update_protocol(self, idp_id, protocol_id, protocol):
+ with sql.session_for_write() as session:
+ proto_ref = self._get_protocol(session, idp_id, protocol_id)
+ old_proto = proto_ref.to_dict()
+ old_proto.update(protocol)
+ new_proto = FederationProtocolModel.from_dict(old_proto)
+ for attr in FederationProtocolModel.mutable_attributes:
+ setattr(proto_ref, attr, getattr(new_proto, attr))
+ return proto_ref.to_dict()
+
+ def get_protocol(self, idp_id, protocol_id):
+ with sql.session_for_read() as session:
+ protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+ return protocol_ref.to_dict()
+
+ def list_protocols(self, idp_id):
+ with sql.session_for_read() as session:
+ q = session.query(FederationProtocolModel)
+ q = q.filter_by(idp_id=idp_id)
+ protocols = [protocol.to_dict() for protocol in q]
+ return protocols
+
+ def delete_protocol(self, idp_id, protocol_id):
+ with sql.session_for_write() as session:
+ key_ref = self._get_protocol(session, idp_id, protocol_id)
+ session.delete(key_ref)
+
+ def _delete_assigned_protocols(self, session, idp_id):
+ query = session.query(FederationProtocolModel)
+ query = query.filter_by(idp_id=idp_id)
+ query.delete()
+
+ # Mapping CRUD
+ def _get_mapping(self, session, mapping_id):
+ mapping_ref = session.query(MappingModel).get(mapping_id)
+ if not mapping_ref:
+ raise exception.MappingNotFound(mapping_id=mapping_id)
+ return mapping_ref
+
+ @sql.handle_conflicts(conflict_type='mapping')
+ def create_mapping(self, mapping_id, mapping):
+ ref = {}
+ ref['id'] = mapping_id
+ ref['rules'] = mapping.get('rules')
+ with sql.session_for_write() as session:
+ mapping_ref = MappingModel.from_dict(ref)
+ session.add(mapping_ref)
+ return mapping_ref.to_dict()
+
+ def delete_mapping(self, mapping_id):
+ with sql.session_for_write() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ session.delete(mapping_ref)
+
+ def list_mappings(self):
+ with sql.session_for_read() as session:
+ mappings = session.query(MappingModel)
+ return [x.to_dict() for x in mappings]
+
+ def get_mapping(self, mapping_id):
+ with sql.session_for_read() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ return mapping_ref.to_dict()
+
+ @sql.handle_conflicts(conflict_type='mapping')
+ def update_mapping(self, mapping_id, mapping):
+ ref = {}
+ ref['id'] = mapping_id
+ ref['rules'] = mapping.get('rules')
+ with sql.session_for_write() as session:
+ mapping_ref = self._get_mapping(session, mapping_id)
+ old_mapping = mapping_ref.to_dict()
+ old_mapping.update(ref)
+ new_mapping = MappingModel.from_dict(old_mapping)
+ for attr in MappingModel.attributes:
+ setattr(mapping_ref, attr, getattr(new_mapping, attr))
+ return mapping_ref.to_dict()
+
+ def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+ with sql.session_for_read() as session:
+ protocol_ref = self._get_protocol(session, idp_id, protocol_id)
+ mapping_id = protocol_ref.mapping_id
+ mapping_ref = self._get_mapping(session, mapping_id)
+ return mapping_ref.to_dict()
+
+ # Service Provider CRUD
+ @sql.handle_conflicts(conflict_type='service_provider')
+ def create_sp(self, sp_id, sp):
+ sp['id'] = sp_id
+ with sql.session_for_write() as session:
+ sp_ref = ServiceProviderModel.from_dict(sp)
+ session.add(sp_ref)
+ return sp_ref.to_dict()
+
+ def delete_sp(self, sp_id):
+ with sql.session_for_write() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ session.delete(sp_ref)
+
+ def _get_sp(self, session, sp_id):
+ sp_ref = session.query(ServiceProviderModel).get(sp_id)
+ if not sp_ref:
+ raise exception.ServiceProviderNotFound(sp_id=sp_id)
+ return sp_ref
+
+ def list_sps(self, hints=None):
+ with sql.session_for_read() as session:
+ query = session.query(ServiceProviderModel)
+ sps = sql.filter_limit_query(ServiceProviderModel, query, hints)
+ sps_list = [sp.to_dict() for sp in sps]
+ return sps_list
+
+ def get_sp(self, sp_id):
+ with sql.session_for_read() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ return sp_ref.to_dict()
+
+ def update_sp(self, sp_id, sp):
+ with sql.session_for_write() as session:
+ sp_ref = self._get_sp(session, sp_id)
+ old_sp = sp_ref.to_dict()
+ old_sp.update(sp)
+ new_sp = ServiceProviderModel.from_dict(old_sp)
+ for attr in ServiceProviderModel.mutable_attributes:
+ setattr(sp_ref, attr, getattr(new_sp, attr))
+ return sp_ref.to_dict()
+
+ def get_enabled_service_providers(self):
+ with sql.session_for_read() as session:
+ service_providers = session.query(ServiceProviderModel)
+ service_providers = service_providers.filter_by(enabled=True)
+ return service_providers
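
This module is the V9 successor to the V8_backends copy above; the substantive differences are that list_idps() and list_sps() accept driver hints, pushing list filtering and limiting into the SQL query via filter_limit_query(), and that create_idp() maps DBDuplicateEntry onto the duplicate-remote-ID message itself rather than relying on the generic decorator. Hypothetical hints usage (Hints comes from keystone.common.driver_hints):

    from keystone.common import driver_hints

    hints = driver_hints.Hints()
    hints.add_filter('enabled', True)  # rendered as a WHERE clause
    enabled_idps = driver.list_idps(hints=hints)
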
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py b/keystone-moon/keystone/federation/constants.py
index 86302a8f..afb38494 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/052_add_auth_url_to_region.py
+++ b/keystone-moon/keystone/federation/constants.py
@@ -1,5 +1,3 @@
-# Copyright 2014 IBM Corp.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -12,16 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-
-_REGION_TABLE_NAME = 'region'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- region_table = sql.Table(_REGION_TABLE_NAME, meta, autoload=True)
- url_column = sql.Column('url', sql.String(255), nullable=True)
- region_table.create_column(url_column)
+FEDERATION = 'OS-FEDERATION'
+IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider'
+PROTOCOL = 'OS-FEDERATION:protocol'
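
The pairing with the old 052 migration is an artifact of git's similarity-based rename detection; the new module merely names the extension and its resource types in one place. Hypothetical use when composing extension-scoped paths or targets:

    # e.g. '/OS-FEDERATION/identity_providers'
    path = '/%s/identity_providers' % FEDERATION
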
diff --git a/keystone-moon/keystone/federation/controllers.py b/keystone-moon/keystone/federation/controllers.py
new file mode 100644
index 00000000..b9e2d883
--- /dev/null
+++ b/keystone-moon/keystone/federation/controllers.py
@@ -0,0 +1,519 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Workflow logic for the Federation service."""
+
+import string
+
+from oslo_config import cfg
+from oslo_log import log
+import six
+from six.moves import urllib
+import webob
+
+from keystone.auth import controllers as auth_controllers
+from keystone.common import authorization
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import utils as k_utils
+from keystone.common import validation
+from keystone.common import wsgi
+from keystone import exception
+from keystone.federation import idp as keystone_idp
+from keystone.federation import schema
+from keystone.federation import utils
+from keystone.i18n import _
+from keystone.models import token_model
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class _ControllerBase(controller.V3Controller):
+ """Base behaviors for federation controllers."""
+
+ @classmethod
+ def base_url(cls, context, path=None):
+ """Construct a path and pass it to V3Controller.base_url method."""
+ path = '/OS-FEDERATION/' + cls.collection_name
+ return super(_ControllerBase, cls).base_url(context, path=path)
+
+
+@dependency.requires('federation_api')
+class IdentityProvider(_ControllerBase):
+ """Identity Provider representation."""
+
+ collection_name = 'identity_providers'
+ member_name = 'identity_provider'
+
+ _public_parameters = frozenset(['id', 'enabled', 'description',
+ 'remote_ids', 'links'])
+
+ @classmethod
+ def _add_related_links(cls, context, ref):
+ """Add URLs for entities related with Identity Provider.
+
+ Add URLs pointing to:
+ - protocols tied to the Identity Provider
+
+ """
+ ref.setdefault('links', {})
+ base_path = ref['links'].get('self')
+ if base_path is None:
+ base_path = '/'.join([IdentityProvider.base_url(context),
+ ref['id']])
+ for name in ['protocols']:
+ ref['links'][name] = '/'.join([base_path, name])
+
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ id = ref['id']
+ self_path = '/'.join([cls.base_url(context), id])
+ ref.setdefault('links', {})
+ ref['links']['self'] = self_path
+
+ @classmethod
+ def wrap_member(cls, context, ref):
+ cls._add_self_referential_link(context, ref)
+ cls._add_related_links(context, ref)
+ ref = cls.filter_params(ref)
+ return {cls.member_name: ref}
+
+ @controller.protected()
+ @validation.validated(schema.identity_provider_create, 'identity_provider')
+ def create_identity_provider(self, context, idp_id, identity_provider):
+ identity_provider = self._normalize_dict(identity_provider)
+ identity_provider.setdefault('enabled', False)
+ idp_ref = self.federation_api.create_idp(idp_id, identity_provider)
+ response = IdentityProvider.wrap_member(context, idp_ref)
+ return wsgi.render_response(body=response, status=('201', 'Created'))
+
+ @controller.filterprotected('id', 'enabled')
+ def list_identity_providers(self, context, filters):
+ hints = self.build_driver_hints(context, filters)
+ ref = self.federation_api.list_idps(hints=hints)
+ ref = [self.filter_params(x) for x in ref]
+ return IdentityProvider.wrap_collection(context, ref, hints=hints)
+
+ @controller.protected()
+ def get_identity_provider(self, context, idp_id):
+ ref = self.federation_api.get_idp(idp_id)
+ return IdentityProvider.wrap_member(context, ref)
+
+ @controller.protected()
+ def delete_identity_provider(self, context, idp_id):
+ self.federation_api.delete_idp(idp_id)
+
+ @controller.protected()
+ @validation.validated(schema.identity_provider_update, 'identity_provider')
+ def update_identity_provider(self, context, idp_id, identity_provider):
+ identity_provider = self._normalize_dict(identity_provider)
+ idp_ref = self.federation_api.update_idp(idp_id, identity_provider)
+ return IdentityProvider.wrap_member(context, idp_ref)
+
+
+@dependency.requires('federation_api')
+class FederationProtocol(_ControllerBase):
+ """A federation protocol representation.
+
+    See the keystone.common.controller.V3Controller docstring for an
+    explanation of the _public_parameters class attribute.
+
+ """
+
+ collection_name = 'protocols'
+ member_name = 'protocol'
+
+ _public_parameters = frozenset(['id', 'mapping_id', 'links'])
+
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ """Add 'links' entry to the response dictionary.
+
+        Calls the IdentityProvider.base_url() class method, as it constructs
+        the proper URL with the 'identity providers' part included.
+
+ :param ref: response dictionary
+
+ """
+ ref.setdefault('links', {})
+ base_path = ref['links'].get('identity_provider')
+ if base_path is None:
+ base_path = [IdentityProvider.base_url(context), ref['idp_id']]
+ base_path = '/'.join(base_path)
+ self_path = [base_path, 'protocols', ref['id']]
+ self_path = '/'.join(self_path)
+ ref['links']['self'] = self_path
+
+ @classmethod
+ def _add_related_links(cls, context, ref):
+ """Add new entries to the 'links' subdictionary in the response.
+
+        Adds an 'identity_provider' key whose value is a URL pointing to
+        the related identity provider.
+
+ :param ref: response dictionary
+
+ """
+ ref.setdefault('links', {})
+ base_path = '/'.join([IdentityProvider.base_url(context),
+ ref['idp_id']])
+ ref['links']['identity_provider'] = base_path
+
+ @classmethod
+ def wrap_member(cls, context, ref):
+ cls._add_related_links(context, ref)
+ cls._add_self_referential_link(context, ref)
+ ref = cls.filter_params(ref)
+ return {cls.member_name: ref}
+
+ @controller.protected()
+ @validation.validated(schema.federation_protocol_schema, 'protocol')
+ def create_protocol(self, context, idp_id, protocol_id, protocol):
+ ref = self._normalize_dict(protocol)
+ ref = self.federation_api.create_protocol(idp_id, protocol_id, ref)
+ response = FederationProtocol.wrap_member(context, ref)
+ return wsgi.render_response(body=response, status=('201', 'Created'))
+
+ @controller.protected()
+ @validation.validated(schema.federation_protocol_schema, 'protocol')
+ def update_protocol(self, context, idp_id, protocol_id, protocol):
+ ref = self._normalize_dict(protocol)
+ ref = self.federation_api.update_protocol(idp_id, protocol_id,
+ protocol)
+ return FederationProtocol.wrap_member(context, ref)
+
+ @controller.protected()
+ def get_protocol(self, context, idp_id, protocol_id):
+ ref = self.federation_api.get_protocol(idp_id, protocol_id)
+ return FederationProtocol.wrap_member(context, ref)
+
+ @controller.protected()
+ def list_protocols(self, context, idp_id):
+ protocols_ref = self.federation_api.list_protocols(idp_id)
+ protocols = list(protocols_ref)
+ return FederationProtocol.wrap_collection(context, protocols)
+
+ @controller.protected()
+ def delete_protocol(self, context, idp_id, protocol_id):
+ self.federation_api.delete_protocol(idp_id, protocol_id)
+
+
+@dependency.requires('federation_api')
+class MappingController(_ControllerBase):
+ collection_name = 'mappings'
+ member_name = 'mapping'
+
+ @controller.protected()
+ def create_mapping(self, context, mapping_id, mapping):
+ ref = self._normalize_dict(mapping)
+ utils.validate_mapping_structure(ref)
+ mapping_ref = self.federation_api.create_mapping(mapping_id, ref)
+ response = MappingController.wrap_member(context, mapping_ref)
+ return wsgi.render_response(body=response, status=('201', 'Created'))
+
+ @controller.protected()
+ def list_mappings(self, context):
+ ref = self.federation_api.list_mappings()
+ return MappingController.wrap_collection(context, ref)
+
+ @controller.protected()
+ def get_mapping(self, context, mapping_id):
+ ref = self.federation_api.get_mapping(mapping_id)
+ return MappingController.wrap_member(context, ref)
+
+ @controller.protected()
+ def delete_mapping(self, context, mapping_id):
+ self.federation_api.delete_mapping(mapping_id)
+
+ @controller.protected()
+ def update_mapping(self, context, mapping_id, mapping):
+ mapping = self._normalize_dict(mapping)
+ utils.validate_mapping_structure(mapping)
+ mapping_ref = self.federation_api.update_mapping(mapping_id, mapping)
+ return MappingController.wrap_member(context, mapping_ref)
+
+
+@dependency.requires('federation_api')
+class Auth(auth_controllers.Auth):
+
+ def _get_sso_origin_host(self, context):
+ """Validate and return originating dashboard URL.
+
+        Make sure the ``origin`` parameter is specified in the request's URL
+        and that its value belongs to the list of trusted dashboards.
+
+ :param context: request's context
+ :raises keystone.exception.ValidationError: ``origin`` query parameter
+ was not specified. The URL is deemed invalid.
+ :raises keystone.exception.Unauthorized: URL specified in origin query
+ parameter does not exist in list of websso trusted dashboards.
+ :returns: URL with the originating dashboard
+
+ """
+ if 'origin' in context['query_string']:
+ origin = context['query_string']['origin']
+ host = urllib.parse.unquote_plus(origin)
+ else:
+ msg = _('Request must have an origin query parameter')
+ LOG.error(msg)
+ raise exception.ValidationError(msg)
+
+ # change trusted_dashboard hostnames to lowercase before comparison
+ trusted_dashboards = [k_utils.lower_case_hostname(trusted)
+ for trusted in CONF.federation.trusted_dashboard]
+
+ if host not in trusted_dashboards:
+ msg = _('%(host)s is not a trusted dashboard host')
+ msg = msg % {'host': host}
+ LOG.error(msg)
+ raise exception.Unauthorized(msg)
+
+ return host
+
+ def federated_authentication(self, context, idp_id, protocol_id):
+ """Authenticate from dedicated url endpoint.
+
+ Build HTTP request body for federated authentication and inject
+ it into the ``authenticate_for_token`` function.
+
+ """
+ auth = {
+ 'identity': {
+ 'methods': [protocol_id],
+ protocol_id: {
+ 'identity_provider': idp_id,
+ 'protocol': protocol_id
+ }
+ }
+ }
+
+ return self.authenticate_for_token(context, auth=auth)
+
+ def federated_sso_auth(self, context, protocol_id):
+ try:
+ remote_id_name = utils.get_remote_id_parameter(protocol_id)
+ remote_id = context['environment'][remote_id_name]
+ except KeyError:
+ msg = _('Missing entity ID from environment')
+ LOG.error(msg)
+ raise exception.Unauthorized(msg)
+
+ host = self._get_sso_origin_host(context)
+
+ ref = self.federation_api.get_idp_from_remote_id(remote_id)
+ # NOTE(stevemar): the returned object is a simple dict that
+ # contains the idp_id and remote_id.
+ identity_provider = ref['idp_id']
+ res = self.federated_authentication(context, identity_provider,
+ protocol_id)
+ token_id = res.headers['X-Subject-Token']
+ return self.render_html_response(host, token_id)
+
+ def federated_idp_specific_sso_auth(self, context, idp_id, protocol_id):
+ host = self._get_sso_origin_host(context)
+
+ # NOTE(lbragstad): We validate that the Identity Provider actually
+ # exists in the Mapped authentication plugin.
+ res = self.federated_authentication(context, idp_id, protocol_id)
+ token_id = res.headers['X-Subject-Token']
+ return self.render_html_response(host, token_id)
+
+ def render_html_response(self, host, token_id):
+ """Forms an HTML Form from a template with autosubmit."""
+ headers = [('Content-Type', 'text/html')]
+
+ with open(CONF.federation.sso_callback_template) as template:
+ src = string.Template(template.read())
+
+ subs = {'host': host, 'token': token_id}
+ body = src.substitute(subs)
+ return webob.Response(body=body, status='200',
+ headerlist=headers)
+
+ def _create_base_saml_assertion(self, context, auth):
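+        # Resolve the target service provider, require a project-scoped
+        # token, then samlize the token attributes into an assertion.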
+ issuer = CONF.saml.idp_entity_id
+ sp_id = auth['scope']['service_provider']['id']
+ service_provider = self.federation_api.get_sp(sp_id)
+ utils.assert_enabled_service_provider_object(service_provider)
+ sp_url = service_provider['sp_url']
+
+ token_id = auth['identity']['token']['id']
+ token_data = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(token_id, token_data)
+
+ if not token_ref.project_scoped:
+ action = _('Use a project scoped token when attempting to create '
+ 'a SAML assertion')
+ raise exception.ForbiddenAction(action=action)
+
+ subject = token_ref.user_name
+ roles = token_ref.role_names
+ project = token_ref.project_name
+ # NOTE(rodrigods): the domain name is necessary in order to distinguish
+ # between projects and users with the same name in different domains.
+ project_domain_name = token_ref.project_domain_name
+ subject_domain_name = token_ref.user_domain_name
+
+ generator = keystone_idp.SAMLGenerator()
+ response = generator.samlize_token(
+ issuer, sp_url, subject, subject_domain_name,
+ roles, project, project_domain_name)
+ return (response, service_provider)
+
+ def _build_response_headers(self, service_provider):
+ return [('Content-Type', 'text/xml'),
+ ('X-sp-url', six.binary_type(service_provider['sp_url'])),
+ ('X-auth-url', six.binary_type(service_provider['auth_url']))]
+
+ @validation.validated(schema.saml_create, 'auth')
+ def create_saml_assertion(self, context, auth):
+ """Exchange a scoped token for a SAML assertion.
+
+ :param auth: Dictionary that contains a token and service provider ID
+ :returns: SAML Assertion based on properties from the token
+ """
+ t = self._create_base_saml_assertion(context, auth)
+ (response, service_provider) = t
+
+ headers = self._build_response_headers(service_provider)
+ return wsgi.render_response(body=response.to_string(),
+ status=('200', 'OK'),
+ headers=headers)
+
+ @validation.validated(schema.saml_create, 'auth')
+ def create_ecp_assertion(self, context, auth):
+ """Exchange a scoped token for an ECP assertion.
+
+ :param auth: Dictionary that contains a token and service provider ID
+ :returns: ECP Assertion based on properties from the token
+ """
+ t = self._create_base_saml_assertion(context, auth)
+ (saml_assertion, service_provider) = t
+ relay_state_prefix = service_provider['relay_state_prefix']
+
+ generator = keystone_idp.ECPGenerator()
+ ecp_assertion = generator.generate_ecp(saml_assertion,
+ relay_state_prefix)
+
+ headers = self._build_response_headers(service_provider)
+ return wsgi.render_response(body=ecp_assertion.to_string(),
+ status=('200', 'OK'),
+ headers=headers)
+
+
+@dependency.requires('assignment_api', 'resource_api')
+class DomainV3(controller.V3Controller):
+ collection_name = 'domains'
+ member_name = 'domain'
+
+ def __init__(self):
+ super(DomainV3, self).__init__()
+ self.get_member_from_driver = self.resource_api.get_domain
+
+ @controller.protected()
+ def list_domains_for_groups(self, context):
+ """List all domains available to an authenticated user's groups.
+
+ :param context: request context
+ :returns: list of accessible domains
+
+ """
+ auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
+ domains = self.assignment_api.list_domains_for_groups(
+ auth_context['group_ids'])
+ return DomainV3.wrap_collection(context, domains)
+
+
+@dependency.requires('assignment_api', 'resource_api')
+class ProjectAssignmentV3(controller.V3Controller):
+ collection_name = 'projects'
+ member_name = 'project'
+
+ def __init__(self):
+ super(ProjectAssignmentV3, self).__init__()
+ self.get_member_from_driver = self.resource_api.get_project
+
+ @controller.protected()
+ def list_projects_for_groups(self, context):
+ """List all projects available to an authenticated user's groups.
+
+ :param context: request context
+ :returns: list of accessible projects
+
+ """
+ auth_context = context['environment'][authorization.AUTH_CONTEXT_ENV]
+ projects = self.assignment_api.list_projects_for_groups(
+ auth_context['group_ids'])
+ return ProjectAssignmentV3.wrap_collection(context, projects)
+
+
+@dependency.requires('federation_api')
+class ServiceProvider(_ControllerBase):
+ """Service Provider representation."""
+
+ collection_name = 'service_providers'
+ member_name = 'service_provider'
+
+ _public_parameters = frozenset(['auth_url', 'id', 'enabled', 'description',
+ 'links', 'relay_state_prefix', 'sp_url'])
+
+ @controller.protected()
+ @validation.validated(schema.service_provider_create, 'service_provider')
+ def create_service_provider(self, context, sp_id, service_provider):
+ service_provider = self._normalize_dict(service_provider)
+ service_provider.setdefault('enabled', False)
+ service_provider.setdefault('relay_state_prefix',
+ CONF.saml.relay_state_prefix)
+ sp_ref = self.federation_api.create_sp(sp_id, service_provider)
+ response = ServiceProvider.wrap_member(context, sp_ref)
+ return wsgi.render_response(body=response, status=('201', 'Created'))
+
+ @controller.filterprotected('id', 'enabled')
+ def list_service_providers(self, context, filters):
+ hints = self.build_driver_hints(context, filters)
+ ref = self.federation_api.list_sps(hints=hints)
+ ref = [self.filter_params(x) for x in ref]
+ return ServiceProvider.wrap_collection(context, ref, hints=hints)
+
+ @controller.protected()
+ def get_service_provider(self, context, sp_id):
+ ref = self.federation_api.get_sp(sp_id)
+ return ServiceProvider.wrap_member(context, ref)
+
+ @controller.protected()
+ def delete_service_provider(self, context, sp_id):
+ self.federation_api.delete_sp(sp_id)
+
+ @controller.protected()
+ @validation.validated(schema.service_provider_update, 'service_provider')
+ def update_service_provider(self, context, sp_id, service_provider):
+ service_provider = self._normalize_dict(service_provider)
+ sp_ref = self.federation_api.update_sp(sp_id, service_provider)
+ return ServiceProvider.wrap_member(context, sp_ref)
+
+
+class SAMLMetadataV3(_ControllerBase):
+ member_name = 'metadata'
+
+ def get_metadata(self, context):
+ metadata_path = CONF.saml.idp_metadata_path
+ try:
+ with open(metadata_path, 'r') as metadata_handler:
+ metadata = metadata_handler.read()
+ except IOError as e:
+ # Raise HTTP 500 in case Metadata file cannot be read.
+ raise exception.MetadataFileError(reason=e)
+ return wsgi.render_response(body=metadata, status=('200', 'OK'),
+ headers=[('Content-Type', 'text/xml')])
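
As an aside on the websso flow above: ``render_html_response`` is only a
``string.Template`` substitution. A minimal, self-contained sketch, assuming
a callback template that uses ``$host`` and ``$token`` placeholders
(matching the keys of the ``subs`` dict)::

    import string

    # Hypothetical stand-in for the file named by
    # CONF.federation.sso_callback_template.
    TEMPLATE = """<html><body onload="document.forms[0].submit()">
      <form method="POST" action="$host/auth/websso/">
        <input type="hidden" name="token" value="$token"/>
      </form></body></html>"""

    def render(host, token_id):
        # Mirrors the subs dict built in render_html_response().
        return string.Template(TEMPLATE).substitute(
            {'host': host, 'token': token_id})

    print(render('https://horizon.example.com', 'c5954543230e4e77'))
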
diff --git a/keystone-moon/keystone/federation/core.py b/keystone-moon/keystone/federation/core.py
new file mode 100644
index 00000000..23028dfd
--- /dev/null
+++ b/keystone-moon/keystone/federation/core.py
@@ -0,0 +1,611 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Federation service."""
+
+import abc
+
+from oslo_config import cfg
+from oslo_log import versionutils
+import six
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+from keystone.federation import utils
+
+
+CONF = cfg.CONF
+EXTENSION_DATA = {
+ 'name': 'OpenStack Federation APIs',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-FEDERATION/v1.0',
+ 'alias': 'OS-FEDERATION',
+ 'updated': '2013-12-17T12:00:0-00:00',
+ 'description': 'OpenStack Identity Providers Mechanism.',
+ 'links': [{
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://specs.openstack.org/openstack/keystone-specs/api/v3/'
+ 'identity-api-v3-os-federation-ext.html',
+ }]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+
+@dependency.provider('federation_api')
+class Manager(manager.Manager):
+ """Default pivot point for the Federation backend.
+
+ See :mod:`keystone.common.manager.Manager` for more details on how this
+ dynamically calls the backend.
+
+ """
+
+ driver_namespace = 'keystone.federation'
+
+ def __init__(self):
+ super(Manager, self).__init__(CONF.federation.driver)
+
+ # Make sure it is a driver version we support, and if it is a legacy
+ # driver, then wrap it.
+ if isinstance(self.driver, FederationDriverV8):
+ self.driver = V9FederationWrapperForV8Driver(self.driver)
+ elif not isinstance(self.driver, FederationDriverV9):
+ raise exception.UnsupportedDriverVersion(
+ driver=CONF.federation.driver)
+
+ def get_enabled_service_providers(self):
+ """List enabled service providers for Service Catalog
+
+ Service Provider in a catalog contains three attributes: ``id``,
+ ``auth_url``, ``sp_url``, where:
+
+ - id is a unique, user defined identifier for service provider object
+ - auth_url is an authentication URL of remote Keystone
+ - sp_url a URL accessible at the remote service provider where SAML
+ assertion is transmitted.
+
+ :returns: list of dictionaries with enabled service providers
+ :rtype: list of dicts
+
+ """
+ def normalize(sp):
+ ref = {
+ 'auth_url': sp.auth_url,
+ 'id': sp.id,
+ 'sp_url': sp.sp_url
+ }
+ return ref
+
+ service_providers = self.driver.get_enabled_service_providers()
+ return [normalize(sp) for sp in service_providers]
+
+ def evaluate(self, idp_id, protocol_id, assertion_data):
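+        # Map raw assertion attributes through the mapping rules registered
+        # for this IdP/protocol pair; returns the mapped properties together
+        # with the id of the mapping that was used.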
+ mapping = self.get_mapping_from_idp_and_protocol(idp_id, protocol_id)
+ rules = mapping['rules']
+ rule_processor = utils.RuleProcessor(mapping['id'], rules)
+ mapped_properties = rule_processor.process(assertion_data)
+ return mapped_properties, mapping['id']
+
+
+# The FederationDriverBase class is the set of driver methods from earlier
+# drivers that we still support, that have not been removed or modified. This
+# class is then used to create the augmented V8 and V9 version abstract driver
+# classes, without having to duplicate a lot of abstract method signatures.
+# If you remove a method from V9, then move the abstract methods from this Base
+# class to the V8 class. Do not modify any of the method signatures in the Base
+# class - changes should only be made in the V8 and subsequent classes.
+
+@six.add_metaclass(abc.ABCMeta)
+class FederationDriverBase(object):
+
+ @abc.abstractmethod
+ def create_idp(self, idp_id, idp):
+ """Create an identity provider.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param idp: idp object
+ :type idp: dict
+ :returns: idp ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_idp(self, idp_id):
+ """Delete an identity provider.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_idp(self, idp_id):
+ """Get an identity provider by ID.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :returns: idp ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_idp_from_remote_id(self, remote_id):
+ """Get an identity provider by remote ID.
+
+ :param remote_id: ID of remote IdP
+        :type remote_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :returns: idp ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_idp(self, idp_id, idp):
+ """Update an identity provider by ID.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param idp: idp object
+ :type idp: dict
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :returns: idp ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_protocol(self, idp_id, protocol_id, protocol):
+ """Add an IdP-Protocol configuration.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param protocol_id: ID of protocol object
+ :type protocol_id: string
+ :param protocol: protocol object
+ :type protocol: dict
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :returns: protocol ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_protocol(self, idp_id, protocol_id, protocol):
+ """Change an IdP-Protocol configuration.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param protocol_id: ID of protocol object
+ :type protocol_id: string
+ :param protocol: protocol object
+ :type protocol: dict
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :raises keystone.exception.FederatedProtocolNotFound: If the federated
+ protocol cannot be found.
+ :returns: protocol ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_protocol(self, idp_id, protocol_id):
+ """Get an IdP-Protocol configuration.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param protocol_id: ID of protocol object
+ :type protocol_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :raises keystone.exception.FederatedProtocolNotFound: If the federated
+ protocol cannot be found.
+ :returns: protocol ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_protocols(self, idp_id):
+ """List an IdP's supported protocols.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :returns: list of protocol ref
+ :rtype: list of dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_protocol(self, idp_id, protocol_id):
+ """Delete an IdP-Protocol configuration.
+
+ :param idp_id: ID of IdP object
+ :type idp_id: string
+ :param protocol_id: ID of protocol object
+ :type protocol_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :raises keystone.exception.FederatedProtocolNotFound: If the federated
+ protocol cannot be found.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_mapping(self, mapping_id, mapping):
+ """Create a mapping.
+
+ :param mapping_id: ID of mapping object
+ :type mapping_id: string
+ :param mapping: mapping ref with mapping name
+ :type mapping: dict
+ :returns: mapping ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_mapping(self, mapping_id):
+ """Delete a mapping.
+
+ :param mapping_id: id of mapping to delete
+        :type mapping_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_mapping(self, mapping_id, mapping_ref):
+ """Update a mapping.
+
+ :param mapping_id: id of mapping to update
+ :type mapping_id: string
+ :param mapping_ref: new mapping ref
+ :type mapping_ref: dict
+ :returns: mapping ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_mappings(self):
+ """List all mappings.
+
+ :returns: list of mapping refs
+ :rtype: list of dicts
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_mapping(self, mapping_id):
+ """Get a mapping, returns the mapping based on mapping_id.
+
+ :param mapping_id: id of mapping to get
+ :type mapping_ref: string
+ :raises keystone.exception.MappingNotFound: If the mapping cannot
+ be found.
+ :returns: mapping ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+ """Get mapping based on idp_id and protocol_id.
+
+ :param idp_id: id of the identity provider
+ :type idp_id: string
+ :param protocol_id: id of the protocol
+ :type protocol_id: string
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+ :raises keystone.exception.FederatedProtocolNotFound: If the federated
+ protocol cannot be found.
+ :returns: mapping ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_sp(self, sp_id, sp):
+ """Create a service provider.
+
+ :param sp_id: id of the service provider
+ :type sp_id: string
+        :param sp: service provider object
+ :type sp: dict
+
+ :returns: service provider ref
+ :rtype: dict
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_sp(self, sp_id):
+ """Delete a service provider.
+
+ :param sp_id: id of the service provider
+ :type sp_id: string
+
+ :raises keystone.exception.ServiceProviderNotFound: If the service
+ provider doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_sp(self, sp_id):
+ """Get a service provider.
+
+ :param sp_id: id of the service provider
+ :type sp_id: string
+ :returns: service provider ref
+ :rtype: dict
+
+ :raises keystone.exception.ServiceProviderNotFound: If the service
+ provider doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_sp(self, sp_id, sp):
+ """Update a service provider.
+
+ :param sp_id: id of the service provider
+ :type sp_id: string
+        :param sp: service provider object
+ :type sp: dict
+
+ :returns: service provider ref
+ :rtype: dict
+
+ :raises keystone.exception.ServiceProviderNotFound: If the service
+ provider doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ def get_enabled_service_providers(self):
+ """List enabled service providers for Service Catalog
+
+ Service Provider in a catalog contains three attributes: ``id``,
+ ``auth_url``, ``sp_url``, where:
+
+ - id is a unique, user defined identifier for service provider object
+ - auth_url is an authentication URL of remote Keystone
+ - sp_url a URL accessible at the remote service provider where SAML
+ assertion is transmitted.
+
+ :returns: list of dictionaries with enabled service providers
+ :rtype: list of dicts
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class FederationDriverV8(FederationDriverBase):
+ """Removed or redefined methods from V8.
+
+ Move the abstract methods of any methods removed or modified in later
+ versions of the driver from FederationDriverBase to here. We maintain this
+ so that legacy drivers, which will be a subclass of FederationDriverV8, can
+ still reference them.
+
+ """
+
+ @abc.abstractmethod
+ def list_idps(self):
+ """List all identity providers.
+
+ :returns: list of idp refs
+ :rtype: list of dicts
+
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_sps(self):
+ """List all service providers.
+
+ :returns: List of service provider ref objects
+ :rtype: list of dicts
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class FederationDriverV9(FederationDriverBase):
+ """New or redefined methods from V8.
+
+ Add any new V9 abstract methods (or those with modified signatures) to
+ this class.
+
+ """
+
+ @abc.abstractmethod
+ def list_idps(self, hints):
+ """List all identity providers.
+
+ :param hints: filter hints which the driver should
+ implement if at all possible.
+ :returns: list of idp refs
+ :rtype: list of dicts
+
+ :raises keystone.exception.IdentityProviderNotFound: If the IdP
+ doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_sps(self, hints):
+ """List all service providers.
+
+ :param hints: filter hints which the driver should
+ implement if at all possible.
+ :returns: List of service provider ref objects
+ :rtype: list of dicts
+
+ :raises keystone.exception.ServiceProviderNotFound: If the SP
+ doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+class V9FederationWrapperForV8Driver(FederationDriverV9):
+ """Wrapper class to supported a V8 legacy driver.
+
+ In order to support legacy drivers without having to make the manager code
+ driver-version aware, we wrap legacy drivers so that they look like the
+ latest version. For the various changes made in a new driver, here are the
+ actions needed in this wrapper:
+
+ Method removed from new driver - remove the call-through method from this
+ class, since the manager will no longer be
+ calling it.
+ Method signature (or meaning) changed - wrap the old method in a new
+ signature here, and munge the input
+ and output parameters accordingly.
+ New method added to new driver - add a method to implement the new
+ functionality here if possible. If that is
+ not possible, then return NotImplemented,
+ since we do not guarantee to support new
+ functionality with legacy drivers.
+
+ """
+
+ @versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='keystone.federation.FederationDriverV8',
+ in_favor_of='keystone.federation.FederationDriverV9',
+ remove_in=+2)
+ def __init__(self, wrapped_driver):
+ self.driver = wrapped_driver
+
+ def create_idp(self, idp_id, idp):
+ return self.driver.create_idp(idp_id, idp)
+
+ def delete_idp(self, idp_id):
+ self.driver.delete_idp(idp_id)
+
+    # NOTE(davechen): The hints are ignored here to support legacy drivers;
+    # the filters in hints remain unsatisfied and the V3Controller wrapper
+    # applies them at the end, so the result returned for list IdPs is
+    # still filtered even with legacy drivers.
+ def list_idps(self, hints):
+ return self.driver.list_idps()
+
+ def get_idp(self, idp_id):
+ return self.driver.get_idp(idp_id)
+
+ def get_idp_from_remote_id(self, remote_id):
+ return self.driver.get_idp_from_remote_id(remote_id)
+
+ def update_idp(self, idp_id, idp):
+ return self.driver.update_idp(idp_id, idp)
+
+ def create_protocol(self, idp_id, protocol_id, protocol):
+ return self.driver.create_protocol(idp_id, protocol_id, protocol)
+
+ def update_protocol(self, idp_id, protocol_id, protocol):
+ return self.driver.update_protocol(idp_id, protocol_id, protocol)
+
+ def get_protocol(self, idp_id, protocol_id):
+ return self.driver.get_protocol(idp_id, protocol_id)
+
+ def list_protocols(self, idp_id):
+ return self.driver.list_protocols(idp_id)
+
+ def delete_protocol(self, idp_id, protocol_id):
+ self.driver.delete_protocol(idp_id, protocol_id)
+
+ def create_mapping(self, mapping_id, mapping):
+ return self.driver.create_mapping(mapping_id, mapping)
+
+ def delete_mapping(self, mapping_id):
+ self.driver.delete_mapping(mapping_id)
+
+ def update_mapping(self, mapping_id, mapping_ref):
+ return self.driver.update_mapping(mapping_id, mapping_ref)
+
+ def list_mappings(self):
+ return self.driver.list_mappings()
+
+ def get_mapping(self, mapping_id):
+ return self.driver.get_mapping(mapping_id)
+
+ def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id):
+ return self.driver.get_mapping_from_idp_and_protocol(
+ idp_id, protocol_id)
+
+ def create_sp(self, sp_id, sp):
+ return self.driver.create_sp(sp_id, sp)
+
+ def delete_sp(self, sp_id):
+ self.driver.delete_sp(sp_id)
+
+    # NOTE(davechen): The hints are ignored here to support legacy drivers;
+    # the filters in hints remain unsatisfied and the V3Controller wrapper
+    # applies them at the end, so the result returned for list SPs is
+    # still filtered even with legacy drivers.
+ def list_sps(self, hints):
+ return self.driver.list_sps()
+
+ def get_sp(self, sp_id):
+ return self.driver.get_sp(sp_id)
+
+ def update_sp(self, sp_id, sp):
+ return self.driver.update_sp(sp_id, sp)
+
+ def get_enabled_service_providers(self):
+ return self.driver.get_enabled_service_providers()
+
+
+Driver = manager.create_legacy_driver(FederationDriverV8)
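
The version handling in ``Manager.__init__`` can be sketched outside of
keystone. A minimal, self-contained analogue (class names hypothetical) of
how a legacy V8 driver is made to answer the V9 ``hints`` signature::

    class V8Driver(object):
        """Stand-in for a legacy driver: list methods take no hints."""
        def list_idps(self):
            return [{'id': 'idp1', 'enabled': True}]

    class V9WrapperForV8(object):
        """Analogue of V9FederationWrapperForV8Driver: accept hints,
        drop them, and delegate to the wrapped legacy driver."""
        def __init__(self, wrapped_driver):
            self.driver = wrapped_driver

        def list_idps(self, hints):
            # hints are ignored; unsatisfied filters are applied later
            # by the controller, as the NOTE(davechen) comments explain.
            return self.driver.list_idps()

    driver = V8Driver()
    if isinstance(driver, V8Driver):      # mirrors the isinstance check
        driver = V9WrapperForV8(driver)
    print(driver.list_idps(hints=None))   # [{'id': 'idp1', 'enabled': True}]
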
diff --git a/keystone-moon/keystone/federation/idp.py b/keystone-moon/keystone/federation/idp.py
new file mode 100644
index 00000000..494d58b9
--- /dev/null
+++ b/keystone-moon/keystone/federation/idp.py
@@ -0,0 +1,615 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import os
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import fileutils
+from oslo_utils import importutils
+from oslo_utils import timeutils
+import saml2
+from saml2 import client_base
+from saml2 import md
+from saml2.profile import ecp
+from saml2 import saml
+from saml2 import samlp
+from saml2.schema import soapenv
+from saml2 import sigver
+xmldsig = importutils.try_import("saml2.xmldsig")
+if not xmldsig:
+ xmldsig = importutils.try_import("xmldsig")
+
+from keystone.common import environment
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _, _LE
+
+
+LOG = log.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class SAMLGenerator(object):
+ """A class to generate SAML assertions."""
+
+ def __init__(self):
+ self.assertion_id = uuid.uuid4().hex
+
+ def samlize_token(self, issuer, recipient, user, user_domain_name, roles,
+ project, project_domain_name, expires_in=None):
+ """Convert Keystone attributes to a SAML assertion.
+
+ :param issuer: URL of the issuing party
+ :type issuer: string
+ :param recipient: URL of the recipient
+ :type recipient: string
+ :param user: User name
+ :type user: string
+ :param user_domain_name: User Domain name
+ :type user_domain_name: string
+ :param roles: List of role names
+ :type roles: list
+ :param project: Project name
+ :type project: string
+ :param project_domain_name: Project Domain name
+ :type project_domain_name: string
+ :param expires_in: Sets how long the assertion is valid for, in seconds
+ :type expires_in: int
+
+ :returns: XML <Response> object
+
+ """
+ expiration_time = self._determine_expiration_time(expires_in)
+ status = self._create_status()
+ saml_issuer = self._create_issuer(issuer)
+ subject = self._create_subject(user, expiration_time, recipient)
+ attribute_statement = self._create_attribute_statement(
+ user, user_domain_name, roles, project, project_domain_name)
+ authn_statement = self._create_authn_statement(issuer, expiration_time)
+ signature = self._create_signature()
+
+ assertion = self._create_assertion(saml_issuer, signature,
+ subject, authn_statement,
+ attribute_statement)
+
+ assertion = _sign_assertion(assertion)
+
+ response = self._create_response(saml_issuer, status, assertion,
+ recipient)
+ return response
+
+ def _determine_expiration_time(self, expires_in):
+ if expires_in is None:
+ expires_in = CONF.saml.assertion_expiration_time
+ now = timeutils.utcnow()
+ future = now + datetime.timedelta(seconds=expires_in)
+ return utils.isotime(future, subsecond=True)
+
+ def _create_status(self):
+ """Create an object that represents a SAML Status.
+
+ <ns0:Status xmlns:ns0="urn:oasis:names:tc:SAML:2.0:protocol">
+ <ns0:StatusCode
+ Value="urn:oasis:names:tc:SAML:2.0:status:Success" />
+ </ns0:Status>
+
+ :returns: XML <Status> object
+
+ """
+ status = samlp.Status()
+ status_code = samlp.StatusCode()
+ status_code.value = samlp.STATUS_SUCCESS
+ status_code.set_text('')
+ status.status_code = status_code
+ return status
+
+ def _create_issuer(self, issuer_url):
+ """Create an object that represents a SAML Issuer.
+
+ <ns0:Issuer
+ xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
+ Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">
+ https://acme.com/FIM/sps/openstack/saml20</ns0:Issuer>
+
+ :returns: XML <Issuer> object
+
+ """
+ issuer = saml.Issuer()
+ issuer.format = saml.NAMEID_FORMAT_ENTITY
+ issuer.set_text(issuer_url)
+ return issuer
+
+ def _create_subject(self, user, expiration_time, recipient):
+ """Create an object that represents a SAML Subject.
+
+ <ns0:Subject>
+ <ns0:NameID>
+ john@smith.com</ns0:NameID>
+ <ns0:SubjectConfirmation
+ Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
+ <ns0:SubjectConfirmationData
+ NotOnOrAfter="2014-08-19T11:53:57.243106Z"
+ Recipient="http://beta.com/Shibboleth.sso/SAML2/POST" />
+ </ns0:SubjectConfirmation>
+ </ns0:Subject>
+
+ :returns: XML <Subject> object
+
+ """
+ name_id = saml.NameID()
+ name_id.set_text(user)
+ subject_conf_data = saml.SubjectConfirmationData()
+ subject_conf_data.recipient = recipient
+ subject_conf_data.not_on_or_after = expiration_time
+ subject_conf = saml.SubjectConfirmation()
+ subject_conf.method = saml.SCM_BEARER
+ subject_conf.subject_confirmation_data = subject_conf_data
+ subject = saml.Subject()
+ subject.subject_confirmation = subject_conf
+ subject.name_id = name_id
+ return subject
+
+ def _create_attribute_statement(self, user, user_domain_name, roles,
+ project, project_domain_name):
+ """Create an object that represents a SAML AttributeStatement.
+
+ <ns0:AttributeStatement>
+ <ns0:Attribute Name="openstack_user">
+ <ns0:AttributeValue
+ xsi:type="xs:string">test_user</ns0:AttributeValue>
+ </ns0:Attribute>
+ <ns0:Attribute Name="openstack_user_domain">
+ <ns0:AttributeValue
+ xsi:type="xs:string">Default</ns0:AttributeValue>
+ </ns0:Attribute>
+ <ns0:Attribute Name="openstack_roles">
+ <ns0:AttributeValue
+ xsi:type="xs:string">admin</ns0:AttributeValue>
+ <ns0:AttributeValue
+ xsi:type="xs:string">member</ns0:AttributeValue>
+ </ns0:Attribute>
+ <ns0:Attribute Name="openstack_project">
+ <ns0:AttributeValue
+ xsi:type="xs:string">development</ns0:AttributeValue>
+ </ns0:Attribute>
+ <ns0:Attribute Name="openstack_project_domain">
+ <ns0:AttributeValue
+ xsi:type="xs:string">Default</ns0:AttributeValue>
+ </ns0:Attribute>
+ </ns0:AttributeStatement>
+
+ :returns: XML <AttributeStatement> object
+
+ """
+ def _build_attribute(attribute_name, attribute_values):
+ attribute = saml.Attribute()
+ attribute.name = attribute_name
+
+ for value in attribute_values:
+ attribute_value = saml.AttributeValue()
+ attribute_value.set_text(value)
+ attribute.attribute_value.append(attribute_value)
+
+ return attribute
+
+ user_attribute = _build_attribute('openstack_user', [user])
+ roles_attribute = _build_attribute('openstack_roles', roles)
+ project_attribute = _build_attribute('openstack_project', [project])
+ project_domain_attribute = _build_attribute(
+ 'openstack_project_domain', [project_domain_name])
+ user_domain_attribute = _build_attribute(
+ 'openstack_user_domain', [user_domain_name])
+
+ attribute_statement = saml.AttributeStatement()
+ attribute_statement.attribute.append(user_attribute)
+ attribute_statement.attribute.append(roles_attribute)
+ attribute_statement.attribute.append(project_attribute)
+ attribute_statement.attribute.append(project_domain_attribute)
+ attribute_statement.attribute.append(user_domain_attribute)
+ return attribute_statement
+
+ def _create_authn_statement(self, issuer, expiration_time):
+ """Create an object that represents a SAML AuthnStatement.
+
+ <ns0:AuthnStatement xmlns:ns0="urn:oasis:names:tc:SAML:2.0:assertion"
+ AuthnInstant="2014-07-30T03:04:25Z" SessionIndex="47335964efb"
+ SessionNotOnOrAfter="2014-07-30T03:04:26Z">
+ <ns0:AuthnContext>
+ <ns0:AuthnContextClassRef>
+ urn:oasis:names:tc:SAML:2.0:ac:classes:Password
+ </ns0:AuthnContextClassRef>
+ <ns0:AuthenticatingAuthority>
+ https://acme.com/FIM/sps/openstack/saml20
+ </ns0:AuthenticatingAuthority>
+ </ns0:AuthnContext>
+ </ns0:AuthnStatement>
+
+ :returns: XML <AuthnStatement> object
+
+ """
+ authn_statement = saml.AuthnStatement()
+ authn_statement.authn_instant = utils.isotime()
+ authn_statement.session_index = uuid.uuid4().hex
+ authn_statement.session_not_on_or_after = expiration_time
+
+ authn_context = saml.AuthnContext()
+ authn_context_class = saml.AuthnContextClassRef()
+ authn_context_class.set_text(saml.AUTHN_PASSWORD)
+
+ authn_authority = saml.AuthenticatingAuthority()
+ authn_authority.set_text(issuer)
+ authn_context.authn_context_class_ref = authn_context_class
+ authn_context.authenticating_authority = authn_authority
+
+ authn_statement.authn_context = authn_context
+
+ return authn_statement
+
+ def _create_assertion(self, issuer, signature, subject, authn_statement,
+ attribute_statement):
+ """Create an object that represents a SAML Assertion.
+
+ <ns0:Assertion
+ ID="35daed258ba647ba8962e9baff4d6a46"
+ IssueInstant="2014-06-11T15:45:58Z"
+ Version="2.0">
+ <ns0:Issuer> ... </ns0:Issuer>
+ <ns1:Signature> ... </ns1:Signature>
+ <ns0:Subject> ... </ns0:Subject>
+ <ns0:AuthnStatement> ... </ns0:AuthnStatement>
+ <ns0:AttributeStatement> ... </ns0:AttributeStatement>
+ </ns0:Assertion>
+
+ :returns: XML <Assertion> object
+
+ """
+ assertion = saml.Assertion()
+ assertion.id = self.assertion_id
+ assertion.issue_instant = utils.isotime()
+ assertion.version = '2.0'
+ assertion.issuer = issuer
+ assertion.signature = signature
+ assertion.subject = subject
+ assertion.authn_statement = authn_statement
+ assertion.attribute_statement = attribute_statement
+ return assertion
+
+ def _create_response(self, issuer, status, assertion, recipient):
+ """Create an object that represents a SAML Response.
+
+ <ns0:Response
+ Destination="http://beta.com/Shibboleth.sso/SAML2/POST"
+ ID="c5954543230e4e778bc5b92923a0512d"
+ IssueInstant="2014-07-30T03:19:45Z"
+ Version="2.0" />
+ <ns0:Issuer> ... </ns0:Issuer>
+ <ns0:Assertion> ... </ns0:Assertion>
+ <ns0:Status> ... </ns0:Status>
+ </ns0:Response>
+
+ :returns: XML <Response> object
+
+ """
+ response = samlp.Response()
+ response.id = uuid.uuid4().hex
+ response.destination = recipient
+ response.issue_instant = utils.isotime()
+ response.version = '2.0'
+ response.issuer = issuer
+ response.status = status
+ response.assertion = assertion
+ return response
+
+ def _create_signature(self):
+ """Create an object that represents a SAML <Signature>.
+
+ This must be filled with algorithms that the signing binary will apply
+ in order to sign the whole message.
+ Currently we enforce X509 signing.
+ Example of the template::
+
+ <Signature xmlns="http://www.w3.org/2000/09/xmldsig#">
+ <SignedInfo>
+ <CanonicalizationMethod
+ Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+ <SignatureMethod
+ Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+ <Reference URI="#<Assertion ID>">
+ <Transforms>
+ <Transform
+ Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
+ <Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
+ </Transforms>
+ <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+ <DigestValue />
+ </Reference>
+ </SignedInfo>
+ <SignatureValue />
+ <KeyInfo>
+ <X509Data />
+ </KeyInfo>
+ </Signature>
+
+ :returns: XML <Signature> object
+
+ """
+ canonicalization_method = xmldsig.CanonicalizationMethod()
+ canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
+ signature_method = xmldsig.SignatureMethod(
+ algorithm=xmldsig.SIG_RSA_SHA1)
+
+ transforms = xmldsig.Transforms()
+ envelope_transform = xmldsig.Transform(
+ algorithm=xmldsig.TRANSFORM_ENVELOPED)
+
+ c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
+ transforms.transform = [envelope_transform, c14_transform]
+
+ digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1)
+ digest_value = xmldsig.DigestValue()
+
+ reference = xmldsig.Reference()
+ reference.uri = '#' + self.assertion_id
+ reference.digest_method = digest_method
+ reference.digest_value = digest_value
+ reference.transforms = transforms
+
+ signed_info = xmldsig.SignedInfo()
+ signed_info.canonicalization_method = canonicalization_method
+ signed_info.signature_method = signature_method
+ signed_info.reference = reference
+
+ key_info = xmldsig.KeyInfo()
+ key_info.x509_data = xmldsig.X509Data()
+
+ signature = xmldsig.Signature()
+ signature.signed_info = signed_info
+ signature.signature_value = xmldsig.SignatureValue()
+ signature.key_info = key_info
+
+ return signature
+
+
+def _sign_assertion(assertion):
+ """Sign a SAML assertion.
+
+    This method utilizes the ``xmlsec1`` binary and signs SAML assertions
+    in a separate process. ``xmlsec1`` cannot read input data from stdin,
+    so the prepared assertion needs to be serialized and stored in a
+    temporary file. This file is deleted immediately after ``xmlsec1``
+    returns. The signed assertion is redirected to standard output and
+    read using subprocess.PIPE redirection. A ``saml.Assertion`` object
+    is created from the signed string and returned.
+
+    Parameters that are required in the CONF:
+
+    * xmlsec_binary
+    * private key file path
+    * public key file path
+
+    :returns: XML <Assertion> object
+
+ """
+ xmlsec_binary = CONF.saml.xmlsec1_binary
+ idp_private_key = CONF.saml.keyfile
+ idp_public_key = CONF.saml.certfile
+
+ # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID <tag> <file>
+ certificates = '%(idp_private_key)s,%(idp_public_key)s' % {
+ 'idp_public_key': idp_public_key,
+ 'idp_private_key': idp_private_key
+ }
+
+ command_list = [xmlsec_binary, '--sign', '--privkey-pem', certificates,
+ '--id-attr:ID', 'Assertion']
+
+ file_path = None
+ try:
+ # NOTE(gyee): need to make the namespace prefixes explicit so
+ # they won't get reassigned when we wrap the assertion into
+ # SAML2 response
+ file_path = fileutils.write_to_tempfile(assertion.to_string(
+ nspair={'saml': saml2.NAMESPACE,
+ 'xmldsig': xmldsig.NAMESPACE}))
+ command_list.append(file_path)
+ subprocess = environment.subprocess
+ stdout = subprocess.check_output(command_list, # nosec : The contents
+ # of the command list are coming from
+ # a trusted source because the
+ # executable and arguments all either
+ # come from the config file or are
+ # hardcoded. The command list is
+ # initialized earlier in this function
+ # to a list and it's still a list at
+ # this point in the function. There is
+ # no opportunity for an attacker to
+ # attempt command injection via string
+ # parsing.
+ stderr=subprocess.STDOUT)
+ except Exception as e:
+ msg = _LE('Error when signing assertion, reason: %(reason)s%(output)s')
+ LOG.error(msg,
+ {'reason': e,
+ 'output': ' ' + e.output if hasattr(e, 'output') else ''})
+ raise exception.SAMLSigningError(reason=e)
+ finally:
+ try:
+ if file_path:
+ os.remove(file_path)
+ except OSError: # nosec
+ # The file is already gone, good.
+ pass
+
+ return saml2.create_class_from_xml_string(saml.Assertion, stdout)
+
+
+class MetadataGenerator(object):
+ """A class for generating SAML IdP Metadata."""
+
+ def generate_metadata(self):
+ """Generate Identity Provider Metadata.
+
+ Generate and format metadata into XML that can be exposed and
+ consumed by a federated Service Provider.
+
+ :returns: XML <EntityDescriptor> object.
+ :raises keystone.exception.ValidationError: If the required
+ config options aren't set.
+ """
+ self._ensure_required_values_present()
+ entity_descriptor = self._create_entity_descriptor()
+ entity_descriptor.idpsso_descriptor = (
+ self._create_idp_sso_descriptor())
+ return entity_descriptor
+
+ def _create_entity_descriptor(self):
+ ed = md.EntityDescriptor()
+ ed.entity_id = CONF.saml.idp_entity_id
+ return ed
+
+ def _create_idp_sso_descriptor(self):
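+        # Build the <IDPSSODescriptor> from CONF.saml values; the
+        # organization and contact blocks are only emitted when all of
+        # their config options are set.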
+
+ def get_cert():
+ try:
+ return sigver.read_cert_from_file(CONF.saml.certfile, 'pem')
+ except (IOError, sigver.CertificateError) as e:
+ msg = _('Cannot open certificate %(cert_file)s. '
+ 'Reason: %(reason)s')
+ msg = msg % {'cert_file': CONF.saml.certfile, 'reason': e}
+ LOG.error(msg)
+ raise IOError(msg)
+
+ def key_descriptor():
+ cert = get_cert()
+ return md.KeyDescriptor(
+ key_info=xmldsig.KeyInfo(
+ x509_data=xmldsig.X509Data(
+ x509_certificate=xmldsig.X509Certificate(text=cert)
+ )
+ ), use='signing'
+ )
+
+ def single_sign_on_service():
+ idp_sso_endpoint = CONF.saml.idp_sso_endpoint
+ return md.SingleSignOnService(
+ binding=saml2.BINDING_URI,
+ location=idp_sso_endpoint)
+
+ def organization():
+ name = md.OrganizationName(lang=CONF.saml.idp_lang,
+ text=CONF.saml.idp_organization_name)
+ display_name = md.OrganizationDisplayName(
+ lang=CONF.saml.idp_lang,
+ text=CONF.saml.idp_organization_display_name)
+ url = md.OrganizationURL(lang=CONF.saml.idp_lang,
+ text=CONF.saml.idp_organization_url)
+
+ return md.Organization(
+ organization_display_name=display_name,
+ organization_url=url, organization_name=name)
+
+ def contact_person():
+ company = md.Company(text=CONF.saml.idp_contact_company)
+ given_name = md.GivenName(text=CONF.saml.idp_contact_name)
+ surname = md.SurName(text=CONF.saml.idp_contact_surname)
+ email = md.EmailAddress(text=CONF.saml.idp_contact_email)
+ telephone = md.TelephoneNumber(
+ text=CONF.saml.idp_contact_telephone)
+ contact_type = CONF.saml.idp_contact_type
+
+ return md.ContactPerson(
+ company=company, given_name=given_name, sur_name=surname,
+ email_address=email, telephone_number=telephone,
+ contact_type=contact_type)
+
+ def name_id_format():
+ return md.NameIDFormat(text=saml.NAMEID_FORMAT_TRANSIENT)
+
+ idpsso = md.IDPSSODescriptor()
+ idpsso.protocol_support_enumeration = samlp.NAMESPACE
+ idpsso.key_descriptor = key_descriptor()
+ idpsso.single_sign_on_service = single_sign_on_service()
+ idpsso.name_id_format = name_id_format()
+ if self._check_organization_values():
+ idpsso.organization = organization()
+ if self._check_contact_person_values():
+ idpsso.contact_person = contact_person()
+ return idpsso
+
+ def _ensure_required_values_present(self):
+ """Ensure idp_sso_endpoint and idp_entity_id have values."""
+ if CONF.saml.idp_entity_id is None:
+ msg = _('Ensure configuration option idp_entity_id is set.')
+ raise exception.ValidationError(msg)
+ if CONF.saml.idp_sso_endpoint is None:
+ msg = _('Ensure configuration option idp_sso_endpoint is set.')
+ raise exception.ValidationError(msg)
+
+ def _check_contact_person_values(self):
+ """Determine if contact information is included in metadata."""
+ # Check if we should include contact information
+ params = [CONF.saml.idp_contact_company,
+ CONF.saml.idp_contact_name,
+ CONF.saml.idp_contact_surname,
+ CONF.saml.idp_contact_email,
+ CONF.saml.idp_contact_telephone]
+ for value in params:
+ if value is None:
+ return False
+
+ # Check if contact type is an invalid value
+ valid_type_values = ['technical', 'other', 'support', 'administrative',
+ 'billing']
+ if CONF.saml.idp_contact_type not in valid_type_values:
+            msg = _('idp_contact_type must be one of: [technical, other, '
+                    'support, administrative or billing].')
+ raise exception.ValidationError(msg)
+ return True
+
+ def _check_organization_values(self):
+ """Determine if organization information is included in metadata."""
+ params = [CONF.saml.idp_organization_name,
+ CONF.saml.idp_organization_display_name,
+ CONF.saml.idp_organization_url]
+ for value in params:
+ if value is None:
+ return False
+ return True
+
+
+class ECPGenerator(object):
+ """A class for generating an ECP assertion."""
+
+ @staticmethod
+ def generate_ecp(saml_assertion, relay_state_prefix):
+ ecp_generator = ECPGenerator()
+ header = ecp_generator._create_header(relay_state_prefix)
+ body = ecp_generator._create_body(saml_assertion)
+ envelope = soapenv.Envelope(header=header, body=body)
+ return envelope
+
+ def _create_header(self, relay_state_prefix):
+ relay_state_text = relay_state_prefix + uuid.uuid4().hex
+ relay_state = ecp.RelayState(actor=client_base.ACTOR,
+ must_understand='1',
+ text=relay_state_text)
+ header = soapenv.Header()
+ header.extension_elements = (
+ [saml2.element_to_extension_element(relay_state)])
+ return header
+
+ def _create_body(self, saml_assertion):
+ body = soapenv.Body()
+ body.extension_elements = (
+ [saml2.element_to_extension_element(saml_assertion)])
+ return body
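
A minimal sketch of the external signing step performed by
``_sign_assertion``, assuming ``xmlsec1`` is on the PATH; the key and
certificate paths here are hypothetical::

    import os
    import subprocess
    import tempfile

    def sign_xml(xml_string,
                 key='/etc/keystone/ssl/private/signing_key.pem',
                 cert='/etc/keystone/ssl/certs/signing_cert.pem'):
        # xmlsec1 cannot read from stdin, so serialize the assertion to
        # a temporary file, sign it, and capture the signed output.
        fd, path = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'w') as f:
                f.write(xml_string)
            return subprocess.check_output(
                ['xmlsec1', '--sign',
                 '--privkey-pem', '%s,%s' % (key, cert),
                 '--id-attr:ID', 'Assertion', path],
                stderr=subprocess.STDOUT)
        finally:
            os.remove(path)
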
diff --git a/keystone-moon/keystone/federation/routers.py b/keystone-moon/keystone/federation/routers.py
new file mode 100644
index 00000000..a463ca63
--- /dev/null
+++ b/keystone-moon/keystone/federation/routers.py
@@ -0,0 +1,252 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.federation import controllers
+
+
+build_resource_relation = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-FEDERATION', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+ json_home.build_v3_extension_parameter_relation,
+ extension_name='OS-FEDERATION', extension_version='1.0')
+
+IDP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='idp_id')
+PROTOCOL_ID_PARAMETER_RELATION = build_parameter_relation(
+ parameter_name='protocol_id')
+SP_ID_PARAMETER_RELATION = build_parameter_relation(parameter_name='sp_id')
+
+
+class Routers(wsgi.RoutersBase):
+ """API Endpoints for the Federation extension.
+
+ The API looks like::
+
+ PUT /OS-FEDERATION/identity_providers/{idp_id}
+ GET /OS-FEDERATION/identity_providers
+ GET /OS-FEDERATION/identity_providers/{idp_id}
+ DELETE /OS-FEDERATION/identity_providers/{idp_id}
+ PATCH /OS-FEDERATION/identity_providers/{idp_id}
+
+ PUT /OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}
+ GET /OS-FEDERATION/identity_providers/
+ {idp_id}/protocols
+ GET /OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}
+ PATCH /OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}
+ DELETE /OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}
+
+ PUT /OS-FEDERATION/mappings
+ GET /OS-FEDERATION/mappings
+ PATCH /OS-FEDERATION/mappings/{mapping_id}
+ GET /OS-FEDERATION/mappings/{mapping_id}
+ DELETE /OS-FEDERATION/mappings/{mapping_id}
+
+ GET /OS-FEDERATION/projects
+ GET /OS-FEDERATION/domains
+
+ PUT /OS-FEDERATION/service_providers/{sp_id}
+ GET /OS-FEDERATION/service_providers
+ GET /OS-FEDERATION/service_providers/{sp_id}
+ DELETE /OS-FEDERATION/service_providers/{sp_id}
+ PATCH /OS-FEDERATION/service_providers/{sp_id}
+
+ GET /OS-FEDERATION/identity_providers/{idp_id}/
+ protocols/{protocol_id}/auth
+ POST /OS-FEDERATION/identity_providers/{idp_id}/
+ protocols/{protocol_id}/auth
+ GET /auth/OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}/websso
+ ?origin=https%3A//horizon.example.com
+ POST /auth/OS-FEDERATION/identity_providers/
+ {idp_id}/protocols/{protocol_id}/websso
+ ?origin=https%3A//horizon.example.com
+
+ POST /auth/OS-FEDERATION/saml2
+ POST /auth/OS-FEDERATION/saml2/ecp
+ GET /OS-FEDERATION/saml2/metadata
+
+ GET /auth/OS-FEDERATION/websso/{protocol_id}
+ ?origin=https%3A//horizon.example.com
+
+ POST /auth/OS-FEDERATION/websso/{protocol_id}
+ ?origin=https%3A//horizon.example.com
+
+ """
+
+ def _construct_url(self, suffix):
+ return "/OS-FEDERATION/%s" % suffix
+
+ def append_v3_routers(self, mapper, routers):
+ auth_controller = controllers.Auth()
+ idp_controller = controllers.IdentityProvider()
+ protocol_controller = controllers.FederationProtocol()
+ mapping_controller = controllers.MappingController()
+ project_controller = controllers.ProjectAssignmentV3()
+ domain_controller = controllers.DomainV3()
+ saml_metadata_controller = controllers.SAMLMetadataV3()
+ sp_controller = controllers.ServiceProvider()
+
+ # Identity Provider CRUD operations
+
+ self._add_resource(
+ mapper, idp_controller,
+ path=self._construct_url('identity_providers/{idp_id}'),
+ get_action='get_identity_provider',
+ put_action='create_identity_provider',
+ patch_action='update_identity_provider',
+ delete_action='delete_identity_provider',
+ rel=build_resource_relation(resource_name='identity_provider'),
+ path_vars={
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ })
+ self._add_resource(
+ mapper, idp_controller,
+ path=self._construct_url('identity_providers'),
+ get_action='list_identity_providers',
+ rel=build_resource_relation(resource_name='identity_providers'))
+
+ # Protocol CRUD operations
+
+ self._add_resource(
+ mapper, protocol_controller,
+ path=self._construct_url('identity_providers/{idp_id}/protocols/'
+ '{protocol_id}'),
+ get_action='get_protocol',
+ put_action='create_protocol',
+ patch_action='update_protocol',
+ delete_action='delete_protocol',
+ rel=build_resource_relation(
+ resource_name='identity_provider_protocol'),
+ path_vars={
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+ })
+ self._add_resource(
+ mapper, protocol_controller,
+ path=self._construct_url('identity_providers/{idp_id}/protocols'),
+ get_action='list_protocols',
+ rel=build_resource_relation(
+ resource_name='identity_provider_protocols'),
+ path_vars={
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ })
+
+ # Mapping CRUD operations
+
+ self._add_resource(
+ mapper, mapping_controller,
+ path=self._construct_url('mappings/{mapping_id}'),
+ get_action='get_mapping',
+ put_action='create_mapping',
+ patch_action='update_mapping',
+ delete_action='delete_mapping',
+ rel=build_resource_relation(resource_name='mapping'),
+ path_vars={
+ 'mapping_id': build_parameter_relation(
+ parameter_name='mapping_id'),
+ })
+ self._add_resource(
+ mapper, mapping_controller,
+ path=self._construct_url('mappings'),
+ get_action='list_mappings',
+ rel=build_resource_relation(resource_name='mappings'))
+
+ # Service Providers CRUD operations
+
+ self._add_resource(
+ mapper, sp_controller,
+ path=self._construct_url('service_providers/{sp_id}'),
+ get_action='get_service_provider',
+ put_action='create_service_provider',
+ patch_action='update_service_provider',
+ delete_action='delete_service_provider',
+ rel=build_resource_relation(resource_name='service_provider'),
+ path_vars={
+ 'sp_id': SP_ID_PARAMETER_RELATION,
+ })
+
+ self._add_resource(
+ mapper, sp_controller,
+ path=self._construct_url('service_providers'),
+ get_action='list_service_providers',
+ rel=build_resource_relation(resource_name='service_providers'))
+
+ self._add_resource(
+ mapper, domain_controller,
+ path=self._construct_url('domains'),
+ new_path='/auth/domains',
+ get_action='list_domains_for_groups',
+ rel=build_resource_relation(resource_name='domains'))
+ self._add_resource(
+ mapper, project_controller,
+ path=self._construct_url('projects'),
+ new_path='/auth/projects',
+ get_action='list_projects_for_groups',
+ rel=build_resource_relation(resource_name='projects'))
+
+ # Auth operations
+ self._add_resource(
+ mapper, auth_controller,
+ path=self._construct_url('identity_providers/{idp_id}/'
+ 'protocols/{protocol_id}/auth'),
+ get_post_action='federated_authentication',
+ rel=build_resource_relation(
+ resource_name='identity_provider_protocol_auth'),
+ path_vars={
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+ })
+ self._add_resource(
+ mapper, auth_controller,
+ path='/auth' + self._construct_url('saml2'),
+ post_action='create_saml_assertion',
+ rel=build_resource_relation(resource_name='saml2'))
+ self._add_resource(
+ mapper, auth_controller,
+ path='/auth' + self._construct_url('saml2/ecp'),
+ post_action='create_ecp_assertion',
+ rel=build_resource_relation(resource_name='ecp'))
+ self._add_resource(
+ mapper, auth_controller,
+ path='/auth' + self._construct_url('websso/{protocol_id}'),
+ get_post_action='federated_sso_auth',
+ rel=build_resource_relation(resource_name='websso'),
+ path_vars={
+ 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+ })
+ self._add_resource(
+ mapper, auth_controller,
+ path='/auth' + self._construct_url(
+ 'identity_providers/{idp_id}/protocols/{protocol_id}/websso'),
+ get_post_action='federated_idp_specific_sso_auth',
+ rel=build_resource_relation(resource_name='identity_providers'),
+ path_vars={
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION,
+ })
+
+ # Keystone-Identity-Provider metadata endpoint
+ self._add_resource(
+ mapper, saml_metadata_controller,
+ path=self._construct_url('saml2/metadata'),
+ get_action='get_metadata',
+ rel=build_resource_relation(resource_name='metadata'))
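
For orientation, the ``build_resource_relation`` partial defined at the top
of this file expands to keystone's standard json-home relation URL for v3
extensions; an illustrative expansion (URL shape assumed from
``keystone.common.json_home``)::

    rel = build_resource_relation(resource_name='identity_provider')
    # rel == ('http://docs.openstack.org/api/openstack-identity/3/ext/'
    #         'OS-FEDERATION/1.0/rel/identity_provider')
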
diff --git a/keystone-moon/keystone/federation/schema.py b/keystone-moon/keystone/federation/schema.py
new file mode 100644
index 00000000..6cdfd1f5
--- /dev/null
+++ b/keystone-moon/keystone/federation/schema.py
@@ -0,0 +1,115 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+
+basic_property_id = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {
+ 'type': 'string'
+ }
+ },
+ 'required': ['id'],
+ 'additionalProperties': False
+}
+
+saml_create = {
+ 'type': 'object',
+ 'properties': {
+ 'identity': {
+ 'type': 'object',
+ 'properties': {
+ 'token': basic_property_id,
+ 'methods': {
+ 'type': 'array'
+ }
+ },
+ 'required': ['token'],
+ 'additionalProperties': False
+ },
+ 'scope': {
+ 'type': 'object',
+ 'properties': {
+ 'service_provider': basic_property_id
+ },
+ 'required': ['service_provider'],
+ 'additionalProperties': False
+ },
+ },
+ 'required': ['identity', 'scope'],
+ 'additionalProperties': False
+}
+
+_service_provider_properties = {
+ # NOTE(rodrigods): The database accepts URLs with 256 as max length,
+ # but parameter_types.url uses 225 as max length.
+ 'auth_url': parameter_types.url,
+ 'sp_url': parameter_types.url,
+ 'description': validation.nullable(parameter_types.description),
+ 'enabled': parameter_types.boolean,
+ 'relay_state_prefix': validation.nullable(parameter_types.description)
+}
+
+service_provider_create = {
+ 'type': 'object',
+ 'properties': _service_provider_properties,
+ # NOTE(rodrigods): 'id' is not required since it is passed in the URL
+ 'required': ['auth_url', 'sp_url'],
+ 'additionalProperties': False
+}
+
+service_provider_update = {
+ 'type': 'object',
+ 'properties': _service_provider_properties,
+ # Make sure at least one property is being updated
+ 'minProperties': 1,
+ 'additionalProperties': False
+}
+
+_identity_provider_properties = {
+ 'enabled': parameter_types.boolean,
+ 'description': validation.nullable(parameter_types.description),
+ 'remote_ids': {
+ 'type': ['array', 'null'],
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True
+ }
+}
+
+identity_provider_create = {
+ 'type': 'object',
+ 'properties': _identity_provider_properties,
+ 'additionalProperties': False
+}
+
+identity_provider_update = {
+ 'type': 'object',
+ 'properties': _identity_provider_properties,
+ # Make sure at least one property is being updated
+ 'minProperties': 1,
+ 'additionalProperties': False
+}
+
+federation_protocol_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'mapping_id': parameter_types.mapping_id_string
+ },
+ # `mapping_id` is the property that cannot be ignored
+ 'minProperties': 1,
+ 'additionalProperties': False
+}
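
As a quick sanity check, here is a minimal service provider payload that
satisfies ``service_provider_create`` above (the endpoint URLs are
hypothetical; only ``auth_url`` and ``sp_url`` are required)::

    import jsonschema

    sp_ref = {
        'auth_url': 'https://sp.example.com:5000/v3/OS-FEDERATION/'
                    'identity_providers/acme/protocols/saml2/auth',
        'sp_url': 'https://sp.example.com:5000/Shibboleth.sso/SAML2/ECP',
        'enabled': True,
    }
    # Raises jsonschema.ValidationError for a non-conforming payload.
    jsonschema.validate(sp_ref, service_provider_create)
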
diff --git a/keystone-moon/keystone/federation/utils.py b/keystone-moon/keystone/federation/utils.py
new file mode 100644
index 00000000..1d215a68
--- /dev/null
+++ b/keystone-moon/keystone/federation/utils.py
@@ -0,0 +1,872 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities for Federation Extension."""
+
+import ast
+import re
+
+import jsonschema
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import timeutils
+import six
+
+from keystone import exception
+from keystone.i18n import _, _LW
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+class UserType(object):
+ """User mapping type."""
+
+ EPHEMERAL = 'ephemeral'
+ LOCAL = 'local'
+
+
+MAPPING_SCHEMA = {
+ "type": "object",
+ "required": ['rules'],
+ "properties": {
+ "rules": {
+ "minItems": 1,
+ "type": "array",
+ "items": {
+ "type": "object",
+ "required": ['local', 'remote'],
+ "additionalProperties": False,
+ "properties": {
+ "local": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "user": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "string"},
+ "name": {"type": "string"},
+ "email": {"type": "string"},
+ "domain": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "string"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": False,
+ },
+ "type": {
+ "type": "string",
+ "enum": [UserType.EPHEMERAL,
+ UserType.LOCAL]
+ }
+ },
+ "additionalProperties": False
+ },
+ "group": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "string"},
+ "name": {"type": "string"},
+ "domain": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "string"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": False,
+ },
+ },
+ "additionalProperties": False,
+ },
+ "groups": {
+ "type": "string"
+ },
+ "group_ids": {
+ "type": "string"
+ },
+ "domain": {
+ "type": "object",
+ "properties": {
+ "id": {"type": "string"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": False
+ }
+ }
+ }
+ },
+ "remote": {
+ "minItems": 1,
+ "type": "array",
+ "items": {
+ "type": "object",
+ "oneOf": [
+ {"$ref": "#/definitions/empty"},
+ {"$ref": "#/definitions/any_one_of"},
+ {"$ref": "#/definitions/not_any_of"},
+ {"$ref": "#/definitions/blacklist"},
+ {"$ref": "#/definitions/whitelist"}
+ ],
+ }
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "empty": {
+ "type": "object",
+ "required": ['type'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ },
+ "additionalProperties": False,
+ },
+ "any_one_of": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'any_one_of'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "any_one_of": {
+ "type": "array"
+ },
+ "regex": {
+ "type": "boolean"
+ }
+ }
+ },
+ "not_any_of": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'not_any_of'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "not_any_of": {
+ "type": "array"
+ },
+ "regex": {
+ "type": "boolean"
+ }
+ }
+ },
+ "blacklist": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'blacklist'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "blacklist": {
+ "type": "array"
+ }
+ }
+ },
+ "whitelist": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ['type', 'whitelist'],
+ "properties": {
+ "type": {
+ "type": "string"
+ },
+ "whitelist": {
+ "type": "array"
+ }
+ }
+ }
+ }
+}
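+# Illustrative only: a one-rule mapping that validates against MAPPING_SCHEMA.
+# It maps the remote 'UserName' attribute onto an ephemeral user and requires
+# 'orgPersonType' to be 'Contractor':
+#
+#     {"rules": [{
+#         "local": [{"user": {"name": "{0}", "type": "ephemeral"}}],
+#         "remote": [
+#             {"type": "UserName"},
+#             {"type": "orgPersonType", "any_one_of": ["Contractor"]}
+#         ]
+#     }]}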
+
+
+class DirectMaps(object):
+ """An abstraction around the remote matches.
+
+ Each match is treated internally as a list.
+ """
+
+ def __init__(self):
+ self._matches = []
+
+ def add(self, values):
+ """Adds a matched value to the list of matches.
+
+ :param list value: the match to save
+
+ """
+ self._matches.append(values)
+
+ def __getitem__(self, idx):
+ """Used by Python when executing ``''.format(*DirectMaps())``."""
+ value = self._matches[idx]
+ if isinstance(value, list) and len(value) == 1:
+ return value[0]
+ else:
+ return value
+
+
+def validate_mapping_structure(ref):
+ v = jsonschema.Draft4Validator(MAPPING_SCHEMA)
+
+ messages = ''
+ for error in sorted(v.iter_errors(ref), key=str):
+ messages = messages + error.message + "\n"
+
+ if messages:
+ raise exception.ValidationError(messages)
+
+
+def validate_expiration(token_ref):
+ if timeutils.utcnow() > token_ref.expires:
+ raise exception.Unauthorized(_('Federation token is expired'))
+
+
+def validate_groups_cardinality(group_ids, mapping_id):
+ """Check if groups list is non-empty.
+
+ :param group_ids: list of group ids
+ :type group_ids: list of str
+
+ :raises keystone.exception.MissingGroups: if ``group_ids`` cardinality is 0
+
+ """
+ if not group_ids:
+ raise exception.MissingGroups(mapping_id=mapping_id)
+
+
+def get_remote_id_parameter(protocol):
+ # NOTE(marco-fargetta): Since we support any protocol ID, we attempt to
+ # retrieve the remote_id_attribute of the protocol ID. If it's not
+ # registered in the config, then register the option and try again.
+ # This allows the user to register protocols other than oidc and saml2.
+ remote_id_parameter = None
+ try:
+ remote_id_parameter = CONF[protocol]['remote_id_attribute']
+ except AttributeError:
+ CONF.register_opt(cfg.StrOpt('remote_id_attribute'),
+ group=protocol)
+ try:
+ remote_id_parameter = CONF[protocol]['remote_id_attribute']
+ except AttributeError: # nosec
+ # No remote ID attr, will be logged and use the default instead.
+ pass
+ if not remote_id_parameter:
+ LOG.debug('Cannot find "remote_id_attribute" in configuration '
+ 'group %s. Trying default location in '
+ 'group federation.', protocol)
+ remote_id_parameter = CONF.federation.remote_id_attribute
+
+ return remote_id_parameter
+
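+# For example, with the Shibboleth setup from the federation docs,
+# keystone.conf would carry '[saml2] remote_id_attribute =
+# Shib-Identity-Provider' (illustrative value), and this helper then returns
+# that attribute name for the 'saml2' protocol.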
+
+def validate_idp(idp, protocol, assertion):
+ """The IdP providing the assertion should be registered for the mapping."""
+ remote_id_parameter = get_remote_id_parameter(protocol)
+ if not remote_id_parameter or not idp['remote_ids']:
+ LOG.debug('Impossible to identify the IdP %s ', idp['id'])
+ # If nothing is defined, the administrator may want to
+ # allow the mapping of every IdP
+ return
+ try:
+ idp_remote_identifier = assertion[remote_id_parameter]
+ except KeyError:
+ msg = _('Could not find Identity Provider identifier in '
+ 'environment')
+ raise exception.ValidationError(msg)
+ if idp_remote_identifier not in idp['remote_ids']:
+ msg = _('Incoming identity provider identifier not included '
+ 'among the accepted identifiers.')
+ raise exception.Forbidden(msg)
+
+
+def validate_groups_in_backend(group_ids, mapping_id, identity_api):
+ """Iterate over group ids and make sure they are present in the backend.
+
+ This call is not transactional.
+
+ :param group_ids: IDs of the groups to be checked
+ :type group_ids: list of str
+
+ :param mapping_id: id of the mapping used for this operation
+ :type mapping_id: str
+
+ :param identity_api: Identity Manager object used for communication with
+ backend
+ :type identity_api: identity.Manager
+
+ :raises keystone.exception.MappedGroupNotFound: If the group returned by
+ mapping was not found in the backend.
+
+ """
+ for group_id in group_ids:
+ try:
+ identity_api.get_group(group_id)
+ except exception.GroupNotFound:
+ raise exception.MappedGroupNotFound(
+ group_id=group_id, mapping_id=mapping_id)
+
+
+def validate_groups(group_ids, mapping_id, identity_api):
+ """Check group ids cardinality and check their existence in the backend.
+
+ This call is not transactional.
+ :param group_ids: IDs of the groups to be checked
+ :type group_ids: list of str
+
+ :param mapping_id: id of the mapping used for this operation
+ :type mapping_id: str
+
+ :param identity_api: Identity Manager object used for communication with
+ backend
+ :type identity_api: identity.Manager
+
+ :raises keystone.exception.MappedGroupNotFound: If the group returned by
+ mapping was not found in the backend.
+ :raises keystone.exception.MissingGroups: If ``group_ids`` cardinality
+ is 0.
+
+ """
+ validate_groups_cardinality(group_ids, mapping_id)
+ validate_groups_in_backend(group_ids, mapping_id, identity_api)
+
+
+# TODO(marek-denis): Optimize this function, so the number of calls to the
+# backend are minimized.
+def transform_to_group_ids(group_names, mapping_id,
+ identity_api, resource_api):
+ """Transform groups identified by name/domain to their ids
+
+ Function accepts list of groups identified by a name and domain giving
+ a list of group ids in return.
+
+ Example of group_names parameter::
+
+ [
+ {
+ "name": "group_name",
+ "domain": {
+ "id": "domain_id"
+ },
+ },
+ {
+ "name": "group_name_2",
+ "domain": {
+ "name": "domain_name"
+ }
+ }
+ ]
+
+ :param group_names: list of group identified by name and its domain.
+ :type group_names: list
+
+ :param mapping_id: id of the mapping used for mapping assertion into
+ local credentials
+ :type mapping_id: str
+
+ :param identity_api: identity_api object
+ :param resource_api: resource manager object
+
+ :returns: generator object with group ids
+
+ :raises keystone.exception.MappedGroupNotFound: if a requested group does
+ not exist in the backend.
+
+ """
+ def resolve_domain(domain):
+ """Return domain id.
+
+ Input is a dictionary with a domain identified either by an ``id`` or a
+ ``name``. In the latter case, the system will attempt to fetch the
+ domain object from the backend.
+
+ :returns: domain's id
+ :rtype: str
+
+ """
+ domain_id = (domain.get('id') or
+ resource_api.get_domain_by_name(
+ domain.get('name')).get('id'))
+ return domain_id
+
+ for group in group_names:
+ try:
+ group_dict = identity_api.get_group_by_name(
+ group['name'], resolve_domain(group['domain']))
+ yield group_dict['id']
+ except exception.GroupNotFound:
+ LOG.debug('Skip mapping group %s; has no entry in the backend',
+ group['name'])
+
+
+def get_assertion_params_from_env(context):
+ LOG.debug('Environment variables: %s', context['environment'])
+ prefix = CONF.federation.assertion_prefix
+ for k, v in list(context['environment'].items()):
+ if not k.startswith(prefix):
+ continue
+ # These bytes may be decodable as ISO-8859-1 according to Section
+ # 3.2.4 of RFC 7230. Let's assume that our web server plugins are
+ # correctly encoding the data.
+ if not isinstance(v, six.text_type) and getattr(v, 'decode', False):
+ v = v.decode('ISO-8859-1')
+ yield (k, v)
+
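+# Example: with CONF.federation.assertion_prefix left at its default of '',
+# every environment entry is considered; a Shibboleth-populated request might
+# then yield pairs such as ('UserName', 'testacct') and
+# ('orgPersonType', 'Tester;Contractor'). (Illustrative values.)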
+
+class RuleProcessor(object):
+ """A class to process assertions and mapping rules."""
+
+ class _EvalType(object):
+ """Mapping rule evaluation types."""
+
+ ANY_ONE_OF = 'any_one_of'
+ NOT_ANY_OF = 'not_any_of'
+ BLACKLIST = 'blacklist'
+ WHITELIST = 'whitelist'
+
+ def __init__(self, mapping_id, rules):
+ """Initialize RuleProcessor.
+
+ Example rules can be found at:
+ :class:`keystone.tests.mapping_fixtures`
+
+ :param mapping_id: id for the mapping
+ :type mapping_id: string
+ :param rules: rules from a mapping
+ :type rules: dict
+
+ """
+ self.mapping_id = mapping_id
+ self.rules = rules
+
+ def process(self, assertion_data):
+ """Transform assertion to a dictionary.
+
+ The dictionary contains mapping of user name and group ids
+ based on mapping rules.
+
+ This function will iterate through the mapping rules to find
+ assertions that are valid.
+
+ :param assertion_data: an assertion containing values from an IdP
+ :type assertion_data: dict
+
+ Example assertion_data::
+
+ {
+ 'Email': 'testacct@example.com',
+ 'UserName': 'testacct',
+ 'FirstName': 'Test',
+ 'LastName': 'Account',
+ 'orgPersonType': 'Tester'
+ }
+
+ :returns: dictionary with user and group_ids
+
+ The expected return structure is::
+
+ {
+ 'name': 'foobar',
+ 'group_ids': ['abc123', 'def456'],
+ 'group_names': [
+ {
+ 'name': 'group_name_1',
+ 'domain': {
+ 'name': 'domain1'
+ }
+ },
+ {
+ 'name': 'group_name_1_1',
+ 'domain': {
+ 'name': 'domain1'
+ }
+ },
+ {
+ 'name': 'group_name_2',
+ 'domain': {
+ 'id': 'xyz132'
+ }
+ }
+ ]
+ }
+
+ """
+ # Assertions will come in as string key-value pairs and will use a
+ # semicolon to indicate multiple values (e.g. groups).
+ # This will create a new dictionary where the values are arrays, and
+ # any multiple values are stored in the arrays.
+ LOG.debug('assertion data: %s', assertion_data)
+ assertion = {n: v.split(';') for n, v in assertion_data.items()
+ if isinstance(v, six.string_types)}
+ LOG.debug('assertion: %s', assertion)
+ identity_values = []
+
+ LOG.debug('rules: %s', self.rules)
+ for rule in self.rules:
+ direct_maps = self._verify_all_requirements(rule['remote'],
+ assertion)
+
+ # If the compare comes back as None, then the rule did not apply
+ # to the assertion data, go on to the next rule
+ if direct_maps is None:
+ continue
+
+ # If there are no direct mappings, then add the local mapping
+ # directly to the array of saved values. However, if there is
+ # a direct mapping, then perform variable replacement.
+ if not direct_maps:
+ identity_values += rule['local']
+ else:
+ for local in rule['local']:
+ new_local = self._update_local_mapping(local, direct_maps)
+ identity_values.append(new_local)
+
+ LOG.debug('identity_values: %s', identity_values)
+ mapped_properties = self._transform(identity_values)
+ LOG.debug('mapped_properties: %s', mapped_properties)
+ return mapped_properties
+
+ def _transform(self, identity_values):
+ """Transform local mappings, to an easier to understand format.
+
+ Transform the incoming array to generate the return value for
+ the process function. Generating content for Keystone tokens will
+ be easier if some pre-processing is done at this level.
+
+ :param identity_values: local mapping from valid evaluations
+ :type identity_values: array of dict
+
+ Example identity_values::
+
+ [
+ {
+ 'group': {'id': '0cd5e9'},
+ 'user': {
+ 'email': 'bob@example.com'
+ },
+ },
+ {
+ 'groups': ['member', 'admin', 'tester'],
+ 'domain': {
+ 'name': 'default_domain'
+ }
+ },
+ {
+ 'group_ids': ['abc123', 'def456', '0cd5e9']
+ }
+ ]
+
+ :returns: dictionary with user name, group_ids and group_names.
+ :rtype: dict
+
+ """
+ def extract_groups(groups_by_domain):
+ for groups in list(groups_by_domain.values()):
+ for group in list({g['name']: g for g in groups}.values()):
+ yield group
+
+ def normalize_user(user):
+ """Parse and validate user mapping."""
+ user_type = user.get('type')
+
+ if user_type and user_type not in (UserType.EPHEMERAL,
+ UserType.LOCAL):
+ msg = _("User type %s not supported") % user_type
+ raise exception.ValidationError(msg)
+
+ if user_type is None:
+ user_type = user['type'] = UserType.EPHEMERAL
+
+ if user_type == UserType.EPHEMERAL:
+ user['domain'] = {
+ 'id': CONF.federation.federated_domain_name
+ }
+
+ # initialize the group_ids as a set to eliminate duplicates
+ user = {}
+ group_ids = set()
+ group_names = list()
+ groups_by_domain = dict()
+
+ # if the mapping yields no valid identity values, bail right away
+ # instead of continuing on with a normalized bogus user
+ if not identity_values:
+ msg = _("Could not map any federated user properties to identity "
+ "values. Check debug logs or the mapping used for "
+ "additional details.")
+ LOG.warning(msg)
+ raise exception.ValidationError(msg)
+
+ for identity_value in identity_values:
+ if 'user' in identity_value:
+ # if a mapping outputs more than one user name, log it
+ if user:
+ LOG.warning(_LW('Ignoring user name'))
+ else:
+ user = identity_value.get('user')
+ if 'group' in identity_value:
+ group = identity_value['group']
+ if 'id' in group:
+ group_ids.add(group['id'])
+ elif 'name' in group:
+ domain = (group['domain'].get('name') or
+ group['domain'].get('id'))
+ groups_by_domain.setdefault(domain, list()).append(group)
+ group_names.extend(extract_groups(groups_by_domain))
+ if 'groups' in identity_value:
+ if 'domain' not in identity_value:
+ msg = _("Invalid rule: %(identity_value)s. Both 'groups' "
+ "and 'domain' keywords must be specified.")
+ msg = msg % {'identity_value': identity_value}
+ raise exception.ValidationError(msg)
+ # In this case, identity_value['groups'] is a string
+ # representation of a list, and we want a real list. This is
+ # due to the way we do direct mapping substitutions today (see
+ # function _update_local_mapping() )
+ try:
+ group_names_list = ast.literal_eval(
+ identity_value['groups'])
+ except ValueError:
+ group_names_list = [identity_value['groups']]
+ domain = identity_value['domain']
+ group_dicts = [{'name': name, 'domain': domain} for name in
+ group_names_list]
+
+ group_names.extend(group_dicts)
+ if 'group_ids' in identity_value:
+ # If identity_values['group_ids'] is a string representation
+ # of a list, parse it to a real list. Also, if the provided
+ # group_ids parameter contains only one element, it will be
+ # parsed as a simple string, and not a list or the
+ # representation of a list.
+ try:
+ group_ids.update(
+ ast.literal_eval(identity_value['group_ids']))
+ except (ValueError, SyntaxError):
+ group_ids.update([identity_value['group_ids']])
+
+ normalize_user(user)
+
+ return {'user': user,
+ 'group_ids': list(group_ids),
+ 'group_names': group_names}
+
+ def _update_local_mapping(self, local, direct_maps):
+ """Replace any {0}, {1} ... values with data from the assertion.
+
+ :param local: local mapping reference that needs to be updated
+ :type local: dict
+ :param direct_maps: identity values used to update local
+ :type direct_maps: keystone.federation.utils.DirectMaps
+
+ Example local::
+
+ {'user': {'name': '{0} {1}', 'email': '{2}'}}
+
+ Example direct_maps::
+
+ ['Bob', 'Thompson', 'bob@example.com']
+
+ :returns: new local mapping reference with replaced values.
+
+ The expected return structure is::
+
+ {'user': {'name': 'Bob Thompson', 'email': 'bob@example.com'}}
+
+ :raises keystone.exception.DirectMappingError: when referring to a
+ remote match from a local section of a rule
+
+ """
+ LOG.debug('direct_maps: %s', direct_maps)
+ LOG.debug('local: %s', local)
+ new = {}
+ for k, v in local.items():
+ if isinstance(v, dict):
+ new_value = self._update_local_mapping(v, direct_maps)
+ else:
+ try:
+ new_value = v.format(*direct_maps)
+ except IndexError:
+ raise exception.DirectMappingError(
+ mapping_id=self.mapping_id)
+
+ new[k] = new_value
+ return new
+
+ def _verify_all_requirements(self, requirements, assertion):
+ """Compare remote requirements of a rule against the assertion.
+
+ If a value of ``None`` is returned, the rule with this assertion
+ doesn't apply.
+ If an array of zero length is returned, then there are no direct
+ mappings to be performed, but the rule is valid.
+ Otherwise, it first filters the values according to any blacklist or
+ whitelist rules and finally returns the values, in order, to be
+ directly mapped.
+
+ :param requirements: list of remote requirements from rules
+ :type requirements: list
+
+ Example requirements::
+
+ [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "any_one_of": [
+ "Customer"
+ ]
+ },
+ {
+ "type": "ADFS_GROUPS",
+ "whitelist": [
+ "g1", "g2", "g3", "g4"
+ ]
+ }
+ ]
+
+ :param assertion: dict of attributes from an IdP
+ :type assertion: dict
+
+ Example assertion::
+
+ {
+ 'UserName': ['testacct'],
+ 'LastName': ['Account'],
+ 'orgPersonType': ['Tester'],
+ 'Email': ['testacct@example.com'],
+ 'FirstName': ['Test'],
+ 'ADFS_GROUPS': ['g1', 'g2']
+ }
+
+ :returns: identity values used to update local
+ :rtype: keystone.federation.utils.DirectMaps or None
+
+ """
+ direct_maps = DirectMaps()
+
+ for requirement in requirements:
+ requirement_type = requirement['type']
+ direct_map_values = assertion.get(requirement_type)
+ regex = requirement.get('regex', False)
+
+ if not direct_map_values:
+ return None
+
+ any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
+ if any_one_values is not None:
+ if self._evaluate_requirement(any_one_values,
+ direct_map_values,
+ self._EvalType.ANY_ONE_OF,
+ regex):
+ continue
+ else:
+ return None
+
+ not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
+ if not_any_values is not None:
+ if self._evaluate_requirement(not_any_values,
+ direct_map_values,
+ self._EvalType.NOT_ANY_OF,
+ regex):
+ continue
+ else:
+ return None
+
+ # If 'any_one_of' or 'not_any_of' are not found, then values are
+ # within 'type'. Attempt to find that 'type' within the assertion,
+ # and filter these values if 'whitelist' or 'blacklist' is set.
+ blacklisted_values = requirement.get(self._EvalType.BLACKLIST)
+ whitelisted_values = requirement.get(self._EvalType.WHITELIST)
+
+ # If a blacklist or whitelist is used, we want to map to the
+ # whole list instead of just its values separately.
+ if blacklisted_values is not None:
+ direct_map_values = [v for v in direct_map_values
+ if v not in blacklisted_values]
+ elif whitelisted_values is not None:
+ direct_map_values = [v for v in direct_map_values
+ if v in whitelisted_values]
+
+ direct_maps.add(direct_map_values)
+
+ LOG.debug('updating a direct mapping: %s', direct_map_values)
+
+ return direct_maps
+
+ def _evaluate_values_by_regex(self, values, assertion_values):
+ for value in values:
+ for assertion_value in assertion_values:
+ if re.search(value, assertion_value):
+ return True
+ return False
+
+ def _evaluate_requirement(self, values, assertion_values,
+ eval_type, regex):
+ """Evaluate the incoming requirement and assertion.
+
+ If the requirement type does not exist in the assertion data, then
+ return False. If regex is specified, then compare the values and
+ assertion values. Otherwise, grab the intersection of the values
+ and use that to compare against the evaluation type.
+
+ :param values: list of allowed values, defined in the requirement
+ :type values: list
+ :param assertion_values: The values from the assertion to evaluate
+ :type assertion_values: list/string
+ :param eval_type: determine how to evaluate requirements
+ :type eval_type: string
+ :param regex: perform evaluation with regex
+ :type regex: boolean
+
+ :returns: True if the requirement is satisfied, False otherwise.
+
+ """
+ if regex:
+ any_match = self._evaluate_values_by_regex(values,
+ assertion_values)
+ else:
+ any_match = bool(set(values).intersection(set(assertion_values)))
+ if any_match and eval_type == self._EvalType.ANY_ONE_OF:
+ return True
+ if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
+ return True
+
+ return False
+
+
+def assert_enabled_identity_provider(federation_api, idp_id):
+ identity_provider = federation_api.get_idp(idp_id)
+ if identity_provider.get('enabled') is not True:
+ msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id}
+ LOG.debug(msg)
+ raise exception.Forbidden(msg)
+
+
+def assert_enabled_service_provider_object(service_provider):
+ if service_provider.get('enabled') is not True:
+ sp_id = service_provider['id']
+ msg = _('Service Provider %(sp)s is disabled') % {'sp': sp_id}
+ LOG.debug(msg)
+ raise exception.Forbidden(msg)
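
Putting the pieces together, here is a minimal end-to-end sketch of the rule
engine in this file (the mapping id is a placeholder; the rule and assertion
reuse the docstring examples above)::

    from keystone.federation import utils

    rules = [{
        'local': [{'user': {'name': '{0}'}}],
        'remote': [{'type': 'UserName'},
                   {'type': 'orgPersonType', 'any_one_of': ['Tester']}],
    }]
    assertion = {'UserName': 'testacct', 'orgPersonType': 'Tester'}

    processor = utils.RuleProcessor('example-mapping-id', rules)
    mapped = processor.process(assertion)
    # mapped['user'] == {'name': 'testacct', 'type': 'ephemeral',
    #                    'domain': {'id': CONF.federation.federated_domain_name}}
    # mapped['group_ids'] == [] and mapped['group_names'] == []
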
diff --git a/keystone-moon/keystone/identity/__init__.py b/keystone-moon/keystone/identity/__init__.py
index 3063b5ca..96b3ee77 100644
--- a/keystone-moon/keystone/identity/__init__.py
+++ b/keystone-moon/keystone/identity/__init__.py
@@ -15,4 +15,3 @@
from keystone.identity import controllers # noqa
from keystone.identity.core import * # noqa
from keystone.identity import generator # noqa
-from keystone.identity import routers # noqa
diff --git a/keystone-moon/keystone/identity/backends/ldap.py b/keystone-moon/keystone/identity/backends/ldap.py
index 1f33bacb..fe8e8477 100644
--- a/keystone-moon/keystone/identity/backends/ldap.py
+++ b/keystone-moon/keystone/identity/backends/ldap.py
@@ -17,6 +17,7 @@ import uuid
import ldap.filter
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
import six
from keystone.common import clean
@@ -31,17 +32,20 @@ from keystone import identity
CONF = cfg.CONF
LOG = log.getLogger(__name__)
+_DEPRECATION_MSG = _('%s for the LDAP identity backend has been deprecated in '
+ 'the Mitaka release in favor of read-only identity LDAP '
+ 'access. It will be removed in the "O" release.')
+
class Identity(identity.IdentityDriverV8):
def __init__(self, conf=None):
super(Identity, self).__init__()
if conf is None:
- conf = CONF
- self.user = UserApi(conf)
- self.group = GroupApi(conf)
-
- def default_assignment_driver(self):
- return 'ldap'
+ self.conf = CONF
+ else:
+ self.conf = conf
+ self.user = UserApi(self.conf)
+ self.group = GroupApi(self.conf)
def is_domain_aware(self):
return False
@@ -87,11 +91,15 @@ class Identity(identity.IdentityDriverV8):
# CRUD
def create_user(self, user_id, user):
+ msg = _DEPRECATION_MSG % "create_user"
+ versionutils.report_deprecated_feature(LOG, msg)
self.user.check_allow_create()
user_ref = self.user.create(user)
return self.user.filter_attributes(user_ref)
def update_user(self, user_id, user):
+ msg = _DEPRECATION_MSG % "update_user"
+ versionutils.report_deprecated_feature(LOG, msg)
self.user.check_allow_update()
old_obj = self.user.get(user_id)
if 'name' in user and old_obj.get('name') != user['name']:
@@ -110,6 +118,8 @@ class Identity(identity.IdentityDriverV8):
return self.user.get_filtered(user_id)
def delete_user(self, user_id):
+ msg = _DEPRECATION_MSG % "delete_user"
+ versionutils.report_deprecated_feature(LOG, msg)
self.user.check_allow_delete()
user = self.user.get(user_id)
user_dn = user['dn']
@@ -122,6 +132,8 @@ class Identity(identity.IdentityDriverV8):
self.user.delete(user_id)
def create_group(self, group_id, group):
+ msg = _DEPRECATION_MSG % "create_group"
+ versionutils.report_deprecated_feature(LOG, msg)
self.group.check_allow_create()
group['name'] = clean.group_name(group['name'])
return common_ldap.filter_entity(self.group.create(group))
@@ -135,28 +147,39 @@ class Identity(identity.IdentityDriverV8):
return self.group.get_filtered_by_name(group_name)
def update_group(self, group_id, group):
+ msg = _DEPRECATION_MSG % "update_group"
+ versionutils.report_deprecated_feature(LOG, msg)
self.group.check_allow_update()
if 'name' in group:
group['name'] = clean.group_name(group['name'])
return common_ldap.filter_entity(self.group.update(group_id, group))
def delete_group(self, group_id):
+ msg = _DEPRECATION_MSG % "delete_group"
+ versionutils.report_deprecated_feature(LOG, msg)
self.group.check_allow_delete()
return self.group.delete(group_id)
def add_user_to_group(self, user_id, group_id):
+ msg = _DEPRECATION_MSG % "add_user_to_group"
+ versionutils.report_deprecated_feature(LOG, msg)
user_ref = self._get_user(user_id)
user_dn = user_ref['dn']
self.group.add_user(user_dn, group_id, user_id)
def remove_user_from_group(self, user_id, group_id):
+ msg = _DEPRECATION_MSG % "remove_user_from_group"
+ versionutils.report_deprecated_feature(LOG, msg)
user_ref = self._get_user(user_id)
user_dn = user_ref['dn']
self.group.remove_user(user_dn, group_id, user_id)
def list_groups_for_user(self, user_id, hints):
user_ref = self._get_user(user_id)
- user_dn = user_ref['dn']
+ if self.conf.ldap.group_members_are_ids:
+ user_dn = user_ref['id']
+ else:
+ user_dn = user_ref['dn']
return self.group.list_user_groups_filtered(user_dn, hints)
def list_groups(self, hints):
@@ -164,15 +187,19 @@ class Identity(identity.IdentityDriverV8):
def list_users_in_group(self, group_id, hints):
users = []
- for user_dn in self.group.list_group_users(group_id):
- user_id = self.user._dn_to_id(user_dn)
+ for user_key in self.group.list_group_users(group_id):
+ if self.conf.ldap.group_members_are_ids:
+ user_id = user_key
+ else:
+ user_id = self.user._dn_to_id(user_key)
+
try:
users.append(self.user.get_filtered(user_id))
except exception.UserNotFound:
- LOG.debug(("Group member '%(user_dn)s' not found in"
+ LOG.debug(("Group member '%(user_key)s' not found in"
" '%(group_id)s'. The user should be removed"
" from the group. The user will be ignored."),
- dict(user_dn=user_dn, group_id=group_id))
+ dict(user_key=user_key, group_id=group_id))
return users
def check_user_in_group(self, user_id, group_id):
@@ -201,6 +228,7 @@ class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
attribute_options_names = {'password': 'pass',
'email': 'mail',
'name': 'name',
+ 'description': 'description',
'enabled': 'enabled',
'default_project_id': 'default_project_id'}
immutable_attrs = ['id']
@@ -264,15 +292,15 @@ class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
return self.filter_attributes(user)
def get_all_filtered(self, hints):
- query = self.filter_query(hints)
- return [self.filter_attributes(user) for user in self.get_all(query)]
+ query = self.filter_query(hints, self.ldap_filter)
+ return [self.filter_attributes(user)
+ for user in self.get_all(query, hints)]
def filter_attributes(self, user):
return identity.filter_user(common_ldap.filter_entity(user))
def is_user(self, dn):
"""Returns True if the entry is a user."""
-
# NOTE(blk-u): It's easy to check if the DN is under the User tree,
# but may not be accurate. A more accurate test would be to fetch the
# entry to see if it's got the user objectclass, but this could be
@@ -314,7 +342,7 @@ class GroupApi(common_ldap.BaseLdap):
def delete(self, group_id):
if self.subtree_delete_enabled:
- super(GroupApi, self).deleteTree(group_id)
+ super(GroupApi, self).delete_tree(group_id)
else:
# TODO(spzala): this is only placeholder for group and domain
# role support which will be added under bug 1101287
@@ -349,7 +377,6 @@ class GroupApi(common_ldap.BaseLdap):
def list_user_groups(self, user_dn):
"""Return a list of groups for which the user is a member."""
-
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
query = '(%s=%s)%s' % (self.member_attribute,
user_dn_esc,
@@ -358,7 +385,6 @@ class GroupApi(common_ldap.BaseLdap):
def list_user_groups_filtered(self, user_dn, hints):
"""Return a filtered list of groups for which the user is a member."""
-
user_dn_esc = ldap.filter.escape_filter_chars(user_dn)
query = '(%s=%s)%s' % (self.member_attribute,
user_dn_esc,
@@ -396,4 +422,4 @@ class GroupApi(common_ldap.BaseLdap):
def get_all_filtered(self, hints, query=None):
query = self.filter_query(hints, query)
return [common_ldap.filter_entity(group)
- for group in self.get_all(query)]
+ for group in self.get_all(query, hints)]
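
The new ``group_members_are_ids`` branches above are driven by a deployment
flag; a sketch of the corresponding keystone.conf stanza (option name taken
from ``self.conf.ldap.group_members_are_ids`` in the code)::

    [ldap]
    # Group members are stored as user ids (e.g. a posixGroup's memberUid)
    # rather than as full DNs.
    group_members_are_ids = true
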
diff --git a/keystone-moon/keystone/identity/backends/sql.py b/keystone-moon/keystone/identity/backends/sql.py
index d37240eb..5680a8a2 100644
--- a/keystone-moon/keystone/identity/backends/sql.py
+++ b/keystone-moon/keystone/identity/backends/sql.py
@@ -12,8 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
+import sqlalchemy
+from sqlalchemy.ext.hybrid import hybrid_property
+from sqlalchemy import orm
+from keystone.common import driver_hints
from keystone.common import sql
from keystone.common import utils
from keystone import exception
@@ -21,23 +24,84 @@ from keystone.i18n import _
from keystone import identity
-CONF = cfg.CONF
-
-
class User(sql.ModelBase, sql.DictBase):
__tablename__ = 'user'
attributes = ['id', 'name', 'domain_id', 'password', 'enabled',
'default_project_id']
id = sql.Column(sql.String(64), primary_key=True)
- name = sql.Column(sql.String(255), nullable=False)
- domain_id = sql.Column(sql.String(64), nullable=False)
- password = sql.Column(sql.String(128))
enabled = sql.Column(sql.Boolean)
extra = sql.Column(sql.JsonBlob())
default_project_id = sql.Column(sql.String(64))
- # Unique constraint across two columns to create the separation
- # rather than just only 'name' being unique
- __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+ local_user = orm.relationship('LocalUser', uselist=False,
+ single_parent=True, lazy='subquery',
+ cascade='all,delete-orphan', backref='user')
+ federated_users = orm.relationship('FederatedUser',
+ single_parent=True,
+ lazy='subquery',
+ cascade='all,delete-orphan',
+ backref='user')
+
+ # name property
+ @hybrid_property
+ def name(self):
+ if self.local_user:
+ return self.local_user.name
+ elif self.federated_users:
+ return self.federated_users[0].display_name
+ else:
+ return None
+
+ @name.setter
+ def name(self, value):
+ if not self.local_user:
+ self.local_user = LocalUser()
+ self.local_user.name = value
+
+ @name.expression
+ def name(cls):
+ return LocalUser.name
+
+ # password property
+ @hybrid_property
+ def password(self):
+ if self.local_user and self.local_user.passwords:
+ return self.local_user.passwords[0].password
+ else:
+ return None
+
+ @password.setter
+ def password(self, value):
+ if not value:
+ if self.local_user and self.local_user.passwords:
+ self.local_user.passwords = []
+ else:
+ if not self.local_user:
+ self.local_user = LocalUser()
+ if not self.local_user.passwords:
+ self.local_user.passwords.append(Password())
+ self.local_user.passwords[0].password = value
+
+ @password.expression
+ def password(cls):
+ return Password.password
+
+ # domain_id property
+ @hybrid_property
+ def domain_id(self):
+ if self.local_user:
+ return self.local_user.domain_id
+ else:
+ return None
+
+ @domain_id.setter
+ def domain_id(self, value):
+ if not self.local_user:
+ self.local_user = LocalUser()
+ self.local_user.domain_id = value
+
+ @domain_id.expression
+ def domain_id(cls):
+ return LocalUser.domain_id
def to_dict(self, include_extra_dict=False):
d = super(User, self).to_dict(include_extra_dict=include_extra_dict)
@@ -46,6 +110,49 @@ class User(sql.ModelBase, sql.DictBase):
return d
+class LocalUser(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'local_user'
+ attributes = ['id', 'user_id', 'domain_id', 'name']
+ id = sql.Column(sql.Integer, primary_key=True)
+ user_id = sql.Column(sql.String(64), sql.ForeignKey('user.id',
+ ondelete='CASCADE'), unique=True)
+ domain_id = sql.Column(sql.String(64), nullable=False)
+ name = sql.Column(sql.String(255), nullable=False)
+ passwords = orm.relationship('Password', single_parent=True,
+ cascade='all,delete-orphan',
+ backref='local_user')
+ __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+
+
+class Password(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'password'
+ attributes = ['id', 'local_user_id', 'password']
+ id = sql.Column(sql.Integer, primary_key=True)
+ local_user_id = sql.Column(sql.Integer, sql.ForeignKey('local_user.id',
+ ondelete='CASCADE'))
+ password = sql.Column(sql.String(128))
+
+
+class FederatedUser(sql.ModelBase, sql.ModelDictMixin):
+ __tablename__ = 'federated_user'
+ attributes = ['id', 'user_id', 'idp_id', 'protocol_id', 'unique_id',
+ 'display_name']
+ id = sql.Column(sql.Integer, primary_key=True)
+ user_id = sql.Column(sql.String(64), sql.ForeignKey('user.id',
+ ondelete='CASCADE'))
+ idp_id = sql.Column(sql.String(64), sql.ForeignKey('identity_provider.id',
+ ondelete='CASCADE'))
+ protocol_id = sql.Column(sql.String(64), nullable=False)
+ unique_id = sql.Column(sql.String(255), nullable=False)
+ display_name = sql.Column(sql.String(255), nullable=True)
+ __table_args__ = (
+ sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'),
+ sqlalchemy.ForeignKeyConstraint(['protocol_id', 'idp_id'],
+ ['federation_protocol.id',
+ 'federation_protocol.idp_id'])
+ )
+
+
class Group(sql.ModelBase, sql.DictBase):
__tablename__ = 'group'
attributes = ['id', 'name', 'domain_id', 'description']
@@ -56,11 +163,12 @@ class Group(sql.ModelBase, sql.DictBase):
extra = sql.Column(sql.JsonBlob())
# Unique constraint across two columns to create the separation
# rather than just only 'name' being unique
- __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+ __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),)
class UserGroupMembership(sql.ModelBase, sql.DictBase):
"""Group membership join table."""
+
__tablename__ = 'user_group_membership'
user_id = sql.Column(sql.String(64),
sql.ForeignKey('user.id'),
@@ -74,11 +182,9 @@ class Identity(identity.IdentityDriverV8):
# NOTE(henry-nash): Override the __init__() method so as to take a
# config parameter to enable sql to be used as a domain-specific driver.
def __init__(self, conf=None):
+ self.conf = conf
super(Identity, self).__init__()
- def default_assignment_driver(self):
- return 'sql'
-
@property
def is_sql(self):
return True
@@ -96,33 +202,32 @@ class Identity(identity.IdentityDriverV8):
# Identity interface
def authenticate(self, user_id, password):
- session = sql.get_session()
- user_ref = None
- try:
- user_ref = self._get_user(session, user_id)
- except exception.UserNotFound:
- raise AssertionError(_('Invalid user / password'))
- if not self._check_password(password, user_ref):
- raise AssertionError(_('Invalid user / password'))
- return identity.filter_user(user_ref.to_dict())
+ with sql.session_for_read() as session:
+ user_ref = None
+ try:
+ user_ref = self._get_user(session, user_id)
+ except exception.UserNotFound:
+ raise AssertionError(_('Invalid user / password'))
+ if not self._check_password(password, user_ref):
+ raise AssertionError(_('Invalid user / password'))
+ return identity.filter_user(user_ref.to_dict())
# user crud
@sql.handle_conflicts(conflict_type='user')
def create_user(self, user_id, user):
user = utils.hash_user_password(user)
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
user_ref = User.from_dict(user)
session.add(user_ref)
- return identity.filter_user(user_ref.to_dict())
+ return identity.filter_user(user_ref.to_dict())
- @sql.truncated
+ @driver_hints.truncated
def list_users(self, hints):
- session = sql.get_session()
- query = session.query(User)
- user_refs = sql.filter_limit_query(User, query, hints)
- return [identity.filter_user(x.to_dict()) for x in user_refs]
+ with sql.session_for_read() as session:
+ query = session.query(User).outerjoin(LocalUser)
+ user_refs = sql.filter_limit_query(User, query, hints)
+ return [identity.filter_user(x.to_dict()) for x in user_refs]
def _get_user(self, session, user_id):
user_ref = session.query(User).get(user_id)
@@ -131,25 +236,24 @@ class Identity(identity.IdentityDriverV8):
return user_ref
def get_user(self, user_id):
- session = sql.get_session()
- return identity.filter_user(self._get_user(session, user_id).to_dict())
+ with sql.session_for_read() as session:
+ return identity.filter_user(
+ self._get_user(session, user_id).to_dict())
def get_user_by_name(self, user_name, domain_id):
- session = sql.get_session()
- query = session.query(User)
- query = query.filter_by(name=user_name)
- query = query.filter_by(domain_id=domain_id)
- try:
- user_ref = query.one()
- except sql.NotFound:
- raise exception.UserNotFound(user_id=user_name)
- return identity.filter_user(user_ref.to_dict())
+ with sql.session_for_read() as session:
+ query = session.query(User).join(LocalUser)
+ query = query.filter(sqlalchemy.and_(LocalUser.name == user_name,
+ LocalUser.domain_id == domain_id))
+ try:
+ user_ref = query.one()
+ except sql.NotFound:
+ raise exception.UserNotFound(user_id=user_name)
+ return identity.filter_user(user_ref.to_dict())
@sql.handle_conflicts(conflict_type='user')
def update_user(self, user_id, user):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
user_ref = self._get_user(session, user_id)
old_user_dict = user_ref.to_dict()
user = utils.hash_user_password(user)
@@ -160,76 +264,74 @@ class Identity(identity.IdentityDriverV8):
if attr != 'id':
setattr(user_ref, attr, getattr(new_user, attr))
user_ref.extra = new_user.extra
- return identity.filter_user(user_ref.to_dict(include_extra_dict=True))
+ return identity.filter_user(
+ user_ref.to_dict(include_extra_dict=True))
def add_user_to_group(self, user_id, group_id):
- session = sql.get_session()
- self.get_group(group_id)
- self.get_user(user_id)
- query = session.query(UserGroupMembership)
- query = query.filter_by(user_id=user_id)
- query = query.filter_by(group_id=group_id)
- rv = query.first()
- if rv:
- return
-
- with session.begin():
+ with sql.session_for_write() as session:
+ self.get_group(group_id)
+ self.get_user(user_id)
+ query = session.query(UserGroupMembership)
+ query = query.filter_by(user_id=user_id)
+ query = query.filter_by(group_id=group_id)
+ rv = query.first()
+ if rv:
+ return
+
session.add(UserGroupMembership(user_id=user_id,
group_id=group_id))
def check_user_in_group(self, user_id, group_id):
- session = sql.get_session()
- self.get_group(group_id)
- self.get_user(user_id)
- query = session.query(UserGroupMembership)
- query = query.filter_by(user_id=user_id)
- query = query.filter_by(group_id=group_id)
- if not query.first():
- raise exception.NotFound(_("User '%(user_id)s' not found in"
- " group '%(group_id)s'") %
- {'user_id': user_id,
- 'group_id': group_id})
+ with sql.session_for_read() as session:
+ self.get_group(group_id)
+ self.get_user(user_id)
+ query = session.query(UserGroupMembership)
+ query = query.filter_by(user_id=user_id)
+ query = query.filter_by(group_id=group_id)
+ if not query.first():
+ raise exception.NotFound(_("User '%(user_id)s' not found in"
+ " group '%(group_id)s'") %
+ {'user_id': user_id,
+ 'group_id': group_id})
def remove_user_from_group(self, user_id, group_id):
- session = sql.get_session()
# We don't check if user or group are still valid and let the remove
# be tried anyway - in case this is some kind of clean-up operation
- query = session.query(UserGroupMembership)
- query = query.filter_by(user_id=user_id)
- query = query.filter_by(group_id=group_id)
- membership_ref = query.first()
- if membership_ref is None:
- # Check if the group and user exist to return descriptive
- # exceptions.
- self.get_group(group_id)
- self.get_user(user_id)
- raise exception.NotFound(_("User '%(user_id)s' not found in"
- " group '%(group_id)s'") %
- {'user_id': user_id,
- 'group_id': group_id})
- with session.begin():
+ with sql.session_for_write() as session:
+ query = session.query(UserGroupMembership)
+ query = query.filter_by(user_id=user_id)
+ query = query.filter_by(group_id=group_id)
+ membership_ref = query.first()
+ if membership_ref is None:
+ # Check if the group and user exist to return descriptive
+ # exceptions.
+ self.get_group(group_id)
+ self.get_user(user_id)
+ raise exception.NotFound(_("User '%(user_id)s' not found in"
+ " group '%(group_id)s'") %
+ {'user_id': user_id,
+ 'group_id': group_id})
session.delete(membership_ref)
def list_groups_for_user(self, user_id, hints):
- session = sql.get_session()
- self.get_user(user_id)
- query = session.query(Group).join(UserGroupMembership)
- query = query.filter(UserGroupMembership.user_id == user_id)
- query = sql.filter_limit_query(Group, query, hints)
- return [g.to_dict() for g in query]
+ with sql.session_for_read() as session:
+ self.get_user(user_id)
+ query = session.query(Group).join(UserGroupMembership)
+ query = query.filter(UserGroupMembership.user_id == user_id)
+ query = sql.filter_limit_query(Group, query, hints)
+ return [g.to_dict() for g in query]
def list_users_in_group(self, group_id, hints):
- session = sql.get_session()
- self.get_group(group_id)
- query = session.query(User).join(UserGroupMembership)
- query = query.filter(UserGroupMembership.group_id == group_id)
- query = sql.filter_limit_query(User, query, hints)
- return [identity.filter_user(u.to_dict()) for u in query]
+ with sql.session_for_read() as session:
+ self.get_group(group_id)
+ query = session.query(User).outerjoin(LocalUser)
+ query = query.join(UserGroupMembership)
+ query = query.filter(UserGroupMembership.group_id == group_id)
+ query = sql.filter_limit_query(User, query, hints)
+ return [identity.filter_user(u.to_dict()) for u in query]
def delete_user(self, user_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_user(session, user_id)
q = session.query(UserGroupMembership)
@@ -242,18 +344,17 @@ class Identity(identity.IdentityDriverV8):
@sql.handle_conflicts(conflict_type='group')
def create_group(self, group_id, group):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
ref = Group.from_dict(group)
session.add(ref)
- return ref.to_dict()
+ return ref.to_dict()
- @sql.truncated
+ @driver_hints.truncated
def list_groups(self, hints):
- session = sql.get_session()
- query = session.query(Group)
- refs = sql.filter_limit_query(Group, query, hints)
- return [ref.to_dict() for ref in refs]
+ with sql.session_for_read() as session:
+ query = session.query(Group)
+ refs = sql.filter_limit_query(Group, query, hints)
+ return [ref.to_dict() for ref in refs]
def _get_group(self, session, group_id):
ref = session.query(Group).get(group_id)
@@ -262,25 +363,23 @@ class Identity(identity.IdentityDriverV8):
return ref
def get_group(self, group_id):
- session = sql.get_session()
- return self._get_group(session, group_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_group(session, group_id).to_dict()
def get_group_by_name(self, group_name, domain_id):
- session = sql.get_session()
- query = session.query(Group)
- query = query.filter_by(name=group_name)
- query = query.filter_by(domain_id=domain_id)
- try:
- group_ref = query.one()
- except sql.NotFound:
- raise exception.GroupNotFound(group_id=group_name)
- return group_ref.to_dict()
+ with sql.session_for_read() as session:
+ query = session.query(Group)
+ query = query.filter_by(name=group_name)
+ query = query.filter_by(domain_id=domain_id)
+ try:
+ group_ref = query.one()
+ except sql.NotFound:
+ raise exception.GroupNotFound(group_id=group_name)
+ return group_ref.to_dict()
@sql.handle_conflicts(conflict_type='group')
def update_group(self, group_id, group):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_group(session, group_id)
old_dict = ref.to_dict()
for k in group:
@@ -290,12 +389,10 @@ class Identity(identity.IdentityDriverV8):
if attr != 'id':
setattr(ref, attr, getattr(new_group, attr))
ref.extra = new_group.extra
- return ref.to_dict()
+ return ref.to_dict()
def delete_group(self, group_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_group(session, group_id)
q = session.query(UserGroupMembership)
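
The hybrid properties introduced above make ``name``, ``password`` and
``domain_id`` usable both on instances and in queries; a short sketch of the
two modes, assuming this module's context (note how it mirrors the
``outerjoin(LocalUser)`` added to the list methods)::

    # Instance level: assignment lazily creates the backing LocalUser row.
    user = User(id='0fe36e73', enabled=True)
    user.name = 'carol'
    assert user.local_user.name == 'carol'

    # Class level: User.name expands to LocalUser.name, so filters must
    # join (or outer-join) LocalUser first.
    with sql.session_for_read() as session:
        query = session.query(User).outerjoin(LocalUser)
        query = query.filter(User.name == 'carol')
        users = [identity.filter_user(u.to_dict()) for u in query]
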
diff --git a/keystone-moon/keystone/identity/controllers.py b/keystone-moon/keystone/identity/controllers.py
index 0ec38190..9e8ba6fc 100644
--- a/keystone-moon/keystone/identity/controllers.py
+++ b/keystone-moon/keystone/identity/controllers.py
@@ -80,6 +80,8 @@ class User(controller.V2Controller):
self.resource_api.get_project(default_project_id)
user['default_project_id'] = default_project_id
+ self.resource_api.ensure_default_domain_exists()
+
# The manager layer will generate the unique ID for users
user_ref = self._normalize_domain_id(context, user.copy())
initiator = notifications._get_request_audit_info(context)
@@ -149,7 +151,7 @@ class User(controller.V2Controller):
try:
self.assignment_api.add_user_to_project(
user_ref['tenantId'], user_id)
- except exception.Conflict:
+ except exception.Conflict: # nosec
# We are already a member of that tenant
pass
except exception.NotFound:
@@ -253,7 +255,8 @@ class UserV3(controller.V3Controller):
@controller.protected(callback=_check_user_and_group_protection)
def add_user_to_group(self, context, user_id, group_id):
- self.identity_api.add_user_to_group(user_id, group_id)
+ initiator = notifications._get_request_audit_info(context)
+ self.identity_api.add_user_to_group(user_id, group_id, initiator)
@controller.protected(callback=_check_user_and_group_protection)
def check_user_in_group(self, context, user_id, group_id):
@@ -261,7 +264,8 @@ class UserV3(controller.V3Controller):
@controller.protected(callback=_check_user_and_group_protection)
def remove_user_from_group(self, context, user_id, group_id):
- self.identity_api.remove_user_from_group(user_id, group_id)
+ initiator = notifications._get_request_audit_info(context)
+ self.identity_api.remove_user_from_group(user_id, group_id, initiator)
@controller.protected()
def delete_user(self, context, user_id):
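
These controller hunks thread an `initiator` — the caller's audit information, extracted from the request context via `notifications._get_request_audit_info(context)` — down into the manager so the emitted notifications can record who changed the membership. (The `# nosec` marker added above tells the bandit security linter that the bare `pass` on `exception.Conflict` is intentional.) A sketch of the manager side, with `audit_added_to` as a hypothetical stand-in for keystone's notification helper:

    def add_user_to_group(self, user_id, group_id, initiator=None):
        # initiator defaults to None so internal callers that have no
        # request context can still use the API.
        self.driver.add_user_to_group(user_id, group_id)
        audit_added_to('group', group_id, 'user', user_id, initiator)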
diff --git a/keystone-moon/keystone/identity/core.py b/keystone-moon/keystone/identity/core.py
index 061b82e1..2f52a358 100644
--- a/keystone-moon/keystone/identity/core.py
+++ b/keystone-moon/keystone/identity/core.py
@@ -17,18 +17,21 @@
import abc
import functools
import os
+import threading
import uuid
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
import six
+from keystone import assignment # TODO(lbragstad): Decouple this dependency
from keystone.common import cache
from keystone.common import clean
+from keystone.common import config
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
-from keystone import config
from keystone import exception
from keystone.i18n import _, _LW
from keystone.identity.mapping_backends import mapping
@@ -39,7 +42,7 @@ CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='identity')
+MEMOIZE = cache.get_memoization_decorator(group='identity')
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
@@ -70,7 +73,8 @@ def filter_user(user_ref):
try:
user_ref['extra'].pop('password', None)
user_ref['extra'].pop('tenants', None)
- except KeyError:
+ except KeyError: # nosec
+ # ok to not have extra in the user_ref.
pass
return user_ref
@@ -92,43 +96,33 @@ class DomainConfigs(dict):
the identity manager and driver can use.
"""
+
configured = False
driver = None
_any_sql = False
+ lock = threading.Lock()
def _load_driver(self, domain_config):
return manager.load_driver(Manager.driver_namespace,
domain_config['cfg'].identity.driver,
domain_config['cfg'])
- def _assert_no_more_than_one_sql_driver(self, domain_id, new_config,
- config_file=None):
- """Ensure there is no more than one sql driver.
-
- Check to see if the addition of the driver in this new config
- would cause there to now be more than one sql driver.
+ def _load_config_from_file(self, resource_api, file_list, domain_name):
- If we are loading from configuration files, the config_file will hold
- the name of the file we have just loaded.
+ def _assert_no_more_than_one_sql_driver(domain_id, new_config,
+ config_file):
+ """Ensure there is no more than one sql driver.
- """
- if (new_config['driver'].is_sql and
- (self.driver.is_sql or self._any_sql)):
- # The addition of this driver would cause us to have more than
- # one sql driver, so raise an exception.
-
- # TODO(henry-nash): This method is only used in the file-based
- # case, so has no need to worry about the database/API case. The
- # code that overrides config_file below is therefore never used
- # and should be removed, and this method perhaps moved inside
- # _load_config_from_file(). This is raised as bug #1466772.
-
- if not config_file:
- config_file = _('Database at /domains/%s/config') % domain_id
- raise exception.MultipleSQLDriversInConfig(source=config_file)
- self._any_sql = self._any_sql or new_config['driver'].is_sql
+ Check to see if the addition of the driver in this new config
+ would cause there to be more than one sql driver.
- def _load_config_from_file(self, resource_api, file_list, domain_name):
+ """
+ if (new_config['driver'].is_sql and
+ (self.driver.is_sql or self._any_sql)):
+ # The addition of this driver would cause us to have more than
+ # one sql driver, so raise an exception.
+ raise exception.MultipleSQLDriversInConfig(source=config_file)
+ self._any_sql = self._any_sql or new_config['driver'].is_sql
try:
domain_ref = resource_api.get_domain_by_name(domain_name)
@@ -149,9 +143,9 @@ class DomainConfigs(dict):
domain_config['cfg'](args=[], project='keystone',
default_config_files=file_list)
domain_config['driver'] = self._load_driver(domain_config)
- self._assert_no_more_than_one_sql_driver(domain_ref['id'],
- domain_config,
- config_file=file_list)
+ _assert_no_more_than_one_sql_driver(domain_ref['id'],
+ domain_config,
+ file_list)
self[domain_ref['id']] = domain_config
def _setup_domain_drivers_from_files(self, standard_driver, resource_api):
@@ -275,7 +269,7 @@ class DomainConfigs(dict):
# being able to find who has it...either we were very very very
# unlucky or something is awry.
msg = _('Exceeded attempts to register domain %(domain)s to use '
- 'the SQL driver, the last domain that appears to have '
+ 'the SQL driver, the last domain that appears to have '
'had it is %(last_domain)s, giving up') % {
'domain': domain_id, 'last_domain': domain_registered}
raise exception.UnexpectedError(msg)
@@ -322,7 +316,6 @@ class DomainConfigs(dict):
def setup_domain_drivers(self, standard_driver, resource_api):
# This is called by the api call wrapper
- self.configured = True
self.driver = standard_driver
if CONF.identity.domain_configurations_from_database:
@@ -331,6 +324,7 @@ class DomainConfigs(dict):
else:
self._setup_domain_drivers_from_files(standard_driver,
resource_api)
+ self.configured = True
def get_domain_driver(self, domain_id):
self.check_config_and_reload_domain_driver_if_required(domain_id)
@@ -404,7 +398,7 @@ class DomainConfigs(dict):
# specific driver for this domain.
try:
del self[domain_id]
- except KeyError:
+ except KeyError: # nosec
# Allow this error in case we are unlucky and in a
# multi-threaded situation, two threads happen to be running
# in lock step.
@@ -428,15 +422,20 @@ def domains_configured(f):
def wrapper(self, *args, **kwargs):
if (not self.domain_configs.configured and
CONF.identity.domain_specific_drivers_enabled):
- self.domain_configs.setup_domain_drivers(
- self.driver, self.resource_api)
+ # If the domain-specific drivers have not been configured, acquire
+ # the lock and proceed with loading them.
+ with self.domain_configs.lock:
+ # Check again just in case some other thread has already
+ # completed domain config.
+ if not self.domain_configs.configured:
+ self.domain_configs.setup_domain_drivers(
+ self.driver, self.resource_api)
return f(self, *args, **kwargs)
return wrapper
def exception_translated(exception_type):
"""Wraps API calls to map to correct exception."""
-
def _exception_translated(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
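
The rewritten `domains_configured` wrapper is double-checked locking: the `configured` flag is read once without the lock for the common fast path, then re-checked after acquiring the lock so that only one thread runs the expensive driver setup. Note also the hunk above that moves `self.configured = True` from before the setup to after it, so a failed setup can be retried instead of being silently marked done. The idiom in isolation, as a generic sketch:

    import threading

    class LazyResource(object):
        """Sketch of double-checked locking for one-time initialization."""

        _lock = threading.Lock()
        _configured = False

        def ensure_configured(self):
            if not self._configured:  # fast path, no lock taken
                with self._lock:
                    # Re-check: another thread may have completed setup
                    # while we were waiting on the lock.
                    if not self._configured:
                        self._do_expensive_setup()
                        self._configured = True  # set last, on success only

        def _do_expensive_setup(self):
            pass  # e.g. load domain-specific drivers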
@@ -458,7 +457,7 @@ def exception_translated(exception_type):
@notifications.listener
@dependency.provider('identity_api')
@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api',
- 'resource_api', 'revoke_api')
+ 'resource_api', 'revoke_api', 'shadow_users_api')
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
@@ -710,7 +709,7 @@ class Manager(manager.Manager):
Use the mapping table to look up the domain, driver and local entity
that is represented by the provided public ID. Handle the situations
- were we do not use the mapping (e.g. single driver that understands
+ where we do not use the mapping (e.g. single driver that understands
UUIDs etc.)
"""
@@ -799,6 +798,41 @@ class Manager(manager.Manager):
not hints.get_exact_filter_by_name('domain_id')):
hints.add_filter('domain_id', domain_id)
+ def _set_list_limit_in_hints(self, hints, driver):
+ """Set list limit in hints from driver
+
+ If a hints list is provided, the wrapper will insert the relevant
+ limit into the hints so that the underlying driver call can try and
+ honor it. If the driver does truncate the response, it will update the
+ 'truncated' attribute in the 'limit' entry in the hints list, which
+ enables the caller of this function to know if truncation has taken
+ place. If, however, the driver layer is unable to perform truncation,
+ the 'limit' entry is simply left in the hints list for the caller to
+ handle.
+
+ A _get_list_limit() method, which returns the limit to which this
+ backend will truncate, is required to be present in the object class
+ hierarchy.
+
+ If a hints list is not provided in the arguments of the wrapped call
+ then any limits set in the config file are ignored. This allows
+ internal use of such wrapped methods where the entire data set is
+ needed as input for the calculations of some other API (e.g. get role
+ assignments for a given project).
+
+ This method, specific to the identity manager, is used instead of the
+ more general response_truncated, because the limit for identity
+ entities can be overridden in domain-specific config files. The driver
+ to use is determined while processing the passed parameters, whereas
+ response_truncated is designed to set the limit before any processing.
+ """
+ if hints is None:
+ return
+
+ list_limit = driver._get_list_limit()
+ if list_limit:
+ hints.set_limit(list_limit)
+
# The actual driver calls - these are pre/post processed here as
# part of the Manager layer to make sure we:
#
@@ -869,11 +903,11 @@ class Manager(manager.Manager):
return self._set_domain_id_and_mapping(
ref, domain_id, driver, mapping.EntityType.USER)
- @manager.response_truncated
@domains_configured
@exception_translated('user')
def list_users(self, domain_scope=None, hints=None):
driver = self._select_identity_driver(domain_scope)
+ self._set_list_limit_in_hints(hints, driver)
hints = hints or driver_hints.Hints()
if driver.is_domain_aware():
# Force the domain_scope into the hint to ensure that we only get
@@ -887,6 +921,14 @@ class Manager(manager.Manager):
return self._set_domain_id_and_mapping(
ref_list, domain_scope, driver, mapping.EntityType.USER)
+ def _check_update_of_domain_id(self, new_domain, old_domain):
+ if new_domain != old_domain:
+ versionutils.report_deprecated_feature(
+ LOG,
+ _('update of domain_id is deprecated as of Mitaka '
+ 'and will be removed in O.')
+ )
+
@domains_configured
@exception_translated('user')
def update_user(self, user_id, user_ref, initiator=None):
@@ -897,6 +939,8 @@ class Manager(manager.Manager):
if 'enabled' in user:
user['enabled'] = clean.user_enabled(user['enabled'])
if 'domain_id' in user:
+ self._check_update_of_domain_id(user['domain_id'],
+ old_user_ref['domain_id'])
self.resource_api.get_domain(user['domain_id'])
if 'id' in user:
if user_id != user['id']:
@@ -941,6 +985,10 @@ class Manager(manager.Manager):
self.id_mapping_api.delete_id_mapping(user_id)
notifications.Audit.deleted(self._USER, user_id, initiator)
+ # Invalidate user role assignments cache region, as it may be caching
+ # role assignments where the actor is the specified user
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
@domains_configured
@exception_translated('group')
def create_group(self, group_ref, initiator=None):
@@ -986,6 +1034,9 @@ class Manager(manager.Manager):
@exception_translated('group')
def update_group(self, group_id, group, initiator=None):
if 'domain_id' in group:
+ old_group_ref = self.get_group(group_id)
+ self._check_update_of_domain_id(group['domain_id'],
+ old_group_ref['domain_id'])
self.resource_api.get_domain(group['domain_id'])
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
@@ -1012,9 +1063,13 @@ class Manager(manager.Manager):
for uid in user_ids:
self.emit_invalidate_user_token_persistence(uid)
+ # Invalidate user role assignments cache region, as it may be caching
+ # role assignments expanded from the specified group to its users
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
@domains_configured
@exception_translated('group')
- def add_user_to_group(self, user_id, group_id):
+ def add_user_to_group(self, user_id, group_id, initiator=None):
@exception_translated('user')
def get_entity_info_for_user(public_id):
return self._get_domain_driver_and_entity_id(public_id)
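
Several hunks in this region invalidate `assignment.COMPUTED_ASSIGNMENTS_REGION` whenever a user is deleted or group membership changes, because cached expansions of group-based role assignments would otherwise go stale. The same region-wide invalidation pattern in plain dogpile.cache (the library keystone's caching is layered over); `expensive_lookup` and `do_membership_change` are hypothetical:

    from dogpile.cache import make_region

    region = make_region().configure('dogpile.cache.memory')

    @region.cache_on_arguments()
    def computed_assignments(user_id):
        # Stands in for the expensive expansion of role assignments.
        return expensive_lookup(user_id)

    def add_user_to_group(user_id, group_id):
        do_membership_change(user_id, group_id)
        # Cached expansions may now be wrong for any user, so drop the
        # whole region rather than hunting for individual keys.
        region.invalidate()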
@@ -1031,9 +1086,15 @@ class Manager(manager.Manager):
group_driver.add_user_to_group(user_entity_id, group_entity_id)
+ # Invalidate user role assignments cache region, as it may now need to
+ # include role assignments from the specified group to its users
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+ notifications.Audit.added_to(self._GROUP, group_id, self._USER,
+ user_id, initiator)
+
@domains_configured
@exception_translated('group')
- def remove_user_from_group(self, user_id, group_id):
+ def remove_user_from_group(self, user_id, group_id, initiator=None):
@exception_translated('user')
def get_entity_info_for_user(public_id):
return self._get_domain_driver_and_entity_id(public_id)
@@ -1051,7 +1112,12 @@ class Manager(manager.Manager):
group_driver.remove_user_from_group(user_entity_id, group_entity_id)
self.emit_invalidate_user_token_persistence(user_id)
- @notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
+ # Invalidate user role assignments cache region, as it may be caching
+ # role assignments expanded from this group to this user
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+ notifications.Audit.removed_from(self._GROUP, group_id, self._USER,
+ user_id, initiator)
+
def emit_invalidate_user_token_persistence(self, user_id):
"""Emit a notification to the callback system to revoke user tokens.
@@ -1061,10 +1127,10 @@ class Manager(manager.Manager):
:param user_id: user identifier
:type user_id: string
"""
- pass
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, user_id
+ )
- @notifications.internal(
- notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
def emit_invalidate_grant_token_persistence(self, user_project):
"""Emit a notification to the callback system to revoke grant tokens.
@@ -1074,14 +1140,17 @@ class Manager(manager.Manager):
:param user_project: {'user_id': user_id, 'project_id': project_id}
:type user_project: dict
"""
- pass
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
+ user_project
+ )
- @manager.response_truncated
@domains_configured
@exception_translated('user')
def list_groups_for_user(self, user_id, hints=None):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(user_id))
+ self._set_list_limit_in_hints(hints, driver)
hints = hints or driver_hints.Hints()
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
@@ -1091,11 +1160,11 @@ class Manager(manager.Manager):
return self._set_domain_id_and_mapping(
ref_list, domain_id, driver, mapping.EntityType.GROUP)
- @manager.response_truncated
@domains_configured
@exception_translated('group')
def list_groups(self, domain_scope=None, hints=None):
driver = self._select_identity_driver(domain_scope)
+ self._set_list_limit_in_hints(hints, driver)
hints = hints or driver_hints.Hints()
if driver.is_domain_aware():
# Force the domain_scope into the hint to ensure that we only get
@@ -1109,12 +1178,12 @@ class Manager(manager.Manager):
return self._set_domain_id_and_mapping(
ref_list, domain_scope, driver, mapping.EntityType.GROUP)
- @manager.response_truncated
@domains_configured
@exception_translated('group')
def list_users_in_group(self, group_id, hints=None):
domain_id, driver, entity_id = (
self._get_domain_driver_and_entity_id(group_id))
+ self._set_list_limit_in_hints(hints, driver)
hints = hints or driver_hints.Hints()
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
@@ -1154,18 +1223,62 @@ class Manager(manager.Manager):
update_dict = {'password': new_password}
self.update_user(user_id, update_dict)
+ @MEMOIZE
+ def shadow_federated_user(self, idp_id, protocol_id, unique_id,
+ display_name):
+ """Shadows a federated user by mapping to a user.
+
+ :param idp_id: identity provider id
+ :param protocol_id: protocol id
+ :param unique_id: unique id for the user within the IdP
+ :param display_name: user's display name
+
+ :returns: dictionary of the mapped User entity
+ """
+ user_dict = {}
+ try:
+ self.shadow_users_api.update_federated_user_display_name(
+ idp_id, protocol_id, unique_id, display_name)
+ user_dict = self.shadow_users_api.get_federated_user(
+ idp_id, protocol_id, unique_id)
+ except exception.UserNotFound:
+ federated_dict = {
+ 'idp_id': idp_id,
+ 'protocol_id': protocol_id,
+ 'unique_id': unique_id,
+ 'display_name': display_name
+ }
+ user_dict = self.shadow_users_api.create_federated_user(
+ federated_dict)
+ return user_dict
+
@six.add_metaclass(abc.ABCMeta)
class IdentityDriverV8(object):
"""Interface description for an Identity driver."""
+ def _get_conf(self):
+ try:
+ return self.conf or CONF
+ except AttributeError:
+ return CONF
+
def _get_list_limit(self):
- return CONF.identity.list_limit or CONF.list_limit
+ conf = self._get_conf()
+ # use list_limit from domain-specific config. If list_limit in
+ # domain-specific config is not set, look it up in the default config
+ return (conf.identity.list_limit or conf.list_limit or
+ CONF.identity.list_limit or CONF.list_limit)
def is_domain_aware(self):
"""Indicates if Driver supports domains."""
return True
+ def default_assignment_driver(self):
+ # TODO(morganfainberg): To be removed when assignment driver based
+ # upon [identity]/driver option is removed in the "O" release.
+ return 'sql'
+
@property
def is_sql(self):
"""Indicates if this Driver uses SQL."""
@@ -1183,8 +1296,9 @@ class IdentityDriverV8(object):
@abc.abstractmethod
def authenticate(self, user_id, password):
"""Authenticate a given user and password.
+
:returns: user_ref
- :raises: AssertionError
+ :raises AssertionError: If user or password is invalid.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1194,7 +1308,7 @@ class IdentityDriverV8(object):
def create_user(self, user_id, user):
"""Creates a new user.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate user exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1229,7 +1343,7 @@ class IdentityDriverV8(object):
"""Get a user by ID.
:returns: user_ref
- :raises: keystone.exception.UserNotFound
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1238,8 +1352,8 @@ class IdentityDriverV8(object):
def update_user(self, user_id, user):
"""Updates an existing user.
- :raises: keystone.exception.UserNotFound,
- keystone.exception.Conflict
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
+ :raises keystone.exception.Conflict: If a duplicate user exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1248,8 +1362,8 @@ class IdentityDriverV8(object):
def add_user_to_group(self, user_id, group_id):
"""Adds a user to a group.
- :raises: keystone.exception.UserNotFound,
- keystone.exception.GroupNotFound
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1258,8 +1372,8 @@ class IdentityDriverV8(object):
def check_user_in_group(self, user_id, group_id):
"""Checks if a user is a member of a group.
- :raises: keystone.exception.UserNotFound,
- keystone.exception.GroupNotFound
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1268,7 +1382,7 @@ class IdentityDriverV8(object):
def remove_user_from_group(self, user_id, group_id):
"""Removes a user from a group.
- :raises: keystone.exception.NotFound
+ :raises keystone.exception.NotFound: If the entity is not found.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1277,7 +1391,7 @@ class IdentityDriverV8(object):
def delete_user(self, user_id):
"""Deletes an existing user.
- :raises: keystone.exception.UserNotFound
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1287,7 +1401,7 @@ class IdentityDriverV8(object):
"""Get a user by name.
:returns: user_ref
- :raises: keystone.exception.UserNotFound
+ :raises keystone.exception.UserNotFound: If the user doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1298,7 +1412,7 @@ class IdentityDriverV8(object):
def create_group(self, group_id, group):
"""Creates a new group.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate group exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1333,7 +1447,7 @@ class IdentityDriverV8(object):
"""Get a group by ID.
:returns: group_ref
- :raises: keystone.exception.GroupNotFound
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1343,7 +1457,7 @@ class IdentityDriverV8(object):
"""Get a group by name.
:returns: group_ref
- :raises: keystone.exception.GroupNotFound
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1352,8 +1466,8 @@ class IdentityDriverV8(object):
def update_group(self, group_id, group):
"""Updates an existing group.
- :raises: keystone.exceptionGroupNotFound,
- keystone.exception.Conflict
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
+ :raises keystone.exception.Conflict: If a duplicate group exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -1362,7 +1476,7 @@ class IdentityDriverV8(object):
def delete_group(self, group_id):
"""Deletes an existing group.
- :raises: keystone.exception.GroupNotFound
+ :raises keystone.exception.GroupNotFound: If the group doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
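
The long run of docstring hunks above converts bare `:raises:` fields to the typed Sphinx form `:raises ExceptionType: reason`, which documents each exception separately and lets Sphinx cross-reference the exception class. The two shapes side by side, sketched:

    def update_group_old(group_id, group):
        """Update an existing group.

        :raises: keystone.exception.GroupNotFound,
                 keystone.exception.Conflict
        """

    def update_group(group_id, group):
        """Update an existing group.

        :raises keystone.exception.GroupNotFound: If the group doesn't exist.
        :raises keystone.exception.Conflict: If a duplicate group exists.
        """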
@@ -1446,3 +1560,54 @@ class MappingDriverV8(object):
MappingDriver = manager.create_legacy_driver(MappingDriverV8)
+
+
+@dependency.provider('shadow_users_api')
+class ShadowUsersManager(manager.Manager):
+ """Default pivot point for the Shadow Users backend."""
+
+ driver_namespace = 'keystone.identity.shadow_users'
+
+ def __init__(self):
+ super(ShadowUsersManager, self).__init__(CONF.shadow_users.driver)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ShadowUsersDriverV9(object):
+ """Interface description for an Shadow Users driver."""
+
+ @abc.abstractmethod
+ def create_federated_user(self, federated_dict):
+ """Create a new user with the federated identity
+
+ :param dict federated_dict: Reference to the federated user
+ :param user_id: user ID for linking to the federated identity
+ :returns dict: Containing the user reference
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def get_federated_user(self, idp_id, protocol_id, unique_id):
+ """Returns the found user for the federated identity
+
+ :param idp_id: The identity provider ID
+ :param protocol_id: The federation protocol ID
+ :param unique_id: The unique ID for the user
+ :returns dict: Containing the user reference
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def update_federated_user_display_name(self, idp_id, protocol_id,
+ unique_id, display_name):
+ """Updates federated user's display name if changed
+
+ :param idp_id: The identity provider ID
+ :param protocol_id: The federation protocol ID
+ :param unique_id: The unique ID for the user
+ :param display_name: The user's display name
+
+ """
+ raise exception.NotImplemented()
diff --git a/keystone-moon/keystone/identity/mapping_backends/sql.py b/keystone-moon/keystone/identity/mapping_backends/sql.py
index 7ab4ef52..91b33dd7 100644
--- a/keystone-moon/keystone/identity/mapping_backends/sql.py
+++ b/keystone-moon/keystone/identity/mapping_backends/sql.py
@@ -23,7 +23,7 @@ class IDMapping(sql.ModelBase, sql.ModelDictMixin):
public_id = sql.Column(sql.String(64), primary_key=True)
domain_id = sql.Column(sql.String(64), nullable=False)
local_id = sql.Column(sql.String(64), nullable=False)
- # NOTE(henry-nash); Postgres requires a name to be defined for an Enum
+ # NOTE(henry-nash): Postgres requires a name to be defined for an Enum
entity_type = sql.Column(
sql.Enum(identity_mapping.EntityType.USER,
identity_mapping.EntityType.GROUP,
@@ -32,7 +32,7 @@ class IDMapping(sql.ModelBase, sql.ModelDictMixin):
# Unique constraint to ensure you can't store more than one mapping to the
# same underlying values
__table_args__ = (
- sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'), {})
+ sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'),)
@dependency.requires('id_generator_api')
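
The `__table_args__` hunk drops a stray empty dict: SQLAlchemy accepts either a plain tuple of constraints or a tuple whose final element is a dict of table keyword arguments, so `(..., {})` and `(...,)` are equivalent. A self-contained sketch (illustrative model, not keystone's):

    from sqlalchemy import Column, String, UniqueConstraint
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class IDMappingSketch(Base):
        __tablename__ = 'id_mapping_sketch'
        public_id = Column(String(64), primary_key=True)
        domain_id = Column(String(64), nullable=False)
        local_id = Column(String(64), nullable=False)
        entity_type = Column(String(10), nullable=False)
        # A one-element tuple is enough; append a trailing dict only when
        # table kwargs are actually needed, e.g. {'mysql_engine': 'InnoDB'}.
        __table_args__ = (
            UniqueConstraint('domain_id', 'local_id', 'entity_type'),
        )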
@@ -45,27 +45,27 @@ class Mapping(identity.MappingDriverV8):
# work if we hashed all the entries, even those that already generate
# UUIDs, like SQL. Further, this would only work if the generation
# algorithm was immutable (e.g. it had always been sha256).
- session = sql.get_session()
- query = session.query(IDMapping.public_id)
- query = query.filter_by(domain_id=local_entity['domain_id'])
- query = query.filter_by(local_id=local_entity['local_id'])
- query = query.filter_by(entity_type=local_entity['entity_type'])
- try:
- public_ref = query.one()
- public_id = public_ref.public_id
- return public_id
- except sql.NotFound:
- return None
+ with sql.session_for_read() as session:
+ query = session.query(IDMapping.public_id)
+ query = query.filter_by(domain_id=local_entity['domain_id'])
+ query = query.filter_by(local_id=local_entity['local_id'])
+ query = query.filter_by(entity_type=local_entity['entity_type'])
+ try:
+ public_ref = query.one()
+ public_id = public_ref.public_id
+ return public_id
+ except sql.NotFound:
+ return None
def get_id_mapping(self, public_id):
- session = sql.get_session()
- mapping_ref = session.query(IDMapping).get(public_id)
- if mapping_ref:
- return mapping_ref.to_dict()
+ with sql.session_for_read() as session:
+ mapping_ref = session.query(IDMapping).get(public_id)
+ if mapping_ref:
+ return mapping_ref.to_dict()
def create_id_mapping(self, local_entity, public_id=None):
entity = local_entity.copy()
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
if public_id is None:
public_id = self.id_generator_api.generate_public_ID(entity)
entity['public_id'] = public_id
@@ -74,24 +74,25 @@ class Mapping(identity.MappingDriverV8):
return public_id
def delete_id_mapping(self, public_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
try:
session.query(IDMapping).filter(
IDMapping.public_id == public_id).delete()
- except sql.NotFound:
+ except sql.NotFound: # nosec
# NOTE(morganfainberg): There is nothing to delete and nothing
# to do.
pass
def purge_mappings(self, purge_filter):
- session = sql.get_session()
- query = session.query(IDMapping)
- if 'domain_id' in purge_filter:
- query = query.filter_by(domain_id=purge_filter['domain_id'])
- if 'public_id' in purge_filter:
- query = query.filter_by(public_id=purge_filter['public_id'])
- if 'local_id' in purge_filter:
- query = query.filter_by(local_id=purge_filter['local_id'])
- if 'entity_type' in purge_filter:
- query = query.filter_by(entity_type=purge_filter['entity_type'])
- query.delete()
+ with sql.session_for_write() as session:
+ query = session.query(IDMapping)
+ if 'domain_id' in purge_filter:
+ query = query.filter_by(domain_id=purge_filter['domain_id'])
+ if 'public_id' in purge_filter:
+ query = query.filter_by(public_id=purge_filter['public_id'])
+ if 'local_id' in purge_filter:
+ query = query.filter_by(local_id=purge_filter['local_id'])
+ if 'entity_type' in purge_filter:
+ query = query.filter_by(
+ entity_type=purge_filter['entity_type'])
+ query.delete()
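
`purge_mappings` now builds its filtered DELETE inside a single write session, with each recognized key narrowing the query. A hypothetical call, where `mapping_driver` stands for an instance of the Mapping driver above:

    domain_id = '92e782c4b31c4f41a29f7c455c8b8617'  # illustrative value

    # Purge every stored ID mapping belonging to one domain.
    mapping_driver.purge_mappings({'domain_id': domain_id})

    # Narrower purge: one local user in that domain (assumption: the
    # EntityType.USER constant serializes as 'user').
    mapping_driver.purge_mappings({
        'domain_id': domain_id,
        'local_id': 'jsmith',
        'entity_type': 'user',
    })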
diff --git a/keystone-moon/keystone/identity/shadow_backends/__init__.py b/keystone-moon/keystone/identity/shadow_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/identity/shadow_backends/__init__.py
diff --git a/keystone-moon/keystone/identity/shadow_backends/sql.py b/keystone-moon/keystone/identity/shadow_backends/sql.py
new file mode 100644
index 00000000..af5a995b
--- /dev/null
+++ b/keystone-moon/keystone/identity/shadow_backends/sql.py
@@ -0,0 +1,73 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.common import sql
+from keystone import exception
+from keystone import identity
+from keystone.identity.backends import sql as model
+
+
+class ShadowUsers(identity.ShadowUsersDriverV9):
+ @sql.handle_conflicts(conflict_type='federated_user')
+ def create_federated_user(self, federated_dict):
+ user = {
+ 'id': uuid.uuid4().hex,
+ 'enabled': True
+ }
+ with sql.session_for_write() as session:
+ federated_ref = model.FederatedUser.from_dict(federated_dict)
+ user_ref = model.User.from_dict(user)
+ user_ref.federated_users.append(federated_ref)
+ session.add(user_ref)
+ return identity.filter_user(user_ref.to_dict())
+
+ def get_federated_user(self, idp_id, protocol_id, unique_id):
+ user_ref = self._get_federated_user(idp_id, protocol_id, unique_id)
+ return identity.filter_user(user_ref.to_dict())
+
+ def _get_federated_user(self, idp_id, protocol_id, unique_id):
+ """Returns the found user for the federated identity
+
+ :param idp_id: The identity provider ID
+ :param protocol_id: The federation protocol ID
+ :param unique_id: The user's unique ID (unique within the IdP)
+ :returns User: Returns a reference to the User
+
+ """
+ with sql.session_for_read() as session:
+ query = session.query(model.User).outerjoin(model.LocalUser)
+ query = query.join(model.FederatedUser)
+ query = query.filter(model.FederatedUser.idp_id == idp_id)
+ query = query.filter(model.FederatedUser.protocol_id ==
+ protocol_id)
+ query = query.filter(model.FederatedUser.unique_id == unique_id)
+ try:
+ user_ref = query.one()
+ except sql.NotFound:
+ raise exception.UserNotFound(user_id=unique_id)
+ return user_ref
+
+ @sql.handle_conflicts(conflict_type='federated_user')
+ def update_federated_user_display_name(self, idp_id, protocol_id,
+ unique_id, display_name):
+ with sql.session_for_write() as session:
+ query = session.query(model.FederatedUser)
+ query = query.filter(model.FederatedUser.idp_id == idp_id)
+ query = query.filter(model.FederatedUser.protocol_id ==
+ protocol_id)
+ query = query.filter(model.FederatedUser.unique_id == unique_id)
+ query = query.filter(model.FederatedUser.display_name !=
+ display_name)
+ query.update({'display_name': display_name})
+ return
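
Together with `shadow_federated_user` in core.py above, this driver implements get-or-create for federated identities: update the display name and fetch the user, and only on `UserNotFound` create a fresh shadow record. A hypothetical first-login call showing the dict shape the driver consumes:

    federated_dict = {
        'idp_id': 'myidp',            # illustrative values throughout
        'protocol_id': 'saml2',
        'unique_id': 'jsmith@example.com',
        'display_name': 'Jane Smith',
    }
    driver = ShadowUsers()  # the SQL driver defined above
    user = driver.create_federated_user(federated_dict)
    # A new enabled keystone user with a generated ID comes back,
    # filtered of sensitive fields.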
diff --git a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
index a9cfc70a..9f77b841 100644
--- a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: German\n"
-"Language: de\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone.po
index 6f860754..71503a36 100644
--- a/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/de/LC_MESSAGES/keystone.po
@@ -1,4 +1,4 @@
-# German translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
@@ -6,28 +6,40 @@
# Ettore Atalan <atalanttore@googlemail.com>, 2014
# Robert Simai, 2014
# Reik Keutterling <spielkind@gmail.com>, 2015
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Frank Kloeker <eumel@arcor.de>, 2016. #zanata
+# Monika Wolf <vcomas3@de.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: de\n"
-"Language-Team: German\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-03-24 03:13+0000\n"
+"Last-Translator: Monika Wolf <vcomas3@de.ibm.com>\n"
+"Language: de\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s ist keine unterstützte Treiberversion."
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr ""
+"Der %(entity)s-Name darf nicht die folgenden reservierten Zeichen enthalten: "
+"%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -51,6 +63,10 @@ msgstr ""
"Pfad unter %(path)s ist nicht vorhanden oder ist kein Verzeichnis."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s impliziert nicht %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s darf nicht kleiner als %(min_length)s Zeichen sein."
@@ -63,6 +79,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s sollte nicht größer als %(max_length)s Zeichen sein."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s darf keine implizierte Rolle sein"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s darf nicht leer sein."
@@ -78,15 +98,24 @@ msgstr "%s-Feld ist erforderlich und darf nicht leer sein"
msgid "%s field(s) cannot be empty"
msgstr "%s-Felder können nicht leer sein"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Debugmodus inaktivieren, um diese Details zu unterdrücken.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"%s für das LDAP-ID-Back-End wurde in Mitaka zugunsten des schreibgeschützten "
+"ID-LDAP-Zugriffs eingestellt und wird im \"O\"-Release entfernt."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(Modus insecure_debug inaktivieren, um diese Details zu unterdrücken.)"
msgid "--all option cannot be mixed with other options"
msgstr "--all-Option kann nicht zusammen mit anderen Optionen verwendet werden"
msgid "A project-scoped token is required to produce a service catalog."
msgstr ""
-"Ein projektorientiertes Token ist zum Produzieren eines Servicekatalogs "
+"Ein projektorientiertes Token ist zum Produzieren eines Dienstekatalogs "
"erforderlich."
msgid "Access token is expired"
@@ -136,6 +165,17 @@ msgstr ""
msgid "At least one role should be specified."
msgstr "Mindestens eine Rolle sollte angegeben werden."
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"Der Versuch, für die Zuordnung den Treiber basierend auf der Option "
+"[identity]\\driver automatisch auszuwählen, ist fehlgeschlagen, da der "
+"Treiber %s nicht gefunden wurde. Setzen Sie die Option [assignment]/driver "
+"in der Keystone-Konfiguration auf einen gültigen Treiber."
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "Versuch einer Authentifizierung mit einer nicht unterstützten Methode."
@@ -147,7 +187,15 @@ msgstr ""
"Sie v3- Authentifizierung"
msgid "Authentication plugin error."
-msgstr "Authentifizierung-Plug-in-Fehler"
+msgstr "Authentifizierung-Plugin-Fehler"
+
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"Back-End '%(backend)s' ist kein gültiges memcached Back-End. Gültige Back-"
+"Ends: %(backend_list)s"
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
@@ -161,9 +209,6 @@ msgstr "%(option_name)s %(attr)s kann nicht geändert werden"
msgid "Cannot change Domain ID"
msgstr "Die Domänen-ID kann nicht geändert werden"
-msgid "Cannot change consumer secret"
-msgstr "Konsumentengeheimnis kann nicht geändert werden"
-
msgid "Cannot change user ID"
msgstr "Benutzer-ID kann nicht geändert werden"
@@ -171,14 +216,76 @@ msgid "Cannot change user name"
msgstr "Benutzername kann nicht geändert werden"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr ""
+"Es kann kein Endpunkt mit einer ungültigen URL erstellt werden: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr ""
"Projekt kann nicht mit dem übergeordneten Element %(project_id)s erstellt "
"werden"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "Der Name %s kann nicht dupliziert werden."
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"Das Projekt kann nicht erstellt werden, da es den zugehörigen Eigner als "
+"Domäne %(domain_id)s angibt, jedoch ein übergeordnetes Projekt in einer "
+"anderen Domäne (%(parent_domain_id)s) angibt."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"Das Projekt kann nicht erstellt werden, da das zugehörige übergeordnete "
+"Projekt (%(domain_id)s) als Domäne fungiert, aber die für das Projekt "
+"angegebene 'parent_id' (%(parent_id)s) nicht mit dieser 'domain_id' "
+"übereinstimmt."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"Eine aktivierte Domäne kann nicht gelöscht werden. Deaktivieren Sie sie "
+"zuerst."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Kann Projekt %(project_id)s nicht löschen, da die zugehörige untergeordnete "
+"Baumstruktur aktivierte Projekte enthält."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"Das Projekt %s kann nicht gelöscht werden, da es kein Blattelement in der "
+"Hierarchie darstellt. Verwenden Sie die Option 'cascade', wenn Sie eine "
+"vollständige, untergeordnete Baumstruktur löschen möchten. "
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Kann Projekt %(project_id)s nicht deaktivieren, da die zugehörige "
+"untergeordnete Baumstruktur aktivierte Projekte enthält."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr ""
+"Kann Projekt %s nicht aktivieren, da es über inaktivierte übergeordnete "
+"Projekte verfügt"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"Aus Gruppen erstellte und nach Benutzer-ID gefilterte Zuordnungen können "
+"nicht aufgelistet werden."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
@@ -201,6 +308,11 @@ msgstr ""
"Abschneiden eines Treiberaufrufs ohne Hinweisliste als erstem Parameter nach "
"dem Treiber nicht möglich "
+msgid "Cannot update domain_id of a project that has children."
+msgstr ""
+"Die Aktualisierung von 'domain_id' eines Projekts mit untergeordneten "
+"Projekten ist nicht möglich."
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -213,6 +325,10 @@ msgstr ""
"Die Abfrageparameter subtree_as_list und subtree_as_ids können nicht "
"gleichzeitig verwendet werden."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr ""
+"Die Aktualisierungsweitergabe ist nur für aktivierte Attribute zulässig."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
@@ -227,6 +343,10 @@ msgstr ""
"führt immer zu einer leeren Liste."
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "Konfigurations-API-Entität unter /domains/%s/config"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "Konflikt beim Versuch, %(type)s zu speichern - %(details)s"
@@ -248,6 +368,14 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"Identitätsprovider-ID nicht gefunden. Die Konfigurationsoption "
+"%(issuer_attribute)s wurde in der Anforderungsumgebung nicht gefunden."
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -312,9 +440,6 @@ msgstr "Projekt %(project_id)s konnte nicht gefunden werden"
msgid "Could not find region: %(region_id)s"
msgstr "Region %(region_id)s konnte nicht gefunden werden"
-msgid "Could not find role"
-msgstr "Rolle konnte nicht gefunden werden"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -329,7 +454,7 @@ msgstr "Rolle %(role_id)s konnte nicht gefunden werden"
#, python-format
msgid "Could not find service: %(service_id)s"
-msgstr "Service %(service_id)s konnte nicht gefunden werden"
+msgstr "Dienst %(service_id)s konnte nicht gefunden werden"
#, python-format
msgid "Could not find token: %(token_id)s"
@@ -351,15 +476,49 @@ msgstr "Version %(version)s konnte nicht gefunden werden"
msgid "Could not find: %(target)s"
msgstr "Konnte nicht gefunden werden: %(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"Es konnten keine eingebundenen Benutzereigenschaften Identitätswerten "
+"zugeordnet werden. Überprüfen Sie die Debugprotokolle oder die verwendete "
+"Zuordnung, um weitere Details zu erhalten."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"Benutzer konnte beim Festlegen der ephemeren Benutzeridentität nicht "
+"zugeordnet werden. Entweder muss in Zuordnungsregeln Benutzer-ID/Name "
+"angegeben werden oder Umgebungsvariable REMOTE_USER muss festgelegt werden."
+
msgid "Could not validate the access token"
msgstr "Das Zugriffstoken konnte nicht geprüft werden"
msgid "Credential belongs to another user"
msgstr "Berechtigungsnachweis gehört einem anderen Benutzer"
+msgid "Credential signature mismatch"
+msgstr "Ãœbereinstimmungsfehler bei Berechtigungssignatur"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "Datenbank unter /domains/%s/config"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"Der direkte Import des Authentifizierungsplugins %(name)r wird zugunsten des "
+"zugehörigen Einstiegspunkts aus %(namespace)r seit Liberty nicht mehr "
+"unterstützt und wird möglicherweise im N-Release entfernt."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"Der direkte Import des Treibers %(name)r wird zugunsten des zugehörigen "
+"Einstiegspunkts aus %(namespace)r seit Liberty nicht mehr unterstützt und "
+"wird möglicherweise im N-Release entfernt."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -382,12 +541,16 @@ msgstr "Domäne kann nicht die ID %s haben"
msgid "Domain is disabled: %s"
msgstr "Domäne ist inaktiviert: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Domänenmetadaten werden von LDAP nicht unterstützt"
+msgid "Domain name cannot contain reserved characters."
+msgstr "Der Domänenname darf keine reservierten Zeichen enthalten."
msgid "Domain scoped token is not supported"
msgstr "Bereichsorientiertes Token der Domäne wird nicht unterstützt"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr ""
+"Domänenspezifische rollen werden im V8-Rollentreiber nicht unterstützt."
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
@@ -396,9 +559,6 @@ msgstr ""
"Domäne: für %(domain)s ist bereits eine Konfiguration definiert - Datei wird "
"ignoriert: %(file)s."
-msgid "Domains are read-only against LDAP"
-msgstr "Domänen sind für LDAP schreibgeschützt"
-
msgid "Duplicate Entry"
msgstr "Doppelter Eintrag"
@@ -407,9 +567,29 @@ msgid "Duplicate ID, %s."
msgstr "Doppelte ID, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "Doppelter Eintrag: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "Doppelter Name, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "Doppelte ferne ID: %s"
+
+msgid "EC2 access key not found."
+msgstr "EC2 Zugriffsschlüssel nicht gefunden."
+
+msgid "EC2 signature not supplied."
+msgstr "EC2-Signatur nicht angegeben."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"Es muss entweder das Argument --bootstrap-password oder "
+"OS_BOOTSTRAP_PASSWORD gesetzt werden."
+
msgid "Enabled field must be a boolean"
msgstr "Das Feld 'Aktiviert' muss ein boolescher Wert sein"
@@ -440,10 +620,32 @@ msgstr ""
"Datei: %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Fehler beim Öffnen der Datei %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Fehler beim Parsing der Zeile '%(line)s': %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Fehler beim Parsing der Regeln %(path)s: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "Fehler beim Lesen der Metadatendatei, %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Die maximal zulässige Anzahl an Versuchen, die Domäne %(domain)s für die "
+"Verwendung des SQL-Treibers zu registrieren, wurde überschritten. Die letzte "
+"Domäne, bei der die Registrierung erfolgreich gewesen zu sein scheint, war "
+"%(last_domain)s. Abbruch."
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "Verzeichnis oder Liste erwartet: %s"
@@ -487,6 +689,10 @@ msgstr ""
"Ungültiges Token gefunden. Es ist sowohl projekt- als auch domänenorientiert."
#, python-format
+msgid "Group %s not found in config"
+msgstr "Die Gruppe %s wurde nicht in der Konfiguration gefunden."
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
"Gruppe %(group)s wird für domänenspezifische Konfigurationen nicht "
@@ -522,6 +728,9 @@ msgid ""
msgstr ""
"Eingehende Identitätsprovider-ID ist nicht in den akzeptierten IDs enthalten."
+msgid "Invalid EC2 signature."
+msgstr "Ungültige EC2-Signatur."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
@@ -576,7 +785,7 @@ msgid ""
"%(service_id)s, Region: %(region_id)s"
msgstr ""
"Ungültige Mischung von Entitäten für Richtlinienzuordnung - nur Endpunkt, "
-"Service oder Region+Service zulässig. Anforderung war - Endpunkt: "
+"Dienst oder Region+Dienst zulässig. Anforderung war - Endpunkt: "
"%(endpoint_id)s, Service: %(service_id)s, Region: %(region_id)s"
#, python-format
@@ -590,17 +799,12 @@ msgstr ""
msgid "Invalid signature"
msgstr "Ungültige Signatur"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"Ungültiger Wert %s für ssl_cert_reqs, muss lauten \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-
msgid "Invalid user / password"
msgstr "Ungültiger Benutzer / Passwort"
+msgid "Invalid username or TOTP passcode"
+msgstr "Ungültiger Benutzername oder TOTP-Kenncode"
+
msgid "Invalid username or password"
msgstr "Ungültiger Benutzername oder ungültiges Passwort."
@@ -624,6 +828,21 @@ msgstr "LDAP %s löschen"
msgid "LDAP %s update"
msgstr "LDAP %s aktualisieren"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"Länge der transformierbaren Ressourcen-ID liegt über der maximal zulässigen "
+"Anzahl von 64 Zeichen. "
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"Der lokale Abschnitt in der Zuordnung %(mapping_id)s bezieht sich auf eine "
+"ferne Ãœbereinstimmung, die nicht vorhanden ist (z. B. '{0}' in einem lokalen "
+"Abschnitt)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "Überschreitung der Sperrzeit aufgetreten für Schlüssel %(target)s"
@@ -642,6 +861,10 @@ msgid "Marker could not be found"
msgstr "Marker konnte nicht gefunden werden"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "Die maximale Hierarchietiefe für den %s-Branch wurde erreicht."
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "Maximale Anzahl an Sperrversuchen auf %s erfolgt."
@@ -675,11 +898,14 @@ msgstr "Entweder Domäne oder Projekt muss angegeben werden"
msgid "Name field is required and cannot be empty"
msgstr "Namensfeld ist erforderlich und darf nicht leer sein"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "Weder Projektdomänen-ID noch Projektdomänenname wurde angegeben."
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
msgstr ""
-"Keine Autorisierungskopfzeilen gefunden, zu OAuth zugehörige Aufrufe können "
+"Keine Authorisierungskopfzeilen gefunden, zu OAuth zugehörige Aufrufe können "
"nicht fortgesetzt werden. Stellen Sie bei Ausführung unter HTTPd oder Apache "
"sicher, dass WSGIPassAuthorization auf 'On' gesetzt ist."
@@ -703,11 +929,14 @@ msgstr "Endpunkt %(endpoint_id)s ist keine Richtlinie zugeordnet. "
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "Keine verbleibende Verwendung für Vertrauensbeziehung %(trust_id)s"
+msgid "No token in the request"
+msgstr "Kein Token in der Anforderung"
+
msgid "Non-default domain is not supported"
msgstr "Nicht-Standard-Domäne wird nicht unterstützt"
msgid "One of the trust agents is disabled or deleted"
-msgstr "Einer der Vertrauensagenten wurde inaktiviert oder gelöscht"
+msgstr "Einer der Vertrauensagenten wurde deaktiviert oder gelöscht"
#, python-format
msgid ""
@@ -730,9 +959,29 @@ msgid "Project (%s)"
msgstr "Projekt (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "Projekt-ID nicht gefunden: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "Projektfeld ist erforderlich und darf nicht leer sein."
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "Projekt ist inaktiviert: %s"
+msgid "Project name cannot contain reserved characters."
+msgstr "Der Projektname darf keine reservierten Zeichen enthalten."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "Abfragezeichenfolge ist nicht UTF-8-codiert"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"Lesen des Standardwerts für die Option %(option)s in der Gruppe %(group)s "
+"wird nicht unterstützt."
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "Redelegation nur zulässig für im Vertrauen redelegierte"
@@ -744,6 +993,78 @@ msgstr ""
"Verbleibende Redelegationstiefe von %(redelegation_depth)d aus dem "
"zulässigen Bereich von [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'admin_crud_extension' aus der Einfügepipeline. "
+"'admin_crud_extension' ist jetzt immer verfügbar. Aktualisieren Sie den "
+"Abschnitt [pipeline:admin_api] in der Datei 'keystone-paste.ini' "
+"entsprechend, da er im 'O'-Release entfernt wird. "
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Entfernen Sie 'endpoint_filter_extension' aus der Einfügepipeline. Die "
+"Endpunktfiltererweiterung ist jetzt immer verfügbar. Aktualisieren Sie den "
+"Abschnitt [pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, "
+"da er im 'O'-Release entfernt wird."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'federation_extension' aus der Einfügepipeline. Sie ist jetzt "
+"immer verfügbar. Aktualisieren Sie den Abschnitt [pipeline:api_v3] in der "
+"Datei 'keystone-paste.ini' entsprechend, da er im 'O'-Release entfernt wird."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'oauth1_extension' aus der Einfügepipeline. Die oauth1-"
+"Erweiterung ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt "
+"[pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im "
+"'O'-Release entfernt wird."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'revoke_extension' aus der Einfügepipeline. Die revoke-"
+"Erweiterung ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt "
+"[pipeline:api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im "
+"'O'-Release entfernt wird. "
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'simple_cert' aus der Einfügepipeline. Die PKI- und PKIz-Token-"
+"Provider sind jetzt veraltet und 'simple_cert' wurde nur zur Unterstützung "
+"dieser Token-Provider verwendet. Aktualisieren Sie den Abschnitt [pipeline:"
+"api_v3] in der Datei 'keystone-paste.ini' entsprechend, da er im 'O'-Release "
+"entfernt wird."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Entfernen Sie 'user_crud_extension' aus der Einfügepipeline. 'user_crud "
+"extension' ist jetzt immer verfügbar. Aktualisieren Sie den Abschnitt "
+"[pipeline:public_api] in der Datei 'keystone-paste.ini' entsprechend, da er "
+"im 'O'-Release entfernt wird."
+
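All seven "Remove ... from the paste pipeline" messages above describe the same mechanical edit to keystone-paste.ini: delete the named filter from the pipeline line of the section the message names. A minimal sketch for the admin_crud case, assuming placeholder names for the neighbouring filters (the exact stock Mitaka pipeline differs):

    [pipeline:admin_api]
    # Before: the deprecated filter is still listed (request_id, json_body
    # and admin_service stand in for the real neighbouring entries).
    #pipeline = request_id json_body admin_crud_extension admin_service
    # After: admin_crud lives in keystone core, so its filter is dropped.
    pipeline = request_id json_body admin_service

The [pipeline:api_v3] and [pipeline:public_api] edits named in the other messages follow the same pattern.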
msgid "Request Token does not have an authorizing user id"
msgstr "Anforderungstoken weist keine autorisierte Benutzer-ID auf"
@@ -779,10 +1100,6 @@ msgstr ""
"Die angeforderte Redelegationstiefe von %(requested_count)d übersteigt den "
"zulässigen Wert von %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "Rolle %s nicht gefunden"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -831,6 +1148,29 @@ msgstr ""
"Zeichenfolgelänge überschritten. Die Länge der Zeichenfolge '%(string)s' hat "
"den Grenzwert von Spalte %(type)s(CHAR(%(length)d)) überschritten."
+msgid "Tenant name cannot contain reserved characters."
+msgstr "Der Name des Mandanten darf keine reservierten Zeichen enthalten."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"Die Erweiterung %s wurde in den Keystone-Kern verschoben. Daher werden die "
+"zugehörigen Migrationen über die Keystone-Hauptdatenbanksteuerung verwaltet. "
+"Verwenden Sie den Befehl keystone-manage db_sync"
+
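The message above is itself a how-to: once an extension's migrations move into keystone core, a single run of the main migration command covers them. A usage sketch (whether older per-extension invocations are still accepted varies by release, so only the plain form is shown):

    $ keystone-manage db_sync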
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"Die Zeitangabe in 'expires_at' darf nicht vor dem jetzigen Zeitpunkt liegen. "
+"Der Server konnte der Anforderung nicht nachkommen, da ein fehlerhaftes "
+"Format oder ein anderer Fehler vorliegt. Es wird angenommen, dass der Fehler "
+"beim Client liegt."
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr ""
"Die Option --all kann nicht zusammen mit der Option --domain-name verwendet "
@@ -865,6 +1205,16 @@ msgstr ""
"wahrscheinlich, dass dieser Server keine PKI-Tokens verwendet; andernfalls "
"ist dies die Folge einer fehlerhaften Konfiguration."
+msgid "The configured token provider does not support bind authentication."
+msgstr ""
+"Der konfigurierte Token-Anbieter unterstützt die Bindungsauthentifizierung "
+"nicht."
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr ""
+"Die Erstellung von Projekten die als Domänen agieren, ist in v2 nicht "
+"zulässig."
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
@@ -889,7 +1239,7 @@ msgstr ""
msgid "The service you have requested is no longer available on this server."
msgstr ""
-"Der Service, den Sie angefordert haben, ist auf diesem Server nicht mehr "
+"Den Dienst, den Sie angefordert haben, ist auf diesem Server nicht mehr "
"verfügbar."
#, python-format
@@ -915,19 +1265,16 @@ msgstr "Es sollten keine non-oauth-Parameter vorhanden sein"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Dies ist keine anerkannte Fernet-Nutzdatenversion: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Dies ist kein v2.0-Fernet-Token. Verwenden Sie v3 für Vertrauensbeziehungs-, "
-"Domänen- oder föderierte Tokens."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Dies ist kein bekanntes Fernet-Token %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client is "
"assumed to be in error."
msgstr ""
-"Zeitmarke nicht im erwarteten Format. Der Server konnte der Anforderung "
+"Zeitstempel nicht im erwarteten Format. Der Server konnte der Anforderung "
"nicht nachkommen, da ein fehlerhaftes Format oder ein anderer Fehler "
"vorliegt. Es wird angenommen, dass der Fehler beim Client liegt."
@@ -947,11 +1294,14 @@ msgstr "Token gehört einem anderen Benutzer"
msgid "Token does not belong to specified tenant."
msgstr "Token gehört nicht zu angegebenem Nutzer."
+msgid "Token version is unrecognizable or unsupported."
+msgstr "Tokenversion ist nicht erkennbar oder wird nicht unterstützt."
+
msgid "Trustee has no delegated roles."
msgstr "Trustee hat keine beauftragten Rollen."
msgid "Trustor is disabled."
-msgstr "Trustor ist inaktiviert."
+msgstr "Trustor ist deaktiviert."
#, python-format
msgid ""
@@ -999,6 +1349,9 @@ msgstr ""
"Region %(region_id)s kann nicht gelöscht werden, da sie oder ihr "
"untergeordnete Regionen über zugeordnete Endpunkte verfügen. "
+msgid "Unable to downgrade schema"
+msgstr "Das Schema konnte nicht herabgestuft werden."
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
@@ -1006,13 +1359,6 @@ msgstr ""
"gefunden werden"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"Verbindung konnte von Pool-ID %(id)s nach %(seconds)s nicht abgerufen "
-"werden. "
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s"
@@ -1072,25 +1418,44 @@ msgstr "Unbekannte Tokenversion %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "Nicht registrierte Abhängigkeit: %(name)s für %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "Das Aktualisieren von `domain_id` ist nicht zulässig. "
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "Das Aktualisieren von 'is_domain' ist nicht zulässig."
+
msgid "Update of `parent_id` is not allowed."
msgstr "Das Aktualisieren von 'parent_id' ist nicht zulässig."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "Die Aktualisierung von 'domain_id' ist nur für Rootprojekte zulässig."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"Es ist nicht zulässig, die 'domain_id' von Projekten zu aktualisieren, die "
+"als Domänen agieren."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
"Verwenden Sie ein Projektumfangstoken, wenn Sie versuchen, eine SAML-"
"Zusicherung zu erstellen"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"Die Verwendung der Identitätstreiberkonfiguration für die automatische "
+"Konfiguration desselben Zuordnungstreibers ist veraltet. Der "
+"Zuordnungstreiber muss im \"O\"-Release explizit konfiguriert werden, wenn "
+"er sich vom Standardtreiber (SQL) unterscheidet."
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "Benutzer %(u_id)s ist nicht berechtigt für Nutzer %(t_id)s"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"Benutzer %(user_id)s verfügt bereits über die Rolle %(role_id)s in Nutzer "
-"%(tenant_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr "Benutzer %(user_id)s hat keinen Zugriff auf Domäne %(domain_id)s"
@@ -1109,9 +1474,16 @@ msgstr "Benutzer '%(user_id)s' nicht gefunden in Gruppe '%(group_id)s'"
msgid "User IDs do not match"
msgstr "Benutzerkennungen stimmen nicht überein"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"Benutzerauthentifizierung kann nicht erstellt werden, da entweder Benutzer-"
+"ID oder Benutzername mit Domänen-ID oder Benutzername mit Domänenname fehlt."
+
#, python-format
msgid "User is disabled: %s"
-msgstr "Benutzer ist inaktiviert: %s"
+msgstr "Benutzer ist deaktiviert: %s"
msgid "User is not a member of the requested project"
msgstr "Benutzer ist kein Mitglied des angeforderten Projekts"
@@ -1122,19 +1494,34 @@ msgstr "Benutzer ist kein Trustee."
msgid "User not found"
msgstr "Benutzer nicht gefunden"
+msgid "User not valid for tenant."
+msgstr "Benutzer nicht gültig für Mandant."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Benutzerrollen nicht unterstützt: tenant_id erforderlich"
+
#, python-format
msgid "User type %s not supported"
msgstr "Benutzertyp %s nicht unterstützt"
msgid "You are not authorized to perform the requested action."
msgstr ""
-"Sie sind nicht dazu autorisiert, die angeforderte Aktion durchzuführen."
+"Sie sind nicht dazu authorisiert, die angeforderte Aktion durchzuführen."
#, python-format
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr ""
"Sie sind nicht berechtigt, die angeforderte Aktion %(action)s auszuführen"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Sie haben versucht, eine Ressourcen mit dem Admin-Token zu erstellen. Da "
+"sich dieses Token nicht innerhalb einer Domäne befindet, müssen Sie explizit "
+"eine Domäne angeben, zu der diese Ressource gehört. "
+
msgid "`key_mangler` functions must be callable."
msgstr "`key_mangler`-Funktionen müssen aufrufbar sein."
@@ -1150,51 +1537,19 @@ msgstr "auth_type ist nicht 'Negotiate'"
msgid "authorizing user does not have role required"
msgstr "Der autorisierte Benutzer verfügt nicht über die erforderliche Rolle"
-msgid "cache_collection name is required"
-msgstr "Ein Name für cache_collection ist erforderlich"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
"kann kein Projekt in einer Niederlassung erstellen, die ein inaktiviertes "
"Projekt enthält: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-"kann kein Projekt innerhalb einer anderen Domäne als der der übergeordneten "
-"Projekte erstellen."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"Eine aktivierte Domäne kann nicht gelöscht werden; inaktivieren Sie sie "
-"zuerst."
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"kann das Projekt %s nicht löschen, da es kein Blattelement in der Hierarchie "
-"darstellt."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-"kann Projekt %s nicht inaktivieren, da die zugehörige untergeordnete "
-"Baumstruktur aktivierte Projekte enthält"
-
#, python-format
-msgid "cannot enable project %s since it has disabled parents"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"kann Projekt %s nicht aktivieren, da es über inaktivierte übergeordnete "
-"Projekte verfügt"
-
-msgid "database db_name is required"
-msgstr "Die Datenbank db_name ist erforderlich"
-
-msgid "db_hosts value is required"
-msgstr "Ein Wert für db_hosts ist erforderlich"
-
-msgid "delete the default domain"
-msgstr "Standarddomäne löschen"
+"Ein aktiviertes Projekt, das als Domäne agiert, kann nicht gelöscht werden. "
+"Inaktivieren Sie zuerst das Projekt %s."
#, python-format
msgid "group %(group)s"
@@ -1207,33 +1562,33 @@ msgstr ""
"idp_contact_type muss einer der folgenden Werte sein: technical, other, "
"support, administrative oder billing."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "Ganzzahlwert für mongo_ttl_seconds erwartet"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "Ganzzahlwert für Attribut 'w' ('write concern'-Attribut) erwartet"
-
#, python-format
msgid "invalid date format %s"
msgstr "ungültiges Datumsformat %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "für die %s-Niederlassung wurde die maximale Hierarchietiefe erreicht."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"Es ist nicht zulässig, zwei Projekte zu haben, die als Domänen mit demselben "
+"Namen agieren: %s"
-msgid "no ssl support available"
-msgstr "Keine SSL-Unterstützung verfügbar"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"Es ist nicht zulässig, zwei Projekte mit demselben Namen innerhalb einer "
+"Domäne zu haben: %s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "Nur Rootprojekte dürfen als Domänen agieren."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "Option %(option)s in Gruppe %(group)s"
-msgid "pad must be single character"
-msgstr "Pad muss ein einzelnes Zeichen sein"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "base64url-Pad-Text muss ein Vielfaches von 4 Zeichen enthalten"
-
msgid "provided consumer key does not match stored consumer key"
msgstr ""
"bereitgestellter Konsumentenschlüssel stimmt nicht mit dem gespeicherten "
@@ -1249,9 +1604,6 @@ msgstr ""
"bereitgestellte Prüffunktion stimmt nicht mit gespeicherter Prüffunktion "
"überein"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "Region weist nicht den Typ 'dogpile.cache.CacheRegion' auf"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses muss eine positive Ganzzahl oder null sein."
@@ -1260,9 +1612,6 @@ msgstr ""
"remaining_uses darf nicht festgelegt werden, wenn eine Redelegation zulässig "
"ist"
-msgid "replicaset_name required when use_replica is True"
-msgstr "replicaset_name erforderlich, wenn use_replica 'True' ist"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1275,20 +1624,12 @@ msgid "rescope a scoped token"
msgstr "Bereich für bereichsorientierten Token ändern"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"Text ist ein Vielfaches von 4, aber Pad \"%s\" steht vor dem zweitletzten "
-"Zeichen"
+msgid "role %s is not defined"
+msgstr "Die Rolle %s ist nicht definiert."
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"Text ist ein Vielfaches von 4, aber Pad \"%s\" steht vor dem letzten Nicht-"
-"Pad-Zeichen"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "Text ist kein Vielfaches von 4, aber enthält Pad \"%s\""
+"scope.project.id muss angegeben werden, wenn include_subtree angegeben wurde."
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1301,3 +1642,16 @@ msgstr "tls_cacertfile %s wurde nicht gefunden oder ist keine Datei"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "Tokenreferenz muss vom Typ 'KeystoneToken' sein. Abgerufen wurde: %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"Die Aktualisierung von 'domain_id' wurde in Mitaka eingestellt und wird im "
+"\"O\"-Release entfernt. "
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"Validierung erwartete %(param_name)r in Funktionssignatur für %(func_name)r."
diff --git a/keystone-moon/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po
index 90f983b2..72c931a3 100644
--- a/keystone-moon/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/el/LC_MESSAGES/keystone-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-05 01:09+0000\n"
-"Last-Translator: Efstathios Iosifidis <iefstathios@gmail.com>\n"
-"Language-Team: Greek\n"
-"Language: el\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-09-05 01:09+0000\n"
+"Last-Translator: Efstathios Iosifidis <iefstathios@gmail.com>\n"
+"Language: el\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Greek\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
index 5576d065..ab001a72 100644
--- a/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/en_AU/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: English (Australia)\n"
-"Language: en-AU\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: en-AU\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: English (Australia)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
index 9b93b5ed..565b8ee0 100644
--- a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Spanish\n"
-"Language: es\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone.po
index 46520ca7..f2336cc3 100644
--- a/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/es/LC_MESSAGES/keystone.po
@@ -1,4 +1,4 @@
-# Spanish translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
@@ -9,32 +9,44 @@
# Jose Enrique Ruiz Navarro <joseenriquernavarro@gmail.com>, 2014
# Jose Ramirez Garcia <jose.ramirez.rk@gmail.com>, 2014
# Pablo Sanchez <furybeat@gmail.com>, 2015
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Eugènia Torrella <tester03@es.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: es\n"
-"Language-Team: Spanish\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-28 11:25+0000\n"
+"Last-Translator: Eugènia Torrella <tester03@es.ibm.com>\n"
+"Language: es\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s es una versión de controlador no soportada"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr ""
+"El nombre %(entity)s no puede contener los siguientes caracteres "
+"reservados: %(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
-"%(event)s no es u suceso de notificación válido, debe ser uno de: %(actions)s"
+"%(event)s no es un suceso de notificación válido, debe ser uno de: "
+"%(actions)s"
#, python-format
msgid "%(host)s is not a trusted dashboard host"
@@ -53,18 +65,26 @@ msgstr ""
"repositorio de migración en %(path)s no existe o no es un directorio."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s no implica %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s no puede tener menos de %(min_length)s caracteres."
#, python-format
msgid "%(property_name)s is not a %(display_expected_type)s"
-msgstr "%(property_name)s no es una %(display_expected_type)s"
+msgstr "%(property_name)s no es %(display_expected_type)s"
#, python-format
msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s no debe tener más de %(max_length)s caracteres."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s no puede ser un rol implicado"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s no puede estar vacío."
@@ -74,25 +94,35 @@ msgstr "La extensión %s no existe."
#, python-format
msgid "%s field is required and cannot be empty"
-msgstr "campo %s es necesario y no puede estar vacío"
+msgstr "el campo %s es obligatorio y no puede estar vacío"
#, python-format
msgid "%s field(s) cannot be empty"
-msgstr "%s campo(s) no puede estar vacío"
+msgstr "el campo %s no puede estar vacío"
+
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"El programa de fondo de identidad LDAP %s se ha dejado en desuso en el "
+"release de Mitaka, sustituyéndolo por un acceso LDAP de identidad de solo "
+"lectura. Se eliminará en el release \"O\"."
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Inhabilite la modalidad de depuración para suprimir estos detalles.)"
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(Inhabilite la modalidad insecure_debug para suprimir estos detalles.)"
msgid "--all option cannot be mixed with other options"
msgstr "La opción --all no puede mezclarse con otras opciones"
msgid "A project-scoped token is required to produce a service catalog."
msgstr ""
-"Se necesita una señal con ámbito de proyecto para producir un catálogo de "
+"Se necesita un token con ámbito de proyecto para producir un catálogo de "
"servicio."
msgid "Access token is expired"
-msgstr "El token de acceso ha expirado"
+msgstr "El token de acceso ha caducado"
msgid "Access token not found"
msgstr "No se ha encontrado el token de acceso"
@@ -102,15 +132,16 @@ msgstr "Se precisan pasos adicionales de autenticación."
msgid "An unexpected error occurred when retrieving domain configs"
msgstr ""
-"Se ha producido un error inesperado al recuperar configuraciones de dominio"
+"Se ha producido un error inesperado al recuperar las configuraciones de "
+"dominio"
#, python-format
msgid "An unexpected error occurred when trying to store %s"
-msgstr "Un error inesperado ocurrió cuando se intentaba almacenar %s"
+msgstr "Se ha producido un error inesperado al intentar almacenar %s"
msgid "An unexpected error prevented the server from fulfilling your request."
msgstr ""
-"El servidor no ha podido completar su petición debido a un error inesperado."
+"El servidor no ha podido completar su solicitud debido a un error inesperado."
#, python-format
msgid ""
@@ -122,17 +153,28 @@ msgstr ""
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr ""
-"Se ha producido una excepción no manejada: no se han podido encontrar los "
+"Se ha producido una excepción no controlada: no se han podido encontrar los "
"metadatos."
msgid "At least one option must be provided"
-msgstr "Debe especificar al menos una opción"
+msgstr "Se debe especificar al menos una opción"
msgid "At least one option must be provided, use either --all or --domain-name"
msgstr "Debe proporcionarse al menos una opción, utilice --all o --domain-name"
msgid "At least one role should be specified."
-msgstr "Al menos debe especificarse un rol"
+msgstr "Se debe especificar al menos un rol"
+
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"Se ha intentado la seleción automática de controlador para la asignación en "
+"base a la opción [identity]\\driver, pero ha fallado porque no se encuentra "
+"el controlador %s. Defina [assignment]/driver con un controlador válido en "
+"la configuración de keystone."
msgid "Attempted to authenticate with an unsupported method."
msgstr "Se ha intentado autenticar con un método no compatible."
@@ -141,11 +183,19 @@ msgid ""
"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
"Authentication"
msgstr ""
-"Intentando utilizar la señal OS-FEDERATION con el servicio de identidad V2, "
+"Intentando utilizar el token OS-FEDERATION con el servicio de identidad V2, "
"utilice la autenticación V3 ."
msgid "Authentication plugin error."
-msgstr "Error en el complemento de autenticación "
+msgstr "Error en el plugin de autenticación "
+
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"El programa de fondo `%(backend)s` no es un programa de fondo almacenado en "
+"caché válido. Programas de fondo válidos: %(backend_list)s"
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
@@ -159,9 +209,6 @@ msgstr "No se puede cambiar %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "No se puede cambiar el ID del Dominio"
-msgid "Cannot change consumer secret"
-msgstr "No se puede cambiar el secreto de consumidor"
-
msgid "Cannot change user ID"
msgstr "No se puede cambiar el ID de usuario"
@@ -169,21 +216,79 @@ msgid "Cannot change user name"
msgstr "No se puede cambiar el nombre de usuario"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "No se puede crear un punto final con un URL no válido: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
-msgstr "No se puede crear el proyecto con padre: %(project_id)s"
+msgstr "No se puede crear un proyecto con el padre: %(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "No se puede duplicar nombre %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"No se puede crear el proyecto porque especifica que su propietario es el "
+"dominio %(domain_id)s, pero especifica un padre en otro dominio distinto "
+"(%(parent_domain_id)s)."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"No se puede crear el proyecto porque su padre (%(domain_id)s) actúa como "
+"dominio, pero el parent_id especificado en el proyecto, (%(parent_id)s), no "
+"coincide con este domain_id."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"No se puede suprimir un dominio que está habilitado, antes debe "
+"inhabilitarlo."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"No se puede suprimir el proyecto %(project_id)s porque su subárbol contiene "
+"proyectos habilitados."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"No se puede suprimir el proyecto %s porque no es una hoja en la jerarquía. "
+"Utilice la opción de cascada si desea suprimir un subárbol entero."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"No se puede inhabilitar el proyecto %(project_id)s porque su subárbol "
+"contiene proyectos habilitados."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr ""
+"No se puede habilitar el proyecto %s, ya que tiene padres inhabilitados"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"No se pueden enumerar las asignaciones obtenidas de grupos y filtradas por "
+"ID de usuario."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
-"No se pueden listar las señales de solicitud con una señal emitida mediante "
+"No se pueden listar los tokens de solicitud con un token emitido por "
"delegación."
#, python-format
msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
-msgstr "No se puede abrir el certificado %(cert_file)s. Razón: %(reason)s"
+msgstr "No se puede abrir el certificado %(cert_file)s. Motivo: %(reason)s"
#, python-format
msgid "Cannot remove role that has not been granted, %s"
@@ -193,8 +298,11 @@ msgid ""
"Cannot truncate a driver call without hints list as first parameter after "
"self "
msgstr ""
-"No se puede truncar una llamada de controlador si lista de sugerencias como "
-"primer parámetro después de self "
+"No se puede truncar una llamada de controlador sin la lista de sugerencias "
+"como primer parámetro después de self "
+
+msgid "Cannot update domain_id of a project that has children."
+msgstr "No se puede actualizar el domain_id de un proyecto que tenga hijos."
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
@@ -208,22 +316,31 @@ msgstr ""
"No se pueden utilizar los parámetros de consulta subtree_as_list y "
"subtree_as_ids al mismo tiempo."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr ""
+"Solo se permite la actualización en cascada de los atributos habilitados."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
-"La combinación de filtro de grupo y efectivo dará siempre como resultado una "
-"lista vacía."
+"La combinación de filtro de grupo y filtro efectivo dará siempre como "
+"resultado una lista vacía."
msgid ""
"Combining effective, domain and inherited filters will always result in an "
"empty list."
msgstr ""
-"La combinación de filtros heredados, de dominio y efectivos dará siempre "
-"como resultado una lista vacía."
+"La combinación de un filtro heredado, un filtro de dominio y un filtro "
+"efectivo dará siempre como resultado una lista vacía."
+
+#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "Entidad de API de config en /domains/%s/config"
#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
-msgstr "Ha ocurrido un conflicto al intentar almacenar %(type)s - %(details)s"
+msgstr ""
+"Se ha producido un conflicto al intentar almacenar %(type)s - %(details)s"
#, python-format
msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
@@ -243,6 +360,15 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"No se ha podido determinar el ID del proveedor de identidades. No se ha "
+"encontrado la opción de configuración %(issuer_attribute)s en el entorno de "
+"la solicitud."
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -251,7 +377,7 @@ msgstr ""
#, python-format
msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
-msgstr "No se ha encontrado un grupo de puntos finales: %(endpoint_group_id)s"
+msgstr "No se ha encontrado el grupo de puntos finales: %(endpoint_group_id)s"
msgid "Could not find Identity Provider identifier in environment"
msgstr ""
@@ -295,7 +421,7 @@ msgid "Could not find mapping: %(mapping_id)s"
msgstr "No se ha podido encontrar la correlación: %(mapping_id)s"
msgid "Could not find policy association"
-msgstr "No se ha encontrado una asociación de política"
+msgstr "No se ha encontrado la asociación de política"
#, python-format
msgid "Could not find policy: %(policy_id)s"
@@ -309,9 +435,6 @@ msgstr "No se ha podido encontrar el proyecto: %(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "No se ha podido encontrar la región: %(region_id)s"
-msgid "Could not find role"
-msgstr "No se puede encontrar la función"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -330,7 +453,7 @@ msgstr "No se ha podido encontrar el servicio: %(service_id)s"
#, python-format
msgid "Could not find token: %(token_id)s"
-msgstr "No se ha podido encontrar la señal: %(token_id)s"
+msgstr "No se ha podido encontrar el token: %(token_id)s"
#, python-format
msgid "Could not find trust: %(trust_id)s"
@@ -348,15 +471,49 @@ msgstr "No se ha podido encontrar la versión: %(version)s"
msgid "Could not find: %(target)s"
msgstr "No se ha podido encontrar : %(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"No se ha podido correlacionar ninguna propiedad de usuario federado a valor "
+"de identidad. Compruebe los registros de depuración o la correlación "
+"utilizada para obtener información más detallada."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"No se ha podido correlacionar el usuario al establecer la identidad de "
+"usuario efímera. Las reglas de correlación deben especificar ID/nombre de "
+"usuario o se debe establecer la variable de entorno REMOTE_USER."
+
msgid "Could not validate the access token"
-msgstr "No se ha podido validar la señal de acceso"
+msgstr "No se ha podido validar el token de acceso"
msgid "Credential belongs to another user"
msgstr "La credencial pertenece a otro usuario"
+msgid "Credential signature mismatch"
+msgstr "Discrepancia en la firma de credencial"
+
+#, python-format
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"La importación directa del plugin de autorización %(name)r está en desuso a "
+"partir de Liberty, sustituyéndose por su punto de entrada desde "
+"%(namespace)r y puede que se elimine en N."
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "Base de datos en /domains/%s/config"
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"La importación directa del controlador %(name)r está en desuso a partir de "
+"Liberty, sustituyéndose por su punto de entrada desde %(namespace)r y puede "
+"que se elimine en N."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -370,7 +527,7 @@ msgstr "Dominio (%s)"
#, python-format
msgid "Domain cannot be named %s"
-msgstr "El dominio no se puede llamar %s"
+msgstr "No se puede invocar al dominio %s"
#, python-format
msgid "Domain cannot have ID %s"
@@ -378,36 +535,55 @@ msgstr "El dominio no puede tener el ID %s"
#, python-format
msgid "Domain is disabled: %s"
-msgstr "El dominio está inhabilitado: %s"
+msgstr "El dominio %s está inhabilitado"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Metadatos de dominio no soportados por LDAP"
+msgid "Domain name cannot contain reserved characters."
+msgstr "El nombre de dominio no puede contener caracteres reservados."
msgid "Domain scoped token is not supported"
-msgstr "La señal con ámbito de dominio no está soportada"
+msgstr "No se da soporte a tokens con ámbito de dominio"
+
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "El controlador de roles V8 no admite roles específicos de dominio."
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
msgstr ""
-"Dominio: %(domain)s ya tiene definida una configuración - ignorando el "
+"El dominio: %(domain)s ya tiene definida una configuración - se ignorará el "
"archivo: %(file)s."
-msgid "Domains are read-only against LDAP"
-msgstr "Los dominios son de sólo lectura para LDAP"
-
msgid "Duplicate Entry"
-msgstr "Entrada Duplicada "
+msgstr "Entrada duplicada "
#, python-format
msgid "Duplicate ID, %s."
msgstr "ID duplicado, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "Entrada duplicada: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "Nombre duplicado, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ID remoto duplicado: %s"
+
+msgid "EC2 access key not found."
+msgstr "No se ha encontrado la clave de acceso de EC2."
+
+msgid "EC2 signature not supplied."
+msgstr "No se ha proporcionado la firma de EC2."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"Se debe definir el argumento bootstrap-password o bien OS_BOOTSTRAP_PASSWORD."
+
msgid "Enabled field must be a boolean"
msgstr "El campo habilitado debe ser un booleano"
@@ -422,7 +598,7 @@ msgstr ""
msgid "Endpoint Group Project Association not found"
msgstr ""
-"No se ha encontrado la asociación del proyecto del grupo de puntos finales"
+"No se ha encontrado la asociación de proyecto del grupo de puntos finales"
msgid "Ensure configuration option idp_entity_id is set."
msgstr ""
@@ -441,10 +617,31 @@ msgstr ""
"archivo: %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Error al abrir el archivo %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Error al analizar la línea: '%(line)s': %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Error al analizar las reglas %(path)s: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "Error al leer el archivo de metadatos, %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Se ha superado el número máximo de intentos de registrar un dominio "
+"%(domain)s para utilizar el controlador SQL, el último dominio que parece "
+"haberlo tenido es %(last_domain)s, abandonando"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "Se espera un diccionario o una lista: %s"
@@ -452,7 +649,7 @@ msgid ""
"Expected signing certificates are not available on the server. Please check "
"Keystone configuration."
msgstr ""
-"Los certificados para firmas esperados no están disponibles en el servidor. "
+"No hay los certificados para firmas esperados disponibles en el servidor. "
"Compruebe la configuración de Keystone."
#, python-format
@@ -461,9 +658,9 @@ msgid ""
"with the request since it is either malformed or otherwise incorrect. The "
"client is assumed to be in error."
msgstr ""
-"Esperando encontrar %(attribute)s en %(target)s - el servidor no pudo "
-"cumplir la solicitud porque está formada incorrectamente o de otra forma es "
-"incorrecta. El cliente se asume en error."
+"Se esperaba encontrar %(attribute)s en %(target)s - el servidor no pudo "
+"satisfacer la solicitud porque está mal formada o es incorrecta por algún "
+"otro motivo. Se entiende que el cliente da error."
#, python-format
msgid "Failed to start the %(name)s server"
@@ -473,25 +670,29 @@ msgid "Failed to validate token"
msgstr "Ha fallado la validación del token"
msgid "Federation token is expired"
-msgstr "La señal de federación ha caducado"
+msgstr "El token de federación ha caducado"
#, python-format
msgid ""
"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
"order to redelegate a trust"
msgstr ""
-"El campo \"remaining_uses\" está establecido en %(value)s, pero no debe "
+"El campo \"remaining_uses\" está establecido en %(value)s, pero no puede "
"estar establecido para poder redelegar una confianza"
msgid "Found invalid token: scoped to both project and domain."
msgstr ""
-"Se ha encontrado una señal no válida: tiene un ámbito de proyecto y dominio."
+"Se ha encontrado un token no válido: se ha definido el ámbito a proyecto y "
+"dominio a la vez."
+
+#, python-format
+msgid "Group %s not found in config"
+msgstr "No se ha encontrado el grupo %s en la configuración"
#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
-"El grupo %(group)s no se admite para las configuraciones específicas de "
-"dominio"
+"No se admite el grupo %(group)s para configuraciones específicas de dominio"
#, python-format
msgid ""
@@ -506,8 +707,8 @@ msgid ""
"Group membership across backend boundaries is not allowed, group in question "
"is %(group_id)s, user is %(user_id)s"
msgstr ""
-"La pertenencia a grupos en los límites del programa de fondo no está "
-"permitida, el grupo en cuestión es %(group_id)s, el usuario es %(user_id)s"
+"No se permite la pertenencia a grupos traspasando los límites del programa "
+"de fondo, el grupo en cuestión es %(group_id)s, el usuario es %(user_id)s"
#, python-format
msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
@@ -525,26 +726,33 @@ msgstr ""
"No se ha incluido el identificador del proveedor de identidad de entrada "
"entre los identificadores aceptados."
+msgid "Invalid EC2 signature."
+msgstr "Firma de EC2 no válida."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
-msgstr "Opción de LDAP TLS no válida: %(option)s. Elegir uno de: %(options)s"
+msgstr ""
+"Opción de LDAP TLS no válida: %(option)s. Elegir una de las siguientes: "
+"%(options)s"
#, python-format
msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "Opción LDAP TLS_AVAIL inválida: %s. TLS no disponible"
+msgstr "Opción LDAP TLS_AVAIL no válida: %s. TLS no disponible"
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
-msgstr "Opción deref LDAP no válida: %(option)s. Elija una de: %(options)s"
+msgstr ""
+"Opción deref LDAP no válida: %(option)s. Elija una de las siguientes: "
+"%(options)s"
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
msgstr ""
-"Ãmbito LDAP incorrecto: %(scope)s. Selecciones una de las siguientes "
+"Ãmbito LDAP incorrecto: %(scope)s. Seleccione una de las siguientes "
"opciones: %(options)s"
msgid "Invalid TLS / LDAPS combination"
-msgstr "Combinación TLS/LDAPS no válida"
+msgstr "Combinación de TLS/LDAPS no válida"
#, python-format
msgid "Invalid audit info data type: %(data)s (%(type)s)"
@@ -552,15 +760,15 @@ msgstr ""
"Tipo de datos de información de auditoría no válido: %(data)s (%(type)s)"
msgid "Invalid blob in credential"
-msgstr "Blob no válido en credencial"
+msgstr "Blob no válido en la credencial"
#, python-format
msgid ""
"Invalid domain name: %(domain)s found in config file name: %(file)s - "
"ignoring this file."
msgstr ""
-"Nombre de dominio no válido: %(domain)s encontrado en el nombre de archivo "
-"de configuración: %(file)s - ignorando este archivo."
+"Se ha encontrado un nombre de dominio no válido: %(domain)s en el nombre del "
+"archivo de configuración: %(file)s - se ignorará este archivo."
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
@@ -580,7 +788,7 @@ msgid ""
"%(service_id)s, Region: %(region_id)s"
msgstr ""
"Combinación no válida de entidades para la asociación de políticas: solo se "
-"permite Punto final, Servicio o Región + Servicio. La solicitud fue: Punto "
+"permite Punto final, Servicio o Región + Servicio. La solicitud era: Punto "
"final: %(endpoint_id)s, Servicio: %(service_id)s, Región: %(region_id)s"
#, python-format
@@ -589,28 +797,24 @@ msgid ""
"be specified."
msgstr ""
"Regla no válida: %(identity_value)s. Se deben especificar las palabras clave "
-"'grupos' y 'dominio ."
+"'groups' y 'domain'."
msgid "Invalid signature"
msgstr "Firma no válida"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"Valor ssl_cert_reqs no válido de %s, debe ser uno de \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-
msgid "Invalid user / password"
msgstr "Usuario / contraseña no válidos"
+msgid "Invalid username or TOTP passcode"
+msgstr "Nombre de usuario o código de acceso TOTP no válidos"
+
msgid "Invalid username or password"
msgstr "Usuario o contraseña no válidos"
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr "La región KVS %s ya se ha configurado. No se puede reconfigurar."
+msgstr ""
+"La región KVS %s ya se ha configurado. No se puede volver a configurar."
#, python-format
msgid "Key Value Store not configured: %s"
@@ -628,9 +832,24 @@ msgstr "Supresión de LDAP %s"
msgid "LDAP %s update"
msgstr "Actualización de LDAP %s"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"Longitud del ID de recurso transformable > 64, que es el número máximo de "
+"caracteres permitidos"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"La sección local de la correlación %(mapping_id)s hace referencia a una "
+"coincidencia remota que no existe (p.e. {0} en una sección local)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
-msgstr "Se ha producido tiempo de espera de bloqueo para la clave, %(target)s"
+msgstr ""
+"Se ha excedido el tiempo de espera de bloqueo para la clave, %(target)s"
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
@@ -648,8 +867,12 @@ msgid "Marker could not be found"
msgstr "No se ha podido encontrar el marcador"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "Se ha alcanzado la profundidad máxima de jerarquía en la rama %s."
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
-msgstr "Se han producido los intentos de bloqueo máximos en %s."
+msgstr "Se han producido el máximo de intentos de bloqueo en %s."
#, python-format
msgid "Member %(member)s is already a member of group %(group)s"
@@ -673,13 +896,18 @@ msgid "Multiple domains are not supported"
msgstr "No se admiten varios dominios"
msgid "Must be called within an active lock context."
-msgstr "Se debe llamar dentro de un contexto de bloqueo activo."
+msgstr "Se debe invocar dentro de un contexto de bloqueo activo."
msgid "Must specify either domain or project"
-msgstr "Debe especificar dominio o proyecto"
+msgstr "Debe especificar dominio o proyecto, pero no ambas cosas a la vez"
msgid "Name field is required and cannot be empty"
-msgstr "El nombre de campo es necesario y no puede estar vacío"
+msgstr "El nombre de campo es obligatorio y no puede estar vacío"
+
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr ""
+"No se ha proporcionado el ID de dominio de proyecto ni el nombre de dominio "
+"de proyecto."
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
@@ -690,16 +918,16 @@ msgstr ""
"asegúrese de que WSGIPassAuthorization se establece en activada."
msgid "No authenticated user"
-msgstr "Ningún usuario autenticado "
+msgstr "No hay ningún usuario autenticado "
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
msgstr ""
"No se han encontrado claves de cifrado; ejecute keystone-manage fernet_setup "
-"para el programa de arranque uno."
+"en el programa de arranque uno."
msgid "No options specified"
-msgstr "No se especificaron opciones"
+msgstr "No se han especificado opciones"
#, python-format
msgid "No policy is associated with endpoint %(endpoint_id)s."
@@ -709,6 +937,9 @@ msgstr "No hay ninguna política asociada con el punto final %(endpoint_id)s."
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "No quedan usos para la confianza: %(trust_id)s"
+msgid "No token in the request"
+msgstr "No hay ningún token en la solicitud"
+
msgid "Non-default domain is not supported"
msgstr "El dominio no predeterminado no está soportado"
@@ -720,27 +951,47 @@ msgid ""
"Option %(option)s found with no group specified while checking domain "
"configuration request"
msgstr ""
-"Se ha encontrado la opción %(option)s sin grupo especificado al comprobar la "
-"solicitud de configuración del dominio"
+"Se ha encontrado la opción %(option)s sin un grupo especificado al comprobar "
+"la solicitud de configuración del dominio"
#, python-format
msgid ""
"Option %(option)s in group %(group)s is not supported for domain specific "
"configurations"
msgstr ""
-"La opción %(option)s del grupo %(group)s no se admite para las "
-"configuraciones específicas del dominio"
+"No se admite la opción %(option)s del grupo %(group)s para configuraciones "
+"específicas de dominio"
#, python-format
msgid "Project (%s)"
msgstr "Proyecto (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "No se ha encontrado el ID de proyecto : %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "El campo de proyecto es obligatorio y no puede estar vacío."
+
+#, python-format
msgid "Project is disabled: %s"
-msgstr "El proyecto está inhabilitado: %s"
+msgstr "El proyecto %s está inhabilitado"
+
+msgid "Project name cannot contain reserved characters."
+msgstr "El nombre de proyecto no puede contener caracteres reservados."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "La cadena de consulta no está en UTF-8"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"No se da soporte para leer el valor predeterminado para la opción %(option)s "
+"del grupo %(group)s"
msgid "Redelegation allowed for delegated by trust only"
-msgstr "Sólo se permite volver a delegar un delegado por confianza"
+msgstr "Solo se permite volver a delegar un delegado por confianza"
#, python-format
msgid ""
@@ -750,6 +1001,78 @@ msgstr ""
"La profundidad de redelegación restante de %(redelegation_depth)d está fuera "
"del rango permitido de [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine admin_crud_extension de la interconexión de pegar, la extensión "
+"admin_crud ahora está siempre disponible. Actualice la sección [pipeline:"
+"admin_api] en keystone-paste.ini consecuentemente, ya que se eliminará en el "
+"release O."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Elimine endpoint_filter_extension de la interconexión de pegar, la extensión "
+"de filtro de punto final ahora está siempre disponible. Actualice la "
+"sección [pipeline:api_v3] en keystone-paste.ini consecuentemente, ya que se "
+"eliminará en el release O."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine federation_extension de la interconexión de pegar, la extensión de "
+"federación ahora está siempre disponible. Actualice la sección [pipeline:"
+"api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el "
+"release O."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine oauth1_extension de la interconexión de pegar, la extensión oauth1 "
+"ahora está siempre disponible. Actualice la sección [pipeline:api_v3] en "
+"keystone-paste.ini consecuentemente, ya que se eliminará en el release O."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine revoke_extension de la interconexión de pegar, la extensión de "
+"revocación ahora está siempre disponible. Actualice la sección [pipeline:"
+"api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el "
+"release O."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine simple_cert de la interconexión de pegar, los proveedores de token "
+"PKI y PKIz están ahora en desuso y simple_cert se utilizaba únicamente para "
+"dar soporte a estos proveedores de token. Actualice la sección [pipeline:"
+"api_v3] en keystone-paste.ini consecuentemente, ya que se eliminará en el "
+"release O."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Elimine user_crud_extension de la interconexión de pegar, la extensión "
+"user_crud ahora está siempre disponible. Actualice la sección [pipeline:"
+"public_api] en keystone-paste.ini consecuentemente, ya que se eliminará en "
+"el release O."
+
msgid "Request Token does not have an authorizing user id"
msgstr "El token de solicitud no tiene un id de usuario de autorización"
@@ -760,14 +1083,15 @@ msgid ""
"invalid (too large). The client is assumed to be in error."
msgstr ""
"El atributo de solicitud %(attribute)s debe ser menor que o igual a "
-"%(size)i. El servidor no pudo cumplir con la solicitud debido al tamaño del "
-"atributo no es válido (demasiado grande). El cliente se asume en error."
+"%(size)i. El servidor no pudo satisfacer la solicitud porque el tamaño del "
+"atributo no es válido (demasiado grande). Se entiende que el cliente da "
+"error."
msgid "Request must have an origin query parameter"
msgstr "La solicitud debe tener un parámetro de consulta de origen"
msgid "Request token is expired"
-msgstr "El token solicitado ha expirado"
+msgstr "El token solicitado ha caducado"
msgid "Request token not found"
msgstr "No se ha encontrado el token solicitado"
@@ -785,10 +1109,6 @@ msgstr ""
"La profundidad de redelegación solicitada de %(requested_count)d es mayor "
"que la permitida %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "No se ha encontrado el rol %s"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -796,17 +1116,17 @@ msgid ""
msgstr ""
"La ejecución de keystone a través de eventlet está en desuso a partir de "
"Kilo sustituyéndose por la ejecución en un servidor WSGI (por ejemplo, "
-"mod_wsgi). El soporte para keystone bajo eventlet se eliminará en \"M\"-"
-"Release."
+"mod_wsgi). El soporte para keystone en eventlet se eliminará en el release "
+"\"M\"."
msgid "Scoping to both domain and project is not allowed"
-msgstr "El ámbito para dominio y proyecto no está permitido"
+msgstr "No se permite definir el ámbito tanto a dominio como a proyecto"
msgid "Scoping to both domain and trust is not allowed"
-msgstr "El ámbito para dominio y confianza no está permitido"
+msgstr "No se permite definir el ámbito tanto a dominio como a confianza"
msgid "Scoping to both project and trust is not allowed"
-msgstr "El ámbito para proyecto y confianza no está permitido"
+msgstr "No se permite definir el ámbito tanto a proyecto como a confianza"
#, python-format
msgid "Service Provider %(sp)s is disabled"
@@ -816,24 +1136,46 @@ msgid "Some of requested roles are not in redelegated trust"
msgstr "Algunos roles solicitados no están en la confianza redelegada"
msgid "Specify a domain or project, not both"
-msgstr "Especifique un dominio o proyecto, no ambos"
+msgstr "Especifique un dominio o un proyecto, no ambas cosas a la vez"
msgid "Specify a user or group, not both"
-msgstr "Especifique un usuario o grupo, no ambos"
+msgstr "Especifique un usuario o un grupo, no ambas cosas a la vez"
msgid "Specify one of domain or project"
-msgstr "Especifique un dominio o proyecto"
+msgstr "Especifique un dominio o un proyecto"
msgid "Specify one of user or group"
-msgstr "Especifique un usuario o grupo"
+msgstr "Especifique un usuario o un grupo"
#, python-format
msgid ""
"String length exceeded.The length of string '%(string)s' exceeded the limit "
"of column %(type)s(CHAR(%(length)d))."
msgstr ""
-"La longitud de la serie se ha excedido. La longitud de la serie '%(string)s' "
-"ha excedido el límite de la columna %(type)s(CHAR(%(length)d))."
+"Se ha superado la longitud de la cadena. La longitud de la cadena "
+"'%(string)s' ha excedido el límite de la columna %(type)s(CHAR(%(length)d))."
+
+msgid "Tenant name cannot contain reserved characters."
+msgstr "El nombre de arrendatario no puede contener caracteres reservados."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"La extensión %s se ha trasladado al núcleo de keystone y, como tal, el "
+"mantenimiento de sus migraciones se hace desde el control de bases de datos "
+"principal de keystone. Utilice el comando: keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"El valor de 'expires_at' no debe ser antes que el momento actual. El "
+"servidor no ha podido satisfacer la solicitud porque está mal formada o es "
+"incorrecta por algún otro motivo. Se entiende que el cliente da error."
msgid "The --all option cannot be used with the --domain-name option"
msgstr "La opción --all no se puede utilizar con la opción --domain-name"
@@ -841,8 +1183,8 @@ msgstr "La opción --all no se puede utilizar con la opción --domain-name"
#, python-format
msgid "The Keystone configuration file %(config_file)s could not be found."
msgstr ""
-"El archivo de configuración de Keystone %(config_file)s no se ha podido "
-"encontrar."
+"No se ha podido encontrar el archivo de configuración de Keystone "
+"%(config_file)s."
#, python-format
msgid ""
@@ -853,7 +1195,7 @@ msgstr ""
"controlador SQL (sólo se permite uno): %(source)s."
msgid "The action you have requested has not been implemented."
-msgstr "La acción que ha solicitado no ha sido implemento"
+msgstr "La acción que ha solicitado no está implementada"
msgid "The authenticated user should match the trustor."
msgstr "El usuario autenticado debe coincidir con el fideicomitente."
@@ -864,8 +1206,16 @@ msgid ""
"misconfiguration."
msgstr ""
"Los certificados que ha solicitado no están disponibles. Es probable que "
-"este servidor no utilice señales PKI, de lo contrario este es el resultado "
-"de una configuración incorrecta."
+"este servidor no utilice tokens PKI o que se haya hecho una configuración "
+"incorrecta."
+
+msgid "The configured token provider does not support bind authentication."
+msgstr ""
+"El proveedor de señales configurado no da soporte a la autenticación de "
+"enlaces."
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "En la v2, no se permite crear proyectos que actúen como dominios."
#, python-format
msgid ""
@@ -873,13 +1223,14 @@ msgid ""
"not comply with the request because the password is invalid."
msgstr ""
"La longitud de la contraseña debe ser menor o igual que %(size)i. El "
-"servidor no pudo cumplir la solicitud porque la contraseña no es válida."
+"servidor no ha podido satisfacer la solicitud porque la contraseña no es "
+"válida."
msgid "The request you have made requires authentication."
msgstr "La solicitud que ha hecho requiere autenticación."
msgid "The resource could not be found."
-msgstr "El recurso no se ha podido encontrar."
+msgstr "No se ha podido encontrar el recurso."
msgid ""
"The revoke call must not have both domain_id and project_id. This is a bug "
@@ -915,12 +1266,9 @@ msgstr "Solo puede haber parámetros de oauth"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Esta no es una versión de carga útil Fernet reconocida: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Esta no es una señal v2.0 Fernet. Utilice v3 para señales de confianza, "
-"dominio o federadas ."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Este no es un token Fernet reconocido %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -928,8 +1276,8 @@ msgid ""
"assumed to be in error."
msgstr ""
"La indicación de fecha y hora no está en el formato esperado. El servidor no "
-"ha podido satisfacer la solicitud porque tiene un formato incorrecto o es "
-"incorrecta de alguna otra forma. Se supone que el cliente es erróneo."
+"ha podido satisfacer la solicitud porque está mal formada o es incorrecta "
+"por algún otro motivo. Se entiende que el cliente da error."
#, python-format
msgid ""
@@ -945,21 +1293,24 @@ msgid "Token belongs to another user"
msgstr "El token pertenece a otro usuario"
msgid "Token does not belong to specified tenant."
-msgstr "La señal no pertenece al arrendatario especificado."
+msgstr "El token no pertenece al arrendatario especificado."
+
+msgid "Token version is unrecognizable or unsupported."
+msgstr "Versión de token no reconocida o no soportada."
msgid "Trustee has no delegated roles."
msgstr "La entidad de confianza no tiene roles delegados."
msgid "Trustor is disabled."
-msgstr "Trustor está deshabilitado."
+msgstr "El fideicomitente está deshabilitado."
#, python-format
msgid ""
"Trying to update group %(group)s, so that, and only that, group must be "
"specified in the config"
msgstr ""
-"Intentando actualizar el grupo %(group)s, para que ese, y sólo ese grupo se "
-"especifique en la configuración"
+"Intentando actualizar el grupo %(group)s, de forma que se pueda especificar "
+"ese grupo, y solo ese, enla configuración"
#, python-format
msgid ""
@@ -974,14 +1325,14 @@ msgid ""
"Trying to update option %(option)s in group %(group)s, so that, and only "
"that, option must be specified in the config"
msgstr ""
-"Intentando actualizar la opción %(option)s en el grupo %(group)s, para que "
-"esa, y solo esa opción, se especifique en la configuración"
+"Intentando actualizar la opción %(option)s en el grupo %(group)s, de forma "
+"que se pueda especificar esa opción, y solo esa, en la configuración"
msgid ""
"Unable to access the keystone database, please check it is configured "
"correctly."
msgstr ""
-"No se puede acceder a la base de datos de keystone, compruebe si está "
+"No se puede acceder a la base de datos de keystone, compruebe que está "
"configurada correctamente."
#, python-format
@@ -998,20 +1349,16 @@ msgstr ""
"No se puede suprimir la región %(region_id)s porque sus regiones secundarias "
"tienen puntos finales asociados."
+msgid "Unable to downgrade schema"
+msgstr "No se ha podido degradar el esquema"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
-"No se pueden encontrar grupos válidos mientras se utiliza la correlación "
+"No se han podido encontrar grupos válidos utilizando la correlación "
"%(mapping_id)s"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"No se puede obtener una conexión del ID de agrupación %(id)s después de "
-"%(seconds)s segundos."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "No se ha podido localizar el directorio config de dominio: %s"
@@ -1025,7 +1372,7 @@ msgid ""
"values %(new)s and %(old)s"
msgstr ""
"No se puede reconciliar el atributo de identidad %(attribute)s porque tiene "
-"los valores en conflicto %(new)s y %(old)s"
+"los siguientes valores en conflicto: %(new)s y %(old)s"
#, python-format
msgid ""
@@ -1034,11 +1381,11 @@ msgid ""
"%(reason)s"
msgstr ""
"No se puede firmar la aserción SAML. Es probable que este servidor no tenga "
-"xmlsec1 instalado o que sea el resultado de una configuración incorrecta. "
-"Razón %(reason)s"
+"xmlsec1 instalado o que se haya hecho una configuración incorrecta. Motivo: "
+"%(reason)s"
msgid "Unable to sign token."
-msgstr "No se ha podido firmar la señal."
+msgstr "No se ha podido firmar el token."
#, python-format
msgid "Unexpected assignment type encountered, %s"
@@ -1065,32 +1412,51 @@ msgstr "Dominio desconocido '%(name)s' especificado por --domain-name"
#, python-format
msgid "Unknown token version %s"
-msgstr "Versión de señal desconocida %s"
+msgstr "Versión de token desconocida %s"
#, python-format
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "Dependencia no registrada: %(name)s para %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "No se permite la actualización de `domain_id`."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "No se permite la actualización de `is_domain`."
+
msgid "Update of `parent_id` is not allowed."
msgstr "No se permite la actualización de `parent_id`."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "Solo se permite actualizar el domain_id de los proyectos raíz."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"No se permite actualizar el domain_id de los proyectos que actúen como "
+"dominios."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
"Utilice un token de ámbito de proyecto cuando intente crear una aserción SAML"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"El uso de la configuración del controlador de identidad para configurar "
+"automáticamente el mismo controlador de asignación está en desuso. En el "
+"release \"O\", el controlador de asignación se deberá configurar "
+"explícitamente si es distinto que el valor predeterminado (SQL)."
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "El usuario %(u_id)s no está autorizado en el proyecto %(t_id)s"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"El usuario %(user_id)s ya tiene el rol %(role_id)s en el arrendatario "
-"%(tenant_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "El usuario %(user_id)s no tiene acceso al Dominio %(domain_id)s"
+msgstr "El usuario %(user_id)s no tiene acceso al dominio %(domain_id)s"
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
@@ -1102,10 +1468,19 @@ msgstr "El usuario %(user_id)s ya es miembro del grupo %(group_id)s"
#, python-format
msgid "User '%(user_id)s' not found in group '%(group_id)s'"
-msgstr "Usuario '%(user_id)s' no encontrado en el grupo '%(group_id)s'"
+msgstr ""
+"No se ha encontrado el usuario '%(user_id)s' en el grupo '%(group_id)s'"
msgid "User IDs do not match"
-msgstr "ID de usuario no coinciden"
+msgstr "Los ID de usuario no coinciden"
+
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"No se puede crear la autorización de usuario porque falta el ID de usuario o "
+"el nombre de usuario con el ID de dominio, o el nombre de usuario con el "
+"nombre de dominio."
#, python-format
msgid "User is disabled: %s"
@@ -1120,6 +1495,12 @@ msgstr "El usuario no es de confianza."
msgid "User not found"
msgstr "Usuario no encontrado"
+msgid "User not valid for tenant."
+msgstr "Usuario no válido para este arrendatario."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Roles de usuario no admitidos: tenant_id obligatorio"
+
#, python-format
msgid "User type %s not supported"
msgstr "El tipo de usuario %s no está soportado"
@@ -1131,23 +1512,29 @@ msgstr "No está autorizado para realizar la acción solicitada."
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "No está autorizado para realizar la acción solicitada: %(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Ha intentado crear un recurso utilizando el token de administración. Dado "
+"que este token no se encuentra dentro de un dominio, debe incluir "
+"explícitamente un dominio al que pertenecerá este recurso."
+
msgid "`key_mangler` functions must be callable."
-msgstr "Las funciones `key_mangler` se deben poder llamar."
+msgstr "Las funciones `key_mangler` deben ser invocables."
msgid "`key_mangler` option must be a function reference"
msgstr "La opción `key_mangler` debe ser una referencia de función"
msgid "any options"
-msgstr "cualquier opción"
+msgstr "cualesquiera opciones"
msgid "auth_type is not Negotiate"
msgstr "auth_type no es Negotiate"
msgid "authorizing user does not have role required"
-msgstr "el usuario de autorización no tiene la función requerida"
-
-msgid "cache_collection name is required"
-msgstr "el nombre de cache_collection es necesario"
+msgstr "el usuario de autorización no tiene el rol necesario"
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
@@ -1155,38 +1542,13 @@ msgstr ""
"No se puede crear un proyecto en una rama que contiene un proyecto "
"inhabilitado: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-"No se puede crear un proyecto dentro de un dominio distinto al de sus padres."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"no se puede suprimir un dominio que está habilitado, inhabilítelo primero."
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"No se puede suprimir el proyecto %s, ya que no es una hoja en la jerarquía."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-"No se puede inhabilitar el proyecto %s, ya que su subárbol contiene "
-"proyectos habilitados"
-
#, python-format
-msgid "cannot enable project %s since it has disabled parents"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"No se puede habilitar el proyecto %s, ya que tiene padres inhabilitados"
-
-msgid "database db_name is required"
-msgstr "base de datos db_name es necesario"
-
-msgid "db_hosts value is required"
-msgstr "El valor db_hosts es necesario"
-
-msgid "delete the default domain"
-msgstr "suprimir el dominio predeterminado"
+"no se puede suprimir un proyecto habilitado que actúe como dominio. "
+"Inhabilite el proyecto %s."
#, python-format
msgid "group %(group)s"
@@ -1199,33 +1561,33 @@ msgstr ""
"idp_contact_type debe ser una de estas opciones: [técnico, otros, soporte, "
"administrativo o facturación."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "se esperaba un valor entero para mongo_ttl_seconds"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "se esperaba un valor entero para w (atributo en cuestión write)"
-
#, python-format
msgid "invalid date format %s"
msgstr "formato de fecha no válido %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "Se ha alcanzado la profundidad máxima de jerarquía en la rama %s."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"no se permite tener dos proyectos actuando como dominios con el mismo "
+"nombre: %s"
+
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"no se permite tener dos proyectos dentro de un mismo dominio con el mismo "
+"nombre: %s"
-msgid "no ssl support available"
-msgstr "Soporte SSL no disponible."
+msgid "only root projects are allowed to act as domains."
+msgstr "Sólo los proyectos raíz pueden actuar como dominios."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "opción %(option)s en el grupo %(group)s"
-msgid "pad must be single character"
-msgstr "el relleno debe ser un único carácter"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "el texto base64url rellenado debe ser múltiplo de 4 caracteres"
-
msgid "provided consumer key does not match stored consumer key"
msgstr ""
"la clave de consumidor proporcionada no coincide con la clave de consumidor "
@@ -1239,18 +1601,12 @@ msgstr ""
msgid "provided verifier does not match stored verifier"
msgstr "el verificador proporcionado no coincide con el verificador almacenado"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "región no tipo dogpile.cache.CacheRegion"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses debe ser un entero positivo o nulo."
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "remaining_uses no se debe establecer si se permite la redelegación"
-msgid "replicaset_name required when use_replica is True"
-msgstr "se necesita replicaset_name cuando use_replica es True (verdadero)"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1260,33 +1616,38 @@ msgstr ""
"proporcionada contiene el grupo %(group_other)s en su lugar"
msgid "rescope a scoped token"
-msgstr "Volver a establecer el ámbito de una señal con ámbito"
+msgstr "Volver a establecer el ámbito de un token con ámbito"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"el texto es múltiplo de 4, pero el relleno \"%s\" aparece antes del "
-"penúltimo carácter"
+msgid "role %s is not defined"
+msgstr "el rol %s no está definido"
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"el texto es múltiplo de 4, pero el relleno \"%s\" aparece antes del último "
-"carácter no de relleno"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "el texto no es un múltiplo de 4, pero contiene el relleno \"%s\""
+"Se debe especificar scope.project.id si se especifica también include_subtree"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
-msgstr "No se ha encontrado o no es un directorio tls_cacertdir %s"
+msgstr "No se ha encontrado tls_cacertdir %s o no es un directorio"
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
-msgstr "No se ha encontrado o no es un fichero tls_cacertfile %s"
+msgstr "No se ha encontrado tls_cacertfile %s o no es un archivo"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr ""
"la referencia de señal debe ser un tipo KeystoneToken, se ha obtenido: %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"La actualización de domain_id está en desuso en Mitaka y se eliminará en O."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"En la validación se esperaba encontrar %(param_name)r en la firma de función "
+"para %(func_name)r."
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
index 5967192b..c7e55ed2 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: French\n"
-"Language: fr\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone.po
index 9fb2b2ec..de00f697 100644
--- a/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/fr/LC_MESSAGES/keystone.po
@@ -1,4 +1,4 @@
-# French translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
@@ -7,34 +7,46 @@
# Maxime COQUEREL <max.coquerel@gmail.com>, 2014
# Andrew Melim <nokostya.translation@gmail.com>, 2014
# Olivier Perrin <operrin@heliostech.fr>, 2013
+# Olivier Perrin <operrin@heliostech.fr>, 2013
# Rémi Le Trocquer <remi.letrocquer@orange.com>, 2014
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# leroy <dleroy@fr.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: fr\n"
-"Language-Team: French\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-18 04:18+0000\n"
+"Last-Translator: Martine Marin <mmarin@fr.ibm.com>\n"
+"Language: fr\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s n'est pas une version de pilote prise en charge"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr ""
+"Le nom %(entity)s ne peut pas contenir les caractères réservés suivants : "
+"%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
-"%(event)s n'est pas un événement de notification valide, doit être l'une des "
-"options suivantes : %(actions)s"
+"%(event)s n'est pas un événement de notification valide, ce doit être l'un "
+"des suivants : %(actions)s"
#, python-format
msgid "%(host)s is not a trusted dashboard host"
@@ -53,13 +65,17 @@ msgstr ""
"référentiel de migration %(path)s n'existe pas ou n'est pas un répertoire."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s n'implique pas %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr ""
"%(property_name)s ne peut pas contenir moins de %(min_length)s caractères."
#, python-format
msgid "%(property_name)s is not a %(display_expected_type)s"
-msgstr "%(property_name)s n'est pas du type %(display_expected_type)s"
+msgstr "%(property_name)s n'est pas un %(display_expected_type)s"
#, python-format
msgid "%(property_name)s should not be greater than %(max_length)s characters."
@@ -67,12 +83,16 @@ msgstr ""
"%(property_name)s ne doit pas contenir plus de %(max_length)s caractères."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s ne peut pas être un rôle impliqué"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s ne peut pas être vide."
#, python-format
msgid "%s extension does not exist."
-msgstr "extension %s n'existe pas."
+msgstr "L'extension %s n'existe pas."
#, python-format
msgid "%s field is required and cannot be empty"
@@ -80,60 +100,82 @@ msgstr "La zone %s est obligatoire et ne peut pas être vide"
#, python-format
msgid "%s field(s) cannot be empty"
-msgstr "%s zone(s) ne peut(peuvent) pas être vide(s)"
+msgstr "la ou les zones %s ne peuvent pas être vides"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Désactivez le mode de débogage pour supprimer ces informations.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"%s pour le back-end d'identité LDAP est désormais obsolète dans l'édition "
+"Mitaka en faveur de l'accès LDAP d'identité en lecture seule. Il sera "
+"supprimé dans l'édition \"O\"."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(Désactivez le mode insecure_debug pour supprimer ces détails.)"
msgid "--all option cannot be mixed with other options"
-msgstr "-all option ne peut pas être mélanger avec d'autres options."
+msgstr "L'option -all ne peut pas être associée à d'autres options"
msgid "A project-scoped token is required to produce a service catalog."
-msgstr "Un jeton de projet est requis pour produire un catalogue de service."
+msgstr ""
+"Un jeton de niveau projet est requis pour produire un catalogue de service."
msgid "Access token is expired"
-msgstr "Token d'accès est expiré"
+msgstr "Le jeton d'accès a expiré"
msgid "Access token not found"
-msgstr "Token d'accès non trouvé"
+msgstr "Jeton d'accès non trouvé"
msgid "Additional authentications steps required."
-msgstr "Authentifications étapes supplémentaires sont nécessaires ."
+msgstr "Des étapes d'authentifications supplémentaires sont nécessaires ."
msgid "An unexpected error occurred when retrieving domain configs"
msgstr ""
-"Une erreur inattendue est survenue lors de l'extraction des configurations "
+"Une erreur inattendue s'est produite lors de l'extraction des configurations "
"de domaine"
#, python-format
msgid "An unexpected error occurred when trying to store %s"
msgstr ""
-"Une erreur inattendue est survenue lors de la tentative de stockage de %s"
+"Une erreur inattendue s'est produite lors de la tentative de stockage de %s"
msgid "An unexpected error prevented the server from fulfilling your request."
-msgstr "Une erreur inattendue a empêché le serveur de traiter votre requête."
+msgstr "Une erreur inattendue a empêché le serveur de traiter votre demande."
#, python-format
msgid ""
"An unexpected error prevented the server from fulfilling your request: "
"%(exception)s"
msgstr ""
-"Une erreur inattendue a empêché le serveur de traiter votre requête: "
+"Une erreur inattendue a empêché le serveur de traiter votre demande : "
"%(exception)s"
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr "Une exception non gérée s'est produite : métadonnées introuvables."
msgid "At least one option must be provided"
-msgstr "Au moins une option doit être fourni"
+msgstr "Au moins une option doit être fournie"
msgid "At least one option must be provided, use either --all or --domain-name"
msgstr ""
-"Au moins une option doit être indiquée. Utilisez --all ou --domain-name"
+"Au moins une option doit être indiquée ; utilisez --all ou --domain-name"
msgid "At least one role should be specified."
msgstr "Au moins un rôle doit être indiqué."
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"La tentative de sélection du pilote automatique pour l'affectation basée sur "
+"l'option [identity]\\driver a échoué car le pilote %s est introuvable. "
+"Définissez l'option [assignment]/driver sur un pilote valide dans la "
+"configuration Keystone."
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "Tentative d'authentification avec une méthode non prise en charge ."
@@ -145,45 +187,107 @@ msgstr ""
"utilisez l'authentification V3"
msgid "Authentication plugin error."
-msgstr "Erreur d'authentification du plugin."
+msgstr "Erreur du plug-in d'authentification."
+
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"Le back-end `%(backend)s` n'est pas un back-end memcached valide. Back-ends "
+"valides : %(backend_list)s"
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
-"Impossible d'autoriser un jeton de requête avec un jeton émis par "
-"l'intermédiaire de la délégation."
+"Impossible d'autoriser un jeton de demande avec un jeton émis via une "
+"délégation."
#, python-format
msgid "Cannot change %(option_name)s %(attr)s"
msgstr "Impossible de modifier %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
-msgstr "Ne peut pas changer l'identifiant du domaine"
-
-msgid "Cannot change consumer secret"
-msgstr "Impossible de changer le secret du client"
+msgstr "Impossible de modifier l'ID du domaine"
msgid "Cannot change user ID"
-msgstr "Impossible de modifier l'id de l'utilisateur"
+msgstr "Impossible de modifier l'ID utilisateur"
msgid "Cannot change user name"
-msgstr "Impossible de changer le nom d'utilisateur"
+msgstr "Impossible de modifier le nom d'utilisateur"
+
+#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "Impossible de créer un nœud final avec une URL non valide : %(url)s"
#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "Impossible de créer le projet %(project_id)s avec le parent"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "Impossible de dupliquer le nom %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"Impossible de créer le projet, car il indique son propriétaire comme domaine "
+"%(domain_id)s, mais spécifie un parent figurant dans un autre domaine "
+"(%(parent_domain_id)s)."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"Impossible de créer le projet, car son parent (%(domain_id)s) fait office de "
+"domaine, mais l'ID parent (%(parent_id)s) spécifié pour le projet ne "
+"correspond pas à cet ID de domaine (domain_id)."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"Impossible de supprimer un domaine activé, veuillez d'abord le désactiver."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Impossible de supprimer le projet %(project_id)s car son sous-arbre contient "
+"des projets activés."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"Impossible de supprimer le projet %s car il ne s'agit pas d'une feuille dans "
+"la hiérarchie. Utilisez l'option cascade si vous voulez supprimer un sous-"
+"arbre complet."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Impossible de désactiver le projet %(project_id)s car son sous-arbre "
+"contient des projets activés."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "Impossible d'activer le projet %s car ses parents sont désactivés"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"Impossible de répertorier les affectations en provenance de groupes et "
+"filtrées par ID utilisateur."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
-"Impossible de répertorier des jetons de requête avec un jeton émis par "
-"l'intermédiaire de la délégation."
+"Impossible de répertorier des jetons de demande avec un jeton émis via une "
+"délégation."
#, python-format
msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
-msgstr "Impossible d'ouvrir le certificat %(cert_file)s. Raison: %(reason)s"
+msgstr "Impossible d'ouvrir le certificat %(cert_file)s. Raison : %(reason)s"
#, python-format
msgid "Cannot remove role that has not been granted, %s"
@@ -193,8 +297,13 @@ msgid ""
"Cannot truncate a driver call without hints list as first parameter after "
"self "
msgstr ""
-"Impossible de tronquer un appel de pilote sans avoir hints list comme "
-"premier paramètre après self "
+"Impossible de tronquer un appel de pilote sans hints list comme premier "
+"paramètre après self "
+
+msgid "Cannot update domain_id of a project that has children."
+msgstr ""
+"Impossible de mettre à jour l'ID de domaine (domain_id) d'un projet "
+"comportant des enfants."
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
@@ -208,20 +317,27 @@ msgstr ""
"Impossible d'utiliser les paramètres d'interrogation subtree_as_list et "
"subtree_as_ids en même temps."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "La mise à jour en cascade n'est autorisée que pour l'attribut activé."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
-"Le fait de combiner un filtre effectif et un filtre de groupes donnera "
+"Le fait de combiner un filtre effectif et un filtre de groupes produira "
"toujours une liste vide."
msgid ""
"Combining effective, domain and inherited filters will always result in an "
"empty list."
msgstr ""
-"Le fait de combiner des filtres effectifs, de domaine et hérités donnera "
+"Le fait de combiner des filtres effectifs, de domaine et hérités produira "
"toujours une liste vide."
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "Entité Config API à /domains/%s/config"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr ""
"Un conflit s'est produit lors de la tentative de stockage de %(type)s - "
@@ -229,11 +345,10 @@ msgstr ""
#, python-format
msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
-msgstr ""
-"ID de région contradictoires indiqués : \"%(url_id)s\" != \"%(ref_id)s\""
+msgstr "ID de région indiqués en conflit : \"%(url_id)s\" != \"%(ref_id)s\""
msgid "Consumer not found"
-msgstr "Client non trouvé"
+msgstr "Consommateur non trouvé"
#, python-format
msgid ""
@@ -244,120 +359,161 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"Impossible de déterminer l'ID du fournisseur d'identité. L'option de "
+"configuration %(issuer_attribute)s est introuvable dans l'environnement de "
+"demande."
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
-"%(group_or_option)s introuvable dans la configuration de domaine pour le "
-"domaine %(domain_id)s"
+"Impossible de trouver %(group_or_option)s dans la configuration de domaine "
+"pour le domaine %(domain_id)s"
#, python-format
msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
-msgstr "Groupe de points finals introuvable : %(endpoint_group_id)s"
+msgstr ""
+"Impossible de trouver le groupe de nœuds finaux : %(endpoint_group_id)s"
msgid "Could not find Identity Provider identifier in environment"
msgstr ""
-"L'identificateur de fournisseur d'identité est introuvable dans "
-"l'environnement."
+"Identificateur de fournisseur d'identité introuvable dans l'environnement"
#, python-format
msgid "Could not find Identity Provider: %(idp_id)s"
-msgstr "Impossible de trouver l'identité du Provider: %(idp_id)s"
+msgstr "Impossible de trouver le fournisseur d'identité : %(idp_id)s"
#, python-format
msgid "Could not find Service Provider: %(sp_id)s"
-msgstr "Le fournisseur de services %(sp_id)s est introuvable"
+msgstr "Impossible de trouver le fournisseur de services : %(sp_id)s"
#, python-format
msgid "Could not find credential: %(credential_id)s"
-msgstr "Impossible de trouver les paramètres du compte: %(credential_id)s"
+msgstr "Impossible de trouver les données d'identification : %(credential_id)s"
#, python-format
msgid "Could not find domain: %(domain_id)s"
-msgstr "Impossible de trouver le domaine: %(domain_id)s"
+msgstr "Impossible de trouver le domaine : %(domain_id)s"
#, python-format
msgid "Could not find endpoint: %(endpoint_id)s"
-msgstr "Noeud final %(endpoint_id)s introuvable."
+msgstr "Impossible de trouver le nœud final : %(endpoint_id)s"
#, python-format
msgid ""
"Could not find federated protocol %(protocol_id)s for Identity Provider: "
"%(idp_id)s"
msgstr ""
-"Protocole fédéré %(protocol_id)s introuvable pour le fournisseur "
-"d'identité : %(idp_id)s"
+"Impossible de trouver le protocole fédéré %(protocol_id)s pour le "
+"fournisseur d'identité : %(idp_id)s"
#, python-format
msgid "Could not find group: %(group_id)s"
-msgstr "Impossible de trouver le groupe: %(group_id)s"
+msgstr "Impossible de trouver le groupe : %(group_id)s"
#, python-format
msgid "Could not find mapping: %(mapping_id)s"
-msgstr "Mappage %(mapping_id)s introuvable."
+msgstr "Impossible de trouver le mappage : %(mapping_id)s"
msgid "Could not find policy association"
-msgstr "Association de règle introuvable."
+msgstr "Association de stratégie introuvable"
#, python-format
msgid "Could not find policy: %(policy_id)s"
-msgstr "Règle %(policy_id)s introuvable."
+msgstr "Impossible de trouver la stratégie : %(policy_id)s"
#, python-format
msgid "Could not find project: %(project_id)s"
-msgstr "Impossible de trouver le projet: %(project_id)s"
+msgstr "Impossible de trouver le projet : %(project_id)s"
#, python-format
msgid "Could not find region: %(region_id)s"
-msgstr "Impossible de trouver la région: %(region_id)s"
-
-msgid "Could not find role"
-msgstr "Ne peut pas trouvé le role"
+msgstr "Impossible de trouver la région : %(region_id)s"
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
"%(actor_id)s, project or domain: %(target_id)s"
msgstr ""
-"Affectation de rôle avec le rôle : %(role_id)s, l'utilisateur ou le groupe : "
-"%(actor_id)s, le projet ou le domaine : %(target_id)s introuvable"
+"Impossible de trouver l'affectation de rôle avec le rôle : %(role_id)s, "
+"utilisateur ou groupe : %(actor_id)s, projet ou domaine : %(target_id)s"
#, python-format
msgid "Could not find role: %(role_id)s"
-msgstr "Impossible de trouver le rôle: %(role_id)s"
+msgstr "Impossible de trouver le rôle : %(role_id)s"
#, python-format
msgid "Could not find service: %(service_id)s"
-msgstr "Impossible de trouver le service: %(service_id)s"
+msgstr "Impossible de trouver le service : %(service_id)s"
#, python-format
msgid "Could not find token: %(token_id)s"
-msgstr "Impossible de trouver le token: %(token_id)s"
+msgstr "Impossible de trouver le jeton : %(token_id)s"
#, python-format
msgid "Could not find trust: %(trust_id)s"
-msgstr "Confiance %(trust_id)s introuvable."
+msgstr "Impossible de trouver la confiance : %(trust_id)s"
#, python-format
msgid "Could not find user: %(user_id)s"
-msgstr "Impossible de trouver l'utilisateur: %(user_id)s"
+msgstr "Impossible de trouver l'utilisateur : %(user_id)s"
#, python-format
msgid "Could not find version: %(version)s"
-msgstr "Impossible de trouver la version: %(version)s"
+msgstr "Impossible de trouver la version : %(version)s"
#, python-format
msgid "Could not find: %(target)s"
-msgstr "N'est pas trouvé: %(target)s"
+msgstr "Impossible de trouver : %(target)s"
+
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"Impossible de mapper des propriétés d'utilisateur fédéré à des valeurs "
+"d'identité. Pour plus d'informations, consultez les journaux de débogage ou "
+"le mappage utilisé."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"Impossible de mapper l'utilisateur lors de la définition de l'identité "
+"utilisateur éphémère. Des règles de mappage doivent spécifier l'ID "
+"utilisateur/le nom ou la variable d'environnement REMOTE_USER doit être "
+"définie."
msgid "Could not validate the access token"
-msgstr "Ne peut pas valider l'acces du token"
+msgstr "Impossible de valider le jeton d'accès"
msgid "Credential belongs to another user"
msgstr "Les données d'identification appartiennent à un autre utilisateur"
+msgid "Credential signature mismatch"
+msgstr "Non concordance de signature des données d'identification"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "Base de données dans /domains/%s/config"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"L'importation directe du plug-in d'authentification %(name)r est obsolète "
+"depuis Liberty en faveur de son point d'entrée depuis %(namespace)r et "
+"susceptible d'être supprimée dans N."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"L'importation directe du pilote %(name)r est obsolète depuis Liberty en "
+"faveur de son point d'entrée depuis %(namespace)r et susceptible d'être "
+"supprimée dans N."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -375,28 +531,30 @@ msgstr "Le domaine ne peut pas s'appeler %s"
#, python-format
msgid "Domain cannot have ID %s"
-msgstr "Le domaine ne peut pas posséder l'ID %s"
+msgstr "Le domaine ne peut pas avoir l'ID %s"
#, python-format
msgid "Domain is disabled: %s"
msgstr "Domaine désactivé : %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Les métadata du domaine ne sont pas supporté par LDAP"
+msgid "Domain name cannot contain reserved characters."
+msgstr "Le nom de domaine ne peut pas contenir des caractères réservés."
msgid "Domain scoped token is not supported"
-msgstr "Le jeton de périmètre du domaine n'est pas pris en charge"
+msgstr "Le jeton configuré du domaine n'est pas pris en charge"
+
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr ""
+"Les rôles spécifiques au domaine ne sont pas pris en charge dans le pilote "
+"de rôle V8 "
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
msgstr ""
-"Le domaine : %(domain)s possède déjà une configuration définie - ce fichier "
-"sera ignoré : %(file)s."
-
-msgid "Domains are read-only against LDAP"
-msgstr "Les domaines sont en lecture seule pour LDAP"
+"Une configuration est déjà définie pour le domaine %(domain)s - fichier "
+"ignoré : %(file)s."
msgid "Duplicate Entry"
msgstr "Entrée en double"
@@ -406,21 +564,40 @@ msgid "Duplicate ID, %s."
msgstr "ID en double, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "Entrée en double : %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "Nom en double, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ID distant en double : %s"
+
+msgid "EC2 access key not found."
+msgstr "Clé d'accès EC2 non trouvée."
+
+msgid "EC2 signature not supplied."
+msgstr "Signature EC2 non fournie."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"L'argument --bootstrap-password ou OS_BOOTSTRAP_PASSWORD doit être défini."
+
msgid "Enabled field must be a boolean"
-msgstr "La zone activée doit être un booléen"
+msgstr "La zone activée doit être de type booléen"
msgid "Enabled field should be a boolean"
-msgstr "La zone activée devrait être un booléen"
+msgstr "La zone activée devrait être de type booléen"
#, python-format
msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
-msgstr "Noeud final %(endpoint_id)s introuvable dans le projet %(project_id)s"
+msgstr "Nœud final %(endpoint_id)s non trouvé dans le projet %(project_id)s"
msgid "Endpoint Group Project Association not found"
-msgstr "Association de projets du groupe de points finals introuvable"
+msgstr "Association de projets du groupe de nœuds finaux non trouvée"
msgid "Ensure configuration option idp_entity_id is set."
msgstr "Assurez-vous que l'option de configuration idp_entity_id est définie."
@@ -437,19 +614,41 @@ msgstr ""
"domaine : %(domain)s, fichier : %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Erreur lors de l'ouverture du fichier %(path)s : %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Erreur lors de l'analyse de la ligne : '%(line)s' : %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Erreur lors de l'analyse syntaxique des règles %(path)s : %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
-msgstr "Erreur durant la lecteur des méta data du fichier, %(reason)s"
+msgstr ""
+"Erreur lors de la lecture du fichier de métadonnées du fichier, %(reason)s"
+
+#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Nombre de tentatives d'enregistrement du domaine %(domain)s dépassé pour "
+"utiliser le pilote SQL, le dernier domaine qui semble l'avoir contenu est "
+"%(last_domain)s, abandon..."
#, python-format
msgid "Expected dict or list: %s"
-msgstr "Type dictionnaire ou liste attendu: %s"
+msgstr "Dictionnaire ou liste attendu : %s"
msgid ""
"Expected signing certificates are not available on the server. Please check "
"Keystone configuration."
msgstr ""
-"Les certificats signataires attendus sont indisponibles sur le serveur. "
-"Veuillez vérifier la configuration de Keystone."
+"Les certificats signataires attendus ne sont pas disponibles sur le serveur. "
+"Vérifiez la configuration de Keystone."
#, python-format
msgid ""
@@ -457,19 +656,19 @@ msgid ""
"with the request since it is either malformed or otherwise incorrect. The "
"client is assumed to be in error."
msgstr ""
-"%(attribute)s recherché dans %(target)s - le serveur n'a pas pu se conformer "
-"à la requête puisqu'elle est mal formée ou incorrecte. Par défaut, le client "
-"est en erreur."
+"En attente de recherche de %(attribute)s dans %(target)s - le serveur n'a "
+"pas pu se conformer à la demande car elle est incorrectement formée ou "
+"incorrecte. Le client est considéré comme étant à l'état d'erreur."
#, python-format
msgid "Failed to start the %(name)s server"
msgstr "Impossible de démarrer le serveur %(name)s"
msgid "Failed to validate token"
-msgstr "Echec de validation du token"
+msgstr "Échec de validation du jeton"
msgid "Federation token is expired"
-msgstr "La fédération du toke est expiré"
+msgstr "Le jeton de fédération a expiré"
#, python-format
msgid ""
@@ -480,33 +679,37 @@ msgstr ""
"pas être définie pour redéléguer une fiducie"
msgid "Found invalid token: scoped to both project and domain."
-msgstr "Jeton non valide trouvé : portée de projet et de domaine."
+msgstr "Jeton non valide trouvé : configuré dans projet et domaine."
+
+#, python-format
+msgid "Group %s not found in config"
+msgstr "Groupe %s non trouvé dans la configuration"
#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
"Le groupe %(group)s n'est pas pris en charge pour les configurations "
-"spécifiques au domaine"
+"spécifiques à un domaine"
#, python-format
msgid ""
"Group %(group_id)s returned by mapping %(mapping_id)s was not found in the "
"backend."
msgstr ""
-"Groupe %(group_id)s renvoyé par le mappage %(mapping_id)s introuvable dans "
-"le backend."
+"Le groupe %(group_id)s renvoyé par le mappage %(mapping_id)s introuvable "
+"dans le back-end."
#, python-format
msgid ""
"Group membership across backend boundaries is not allowed, group in question "
"is %(group_id)s, user is %(user_id)s"
msgstr ""
-"Appartenance au groupe entre frontières dorsales interdite, le groupe en "
-"question est %(group_id)s, l'utilisateur est %(user_id)s"
+"L'appartenance de groupe entre frontières de back-end n'est pas autorisée, "
+"le groupe en question est %(group_id)s, l'utilisateur est %(user_id)s"
#, python-format
msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
-msgstr "L'attribut ID %(id_attr)s est introuvable dans l'objet LDAP %(dn)s"
+msgstr "L'attribut ID %(id_attr)s non trouvé dans l'objet LDAP %(dn)s"
#, python-format
msgid "Identity Provider %(idp)s is disabled"
@@ -516,38 +719,43 @@ msgid ""
"Incoming identity provider identifier not included among the accepted "
"identifiers."
msgstr ""
-"L'identificateur entrant du fournisseur d'identité ne fait pas partie des "
+"L'identificateur de fournisseur d'identité entrant ne fait pas partie des "
"identificateurs acceptés."
+msgid "Invalid EC2 signature."
+msgstr "Signature EC2 non valide."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
"Option de certificat TLS LDAP non valide : %(option)s. Choisissez l'une des "
-"options : %(options)s"
+"options suivantes : %(options)s"
#, python-format
msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "Mauvaise option LDAP TLS_AVAIL: %s. TLS n'est pas disponible"
+msgstr "Option TLS_AVAIL LDAP non valide : %s. TLS non disponible"
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
msgstr ""
-"Option déréférencée LDAP non valide : %(option)s. Choisir l'une des options "
-"suivantes : %(options)s"
+"Option déréférencée LDAP non valide : %(option)s. Choisissez l'une des "
+"options suivantes : %(options)s"
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
-msgstr "Portée LDAP invalide: %(scope)s. Choisissez parmi: %(options)s"
+msgstr ""
+"Portée LDAP non valide : %(scope)s. Choisissez l'une des portées suivantes : "
+"%(options)s"
msgid "Invalid TLS / LDAPS combination"
-msgstr "Combinaison TLS / LDAPS invalide"
+msgstr "Combinaison TLS / LDAPS non valide"
#, python-format
msgid "Invalid audit info data type: %(data)s (%(type)s)"
msgstr "Type de données d'information d'audit non valide : %(data)s (%(type)s)"
msgid "Invalid blob in credential"
-msgstr "Blob non valide dans les informations d'identification"
+msgstr "Objet LOB non valide dans les informations d'identification"
#, python-format
msgid ""
@@ -555,7 +763,7 @@ msgid ""
"ignoring this file."
msgstr ""
"Nom de domaine non valide : %(domain)s trouvé dans le nom du fichier de "
-"configuration : %(file)s - ce fichier sera ignoré."
+"configuration : %(file)s - fichier ignoré."
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
@@ -563,12 +771,10 @@ msgstr "Configuration spécifique au domaine non valide : %(reason)s"
#, python-format
msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
-msgstr ""
-"Valeur d'entrée incorrecte pour la zone '%(path)s'. La valeur est "
-"'%(value)s'."
+msgstr "Entrée non valide pour la zone '%(path)s'. La valeur est '%(value)s'."
msgid "Invalid limit value"
-msgstr "Limite de valeur non valide"
+msgstr "Valeur de limite non valide"
#, python-format
msgid ""
@@ -576,8 +782,8 @@ msgid ""
"Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: "
"%(service_id)s, Region: %(region_id)s"
msgstr ""
-"Combinaison non valide d'entités pour l'association de règle. Seules les "
-"entités Point final, Service ou Région+Service sont autorisées. La demande "
+"Combinaison d'entités non valide pour l'association de stratégie. Seules les "
+"entités Nœud final, Service ou Région+Service sont autorisées. La demande "
"était Point final : %(endpoint_id)s, Service : %(service_id)s, Région : "
"%(region_id)s"
@@ -592,27 +798,22 @@ msgstr ""
msgid "Invalid signature"
msgstr "Signature non valide"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"Valeur de ssl_cert_reqs non valide (%s), doit être l'une des valeurs "
-"suivantes : \"NONE\", \"OPTIONAL\", \"REQUIRED\""
-
msgid "Invalid user / password"
-msgstr "Login / Mot de passe non valide"
+msgstr "Utilisateur / Mot de passe non valide"
+
+msgid "Invalid username or TOTP passcode"
+msgstr "Nom d'utilisateur ou code TOTP non valide"
msgid "Invalid username or password"
-msgstr "Nom d'utilisateur ou mot de passe invalide"
+msgstr "Nom d'utilisateur ou mot de passe non valide"
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr "KVS region %s est déjà configuré. Ne peut pas être reconfiguré."
+msgstr "La région KVS %s est déjà configurée. Reconfiguration impossible."
#, python-format
msgid "Key Value Store not configured: %s"
-msgstr "La valeur de la clé du magasin n'est pas configuré: %s"
+msgstr "Magasin de valeurs de clé non configuré : %s"
#, python-format
msgid "LDAP %s create"
@@ -626,9 +827,25 @@ msgstr "Suppression LDAP %s"
msgid "LDAP %s update"
msgstr "Mise à jour LDAP %s"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"Longueur de l'ID de ressource transformable > 64 (nombre maximal de "
+"caractères autorisé)"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"La section locale dans le mappage %(mapping_id)s fait référence à une "
+"correspondance éloignée qui n'existe pas (par exemple, {0} dans une section "
+"locale)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
-msgstr "Le délai de verrouillage s'est produit pour la clé, %(target)s"
+msgstr ""
+"Un dépassement de délai de verrouillage s'est produit pour la clé, %(target)s"
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
@@ -639,11 +856,15 @@ msgstr ""
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
msgstr ""
-"Un caractère est mal formé dans URL (%(endpoint)s), regarder le log d'erreur "
-"pour plus de détails."
+"URL de nœud final incorrectement formée (%(endpoint)s), consultez le journal "
+"des ERREURS pour plus de détails."
msgid "Marker could not be found"
-msgstr "Le marqueur ne peut pas être trouvé"
+msgstr "Le marqueur est introuvable"
+
+#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "La profondeur maximale de hiérarchie est atteinte pour la branche %s."
#, python-format
msgid "Maximum lock attempts on %s occurred."
@@ -655,10 +876,10 @@ msgstr "Le membre %(member)s est déjà membre du groupe %(group)s"
#, python-format
msgid "Method not callable: %s"
-msgstr "Impossible d'appeler la méthode %s"
+msgstr "Impossible d'appeler la méthode : %s"
msgid "Missing entity ID from environment"
-msgstr "IP d'entité manquant de l'environnement"
+msgstr "IP d'entité absent de l'environnement"
msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
@@ -668,16 +889,19 @@ msgstr ""
"interdite. Il est conseillé d'omettre ce paramètre."
msgid "Multiple domains are not supported"
-msgstr "Les multiples domaines ne sont pas supporté"
+msgstr "Les domaines multiples ne sont pas pris en charge"
msgid "Must be called within an active lock context."
-msgstr "Doit être appelé dans un contexte de verrou actif."
+msgstr "Doit être appelé au sein d'un contexte de verrouillage actif."
msgid "Must specify either domain or project"
-msgstr "Indiquer obligatoirement un domaine ou un projet"
+msgstr "Indiquez un domaine ou un projet"
msgid "Name field is required and cannot be empty"
-msgstr "La zone de nom est requise et ne peut pas être vide"
+msgstr "Le champ de nom est obligatoire et ne peut pas être vide"
+
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "Aucun ID ou nom de domaine de projet n'a été fourni."
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
@@ -701,12 +925,15 @@ msgstr "Aucune option spécifiée"
#, python-format
msgid "No policy is associated with endpoint %(endpoint_id)s."
-msgstr "Aucune règle n'est associée au point final %(endpoint_id)s."
+msgstr "Aucune stratégie n'est associée au nœud final %(endpoint_id)s."
#, python-format
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "Aucune utilisation restante pour la confiance : %(trust_id)s"
+msgid "No token in the request"
+msgstr "Aucun jeton dans la demande"
+
msgid "Non-default domain is not supported"
msgstr "Le domaine non par défaut n'est pas pris en charge"
@@ -718,7 +945,7 @@ msgid ""
"Option %(option)s found with no group specified while checking domain "
"configuration request"
msgstr ""
-"Option %(option)s trouvée avec aucun groupe spécifié lors de la vérification "
+"Option %(option)s trouvée sans aucun groupe spécifié lors de la vérification "
"de la demande de configuration du domaine"
#, python-format
@@ -727,15 +954,35 @@ msgid ""
"configurations"
msgstr ""
"L'option %(option)s dans le groupe %(group)s n'est pas prise en charge pour "
-"les configurations spécifiques au domaine"
+"les configurations spécifiques à un domaine"
#, python-format
msgid "Project (%s)"
msgstr "Projet (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "ID de projet non trouvé : %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "La zone Projet est requise et ne doit pas être vide."
+
+#, python-format
msgid "Project is disabled: %s"
-msgstr "Projet désactivé : %s"
+msgstr "Le projet est désactivé : %s"
+
+msgid "Project name cannot contain reserved characters."
+msgstr "Le nom de projet ne peut pas contenir des caractères réservés."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "La chaine de requête n'est pas au format UTF-8. "
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"La lecture de la valeur par défaut pour l'option %(option)s dans le groupe "
+"%(group)s n'est pas prise en charge"
msgid "Redelegation allowed for delegated by trust only"
msgstr "Redélégation autorisée pour une délégation par fiducie uniquement"
@@ -748,8 +995,81 @@ msgstr ""
"Profondeur de redélégation restante %(redelegation_depth)d par rapport à la "
"plage admise [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez admin_crud_extension du pipeline de collage, l'extension "
+"admin_crud est désormais toujours disponible. Mettez à jour la section "
+"[pipeline:admin_api] dans le fichier keystone-paste.ini en conséquence, car "
+"elle sera supprimée dans l'édition O."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Supprimez endpoint_filter_extension du pipeline de collage, l'extension du "
+"filtre de nœud final est désormais toujours disponible. Mettez à jour la "
+"section [pipeline:api_v3] dans le fichier keystone-paste.ini en conséquence "
+"car elle sera supprimée dans l'édition O."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez federation_extension du pipeline de collage, l'extension de "
+"fédération est désormais toujours disponible. Mettez à jour la section "
+"[pipeline:api_v3] dans le fichier keystone-paste.ini en conséquence, car "
+"elle sera supprimée dans l'édition O."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez oauth1_extension du pipeline de collage, l'extension oauth1 est "
+"désormais toujours disponible. Mettez à jour la section [pipeline:api_v3] "
+"dans le fichier keystone-paste.ini en conséquence, car elle sera supprimée "
+"dans l'édition O."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez revoke_extension du pipeline de collage, l'extension revoke est "
+"désormais toujours disponible. Mettez à jour la section [pipeline:api_v3] "
+"dans le fichier keystone-paste.ini en conséquence, car elle sera supprimée "
+"dans l'édition O."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez simple_cert du pipeline de collage, les fournisseurs de jetons "
+"PKI et PKIz sont désormais obsolètes et simple_cert n'a été utilisé que pour "
+"la prise en charge de ces fournisseurs. Mettez à jour la section [pipeline:"
+"api_v3] dans le fichier keystone-paste.ini en conséquence, car elle sera "
+"supprimée dans l'édition O."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Supprimez user_crud_extension du pipeline de collage, l'extension user_crud "
+"est désormais toujours disponible. Mettez à jour la section [pipeline:"
+"public_api] dans le fichier keystone-paste.ini en conséquence, car elle sera "
+"supprimée de l'édition O."
+
msgid "Request Token does not have an authorizing user id"
-msgstr "Le jeton de la demande ne possède pas d'ID utilisateur d'autorisation"
+msgstr "Le jeton de demande ne comporte pas d'ID utilisateur d'autorisation"
#, python-format
msgid ""
@@ -757,19 +1077,19 @@ msgid ""
"server could not comply with the request because the attribute size is "
"invalid (too large). The client is assumed to be in error."
msgstr ""
-"La valeur de l'attribut %(attribute)s de la demande doit être inférieure ou "
-"égale à %(size)i. Il se peut que le serveur ne soit pas conforme à la "
-"demande car la taille de l'attribut est incorrecte (excessive). Par défaut, "
-"le client est en erreur."
+"La valeur de l'attribut de demande %(attribute)s doit être inférieure ou "
+"égale à %(size)i. Le serveur n'a pas pu se conformer à la demande car la "
+"taille de l'attribut n'est pas valide. Le client est considéré comme étant à "
+"l'état d'erreur."
msgid "Request must have an origin query parameter"
msgstr "La demande doit avoir un paramètre de requête d'origine"
msgid "Request token is expired"
-msgstr "La requete du token est expiré"
+msgstr "Le jeton de la demande a expiré"
msgid "Request token not found"
-msgstr "Token de requete non trouvé"
+msgstr "Jeton de demande non trouvé"
msgid "Requested expiration time is more than redelegated trust can provide"
msgstr ""
@@ -784,18 +1104,14 @@ msgstr ""
"La profondeur de redélégation demandée %(requested_count)d est supérieure à "
"la limite autorisée %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "Rôle %s non trouvé"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
"be removed in the \"M\"-Release."
msgstr ""
-"Exécution de keystone via eventlet est obsolète depuis Kilo et remplacée par "
-"l'exécution dans un serveur WSGI (par exemple, mod_wsgi). La prise en charge "
-"pour keystone sous l'eventlet sera supprimée dans \"M\"-Release."
+"L'exécution de Keystone via eventlet est obsolète depuis Kilo et remplacée "
+"par l'exécution sur un serveur WSGI (par exemple, mod_wsgi). La prise en "
+"charge pour Keystone sous l'eventlet sera supprimée dans \"M\"-Release."
msgid "Scoping to both domain and project is not allowed"
msgstr "La configuration du domaine et du projet n'est pas autorisée"
@@ -816,10 +1132,10 @@ msgid "Some of requested roles are not in redelegated trust"
msgstr "Certains rôles demandés ne font pas partie de la fiducie redéléguée"
msgid "Specify a domain or project, not both"
-msgstr "Spécifier un domaine ou un projet, pas les deux"
+msgstr "Indiquez un domaine ou un projet, mais pas les deux"
msgid "Specify a user or group, not both"
-msgstr "Spécifier un utilisateur ou groupe, pas les deux"
+msgstr "Indiquez un utilisateur ou un groupe, mais pas les deux"
msgid "Specify one of domain or project"
msgstr "Indiquez un domaine ou un projet"
@@ -833,26 +1149,47 @@ msgid ""
"of column %(type)s(CHAR(%(length)d))."
msgstr ""
"Longueur de chaîne dépassée. La longueur de la chaîne '%(string)s a dépassé "
-"la valeur maximale de colonne %(type)s(CHAR(%(length)d))."
+"la limite de la colonne %(type)s(CHAR(%(length)d))."
+
+msgid "Tenant name cannot contain reserved characters."
+msgstr "Le nom de locataire ne peut pas contenir des caractères réservés."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"L'extension %s a été déplacée vers le noyau Keystone et ses migrations sont "
+"donc gérées par le contrôle de la base de données Keystone principale. "
+"Utilisez la commande : keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"La valeur de 'expires_at' ne doit pas être située dans le passé. Le serveur "
+"n'a pas pu se conformer à la demande car elle est incorrectement formée ou "
+"incorrecte. Le client est considéré comme étant à l'état d'erreur."
msgid "The --all option cannot be used with the --domain-name option"
msgstr "L'option --all ne peut pas être utilisée avec l'option --domain-name"
#, python-format
msgid "The Keystone configuration file %(config_file)s could not be found."
-msgstr ""
-"Le fichier de configuration Keystone %(config_file)s ne peut pas être trouvé."
+msgstr "Le fichier de configuration Keystone %(config_file)s est introuvable."
#, python-format
msgid ""
"The Keystone domain-specific configuration has specified more than one SQL "
"driver (only one is permitted): %(source)s."
msgstr ""
-"La configuration spécifique au domaine keystone a spécifié plusieurs pilotes "
+"La configuration spécifique au domaine Keystone a spécifié plusieurs pilotes "
"SQL (un seul est autorisé) : %(source)s."
msgid "The action you have requested has not been implemented."
-msgstr "L'action que vous avez demandée n'a pas été implémentée."
+msgstr "L'action que vous avez demandée n'est pas implémentée."
msgid "The authenticated user should match the trustor."
msgstr "L'utilisateur authentifié doit correspondre au fiduciant."
@@ -862,20 +1199,30 @@ msgid ""
"server does not use PKI tokens otherwise this is the result of "
"misconfiguration."
msgstr ""
-"Les certificats que vous avez demandés sont indisponibles. Il est probable "
-"que ce serveur n'utilise pas les jetons PKI ; sinon, c'est le résultat d'un "
-"problème de configuration."
+"Les certificats que vous avez demandés ne sont pas disponibles. Il est "
+"probable que ce serveur n'utilise pas des jetons PKI ; sinon, ceci est le "
+"résultat d'un problème de configuration."
+
+msgid "The configured token provider does not support bind authentication."
+msgstr ""
+"Le fournisseur de jeton configuré ne prend pas en charge l'authentification "
+"par liaison."
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr ""
+"La création de projets faisant office de domaines n'est pas autorisée en v2."
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
"not comply with the request because the password is invalid."
msgstr ""
-"La longueur du mot de passe doit être inférieure ou égale à %(size)i. n'est "
-"pas conforme à la demande car le mot de passe est incorrect."
+"La longueur du mot de passe doit être inférieure ou égale à %(size)i. Le "
+"serveur n'a pas pu se conformer à la demande car le mot de passe n'est pas "
+"valide."
msgid "The request you have made requires authentication."
-msgstr "La demande que vous avez fait requiert une authentification."
+msgstr "La demande que vous avez présentée requiert une authentification."
msgid "The resource could not be found."
msgstr "La ressource est introuvable."
@@ -885,7 +1232,7 @@ msgid ""
"in the Keystone server. The current request is aborted."
msgstr ""
"L'appel de révocation ne doit pas contenir à la fois domain_id et "
-"project_id. Il s'agit d'un bogue dans le serveur Keystone. La demande en "
+"project_id. Il s'agit d'un bogue sur le serveur Keystone. La demande en "
"cours est abandonnée."
msgid "The service you have requested is no longer available on this server."
@@ -896,7 +1243,7 @@ msgid ""
"The specified parent region %(parent_region_id)s would create a circular "
"region hierarchy."
msgstr ""
-"La région parent spécifiée %(parent_region_id)s risque de créer une "
+"La région parente spécifiée %(parent_region_id)s pourrait créer une "
"hiérarchie de région circulaire."
#, python-format
@@ -908,26 +1255,23 @@ msgstr ""
"dictionnaire d'options"
msgid "There should not be any non-oauth parameters"
-msgstr "Aucun paramètre non-oauth ne doit être utilisé"
+msgstr "Il ne doit y avoir aucun paramètre non-oauth"
#, python-format
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Il ne s'agit pas d'une version de contenu Fernet reconnue : %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Il ne s'agit pas d'un jeton v2.0 Fernet. Utilisez v3 pour les jetons de "
-"confiance, de domaine ou fédérés."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Il ne s'agit pas d'un jeton Fernet reconnu %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client is "
"assumed to be in error."
msgstr ""
-"Horodatage n'est pas au format attendu. Le serveur n'a pas pu se conformer à "
-"la demande car elle est incorrectement formée ou incorrecte. Le client est "
+"L'horodatage n'est pas au format attendu. Le serveur n'a pas pu se conformer "
+"à la demande car elle est incorrectement formée ou incorrecte. Le client est "
"considéré comme étant à l'état d'erreur."
#, python-format
@@ -944,20 +1288,23 @@ msgid "Token belongs to another user"
msgstr "Le jeton appartient à un autre utilisateur"
msgid "Token does not belong to specified tenant."
-msgstr "Le jeton n'appartient pas au titulaire spécifié."
+msgstr "Le jeton n'appartient pas au locataire spécifié."
+
+msgid "Token version is unrecognizable or unsupported."
+msgstr "Version de jeton non reconnue ou non prise en charge."
msgid "Trustee has no delegated roles."
msgstr "Le fiduciaire n'a aucun rôle délégué."
msgid "Trustor is disabled."
-msgstr "Trustor est désactivé. "
+msgstr "Le fiduciant est désactivé. "
#, python-format
msgid ""
"Trying to update group %(group)s, so that, and only that, group must be "
"specified in the config"
msgstr ""
-"Tentative de mise à jour du groupe %(group)s, de sorte que le groupe soit "
+"Tentative de mise à jour du groupe %(group)s, de sorte que le groupe est "
"spécifié dans la configuration uniquement"
#, python-format
@@ -974,14 +1321,14 @@ msgid ""
"that, option must be specified in the config"
msgstr ""
"Tentative de mise à jour de l'option %(option)s dans le groupe %(group)s, de "
-"sorte que l'option soit spécifiée dans la configuration uniquement"
+"sorte que l'option est spécifiée dans la configuration uniquement"
msgid ""
"Unable to access the keystone database, please check it is configured "
"correctly."
msgstr ""
-"Impossible d'accéder à la base de données keystone, vérifiez qu'elle est "
-"configurée correctement."
+"Impossible d'accéder à la base de données Keystone, vérifiez qu'elle est "
+"correctement configurée."
#, python-format
msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
@@ -994,7 +1341,10 @@ msgid ""
"associated endpoints."
msgstr ""
"Impossible de supprimer la région %(region_id)s car la région ou ses régions "
-"enfant ont des noeuds finals associés."
+"enfants comportent des nœuds finaux associés."
+
+msgid "Unable to downgrade schema"
+msgstr "Impossible de rétrograder le schéma"
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
@@ -1003,15 +1353,8 @@ msgstr ""
"%(mapping_id)s"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"Impossible d'établir une connexion à partir de l'ID de pool %(id)s après "
-"%(seconds)s secondes."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
-msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
+msgstr "Impossible de localiser le répertoire de configuration de domaine : %s"
#, python-format
msgid "Unable to lookup user %s"
@@ -1022,7 +1365,7 @@ msgid ""
"Unable to reconcile identity attribute %(attribute)s as it has conflicting "
"values %(new)s and %(old)s"
msgstr ""
-"Impossible de rapprocher l'attribut d'identité %(attribute)s car il possède "
+"Impossible de rapprocher l'attribut d'identité %(attribute)s car il comporte "
"des valeurs en conflit : %(new)s et %(old)s"
#, python-format
@@ -1036,11 +1379,11 @@ msgstr ""
"configuration. Raison : %(reason)s"
msgid "Unable to sign token."
-msgstr "Impossible de signer le jeton"
+msgstr "Impossible de signer le jeton."
#, python-format
msgid "Unexpected assignment type encountered, %s"
-msgstr "Type inattendu d'affectation, %s"
+msgstr "Type d'affectation inattendu, %s"
#, python-format
msgid ""
@@ -1055,7 +1398,7 @@ msgid "Unexpected status requested for JSON Home response, %s"
msgstr "Statut inattendu demandé pour la réponse JSON Home, %s"
msgid "Unknown Target"
-msgstr "Cible inconnu"
+msgstr "Cible inconnue"
#, python-format
msgid "Unknown domain '%(name)s' specified by --domain-name"
@@ -1063,37 +1406,58 @@ msgstr "Domaine inconnu '%(name)s' spécifié par --domain-name"
#, python-format
msgid "Unknown token version %s"
-msgstr "Version du token inconnu %s"
+msgstr "Version de jeton inconnue %s"
#, python-format
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "Dépendance désenregistrée : %(name)s pour %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "La mise à jour de `domain_id` n'est pas autorisée."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "La mise à jour de `is_domain` n'est pas autorisée."
+
msgid "Update of `parent_id` is not allowed."
-msgstr "La mise à jour de `parent_id` est interdite."
+msgstr "La mise à jour de `parent_id` n'est pas autorisée."
+
+msgid "Update of domain_id is only allowed for root projects."
+msgstr ""
+"La mise à jour de l'ID de domaine (domain_id) est autorisée uniquement pour "
+"les projets racine."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"La mise à jour de l'ID de domaine (domain_id) des projets faisant office de "
+"domaines n'est pas autorisée."
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
-"Utilisez un jeton dont la portée est un projet lorsque vous essayez de créer "
-"une assertion SAML"
+"Utilisez un jeton configuré du projet lorsque vous essayez de créer une "
+"assertion SAML"
+
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"L'utilisation de la configuration du pilote d'identité pour configurer "
+"automatiquement le même pilote d'affectation est désormais obsolète. Dans "
+"l'édition \"O\", le pilote d'affectation doit être configuré de manière "
+"explicite s'il est différent de la valeur par défaut (SQL)."
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "L'utilisateur %(u_id)s n'est pas autorisé pour le locataire %(t_id)s"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"L'utilisateur %(user_id)s possède déjà le rôle %(role_id)s dans le locataire "
-"%(tenant_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "L'utilisateur %(user_id)s n'a pas accès au domaine %(domain_id)s"
+msgstr "L'utilisateur %(user_id)s n'a pas accès au domaine %(domain_id)s"
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
-msgstr "L'utilisateur %(user_id)s n'a pas accès au projet %(project_id)s"
+msgstr "L'utilisateur %(user_id)s n'a pas accès au projet %(project_id)s"
#, python-format
msgid "User %(user_id)s is already a member of group %(group_id)s"
@@ -1106,6 +1470,14 @@ msgstr "Utilisateur '%(user_id)s' non trouvé dans le groupe '%(group_id)s'"
msgid "User IDs do not match"
msgstr "Les ID utilisateur ne correspondent pas."
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"L'authentification utilisateur ne peut pas être créée en raison de l'absence "
+"d'un ID, utilisateur, nom d'utilisateur avec ID de domaine ou nom "
+"d'utilisateur avec nom de domaine."
+
#, python-format
msgid "User is disabled: %s"
msgstr "Utilisateur désactivé : %s"
@@ -1114,21 +1486,36 @@ msgid "User is not a member of the requested project"
msgstr "L'utilisateur n'est pas membre du projet demandé"
msgid "User is not a trustee."
-msgstr "L'utilisateur n'est pas administrateur."
+msgstr "L'utilisateur n'est pas un fiduciaire."
msgid "User not found"
msgstr "Utilisateur introuvable"
+msgid "User not valid for tenant."
+msgstr "Utilisateur non valide pour le locataire."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Rôles utilisateur non pris en charge : tenant_id est obligatoire"
+
#, python-format
msgid "User type %s not supported"
msgstr "Type d'utilisateur %s non pris en charge"
msgid "You are not authorized to perform the requested action."
-msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée"
+msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée."
#, python-format
msgid "You are not authorized to perform the requested action: %(action)s"
-msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée: %(action)s"
+msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée : %(action)s"
+
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Vous avez essayé de créer une ressource à l'aide du jeton admin. Comme ce "
+"jeton ne figure pas dans un domaine, vous devez inclure explicitement un "
+"domaine auquel cette ressource doit appartenir."
msgid "`key_mangler` functions must be callable."
msgstr "Les fonctions `key_mangler` doivent pouvoir être appelées."
@@ -1137,57 +1524,27 @@ msgid "`key_mangler` option must be a function reference"
msgstr "L'option `key_mangler` doit être une référence de fonction"
msgid "any options"
-msgstr "toute option"
+msgstr "toutes les options"
msgid "auth_type is not Negotiate"
msgstr "auth_type n'est pas négocié"
msgid "authorizing user does not have role required"
-msgstr "un rôle est facultatif pour l'utilisateur d'autorisation"
-
-msgid "cache_collection name is required"
-msgstr "Nom cache_collection est requis"
+msgstr "l'utilisateur qui autorise n'a pas de rôle obligatoire"
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
-"Impossible de créer un projet dans une branche qui contient un projet "
+"Impossible de créer un projet dans une branche contenant un projet "
"désactivé : %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-"Impossible de créer un projet dans un domaine qui diffère de celui-ci de ses "
-"parents."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"Impossible de supprimer un domaine activé, s'il vous plait le désactiver en "
-"premier."
-
#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"Impossible de supprimer le projet %s car il ne s'agit pas d'une feuille dans "
-"la hiérarchie."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"Impossible de désactiver le projet %s car son sous-arbre contient des "
-"projets activés"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "Impossible d'activer le projet %s car ses parents sont désactivés"
-
-msgid "database db_name is required"
-msgstr "db_name database est requis"
-
-msgid "db_hosts value is required"
-msgstr "Valeur db_hosts est requis"
-
-msgid "delete the default domain"
-msgstr "Suppression du domaine par défaut"
+"impossible de supprimer un projet activé faisant office de domaine. Veuillez "
+"d'abord désactiver le projet %s."
#, python-format
msgid "group %(group)s"
@@ -1200,46 +1557,45 @@ msgstr ""
"idp_contact_type doit avoir l'une des valeurs suivantes : [technical, other, "
"support, administrative ou billing."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "valeur entière attendue pour mongo_ttl_seconds"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "valeur entière attendue pour w (attribut d'écriture)"
-
#, python-format
msgid "invalid date format %s"
-msgstr "Format de date invalid %s"
+msgstr "Format de date non valide %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "La profondeur maximale de hiérarchie est atteinte pour la branche %s."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"il est interdit d'avoir deux projets faisant office de domaines avec le même "
+"nom : %s"
-msgid "no ssl support available"
-msgstr "pas de support du ssl"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"il est interdit d'avoir deux projets au sein d'un domaine avec le même nom : "
+"%s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "seuls les projets racine sont autorisés à faire office de domaines."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "option %(option)s dans le groupe %(group)s"
-msgid "pad must be single character"
-msgstr "pad doit etre un seul caractère"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "Le texte base64url rempli doit être un multiple de 4 caractères"
-
msgid "provided consumer key does not match stored consumer key"
-msgstr "la clé du client fournie ne correspond pas à la clé du client stockée"
+msgstr ""
+"la clé de consommateur fournie ne correspond pas à la clé de consommateur "
+"stockée"
msgid "provided request key does not match stored request key"
msgstr ""
-"la clé de la demande fournie ne correspond pas à la clé de la demande stockée"
+"la clé de demande fournie ne correspond pas à la clé de demande stockée"
msgid "provided verifier does not match stored verifier"
msgstr "le vérificateur fourni ne correspond pas au vérificateur stocké"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "la région n'est pas de type dogpile.cache.CacheRegion"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses doit être un entier positif ou nul."
@@ -1247,9 +1603,6 @@ msgid "remaining_uses must not be set if redelegation is allowed"
msgstr ""
"remaining_uses ne doit pas être défini si la redélégation est autorisée"
-msgid "replicaset_name required when use_replica is True"
-msgstr "replicaset_name requis si use_replica a la valeur True"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1259,33 +1612,38 @@ msgstr ""
"contient le groupe %(group_other)s à la place"
msgid "rescope a scoped token"
-msgstr "Redéfinir la portée d'un jeton"
-
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"le texte est un multiple de 4, mais le remplissage \"%s\" se produit avant "
-"l'avant-dernier caractère"
+msgstr "Reconfigurer un jeton configuré"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
-"le texte est un multiple de 4, mais le remplissage \"%s\" se produit avant "
-"le dernier caractère qui n'est pas de remplissage"
+msgid "role %s is not defined"
+msgstr "Le rôle %s n'est pas défini"
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"le texte n'est pas un multiple de 4, mais contient le remplissage \"%s\""
+"scope.project.id doit être spécifié si include_subtree est également spécifié"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
-msgstr "tls_cacertdir %s introuvable ou n'est pas un répertoire"
+msgstr "tls_cacertdir %s non trouvé ou il ne s'agit pas d'un répertoire"
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
-msgstr "tls_cacertfile %s introuvable ou n'est pas un fichier"
+msgstr "tls_cacertfile %s non trouvé ou il ne s'agit pas d'un fichier"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "La référence de jeton doit être un type KeystoneToken, obtenu : %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"la mise à jour de domain_id est obsolète depuis Mitaka et sera supprimée "
+"dans l'édition O."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"La validation s'attendait %(param_name)r dans la signature de fonction pour "
+"%(func_name)r."
diff --git a/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
index b45fc0d3..ff8ff2d6 100644
--- a/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/hu/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Hungarian\n"
-"Language: hu\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: hu\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Hungarian\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
index 317cdc85..35960a34 100644
--- a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Italian\n"
-"Language: it\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Italian\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone.po
index e60a6d8c..bf854577 100644
--- a/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/it/LC_MESSAGES/keystone.po
@@ -1,30 +1,39 @@
-# Italian translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
-# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Remo Mattei <remo@rm.ht>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: it\n"
-"Language-Team: Italian\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-28 07:07+0000\n"
+"Last-Translator: Alessandra <alessandra@translated.net>\n"
+"Language: it\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Italian\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s non è una versione driver supportata"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr "Il nome %(entity)s non può contenere caratteri riservati: %(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -48,6 +57,10 @@ msgstr ""
"repository di migrazione in %(path)s non esiste o non è una directory."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s non implica %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s non può essere inferiore a %(min_length)s caratteri."
@@ -60,6 +73,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s non può essere superiore a %(max_length)s caratteri."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s non può essere un ruolo implicato"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s non può essere vuoto."
@@ -75,8 +92,18 @@ msgstr "Il campo %s è obbligatorio e non può essere vuoto"
msgid "%s field(s) cannot be empty"
msgstr "i campi %s non possono essere vuoti"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Disabilitare la modalità di debug per eliminare questi dettagli.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"%s per il backend di 'identità LDAP è obsoleto nella release Mitaka rispetto "
+"all'accesso LDAP di sola lettura. Verrà rimosso nella release \"O\"."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr ""
+"(Disabilitare la modalità insecure_debug per eliminare questi dettagli)."
msgid "--all option cannot be mixed with other options"
msgstr "--l'opzione all non può essere combinata con altre opzioni"
@@ -131,6 +158,17 @@ msgstr ""
msgid "At least one role should be specified."
msgstr "Specificare almeno un ruolo."
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"Tentata selezione automatica del driver per l'assegnazione basata su "
+"[identity]. Opzione \\driver non riuscita in quanto il driver %s non è stato "
+"trovato. Impostare [assignment]/driver su un driver valido nella "
+"configurazione keystone."
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "Tentativo di autenticazione con un metodo non supportato."
@@ -144,6 +182,14 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "errore di autenticazione plugin."
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"Il backend `%(backend)s` non è un backend memcached valido. Backend validi: "
+"%(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
"Impossibile autorizzare un token di richiesta con un token emesso mediante "
@@ -156,9 +202,6 @@ msgstr "Impossibile modificare %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "Impossibile modificare l'ID dominio"
-msgid "Cannot change consumer secret"
-msgstr "Impossibile modificare il segreto del consumer"
-
msgid "Cannot change user ID"
msgstr "Impossibile modificare l'ID utente"
@@ -166,12 +209,71 @@ msgid "Cannot change user name"
msgstr "Impossibile modificare il nome utente"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "Impossibile creare un endpoint con un URL non valido: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "Impossibile creare il progetto con l'elemento parent: %(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "Impossibile duplicare il nome %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"Impossibile creare un progetto in quanto specifica il relativo proprietario "
+"come un dominio (%(domain_id)s) ma specifica un elemento parent in un altro "
+"dominio (%(parent_domain_id)s)."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"Impossibile creare un progetto in quanto il relativo parent (%(domain_id)s) "
+"agisce come un dominio, ma l'id_parent (%(parent_id)s) specificato del "
+"progetto non corrisponde all'id_dominio."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"Impossibile eliminare un dominio abilitato; è necessario prima disabilitarlo."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Impossibile eliminare il progetto %(project_id)s perché la relativa "
+"struttura ad albero secondaria contiene progetti abilitati."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"Impossibile eliminare il progetto %s perché non è una foglia nella "
+"gerarchia. Se si desidera eliminare un'intera struttura ad albero secondaria "
+"utilizza l'opzione a catena."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Impossibile disabilitare il progetto %(project_id)s perché la relativa "
+"struttura ad albero secondaria contiene progetti abilitati."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr ""
+"Impossibile abilitare il progetto %s perché dispone di elementi parent "
+"disabilitati"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"Impossibile elencare le assegnazione originate da gruppi e filtrate da ID "
+"utente."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
@@ -193,6 +295,9 @@ msgstr ""
"Impossibile troncare una chiamata al driver senza hints list come primo "
"parametro dopo self "
+msgid "Cannot update domain_id of a project that has children."
+msgstr "Impossibile aggiornare domain_id di un progetto con elementi child."
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -205,6 +310,9 @@ msgstr ""
"Impossibile utilizzare i parametri della query subtree_as_list e "
"subtree_as_ids contemporaneamente."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "L'aggiornamento a catena è consentito solo per un attributo abilitato."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
@@ -219,6 +327,10 @@ msgstr ""
"come risultato un elenco vuoto."
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "Entità API config in /domains/%s/config"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr ""
"Si è verificato un conflitto nel tentativo di archiviare %(type)s - "
@@ -242,6 +354,15 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"Impossibile determinare l'ID del provider di identità. L'opzione di "
+"configurazione %(issuer_attribute)s non è stata trovata nell'ambiente di "
+"richiesta. "
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -307,9 +428,6 @@ msgstr "Impossibile trovare il progetto: %(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "Impossibile trovare la regione: %(region_id)s"
-msgid "Could not find role"
-msgstr "Impossibile trovare il ruolo"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -346,15 +464,49 @@ msgstr "Impossibile trovare la versione: %(version)s"
msgid "Could not find: %(target)s"
msgstr "Impossibile trovare: %(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"Impossibile associare le proprietà dell'utente federato per identificare i "
+"valori. Controllare i log di debug o l'associazione utilizzata per ulteriori "
+"dettagli."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"Impossibile associare l'utente durante l'impostazione dell'identità utente "
+"temporanea. Le regole di associazione devono specificare nome/id utente o la "
+"variabile di ambiente REMOTE_USER deve essereimpostata."
+
msgid "Could not validate the access token"
msgstr "Impossibile convalidare il token di accesso"
msgid "Credential belongs to another user"
msgstr "La credenziale appartiene ad un altro utente"
+msgid "Credential signature mismatch"
+msgstr "Mancata corrispondenza della firma delle credenziali"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "Database presso /domains/%s/config"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"L'importazione diretta di auth plugin %(name)r è obsoleta a partire da "
+"Liberty rispetto al relativo entrypoint da %(namespace)r e potrebbe essere "
+"rimossa in N."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"L'importazione diretta del driver %(name)r è obsoleta a partire da Liberty "
+"rispetto al relativo entrypoint da %(namespace)r e potrebbe essere rimossa "
+"in N."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -378,12 +530,15 @@ msgstr "Il dominio non può avere l'ID %s"
msgid "Domain is disabled: %s"
msgstr "Il dominio è disabilitato: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "I metadati del dominio non sono supportati da LDAP"
+msgid "Domain name cannot contain reserved characters."
+msgstr "Il nome dominio non può contenere caratteri riservati."
msgid "Domain scoped token is not supported"
msgstr "L'ambito del dominio token non è supportato"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "Ruoli specifici di dominio non sono supportati nel driver ruolo V8"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
@@ -392,9 +547,6 @@ msgstr ""
"Il dominio: %(domain)s dispone già di una configurazione definita - si sta "
"ignorando il file: %(file)s."
-msgid "Domains are read-only against LDAP"
-msgstr "I domini sono di sola lettura rispetto a LDAP"
-
msgid "Duplicate Entry"
msgstr "Duplica voce"
@@ -403,9 +555,29 @@ msgid "Duplicate ID, %s."
msgstr "ID duplicato, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "Voce duplicata: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "Nome duplicato, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ID remoto duplicato: %s"
+
+msgid "EC2 access key not found."
+msgstr "Chiave di accesso EC2 non trovata."
+
+msgid "EC2 signature not supplied."
+msgstr "Firma EC2 non fornita."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"Entrambi gli argomenti bootstrap-password o OS_BOOTSTRAP_PASSWORD devono "
+"essere impostati."
+
msgid "Enabled field must be a boolean"
msgstr "Il campo Abilitato deve essere un valore booleano"
@@ -435,10 +607,31 @@ msgstr ""
"%(domain)s, file: %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Errore durante l'apertura del file %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Errore durante l'analisi della riga: '%(line)s': %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Errore durante l'analisi delle regole %(path)s: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "Errore durante le lettura del file di metadati, %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Superato il numero di tentativi per registrare il dominio %(domain)s al fine "
+"di utilizzare il driver SQL, l'ultimo dominio che sembra avere avuto quel "
+"driver è %(last_domain)s, operazione terminata"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "Previsto dict o list: %s"
@@ -481,6 +674,10 @@ msgid "Found invalid token: scoped to both project and domain."
msgstr "trovato token non valido: in ambito sia di progetto che di dominio."
#, python-format
+msgid "Group %s not found in config"
+msgstr "Gruppo %s non trovato in config"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
"Il gruppo %(group)s non è supportato per le configurazioni specifiche del "
@@ -517,6 +714,9 @@ msgstr ""
"L'identificativo del provider identità in entrata non è incluso tra gli "
"identificativi accettati."
+msgid "Invalid EC2 signature."
+msgstr "Firma EC2 non valida."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
@@ -588,17 +788,12 @@ msgstr ""
msgid "Invalid signature"
msgstr "Firma non valida"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"Valore ssl_cert_reqs di %s non valido; deve essere uno tra \"NONE\", "
-"\"OPTIONAL\", \"REQUIRED\""
-
msgid "Invalid user / password"
msgstr "Utente/password non validi"
+msgid "Invalid username or TOTP passcode"
+msgstr "username o passcode TOTP non validi"
+
msgid "Invalid username or password"
msgstr "username o password non validi"
@@ -622,6 +817,20 @@ msgstr "LDAP %s elimina"
msgid "LDAP %s update"
msgstr "LDAP %s aggiorna"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"La lunghezza dell'id risorsa trasformabile è > 64, che rappresenta il numero "
+"massimo di caratteri consentiti"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"La sezione locale nell'associazione %(mapping_id)s si riferisce ad una "
+"corrispondenza remota che non esiste (ad esempio {0} in una sezione locale)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "Si è verificato un timeout di blocco per la chiave, %(target)s"
@@ -642,6 +851,10 @@ msgid "Marker could not be found"
msgstr "Impossibile trovare l'indicatore"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "Profondità massima della gerarchia raggiunta per il ramo %s."
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "È stato raggiunto il numero massimo di tentativi di blocco su %s."
@@ -675,6 +888,10 @@ msgstr "È necessario specificare il dominio o il progetto"
msgid "Name field is required and cannot be empty"
msgstr "Il campo relativo al nome è obbligatorio e non può essere vuoto"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr ""
+"Non è stato fornito l'ID dominio progetto né il nome dominio progetto. "
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
@@ -703,6 +920,9 @@ msgstr "Nessuna politica associata all'endpoint %(endpoint_id)s."
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "Nessun utilizzo residuo per trust: %(trust_id)s"
+msgid "No token in the request"
+msgstr "Nessun token nella richiesta"
+
msgid "Non-default domain is not supported"
msgstr "Il dominio non predefinito non è supportato"
@@ -730,9 +950,29 @@ msgid "Project (%s)"
msgstr "Progetto (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "ID progetto non trovato: %(t_id)s "
+
+msgid "Project field is required and cannot be empty."
+msgstr "Il campo progetto è obbligatorio e non può essere vuoto."
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "Il progetto è disabilitato: %s"
+msgid "Project name cannot contain reserved characters."
+msgstr "Il nome progetto non può contenere caratteri riservati."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "La stringa di query non è codificata in UTF-8 "
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"La lettura dell'impostazione predefinita per l'opzione %(option)s nel gruppo "
+"%(group)s non è supportata"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr ""
"Assegnazione di una nuova delega consentita solo per i delegati dal trust"
@@ -745,6 +985,73 @@ msgstr ""
"profondità di riassegnazione della delega rimanente %(redelegation_depth)d "
"non compresa nell'intervallo consentito [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere admin_crud_extension dalla pipeline paste, l'estensione admin_crud "
+"è ora sempre disponibile. Aggiornare la sezione [pipeline:admin_api] in "
+"keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Rimuovere endpoint_filter_extension dalla pipeline paste, l'estensione del "
+"filtro di endpoint è ora sempre disponibile. Aggiornare la sezione [pipeline:"
+"api_v3] in keystone-paste.ini di conseguenza, in quanto verrà rimossa nella "
+"release O."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere federation_extension dalla pipeline paste, l'estensione federation "
+"è ora sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in "
+"keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere oauth1_extension dalla pipeline paste, l'estensione oauth1 è ora "
+"sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in keystone-"
+"paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere revoke_extension dalla pipeline paste, l'estensione revoke è ora "
+"sempre disponibile. Aggiornare la sezione [pipeline:api_v3] in keystone-"
+"paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere simple_cert dalla pipeline paste, i provider di token PKI e PKIz "
+"sono ora obsoleti e simple_cert è stato utilizzato solo in supporto di "
+"questi provider di token. Aggiornare la sezione [pipeline:api_v3] in "
+"keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Rimuovere user_crud_extension dalla pipeline paste, l'estensione user_crud è "
+"ora sempre disponibile. Aggiornare la sezione [pipeline:admin_api] in "
+"keystone-paste.ini di conseguenza, in quanto verrà rimossa nella release O."
+
msgid "Request Token does not have an authorizing user id"
msgstr ""
"Il token della richiesta non dispone di un id utente per l'autorizzazione"
@@ -782,10 +1089,6 @@ msgstr ""
"La profondità di riassegnazione della delega richiesta %(requested_count)d è "
"maggiore del valore consentito %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "Ruolo %s non trovato"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -832,6 +1135,28 @@ msgstr ""
"È stata superata la lunghezza della stringa. La lunghezza della stringa "
"'%(string)s' ha superato il limite della colonna %(type)s(CHAR(%(length)d))."
+msgid "Tenant name cannot contain reserved characters."
+msgstr "Il nome tenant non può contenere caratteri riservati."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"L'estensione %s è stata spostata nel keystone di base e le relative "
+"migrazioni vengono mantenute dal controllo di database keystone principale. "
+"Utilizzare il comando: keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"'expires_at' non deve essere prima ora. Il server non è riuscito a "
+"rispettare larichiesta perché è in formato errato o non corretta. Il client "
+"viene considerato in errore."
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr "L'opzione --all non può essere utilizzata con l'opzione --domain-name"
@@ -863,6 +1188,13 @@ msgstr ""
"non utilizzi i token PKI, altrimenti questo è il risultato di una "
"configurazione errata."
+msgid "The configured token provider does not support bind authentication."
+msgstr "Il provider di token configurato non supporta l'autenticazione bind. "
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr ""
+"La creazione di progetti che agiscono come domini non è consentita in v2. "
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
@@ -911,12 +1243,9 @@ msgstr "Non deve essere presente nessun parametro non-oauth"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Questa non è una versione di payload Fernet riconosciuta: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Questo non è un token Fernet v2.0. Utilizzare v3 per token attendibili, di "
-"dominio o federati."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Questo non è un token Fernet %s riconosciuto "
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -943,6 +1272,9 @@ msgstr "Il token appartiene ad un altro utente"
msgid "Token does not belong to specified tenant."
msgstr "Il token non appartiene al tenant specificato."
+msgid "Token version is unrecognizable or unsupported."
+msgstr "La versione token non è riconoscibile o non supportata. "
+
msgid "Trustee has no delegated roles."
msgstr "Trustee non ha ruoli delegati."
@@ -993,6 +1325,9 @@ msgstr ""
"Impossibile eliminare la regione %(region_id)s perché la regione o le "
"relative regioni child hanno degli endpoint associati."
+msgid "Unable to downgrade schema"
+msgstr "Impossibile eseguire il downgrade dello schema"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
@@ -1000,13 +1335,6 @@ msgstr ""
"%(mapping_id)s"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"Impossibile ottenere una connessione dall'ID pool %(id)s dopo %(seconds)s "
-"secondi."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossibile individuare la directory config del dominio: %s"
@@ -1067,25 +1395,44 @@ msgstr "Versione di token sconosciuta %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "Dipendenza non registrata: %(name)s per %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "Aggiornamento di `domain_id` non consentito."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "Aggiornamento di `is_domain` non consentito."
+
msgid "Update of `parent_id` is not allowed."
msgstr "Aggiornamento di `parent_id` non consentito."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "L'aggiornamento di domain_id è consentito solo per progetti root."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"L'aggiornamento di domain_id di progetti che agiscono come domini non è "
+"consentito."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
"Utilizzare un token nell'ambito del progetto quando si tenta di creare "
"un'asserzione SAML"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"Utilizzare la configurazione del driver di identità per configurare "
+"automaticamente la stessa assegnazione. Il driver è obsoleto nella release "
+"\"O\". Il driver di assegnazione dovrà essere configurato esplicitamente se "
+"diverso dal driver predefinito (SQL)."
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "L'utente %(u_id)s non è autorizzato per il tenant %(t_id)s"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"L'utente %(user_id)s ha già un ruolo %(role_id)s nel conduttore (tenant) "
-"%(tenant_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr "L'utente %(user_id)s non ha accesso al dominio %(domain_id)s"
@@ -1104,6 +1451,13 @@ msgstr "L'utente '%(user_id)s' non è stato trovato nel gruppo '%(group_id)s'"
msgid "User IDs do not match"
msgstr "Gli ID utente non corrispondono"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"L'autorizzazione utente non può essere creata perché manca l'id utente o il "
+"nome utente con l'id dominio o il nome utente con il nome dominio. "
+
#, python-format
msgid "User is disabled: %s"
msgstr "L'utente è disabilitato: %s"
@@ -1117,6 +1471,12 @@ msgstr "L'utente non è un amministratore."
msgid "User not found"
msgstr "Utente non trovato"
+msgid "User not valid for tenant."
+msgstr "Utente non valido per il tenant."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Ruoli utente non supportati: richiesto tenant_id"
+
#, python-format
msgid "User type %s not supported"
msgstr "Tipo utente %s non supportato"
@@ -1128,6 +1488,15 @@ msgstr "Non si possiede l'autorizzazione per eseguire l'operazione richiesta."
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "L'utente non è autorizzato ad eseguire l'azione richiesta: %(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Si è cercato di creare una risorsa utilizzando il token admin. Poiché questo "
+"token non si trova all'interno di un dominio, è necessario includere "
+"esplicitamente un dominio per fare in modo che questa risorsa vi appartenga."
+
msgid "`key_mangler` functions must be callable."
msgstr "Le funzioni `key_mangler` devono essere disponibili per la chiamata."
@@ -1143,49 +1512,19 @@ msgstr "auth_type non è Negotiate"
msgid "authorizing user does not have role required"
msgstr "l'utente per l'autorizzazione non dispone del ruolo richiesto"
-msgid "cache_collection name is required"
-msgstr "Il nome cache_collection è obbligatorio"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
"impossibile creare un progetto in un ramo che contiene un progetto "
"disabilitato: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-"impossibile creare un progetto all'interno di un dominio diverso da quello "
-"dei relativi elementi parent."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"impossibile eliminare un dominio abilitato; è necessario prima disabilitarlo."
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"impossibile eliminare il progetto %s perché non è una foglia nella gerarchia."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-"impossibile disabilitare il progetto %s perché la relativa struttura ad "
-"albero secondaria contiene progetti abilitati"
-
#, python-format
-msgid "cannot enable project %s since it has disabled parents"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"impossibile abilitare il progetto %s perché dispone di elementi parent "
-"disabilitati"
-
-msgid "database db_name is required"
-msgstr "Il database db_name è obbligatorio"
-
-msgid "db_hosts value is required"
-msgstr "Il valore db_hosts è obbligatorio"
-
-msgid "delete the default domain"
-msgstr "eliminare il dominio predefinito"
+"impossibile eliminare un progetto abilitato che agisce come un dominio. "
+"Disabilitare prima il progetto %s."
#, python-format
msgid "group %(group)s"
@@ -1198,33 +1537,32 @@ msgstr ""
"idp_contact_type deve essere uno tra: [tecnico, altro, supporto, "
"amministrativo o di fatturazione."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "valore intero previsto per mongo_ttl_seconds"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "valore intero previsto per w (attributo di scrittura)"
-
#, python-format
msgid "invalid date format %s"
msgstr "formato data non valido %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "profondità massima della gerarchia raggiunta per il ramo %s."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"non è consentito avere due progetti che agiscono con lo stesso nome: %s"
-msgid "no ssl support available"
-msgstr "nessun supporto ssl disponibile"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"non è consentito avere due progetti all'interno di un dominio con lo stesso "
+"nome: %s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "Solo ai progetti root è consentito agire come domini."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "opzione %(option)s nel gruppo %(group)s"
-msgid "pad must be single character"
-msgstr "il riempimento deve essere un carattere singolo"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "il testo base64url con riempimento deve essere multiplo di 4 caratteri"
-
msgid "provided consumer key does not match stored consumer key"
msgstr ""
"La chiave consumer fornita non corrisponde alla chiave consumer memorizzata"
@@ -1237,9 +1575,6 @@ msgstr ""
msgid "provided verifier does not match stored verifier"
msgstr "il verificatore fornito non corrisponde al verificatore memorizzato"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "regione non tipo dogpile.cache.CacheRegion"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses deve essere un numero intero positivo o nullo."
@@ -1248,9 +1583,6 @@ msgstr ""
"remaining_uses non deve essere impostato se è consentita la riassegnazione "
"della delega"
-msgid "replicaset_name required when use_replica is True"
-msgstr "replicaset_name è obbligatorio quando use_replica è True"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1263,20 +1595,13 @@ msgid "rescope a scoped token"
msgstr "riassegna ambito a token con ambito"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"il testo è multiplo di 4, ma il riempimento \"%s\" si verifica prima del "
-"penultimo carattere"
+msgid "role %s is not defined"
+msgstr "il ruolo %s non è definito"
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"il testo è multiplo di 4, ma il riempimento \"%s\" si verifica prima "
-"dell'ultimo carattere non di riempimento"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "il testo non è un multiplo di 4, ma contiene il riempimento \"%s\""
+"scope.project.id deve essere specificato se è specificato anche "
+"include_subtree"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1290,3 +1615,17 @@ msgstr "Impossibile trovare tls_cacertfile %s o non è un file"
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr ""
"il riferimento al token deve essere un tipo KeystoneToken, ottenuto: %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"l'aggiornamento di domain_id è obsoleto a partire da Mitaka e verrà rimosso "
+"in O."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"la convalida prevede di trovare %(param_name)r nella firma funzione per "
+"%(func_name)r."
diff --git a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
index 9337f92f..b9224fea 100644
--- a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# Akihiro Motoki <amotoki@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Japanese\n"
-"Language: ja\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Japanese\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone.po
index 541eda96..8f460602 100644
--- a/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/ja/LC_MESSAGES/keystone.po
@@ -1,30 +1,41 @@
-# Japanese translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
# Tomoyuki KATO <tomo@dream.daynight.jp>, 2012-2013
# Akihiro Motoki <amotoki@gmail.com>, 2015. #zanata
+# 笹原 昌美 <ebb0de1@jp.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-27 10:27+0000\n"
-"Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
-"Language: ja\n"
-"Language-Team: Japanese\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-04 01:18+0000\n"
+"Last-Translator: 笹原 昌美 <ebb0de1@jp.ibm.com>\n"
+"Language: ja\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Japanese\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã‚‹ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr "%(entity)s åã«ä»¥ä¸‹ã®äºˆç´„済ã¿æ–‡å­—ã‚’å«ã‚ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“: %(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -49,6 +60,10 @@ msgstr ""
"ãšã‚Œã‹ã§ã™ã€‚"
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s 㯠%(implied_role_id)s を暗黙的ã«ç¤ºã—ã¾ã›ã‚“"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s 㯠%(min_length)s 文字より短ãã§ãã¾ã›ã‚“。"
@@ -61,6 +76,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s 㯠%(max_length)s 文字より長ãã§ãã¾ã›ã‚“。"
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s ã¯æš—黙的ロールã«ã§ãã¾ã›ã‚“"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s ã¯ç©ºã«ã¯ã§ãã¾ã›ã‚“。"
@@ -76,8 +95,18 @@ msgstr "フィールド %s ã¯å¿…須フィールドã§ã‚ã‚‹ãŸã‚ã€ç©ºã«ã§ã
msgid "%s field(s) cannot be empty"
msgstr "フィールド %s を空ã«ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(ã“れらã®è©³ç´°å‡ºåŠ›ã‚’抑制ã™ã‚‹ã«ã¯ã€ãƒ‡ãƒãƒƒã‚°ãƒ¢ãƒ¼ãƒ‰ã‚’無効ã«ã—ã¾ã™ã€‚)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"LDAP ID ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã® %s 㯠Mitaka リリースã«ãŠã„ã¦èª­ã¿å–り専用㮠ID LDAP ã‚¢"
+"クセスをé¸æŠžã—ãŸãŸã‚ã€æ供を終了ã—ã¦ã„ã¾ã™ã€‚ã“れ㯠\"O\" リリースã§å‰Šé™¤ã•ã‚Œã‚‹"
+"予定ã§ã™ã€‚"
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(ã“れらã®è©³ç´°ã‚’抑制ã™ã‚‹ã«ã¯ã€insecure_debug モードを無効ã«ã—ã¾ã™ã€‚)"
msgid "--all option cannot be mixed with other options"
msgstr "--all オプションを他ã®ã‚ªãƒ—ションã¨çµ„ã¿åˆã‚ã›ã¦ä½¿ç”¨ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
@@ -128,6 +157,16 @@ msgstr ""
msgid "At least one role should be specified."
msgstr "å°‘ãªãã¨ã‚‚ 1 ã¤ã®ãƒ­ãƒ¼ãƒ«ã‚’指定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"[identity]\\driver オプションã«åŸºã¥ã割り当ã¦ç”¨ã«ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã®è‡ªå‹•é¸æŠžã‚’試ã¿ã¾"
+"ã—ãŸãŒã€ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ %s ãŒè¦‹ã¤ã‹ã‚‰ãªã‹ã£ãŸãŸã‚失敗ã—ã¾ã—ãŸã€‚[assignment]/"
+"driver ã‚’ Keystone 設定ã®æœ‰åŠ¹ãªãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã«è¨­å®šã—ã¦ãã ã•ã„。"
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "サãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ãªã„メソッドを使用ã—ã¦èªè¨¼ã‚’è¡ŒãŠã†ã¨ã—ã¾ã—ãŸã€‚"
@@ -141,6 +180,14 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "èªè¨¼ãƒ—ラグインエラー。"
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ `%(backend)s` ã¯æœ‰åŠ¹ãª memcached ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã§ã¯ã‚ã‚Šã¾ã›ã‚“。有"
+"効ãªãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰: %(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
"委任ã«ã‚ˆã£ã¦ç™ºè¡Œã•ã‚ŒãŸãƒˆãƒ¼ã‚¯ãƒ³ã‚’使用ã—ã¦è¦æ±‚トークンを許å¯ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›"
@@ -153,23 +200,76 @@ msgstr "%(option_name)s %(attr)s を変更ã§ãã¾ã›ã‚“"
msgid "Cannot change Domain ID"
msgstr "ドメイン ID を変更ã§ãã¾ã›ã‚“"
-#, fuzzy
-msgid "Cannot change consumer secret"
-msgstr "コンシューマーã®ç§˜å¯†ã‚’変更ã§ãã¾ã›ã‚“"
-
msgid "Cannot change user ID"
msgstr "ユーザー ID を変更ã§ãã¾ã›ã‚“"
msgid "Cannot change user name"
msgstr "ユーザーåを変更ã§ãã¾ã›ã‚“"
-#, fuzzy, python-format
+#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "以下ã®ç„¡åŠ¹ãª URL ã‚’æŒã¤ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆã‚’作æˆã§ãã¾ã›ã‚“: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
-msgstr "親をæŒã¤ãƒ—ロジェクトを作æˆã§ãã¾ã›ã‚“: %(project_id)s"
+msgstr "親をæŒã¤ãƒ—ロジェクト: %(project_id)s を作æˆã§ãã¾ã›ã‚“"
+
+#, python-format
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"プロジェクトã§ãã®æ‰€æœ‰è€…をドメイン %(domain_id)s ã¨ã—ã¦æŒ‡å®šã—ã¦ã„ã‚‹ãŒã€åˆ¥ã®ãƒ‰"
+"メイン (%(parent_domain_id)s) ã«è¦ªã‚’指定ã—ã¦ã„ã‚‹ãŸã‚ã€ãã®ãƒ—ロジェクトを作æˆ"
+"ã§ãã¾ã›ã‚“。"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "åå‰ %s ã¯é‡è¤‡ã—ã¦ã¯ãªã‚Šã¾ã›ã‚“"
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"プロジェクトã®è¦ª (%(domain_id)s) ãŒãƒ‰ãƒ¡ã‚¤ãƒ³ã¨ã—ã¦å‹•ä½œã—ã¦ã„ã‚‹ãŒã€ãƒ—ロジェク"
+"トã§æŒ‡å®šã•ã‚Œã‚‹ parent_id (%(parent_id)s) ãŒã“ã® domain_id ã¨ä¸€è‡´ã—ãªã„ãŸã‚ã€"
+"ãã®ãƒ—ロジェクトを作æˆã§ãã¾ã›ã‚“。"
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"有効ã«ãªã£ã¦ã„るドメインã¯å‰Šé™¤ã§ãã¾ã›ã‚“。最åˆã«ãã®ãƒ‰ãƒ¡ã‚¤ãƒ³ã‚’無効ã«ã—ã¦ãã "
+"ã•ã„。"
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"プロジェクト %(project_id)s ã¯ãã®ã‚µãƒ–ツリーã«æœ‰åŠ¹ã«ãªã£ã¦ã„るプロジェクトãŒ"
+"å«ã¾ã‚Œã¦ã„ã‚‹ãŸã‚削除ã§ãã¾ã›ã‚“。"
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"プロジェクト %s ã¯éšŽå±¤å†…ã®æœ«ç«¯ã§ã¯ãªã„ãŸã‚ã€å‰Šé™¤ã§ãã¾ã›ã‚“。サブツリー全体を"
+"削除ã™ã‚‹å ´åˆã€ã‚«ã‚¹ã‚±ãƒ¼ãƒ‰ã‚ªãƒ—ションを使用ã—ã¦ãã ã•ã„。"
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"プロジェクト %(project_id)s ã¯ãã®ã‚µãƒ–ツリーã«æœ‰åŠ¹ã«ãªã£ã¦ã„るプロジェクトãŒ"
+"å«ã¾ã‚Œã¦ã„ã‚‹ãŸã‚ã€ç„¡åŠ¹ã«ã§ãã¾ã›ã‚“。"
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "親ãŒç„¡åŠ¹ã«ãªã£ã¦ã„るプロジェクト %s ã¯æœ‰åŠ¹ã«ã§ãã¾ã›ã‚“"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"グループã‹ã‚‰å–å¾—ã—ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼ ID ã§ãƒ•ã‚£ãƒ«ã‚¿ãƒ¼å‡¦ç†ã—ãŸå‰²ã‚Šå½“ã¦ã‚’リストã§ãã¾ã›"
+"ん。"
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
@@ -192,6 +292,9 @@ msgstr ""
"セルフã®å¾Œã«æœ€åˆã®ãƒ‘ラメーターã¨ã—ã¦ãƒ’ントリストãªã—ã§ãƒ‰ãƒ©ã‚¤ãƒãƒ¼å‘¼ã³å‡ºã—を切"
"ã‚Šæ¨ã¦ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
+msgid "Cannot update domain_id of a project that has children."
+msgstr "å­ã‚’æŒã¤ãƒ—ロジェクト㮠domain_id ã‚’æ›´æ–°ã§ãã¾ã›ã‚“。"
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -204,6 +307,9 @@ msgstr ""
"å•ã„åˆã‚ã›ãƒ‘ラメーター subtree_as_list 㨠subtree_as_ids ã‚’åŒæ™‚ã«ä½¿ç”¨ã™ã‚‹ã“ã¨"
"ã¯ã§ãã¾ã›ã‚“。"
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "カスケード更新ã¯æœ‰åŠ¹ã«ãªã£ã¦ã„る属性ã«ã®ã¿è¨±å¯ã•ã‚Œã¾ã™ã€‚"
+
#, fuzzy
msgid ""
"Combining effective and group filter will always result in an empty list."
@@ -219,6 +325,10 @@ msgstr ""
"ã®ãƒªã‚¹ãƒˆã«ãªã‚Šã¾ã™ã€‚"
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "/domains/%s/config 㮠Config API エンティティー"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "%(type)s ã‚’ä¿å­˜ã™ã‚‹ã¨ãã«ç«¶åˆãŒç™ºç”Ÿã—ã¾ã—㟠- %(details)s"
@@ -239,6 +349,14 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"èªè¨¼ãƒ—ロãƒã‚¤ãƒ€ãƒ¼ ID を判別ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚設定オプション "
+"%(issuer_attribute)s ãŒè¦æ±‚環境内ã§è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -260,9 +378,9 @@ msgstr "ID プロãƒã‚¤ãƒ€ãƒ¼ %(idp_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
msgid "Could not find Service Provider: %(sp_id)s"
msgstr "サービスプロãƒã‚¤ãƒ€ãƒ¼ %(sp_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-#, fuzzy, python-format
+#, python-format
msgid "Could not find credential: %(credential_id)s"
-msgstr "資格情報ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ: %(credential_id)s"
+msgstr "クレデンシャルãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ: %(credential_id)s"
#, python-format
msgid "Could not find domain: %(domain_id)s"
@@ -272,12 +390,12 @@ msgstr "ドメイン %(domain_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
msgid "Could not find endpoint: %(endpoint_id)s"
msgstr "エンドãƒã‚¤ãƒ³ãƒˆ %(endpoint_id)sãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-#, fuzzy, python-format
+#, python-format
msgid ""
"Could not find federated protocol %(protocol_id)s for Identity Provider: "
"%(idp_id)s"
msgstr ""
-"Identity Provider ã®çµ±åˆãƒ—ロトコル %(protocol_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ: "
+"Identity Provider ã®é€£æºãƒ—ロトコル %(protocol_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ: "
"%(idp_id)s"
#, python-format
@@ -303,9 +421,6 @@ msgstr "プロジェクト %(project_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
msgid "Could not find region: %(region_id)s"
msgstr "リージョン %(region_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-msgid "Could not find role"
-msgstr "ロールãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -342,16 +457,48 @@ msgstr "ãƒãƒ¼ã‚¸ãƒ§ãƒ³ %(version)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
msgid "Could not find: %(target)s"
msgstr "%(target)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"フェデレーションã—ãŸãƒ¦ãƒ¼ã‚¶ãƒ¼ãƒ—ロパティーã®ã„ãšã‚Œã‚‚ ID 値ã«ãƒžãƒƒãƒ—ã™ã‚‹ã“ã¨ãŒã§"
+"ãã¾ã›ã‚“ã§ã—ãŸã€‚デãƒãƒƒã‚°ãƒ­ã‚°ã¾ãŸã¯è¿½åŠ ã®è©³ç´°ã«ä½¿ç”¨ã—ãŸãƒžãƒƒãƒ”ングを確èªã—ã¦ã"
+"ã ã•ã„。"
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"一時的ãªãƒ¦ãƒ¼ã‚¶ãƒ¼ ID ã®è¨­å®šä¸­ã«ãƒ¦ãƒ¼ã‚¶ãƒ¼ã‚’マップã™ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚"
+"マッピングè¦å‰‡ã«ã‚ˆã£ã¦ãƒ¦ãƒ¼ã‚¶ãƒ¼ ID/ユーザーåを指定ã™ã‚‹ã‹ã€REMOTE_USER 環境変"
+"数を設定ã™ã‚‹ã‹ã€ã„ãšã‚Œã‹ã‚’è¡Œã†å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
+
msgid "Could not validate the access token"
msgstr "アクセストークンを検証ã§ãã¾ã›ã‚“ã§ã—ãŸ"
-#, fuzzy
msgid "Credential belongs to another user"
-msgstr "資格情報ãŒåˆ¥ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«å±žã—ã¦ã„ã¾ã™"
+msgstr "クレデンシャルãŒåˆ¥ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«å±žã—ã¦ã„ã¾ã™"
+
+msgid "Credential signature mismatch"
+msgstr "クレデンシャルã®ã‚·ã‚°ãƒ‹ãƒãƒ£ãƒ¼ãŒä¸€è‡´ã—ã¾ã›ã‚“"
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "/domains/%s/config ã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"èªè¨¼ãƒ—ラグイン %(name)r ã®ç›´æŽ¥ã‚¤ãƒ³ãƒãƒ¼ãƒˆã¯ã€Liberty ã®æ™‚点㧠%(namespace)r ã®"
+"エンドãƒã‚¤ãƒ³ãƒˆã‚’é¸æŠžã—ãŸãŸã‚ã€æ供を終了ã—ã¾ã—ãŸã€‚N ã§ã¯å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã™ã€‚"
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"ドライãƒãƒ¼ %(name)r ã®ç›´æŽ¥ã‚¤ãƒ³ãƒãƒ¼ãƒˆã¯ã€Liberty ã®æ™‚点㧠%(namespace)r ã‹ã‚‰ã®"
+"エントリーãƒã‚¤ãƒ³ãƒˆã‚’é¸æŠžã—ãŸãŸã‚〠æ供を終了ã—ã¾ã—ãŸã€‚N ã§ã¯å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§"
+"ã™ã€‚"
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -374,12 +521,15 @@ msgstr "ドメイン㫠%s ã¨ã„ㆠID を付ã‘ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
msgid "Domain is disabled: %s"
msgstr "ドメイン %s ãŒç„¡åŠ¹ã«ãªã£ã¦ã„ã¾ã™"
-msgid "Domain metadata not supported by LDAP"
-msgstr "ドメインメタデータ㯠LDAP ã§ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
+msgid "Domain name cannot contain reserved characters."
+msgstr "ドメインåã«äºˆç´„済ã¿æ–‡å­—ãŒå«ã¾ã‚Œã¦ã„ã¦ã¯ãªã‚Šã¾ã›ã‚“。"
msgid "Domain scoped token is not supported"
msgstr "ドメインをスコープã«ã—ãŸãƒˆãƒ¼ã‚¯ãƒ³ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "ドメイン固有ã®ãƒ­ãƒ¼ãƒ«ã¯ã€V8 ã®ãƒ­ãƒ¼ãƒ«ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã§ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
@@ -388,9 +538,6 @@ msgstr ""
"ドメイン %(domain)s ã«ã¯æ—¢ã«å®šç¾©ã•ã‚ŒãŸè¨­å®šãŒã‚ã‚Šã¾ã™ã€‚ファイル %(file)s ã¯ç„¡"
"視ã•ã‚Œã¾ã™ã€‚"
-msgid "Domains are read-only against LDAP"
-msgstr "LDAP ã®å ´åˆã¯ãƒ‰ãƒ¡ã‚¤ãƒ³ã¯èª­ã¿å–り専用ã§ã™"
-
msgid "Duplicate Entry"
msgstr "é‡è¤‡ã™ã‚‹é …ç›®"
@@ -399,9 +546,29 @@ msgid "Duplicate ID, %s."
msgstr "é‡è¤‡ã—㟠IDã€%s。"
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "é‡è¤‡ã™ã‚‹é …ç›®: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "é‡è¤‡ã—ãŸåå‰ã€%s。"
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "é‡è¤‡ã™ã‚‹ãƒªãƒ¢ãƒ¼ãƒˆ ID: %s"
+
+msgid "EC2 access key not found."
+msgstr "EC2 アクセスキーãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
+
+msgid "EC2 signature not supplied."
+msgstr "EC2 ã®ç½²åãŒæä¾›ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"--bootstrap-password 引数ã¾ãŸã¯ OS_BOOTSTRAP_PASSWORD ã„ãšã‚Œã‹ã‚’設定ã™ã‚‹å¿…è¦"
+"ãŒã‚ã‚Šã¾ã™ã€‚"
+
msgid "Enabled field must be a boolean"
msgstr "「有効ã€ãƒ•ã‚£ãƒ¼ãƒ«ãƒ‰ã¯ãƒ–ール値ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
@@ -430,10 +597,31 @@ msgstr ""
"ドメイン: %(domain)sã€ãƒ•ã‚¡ã‚¤ãƒ«: %(file)s ã®è¨­å®šãƒ•ã‚¡ã‚¤ãƒ«ã®æ§‹æ–‡è§£æžã‚¨ãƒ©ãƒ¼ã€‚"
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "ファイル %(path)s ã®ã‚ªãƒ¼ãƒ—ン中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "è¡Œ: '%(line)s' ã®è§£æžä¸­ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "ルール %(path)s ã®è§£æžä¸­ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "メタデータファイルã®èª­ã¿å–り中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚%(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"SQL ドライãƒãƒ¼ã‚’使用ã™ã‚‹ãŸã‚ã®ãƒ‰ãƒ¡ã‚¤ãƒ³ %(domain)s ã®ç™»éŒ²ã®è©¦è¡Œå›žæ•°ãŒåˆ¶é™ã‚’超"
+"éŽã—ã¾ã—ãŸã€‚最後ã«ç™»éŒ²ã•ã‚ŒãŸã¨æ€ã‚れるドメイン㯠%(last_domain)s ã§ã™ã€‚中断ã—"
+"ã¾ã™"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "期待ã•ã‚Œã‚‹è¾žæ›¸ã¾ãŸã¯ãƒªã‚¹ãƒˆ: %s"
@@ -461,9 +649,8 @@ msgstr "%(name)s サーãƒãƒ¼ã®èµ·å‹•ã«å¤±æ•—ã—ã¾ã—ãŸ"
msgid "Failed to validate token"
msgstr "トークンã®æ¤œè¨¼ã«å¤±æ•—ã—ã¾ã—ãŸ"
-#, fuzzy
msgid "Federation token is expired"
-msgstr "連åˆãƒˆãƒ¼ã‚¯ãƒ³ã®æœ‰åŠ¹æœŸé™ãŒåˆ‡ã‚Œã¦ã„ã¾ã™"
+msgstr "çµ±åˆãƒˆãƒ¼ã‚¯ãƒ³ã®æœ‰åŠ¹æœŸé™ãŒåˆ‡ã‚Œã¦ã„ã¾ã™"
#, python-format
msgid ""
@@ -479,6 +666,10 @@ msgstr ""
"ã¦è¨­å®šã•ã‚Œã¦ã„ã¾ã™ã€‚"
#, python-format
+msgid "Group %s not found in config"
+msgstr "グループ %s ãŒè¨­å®šå†…ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr "ドメイン固有ã®è¨­å®šã§ã¯ã‚°ãƒ«ãƒ¼ãƒ— %(group)s ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
@@ -506,11 +697,13 @@ msgstr "ID 属性 %(id_attr)s ㌠LDAP オブジェクト %(dn)s ã«è¦‹ã¤ã‹ã‚Š
msgid "Identity Provider %(idp)s is disabled"
msgstr "ID プロãƒã‚¤ãƒ€ãƒ¼ %(idp)s ã¯ç„¡åŠ¹ã«ãªã£ã¦ã„ã¾ã™"
-#, fuzzy
msgid ""
"Incoming identity provider identifier not included among the accepted "
"identifiers."
-msgstr "ç€ä¿¡ ID プロãƒã‚¤ãƒ€ãƒ¼ ID ãŒå—諾 ID ã«å«ã¾ã‚Œã¦ã„ã¾ã›ã‚“ã§ã—ãŸã€‚"
+msgstr "å—諾ã—㟠ID ã®ä¸­ã«ç€ä¿¡ ID プロãƒã‚¤ãƒ€ãƒ¼ã® ID ãŒå«ã¾ã‚Œã¾ã›ã‚“。"
+
+msgid "Invalid EC2 signature."
+msgstr "無効㪠EC2 ã®ç½²å。"
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
@@ -541,9 +734,8 @@ msgstr "無効㪠TLS / LDAPS ã®çµ„ã¿åˆã‚ã›ã§ã™"
msgid "Invalid audit info data type: %(data)s (%(type)s)"
msgstr "無効ãªç›£æŸ»æƒ…報データタイプ %(data)s (%(type)s) ã§ã™"
-#, fuzzy
msgid "Invalid blob in credential"
-msgstr "資格情報内㮠blob ãŒç„¡åŠ¹ã§ã™"
+msgstr "クレデンシャル内㮠blob ãŒç„¡åŠ¹ã§ã™"
#, python-format
msgid ""
@@ -586,17 +778,12 @@ msgstr ""
msgid "Invalid signature"
msgstr "シグニãƒãƒ£ãƒ¼ãŒç„¡åŠ¹ã§ã™"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"%s ã® ssl_cert_reqs 値ãŒç„¡åŠ¹ã§ã™ã€‚「NONEã€ã€ã€ŒOPTIONALã€ã€ã€ŒREQUIREDã€ã®ã„ãš"
-"ã‚Œã‹ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
msgid "Invalid user / password"
msgstr "ユーザー/パスワードãŒç„¡åŠ¹ã§ã™"
+msgid "Invalid username or TOTP passcode"
+msgstr "無効ãªãƒ¦ãƒ¼ã‚¶ãƒ¼åã¾ãŸã¯ TOTP パスコード"
+
msgid "Invalid username or password"
msgstr "無効ãªãƒ¦ãƒ¼ã‚¶ãƒ¼åã‹ãƒ‘スワード"
@@ -604,9 +791,9 @@ msgstr "無効ãªãƒ¦ãƒ¼ã‚¶ãƒ¼åã‹ãƒ‘スワード"
msgid "KVS region %s is already configured. Cannot reconfigure."
msgstr "KVS 領域 %s ã¯æ—¢ã«æ§‹æˆã•ã‚Œã¦ã„ã¾ã™ã€‚å†æ§‹æˆã¯ã§ãã¾ã›ã‚“。"
-#, fuzzy, python-format
+#, python-format
msgid "Key Value Store not configured: %s"
-msgstr "キー値ストアãŒæ§‹æˆã•ã‚Œã¦ã„ã¾ã›ã‚“: %s"
+msgstr "キーãƒãƒªãƒ¥ãƒ¼ã‚¹ãƒˆã‚¢ãŒè¨­å®šã•ã‚Œã¦ã„ã¾ã›ã‚“: %s"
#, python-format
msgid "LDAP %s create"
@@ -620,6 +807,20 @@ msgstr "LDAP %s ã®å‰Šé™¤"
msgid "LDAP %s update"
msgstr "LDAP %s ã®æ›´æ–°"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"変æ›å¯èƒ½ãªãƒªã‚½ãƒ¼ã‚¹ ID ã®é•·ã•ã¯æœ€å¤§è¨±å®¹æ–‡å­—æ•°ã§ã‚ã‚‹ã€64 文字より少ãªããªã‚Šã¾"
+"ã™ã€‚"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"マッピング %(mapping_id)s ã«ã‚るローカルセクションã¯ã€å­˜åœ¨ã—ãªã„リモートã®ä¸€"
+"致 (例ãˆã°ãƒ­ãƒ¼ã‚«ãƒ«ã‚»ã‚¯ã‚·ãƒ§ãƒ³ã® {0}) ã‚’å‚ç…§ã—ã¾ã™ã€‚"
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "キー %(target)s ã«ã¤ã„ã¦ãƒ­ãƒƒã‚¯ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆãŒç™ºç”Ÿã—ã¾ã—ãŸ"
@@ -639,6 +840,10 @@ msgid "Marker could not be found"
msgstr "マーカーãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "%s ブランãƒã«åˆ°é”ã™ã‚‹æœ€å¤§ã®éšŽå±¤ã®æ·±ã•ã€‚"
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "%s ã«å¯¾ã—ã¦ãƒ­ãƒƒã‚¯ãŒæœ€å¤§å›žæ•°ã¾ã§è©¦ã¿ã‚‰ã‚Œã¾ã—ãŸã€‚"
@@ -672,6 +877,11 @@ msgstr "ドメインã¾ãŸã¯ãƒ—ロジェクトã®ã„ãšã‚Œã‹ã‚’指定ã™ã‚‹å¿…
msgid "Name field is required and cannot be empty"
msgstr "「åå‰ã€ãƒ•ã‚£ãƒ¼ãƒ«ãƒ‰ã¯å¿…須フィールドã§ã‚ã‚Šã€ç©ºã«ã§ãã¾ã›ã‚“"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr ""
+"プロジェクトドメイン ID ãŠã‚ˆã³ プロジェクトドメインåã®ã„ãšã‚Œã‚‚指定ã•ã‚Œã¾ã›ã‚“"
+"ã§ã—ãŸã€‚"
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
@@ -683,12 +893,11 @@ msgstr ""
msgid "No authenticated user"
msgstr "èªè¨¼ã•ã‚Œã¦ã„ãªã„ユーザー"
-#, fuzzy
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
msgstr ""
-"æš—å·éµãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。keystone ç®¡ç† fernet_setup を実行ã—ã¦ãƒ–ートストラップ"
-"ã‚’è¡Œã£ã¦ãã ã•ã„。"
+"æš—å·éµãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。keystone-manage fernet_setup を実行ã—ã¦æš—å·éµã‚’åˆæœŸè¨­"
+"定ã—ã¾ã™ã€‚"
msgid "No options specified"
msgstr "オプションãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“"
@@ -698,9 +907,12 @@ msgid "No policy is associated with endpoint %(endpoint_id)s."
msgstr ""
"エンドãƒã‚¤ãƒ³ãƒˆ %(endpoint_id)s ã«é–¢é€£ä»˜ã‘られã¦ã„ã‚‹ãƒãƒªã‚·ãƒ¼ã¯ã‚ã‚Šã¾ã›ã‚“。"
-#, fuzzy, python-format
+#, python-format
msgid "No remaining uses for trust: %(trust_id)s"
-msgstr "トラスト %(trust_id)s ã«ã¯ä½¿ç”¨ãŒæ®‹ã£ã¦ã„ã¾ã›ã‚“"
+msgstr "トラストã¯ã“れ以上使用ã§ãã¾ã›ã‚“: %(trust_id)s"
+
+msgid "No token in the request"
+msgstr "è¦æ±‚ã«ãƒˆãƒ¼ã‚¯ãƒ³ãŒã‚ã‚Šã¾ã›ã‚“"
msgid "Non-default domain is not supported"
msgstr "デフォルト以外ã®ãƒ‰ãƒ¡ã‚¤ãƒ³ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
@@ -729,9 +941,29 @@ msgid "Project (%s)"
msgstr "プロジェクト (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "プロジェクト ID ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "プロジェクトフィールドã¯å¿…é ˆã§ã‚ã‚Šã€ç©ºã«ã§ãã¾ã›ã‚“。"
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "プロジェクト %s ãŒç„¡åŠ¹ã«ãªã£ã¦ã„ã¾ã™"
+msgid "Project name cannot contain reserved characters."
+msgstr "プロジェクトåã«äºˆç´„済ã¿æ–‡å­—ãŒå«ã¾ã‚Œã¦ã„ã¦ã¯ãªã‚Šã¾ã›ã‚“。"
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "照会文字列ã¯ã€UTF-8 ã§ã‚¨ãƒ³ã‚³ãƒ¼ãƒ‰ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"グループ %(group)s ã®ã‚ªãƒ—ション %(option)s ã®ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆã®èª­ã¿å–ã‚Šã¯ã‚µãƒãƒ¼ãƒˆã•"
+"ã‚Œã¾ã›ã‚“"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "å†å§”ä»»ã¯ãƒˆãƒ©ã‚¹ãƒˆã«ã‚ˆã‚‹å§”ä»»ã«ã®ã¿è¨±å¯ã•ã‚Œã¾ã™"
@@ -743,6 +975,79 @@ msgstr ""
"%(redelegation_depth)d ã®æ®‹ã‚Šã®å†å§”ä»»ã®æ·±ã•ãŒã€è¨±å¯ã•ã‚ŒãŸç¯„囲 [0.."
"%(max_count)d] を超ãˆã¦ã„ã¾ã™"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"admin_crud_extension ã‚’ Paste ã®ãƒ‘イプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€admin_crud æ‹¡å¼µ"
+"を常時使用ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã‚ã‚‹ãŸ"
+"ã‚ã€ãã‚Œã«å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:admin_api] セクションを更"
+"æ–°ã—ã¦ãã ã•ã„。"
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"endpoint_filter_extension ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³"
+"トフィルター拡張を常時使用ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œ"
+"る予定ã§ã‚ã‚‹ãŸã‚ã€ãã‚Œã«å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:api_v3] セク"
+"ションを更新ã—ã¦ãã ã•ã„。"
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"federation_extension ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€ãƒ•ã‚§ãƒ‡ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³"
+"拡張を常時使用ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã‚ã‚‹"
+"ãŸã‚ã€ãã‚Œã«å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:api_v3] セクションを更新"
+"ã—ã¦ãã ã•ã„。"
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"oauth1_extension ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€oauth1 拡張を常時使用"
+"ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã‚ã‚‹ãŸã‚ã€ãã‚Œã«å¿œ"
+"ã˜ã¦ keystone-paste.ini 内㮠[pipeline:api_v3] セクションを更新ã—ã¦ãã ã•"
+"ã„。"
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"revoke_extension ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€å–り消ã—拡張を常時使"
+"用ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã‚ã‚‹ãŸã‚ã€ãã‚Œã«"
+"å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:api_v3] セクションを更新ã—ã¦ãã ã•"
+"ã„。"
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"simple_cert ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€PKI ãŠã‚ˆã³ PKIz ã®ãƒˆãƒ¼ã‚¯ãƒ³"
+"プロãƒã‚¤ãƒ€ãƒ¼ã¯éžæŽ¨å¥¨ã¨ãªã‚Šã¾ã—ãŸã€‚ã“れらã®ãƒˆãƒ¼ã‚¯ãƒ³ãƒ—ロãƒã‚¤ãƒ€ãƒ¼ã®ã‚µãƒãƒ¼ãƒˆã«ä½¿"
+"用ã•ã‚Œã¦ã„ãŸã®ã¯ simple_cert ã®ã¿ã§ã—ãŸã€‚ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§"
+"ã‚ã‚‹ãŸã‚ã€ãã‚Œã«å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:api_v3] セクションを"
+"æ›´æ–°ã—ã¦ãã ã•ã„。"
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"user_crud_extension ã‚’ Paste パイプラインã‹ã‚‰å‰Šé™¤ã—ãŸãŸã‚ã€user_crud 拡張を常"
+"時使用ã§ãるよã†ã«ãªã‚Šã¾ã—ãŸã€‚ ã“れ㯠O リリースã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã‚ã‚‹ãŸã‚ã€"
+"ãã‚Œã«å¿œã˜ã¦ keystone-paste.ini 内㮠[pipeline:public_api] セクションを更新ã—"
+"ã¦ãã ã•ã„。"
+
msgid "Request Token does not have an authorizing user id"
msgstr "è¦æ±‚ã•ã‚ŒãŸãƒˆãƒ¼ã‚¯ãƒ³ã«è¨±å¯ãƒ¦ãƒ¼ã‚¶ãƒ¼ ID ãŒå«ã¾ã‚Œã¦ã„ã¾ã›ã‚“"
@@ -777,10 +1082,6 @@ msgstr ""
"è¦æ±‚ã•ã‚ŒãŸå†å§”ä»»ã®æ·±ã• %(requested_count)d ãŒã€è¨±å¯ã•ã‚ŒãŸä¸Šé™ %(max_count)d "
"を超ãˆã¦ã„ã¾ã™"
-#, python-format
-msgid "Role %s not found"
-msgstr "ロール %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -826,6 +1127,28 @@ msgstr ""
"文字列ãŒé•·éŽãŽã¾ã™ã€‚文字列 %(string)s' ã®é•·ã•ãŒåˆ— %(type)s(CHAR(%(length)d)) "
"ã®åˆ¶é™ã‚’超ãˆã¾ã—ãŸã€‚"
+msgid "Tenant name cannot contain reserved characters."
+msgstr "テナントåã«äºˆç´„済ã¿æ–‡å­—ãŒå«ã¾ã‚Œã¦ã„ã¦ã¯ãªã‚Šã¾ã›ã‚“。"
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"%s 拡張㌠keystone コアã«ç§»å‹•ã•ã‚Œã¦ã„ã‚‹ãŸã‚ã€ãã®ãƒžã‚¤ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ã¯ãƒ¡ã‚¤ãƒ³ã® "
+"keystone データベース制御ã«ã‚ˆã£ã¦ç¶­æŒã•ã‚Œã¾ã™ã€‚次ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’使用ã—ã¾ã™: "
+"keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"'expires_at' ã¯ç¾æ™‚点以å‰ã§ã‚ã£ã¦ã¯ãªã‚Šã¾ã›ã‚“。è¦æ±‚ã®å½¢å¼ãŒèª¤ã£ã¦ã„ã‚‹ã‹ã€è¦æ±‚"
+"ãŒæ­£ã—ããªã„ãŸã‚ã«ã€ã‚µãƒ¼ãƒãƒ¼ã¯ã“ã®è¦æ±‚ã«å¿œã˜ã‚‹ã“ã¨ãŒå‡ºæ¥ã¾ã›ã‚“ã§ã—ãŸã€‚クライ"
+"アントã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ã¨è€ƒãˆã‚‰ã‚Œã¾ã™ã€‚"
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr "--all オプションを --domain-name オプションã¨ä½µç”¨ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
@@ -856,6 +1179,12 @@ msgstr ""
"è¦æ±‚ã•ã‚ŒãŸè¨¼æ˜Žæ›¸ãŒã‚ã‚Šã¾ã›ã‚“。ã“ã®ã‚µãƒ¼ãƒãƒ¼ã§ã¯ PKI トークンãŒä½¿ç”¨ã•ã‚Œã¦ã„ãªã„"
"ã‹ã€ãã†ã§ãªã„å ´åˆã¯è¨­å®šãŒé–“é•ã£ã¦ã„ã‚‹ã¨è€ƒãˆã‚‰ã‚Œã¾ã™ã€‚ "
+msgid "The configured token provider does not support bind authentication."
+msgstr "設定済ã¿ãƒˆãƒ¼ã‚¯ãƒ³ãƒ—ロãƒã‚¤ãƒ€ãƒ¼ã¯ãƒã‚¤ãƒ³ãƒ‰èªè¨¼ã‚’サãƒãƒ¼ãƒˆã—ã¾ã›ã‚“。"
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "v2 ã§ã¯ã€ãƒ‰ãƒ¡ã‚¤ãƒ³ã¨ã—ã¦å‹•ä½œã™ã‚‹ãƒ—ロジェクトã®ä½œæˆã¯è¨±å¯ã•ã‚Œã¾ã›ã‚“。"
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
@@ -898,17 +1227,13 @@ msgstr ""
msgid "There should not be any non-oauth parameters"
msgstr "oauth 関連以外ã®ãƒ‘ラメーターãŒå«ã¾ã‚Œã¦ã„ã¦ã¯ã„ã‘ã¾ã›ã‚“"
-#, fuzzy, python-format
+#, python-format
msgid "This is not a recognized Fernet payload version: %s"
-msgstr "ã“ã‚Œã¯èªè­˜ã•ã‚ŒãŸ Fernet ペイロードãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯ã‚ã‚Šã¾ã›ã‚“: %s"
+msgstr "ã“ã‚Œã¯èªè­˜å¯èƒ½ãª Fernet ペイロードãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯ã‚ã‚Šã¾ã›ã‚“: %s"
-#, fuzzy
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"ã“れ㯠v2.0 Fernet トークンã§ã¯ã‚ã‚Šã¾ã›ã‚“。トラストトークンã€ãƒ‰ãƒ¡ã‚¤ãƒ³ãƒˆãƒ¼ã‚¯"
-"ンã€ã¾ãŸã¯çµ±åˆãƒˆãƒ¼ã‚¯ãƒ³ã«ã¯ v3 を使用ã—ã¦ãã ã•ã„。"
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "ã“ã‚Œã¯èªè­˜å¯èƒ½ãª Fernet トークン %s ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -934,9 +1259,12 @@ msgstr "トークンãŒåˆ¥ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã«å±žã—ã¦ã„ã¾ã™"
msgid "Token does not belong to specified tenant."
msgstr "トークンãŒæŒ‡å®šã•ã‚ŒãŸãƒ†ãƒŠãƒ³ãƒˆã«æ‰€å±žã—ã¦ã„ã¾ã›ã‚“。"
+msgid "Token version is unrecognizable or unsupported."
+msgstr "トークンãƒãƒ¼ã‚¸ãƒ§ãƒ³ãŒèªè­˜ã§ããªã„ã‹ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“。"
+
#, fuzzy
msgid "Trustee has no delegated roles."
-msgstr "å—託者ã«å§”ä»»ã•ã‚ŒãŸãƒ­ãƒ¼ãƒ«ãŒã‚ã‚Šã¾ã›ã‚“。"
+msgstr "å—託者ã«å§”ä»»ã•ã‚ŒãŸå½¹å‰²ãŒã‚ã‚Šã¾ã›ã‚“。"
#, fuzzy
msgid "Trustor is disabled."
@@ -985,18 +1313,15 @@ msgstr ""
"リージョン %(region_id)s ã¾ãŸã¯ãã®å­ãƒªãƒ¼ã‚¸ãƒ§ãƒ³ãŒã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆã«é–¢é€£ä»˜ã‘られ"
"ã¦ã„ã‚‹ãŸã‚ã€ã“ã®ãƒªãƒ¼ã‚¸ãƒ§ãƒ³ã‚’削除ã§ãã¾ã›ã‚“。"
+msgid "Unable to downgrade schema"
+msgstr "スキーマをダウングレードã™ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
"マッピング %(mapping_id)s を使用ã™ã‚‹éš›ã«ã€æœ‰åŠ¹ãªã‚°ãƒ«ãƒ¼ãƒ—ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—"
"ãŸ"
-#, fuzzy, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"%(seconds)s 秒ãŒçµŒéŽã—ãŸæ™‚点ã§ã€ãƒ—ール ID %(id)s ã‹ã‚‰ã®æŽ¥ç¶šãŒã‚ã‚Šã¾ã›ã‚“。"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "ドメイン設定ディレクトリーãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %s"
@@ -1025,9 +1350,9 @@ msgstr ""
msgid "Unable to sign token."
msgstr "トークンã«ç½²åã§ãã¾ã›ã‚“。"
-#, fuzzy, python-format
+#, python-format
msgid "Unexpected assignment type encountered, %s"
-msgstr "無効ãªå‰²ã‚Šå½“ã¦ã‚¿ã‚¤ãƒ— %s ãŒæ¤œå‡ºã•ã‚Œã¾ã—ãŸ"
+msgstr "予期ã—ãªã„割り当ã¦ã‚¿ã‚¤ãƒ—ãŒæ¤œå‡ºã•ã‚Œã¾ã—ãŸã€‚%s"
#, python-format
msgid ""
@@ -1037,9 +1362,9 @@ msgstr ""
"èªå¯å±žæ€§ ã®çµ„ã¿åˆã‚ã› (ユーザー: %(user_id)sã€ã‚°ãƒ«ãƒ¼ãƒ—: %(group_id)sã€ãƒ—ロ"
"ジェクト: %(project_id)sã€ãƒ‰ãƒ¡ã‚¤ãƒ³: %(domain_id)s) ãŒæ­£ã—ãã‚ã‚Šã¾ã›ã‚“。"
-#, fuzzy, python-format
+#, python-format
msgid "Unexpected status requested for JSON Home response, %s"
-msgstr "JSON ホーム応答ã«å¯¾ã—ã¦äºˆæœŸã—ãªã„状æ³ãŒè¦æ±‚ã•ã‚Œã¾ã—ãŸã€‚%s"
+msgstr "JSON Home 応答ã«å¯¾ã—ã¦äºˆæœŸã—ãªã„状æ³ãŒè¦æ±‚ã•ã‚Œã¾ã—ãŸã€‚%s"
msgid "Unknown Target"
msgstr "ä¸æ˜Žãªã‚¿ãƒ¼ã‚²ãƒƒãƒˆ"
@@ -1056,25 +1381,42 @@ msgstr "トークンãƒãƒ¼ã‚¸ãƒ§ãƒ³ %s ã¯ä¸æ˜Žã§ã™"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "未登録ã®ä¾å­˜é–¢ä¿‚: %(targets)s ã«å¯¾ã™ã‚‹ %(name)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "`domain_id` ã®æ›´æ–°ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "`is_domain` ã®æ›´æ–°ã¯è¨±å¯ã•ã‚Œã¾ã›ã‚“。"
+
msgid "Update of `parent_id` is not allowed."
msgstr "\"parent_id\" ã®æ›´æ–°ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "domain_id ã®æ›´æ–°ãŒè¨±å¯ã•ã‚Œã‚‹ã®ã¯ root プロジェクトã®ã¿ã§ã™ã€‚"
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"ドメインã¨ã—ã¦å‹•ä½œã™ã‚‹ãƒ—ロジェクト㮠domain_id ã®æ›´æ–°ã¯è¨±å¯ã•ã‚Œã¾ã›ã‚“。"
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
"SAML アサーションã®ä½œæˆã‚’è¡Œã†ã¨ãã¯ã€ãƒ—ロジェクトã«ã‚¹ã‚³ãƒ¼ãƒ—ãŒè¨­å®šã•ã‚ŒãŸãƒˆãƒ¼ã‚¯"
"ンを使用ã—ã¦ãã ã•ã„"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"åŒä¸€ã®å‰²ã‚Šå½“ã¦ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã‚’自動的ã«è¨­å®šã™ã‚‹ãŸã‚ã® ID ドライãƒãƒ¼è¨­å®šã®ä½¿ç”¨ã¯ã€"
+"æ供を終了ã—ã¾ã—ãŸã€‚ \"O\" リリースã§ã¯ã€ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆ (SQL) 以外ã®å ´åˆã¯å‰²ã‚Šå½“"
+"ã¦ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã‚’明示的ã«è¨­å®šã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "ユーザー %(u_id)s ã¯ãƒ†ãƒŠãƒ³ãƒˆ %(t_id)s ã®ã‚¢ã‚¯ã‚»ã‚¹æ¨©é™ãŒã‚ã‚Šã¾ã›ã‚“。"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"ユーザー %(user_id)s ã«ã¯ã™ã§ã«ãƒ†ãƒŠãƒ³ãƒˆ %(tenant_id)s ã§ãƒ­ãƒ¼ãƒ« %(role_id)s ãŒ"
-"割り当ã¦ã‚‰ã‚Œã¦ã„ã¾ã™ã€‚"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr ""
"ユーザー %(user_id)s ã¯ãƒ‰ãƒ¡ã‚¤ãƒ³ %(domain_id)s ã¸ã®ã‚¢ã‚¯ã‚»ã‚¹æ¨©é™ãŒã‚ã‚Šã¾ã›ã‚“"
@@ -1096,6 +1438,13 @@ msgstr "ユーザー '%(user_id)s' ãŒã‚°ãƒ«ãƒ¼ãƒ— '%(group_id)s' ã§è¦‹ã¤ã‹ã‚
msgid "User IDs do not match"
msgstr "ユーザー ID ãŒä¸€è‡´ã—ã¾ã›ã‚“"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"ユーザー IDã€ãƒ‰ãƒ¡ã‚¤ãƒ³ ID ãŒæŒ‡å®šã•ã‚ŒãŸãƒ¦ãƒ¼ã‚¶ãƒ¼åã€ãƒ‰ãƒ¡ã‚¤ãƒ³åãŒæŒ‡å®šã•ã‚ŒãŸãƒ¦ãƒ¼"
+"ザーåã®ã„ãšã‚Œã‹ãŒæ¬ è½ã—ã¦ã„ã‚‹ãŸã‚ã€ãƒ¦ãƒ¼ã‚¶ãƒ¼èªè¨¼ã‚’作æˆã§ãã¾ã›ã‚“。"
+
#, python-format
msgid "User is disabled: %s"
msgstr "ユーザーãŒç„¡åŠ¹ã«ãªã£ã¦ã„ã¾ã™: %s"
@@ -1110,9 +1459,15 @@ msgstr "ユーザーã¯å—託者ã§ã¯ã‚ã‚Šã¾ã›ã‚“。"
msgid "User not found"
msgstr "ユーザーãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
+msgid "User not valid for tenant."
+msgstr "ユーザーã¯ãƒ†ãƒŠãƒ³ãƒˆã«å¯¾ã—ã¦ç„¡åŠ¹ã§ã™ã€‚"
+
+msgid "User roles not supported: tenant_id required"
+msgstr "ユーザーロールãŒã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“: tenant_id ãŒå¿…è¦ã§ã™"
+
#, fuzzy, python-format
msgid "User type %s not supported"
-msgstr "ユーザー型 %s ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“"
+msgstr "ユーザータイプ %s ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“"
msgid "You are not authorized to perform the requested action."
msgstr "è¦æ±‚ã•ã‚ŒãŸã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã‚’実行ã™ã‚‹è¨±å¯ãŒã‚ã‚Šã¾ã›ã‚“。"
@@ -1121,6 +1476,14 @@ msgstr "è¦æ±‚ã•ã‚ŒãŸã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã‚’実行ã™ã‚‹è¨±å¯ãŒã‚ã‚Šã¾ã›ã‚“。
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "è¦æ±‚ã•ã‚ŒãŸã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã‚’実行ã™ã‚‹è¨±å¯ãŒã‚ã‚Šã¾ã›ã‚“: %(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"管ç†ãƒˆãƒ¼ã‚¯ãƒ³ã‚’使用ã—ã¦ãƒªã‚½ãƒ¼ã‚¹ã‚’作æˆã—よã†ã¨ã—ã¦ã„ã¾ã™ã€‚ã“ã®ãƒˆãƒ¼ã‚¯ãƒ³ã¯ãƒ‰ãƒ¡ã‚¤"
+"ン内ã«ãªã„ãŸã‚ã€ã“ã®ãƒªã‚½ãƒ¼ã‚¹ãŒå±žã™ã‚‹ãƒ‰ãƒ¡ã‚¤ãƒ³ã‚’明示的ã«å«ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
+
msgid "`key_mangler` functions must be callable."
msgstr "`key_mangler` 関数ã¯å‘¼ã³å‡ºã—å¯èƒ½ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
@@ -1130,54 +1493,25 @@ msgstr "`key_mangler` オプションã¯é–¢æ•°å‚ç…§ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã
msgid "any options"
msgstr "ä»»æ„ã®ã‚ªãƒ—ション"
-#, fuzzy
msgid "auth_type is not Negotiate"
-msgstr "auth_type ã¯æŠ˜è¡ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+msgstr "auth_type ã¯ãƒã‚´ã‚·ã‚¨ãƒ¼ãƒˆã§ã¯ã‚ã‚Šã¾ã›ã‚“"
msgid "authorizing user does not have role required"
msgstr "ユーザーをèªå¯ã™ã‚‹ã®ã«å¿…è¦ãªãƒ­ãƒ¼ãƒ«ãŒã‚ã‚Šã¾ã›ã‚“"
-#, fuzzy
-msgid "cache_collection name is required"
-msgstr "cache_collection name ã¯å¿…é ˆã§ã™"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
"無効ã«ãªã£ã¦ã„るプロジェクトをå«ã‚€ãƒ–ランãƒã«ãƒ—ロジェクトを作æˆã™ã‚‹ã“ã¨ã¯ã§ã"
"ã¾ã›ã‚“: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "プロジェクトã¯è¦ªã¨ã¯åˆ¥ã®ãƒ‰ãƒ¡ã‚¤ãƒ³å†…ã«ã¯ä½œæˆã§ãã¾ã›ã‚“。"
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"有効ã«ãªã£ã¦ã„るドメインã¯å‰Šé™¤ã§ãã¾ã›ã‚“。最åˆã«ãã®ãƒ‰ãƒ¡ã‚¤ãƒ³ã‚’無効ã«ã—ã¦ãã "
-"ã•ã„。"
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr "プロジェクト %s ã¯éšŽå±¤å†…ã®æœ«ç«¯ã§ã¯ãªã„ãŸã‚ã€å‰Šé™¤ã§ãã¾ã›ã‚“。"
-
#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"プロジェクト %s ã®ã‚µãƒ–ツリーã«ã¯æœ‰åŠ¹ã«ãªã£ã¦ã„るプロジェクトãŒå«ã¾ã‚Œã¦ã„ã‚‹ãŸ"
-"ã‚ã€ã“ã®ãƒ—ロジェクトã¯ç„¡åŠ¹ã«ã§ãã¾ã›ã‚“"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "親ãŒç„¡åŠ¹ã«ãªã£ã¦ã„るプロジェクト %s ã¯æœ‰åŠ¹ã«ã§ãã¾ã›ã‚“"
-
-#, fuzzy
-msgid "database db_name is required"
-msgstr "database db_name ã¯å¿…é ˆã§ã™"
-
-msgid "db_hosts value is required"
-msgstr "db_hosts 値ã¯å¿…é ˆã§ã™"
-
-msgid "delete the default domain"
-msgstr "デフォルトドメインを削除ã—ã¦ãã ã•ã„"
+"ドメインã¨ã—ã¦å‹•ä½œã™ã‚‹æœ‰åŠ¹ã«ãªã£ã¦ã„るプロジェクトを削除ã§ãã¾ã›ã‚“。最åˆã«ãƒ—"
+"ロジェクト %s を無効ã«ã—ã¦ãã ã•ã„。"
#, python-format
msgid "group %(group)s"
@@ -1190,35 +1524,33 @@ msgstr ""
"idp_contact_type 㯠technicalã€otherã€supportã€administrativeã€billing ã®ã„ãš"
"ã‚Œã‹ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "mongo_ttl_seconds ã«ã¯æ•´æ•°å€¤ãŒå¿…è¦ã§ã™"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "w (書ãè¾¼ã¿é–¢é€£å±žæ€§) ã«ã¯æ•´æ•°å€¤ãŒå¿…è¦ã§ã™"
-
#, python-format
msgid "invalid date format %s"
msgstr "æ—¥ä»˜å½¢å¼ %s ã¯ç„¡åŠ¹ã§ã™"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "%s ブランãƒã«åˆ°é”ã™ã‚‹æœ€å¤§ã®éšŽå±¤ã®æ·±ã•ã€‚"
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"ドメインã¨ã—ã¦å‹•ä½œã™ã‚‹åŒã˜åå‰ã® 2 ã¤ã®ãƒ—ロジェクトãŒå­˜åœ¨ã™ã‚‹ã“ã¨ã¯è¨±å¯ã•ã‚Œã¾"
+"ã›ã‚“: %s"
-msgid "no ssl support available"
-msgstr "使用å¯èƒ½ãª SSL サãƒãƒ¼ãƒˆãŒã‚ã‚Šã¾ã›ã‚“"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"1 ã¤ã®ãƒ‰ãƒ¡ã‚¤ãƒ³å†…ã«åŒã˜åå‰ã® 2 ã¤ã®ãƒ—ロジェクトãŒå­˜åœ¨ã™ã‚‹ã“ã¨ã¯è¨±å¯ã•ã‚Œã¾ã›"
+"ã‚“ : %s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "ドメインã¨ã—ã¦å‹•ä½œã™ã‚‹ã“ã¨ãŒè¨±å¯ã•ã‚Œã‚‹ã®ã¯ root プロジェクトã®ã¿ã§ã™ã€‚"
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "グループ %(group)s ã®ã‚ªãƒ—ション %(option)s"
-#, fuzzy
-msgid "pad must be single character"
-msgstr "埋ã‚è¾¼ã¿ã¯å˜ä¸€æ–‡å­—ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
-#, fuzzy
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "埋ã‚è¾¼ã¾ã‚ŒãŸ base64url テキスト㯠4 ã®å€æ•°ã®æ–‡å­—æ•°ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
msgid "provided consumer key does not match stored consumer key"
msgstr ""
"指定ã•ã‚ŒãŸã‚³ãƒ³ã‚·ãƒ¥ãƒ¼ãƒžãƒ¼éµã¯ä¿å­˜ã•ã‚Œã¦ã„るコンシューマーéµã¨ä¸€è‡´ã—ã¾ã›ã‚“"
@@ -1228,11 +1560,7 @@ msgstr "指定された要求鍵は保管されている要求鍵と一致しま
#, fuzzy
msgid "provided verifier does not match stored verifier"
-msgstr "指定ã•ã‚ŒãŸãƒ™ãƒªãƒ•ã‚¡ã‚¤ãƒ¤ãƒ¼ã¯ä¿å­˜ã•ã‚Œã¦ã„るベリファイヤーã¨ä¸€è‡´ã—ã¾ã›ã‚“"
-
-#, fuzzy
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "領域ã®ã‚¿ã‚¤ãƒ—㌠dogpile.cache.CacheRegion ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+msgstr "指定ã•ã‚ŒãŸãƒ™ãƒªãƒ•ã‚¡ã‚¤ãƒ¤ãƒ¼ã¯ä¿ç®¡æ¸ˆã¿ãƒ™ãƒªãƒ•ã‚¡ã‚¤ãƒ¤ãƒ¼ã¨ä¸€è‡´ã—ã¾ã›ã‚“"
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses ã¯æ­£æ•´æ•°ã¾ãŸã¯ãƒŒãƒ«ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
@@ -1240,9 +1568,6 @@ msgstr "remaining_uses ã¯æ­£æ•´æ•°ã¾ãŸã¯ãƒŒãƒ«ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "å†å§”ä»»ãŒè¨±å¯ã•ã‚Œã¦ã„ã‚‹å ´åˆã¯ remaining_uses を設定ã—ã¦ã¯ãªã‚Šã¾ã›ã‚“"
-msgid "replicaset_name required when use_replica is True"
-msgstr "use_replica ㌠True ã®å ´åˆã¯ replicaset_name ãŒå¿…è¦ã§ã™"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1254,20 +1579,13 @@ msgstr ""
msgid "rescope a scoped token"
msgstr "スコープãŒè¨­å®šã•ã‚ŒãŸãƒˆãƒ¼ã‚¯ãƒ³ã®ã‚¹ã‚³ãƒ¼ãƒ—を設定ã—ç›´ã—ã¾ã™"
-#, fuzzy, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"テキスト㯠4 ã®å€æ•°ã§ã™ãŒã€æœ€å¾Œã‹ã‚‰2 ã¤ç›®ã®æ–‡å­—ã®å‰ã«åŸ‹ã‚込㿠\"%s\" ã‚ã‚Šã¾ã™"
+#, python-format
+msgid "role %s is not defined"
+msgstr "ロール %s ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“"
-#, fuzzy, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"テキスト㯠4 ã®å€æ•°ã§ã™ãŒã€åŸ‹ã‚è¾¼ã¿ã§ã¯ãªã„最後ã®æ–‡å­—ã®å‰ã«åŸ‹ã‚込㿠\"%s\" ã‚"
-"ã‚Šã¾ã™"
-
-#, fuzzy, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "テキスト㯠4 ã®å€æ•°ã§ã¯ã‚ã‚Šã¾ã›ã‚“ãŒã€åŸ‹ã‚込㿠\"%s\" ãŒå«ã¾ã‚Œã¦ã„ã¾ã™"
+"include_subtree も指定ã•ã‚Œã‚‹å ´åˆã€scope.project.id を指定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1281,3 +1599,16 @@ msgstr "tls_cacertfile %s が見つからない、もしくは、ファイルで
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "トークンå‚照㯠KeystoneToken åž‹ã§ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚%s ã‚’å—ä¿¡ã—ã¾ã—ãŸ"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"domain_id ã®æ›´æ–°ã¯ Mitaka ã®æ™‚点ã§æ供を終了ã—ã€O ã§å‰Šé™¤ã•ã‚Œã‚‹äºˆå®šã§ã™ã€‚"
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"検証ã•ã‚Œã€%(func_name)r ã®é–¢æ•°ã®ã‚·ã‚°ãƒ‹ãƒãƒ£ãƒ¼ã§ %(param_name)r ãŒè¦‹ã¤ã‹ã‚‹ã“ã¨"
+"ãŒäºˆæœŸã•ã‚Œã¾ã™"
diff --git a/keystone-moon/keystone/locale/keystone-log-critical.pot b/keystone-moon/keystone/locale/keystone-log-critical.pot
index e6a96bf1..f071ef0f 100644
--- a/keystone-moon/keystone/locale/keystone-log-critical.pot
+++ b/keystone-moon/keystone/locale/keystone-log-critical.pot
@@ -1,21 +1,21 @@
# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
+# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.0.0b3.dev14\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-08-01 06:07+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-04 06:55+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
+"Generated-By: Babel 2.2.0\n"
#: keystone/catalog/backends/templated.py:106
#, python-format
diff --git a/keystone-moon/keystone/locale/keystone-log-error.pot b/keystone-moon/keystone/locale/keystone-log-error.pot
index f53c653b..7b38a370 100644
--- a/keystone-moon/keystone/locale/keystone-log-error.pot
+++ b/keystone-moon/keystone/locale/keystone-log-error.pot
@@ -1,136 +1,147 @@
# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
+# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.0.0b4.dev56\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-09-21 06:08+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-04 06:55+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
+"Generated-By: Babel 2.2.0\n"
-#: keystone/notifications.py:379
+#: keystone/notifications.py:336
msgid "Failed to construct notifier"
msgstr ""
-#: keystone/notifications.py:474
+#: keystone/notifications.py:439
#, python-format
msgid "Failed to send %(res_id)s %(event_type)s notification"
msgstr ""
-#: keystone/notifications.py:743
+#: keystone/notifications.py:706
#, python-format
msgid "Failed to send %(action)s %(event_type)s notification"
msgstr ""
-#: keystone/catalog/core.py:63
+#: keystone/assignment/core.py:688
+#, python-format
+msgid "Circular reference found role inference rules - %(prior_role_id)s."
+msgstr ""
+
+#: keystone/catalog/core.py:75
#, python-format
msgid "Malformed endpoint - %(url)r is not a string"
msgstr ""
-#: keystone/catalog/core.py:68
+#: keystone/catalog/core.py:80
#, python-format
msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
msgstr ""
-#: keystone/catalog/core.py:76
+#: keystone/catalog/core.py:88
#, python-format
msgid ""
"Malformed endpoint '%(url)s'. The following type error occurred during "
"string substitution: %(typeerror)s"
msgstr ""
-#: keystone/catalog/core.py:82
+#: keystone/catalog/core.py:94
#, python-format
msgid ""
"Malformed endpoint %s - incomplete format (are you missing a type "
"notifier ?)"
msgstr ""
-#: keystone/common/openssl.py:93
+#: keystone/common/openssl.py:90
#, python-format
-msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
+msgid "Command %(to_exec)s exited with %(retcode)s - %(output)s"
msgstr ""
-#: keystone/common/openssl.py:121
+#: keystone/common/openssl.py:114
#, python-format
msgid "Failed to remove file %(file_path)r: %(error)s"
msgstr ""
-#: keystone/common/utils.py:241
+#: keystone/common/utils.py:267
msgid ""
"Error setting up the debug environment. Verify that the option --debug-"
"url has the format <host>:<port> and that a debugger processes is "
"listening on that port."
msgstr ""
-#: keystone/common/cache/core.py:100
-#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr ""
-
#: keystone/common/environment/eventlet_server.py:112
#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr ""
-#: keystone/common/environment/eventlet_server.py:205
+#: keystone/common/environment/eventlet_server.py:211
msgid "Server error"
msgstr ""
-#: keystone/contrib/federation/idp.py:429
+#: keystone/endpoint_policy/core.py:131 keystone/endpoint_policy/core.py:231
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - "
+"%(region_id)s."
+msgstr ""
+
+#: keystone/federation/idp.py:440
#, python-format
msgid "Error when signing assertion, reason: %(reason)s%(output)s"
msgstr ""
-#: keystone/contrib/oauth1/core.py:136
+#: keystone/oauth1/core.py:135
msgid "Cannot retrieve Authorization headers"
msgstr ""
-#: keystone/endpoint_policy/core.py:132 keystone/endpoint_policy/core.py:231
+#: keystone/resource/core.py:728
#, python-format
msgid ""
-"Circular reference or a repeated entry found in region tree - "
-"%(region_id)s."
+"Asked to convert a non-domain project into a domain - Domain: "
+"%(domain_id)s, Project ID: %(id)s, Project Name: %(project_name)s"
msgstr ""
-#: keystone/resource/core.py:488
+#: keystone/resource/core.py:831
#, python-format
msgid ""
"Circular reference or a repeated entry found projects hierarchy - "
"%(project_id)s."
msgstr ""
-#: keystone/resource/core.py:966
-#, python-format
-msgid ""
-"Unexpected results in response for domain config - %(count)s responses, "
-"first option is %(option)s, expected option %(expected)s"
+#: keystone/resource/core.py:904
+msgid "Failed to create the default domain."
msgstr ""
-#: keystone/resource/backends/sql.py:101 keystone/resource/backends/sql.py:120
+#: keystone/resource/core.py:1479 keystone/resource/V8_backends/sql.py:100
+#: keystone/resource/V8_backends/sql.py:119
+#: keystone/resource/backends/sql.py:137 keystone/resource/backends/sql.py:156
#, python-format
msgid ""
"Circular reference or a repeated entry found in projects hierarchy - "
"%(project_id)s."
msgstr ""
-#: keystone/token/provider.py:285
+#: keystone/resource/core.py:1660
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+
+#: keystone/token/provider.py:334
#, python-format
msgid "Unexpected error or malformed token determining token expiry: %s"
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:225
+#: keystone/token/persistence/backends/kvs.py:236
#, python-format
msgid ""
"Reinitializing revocation list due to error in loading revocation list "
@@ -138,27 +149,27 @@ msgid ""
"data: %(list)r"
msgstr ""
-#: keystone/token/providers/common.py:701
+#: keystone/token/providers/common.py:728
msgid "Failed to validate token"
msgstr ""
-#: keystone/token/providers/pki.py:47
+#: keystone/token/providers/pki.py:52
msgid "Unable to sign token"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:38
+#: keystone/token/providers/fernet/utils.py:42
#, python-format
msgid ""
"Either [fernet_tokens] key_repository does not exist or Keystone does not"
" have sufficient permission to access it: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:62
+#: keystone/token/providers/fernet/utils.py:66
#, python-format
msgid "Unable to convert Keystone user or group ID. Error: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:79
+#: keystone/token/providers/fernet/utils.py:83
msgid ""
"Failed to create [fernet_tokens] key_repository: either it already exists"
" or you don't have sufficient permissions to create it"
diff --git a/keystone-moon/keystone/locale/keystone-log-info.pot b/keystone-moon/keystone/locale/keystone-log-info.pot
index 69c9609c..664cf0fa 100644
--- a/keystone-moon/keystone/locale/keystone-log-info.pot
+++ b/keystone-moon/keystone/locale/keystone-log-info.pot
@@ -1,66 +1,141 @@
# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
+# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.0.0b4.dev16\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-09-08 06:08+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-04 06:55+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.0\n"
+"Generated-By: Babel 2.2.0\n"
-#: keystone/assignment/core.py:217
+#: keystone/assignment/core.py:200
#, python-format
msgid "Creating the default role %s because it does not exist."
msgstr ""
-#: keystone/assignment/core.py:225
+#: keystone/assignment/core.py:208
#, python-format
msgid "Creating the default role %s failed because it was already created"
msgstr ""
-#: keystone/auth/controllers.py:109
+#: keystone/auth/controllers.py:112
#, python-format
msgid ""
"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use"
" the earliest value."
msgstr ""
-#: keystone/common/openssl.py:81
+#: keystone/cmd/cli.py:188
+#, python-format
+msgid "Created domain %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:191
+#, python-format
+msgid "Domain %s already exists, skipping creation."
+msgstr ""
+
+#: keystone/cmd/cli.py:204
+#, python-format
+msgid "Created project %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:206
+#, python-format
+msgid "Project %s already exists, skipping creation."
+msgstr ""
+
+#: keystone/cmd/cli.py:216
+#, python-format
+msgid "User %s already exists, skipping creation."
+msgstr ""
+
+#: keystone/cmd/cli.py:226
+#, python-format
+msgid "Created user %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:235
+#, python-format
+msgid "Created Role %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:237
+#, python-format
+msgid "Role %s exists, skipping creation."
+msgstr ""
+
+#: keystone/cmd/cli.py:254
+#, python-format
+msgid "Granted %(role)s on %(project)s to user %(username)s."
+msgstr ""
+
+#: keystone/cmd/cli.py:260
+#, python-format
+msgid "User %(username)s already has %(role)s on %(project)s."
+msgstr ""
+
+#: keystone/cmd/cli.py:271
+#, python-format
+msgid "Created Region %s"
+msgstr ""
+
+#: keystone/cmd/cli.py:273
+#, python-format
+msgid "Region %s exists, skipping creation."
+msgstr ""
+
+#: keystone/cmd/cli.py:330
+#, python-format
+msgid "Created %(interface)s endpoint %(url)s"
+msgstr ""
+
+#: keystone/cmd/cli.py:335
+#, python-format
+msgid "Skipping %s endpoint as already created"
+msgstr ""
+
+#: keystone/cmd/cli.py:639
+#, python-format
+msgid "Scanning %r for domain config files"
+msgstr ""
+
+#: keystone/common/openssl.py:80
#, python-format
msgid "Running command - %s"
msgstr ""
-#: keystone/common/wsgi.py:82
+#: keystone/common/wsgi.py:80
msgid "No bind information present in token"
msgstr ""
-#: keystone/common/wsgi.py:86
+#: keystone/common/wsgi.py:87
#, python-format
msgid "Named bind mode %s not in bind information"
msgstr ""
-#: keystone/common/wsgi.py:93
+#: keystone/common/wsgi.py:94
msgid "Kerberos credentials required and not present"
msgstr ""
-#: keystone/common/wsgi.py:97
+#: keystone/common/wsgi.py:98
msgid "Kerberos credentials do not match those in bind"
msgstr ""
-#: keystone/common/wsgi.py:101
+#: keystone/common/wsgi.py:102
msgid "Kerberos bind authentication successful"
msgstr ""
-#: keystone/common/wsgi.py:108
+#: keystone/common/wsgi.py:109
#, python-format
msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
msgstr ""
@@ -70,98 +145,93 @@ msgstr ""
msgid "Starting %(arg0)s on %(host)s:%(port)s"
msgstr ""
-#: keystone/common/kvs/core.py:137
+#: keystone/common/kvs/core.py:159
#, python-format
msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
msgstr ""
-#: keystone/common/kvs/core.py:187
+#: keystone/common/kvs/core.py:209
#, python-format
msgid "Using %(func)s as KVS region %(name)s key_mangler"
msgstr ""
-#: keystone/common/kvs/core.py:199
+#: keystone/common/kvs/core.py:221
#, python-format
-msgid "Using default dogpile sha1_mangle_key as KVS region %s key_mangler"
+msgid ""
+"Using default keystone.common.kvs.sha1_mangle_key as KVS region %s "
+"key_mangler"
msgstr ""
-#: keystone/common/kvs/core.py:209
+#: keystone/common/kvs/core.py:231
#, python-format
msgid "KVS region %s key_mangler disabled."
msgstr ""
-#: keystone/contrib/example/core.py:69 keystone/contrib/example/core.py:78
-#, python-format
-msgid ""
-"Received the following notification: service %(service)s, resource_type: "
-"%(resource_type)s, operation %(operation)s payload %(payload)s"
-msgstr ""
-
-#: keystone/middleware/core.py:266
+#: keystone/middleware/auth.py:172
#, python-format
msgid "Cannot find client issuer in env by the issuer attribute - %s."
msgstr ""
-#: keystone/middleware/core.py:274
+#: keystone/middleware/auth.py:180
#, python-format
msgid ""
"The client issuer %(client_issuer)s does not match with the trusted "
"issuer %(trusted_issuer)s"
msgstr ""
-#: keystone/token/persistence/backends/sql.py:283
+#: keystone/token/persistence/backends/sql.py:286
#, python-format
msgid "Total expired tokens removed: %d"
msgstr ""
-#: keystone/token/providers/fernet/token_formatters.py:181
+#: keystone/token/providers/fernet/token_formatters.py:174
#, python-format
msgid ""
"Fernet token created with length of %d characters, which exceeds 255 "
"characters"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:72
+#: keystone/token/providers/fernet/utils.py:76
msgid ""
"[fernet_tokens] key_repository does not appear to exist; attempting to "
"create it"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:130
+#: keystone/token/providers/fernet/utils.py:134
#, python-format
msgid "Created a new key: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:143
+#: keystone/token/providers/fernet/utils.py:147
msgid "Key repository is already initialized; aborting."
msgstr ""
-#: keystone/token/providers/fernet/utils.py:184
+#: keystone/token/providers/fernet/utils.py:188
#, python-format
msgid "Starting key rotation with %(count)s key files: %(list)s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:190
+#: keystone/token/providers/fernet/utils.py:194
#, python-format
msgid "Current primary key is: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:192
+#: keystone/token/providers/fernet/utils.py:196
#, python-format
msgid "Next primary key will be: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:202
+#: keystone/token/providers/fernet/utils.py:206
#, python-format
msgid "Promoted key 0 to be the primary: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:223
+#: keystone/token/providers/fernet/utils.py:227
#, python-format
msgid "Excess key to purge: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:257
+#: keystone/token/providers/fernet/utils.py:262
#, python-format
msgid "Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: %(dir)s"
msgstr ""
diff --git a/keystone-moon/keystone/locale/keystone-log-warning.pot b/keystone-moon/keystone/locale/keystone-log-warning.pot
index 3beb3a24..6282f2c6 100644
--- a/keystone-moon/keystone/locale/keystone-log-warning.pot
+++ b/keystone-moon/keystone/locale/keystone-log-warning.pot
@@ -1,78 +1,92 @@
# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
+# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.0.0rc2.dev1\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-10-01 06:09+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-04 06:55+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"Generated-By: Babel 2.2.0\n"
-#: keystone/exception.py:48
+#: keystone/exception.py:66
msgid "missing exception kwargs (programmer error)"
msgstr ""
-#: keystone/auth/controllers.py:457
+#: keystone/assignment/core.py:1394
+#, python-format
+msgid ""
+"delete_domain_assignments method not found in custom assignment driver. "
+"Domain assignments for domain (%s) to users from other domains will not "
+"be removed. This was added in V9 of the assignment driver."
+msgstr ""
+
+#: keystone/auth/controllers.py:468
#, python-format
msgid ""
"User %(user_id)s doesn't have access to default project %(project_id)s. "
"The token will be unscoped rather than scoped to the project."
msgstr ""
-#: keystone/auth/controllers.py:465
+#: keystone/auth/controllers.py:476
#, python-format
msgid ""
"User %(user_id)s's default project %(project_id)s is disabled. The token "
"will be unscoped rather than scoped to the project."
msgstr ""
-#: keystone/auth/controllers.py:474
+#: keystone/auth/controllers.py:485
#, python-format
msgid ""
"User %(user_id)s's default project %(project_id)s not found. The token "
"will be unscoped rather than scoped to the project."
msgstr ""
-#: keystone/cmd/cli.py:158
+#: keystone/cmd/cli.py:455
+msgid ""
+"keystone-manage pki_setup is deprecated as of Mitaka in favor of not "
+"using PKI tokens and may be removed in 'O' release."
+msgstr ""
+
+#: keystone/cmd/cli.py:458
msgid "keystone-manage pki_setup is not recommended for production use."
msgstr ""
-#: keystone/cmd/cli.py:177
+#: keystone/cmd/cli.py:477
msgid "keystone-manage ssl_setup is not recommended for production use."
msgstr ""
-#: keystone/cmd/cli.py:483
+#: keystone/cmd/cli.py:650
#, python-format
msgid "Ignoring file (%s) while scanning domain config directory"
msgstr ""
-#: keystone/common/authorization.py:65
+#: keystone/common/authorization.py:69
msgid "RBAC: Invalid user data in token"
msgstr ""
-#: keystone/common/controller.py:84 keystone/middleware/core.py:199
+#: keystone/common/controller.py:102 keystone/middleware/auth.py:102
msgid "RBAC: Invalid token"
msgstr ""
-#: keystone/common/controller.py:109 keystone/common/controller.py:206
-#: keystone/common/controller.py:773
+#: keystone/common/controller.py:127 keystone/common/controller.py:246
+#: keystone/common/controller.py:799
msgid "RBAC: Bypassing authorization"
msgstr ""
-#: keystone/common/controller.py:718
+#: keystone/common/controller.py:735
msgid "No domain information specified as part of list request"
msgstr ""
-#: keystone/common/controller.py:745
+#: keystone/common/controller.py:771
msgid ""
"Not specifying a domain during a create user, group or project call, and "
"relying on falling back to the default domain, is deprecated as of "
@@ -80,73 +94,61 @@ msgid ""
"explicitly or use a domain-scoped token"
msgstr ""
-#: keystone/common/openssl.py:73
+#: keystone/common/openssl.py:74
msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer"
msgstr ""
-#: keystone/common/utils.py:105
+#: keystone/common/utils.py:129
#, python-format
msgid "Truncating user password to %d characters."
msgstr ""
-#: keystone/common/utils.py:527
+#: keystone/common/utils.py:552
msgid "Couldn't find the auth context."
msgstr ""
-#: keystone/common/wsgi.py:251
+#: keystone/common/wsgi.py:252
#, python-format
msgid "Authorization failed. %(exception)s from %(remote_addr)s"
msgstr ""
-#: keystone/common/cache/backends/mongo.py:407
-#, python-format
-msgid ""
-"TTL index already exists on db collection <%(c_name)s>, remove index "
-"<%(indx_name)s> first to make updated mongo_ttl_seconds value to be "
-"effective"
-msgstr ""
-
-#: keystone/common/kvs/core.py:133
+#: keystone/common/kvs/core.py:153
#, python-format
msgid "%s is not a dogpile.proxy.ProxyBackend"
msgstr ""
-#: keystone/common/kvs/core.py:402
+#: keystone/common/kvs/core.py:428
#, python-format
msgid "KVS lock released (timeout reached) for: %s"
msgstr ""
-#: keystone/common/ldap/core.py:1029
+#: keystone/common/ldap/core.py:1033
msgid ""
"LDAP Server does not support paging. Disable paging in keystone.conf to "
"avoid this message."
msgstr ""
-#: keystone/common/ldap/core.py:1224
+#: keystone/common/ldap/core.py:1232
#, python-format
msgid ""
"Invalid additional attribute mapping: \"%s\". Format must be "
"<ldap_attribute>:<keystone_attribute>"
msgstr ""
-#: keystone/common/ldap/core.py:1335
+#: keystone/common/ldap/core.py:1343
#, python-format
msgid ""
"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
"therefore cannot be used as an ID. Will get the ID from DN instead"
msgstr ""
-#: keystone/common/ldap/core.py:1669
+#: keystone/common/ldap/core.py:1704
#, python-format
msgid ""
"When deleting entries for %(search_base)s, could not delete nonexistent "
"entries %(entries)s%(dots)s"
msgstr ""
-#: keystone/contrib/federation/utils.py:543
-msgid "Ignoring user name"
-msgstr ""
-
#: keystone/endpoint_policy/core.py:94
#, python-format
msgid ""
@@ -154,54 +156,83 @@ msgid ""
"%(policy_id)s not found."
msgstr ""
-#: keystone/endpoint_policy/core.py:182
+#: keystone/endpoint_policy/core.py:181
#, python-format
msgid ""
"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
msgstr ""
-#: keystone/endpoint_policy/core.py:198
+#: keystone/endpoint_policy/core.py:197
#, python-format
msgid ""
"Policy %(policy_id)s referenced in association for endpoint "
"%(endpoint_id)s not found."
msgstr ""
-#: keystone/identity/controllers.py:141
+#: keystone/federation/utils.py:615
+msgid "Ignoring user name"
+msgstr ""
+
+#: keystone/identity/controllers.py:145
#, python-format
msgid "Unable to remove user %(user)s from %(tenant)s."
msgstr ""
-#: keystone/identity/controllers.py:160
+#: keystone/identity/controllers.py:164
#, python-format
msgid "Unable to add user %(user)s to %(tenant)s."
msgstr ""
-#: keystone/identity/core.py:137
+#: keystone/identity/core.py:131
#, python-format
msgid "Invalid domain name (%s) found in config file name"
msgstr ""
-#: keystone/identity/core.py:175
+#: keystone/identity/core.py:169
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr ""
-#: keystone/identity/core.py:692
+#: keystone/identity/core.py:691
#, python-format
msgid ""
"Found multiple domains being mapped to a driver that does not support "
"that (e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s"
msgstr ""
-#: keystone/middleware/core.py:293
+#: keystone/middleware/auth.py:81
+msgid ""
+"build_auth_context middleware checking for the admin token is deprecated "
+"as of the Mitaka release and will be removed in the O release. If your "
+"deployment requires use of the admin token, update keystone-paste.ini so "
+"that admin_token_auth is before build_auth_context in the paste "
+"pipelines, otherwise remove the admin_token_auth middleware from the "
+"paste pipelines."
+msgstr ""
+
+#: keystone/middleware/auth.py:195
msgid ""
"Auth context already exists in the request environment; it will be used "
"for authorization instead of creating a new one."
msgstr ""
-#: keystone/resource/core.py:1254
+#: keystone/middleware/core.py:63
+msgid ""
+"The admin_token_auth middleware presents a security risk and should be "
+"removed from the [pipeline:api_v3], [pipeline:admin_api], and "
+"[pipeline:public_api] sections of your paste ini file."
+msgstr ""
+
+#: keystone/resource/core.py:896
+msgid ""
+"The default domain was created automatically to contain V2 resources. "
+"This is deprecated in the M release and will not be supported in the O "
+"release. Create the default domain manually or use the keystone-manage "
+"bootstrap command."
+msgstr ""
+
+#: keystone/resource/core.py:1945
#, python-format
msgid ""
"Found what looks like an unmatched config option substitution reference -"
@@ -210,7 +241,7 @@ msgid ""
"added?"
msgstr ""
-#: keystone/resource/core.py:1261
+#: keystone/resource/core.py:1952
#, python-format
msgid ""
"Found what looks like an incorrectly constructed config option "
@@ -218,11 +249,16 @@ msgid ""
"%(option)s, value: %(value)s."
msgstr ""
+#: keystone/resource/backends/sql.py:222
+#, python-format
+msgid "Project %s does not exist and was not deleted."
+msgstr ""
+
#: keystone/server/common.py:42
-msgid "debug is enabled so responses may include sensitive information."
+msgid "insecure_debug is enabled so responses may include sensitive information."
msgstr ""
-#: keystone/token/persistence/core.py:225
+#: keystone/token/persistence/core.py:220
#, python-format
msgid ""
"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
@@ -236,40 +272,44 @@ msgid ""
"instead."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:205
+#: keystone/token/persistence/backends/kvs.py:207
#, python-format
msgid "Token `%s` is expired, not adding to the revocation list."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:239
+#: keystone/token/persistence/backends/kvs.py:250
#, python-format
msgid ""
"Removing `%s` from revocation list due to invalid expires data in "
"revocation list."
msgstr ""
-#: keystone/token/providers/fernet/utils.py:46
+#: keystone/token/providers/fernet/utils.py:50
#, python-format
msgid "[fernet_tokens] key_repository is world readable: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:90
+#: keystone/token/providers/fernet/utils.py:94
#, python-format
msgid ""
"Unable to change the ownership of [fernet_tokens] key_repository without "
"a keystone user ID and keystone group ID both being provided: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:112
+#: keystone/token/providers/fernet/utils.py:116
#, python-format
msgid ""
"Unable to change the ownership of the new key without a keystone user ID "
"and keystone group ID both being provided: %s"
msgstr ""
-#: keystone/token/providers/fernet/utils.py:210
+#: keystone/token/providers/fernet/utils.py:214
msgid ""
"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary "
"key."
msgstr ""
+#: keystone/version/service.py:77
+msgid "'local conf' from PasteDeploy INI is being ignored."
+msgstr ""
+
diff --git a/keystone-moon/keystone/locale/keystone.pot b/keystone-moon/keystone/locale/keystone.pot
index ce32fa88..b5838aab 100644
--- a/keystone-moon/keystone/locale/keystone.pot
+++ b/keystone-moon/keystone/locale/keystone.pot
@@ -1,23 +1,23 @@
# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
+# Copyright (C) 2016 ORGANIZATION
# This file is distributed under the same license as the keystone project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2015.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.0.0rc2.dev1\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-10-01 06:09+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-04 06:55+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"Generated-By: Babel 2.2.0\n"
-#: keystone/exception.py:78
+#: keystone/exception.py:83
#, python-format
msgid ""
"Expecting to find %(attribute)s in %(target)s - the server could not "
@@ -25,38 +25,38 @@ msgid ""
"incorrect. The client is assumed to be in error."
msgstr ""
-#: keystone/exception.py:87
+#: keystone/exception.py:92
#, python-format
msgid "Cannot create an endpoint with an invalid URL: %(url)s"
msgstr ""
-#: keystone/exception.py:94
+#: keystone/exception.py:99
#, python-format
msgid "%(detail)s"
msgstr ""
-#: keystone/exception.py:98
+#: keystone/exception.py:103
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client "
"is assumed to be in error."
msgstr ""
-#: keystone/exception.py:107
+#: keystone/exception.py:112
msgid ""
"The 'expires_at' must not be before now. The server could not comply with"
" the request since it is either malformed or otherwise incorrect. The "
"client is assumed to be in error."
msgstr ""
-#: keystone/exception.py:116
+#: keystone/exception.py:121
#, python-format
msgid ""
"String length exceeded.The length of string '%(string)s' exceeded the "
"limit of column %(type)s(CHAR(%(length)d))."
msgstr ""
-#: keystone/exception.py:122
+#: keystone/exception.py:127
#, python-format
msgid ""
"Request attribute %(attribute)s must be less than or equal to %(size)i. "
@@ -64,88 +64,88 @@ msgid ""
"is invalid (too large). The client is assumed to be in error."
msgstr ""
-#: keystone/exception.py:132
+#: keystone/exception.py:137
#, python-format
msgid ""
"The specified parent region %(parent_region_id)s would create a circular "
"region hierarchy."
msgstr ""
-#: keystone/exception.py:139
+#: keystone/exception.py:156
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server "
"could not comply with the request because the password is invalid."
msgstr ""
-#: keystone/exception.py:147
+#: keystone/exception.py:162
#, python-format
msgid ""
"Unable to delete region %(region_id)s because it or its child regions "
"have associated endpoints."
msgstr ""
-#: keystone/exception.py:154
+#: keystone/exception.py:167
msgid ""
"The certificates you requested are not available. It is likely that this "
"server does not use PKI tokens otherwise this is the result of "
"misconfiguration."
msgstr ""
-#: keystone/exception.py:163
-msgid "(Disable debug mode to suppress these details.)"
+#: keystone/exception.py:179
+msgid "(Disable insecure_debug mode to suppress these details.)"
msgstr ""
-#: keystone/exception.py:168
+#: keystone/exception.py:189
#, python-format
msgid "%(message)s %(amendment)s"
msgstr ""
-#: keystone/exception.py:176
+#: keystone/exception.py:197
msgid "The request you have made requires authentication."
msgstr ""
-#: keystone/exception.py:182
+#: keystone/exception.py:203
msgid "Authentication plugin error."
msgstr ""
-#: keystone/exception.py:190
+#: keystone/exception.py:211
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
-#: keystone/exception.py:195
+#: keystone/exception.py:216
msgid "Attempted to authenticate with an unsupported method."
msgstr ""
-#: keystone/exception.py:203
+#: keystone/exception.py:224
msgid "Additional authentications steps required."
msgstr ""
-#: keystone/exception.py:211
+#: keystone/exception.py:232
msgid "You are not authorized to perform the requested action."
msgstr ""
-#: keystone/exception.py:218
+#: keystone/exception.py:239
#, python-format
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr ""
-#: keystone/exception.py:223
+#: keystone/exception.py:244
#, python-format
msgid ""
"Could not change immutable attribute(s) '%(attributes)s' in target "
"%(target)s"
msgstr ""
-#: keystone/exception.py:228
+#: keystone/exception.py:249
#, python-format
msgid ""
"Group membership across backend boundaries is not allowed, group in "
"question is %(group_id)s, user is %(user_id)s"
msgstr ""
-#: keystone/exception.py:234
+#: keystone/exception.py:255
#, python-format
msgid ""
"Invalid mix of entities for policy association - only Endpoint, Service "
@@ -153,229 +153,246 @@ msgid ""
"Service: %(service_id)s, Region: %(region_id)s"
msgstr ""
-#: keystone/exception.py:241
+#: keystone/exception.py:262
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
msgstr ""
-#: keystone/exception.py:245
+#: keystone/exception.py:266
#, python-format
msgid "Could not find: %(target)s"
msgstr ""
-#: keystone/exception.py:251
+#: keystone/exception.py:272
#, python-format
msgid "Could not find endpoint: %(endpoint_id)s"
msgstr ""
-#: keystone/exception.py:258
+#: keystone/exception.py:279
msgid "An unhandled exception has occurred: Could not find metadata."
msgstr ""
-#: keystone/exception.py:263
+#: keystone/exception.py:284
#, python-format
msgid "Could not find policy: %(policy_id)s"
msgstr ""
-#: keystone/exception.py:267
+#: keystone/exception.py:288
msgid "Could not find policy association"
msgstr ""
-#: keystone/exception.py:271
+#: keystone/exception.py:292
#, python-format
msgid "Could not find role: %(role_id)s"
msgstr ""
-#: keystone/exception.py:275
+#: keystone/exception.py:296
+#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr ""
+
+#: keystone/exception.py:300
+#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr ""
+
+#: keystone/exception.py:304
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
"%(actor_id)s, project or domain: %(target_id)s"
msgstr ""
-#: keystone/exception.py:281
+#: keystone/exception.py:310
#, python-format
msgid "Could not find region: %(region_id)s"
msgstr ""
-#: keystone/exception.py:285
+#: keystone/exception.py:314
#, python-format
msgid "Could not find service: %(service_id)s"
msgstr ""
-#: keystone/exception.py:289
+#: keystone/exception.py:318
#, python-format
msgid "Could not find domain: %(domain_id)s"
msgstr ""
-#: keystone/exception.py:293
+#: keystone/exception.py:322
#, python-format
msgid "Could not find project: %(project_id)s"
msgstr ""
-#: keystone/exception.py:297
+#: keystone/exception.py:326
#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr ""
-#: keystone/exception.py:301
+#: keystone/exception.py:330
#, python-format
msgid "Could not find token: %(token_id)s"
msgstr ""
-#: keystone/exception.py:305
+#: keystone/exception.py:334
#, python-format
msgid "Could not find user: %(user_id)s"
msgstr ""
-#: keystone/exception.py:309
+#: keystone/exception.py:338
#, python-format
msgid "Could not find group: %(group_id)s"
msgstr ""
-#: keystone/exception.py:313
+#: keystone/exception.py:342
#, python-format
msgid "Could not find mapping: %(mapping_id)s"
msgstr ""
-#: keystone/exception.py:317
+#: keystone/exception.py:346
#, python-format
msgid "Could not find trust: %(trust_id)s"
msgstr ""
-#: keystone/exception.py:321
+#: keystone/exception.py:350
#, python-format
msgid "No remaining uses for trust: %(trust_id)s"
msgstr ""
-#: keystone/exception.py:325
+#: keystone/exception.py:354
#, python-format
msgid "Could not find credential: %(credential_id)s"
msgstr ""
-#: keystone/exception.py:329
+#: keystone/exception.py:358
#, python-format
msgid "Could not find version: %(version)s"
msgstr ""
-#: keystone/exception.py:333
+#: keystone/exception.py:362
#, python-format
msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
msgstr ""
-#: keystone/exception.py:337
+#: keystone/exception.py:366
#, python-format
msgid "Could not find Identity Provider: %(idp_id)s"
msgstr ""
-#: keystone/exception.py:341
+#: keystone/exception.py:370
#, python-format
msgid "Could not find Service Provider: %(sp_id)s"
msgstr ""
-#: keystone/exception.py:345
+#: keystone/exception.py:374
#, python-format
msgid ""
"Could not find federated protocol %(protocol_id)s for Identity Provider: "
"%(idp_id)s"
msgstr ""
-#: keystone/exception.py:356
+#: keystone/exception.py:385
#, python-format
msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
-#: keystone/exception.py:368
+#: keystone/exception.py:403
#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr ""
-#: keystone/exception.py:376
+#: keystone/exception.py:412
msgid "An unexpected error prevented the server from fulfilling your request."
msgstr ""
-#: keystone/exception.py:379
+#: keystone/exception.py:415
#, python-format
msgid ""
"An unexpected error prevented the server from fulfilling your request: "
"%(exception)s"
msgstr ""
-#: keystone/exception.py:402
+#: keystone/exception.py:433
#, python-format
msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
msgstr ""
-#: keystone/exception.py:407
+#: keystone/exception.py:438
msgid ""
"Expected signing certificates are not available on the server. Please "
"check Keystone configuration."
msgstr ""
-#: keystone/exception.py:413
+#: keystone/exception.py:444
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
msgstr ""
-#: keystone/exception.py:418
+#: keystone/exception.py:449
#, python-format
msgid ""
"Group %(group_id)s returned by mapping %(mapping_id)s was not found in "
"the backend."
msgstr ""
-#: keystone/exception.py:423
+#: keystone/exception.py:454
#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr ""
-#: keystone/exception.py:427
+#: keystone/exception.py:458
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+
+#: keystone/exception.py:464
#, python-format
msgid ""
"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
msgstr ""
-#: keystone/exception.py:434
+#: keystone/exception.py:471
msgid "The action you have requested has not been implemented."
msgstr ""
-#: keystone/exception.py:441
+#: keystone/exception.py:478
msgid "The service you have requested is no longer available on this server."
msgstr ""
-#: keystone/exception.py:448
+#: keystone/exception.py:485
#, python-format
msgid "The Keystone configuration file %(config_file)s could not be found."
msgstr ""
-#: keystone/exception.py:453
+#: keystone/exception.py:490
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap "
"one."
msgstr ""
-#: keystone/exception.py:458
+#: keystone/exception.py:495
#, python-format
msgid ""
"The Keystone domain-specific configuration has specified more than one "
"SQL driver (only one is permitted): %(source)s."
msgstr ""
-#: keystone/exception.py:465
+#: keystone/exception.py:502
#, python-format
msgid ""
"%(mod_name)s doesn't provide database migrations. The migration "
"repository path at %(path)s doesn't exist or isn't a directory."
msgstr ""
-#: keystone/exception.py:472
+#: keystone/exception.py:509
msgid "Token version is unrecognizable or unsupported."
msgstr ""
-#: keystone/exception.py:477
+#: keystone/exception.py:514
#, python-format
msgid ""
"Unable to sign SAML assertion. It is likely that this server does not "
@@ -383,152 +400,183 @@ msgid ""
" %(reason)s"
msgstr ""
-#: keystone/exception.py:485
+#: keystone/exception.py:521
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, "
"if running under HTTPd or Apache, ensure WSGIPassAuthorization is set to "
"On."
msgstr ""
-#: keystone/exception.py:493
+#: keystone/exception.py:528
#, python-format
msgid ""
"Could not determine Identity Provider ID. The configuration option "
"%(issuer_attribute)s was not found in the request environment."
msgstr ""
-#: keystone/notifications.py:274
+#: keystone/exception.py:536
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+
+#: keystone/exception.py:544
+#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr ""
+
+#: keystone/notifications.py:232
#, python-format
msgid "%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
-#: keystone/notifications.py:283
+#: keystone/notifications.py:241
#, python-format
msgid "Method not callable: %s"
msgstr ""
-#: keystone/assignment/controllers.py:99 keystone/identity/controllers.py:71
-#: keystone/resource/controllers.py:88
+#: keystone/assignment/controllers.py:100 keystone/identity/controllers.py:71
+#: keystone/resource/controllers.py:90
msgid "Name field is required and cannot be empty"
msgstr ""
-#: keystone/assignment/controllers.py:155
-#: keystone/assignment/controllers.py:174
+#: keystone/assignment/controllers.py:146
+#: keystone/assignment/controllers.py:163
+#: keystone/assignment/controllers.py:182
msgid "User roles not supported: tenant_id required"
msgstr ""
-#: keystone/assignment/controllers.py:338
-#: keystone/assignment/controllers.py:579
+#: keystone/assignment/controllers.py:567
+#: keystone/assignment/controllers.py:856
msgid "Specify a domain or project, not both"
msgstr ""
-#: keystone/assignment/controllers.py:341
+#: keystone/assignment/controllers.py:570
msgid "Specify one of domain or project"
msgstr ""
-#: keystone/assignment/controllers.py:346
-#: keystone/assignment/controllers.py:584
+#: keystone/assignment/controllers.py:575
+#: keystone/assignment/controllers.py:861
msgid "Specify a user or group, not both"
msgstr ""
-#: keystone/assignment/controllers.py:349
+#: keystone/assignment/controllers.py:578
msgid "Specify one of user or group"
msgstr ""
-#: keystone/assignment/controllers.py:568
+#: keystone/assignment/controllers.py:845
msgid "Combining effective and group filter will always result in an empty list."
msgstr ""
-#: keystone/assignment/controllers.py:573
+#: keystone/assignment/controllers.py:850
msgid ""
"Combining effective, domain and inherited filters will always result in "
"an empty list."
msgstr ""
-#: keystone/assignment/core.py:198
-msgid "Must specify either domain or project"
+#: keystone/assignment/controllers.py:952
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-#: keystone/assignment/core.py:873
-#, python-format
-msgid "Project (%s)"
+#: keystone/assignment/core.py:77
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the "
+"assignment driver will need to be expicitly configured if different than "
+"the default (SQL)."
msgstr ""
-#: keystone/assignment/core.py:875
+#: keystone/assignment/core.py:88
#, python-format
-msgid "Domain (%s)"
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
msgstr ""
-#: keystone/assignment/core.py:877
-msgid "Unknown Target"
+#: keystone/assignment/core.py:179
+msgid "Must specify either domain or project"
msgstr ""
-#: keystone/assignment/backends/ldap.py:91
-msgid "Domain metadata not supported by LDAP"
+#: keystone/assignment/core.py:848
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
msgstr ""
-#: keystone/assignment/backends/ldap.py:397
+#: keystone/assignment/core.py:1058
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
+msgid "Project (%s)"
msgstr ""
-#: keystone/assignment/backends/ldap.py:403
+#: keystone/assignment/core.py:1060
#, python-format
-msgid "Role %s not found"
+msgid "Domain (%s)"
msgstr ""
-#: keystone/assignment/backends/ldap.py:418
-#: keystone/assignment/backends/sql.py:334
-#, python-format
-msgid "Cannot remove role that has not been granted, %s"
+#: keystone/assignment/core.py:1062
+msgid "Unknown Target"
msgstr ""
-#: keystone/assignment/backends/sql.py:410
-#, python-format
-msgid "Unexpected assignment type encountered, %s"
+#: keystone/assignment/core.py:1518
+msgid "Update of `domain_id` is not allowed."
+msgstr ""
+
+#: keystone/assignment/core.py:1743
+msgid "Domain specific roles are not supported in the V8 role driver"
msgstr ""
-#: keystone/assignment/role_backends/ldap.py:61 keystone/catalog/core.py:135
-#: keystone/common/ldap/core.py:1400 keystone/resource/backends/ldap.py:170
+#: keystone/assignment/V8_backends/sql.py:287
+#: keystone/assignment/backends/sql.py:137
#, python-format
-msgid "Duplicate ID, %s."
+msgid "Cannot remove role that has not been granted, %s"
msgstr ""
-#: keystone/assignment/role_backends/ldap.py:69
-#: keystone/common/ldap/core.py:1390
+#: keystone/assignment/V8_backends/sql.py:363
+#: keystone/assignment/backends/sql.py:213
#, python-format
-msgid "Duplicate name, %s."
+msgid "Unexpected assignment type encountered, %s"
msgstr ""
-#: keystone/assignment/role_backends/ldap.py:119
+#: keystone/auth/controllers.py:60
#, python-format
-msgid "Cannot duplicate name %s"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in "
+"favor of its entrypoint from %(namespace)r and may be removed in N."
msgstr ""
-#: keystone/auth/controllers.py:118
+#: keystone/auth/controllers.py:121
#, python-format
msgid ""
"Unable to reconcile identity attribute %(attribute)s as it has "
"conflicting values %(new)s and %(old)s"
msgstr ""
-#: keystone/auth/controllers.py:344 keystone/middleware/core.py:227
+#: keystone/auth/controllers.py:182
+msgid "Domain name cannot contain reserved characters."
+msgstr ""
+
+#: keystone/auth/controllers.py:205
+msgid "Project name cannot contain reserved characters."
+msgstr ""
+
+#: keystone/auth/controllers.py:355 keystone/middleware/auth.py:130
msgid "Scoping to both domain and project is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:347
+#: keystone/auth/controllers.py:358
msgid "Scoping to both domain and trust is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:350
+#: keystone/auth/controllers.py:361
msgid "Scoping to both project and trust is not allowed"
msgstr ""
-#: keystone/auth/controllers.py:520
+#: keystone/auth/controllers.py:530
msgid "User not found"
msgstr ""
-#: keystone/auth/controllers.py:624
+#: keystone/auth/controllers.py:644
msgid "A project-scoped token is required to produce a service catalog."
msgstr ""
@@ -545,75 +593,97 @@ msgstr ""
msgid "auth_type is not Negotiate"
msgstr ""
-#: keystone/auth/plugins/mapped.py:239
+#: keystone/auth/plugins/mapped.py:246
msgid ""
"Could not map user while setting ephemeral user identity. Either mapping "
"rules must specify user id/name or REMOTE_USER environment variable must "
"be set."
msgstr ""
-#: keystone/auth/plugins/oauth1.py:51
+#: keystone/auth/plugins/oauth1.py:46
msgid "Access token is expired"
msgstr ""
-#: keystone/auth/plugins/oauth1.py:65
+#: keystone/auth/plugins/oauth1.py:60
msgid "Could not validate the access token"
msgstr ""
-#: keystone/auth/plugins/password.py:45
+#: keystone/auth/plugins/password.py:39
msgid "Invalid username or password"
msgstr ""
-#: keystone/auth/plugins/token.py:70 keystone/token/controllers.py:162
+#: keystone/auth/plugins/token.py:70 keystone/token/controllers.py:160
msgid "rescope a scoped token"
msgstr ""
-#: keystone/catalog/controllers.py:175
+#: keystone/auth/plugins/totp.py:96
+msgid "Invalid username or TOTP passcode"
+msgstr ""
+
+#: keystone/catalog/controllers.py:215
#, python-format
msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
msgstr ""
-#: keystone/cmd/cli.py:286
+#: keystone/catalog/core.py:149 keystone/common/ldap/core.py:1411
+#, python-format
+msgid "Duplicate ID, %s."
+msgstr ""
+
+#: keystone/catalog/backends/sql.py:389
+#, python-format
+msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+msgstr ""
+
+#: keystone/catalog/backends/sql.py:492
+msgid "Endpoint Group Project Association not found"
+msgstr ""
+
+#: keystone/cmd/cli.py:173
+msgid "Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+
+#: keystone/cmd/cli.py:586
msgid "At least one option must be provided"
msgstr ""
-#: keystone/cmd/cli.py:293
+#: keystone/cmd/cli.py:593
msgid "--all option cannot be mixed with other options"
msgstr ""
-#: keystone/cmd/cli.py:300
+#: keystone/cmd/cli.py:600
#, python-format
msgid "Unknown domain '%(name)s' specified by --domain-name"
msgstr ""
-#: keystone/cmd/cli.py:355 keystone/tests/unit/test_cli.py:215
+#: keystone/cmd/cli.py:679 keystone/tests/unit/test_cli.py:411
msgid "At least one option must be provided, use either --all or --domain-name"
msgstr ""
-#: keystone/cmd/cli.py:361 keystone/tests/unit/test_cli.py:231
+#: keystone/cmd/cli.py:685 keystone/tests/unit/test_cli.py:427
msgid "The --all option cannot be used with the --domain-name option"
msgstr ""
-#: keystone/cmd/cli.py:387 keystone/tests/unit/test_cli.py:248
+#: keystone/cmd/cli.py:710 keystone/tests/unit/test_cli.py:444
#, python-format
msgid ""
"Invalid domain name: %(domain)s found in config file name: %(file)s - "
"ignoring this file."
msgstr ""
-#: keystone/cmd/cli.py:395 keystone/tests/unit/test_cli.py:189
+#: keystone/cmd/cli.py:718 keystone/tests/unit/test_cli.py:385
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
msgstr ""
-#: keystone/cmd/cli.py:409
+#: keystone/cmd/cli.py:732
#, python-format
msgid "Error parsing configuration file for domain: %(domain)s, file: %(file)s."
msgstr ""
-#: keystone/cmd/cli.py:442
+#: keystone/cmd/cli.py:765
#, python-format
msgid ""
"To get a more detailed information on this error, re-run this command for"
@@ -621,60 +691,37 @@ msgid ""
"--domain-name %s"
msgstr ""
-#: keystone/cmd/cli.py:460
+#: keystone/cmd/cli.py:783
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr ""
-#: keystone/cmd/cli.py:493
+#: keystone/cmd/cli.py:803
msgid ""
"Unable to access the keystone database, please check it is configured "
"correctly."
msgstr ""
-#: keystone/cmd/cli.py:559
+#: keystone/cmd/cli.py:866
#, python-format
msgid "Error while parsing rules %(path)s: %(err)s"
msgstr ""
-#: keystone/cmd/cli.py:568
+#: keystone/cmd/cli.py:875
#, python-format
msgid "Error while opening file %(path)s: %(err)s"
msgstr ""
-#: keystone/cmd/cli.py:578
+#: keystone/cmd/cli.py:885
#, python-format
msgid "Error while parsing line: '%(line)s': %(err)s"
msgstr ""
-#: keystone/common/authorization.py:57 keystone/common/wsgi.py:66
+#: keystone/common/authorization.py:61 keystone/common/wsgi.py:67
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr ""
-#: keystone/common/base64utils.py:71
-msgid "pad must be single character"
-msgstr ""
-
-#: keystone/common/base64utils.py:220
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-
-#: keystone/common/base64utils.py:224
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
-
-#: keystone/common/base64utils.py:230
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr ""
-
-#: keystone/common/base64utils.py:249 keystone/common/base64utils.py:270
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr ""
-
#: keystone/common/clean.py:24
#, python-format
msgid "%s cannot be empty."
@@ -695,145 +742,119 @@ msgstr ""
msgid "%(property_name)s is not a %(display_expected_type)s"
msgstr ""
-#: keystone/common/controller.py:230 keystone/common/controller.py:246
-#: keystone/token/providers/common.py:638
-msgid "Non-default domain is not supported"
-msgstr ""
-
-#: keystone/common/controller.py:329 keystone/common/controller.py:358
-#: keystone/identity/core.py:596 keystone/resource/core.py:788
-#: keystone/resource/backends/ldap.py:66 keystone/resource/backends/ldap.py:74
+#: keystone/common/controller.py:349 keystone/common/controller.py:377
+#: keystone/identity/core.py:595 keystone/resource/core.py:1145
#, python-format
msgid "Expected dict or list: %s"
msgstr ""
-#: keystone/common/controller.py:371
+#: keystone/common/controller.py:390
msgid "Marker could not be found"
msgstr ""
-#: keystone/common/controller.py:382
+#: keystone/common/controller.py:401
msgid "Invalid limit value"
msgstr ""
-#: keystone/common/controller.py:690
+#: keystone/common/controller.py:705
msgid "Cannot change Domain ID"
msgstr ""
-#: keystone/common/dependency.py:64
+#: keystone/common/controller.py:751
+msgid ""
+"You have tried to create a resource using the admin token. As this token "
+"is not within a domain you must explicitly include a domain for this "
+"resource to belong to."
+msgstr ""
+
+#: keystone/common/dependency.py:65
#, python-format
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr ""
+#: keystone/common/driver_hints.py:38
+msgid ""
+"Cannot truncate a driver call without hints list as first parameter after"
+" self "
+msgstr ""
+
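The driver_hints message above encodes a calling convention: a backend method wrapped by the truncation helper must take the hints list as its first parameter after self, because the wrapper inspects that argument to apply the list limit. A minimal sketch of a conforming method, assuming the truncated decorator that keystone.common.driver_hints provides (the driver and method names are illustrative):

    from keystone.common import driver_hints

    class ExampleDriver(object):
        @driver_hints.truncated
        def list_users(self, hints):
            # 'hints' must be the first parameter after self; otherwise the
            # wrapper raises the "Cannot truncate a driver call" error above.
            return []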
#: keystone/common/json_home.py:76
#, python-format
msgid "Unexpected status requested for JSON Home response, %s"
msgstr ""
-#: keystone/common/tokenless_auth.py:74
+#: keystone/common/manager.py:82
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of "
+"its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+
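The manager.py warning above concerns how backend drivers are loaded: since Liberty, keystone resolves short entrypoint names through stevedore namespaces rather than importing a class by its dotted path. A hedged keystone.conf sketch (the class path shown is the historical SQL identity backend):

    [identity]
    # Deprecated: direct import of the driver class
    #driver = keystone.identity.backends.sql.Identity
    # Preferred: the entrypoint name registered in the keystone.identity namespace
    driver = sql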
+#: keystone/common/tokenless_auth.py:73
msgid "Neither Project Domain ID nor Project Domain Name was provided."
msgstr ""
-#: keystone/common/tokenless_auth.py:166
+#: keystone/common/tokenless_auth.py:165
msgid ""
"User auth cannot be built due to missing either user id, or user name "
"with domain id, or user name with domain name."
msgstr ""
-#: keystone/common/utils.py:166 keystone/credential/controllers.py:44
+#: keystone/common/utils.py:63
+msgid "Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+
+#: keystone/common/utils.py:192 keystone/credential/controllers.py:44
msgid "Invalid blob in credential"
msgstr ""
-#: keystone/common/wsgi.py:206
+#: keystone/common/wsgi.py:208
msgid "Query string is not UTF-8 encoded"
msgstr ""
-#: keystone/common/wsgi.py:343
+#: keystone/common/wsgi.py:341
#, python-format
msgid "%s field is required and cannot be empty"
msgstr ""
-#: keystone/common/wsgi.py:355
+#: keystone/common/wsgi.py:353
#, python-format
msgid "%s field(s) cannot be empty"
msgstr ""
-#: keystone/common/wsgi.py:566
+#: keystone/common/wsgi.py:548
msgid "The resource could not be found."
msgstr ""
-#: keystone/common/cache/_memcache_pool.py:124
-#, python-format
-msgid "Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-
-#: keystone/common/cache/core.py:132
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:231
-msgid "db_hosts value is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:236
-msgid "database db_name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:241
-msgid "cache_collection name is required"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:252
-msgid "integer value expected for w (write concern attribute)"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:260
-msgid "replicaset_name required when use_replica is True"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:275
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:301
-msgid "no ssl support available"
-msgstr ""
-
-#: keystone/common/cache/backends/mongo.py:310
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\","
-" \"REQUIRED\""
-msgstr ""
-
-#: keystone/common/kvs/core.py:70
+#: keystone/common/kvs/core.py:88
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr ""
-#: keystone/common/kvs/core.py:105
+#: keystone/common/kvs/core.py:123
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
msgstr ""
-#: keystone/common/kvs/core.py:144
+#: keystone/common/kvs/core.py:166
#, python-format
msgid "Key Value Store not configured: %s"
msgstr ""
-#: keystone/common/kvs/core.py:197
+#: keystone/common/kvs/core.py:219
msgid "`key_mangler` option must be a function reference"
msgstr ""
-#: keystone/common/kvs/core.py:352
+#: keystone/common/kvs/core.py:376
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
msgstr ""
-#: keystone/common/kvs/core.py:356
+#: keystone/common/kvs/core.py:380
msgid "Must be called within an active lock context."
msgstr ""
-#: keystone/common/kvs/backends/memcached.py:69
+#: keystone/common/kvs/backends/memcached.py:68
#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr ""
@@ -849,526 +870,678 @@ msgstr ""
msgid "`key_mangler` functions must be callable."
msgstr ""
-#: keystone/common/ldap/core.py:193
+#: keystone/common/ldap/core.py:199
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
msgstr ""
-#: keystone/common/ldap/core.py:203
+#: keystone/common/ldap/core.py:209
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
-#: keystone/common/ldap/core.py:215
+#: keystone/common/ldap/core.py:221
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
msgstr ""
-#: keystone/common/ldap/core.py:588
+#: keystone/common/ldap/core.py:591
msgid "Invalid TLS / LDAPS combination"
msgstr ""
-#: keystone/common/ldap/core.py:593
+#: keystone/common/ldap/core.py:596
#, python-format
msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
msgstr ""
-#: keystone/common/ldap/core.py:603
+#: keystone/common/ldap/core.py:606
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
msgstr ""
-#: keystone/common/ldap/core.py:615
+#: keystone/common/ldap/core.py:618
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
msgstr ""
-#: keystone/common/ldap/core.py:1325
+#: keystone/common/ldap/core.py:1333
#, python-format
msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
msgstr ""
-#: keystone/common/ldap/core.py:1369
+#: keystone/common/ldap/core.py:1378
#, python-format
msgid "LDAP %s create"
msgstr ""
-#: keystone/common/ldap/core.py:1374
+#: keystone/common/ldap/core.py:1383
#, python-format
msgid "LDAP %s update"
msgstr ""
-#: keystone/common/ldap/core.py:1379
+#: keystone/common/ldap/core.py:1388
#, python-format
msgid "LDAP %s delete"
msgstr ""
-#: keystone/common/ldap/core.py:1522
+#: keystone/common/ldap/core.py:1400
+#, python-format
+msgid "Duplicate name, %s."
+msgstr ""
+
+#: keystone/common/ldap/core.py:1557
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by "
"configuration."
msgstr ""
-#: keystone/common/ldap/core.py:1533
+#: keystone/common/ldap/core.py:1568
#, python-format
msgid "Cannot change %(option_name)s %(attr)s"
msgstr ""
-#: keystone/common/ldap/core.py:1620
+#: keystone/common/ldap/core.py:1655
#, python-format
msgid "Member %(member)s is already a member of group %(group)s"
msgstr ""
-#: keystone/common/sql/core.py:219
-msgid ""
-"Cannot truncate a driver call without hints list as first parameter after"
-" self "
-msgstr ""
-
-#: keystone/common/sql/core.py:445
+#: keystone/common/sql/core.py:413
msgid "Duplicate Entry"
msgstr ""
-#: keystone/common/sql/core.py:461
+#: keystone/common/sql/core.py:429
#, python-format
msgid "An unexpected error occurred when trying to store %s"
msgstr ""
-#: keystone/common/sql/migration_helpers.py:171
-#: keystone/common/sql/migration_helpers.py:213
+#: keystone/common/sql/migration_helpers.py:167
+msgid "Unable to downgrade schema"
+msgstr ""
+
+#: keystone/common/sql/migration_helpers.py:185
+#: keystone/common/sql/migration_helpers.py:231
#, python-format
msgid "%s extension does not exist."
msgstr ""
-#: keystone/common/validation/__init__.py:45
+#: keystone/common/validation/__init__.py:44
#, python-format
msgid ""
"validated expected to find %(param_name)r in function signature for "
"%(func_name)r."
msgstr ""
-#: keystone/common/validation/validators.py:54
+#: keystone/common/validation/validators.py:53
#, python-format
msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
msgstr ""
-#: keystone/contrib/ec2/controllers.py:324
+#: keystone/contrib/admin_crud/core.py:28
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section"
+" in keystone-paste.ini accordingly, as it will be removed in the O "
+"release."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:80 keystone/contrib/s3/core.py:111
+#: keystone/contrib/s3/core.py:114
+msgid "Invalid EC2 signature."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:83
+#: keystone/contrib/ec2/controllers.py:87
+#: keystone/contrib/ec2/controllers.py:125
+msgid "EC2 signature not supplied."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:159
+msgid "User not valid for tenant."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:260
+msgid "EC2 access key not found."
+msgstr ""
+
+#: keystone/contrib/ec2/controllers.py:326
msgid "Token belongs to another user"
msgstr ""
-#: keystone/contrib/ec2/controllers.py:352
+#: keystone/contrib/ec2/controllers.py:354
msgid "Credential belongs to another user"
msgstr ""
-#: keystone/contrib/endpoint_filter/backends/sql.py:70
-#, python-format
-msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
+#: keystone/contrib/endpoint_filter/routers.py:29
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
msgstr ""
-#: keystone/contrib/endpoint_filter/backends/sql.py:181
-msgid "Endpoint Group Project Association not found"
+#: keystone/contrib/federation/routers.py:27
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section "
+"in keystone-paste.ini accordingly, as it will be removed in the O "
+"release."
+msgstr ""
+
+#: keystone/contrib/oauth1/routers.py:29
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is "
+"now always available. Update the [pipeline:api_v3] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+
+#: keystone/contrib/revoke/routers.py:27
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is "
+"now always available. Update the [pipeline:api_v3] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
msgstr ""
-#: keystone/contrib/federation/controllers.py:268
+#: keystone/contrib/s3/core.py:82
+msgid "Credential signature mismatch"
+msgstr ""
+
+#: keystone/contrib/simple_cert/routers.py:27
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token "
+"providers are now deprecated and simple_cert was only used insupport of "
+"these token providers. Update the [pipeline:api_v3] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+
+#: keystone/contrib/user_crud/core.py:28
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud "
+"extension is now always available. Updatethe [pipeline:public_api] "
+"section in keystone-paste.ini accordingly, as it will be removed in the O"
+" release."
+msgstr ""
+
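Taken together, the deprecation messages above all prescribe the same operator action: delete the now-redundant extension filters from keystone-paste.ini. A hedged sketch of the resulting [pipeline:api_v3] section, assuming the stock Mitaka layout (a real deployment may carry a different middleware set):

    [pipeline:api_v3]
    # the *_extension filters named in the messages above no longer appear here
    pipeline = cors sizelimit osprofiler url_normalize request_id
        build_auth_context token_auth json_body ec2_extension_v3 s3_extension
        service_v3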
+#: keystone/endpoint_policy/core.py:264
+#, python-format
+msgid "No policy is associated with endpoint %(endpoint_id)s."
+msgstr ""
+
+#: keystone/federation/controllers.py:269
msgid "Request must have an origin query parameter"
msgstr ""
-#: keystone/contrib/federation/controllers.py:273
+#: keystone/federation/controllers.py:278
#, python-format
msgid "%(host)s is not a trusted dashboard host"
msgstr ""
-#: keystone/contrib/federation/controllers.py:304
+#: keystone/federation/controllers.py:309
msgid "Missing entity ID from environment"
msgstr ""
-#: keystone/contrib/federation/controllers.py:353
+#: keystone/federation/controllers.py:357
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
-#: keystone/contrib/federation/idp.py:477
+#: keystone/federation/idp.py:486
#, python-format
msgid "Cannot open certificate %(cert_file)s. Reason: %(reason)s"
msgstr ""
-#: keystone/contrib/federation/idp.py:544
+#: keystone/federation/idp.py:552
msgid "Ensure configuration option idp_entity_id is set."
msgstr ""
-#: keystone/contrib/federation/idp.py:547
+#: keystone/federation/idp.py:555
msgid "Ensure configuration option idp_sso_endpoint is set."
msgstr ""
-#: keystone/contrib/federation/idp.py:567
+#: keystone/federation/idp.py:574
msgid ""
"idp_contact_type must be one of: [technical, other, support, "
"administrative or billing."
msgstr ""
-#: keystone/contrib/federation/utils.py:177
+#: keystone/federation/utils.py:234
msgid "Federation token is expired"
msgstr ""
-#: keystone/contrib/federation/utils.py:230
+#: keystone/federation/utils.py:286
msgid "Could not find Identity Provider identifier in environment"
msgstr ""
-#: keystone/contrib/federation/utils.py:234
+#: keystone/federation/utils.py:290
msgid ""
"Incoming identity provider identifier not included among the accepted "
"identifiers."
msgstr ""
-#: keystone/contrib/federation/utils.py:522
+#: keystone/federation/utils.py:585
#, python-format
msgid "User type %s not supported"
msgstr ""
-#: keystone/contrib/federation/utils.py:557
+#: keystone/federation/utils.py:605
+msgid ""
+"Could not map any federated user properties to identity values. Check "
+"debug logs or the mapping used for additional details."
+msgstr ""
+
+#: keystone/federation/utils.py:629
#, python-format
msgid ""
"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords "
"must be specified."
msgstr ""
-#: keystone/contrib/federation/utils.py:766
+#: keystone/federation/utils.py:854
#, python-format
msgid "Identity Provider %(idp)s is disabled"
msgstr ""
-#: keystone/contrib/federation/utils.py:774
+#: keystone/federation/utils.py:862
#, python-format
msgid "Service Provider %(sp)s is disabled"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:96
-msgid "Cannot change consumer secret"
+#: keystone/federation/backends/sql.py:182
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr ""
+
+#: keystone/federation/backends/sql.py:184
+#, python-format
+msgid "Duplicate entry: %s"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:128
-msgid "Cannot list request tokens with a token issued via delegation."
+#: keystone/identity/controllers.py:74
+msgid "Enabled field must be a boolean"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:189
-#: keystone/contrib/oauth1/backends/sql.py:270
-msgid "User IDs do not match"
+#: keystone/identity/controllers.py:103
+msgid "Enabled field should be a boolean"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:196
-msgid "Could not find role"
+#: keystone/identity/core.py:265
+#, python-format
+msgid "Config API entity at /domains/%s/config"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:245
-msgid "Invalid signature"
+#: keystone/identity/core.py:271
+#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, "
+"the last domain that appears to have had it is %(last_domain)s, giving up"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:296
-#: keystone/contrib/oauth1/controllers.py:374
-msgid "Request token is expired"
+#: keystone/identity/core.py:450 keystone/identity/backends/ldap.py:62
+#: keystone/identity/backends/ldap.py:64 keystone/identity/backends/ldap.py:70
+#: keystone/identity/backends/ldap.py:72 keystone/identity/backends/sql.py:210
+#: keystone/identity/backends/sql.py:212
+msgid "Invalid user / password"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:310
-msgid "There should not be any non-oauth parameters"
+#: keystone/identity/core.py:895
+#, python-format
+msgid "User is disabled: %s"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:314
-msgid "provided consumer key does not match stored consumer key"
+#: keystone/identity/core.py:928 keystone/resource/core.py:375
+msgid "update of domain_id is deprecated as of Mitaka and will be removed in O."
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:318
-msgid "provided verifier does not match stored verifier"
+#: keystone/identity/core.py:947
+msgid "Cannot change user ID"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:322
-msgid "provided request key does not match stored request key"
+#: keystone/identity/backends/ldap.py:35
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka "
+"release in favor of read-only identity LDAP access. It will be removed in"
+" the \"O\" release."
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:326
-msgid "Request Token does not have an authorizing user id"
+#: keystone/identity/backends/ldap.py:106
+msgid "Cannot change user name"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:363
-msgid "Cannot authorize a request token with a token issued via delegation."
+#: keystone/identity/backends/ldap.py:214 keystone/identity/backends/sql.py:292
+#: keystone/identity/backends/sql.py:310
+#, python-format
+msgid "User '%(user_id)s' not found in group '%(group_id)s'"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:390
-msgid "authorizing user does not have role required"
+#: keystone/identity/backends/ldap.py:366
+#, python-format
+msgid "User %(user_id)s is already a member of group %(group_id)s"
msgstr ""
-#: keystone/contrib/oauth1/controllers.py:403
-msgid "User is not a member of the requested project"
+#: keystone/models/token_model.py:62
+msgid "Found invalid token: scoped to both project and domain."
msgstr ""
-#: keystone/contrib/oauth1/backends/sql.py:91
-msgid "Consumer not found"
+#: keystone/oauth1/controllers.py:126
+msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
-#: keystone/contrib/oauth1/backends/sql.py:186
-msgid "Request token not found"
+#: keystone/oauth1/controllers.py:187 keystone/oauth1/backends/sql.py:256
+msgid "User IDs do not match"
msgstr ""
-#: keystone/contrib/oauth1/backends/sql.py:250
-msgid "Access token not found"
+#: keystone/oauth1/controllers.py:243
+msgid "Invalid signature"
msgstr ""
-#: keystone/contrib/revoke/controllers.py:33
-#, python-format
-msgid "invalid date format %s"
+#: keystone/oauth1/controllers.py:294 keystone/oauth1/controllers.py:372
+msgid "Request token is expired"
msgstr ""
-#: keystone/contrib/revoke/core.py:159
-msgid ""
-"The revoke call must not have both domain_id and project_id. This is a "
-"bug in the Keystone server. The current request is aborted."
+#: keystone/oauth1/controllers.py:308
+msgid "There should not be any non-oauth parameters"
msgstr ""
-#: keystone/contrib/revoke/core.py:227 keystone/token/provider.py:197
-#: keystone/token/provider.py:221 keystone/token/provider.py:287
-#: keystone/token/provider.py:294
-msgid "Failed to validate token"
+#: keystone/oauth1/controllers.py:312
+msgid "provided consumer key does not match stored consumer key"
msgstr ""
-#: keystone/endpoint_policy/core.py:261
-#, python-format
-msgid "No policy is associated with endpoint %(endpoint_id)s."
+#: keystone/oauth1/controllers.py:316
+msgid "provided verifier does not match stored verifier"
msgstr ""
-#: keystone/identity/controllers.py:74
-msgid "Enabled field must be a boolean"
+#: keystone/oauth1/controllers.py:320
+msgid "provided request key does not match stored request key"
msgstr ""
-#: keystone/identity/controllers.py:100
-msgid "Enabled field should be a boolean"
+#: keystone/oauth1/controllers.py:324
+msgid "Request Token does not have an authorizing user id"
msgstr ""
-#: keystone/identity/core.py:127
-#, python-format
-msgid "Database at /domains/%s/config"
+#: keystone/oauth1/controllers.py:361
+msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
-#: keystone/identity/core.py:271
-#, python-format
-msgid "Config API entity at /domains/%s/config"
+#: keystone/oauth1/controllers.py:388
+msgid "authorizing user does not have role required"
msgstr ""
-#: keystone/identity/core.py:277
-#, python-format
-msgid ""
-"Exceeded attempts to register domain %(domain)s to use the SQL driver, "
-"the last domain that appears to have had it is %(last_domain)s, giving "
-"up"
+#: keystone/oauth1/controllers.py:401
+msgid "User is not a member of the requested project"
msgstr ""
-#: keystone/identity/core.py:451 keystone/identity/backends/ldap.py:58
-#: keystone/identity/backends/ldap.py:60 keystone/identity/backends/ldap.py:66
-#: keystone/identity/backends/ldap.py:68 keystone/identity/backends/sql.py:104
-#: keystone/identity/backends/sql.py:106
-msgid "Invalid user / password"
+#: keystone/oauth1/backends/sql.py:91
+msgid "Consumer not found"
msgstr ""
-#: keystone/identity/core.py:861
-#, python-format
-msgid "User is disabled: %s"
+#: keystone/oauth1/backends/sql.py:177
+msgid "Request token not found"
msgstr ""
-#: keystone/identity/core.py:903
-msgid "Cannot change user ID"
+#: keystone/oauth1/backends/sql.py:237
+msgid "Access token not found"
msgstr ""
-#: keystone/identity/backends/ldap.py:98
-msgid "Cannot change user name"
+#: keystone/resource/controllers.py:94
+msgid "The creation of projects acting as domains is not allowed in v2."
msgstr ""
-#: keystone/identity/backends/ldap.py:187 keystone/identity/backends/sql.py:188
-#: keystone/identity/backends/sql.py:206
-#, python-format
-msgid "User '%(user_id)s' not found in group '%(group_id)s'"
+#: keystone/resource/controllers.py:284
+msgid ""
+"Cannot use parents_as_list and parents_as_ids query params at the same "
+"time."
+msgstr ""
+
+#: keystone/resource/controllers.py:290
+msgid ""
+"Cannot use subtree_as_list and subtree_as_ids query params at the same "
+"time."
msgstr ""
-#: keystone/identity/backends/ldap.py:338
+#: keystone/resource/core.py:106
#, python-format
-msgid "User %(user_id)s is already a member of group %(group_id)s"
+msgid "Max hierarchy depth reached for %s branch."
msgstr ""
-#: keystone/models/token_model.py:61
-msgid "Found invalid token: scoped to both project and domain."
+#: keystone/resource/core.py:123
+msgid "Multiple domains are not supported"
msgstr ""
-#: keystone/resource/controllers.py:218
-msgid "The creation of projects acting as domains is not allowed yet."
+#: keystone/resource/core.py:129
+msgid "only root projects are allowed to act as domains."
msgstr ""
-#: keystone/resource/controllers.py:252
+#: keystone/resource/core.py:152
+#, python-format
msgid ""
-"Cannot use parents_as_list and parents_as_ids query params at the same "
-"time."
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
msgstr ""
-#: keystone/resource/controllers.py:258
+#: keystone/resource/core.py:163
+#, python-format
msgid ""
-"Cannot use subtree_as_list and subtree_as_ids query params at the same "
-"time."
+"Cannot create project, since it specifies its owner as domain "
+"%(domain_id)s, but specifies a parent in a different domain "
+"(%(parent_domain_id)s)."
msgstr ""
-#: keystone/resource/core.py:81
+#: keystone/resource/core.py:183
#, python-format
-msgid "max hierarchy depth reached for %s branch."
+msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
-#: keystone/resource/core.py:100
-msgid "cannot create a project within a different domain than its parents."
+#: keystone/resource/core.py:191
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: "
+"%(chars)s"
msgstr ""
-#: keystone/resource/core.py:104
+#: keystone/resource/core.py:201
#, python-format
-msgid "cannot create a project in a branch containing a disabled project: %s"
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+
+#: keystone/resource/core.py:205
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
msgstr ""
-#: keystone/resource/core.py:126
+#: keystone/resource/core.py:262
#, python-format
msgid "Domain is disabled: %s"
msgstr ""
-#: keystone/resource/core.py:143
+#: keystone/resource/core.py:279
#, python-format
msgid "Domain cannot be named %s"
msgstr ""
-#: keystone/resource/core.py:146
+#: keystone/resource/core.py:282
#, python-format
msgid "Domain cannot have ID %s"
msgstr ""
-#: keystone/resource/core.py:158
+#: keystone/resource/core.py:297
#, python-format
msgid "Project is disabled: %s"
msgstr ""
-#: keystone/resource/core.py:178
+#: keystone/resource/core.py:304
#, python-format
-msgid "cannot enable project %s since it has disabled parents"
+msgid "Cannot enable project %s since it has disabled parents"
msgstr ""
-#: keystone/resource/core.py:186
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-
-#: keystone/resource/core.py:197
+#: keystone/resource/core.py:340
msgid "Update of `parent_id` is not allowed."
msgstr ""
-#: keystone/resource/core.py:202
+#: keystone/resource/core.py:345
msgid "Update of `is_domain` is not allowed."
msgstr ""
-#: keystone/resource/core.py:229
+#: keystone/resource/core.py:359
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+
+#: keystone/resource/core.py:366
+msgid "Update of domain_id is only allowed for root projects."
+msgstr ""
+
+#: keystone/resource/core.py:371
+msgid "Cannot update domain_id of a project that has children."
+msgstr ""
+
+#: keystone/resource/core.py:396
#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
msgstr ""
-#: keystone/resource/core.py:256
-msgid "Project field is required and cannot be empty."
+#: keystone/resource/core.py:443
+msgid "Cascade update is only allowed for enabled attribute."
msgstr ""
-#: keystone/resource/core.py:392
-msgid "Multiple domains are not supported"
+#: keystone/resource/core.py:507
+#, python-format
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
+msgstr ""
+
+#: keystone/resource/core.py:513
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use"
+" the cascade option if you want to delete a whole subtree."
+msgstr ""
+
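The two cascade messages above describe subtree semantics in the resource API: a non-leaf project can only be deleted when the caller explicitly asks for the whole subtree, and a cascaded update may change only the enabled attribute. Assuming the option is exposed as a cascade query parameter on the v3 projects resource, a request sketch with placeholder values is:

    DELETE /v3/projects/{project_id}?cascade=true
    X-Auth-Token: {admin_token}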
+#: keystone/resource/core.py:526
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
msgstr ""
-#: keystone/resource/core.py:445
-msgid "delete the default domain"
+#: keystone/resource/core.py:554
+msgid "Project field is required and cannot be empty."
msgstr ""
-#: keystone/resource/core.py:456
-msgid "cannot delete a domain that is enabled, please disable it first."
+#: keystone/resource/core.py:795
+msgid "Cannot delete a domain that is enabled, please disable it first."
msgstr ""
-#: keystone/resource/core.py:876
+#: keystone/resource/core.py:1570
msgid "No options specified"
msgstr ""
-#: keystone/resource/core.py:882
+#: keystone/resource/core.py:1576
#, python-format
msgid ""
"The value of group %(group)s specified in the config should be a "
"dictionary of options"
msgstr ""
-#: keystone/resource/core.py:906
+#: keystone/resource/core.py:1600
#, python-format
msgid ""
"Option %(option)s found with no group specified while checking domain "
"configuration request"
msgstr ""
-#: keystone/resource/core.py:913
+#: keystone/resource/core.py:1607
#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
-#: keystone/resource/core.py:920
+#: keystone/resource/core.py:1614
#, python-format
msgid ""
"Option %(option)s in group %(group)s is not supported for domain specific"
" configurations"
msgstr ""
-#: keystone/resource/core.py:973
+#: keystone/resource/core.py:1666
msgid "An unexpected error occurred when retrieving domain configs"
msgstr ""
-#: keystone/resource/core.py:1052 keystone/resource/core.py:1136
-#: keystone/resource/core.py:1207 keystone/resource/config_backends/sql.py:76
+#: keystone/resource/core.py:1745 keystone/resource/core.py:1828
+#: keystone/resource/core.py:1898 keystone/resource/config_backends/sql.py:76
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr ""
-#: keystone/resource/core.py:1055 keystone/resource/core.py:1141
-#: keystone/resource/core.py:1203
+#: keystone/resource/core.py:1748 keystone/resource/core.py:1833
+#: keystone/resource/core.py:1894
#, python-format
msgid "group %(group)s"
msgstr ""
-#: keystone/resource/core.py:1057
+#: keystone/resource/core.py:1750
msgid "any options"
msgstr ""
-#: keystone/resource/core.py:1101
+#: keystone/resource/core.py:1793
#, python-format
msgid ""
"Trying to update option %(option)s in group %(group)s, so that, and only "
"that, option must be specified in the config"
msgstr ""
-#: keystone/resource/core.py:1106
+#: keystone/resource/core.py:1798
#, python-format
msgid ""
"Trying to update group %(group)s, so that, and only that, group must be "
"specified in the config"
msgstr ""
-#: keystone/resource/core.py:1115
+#: keystone/resource/core.py:1807
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
"%(group_other)s instead"
msgstr ""
-#: keystone/resource/core.py:1122
+#: keystone/resource/core.py:1814
#, python-format
msgid ""
"Trying to update option %(option)s in group %(group)s, but config "
"provided contains option %(option_other)s instead"
msgstr ""
-#: keystone/resource/backends/ldap.py:88
-msgid "LDAP does not support projects with is_domain flag enabled"
+#: keystone/resource/core.py:2006
+#, python-format
+msgid "Group %s not found in config"
msgstr ""
-#: keystone/resource/backends/ldap.py:172
-#: keystone/resource/backends/ldap.py:180
-#: keystone/resource/backends/ldap.py:184
-msgid "Domains are read-only against LDAP"
+#: keystone/resource/core.py:2016
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not "
+"supported"
+msgstr ""
+
+#: keystone/revoke/controllers.py:33
+#, python-format
+msgid "invalid date format %s"
+msgstr ""
+
+#: keystone/revoke/core.py:156
+msgid ""
+"The revoke call must not have both domain_id and project_id. This is a "
+"bug in the Keystone server. The current request is aborted."
+msgstr ""
+
+#: keystone/revoke/core.py:226 keystone/token/provider.py:217
+#: keystone/token/provider.py:256 keystone/token/provider.py:336
+#: keystone/token/provider.py:343
+msgid "Failed to validate token"
msgstr ""
#: keystone/server/eventlet.py:77
@@ -1383,157 +1556,154 @@ msgstr ""
msgid "Failed to start the %(name)s server"
msgstr ""
-#: keystone/token/controllers.py:391
+#: keystone/token/controllers.py:372
+msgid "Tenant name cannot contain reserved characters."
+msgstr ""
+
+#: keystone/token/controllers.py:392
#, python-format
msgid "Project ID not found: %(t_id)s"
msgstr ""
-#: keystone/token/controllers.py:395
+#: keystone/token/controllers.py:396
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr ""
-#: keystone/token/controllers.py:414 keystone/token/controllers.py:417
+#: keystone/token/controllers.py:415 keystone/token/controllers.py:418
msgid "Token does not belong to specified tenant."
msgstr ""
-#: keystone/token/persistence/backends/kvs.py:132
-#, python-format
-msgid "Unknown token version %s"
-msgstr ""
-
-#: keystone/token/providers/common.py:53
-msgid "Domains are not supported by the v2 API. Please use the v3 API instead."
+#: keystone/token/provider.py:269 keystone/token/provider.py:293
+msgid "No token in the request"
msgstr ""
-#: keystone/token/providers/common.py:63
-#, python-format
-msgid ""
-"Project not found in the default domain (please use the v3 API instead): "
-"%s"
-msgstr ""
-
-#: keystone/token/providers/common.py:82
+#: keystone/token/persistence/backends/kvs.py:132
#, python-format
-msgid "User not found in the default domain (please use the v3 API instead): %s"
+msgid "Unknown token version %s"
msgstr ""
-#: keystone/token/providers/common.py:299
-#: keystone/token/providers/common.py:404
+#: keystone/token/providers/common.py:313
+#: keystone/token/providers/common.py:445
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
msgstr ""
-#: keystone/token/providers/common.py:304
-#: keystone/token/providers/common.py:409
+#: keystone/token/providers/common.py:318
+#: keystone/token/providers/common.py:450
#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr ""
-#: keystone/token/providers/common.py:331
+#: keystone/token/providers/common.py:345
msgid "Trustor is disabled."
msgstr ""
-#: keystone/token/providers/common.py:395
+#: keystone/token/providers/common.py:434
msgid "Trustee has no delegated roles."
msgstr ""
-#: keystone/token/providers/common.py:456
+#: keystone/token/providers/common.py:496
#, python-format
msgid "Invalid audit info data type: %(data)s (%(type)s)"
msgstr ""
-#: keystone/token/providers/common.py:484
-msgid "User is not a trustee."
+#: keystone/token/providers/common.py:560
+#: keystone/token/providers/common.py:587
+msgid "The configured token provider does not support bind authentication."
msgstr ""
-#: keystone/token/providers/common.py:553
-msgid "The configured token provider does not support bind authentication."
+#: keystone/token/providers/common.py:598
+msgid "User is not a trustee."
msgstr ""
-#: keystone/token/providers/common.py:628
+#: keystone/token/providers/common.py:665
msgid ""
"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
"Authentication"
msgstr ""
-#: keystone/token/providers/common.py:646
+#: keystone/token/providers/common.py:675
+msgid "Non-default domain is not supported"
+msgstr ""
+
+#: keystone/token/providers/common.py:679
msgid "Domain scoped token is not supported"
msgstr ""
-#: keystone/token/providers/pki.py:48 keystone/token/providers/pkiz.py:30
+#: keystone/token/providers/pki.py:53 keystone/token/providers/pkiz.py:31
msgid "Unable to sign token."
msgstr ""
-#: keystone/token/providers/fernet/core.py:215
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
+#: keystone/token/providers/fernet/token_formatters.py:102
+#, python-format
+msgid "This is not a recognized Fernet token %s"
msgstr ""
-#: keystone/token/providers/fernet/token_formatters.py:83
-msgid "This is not a recognized Fernet token"
+#: keystone/token/providers/fernet/token_formatters.py:198
+#, python-format
+msgid "This is not a recognized Fernet payload version: %s"
msgstr ""
-#: keystone/token/providers/fernet/token_formatters.py:246
+#: keystone/trust/controllers.py:107
#, python-format
-msgid "This is not a recognized Fernet payload version: %s"
+msgid "role %s is not defined"
msgstr ""
-#: keystone/trust/controllers.py:141
+#: keystone/trust/controllers.py:131
msgid "Redelegation allowed for delegated by trust only"
msgstr ""
-#: keystone/trust/controllers.py:174
+#: keystone/trust/controllers.py:164
msgid "The authenticated user should match the trustor."
msgstr ""
-#: keystone/trust/controllers.py:179
+#: keystone/trust/controllers.py:169
msgid "At least one role should be specified."
msgstr ""
-#: keystone/trust/core.py:61
+#: keystone/trust/core.py:58
#, python-format
msgid ""
"Remaining redelegation depth of %(redelegation_depth)d out of allowed "
"range of [0..%(max_count)d]"
msgstr ""
-#: keystone/trust/core.py:70
+#: keystone/trust/core.py:67
#, python-format
msgid ""
"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
"order to redelegate a trust"
msgstr ""
-#: keystone/trust/core.py:81
+#: keystone/trust/core.py:78
msgid "Requested expiration time is more than redelegated trust can provide"
msgstr ""
-#: keystone/trust/core.py:91
+#: keystone/trust/core.py:88
msgid "Some of requested roles are not in redelegated trust"
msgstr ""
-#: keystone/trust/core.py:120
+#: keystone/trust/core.py:112
msgid "One of the trust agents is disabled or deleted"
msgstr ""
-#: keystone/trust/core.py:139
+#: keystone/trust/core.py:131
msgid "remaining_uses must be a positive integer or null."
msgstr ""
-#: keystone/trust/core.py:145
+#: keystone/trust/core.py:137
#, python-format
msgid ""
"Requested redelegation depth of %(requested_count)d is greater than "
"allowed %(max_count)d"
msgstr ""
-#: keystone/trust/core.py:152
+#: keystone/trust/core.py:144
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr ""
-#: keystone/trust/core.py:162
+#: keystone/trust/core.py:154
msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting"
" this parameter is advised."
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
index d7739156..acf44efb 100644
--- a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-critical.po
@@ -3,23 +3,23 @@
# This file is distributed under the same license as the keystone project.
#
# Translators:
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Korean (South Korea)\n"
-"Language: ko-KR\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: ko-KR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
#, python-format
msgid "Unable to open template file %s"
-msgstr "템플리트 íŒŒì¼ %sì„(를) ì—´ 수 ì—†ìŒ"
+msgstr "템플릿 íŒŒì¼ %sì„(를) ì—´ 수 ì—†ìŒ"
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po
new file mode 100644
index 00000000..d531e9d5
--- /dev/null
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-error.po
@@ -0,0 +1,165 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-04-19 04:32+0000\n"
+"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
+
+#, python-format
+msgid ""
+"Asked to convert a non-domain project into a domain - Domain: %(domain_id)s, "
+"Project ID: %(id)s, Project Name: %(project_name)s"
+msgstr ""
+"비도메인 프로젝트를 도메인으로 변환하도록 요청 - 도메인: %(domain_id)s, 프로"
+"젝트 ID: %(id)s, 프로젝트 이름: %(project_name)s"
+
+msgid "Cannot retrieve Authorization headers"
+msgstr "인증 헤더를 검색할 수 없음"
+
+#, python-format
+msgid "Circular reference found role inference rules - %(prior_role_id)s."
+msgstr "순환 참조에서 역할 추론 규칙 발견 - %(prior_role_id)s."
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in projects hierarchy - "
+"%(project_id)s."
+msgstr ""
+"프로젝트 계층 - %(project_id)s에서 순환 참조 또는 반복 항목을 발견했습니다."
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found in region tree - %(region_id)s."
+msgstr "지역 트리에서 순환 참조 또는 반복 항목이 발견됨 - %(region_id)s."
+
+#, python-format
+msgid ""
+"Circular reference or a repeated entry found projects hierarchy - "
+"%(project_id)s."
+msgstr "순환 참조 또는 반복 항목에서 프로젝트 계층을 발견 - %(project_id)s."
+
+#, python-format
+msgid "Command %(to_exec)s exited with %(retcode)s - %(output)s"
+msgstr "명령 %(to_exec)s이(가) 종료되고 %(retcode)s - %(output)s이(가) 표시됨"
+
+#, python-format
+msgid "Could not bind to %(host)s:%(port)s"
+msgstr "%(host)s:%(port)s에 바인드할 수 없음"
+
+#, python-format
+msgid ""
+"Either [fernet_tokens] key_repository does not exist or Keystone does not "
+"have sufficient permission to access it: %s"
+msgstr ""
+"[fernet_tokens] key_repository가 없거나 Keystone에서 액세스할 권한이 충분하"
+"지 않음: %s"
+
+msgid ""
+"Error setting up the debug environment. Verify that the option --debug-url "
+"has the format <host>:<port> and that a debugger processes is listening on "
+"that port."
+msgstr ""
+"디버그 환경을 설정하는 중에 오류가 발생했습니다. --debug-url 옵션에 <host>:"
+"<port> 형식이 있으며 디버거 프로세스가 해당 포트에서 청취 중인지 확인하십시"
+"오."
+
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s%(output)s"
+msgstr "어설션에 서명할 때 오류 발생, 이유: %(reason)s%(output)s"
+
+msgid "Failed to construct notifier"
+msgstr "알리미를 구성하는 데 실패"
+
+msgid ""
+"Failed to create [fernet_tokens] key_repository: either it already exists or "
+"you don't have sufficient permissions to create it"
+msgstr ""
+"[fernet_tokens] key_repository 생성 실패: 이미 있거나 생성할 권한이 충분하지 "
+"않음"
+
+msgid "Failed to create the default domain."
+msgstr "기본 도메인을 생성하지 못했습니다."
+
+#, python-format
+msgid "Failed to remove file %(file_path)r: %(error)s"
+msgstr "파일 %(file_path)r을(를) 제거하는 데 실패: %(error)s"
+
+#, python-format
+msgid "Failed to send %(action)s %(event_type)s notification"
+msgstr "%(action)s %(event_type)s 알림을 보내는 데 실패"
+
+#, python-format
+msgid "Failed to send %(res_id)s %(event_type)s notification"
+msgstr "%(res_id)s %(event_type)s 알림을 보내는 데 실패"
+
+msgid "Failed to validate token"
+msgstr "토큰을 유효성 검증하지 못했음"
+
+#, python-format
+msgid "Malformed endpoint %(url)s - unknown key %(keyerror)s"
+msgstr "형식이 잘못된 엔드포인트 %(url)s - 알 수 없는 키 %(keyerror)s"
+
+#, python-format
+msgid ""
+"Malformed endpoint %s - incomplete format (are you missing a type notifier ?)"
+msgstr ""
+"잘못된 형식의 엔드포인트 %s - 불완전한 형식(유형 알리미가 누락되었습니까?)"
+
+#, python-format
+msgid ""
+"Malformed endpoint '%(url)s'. The following type error occurred during "
+"string substitution: %(typeerror)s"
+msgstr ""
+"잘못된 형식의 엔드포인트 '%(url)s'입니다. 문자열 대체 중에 다음 입력 오류 발"
+"생: %(typeerror)s"
+
+#, python-format
+msgid "Malformed endpoint - %(url)r is not a string"
+msgstr "잘못된 형식의 엔드포인트 - %(url)r이(가) 문자열이 아님"
+
+#, python-format
+msgid ""
+"Reinitializing revocation list due to error in loading revocation list from "
+"backend. Expected `list` type got `%(type)s`. Old revocation list data: "
+"%(list)r"
+msgstr ""
+"백엔드에서 취소 목록을 로드하는 중에 발생한 오류로 인해 취소 목록을 다시 초기"
+"화합니다. 예상되는 `list` 유형이 `%(type)s`이(가) 되었습니다. 이전 취소 목록 "
+"데이터: %(list)r"
+
+msgid "Server error"
+msgstr "서버 오류"
+
+#, python-format
+msgid "Unable to convert Keystone user or group ID. Error: %s"
+msgstr "Keystone 사용자 또는 그룹 ID를 변환할 수 없습니다. 오류: %s"
+
+msgid "Unable to sign token"
+msgstr "토큰에 서명할 수 없음"
+
+#, python-format
+msgid "Unexpected error or malformed token determining token expiry: %s"
+msgstr "토큰 만료를 판별하는 잘못된 형식의 토큰 또는 예상치 못한 오류: %s"
+
+#, python-format
+msgid ""
+"Unexpected results in response for domain config - %(count)s responses, "
+"first option is %(option)s, expected option %(expected)s"
+msgstr ""
+"도메인 구성에 대한 응답의 예기치 않은 결과 - %(count)s 응답, 첫 번째 옵션 "
+"%(option)s, 예상 옵션 %(expected)s"
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po
new file mode 100644
index 00000000..1fb0edd5
--- /dev/null
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-info.po
@@ -0,0 +1,210 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Yongbok Kim <ruo91@yongbok.net>, 2015
+# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-04-19 04:30+0000\n"
+"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
+
+#, python-format
+msgid ""
+"\"expires_at\" has conflicting values %(existing)s and %(new)s. Will use "
+"the earliest value."
+msgstr ""
+"\"expires_at\"에 충돌되는 값 %(existing)s 및 %(new)s이(가) 있습니다. 가장 이"
+"른 값을 사용합니다."
+
+#, python-format
+msgid "Adding proxy '%(proxy)s' to KVS %(name)s."
+msgstr "KVS %(name)s에 프록시 '%(proxy)s'을(를) 추가합니다."
+
+#, python-format
+msgid "Cannot find client issuer in env by the issuer attribute - %s."
+msgstr ""
+"Issuer 속성 - %s을(를) 사용하여 환경에서 클라이언트 issuer를 찾을 수 없습니"
+"다."
+
+#, python-format
+msgid "Couldn't verify unknown bind: {%(bind_type)s: %(identifier)s}"
+msgstr "알 수 없는 바인드를 확인할 수 없음: {%(bind_type)s: %(identifier)s}"
+
+#, python-format
+msgid "Created %(interface)s endpoint %(url)s"
+msgstr "%(interface)s 엔드포인트 %(url)s이(가) 생성됨"
+
+#, python-format
+msgid "Created Region %s"
+msgstr "지역 %s이(가) 생성됨"
+
+#, python-format
+msgid "Created Role %s"
+msgstr "역할 %s이(가) 생성됨"
+
+#, python-format
+msgid "Created a new key: %s"
+msgstr "새로운 키 생성: %s"
+
+#, python-format
+msgid "Created domain %s"
+msgstr "도메인 %s이(가) 생성됨"
+
+#, python-format
+msgid "Created project %s"
+msgstr "프로젝트 %s이(가) 생성됨"
+
+#, python-format
+msgid "Created user %s"
+msgstr "사용자 \"%s\"이(가) 생성됨"
+
+#, python-format
+msgid "Creating the default role %s because it does not exist."
+msgstr "기본 역할 %s이(가) 없으므로 작성합니다."
+
+#, python-format
+msgid "Creating the default role %s failed because it was already created"
+msgstr "기본 역할 %s이(가) 이미 생성되었으므로 작성에 실패"
+
+#, python-format
+msgid "Current primary key is: %s"
+msgstr "현재 기본 키: %s"
+
+#, python-format
+msgid "Domain %s already exists, skipping creation."
+msgstr "도메인 %s이(가) 이미 있으므로, 생성을 건너뜁니다."
+
+#, python-format
+msgid "Excess key to purge: %s"
+msgstr "제거할 초과 키: %s"
+
+#, python-format
+msgid ""
+"Fernet token created with length of %d characters, which exceeds 255 "
+"characters"
+msgstr "길이가 255자를 초과하는 %d자로 fernet 토큰이 생성됨"
+
+#, python-format
+msgid "Granted %(role)s on %(project)s to user %(username)s."
+msgstr ""
+"%(project)s에 대한 %(role)s이(가) 사용자 %(username)s에 부여되었습니다."
+
+#, python-format
+msgid "KVS region %s key_mangler disabled."
+msgstr "KVS 지역 %s key_mangler가 사용되지 않습니다."
+
+msgid "Kerberos bind authentication successful"
+msgstr "Kerberos 바인드 인증 성공"
+
+msgid "Kerberos credentials do not match those in bind"
+msgstr "Kerberos 자격 증명이 바인드에 있는 자격 증명과 일치하지 않음"
+
+msgid "Kerberos credentials required and not present"
+msgstr "Kerberos 자격 증명이 필요하지만 없음"
+
+msgid "Key repository is already initialized; aborting."
+msgstr "키 저장소가 이미 초기화되었습니다. 중단합니다."
+
+#, python-format
+msgid ""
+"Loaded %(count)d encryption keys (max_active_keys=%(max)d) from: %(dir)s"
+msgstr "%(dir)s에서 %(count)d 암호화 키(max_active_keys=%(max)d)를 로드함"
+
+#, python-format
+msgid "Named bind mode %s not in bind information"
+msgstr "바인드 정보에 이름 지정된 바인드 모드 %s이(가) 없음"
+
+#, python-format
+msgid "Next primary key will be: %s"
+msgstr "다음 기본 키: %s"
+
+msgid "No bind information present in token"
+msgstr "토큰에 바인드 정보가 없음"
+
+#, python-format
+msgid "Project %s already exists, skipping creation."
+msgstr "프로젝트 %s이(가) 이미 있으므로, 생성을 건너뜁니다."
+
+#, python-format
+msgid "Promoted key 0 to be the primary: %s"
+msgstr "승격된 키 0이 기본이 됨: %s"
+
+#, python-format
+msgid "Region %s exists, skipping creation."
+msgstr "지역 %s이(가) 이미 있으므로, 생성을 건너뜁니다."
+
+#, python-format
+msgid "Role %s exists, skipping creation."
+msgstr "역할 %s이(가) 이미 있으므로, 생성을 건너뜁니다."
+
+#, python-format
+msgid "Running command - %s"
+msgstr "%s - 명령 실행"
+
+#, python-format
+msgid "Scanning %r for domain config files"
+msgstr "%r에서 도메인 구성 파일 스캔"
+
+#, python-format
+msgid "Skipping %s endpoint as already created"
+msgstr "%s 엔드포인트가 이미 생성되었으므로 건너뜀"
+
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr "%(host)s:%(port)s에서 %(arg0)s 시작 중"
+
+#, python-format
+msgid "Starting key rotation with %(count)s key files: %(list)s"
+msgstr "%(count)s 키 파일로 키 순환 시작: %(list)s"
+
+#, python-format
+msgid ""
+"The client issuer %(client_issuer)s does not match with the trusted issuer "
+"%(trusted_issuer)s"
+msgstr ""
+"클라이언트 issuer %(client_issuer)s이(가) 신뢰할 수 있는 issuer "
+"%(trusted_issuer)s과(와) 일치하지 않음"
+
+#, python-format
+msgid "Total expired tokens removed: %d"
+msgstr "제거된 만료 토큰 총계: %d"
+
+#, python-format
+msgid "User %(username)s already has %(role)s on %(project)s."
+msgstr ""
+"사용자 %(username)s이(가) 이미 %(project)s에 대한 %(role)s이(가) 있습니다."
+
+#, python-format
+msgid "User %s already exists, skipping creation."
+msgstr "사용자 %s이(가) 이미 있으므로, 생성을 건너뜁니다."
+
+#, python-format
+msgid "Using %(func)s as KVS region %(name)s key_mangler"
+msgstr "%(func)s을(를) KVS region %(name)s key_mangler(으)로 사용"
+
+#, python-format
+msgid ""
+"Using default keystone.common.kvs.sha1_mangle_key as KVS region %s "
+"key_mangler"
+msgstr ""
+"기본 keystone.common.kvs.sha1_mangle_key을(를) KVS 지역 %s key_mangler(으)로 "
+"사용"
+
+msgid ""
+"[fernet_tokens] key_repository does not appear to exist; attempting to "
+"create it"
+msgstr ""
+"[fernet_tokens] key_repository가 없는 것으로 보입니다. 생성하려고 시도합니다."
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po
new file mode 100644
index 00000000..0a931724
--- /dev/null
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone-log-warning.po
@@ -0,0 +1,325 @@
+# Translations template for keystone.
+# Copyright (C) 2015 OpenStack Foundation
+# This file is distributed under the same license as the keystone project.
+#
+# Translators:
+# Yongbok Kim <ruo91@yongbok.net>, 2015
+# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-04-19 04:27+0000\n"
+"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
+
+#, python-format
+msgid "%s is not a dogpile.proxy.ProxyBackend"
+msgstr "%sì´(ê°€) dogpile.proxy.ProxyBackendê°€ 아님"
+
+msgid "'local conf' from PasteDeploy INI is being ignored."
+msgstr "PasteDeploy INIì˜ 'local conf'ê°€ 무시ë©ë‹ˆë‹¤."
+
+msgid ""
+"Auth context already exists in the request environment; it will be used for "
+"authorization instead of creating a new one."
+msgstr ""
+"요청 í™˜ê²½ì— ì¸ì¦ 컨í…스트가 ì´ë¯¸ 있습니다. 새로 ìƒì„±í•˜ì§€ ì•Šê³  ì´ ì¸ì¦ 컨í…스"
+"트를 ì¸ì¦ì— 사용합니다."
+
+#, python-format
+msgid "Authorization failed. %(exception)s from %(remote_addr)s"
+msgstr "%(remote_addr)s ì—ì„œ %(exception)s ì¸ì¦ì— 실패 하였습니다."
+
+msgid "Couldn't find the auth context."
+msgstr "ì¸ì¦ 컨í…스트를 ì°¾ì„ ìˆ˜ 없습니다."
+
+#, python-format
+msgid ""
+"Endpoint %(endpoint_id)s referenced in association for policy %(policy_id)s "
+"not found."
+msgstr ""
+"ì •ì±… %(policy_id)sì˜ ì—°ê´€ì—ì„œ 참조ë˜ëŠ” 엔드í¬ì¸íŠ¸ %(endpoint_id)sì„(를) ì°¾ì„ "
+"수 없습니다."
+
+msgid "Failed to invoke ``openssl version``, assuming is v1.0 or newer"
+msgstr "v1.0 ì´ìƒì´ë¼ê³  가정하여 ``openssl version``ì„ í˜¸ì¶œí•˜ëŠ” ë° ì‹¤íŒ¨"
+
+#, python-format
+msgid ""
+"Found multiple domains being mapped to a driver that does not support that "
+"(e.g. LDAP) - Domain ID: %(domain)s, Default Driver: %(driver)s"
+msgstr ""
+"여러 ë„ë©”ì¸ì´ ë“œë¼ì´ë²„ì— ë§µí•‘ë˜ì–´ 있ìŒì„ 발견했지만, ì´ ë“œë¼ì´ë²„ì—ì„œ ì´ ê¸°ëŠ¥"
+"ì„ ì§€ì›í•˜ì§€ ì•ŠìŒ(예: LDAP) - ë„ë©”ì¸ ID: %(domain)s, 기본 ë“œë¼ì´ë²„: %(driver)s"
+
+#, python-format
+msgid ""
+"Found what looks like an incorrectly constructed config option substitution "
+"reference - domain: %(domain)s, group: %(group)s, option: %(option)s, value: "
+"%(value)s."
+msgstr ""
+"잘못 êµ¬ì„±ëœ êµ¬ì„± 옵션 대체 참조 발견 - ë„ë©”ì¸: %(domain)s, 그룹: %(group)s, "
+"옵션: %(option)s, 값: %(value)s."
+
+#, python-format
+msgid ""
+"Found what looks like an unmatched config option substitution reference - "
+"domain: %(domain)s, group: %(group)s, option: %(option)s, value: %(value)s. "
+"Perhaps the config option to which it refers has yet to be added?"
+msgstr ""
+"ì¼ì¹˜í•˜ì§€ 않는 구성 옵션 대체 발견 - ë„ë©”ì¸: %(domain)s, 그룹: %(group)s, 옵"
+"ì…˜: %(option)s, ê°’: %(value)s. 참조하는 구성 ì˜µì…˜ì´ ì´ë¯¸ 추가ë˜ì—ˆì„ ê°€ëŠ¥ì„±ì´ "
+"있습니다."
+
+#, python-format
+msgid ""
+"ID attribute %(id_attr)s for LDAP object %(dn)s has multiple values and "
+"therefore cannot be used as an ID. Will get the ID from DN instead"
+msgstr ""
+"LDAP 오브ì íŠ¸ %(dn)sì˜ ID ì†ì„± %(id_attr)s ê°’ì´ ì—¬ëŸ¬ ê°œì´ë¯€ë¡œ, IDë¡œ 사용할 "
+"수 없습니다. 대신 DNì—ì„œ ID를 얻습니다."
+
+#, python-format
+msgid "Ignoring file (%s) while scanning domain config directory"
+msgstr "ë„ë©”ì¸ êµ¬ì„± 디렉토리를 스캔하는 ì¤‘ì— íŒŒì¼(%s) 무시"
+
+msgid "Ignoring user name"
+msgstr "ì‚¬ìš©ìž ì´ë¦„ 무시"
+
+#, python-format
+msgid ""
+"Invalid additional attribute mapping: \"%s\". Format must be "
+"<ldap_attribute>:<keystone_attribute>"
+msgstr ""
+"ìž˜ëª»ëœ ì¶”ê°€ ì†ì„± 맵핑:\" %s\". 형ì‹ì€ <ldap_attribute>:<keystone_attribute>"
+
+#, python-format
+msgid "Invalid domain name (%s) found in config file name"
+msgstr "설정 íŒŒì¼ ì´ë¦„ì— ìž˜ëª»ëœ ë„ë©”ì¸ ì´ë¦„(%s)ì„ ì°¾ì•˜ìŠµë‹ˆë‹¤."
+
+msgid ""
+"It is recommended to only use the base key-value-store implementation for "
+"the token driver for testing purposes. Please use 'memcache' or 'sql' "
+"instead."
+msgstr ""
+"테스트용으로만 í† í° ë“œë¼ì´ë²„ì˜ ê¸°ë³¸ key-value-store êµ¬í˜„ì„ ì‚¬ìš©í•˜ëŠ” ê²ƒì´ ì¢‹ìŠµ"
+"니다. 대신 'memcache' ë˜ëŠ” 'sql'ì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤."
+
+#, python-format
+msgid "KVS lock released (timeout reached) for: %s"
+msgstr "%sì— ëŒ€í•œ KVS ìž ê¸ˆì´ í•´ì œë¨(ì œí•œì‹œê°„ì— ë„달)"
+
+msgid ""
+"LDAP Server does not support paging. Disable paging in keystone.conf to "
+"avoid this message."
+msgstr ""
+"LDAP 서버가 페ì´ì§•ì„ 지ì›í•˜ì§€ 않습니다. ì´ ë©”ì‹œì§€ë¥¼ 방지하려면 keystone.conf"
+"ì—ì„œ 페ì´ì§•ì„ 사용 안함으로 설정하십시오."
+
+msgid "No domain information specified as part of list request"
+msgstr "ëª©ë¡ ìš”ì²­ì˜ ì¼ë¶€ë¡œ ë„ë©”ì¸ ì •ë³´ê°€ 지정ë˜ì§€ ì•ŠìŒ"
+
+msgid ""
+"Not specifying a domain during a create user, group or project call, and "
+"relying on falling back to the default domain, is deprecated as of Liberty "
+"and will be removed in the N release. Specify the domain explicitly or use a "
+"domain-scoped token"
+msgstr ""
+"사용ìž, 그룹 ë˜ëŠ” 프로ì íŠ¸ 호출 ìƒì„± ì¤‘ì— ë„ë©”ì¸ì„ 지정하지 ì•Šê³ , 기본 ë„ë©”ì¸"
+"으로 다시 ëŒì•„가는 ê¸°ëŠ¥ì€ Libertyì—서는 ë” ì´ìƒ 사용ë˜ì§€ 않으므로 N 릴리스ì—"
+"ì„œ 제거ë©ë‹ˆë‹¤. ë„ë©”ì¸ì„ 명시ì ìœ¼ë¡œ 지정하거나 ë„ë©”ì¸ ë²”ìœ„ 토í°ì„ 사용하십시"
+"오."
+
+#, python-format
+msgid ""
+"Policy %(policy_id)s referenced in association for endpoint %(endpoint_id)s "
+"not found."
+msgstr ""
+"엔드í¬ì¸íŠ¸ %(endpoint_id)sì˜ ì—°ê´€ì—ì„œ 참조ë˜ëŠ” ì •ì±… %(policy_id)sì„(를) ì°¾ì„ "
+"수 없습니다."
+
+#, python-format
+msgid "Project %s does not exist and was not deleted."
+msgstr "프로ì íŠ¸ %sì´(ê°€) 없으므로 ì‚­ì œë˜ì§€ 않았습니다."
+
+msgid "RBAC: Bypassing authorization"
+msgstr "RBAC: 권한 무시"
+
+msgid "RBAC: Invalid token"
+msgstr "RBAC: 올바르지 ì•Šì€ í† í°"
+
+msgid "RBAC: Invalid user data in token"
+msgstr "RBAC: 토í°ì— ìž˜ëª»ëœ ì‚¬ìš©ìž ë°ì´í„°"
+
+#, python-format
+msgid ""
+"Removing `%s` from revocation list due to invalid expires data in revocation "
+"list."
+msgstr ""
+"유효하지 ì•Šì•„ 취소 목ë¡ì—ì„œ `%s`ì„(를) 제거하면 취소 목ë¡ì˜ ë°ì´í„°ê°€ 만료ë©ë‹ˆ"
+"다."
+
+msgid ""
+"The admin_token_auth middleware presents a security risk and should be "
+"removed from the [pipeline:api_v3], [pipeline:admin_api], and [pipeline:"
+"public_api] sections of your paste ini file."
+msgstr ""
+"admin_token_auth 미들웨어ì—서는 보안 ìœ„í—˜ì´ ì œê¸°ë˜ë¯€ë¡œ paste ini 파ì¼ì˜ "
+"[pipeline:api_v3], [pipeline:admin_api] ë° [pipeline:public_api] 섹션ì—ì„œ ì œ"
+"거해야 합니다."
+
+msgid ""
+"The default domain was created automatically to contain V2 resources. This "
+"is deprecated in the M release and will not be supported in the O release. "
+"Create the default domain manually or use the keystone-manage bootstrap "
+"command."
+msgstr ""
+"V2 ìžì›ì„ í¬í•¨í•˜ë„ë¡ ê¸°ë³¸ ë„ë©”ì¸ì´ ìžë™ìœ¼ë¡œ ìƒì„±ë˜ì—ˆìŠµë‹ˆë‹¤. ì´ ê¸°ëŠ¥ì€ M 릴리"
+"스ì—ì„œ ë” ì´ìƒ 사용ë˜ì§€ 않으며 O 릴리스ì—ì„œ 지ì›ë˜ì§€ 않습니다. 수ë™ìœ¼ë¡œ 기본 "
+"ë„ë©”ì¸ì„ ìƒì„±í•˜ê±°ë‚˜ keystone-manage 부트스트랩 ëª…ë ¹ì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤."
+
+#, python-format
+msgid "Token `%s` is expired, not adding to the revocation list."
+msgstr "토큰 `%s`를 해지 목록에 추가 하지 않으면 만료 됩니다."
+
+#, python-format
+msgid "Truncating user password to %d characters."
+msgstr "사용자 비밀번호를 %d자로 자릅니다."
+
+#, python-format
+msgid "Unable to add user %(user)s to %(tenant)s."
+msgstr "%(tenant)s ì— ì‚¬ìš©ìž %(user)s 를 추가 í•  수 없습니다."
+
+#, python-format
+msgid ""
+"Unable to change the ownership of [fernet_tokens] key_repository without a "
+"keystone user ID and keystone group ID both being provided: %s"
+msgstr ""
+"Keystone ì‚¬ìš©ìž ID와 keystone 그룹 IDê°€ ëª¨ë‘ ì œê³µë˜ì§€ 않으면 [fernet_tokens] "
+"key_repositoryì˜ ì†Œìœ ê¶Œì€ ë³€ê²½í•  수 ì—†ìŒ: %s"
+
+#, python-format
+msgid ""
+"Unable to change the ownership of the new key without a keystone user ID and "
+"keystone group ID both being provided: %s"
+msgstr ""
+"keystone ì‚¬ìš©ìž ID와 keystone 그룹 IDê°€ ëª¨ë‘ ì œê³µë˜ì§€ 않으면 새 í‚¤ì˜ ì†Œìœ ê¶Œ"
+"ì„ ë³€ê²½í•  수 ì—†ìŒ: %s"
+
+#, python-format
+msgid "Unable to locate domain config directory: %s"
+msgstr "%s: ë„ë©”ì¸ ì„¤ì • 디렉토리를 ì°¾ì„ ìˆ˜ 없습니다."
+
+#, python-format
+msgid "Unable to remove user %(user)s from %(tenant)s."
+msgstr "%(tenant)s ì—ì„œ %(user)s 를 제거 í•  수 없습니다."
+
+#, python-format
+msgid ""
+"Unsupported policy association found - Policy %(policy_id)s, Endpoint "
+"%(endpoint_id)s, Service %(service_id)s, Region %(region_id)s, "
+msgstr ""
+"지ì›ë˜ì§€ 않는 ì •ì±… ì—°ê´€ 발견 - ì •ì±… %(policy_id)s, 엔드í¬ì¸íŠ¸ "
+"%(endpoint_id)s, 서비스 %(service_id)s, 지역 %(region_id)s, "
+
+#, python-format
+msgid ""
+"User %(user_id)s doesn't have access to default project %(project_id)s. The "
+"token will be unscoped rather than scoped to the project."
+msgstr ""
+"ì‚¬ìš©ìž %(user_id)sì´(ê°€) 기본 프로ì íŠ¸ %(project_id)sì— ëŒ€í•œ 액세스 ê¶Œí•œì´ ì—†"
+"습니다. 토í°ì˜ 범위가 프로ì íŠ¸ë¡œ 지정ë˜ì§€ ì•Šê³  범위 ì§€ì •ì´ í•´ì œë©ë‹ˆë‹¤."
+
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s is disabled. The token "
+"will be unscoped rather than scoped to the project."
+msgstr ""
+"%(user_id)s 사용ìžì˜ 기본 프로ì íŠ¸ %(project_id)sì„(를) 사용하지 않습니다. 토"
+"í°ì˜ 범위가 프로ì íŠ¸ë¡œ 지정ë˜ì§€ ì•Šê³  범위 ì§€ì •ì´ í•´ì œë©ë‹ˆë‹¤."
+
+#, python-format
+msgid ""
+"User %(user_id)s's default project %(project_id)s not found. The token will "
+"be unscoped rather than scoped to the project."
+msgstr ""
+"ì‚¬ìš©ìž %(user_id)sì˜ ê¸°ë³¸ 프로ì íŠ¸ %(project_id)sì„(를) ì°¾ì„ ìˆ˜ 없습니다. 토"
+"í°ì˜ 범위가 프로ì íŠ¸ë¡œ 지정ë˜ì§€ ì•Šê³  범위 ì§€ì •ì´ í•´ì œë©ë‹ˆë‹¤."
+
+#, python-format
+msgid ""
+"When deleting entries for %(search_base)s, could not delete nonexistent "
+"entries %(entries)s%(dots)s"
+msgstr ""
+"%(search_base)sì˜ í•­ëª©ì„ ì‚­ì œí•  ë•Œ 존재하지 않는 항목 %(entries)s%(dots)sì„"
+"(를) 삭제할 수 ì—†ìŒ"
+
+#, python-format
+msgid "[fernet_tokens] key_repository is world readable: %s"
+msgstr "[fernet_tokens] key_repository는 ì½ì„ 수 있ìŒ: %s"
+
+msgid ""
+"[fernet_tokens] max_active_keys must be at least 1 to maintain a primary key."
+msgstr ""
+"기본 키를 유지 보수하려면 [fernet_tokens] max_active_keysê°€ 최소 1ì´ì–´ì•¼ 합니"
+"다."
+
+#, python-format
+msgid ""
+"`token_api.%s` is deprecated as of Juno in favor of utilizing methods on "
+"`token_provider_api` and may be removed in Kilo."
+msgstr ""
+"Junoì—서는 `token_provider_api`ì˜ ë©”ì†Œë“œë¥¼ 활용하기 위해 `token_api.%s`ì´"
+"(ê°€) ë” ì´ìƒ 사용ë˜ì§€ 않으므로 Kiloì—ì„œ ì œê±°ë  ìˆ˜ 있습니다."
+
+msgid ""
+"build_auth_context middleware checking for the admin token is deprecated as "
+"of the Mitaka release and will be removed in the O release. If your "
+"deployment requires use of the admin token, update keystone-paste.ini so "
+"that admin_token_auth is before build_auth_context in the paste pipelines, "
+"otherwise remove the admin_token_auth middleware from the paste pipelines."
+msgstr ""
+"build_auth_context 미들웨어ì—ì„œ 관리 토í°ì„ 확ì¸í•˜ëŠ” ê¸°ëŠ¥ì€ Mitaka 릴리스ì—"
+"ì„œ ë” ì´ìƒ 사용ë˜ì§€ 않으므로, O 릴리스ì—ì„œ 제거ë©ë‹ˆë‹¤. ë°°í¬ì—ì„œ 관리 토í°ì„ "
+"사용해야 하는 경우 붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ build_auth_context ì „ì— "
+"admin_token_authê°€ 오ë„ë¡ keystone-paste.ini를 ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤. 그렇지 않으"
+"ë©´ 붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ admin_token_auth 미들웨어를 제거하십시오."
+
+#, python-format
+msgid ""
+"delete_domain_assignments method not found in custom assignment driver. "
+"Domain assignments for domain (%s) to users from other domains will not be "
+"removed. This was added in V9 of the assignment driver."
+msgstr ""
+"ì‚¬ìš©ìž ì •ì˜ í• ë‹¹ ë“œë¼ì´ë²„ì—ì„œ delete_domain_assignments 메소드를 ì°¾ì„ ìˆ˜ 없습"
+"니다. 다른 ë„ë©”ì¸ì˜ 사용ìžì—게 할당한 ë„ë©”ì¸(%s)ì€ ì œê±°ë˜ì§€ 않습니다. ì´ ê¸°ëŠ¥"
+"ì€ í• ë‹¹ ë“œë¼ì´ë²„ì˜ V9ì—ì„œ 추가ë˜ì—ˆìŠµë‹ˆë‹¤."
+
+msgid ""
+"insecure_debug is enabled so responses may include sensitive information."
+msgstr "insecure_debugê°€ 사용ë˜ë¯€ë¡œ ì‘ë‹µì— ë¯¼ê°í•œ ì •ë³´ê°€ í¬í•¨ë  수 있습니다."
+
+msgid ""
+"keystone-manage pki_setup is deprecated as of Mitaka in favor of not using "
+"PKI tokens and may be removed in 'O' release."
+msgstr ""
+"Mitakaì—ì„œ PKI 토í°ì„ 사용하지 않기 위해 keystone-manage pki_setupì´ ë” ì´ìƒ "
+"사용ë˜ì§€ 않으므로, 'O' 릴리스ì—ì„œ 제거할 수 있습니다."
+
+msgid "keystone-manage pki_setup is not recommended for production use."
+msgstr "keystone-manage pki_setupì€ í”„ë¡œë•ì…˜ì—ì„œ 사용하지 않는 ê²ƒì´ ì¢‹ìŠµë‹ˆë‹¤.."
+
+msgid "keystone-manage ssl_setup is not recommended for production use."
+msgstr "keystone-manage ssl_setupì€ í”„ë¡œë•ì…˜ì—ì„œ 사용하지 않는 ê²ƒì´ ì¢‹ìŠµë‹ˆë‹¤."
+
+msgid "missing exception kwargs (programmer error)"
+msgstr "누락된 예외 kwargs(프로그래머 오류)"
diff --git a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
index 123379ce..850b3e39 100644
--- a/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
@@ -1,31 +1,41 @@
-# Korean (South Korea) translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
# Sungjin Kang <potopro@gmail.com>, 2013
-# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Sungjin Kang <potopro@gmail.com>, 2013
+# Sungjin Kang <gang.sungjin@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: ko_KR\n"
-"Language-Team: Korean (South Korea)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-19 04:43+0000\n"
+"Last-Translator: SeYeon Lee <sy_lee@kr.ibm.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)sì€(는) 지ì›ë˜ëŠ” ë“œë¼ì´ë²„ ë²„ì „ì´ ì•„ë‹˜"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr "%(entity)s ì´ë¦„ì—는 다ìŒê³¼ ê°™ì€ ì˜ˆì•½ 문ìžê°€ í¬í•¨ë  수 ì—†ìŒ: %(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -48,6 +58,10 @@ msgstr ""
"ì…˜ 저장소 경로가 %(path)sì— ì¡´ìž¬í•˜ì§€ 않거나 디렉토리가 아닙니다."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)sì€(는) %(implied_role_id)sì„(를) ë‚´í¬í•˜ì§€ ì•ŠìŒ"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)sì€(는) %(min_length)sìž ë¯¸ë§Œì¼ ìˆ˜ 없습니다. "
@@ -60,6 +74,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)sì€(는) %(max_length)sìž ì´í•˜ì—¬ì•¼ 합니다. "
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)sì€(는) ë‚´í¬ëœ ì—­í• ì¼ ìˆ˜ ì—†ìŒ"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%sì€(는) ê³µë°±ì¼ ìˆ˜ 없습니다. "
@@ -75,9 +93,19 @@ msgstr "%s 필드가 필요하며 비어 있을 수 없음"
msgid "%s field(s) cannot be empty"
msgstr "%s 필드는 비어 ìžˆì„ ìˆ˜ ì—†ìŒ"
-msgid "(Disable debug mode to suppress these details.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"Mitaka 릴리스ì—서는 ì½ê¸° ì „ìš© ID LDAP 액세스를 사용하기 위해 LDAP ID 백엔드"
+"ì˜ %sì´(ê°€) ë” ì´ìƒ 사용ë˜ì§€ 않으므로, \"O\" 릴리스ì—ì„œ 제거ë©ë‹ˆë‹¤."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
msgstr ""
-"(ì´ëŸ¬í•œ ì„¸ë¶€ì‚¬í•­ì„ ì–µì œí•˜ë ¤ë©´ 디버그 모드를 사용 안함으로 설정하십시오.)"
+"(ì´ëŸ¬í•œ ì„¸ë¶€ì‚¬í•­ì„ ì–µì œí•˜ë ¤ë©´ insecure_debug 모드를 사용 안함으로 설정하십시"
+"오.)"
msgid "--all option cannot be mixed with other options"
msgstr "--all ì˜µì…˜ì€ ë‹¤ë¥¸ 옵션과 함께 사용할 수 ì—†ìŒ"
@@ -124,6 +152,16 @@ msgstr ""
msgid "At least one role should be specified."
msgstr "최소한 í•˜ë‚˜ì˜ ì—­í• ì„ ì§€ì •í•´ì•¼ 합니다."
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"ë“œë¼ì´ë²„ %sì„(를) ì°¾ì„ ìˆ˜ 없으므로 [identity]\\driver ì˜µì…˜ì„ ê¸°ë°˜ìœ¼ë¡œ 할당할 "
+"ë“œë¼ì´ë²„를 ìžë™ìœ¼ë¡œ ì„ íƒí•˜ëŠ” ë° ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. keystone 구성ì—ì„œ [assignment]/"
+"driver를 올바른 ë“œë¼ì´ë²„ë¡œ 설정하십시오."
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "지ì›ë˜ì§€ 않는 방법으로 ì¸ì¦ì„ ì‹œë„했습니다."
@@ -136,6 +174,14 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "ì¸ì¦ í”ŒëŸ¬ê·¸ì¸ ì˜¤ë¥˜."
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"백엔드 `%(backend)s`ì´(ê°€) 올바른 memcached 백엔드가 아닙니다. 올바른 백엔"
+"드: %(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr "ìœ„ìž„ì„ í†µí•´ ë°œí–‰ëœ í† í°ìœ¼ë¡œ 요청 토í°ì— ê¶Œí•œì„ ë¶€ì—¬í•  수 없습니다."
@@ -146,9 +192,6 @@ msgstr "%(option_name)s %(attr)s을(를) 변경할 수 없음"
msgid "Cannot change Domain ID"
msgstr "ë„ë©”ì¸ ID를 변경할 수 ì—†ìŒ"
-msgid "Cannot change consumer secret"
-msgstr "ì´ìš©ìž 본ì¸í™•ì¸ì •ë³´ë¥¼ 변경할 수 ì—†ìŒ"
-
msgid "Cannot change user ID"
msgstr "ì‚¬ìš©ìž ID를 변경할 수 ì—†ìŒ"
@@ -156,12 +199,67 @@ msgid "Cannot change user name"
msgstr "ì‚¬ìš©ìž ì´ë¦„ì„ ë³€ê²½í•  수 ì—†ìŒ"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "올바르지 ì•Šì€ URLì„ ì‚¬ìš©í•˜ì—¬ 엔드í¬ì¸íŠ¸ë¥¼ 작성할 수 ì—†ìŒ: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "ìƒìœ„ë¡œ 프로ì íŠ¸ë¥¼ 작성할 수 ì—†ìŒ: %(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "%s ì´ë¦„ì„ ë³µì œí•  수 없습니다."
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"소유ìžë¥¼ ë„ë©”ì¸ %(domain_id)s(으)ë¡œ 지정하지만 다른 ë„ë©”ì¸ "
+"(%(parent_domain_id)s)ì˜ ìƒìœ„를 지정하므로 프로ì íŠ¸ë¥¼ ìƒì„±í•  수 없습니다."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"ìƒìœ„(%(domain_id)s)ê°€ ë„ë©”ì¸ ì—­í• ì„ ìˆ˜í–‰í•˜ì§€ë§Œ 프로ì íŠ¸ 지정 "
+"parent_id(%(parent_id)s)ê°€ ì´ domain_id와 ì¼ì¹˜í•˜ì§€ 않으므로 프로ì íŠ¸ë¥¼ ìƒì„±"
+"할 수 없습니다."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"사용으로 ì„¤ì •ëœ ë„ë©”ì¸ì„ 삭제할 수 없습니다. 먼저 해당 ë„ë©”ì¸ì„ 사용 안함으"
+"로 설정하십시오."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"ì„œë¸ŒíŠ¸ë¦¬ì— ì‚¬ìš© ì„¤ì •ëœ í”„ë¡œì íŠ¸ê°€ 있으므로 프로ì íŠ¸ %(project_id)sì„(를) ì‚­ì œ"
+"할 수 없습니다."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"계층 êµ¬ì¡°ì˜ ë¦¬í”„ê°€ 아니므로 프로ì íŠ¸ %sì„(를) 삭제할 수 없습니다. ì „ì²´ 하위 "
+"트리를 삭제하려면 ê³„ë‹¨ì‹ ì˜µì…˜ì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"ì„œë¸ŒíŠ¸ë¦¬ì— ì‚¬ìš© ì„¤ì •ëœ í”„ë¡œì íŠ¸ê°€ 있으므로 프로ì íŠ¸ %(project_id)sì„(를) 사"
+"용 안함으로 설정할 수 없습니다."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "프로젝트 %s에 사용 안함으로 설정된 상위가 있어서 이를 사용할 수 없음"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"그룹ì—ì„œ 소스가 공급ë˜ê³  ì‚¬ìš©ìž ID별로 í•„í„°ë§ëœ í• ë‹¹ì„ ë‚˜ì—´í•  수 없습니다."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr "ìœ„ìž„ì„ í†µí•´ ë°œí–‰ëœ í† í°ìœ¼ë¡œ 요청 토í°ì„ 나열할 수 없습니다."
@@ -180,6 +278,9 @@ msgid ""
msgstr ""
"ìžì‹  ë’¤ì˜ ì²« 번째 매개변수와 ê°™ì€ ížŒíŠ¸ ëª©ë¡ ì—†ì´ ë“œë¼ì´ë²„ í˜¸ì¶œì„ ìžë¥¼ 수 ì—†ìŒ"
+msgid "Cannot update domain_id of a project that has children."
+msgstr "하위가 있는 프로젝트의 domain_id를 업데이트할 수 없습니다."
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -190,6 +291,9 @@ msgid ""
msgstr ""
"subtree_as_list ë° subtree_as_ids 조회 매개변수를 ë™ì‹œì— 사용할 수 없습니다."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "사용된 속성에만 계단식 업데이트가 허용됩니다."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr "ê²°í•©ì— íš¨ìœ¨ì ì¸ 그룹 필터는 í•­ìƒ ë¹ˆ 목ë¡ì„ ìƒì„±í•©ë‹ˆë‹¤."
@@ -200,6 +304,10 @@ msgid ""
msgstr "ê²°í•©ì— íš¨ìœ¨ì ì¸ ë„ë©”ì¸ê³¼ ìƒì† 필터는 í•­ìƒ ë¹ˆ 목ë¡ì„ ìƒì„±í•©ë‹ˆë‹¤."
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "/domains/%s/configì˜ êµ¬ì„± API 엔티티"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "%(type)sì„(를) 저장하는 ì¤‘ì— ì¶©ëŒì´ ë°œìƒí•¨ - %(details)s"
@@ -217,6 +325,14 @@ msgstr "%(target)s 대상에서 불변 속성 '%(attributes)s'을(를) 변경할
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"ID ì œê³µìž ID를 íŒë³„í•  수 없습니다. 구성 옵션 %(issuer_attribute)sì´(ê°€) 요청 "
+"í™˜ê²½ì— ì—†ìŠµë‹ˆë‹¤. "
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -280,9 +396,6 @@ msgstr "%(project_id)s 프로젝트를 찾을 수 없음"
msgid "Could not find region: %(region_id)s"
msgstr "%(region_id)s ë¦¬ì ¼ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-msgid "Could not find role"
-msgstr "ì—­í• ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -319,15 +432,45 @@ msgstr "%(version)s 버전을 찾을 수 없음"
msgid "Could not find: %(target)s"
msgstr "%(target)sì„(를) ì°¾ì„ ìˆ˜ ì—†ìŒ"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"ì—°í•© ì‚¬ìš©ìž íŠ¹ì„±ì„ ID ê°’ì— ë§µí•‘í•  수 없습니다. 추가 세부 ì‚¬í•­ì€ ì‚¬ìš©ëœ ë§µí•‘ "
+"ë˜ëŠ” 디버그 로그를 확ì¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"ìž„ì‹œ ì‚¬ìš©ìž ID를 설정하는 ì¤‘ì— ì‚¬ìš©ìžë¥¼ 맵핑할 수 없습니다. 맵핑 ê·œì¹™ì´ ì‚¬ìš©"
+"ìž ID/ì´ë¦„ì„ ì§€ì •í•´ì•¼ 하거나 REMOTE_USER 환경 변수를 설정해야 합니다. "
+
msgid "Could not validate the access token"
msgstr "액세스 토í°ì„ 유효성 ê²€ì¦í•  수 ì—†ìŒ"
msgid "Credential belongs to another user"
msgstr "ì‹ ìž„ ì •ë³´ê°€ 다른 사용ìžì— ì†í•¨"
+msgid "Credential signature mismatch"
+msgstr "ìžê²© ì¦ëª… 서명 불ì¼ì¹˜"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "/domains/%s/configì˜ ë°ì´í„°ë² ì´ìŠ¤"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"Libertyì—ì„œ %(namespace)rì˜ ìž…ë ¥ì ì„ 사용하기 위해 ì¸ì¦ í”ŒëŸ¬ê·¸ì¸ %(name)rì˜ "
+"ì§ì ‘ 가져오기는 ë” ì´ìƒ 사용ë˜ì§€ 않으므로, Nì—ì„œ ì œê±°ë  ìˆ˜ 있습니다."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"Libertyì—ì„œ %(namespace)rì˜ ìž…ë ¥ì ì„ 사용하기 위해 ë“œë¼ì´ë²„ %(name)rì˜ ì§ì ‘ "
+"가져오기는 ë” ì´ìƒ 사용ë˜ì§€ 않으므로, Nì—ì„œ ì œê±°ë  ìˆ˜ 있습니다."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -349,12 +492,15 @@ msgstr "도메인 ID가 %s일 수 없음"
msgid "Domain is disabled: %s"
msgstr "ë„ë©”ì¸ì„ 사용 안함: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "ë„ë©”ì¸ ë©”íƒ€ë°ì´í„°ê°€ LDAPì— ì˜í•´ 지ì›ë˜ì§€ ì•ŠìŒ"
+msgid "Domain name cannot contain reserved characters."
+msgstr "ë„ë©”ì¸ ì´ë¦„ì—는 ì˜ˆì•½ëœ ë¬¸ìžê°€ í¬í•¨ë  수 없습니다."
msgid "Domain scoped token is not supported"
msgstr "ë„ë©”ì¸ ë²”ìœ„ 지정 토í°ì€ 지ì›ë˜ì§€ ì•ŠìŒ"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "V8 ì—­í•  ë“œë¼ì´ë²„ì—서는 ë„ë©”ì¸ íŠ¹ì • ì—­í• ì´ ì§€ì›ë˜ì§€ ì•ŠìŒ"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
@@ -363,9 +509,6 @@ msgstr ""
"%(domain)s ë„ë©”ì¸ì— ì´ë¯¸ ì •ì˜ëœ êµ¬ì„±ì´ ìžˆìŒ - ë‹¤ìŒ íŒŒì¼ì„ 무시하십시오. "
"%(file)s."
-msgid "Domains are read-only against LDAP"
-msgstr "LDAPì— ëŒ€í•œ ë„ë©”ì¸ì´ ì½ê¸° 전용입니다."
-
msgid "Duplicate Entry"
msgstr "중복 항목"
@@ -374,9 +517,27 @@ msgid "Duplicate ID, %s."
msgstr "중복 ID, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "ì¤‘ë³µëœ í•­ëª©: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "중복 ì´ë¦„, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ì¤‘ë³µëœ ì›ê²© ID: %s"
+
+msgid "EC2 access key not found."
+msgstr "EC2 ì ‘ê·¼ 키를 ì°¾ì„ ìˆ˜ 없습니다."
+
+msgid "EC2 signature not supplied."
+msgstr "EC2 ì„œëª…ì´ ì œê³µë˜ì§€ 않았습니다."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr "--bootstrap-password ì¸ìˆ˜ë‚˜ OS_BOOTSTRAP_PASSWORD를 설정해야 합니다."
+
msgid "Enabled field must be a boolean"
msgstr "사용으로 ì„¤ì •ëœ í•„ë“œëŠ” 부울ì´ì–´ì•¼ 함"
@@ -404,10 +565,31 @@ msgstr ""
"%(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "íŒŒì¼ %(path)s 여는 중 오류 ë°œìƒ: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "í–‰: '%(line)s' 구문 ë¶„ì„ ì¤‘ 오류 ë°œìƒ: %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "규칙 %(path)s 구문 ë¶„ì„ ì¤‘ 오류 ë°œìƒ: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "메타ë°ì´í„° 파ì¼ì„ ì½ëŠ” ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ, %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"SQL ë“œë¼ì´ë²„를 사용하기 위해 ë„ë©”ì¸ %(domain)sì„(를) 등ë¡í•˜ëŠ” ì‹œë„ê°€ 초과ë˜ì—ˆ"
+"습니다. ë“œë¼ì´ë²„를 보유한 것으로 ë³´ì´ëŠ” 마지막 ë„ë©”ì¸ì€ %(last_domain)s입니"
+"다. í¬ê¸°í•˜ëŠ” 중"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "예ìƒëœ 사전 ë˜ëŠ” 목ë¡: %s"
@@ -450,6 +632,10 @@ msgstr ""
"올바르지 ì•Šì€ í† í°ì´ 있습니다. 프로ì íŠ¸ì™€ ë„ë©”ì¸ ë‘˜ 다 ë²”ìœ„ì— í¬í•¨ë©ë‹ˆë‹¤."
#, python-format
+msgid "Group %s not found in config"
+msgstr "êµ¬ì„±ì— ê·¸ë£¹ %sì„(를) ì°¾ì„ ìˆ˜ ì—†ìŒ"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr "ë„ë©”ì¸ íŠ¹ì • êµ¬ì„±ì— ëŒ€í•´ %(group)s ê·¸ë£¹ì´ ì§€ì›ë˜ì§€ ì•ŠìŒ"
@@ -482,6 +668,9 @@ msgid ""
"identifiers."
msgstr "ìŠ¹ì¸ IDì— ìˆ˜ì‹  ID 제공ìžê°€ í¬í•¨ë˜ì§€ 않습니다."
+msgid "Invalid EC2 signature."
+msgstr "올바르지 ì•Šì€ EC2 서명입니다."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
@@ -550,17 +739,12 @@ msgstr ""
msgid "Invalid signature"
msgstr "올바르지 ì•Šì€ ì„œëª…"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"%sì˜ ssl_cert_reqs ê°’ì´ ì˜¬ë°”ë¥´ì§€ ì•ŠìŒ, \"NONE\", \"OPTIONAL\", \"REQUIRED\" "
-"중 하나여야 함 "
-
msgid "Invalid user / password"
msgstr "올바르지 ì•Šì€ ì‚¬ìš©ìž / 비밀번호"
+msgid "Invalid username or TOTP passcode"
+msgstr "올바르지 ì•Šì€ ì‚¬ìš©ìž ì´ë¦„ ë˜ëŠ” TOTP 비밀번호"
+
msgid "Invalid username or password"
msgstr "올바르지 ì•Šì€ ì‚¬ìš©ìž ì´ë¦„ ë˜ëŠ” 비밀번호"
@@ -584,6 +768,18 @@ msgstr "LDAP %s 삭제"
msgid "LDAP %s update"
msgstr "LDAP %s ì—…ë°ì´íŠ¸"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr "변환 가능한 ìžì› idì˜ ê¸¸ì´ê°€ 최대 허용 문ìžì¸ 64보다 í¼"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"맵핑 %(mapping_id)sì˜ ë¡œì»¬ 섹션ì—ì„œ 존재하지 않는 ì›ê²© ì¼ì¹˜ë¥¼ 참조합니다(예: "
+"로컬 ì„¹ì…˜ì˜ {0})."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "키 %(target)sì— ëŒ€í•´ 잠금 제한시간 초과가 ë°œìƒí•¨"
@@ -602,6 +798,10 @@ msgid "Marker could not be found"
msgstr "마커를 ì°¾ì„ ìˆ˜ ì—†ìŒ"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "%s ë¶„ê¸°ì— ëŒ€í•œ 최대 계층 깊ì´ì— ë„달했습니다."
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "%sì—ì„œ 최대 잠금 ì‹œë„ê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤."
@@ -635,6 +835,9 @@ msgstr "도메인 프로젝트 중 하나를 지정해야 함"
msgid "Name field is required and cannot be empty"
msgstr "ì´ë¦„ 필드가 필요하며 비어 ìžˆì„ ìˆ˜ ì—†ìŒ"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "프로ì íŠ¸ ë„ë©”ì¸ ID와 프로ì íŠ¸ ë„ë©”ì¸ ì´ë¦„ì´ ì œê³µë˜ì§€ 않았습니다. "
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
@@ -663,6 +866,9 @@ msgstr "엔드포인트 %(endpoint_id)s과(와) 연관된 정책이 없습니다
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "신뢰 %(trust_id)sì— ëŒ€í•´ 남아 있는 ì‚¬ìš©ì´ ì—†ìŒ"
+msgid "No token in the request"
+msgstr "ìš”ì²­ì— í† í°ì´ ì—†ìŒ"
+
msgid "Non-default domain is not supported"
msgstr "ê¸°ë³¸ì´ ì•„ë‹Œ ë„ë©”ì¸ì€ 지ì›ë˜ì§€ ì•ŠìŒ"
@@ -688,9 +894,27 @@ msgid "Project (%s)"
msgstr "프로ì íŠ¸(%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "프로ì íŠ¸ ID를 ì°¾ì„ ìˆ˜ ì—†ìŒ: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "프로ì íŠ¸ 필드는 필수ì´ë¯€ë¡œ 비어 있어서는 안 ë©ë‹ˆë‹¤. "
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "프로ì íŠ¸ë¥¼ 사용 안함: %s"
+msgid "Project name cannot contain reserved characters."
+msgstr "프로ì íŠ¸ ì´ë¦„ì— ì˜ˆì•½ëœ ë¬¸ìžê°€ í¬í•¨ë  수 없습니다."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "조회 문ìžì—´ì´ UTF-8ë¡œ ì¸ì½”딩ë˜ì–´ 있지 ì•ŠìŒ"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr "그룹 %(group)sì—ì„œ 옵션 %(option)sì˜ ê¸°ë³¸ê°’ ì½ê¸°ëŠ” 지ì›ë˜ì§€ ì•ŠìŒ"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "신뢰ì—ì„œ 위임한 경우ì—만 재위임 허용"
@@ -702,6 +926,72 @@ msgstr ""
"%(redelegation_depth)dì˜ ë‚˜ë¨¸ì§€ 재위임 깊ì´ê°€ 허용 범위 [0..%(max_count)d]ì„"
"(를) 벗어남"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ admin_crud_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. admin_crud 확장"
+"ì€ ì´ì œ í•­ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.ini"
+"ì—ì„œ [pipeline:admin_api] ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ endpoint_filter_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. 엔드í¬ì¸íŠ¸ "
+"í•„í„° í™•ìž¥ì€ ì´ì œ í•­ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-"
+"paste.iniì—ì„œ [pipeline:api_v3] ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ federation_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. ì—°í•© í™•ìž¥ì€ ì´"
+"ì œ í•­ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.iniì—ì„œ "
+"[pipeline:api_v3]ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ oauth1_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. oauth1 í™•ìž¥ì€ ì´ì œ "
+"í•­ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.iniì—ì„œ "
+"[pipeline:api_v3]ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ revoke_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. 취소 í™•ìž¥ì€ ì´ì œ í•­"
+"ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.iniì—ì„œ "
+"[pipeline:api_v3]ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ simple_cert를 제거하십시오. PKI ë° PKIz í† í° ì œê³µìž"
+"는 ì´ì œ ë” ì´ìƒ 사용ë˜ì§€ 않으며 simple_cert는 ì´ëŸ¬í•œ í† í° ì œê³µìžë¥¼ 지ì›í•˜ëŠ” "
+"ë°ë§Œ 사용ë©ë‹ˆë‹¤. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.iniì—ì„œ [pipeline:"
+"api_v3]ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"붙여넣기 파ì´í”„ë¼ì¸ì—ì„œ user_crud_extensionì„ ì œê±°í•˜ì‹­ì‹œì˜¤. user_crud í™•ìž¥ì€ "
+"ì´ì œ í•­ìƒ ì‚¬ìš©í•  수 있습니다. O 릴리스ì—서는 제거ë˜ë¯€ë¡œ keystone-paste.iniì—"
+"ì„œ [pipeline:public_api] ì„¹ì…˜ì„ ì ì ˆí•˜ê²Œ ì—…ë°ì´íŠ¸í•˜ì‹­ì‹œì˜¤."
+
msgid "Request Token does not have an authorizing user id"
msgstr "요청 토í°ì— ì¸ì¦í•˜ëŠ” ì‚¬ìš©ìž IDê°€ ì—†ìŒ"
@@ -732,19 +1022,15 @@ msgid ""
"Requested redelegation depth of %(requested_count)d is greater than allowed "
"%(max_count)d"
msgstr ""
-"%(requested_count)dì˜ ìš”ì²­ëœ ìž¬ìœ„ìž„ 깊ì´ê°€ 허용ë˜ëŠ” %(max_count)d보다 깊ìŒ"
-
-#, python-format
-msgid "Role %s not found"
-msgstr "%s ì—­í• ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
+"%(requested_count)dì˜ ìš”ì²­ëœ ìž¬ìœ„ìž„ depthê°€ 허용ë˜ëŠ” %(max_count)d보다 깊ìŒ"
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
"be removed in the \"M\"-Release."
msgstr ""
-"eventletì„ í†µí•œ 키스톤 ì‹¤í–‰ì€ WSGI 서버 ì‹¤í–‰ì˜ í”Œë ˆì´ë²„ì— ìžˆëŠ” Kilo부터 ë” "
-"ì´ìƒ 사용ë˜ì§€ 않습니다(예: mod_wsgi). eventlet ì•„ëž˜ì˜ í‚¤ìŠ¤í†¤ì— ëŒ€í•œ 지ì›ì€ "
+"Eventletì„ í†µí•œ 키스톤 ì‹¤í–‰ì€ WSGI 서버 ì‹¤í–‰ì˜ í”Œë ˆì´ë²„ì— ìžˆëŠ” Kilo부터 ë” "
+"ì´ìƒ 사용ë˜ì§€ 않습니다(예: mod_wsgi). Eventlet ì•„ëž˜ì˜ í‚¤ìŠ¤í†¤ì— ëŒ€í•œ 지ì›ì€ "
"\"M\"-릴리스ì—ì„œ 제거ë©ë‹ˆë‹¤."
msgid "Scoping to both domain and project is not allowed"
@@ -783,6 +1069,27 @@ msgstr ""
"문ìžì—´ ê¸¸ì´ ì œí•œì„ ì´ˆê³¼í•©ë‹ˆë‹¤. '%(string)s' 문ìžì—´ 길ì´ê°€ ì—´ì˜ í•œë„ "
"%(type)s(CHAR(%(length)d))ì„(를) 초과합니다."
+msgid "Tenant name cannot contain reserved characters."
+msgstr "Tenant ì´ë¦„ì— ì˜ˆì•½ëœ ë¬¸ìžê°€ í¬í•¨ë  수 없습니다."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"%s í™•ìž¥ì´ keystone ì½”ì–´ì— ì´ë™ë˜ì—ˆìœ¼ë¯€ë¡œ 기본 keystone ë°ì´í„°ë² ì´ìŠ¤ 제어ì—ì„œ "
+"마ì´ê·¸ë ˆì´ì…˜ì„ 유지 관리합니다. keystone-manage db_sync ëª…ë ¹ì„ ì‚¬ìš©í•˜ì‹­ì‹œì˜¤."
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"'expires_at'ì€ ì§€ê¸ˆë³´ë‹¤ ì´ì „ì´ì–´ì„œëŠ” 안 ë©ë‹ˆë‹¤. 형ì‹ì´ 잘못ë˜ì—ˆê±°ë‚˜ 올바르지 "
+"않기 ë•Œë¬¸ì— ì„œë²„ê°€ ìš”ì²­ì„ ì¤€ìˆ˜í•  수 없습니다. í´ë¼ì´ì–¸íŠ¸ëŠ” 오류 ìƒíƒœë¡œ 간주ë©"
+"니다."
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr "--all ì˜µì…˜ì€ --domain-name 옵션과 함께 사용할 수 없습니다."
@@ -812,6 +1119,12 @@ msgstr ""
"요청한 ì¸ì¦ì„œë¥¼ 사용할 수 없습니다. 서버가 PKI 토í°ì„ 사용하지 않거나 ìž˜ëª»ëœ "
"êµ¬ì„±ì˜ ê²°ê³¼ë¡œ ì¸í•´ ë°œìƒí–ˆì„ 수 있습니다."
+msgid "The configured token provider does not support bind authentication."
+msgstr "êµ¬ì„±ëœ í† í° ì œê³µìžê°€ ë°”ì¸ë“œ ì¸ì¦ì„ 지ì›í•˜ì§€ 않습니다. "
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "ë„ë©”ì¸ ì—­í• ì„ ìˆ˜í–‰í•˜ëŠ” 프로ì íŠ¸ ìƒì„±ì€ v2ì—ì„œ 허용ë˜ì§€ 않습니다. "
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
@@ -855,12 +1168,9 @@ msgstr "non-oauth 매개변수가 없어야 함"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "ì¸ì‹ë˜ëŠ” Fernet 페ì´ë¡œë“œ ë²„ì „ì´ ì•„ë‹˜: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"ì´ëŠ” v2.0 Fernet 토í°ì´ 아닙니다. 신뢰, ë„ë©”ì¸ ë˜ëŠ” ì—°í•© 토í°ì˜ 경우 v3를 사"
-"용하십시오."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "ì¸ì‹ë˜ëŠ” Fernet í† í° %sì´(ê°€) 아님"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -885,6 +1195,9 @@ msgstr "토큰이 다른 사용자에 속함"
msgid "Token does not belong to specified tenant."
msgstr "토í°ì´ ì§€ì •ëœ í…Œë„ŒíŠ¸ì— ì†í•˜ì§€ 않습니다."
+msgid "Token version is unrecognizable or unsupported."
+msgstr "í† í° ë²„ì „ì´ ì¸ì‹ë˜ì§€ 않거나 지ì›ë˜ì§€ 않습니다. "
+
msgid "Trustee has no delegated roles."
msgstr "Trusteeì— ìœ„ìž„ëœ ì—­í• ì´ ì—†ìŠµë‹ˆë‹¤. "
@@ -917,8 +1230,8 @@ msgid ""
"Unable to access the keystone database, please check it is configured "
"correctly."
msgstr ""
-"키스톤 ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 액세스할 수 없습니다. ë°ì´í„°ë² ì´ìŠ¤ê°€ 제대로 구성ë˜ì–´ 있"
-"는지 확ì¸í•˜ì‹­ì‹œì˜¤. "
+"Keystone ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 액세스할 수 없습니다. ë°ì´í„°ë² ì´ìŠ¤ê°€ 제대로 구성ë˜ì–´ "
+"있는지 확ì¸í•˜ì‹­ì‹œì˜¤. "
#, python-format
msgid "Unable to consume trust %(trust_id)s, unable to acquire lock."
@@ -932,16 +1245,14 @@ msgstr ""
"리젼 %(region_id)s ë˜ëŠ” 하위 ë¦¬ì ¼ì— ì—°ê´€ëœ ì—”ë“œí¬ì¸íŠ¸ê°€ 있어 삭제할 수 없습니"
"다."
+msgid "Unable to downgrade schema"
+msgstr "스키마를 다운그레ì´ë“œí•  수 ì—†ìŒ"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr "%(mapping_id)s ë§µí•‘ì„ ì‚¬ìš©í•˜ëŠ” ì¤‘ì— ì˜¬ë°”ë¥¸ ê·¸ë£¹ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ "
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr "í’€ id %(id)sì—ì„œ %(seconds)s분 í›„ì— ì—°ê²°í•  수 없습니다."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "%s: ë„ë©”ì¸ ì„¤ì • 디렉토리를 ì°¾ì„ ìˆ˜ 없습니다."
@@ -978,7 +1289,7 @@ msgid ""
"Unexpected combination of grant attributes - User: %(user_id)s, Group: "
"%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s"
msgstr ""
-"grant ì†ì„±ì˜ 예ìƒì¹˜ 못한 ì¡°í•© - 사용ìž: %(user_id)s, 그룹: %(group_id)s, 프로"
+"Grant ì†ì„±ì˜ 예ìƒì¹˜ 못한 ì¡°í•© - 사용ìž: %(user_id)s, 그룹: %(group_id)s, 프로"
"ì íŠ¸: %(project_id)s, ë„ë©”ì¸: %(domain_id)s"
#, python-format
@@ -1000,21 +1311,37 @@ msgstr "알 수 없는 토큰 버전 %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "등ë¡ë˜ì§€ ì•Šì€ ì¢…ì†ì„±: %(targets)sì˜ %(name)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "`domain_id` ì—…ë°ì´íŠ¸ëŠ” 허용ë˜ì§€ 않습니다."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "`is_domain`ì˜ ì—…ë°ì´íŠ¸ëŠ” 허용ë˜ì§€ 않습니다. "
+
msgid "Update of `parent_id` is not allowed."
msgstr "`parent_id` ì—…ë°ì´íŠ¸ê°€ 허용ë˜ì§€ 않습니다."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "domain_idì˜ ì—…ë°ì´íŠ¸ëŠ” 루트 프로ì íŠ¸ì—만 허용ë©ë‹ˆë‹¤."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr "ë„ë©”ì¸ ì—­í• ì„ í•˜ëŠ” 프로ì íŠ¸ì˜ domain_id는 ì—…ë°ì´íŠ¸í•  수 없습니다."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr "SAML ì–´ì„¤ì…˜ì„ ìž‘ì„±í•  ë•Œ 프로ì íŠ¸ 범위 지정 í† í° ì‚¬ìš©"
-#, python-format
-msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
-msgstr "ì‚¬ìš©ìž %(u_id)sì´(는) 테넌트 %(t_id)sì— ëŒ€í•œ ê¶Œí•œì´ ì—†ìŠµë‹ˆë‹¤. "
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"ID ë“œë¼ì´ë²„ êµ¬ì„±ì„ ì‚¬ìš©í•˜ì—¬ ë™ì¼í•œ 할당 ë“œë¼ì´ë²„를 ìžë™ìœ¼ë¡œ 구성하는 ê¸°ëŠ¥ì€ "
+"ë” ì´ìƒ 사용ë˜ì§€ 않습니다. \"O\" 릴리스ì—서는 기본값(SQL)ê³¼ 다른 경우 할당 ë“œ"
+"ë¼ì´ë²„를 명시ì ìœ¼ë¡œ 구성해야 합니다."
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"ì‚¬ìš©ìž %(user_id)sì´(ê°€) 테넌트 %(tenant_id)sì—ì„œ ì—­í•  %(role_id)sì„(를) ì´"
-"미 가집니다. "
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "ì‚¬ìš©ìž %(u_id)sì´(는) tenant %(t_id)sì— ëŒ€í•œ ê¶Œí•œì´ ì—†ìŠµë‹ˆë‹¤. "
#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
@@ -1037,6 +1364,13 @@ msgstr "'%(group_id)s' 그룹에 '%(user_id)s' 사용자가 없음"
msgid "User IDs do not match"
msgstr "ì‚¬ìš©ìž IDê°€ ì¼ì¹˜í•˜ì§€ ì•ŠìŒ"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"ì‚¬ìš©ìž ID, ë„ë©”ì¸ IDê°€ í¬í•¨ëœ ì‚¬ìš©ìž ì´ë¦„ ë˜ëŠ” ë„ë©”ì¸ ì´ë¦„ì´ í¬í•¨ëœ ì‚¬ìš©ìž ì´"
+"ë¦„ì´ ëˆ„ë½ë˜ì–´ ì‚¬ìš©ìž ì¸ì¦ì„ 빌드할 수 없습니다. "
+
#, python-format
msgid "User is disabled: %s"
msgstr "사용ìžë¥¼ 사용 안함: %s"
@@ -1050,6 +1384,12 @@ msgstr "사용자는 trustee가 아닙니다."
msgid "User not found"
msgstr "사용ìžë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
+msgid "User not valid for tenant."
+msgstr "Tenant 사용ìžê°€ 올바르지 않습니다."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "ì‚¬ìš©ìž ì—­í• ì´ ì§€ì›ë˜ì§€ ì•ŠìŒ: tenant_id í•„ìš”"
+
#, python-format
msgid "User type %s not supported"
msgstr "ì‚¬ìš©ìž ìœ í˜• %sì´(ê°€) 지ì›ë˜ì§€ ì•ŠìŒ"
@@ -1061,6 +1401,14 @@ msgstr "요청한 조치를 수행할 권한이 없습니다."
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "요청한 조치(%(action)s)를 수행할 ê¶Œí•œì´ ì—†ìŠµë‹ˆë‹¤."
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"ê´€ë¦¬ìž í† í°ì„ 사용하여 ìžì›ì„ ìƒì„±í•˜ë ¤ 했습니다. ì´ í† í°ì´ ë„ë©”ì¸ì— 없으므"
+"ë¡œ, ì´ ìžì›ì´ ì†í•  ë„ë©”ì¸ì„ 명시ì ìœ¼ë¡œ í¬í•¨ì‹œì¼œì•¼ 합니다."
+
msgid "`key_mangler` functions must be callable."
msgstr "`key_mangler` ê¸°ëŠ¥ì„ í˜¸ì¶œí•  수 있어야 합니다."
@@ -1076,45 +1424,19 @@ msgstr "auth_type이 Negotiate가 아님"
msgid "authorizing user does not have role required"
msgstr "ì¸ì¦í•˜ëŠ” 사용ìžì—게 필요한 ì—­í• ì´ ì—†ìŒ"
-msgid "cache_collection name is required"
-msgstr "cache_collection ì´ë¦„ì´ í•„ìš”í•¨"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
"사용 안함으로 ì„¤ì •ëœ í”„ë¡œì íŠ¸ê°€ í¬í•¨ëœ ë¶„ê¸°ì— í”„ë¡œì íŠ¸ë¥¼ 작성할 수 없습니다. "
"%s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "ìƒìœ„와 다른 ë„ë©”ì¸ ë‚´ì— í”„ë¡œì íŠ¸ë¥¼ 작성할 수 없습니다."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"사용으로 ì„¤ì •ëœ ë„ë©”ì¸ì„ 삭제할 수 없습니다. 먼저 해당 ë„ë©”ì¸ì„ 사용 안함으"
-"로 설정하십시오."
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr "계층 ë‚´ì˜ ë¦¬í”„ê°€ 아니므로 프로ì íŠ¸ %sì„(를) 삭제할 수 없습니다."
-
#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"ì„œë¸ŒíŠ¸ë¦¬ì— ì‚¬ìš© ì„¤ì •ëœ í”„ë¡œì íŠ¸ê°€ 있어서 프로ì íŠ¸ %sì„(를) 사용 안함으로 설정"
-"í•  수 ì—†ìŒ"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "프로ì íŠ¸ %sì— ì‚¬ìš© 안함으로 ì„¤ì •ëœ ìƒìœ„ê°€ 있어서 ì´ë¥¼ 사용할 수 ì—†ìŒ"
-
-msgid "database db_name is required"
-msgstr "database db_nameì´ í•„ìš”í•¨"
-
-msgid "db_hosts value is required"
-msgstr "db_hosts ê°’ì´ í•„ìš”í•¨"
-
-msgid "delete the default domain"
-msgstr "기본 ë„ë©”ì¸ ì‚­ì œ"
+"ë„ë©”ì¸ ì—­í• ì„ í•˜ëŠ” 사용 ì„¤ì •ëœ í”„ë¡œì íŠ¸ë¥¼ 삭제할 수 없습니다. 프로ì íŠ¸ %sì„"
+"(를) 먼저 사용하지 않게 설정하십시오."
#, python-format
msgid "group %(group)s"
@@ -1126,33 +1448,29 @@ msgid ""
msgstr ""
"idp_contact_typeì€ [기술, 기타, 지ì›, 관리 ë˜ëŠ” 비용 청구 중 하나여야 합니다."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "mongo_ttl_secondsì— ëŒ€í•´ 정수 ê°’ì´ ì˜ˆìƒë¨ "
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "w(write concern ì†ì„±)ì— ëŒ€í•´ 정수 ê°’ì´ ì˜ˆìƒë¨"
-
#, python-format
msgid "invalid date format %s"
msgstr "올바르지 ì•Šì€ ë‚ ì§œ í˜•ì‹ %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "%s ë¶„ê¸°ì— ëŒ€í•œ 최대 계층 깊ì´ì— ë„달했습니다."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr "ì´ë¦„ì´ ê°™ì€ ë‘ í”„ë¡œì íŠ¸ê°€ ë„ë©”ì¸ ì—­í• ì„ ìˆ˜í–‰í•  수 ì—†ìŒ: %s"
+
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr "í•œ ë„ë©”ì¸ì— ì´ë¦„ì´ ê°™ì€ ë‘ í”„ë¡œì íŠ¸ê°€ ìžˆì„ ìˆ˜ ì—†ìŒ: %s"
-msgid "no ssl support available"
-msgstr "사용 가능한 ssl 지ì›ì´ ì—†ìŒ"
+msgid "only root projects are allowed to act as domains."
+msgstr "루트 프로ì íŠ¸ë§Œ ë„ë©”ì¸ ì—­í• ì„ ìˆ˜í–‰í•  수 있습니다."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "%(group)s ê·¸ë£¹ì˜ %(option)s 옵션"
-msgid "pad must be single character"
-msgstr "패드는 ë‹¨ì¼ ë¬¸ìžì—¬ì•¼ 함"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "채워진 base64url í…스트는 4ì˜ ë°°ìˆ˜ì—¬ì•¼ 함"
-
msgid "provided consumer key does not match stored consumer key"
msgstr "ì œê³µëœ ì´ìš©ìž 키가 ì €ìž¥ëœ ì´ìš©ìž 키와 ì¼ì¹˜í•˜ì§€ ì•ŠìŒ"
@@ -1162,18 +1480,12 @@ msgstr "제공된 요청 키가 저장된 요청 키와 일치하지 않음"
msgid "provided verifier does not match stored verifier"
msgstr "ì œê³µëœ í™•ì¸ìžê°€ ì €ìž¥ëœ í™•ì¸ìžì™€ ì¼ì¹˜í•˜ì§€ ì•ŠìŒ "
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "ë¦¬ì ¼ì´ dogpile.cache.CacheRegion ìœ í˜•ì´ ì•„ë‹˜ "
-
msgid "remaining_uses must be a positive integer or null."
-msgstr "remaining_uses는 ì–‘ì˜ ì •ìˆ˜ ë˜ëŠ” ë„ì´ì–´ì•¼ 합니다."
+msgstr "remaining_uses는 ì–‘ì˜ ì •ìˆ˜ ë˜ëŠ” nullì´ì–´ì•¼ 합니다."
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "ìž¬ìœ„ìž„ì„ í—ˆìš©í•˜ëŠ” 경우 remaining_uses를 설정하지 않아야 함"
-msgid "replicaset_name required when use_replica is True"
-msgstr "use_replicaê°€ Trueì¸ ê²½ìš° replicaset_nameì´ í•„ìš”í•¨ "
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1186,19 +1498,11 @@ msgid "rescope a scoped token"
msgstr "범위 ì§€ì •ëœ í† í°ì˜ 범위 재지정"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"í…스트가 4ì˜ ë°°ìˆ˜ì´ì§€ë§Œ 패드 \"%s\"ì´(ê°€) ë‘ ë²ˆì§¸ ì•žì—서부터 마지막 문ìžê¹Œì§€ "
-"ë°œìƒí•¨"
+msgid "role %s is not defined"
+msgstr "ì—­í•  %sì´(ê°€) ì •ì˜ë˜ì§€ ì•ŠìŒ"
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
-"í…스트가 4ì˜ ë°°ìˆ˜ì´ì§€ë§Œ 패드 \"%s\"ì´(ê°€) 비패드 마지막 ë¬¸ìž ì•žì—ì„œ ë°œìƒí•¨"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "í…스트가 4ì˜ ë°°ìˆ˜ì´ì§€ë§Œ 패드 \"%s\"ì„(를) í¬í•¨í•¨"
+msgid "scope.project.id must be specified if include_subtree is also specified"
+msgstr "include_subtreeë„ ì§€ì •ëœ ê²½ìš° scope.project.id를 지정해야 함"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1211,3 +1515,16 @@ msgstr "tls_cacertfile %s를 찾을 수 없스며, 그런 파일이 없습니다
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "í† í° ì°¸ì¡°ëŠ” KeystoneToken 유형ì´ì–´ì•¼ 합니다. %sì„(를) 가져왔습니다."
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"Mitakaì—ì„œ domain_id ì—…ë°ì´íŠ¸ëŠ” ë” ì´ìƒ 사용ë˜ì§€ 않으므로, Oì—ì„œ 제거ë©ë‹ˆë‹¤."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"%(func_name)rì— ëŒ€í•œ 함수 서명ì—ì„œ %(param_name)rì„(를) 찾기 위해 유효성 ê²€ì¦"
+"하고 예ìƒí–ˆìŠµë‹ˆë‹¤. "
diff --git a/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
index c57f0c55..0f2ca85c 100644
--- a/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/pl_PL/LC_MESSAGES/keystone-log-critical.po
@@ -6,20 +6,20 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Polish (Poland)\n"
-"Language: pl-PL\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: pl-PL\n"
"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 "
"|| n%100>=20) ? 1 : 2);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Polish (Poland)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
index f3b25b5e..6ed0adbe 100644
--- a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Portuguese (Brazil)\n"
-"Language: pt-BR\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: pt-BR\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Portuguese (Brazil)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
index 8bda14f0..49a2f8ad 100644
--- a/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
@@ -1,33 +1,47 @@
-# Portuguese (Brazil) translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
# Gabriel Wainer, 2013
+# Gabriel Wainer, 2013
# Lucas Ribeiro <lucasribeiro1990@gmail.com>, 2014
# Volmar Oliveira Junior <volmar.oliveira.jr@gmail.com>, 2013
-# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Volmar Oliveira Junior <volmar.oliveira.jr@gmail.com>, 2013
+# Raildo Mascena <raildom@gmail.com>, 2015. #zanata
+# Carlos Marques <marquesc@br.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: pt_BR\n"
-"Language-Team: Portuguese (Brazil)\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-05-02 09:08+0000\n"
+"Last-Translator: Carlos Marques <marquesc@br.ibm.com>\n"
+"Language: pt-BR\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Portuguese (Brazil)\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "O %(driver)s não é uma versão de driver suportada"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr ""
+"O nome %(entity)s não pode conter os caracteres reservados a seguir: "
+"%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -46,10 +60,14 @@ msgid ""
"%(mod_name)s doesn't provide database migrations. The migration repository "
"path at %(path)s doesn't exist or isn't a directory."
msgstr ""
-"%(mod_name)s não fornece migrações de banco de dados. O caminho do "
+"O %(mod_name)s não fornece migrações de banco de dados. O caminho do "
"repositório de migração %(path)s não existe ou não é um diretório."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s não implica %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s não pode ter menos de %(min_length)s caracteres."
@@ -62,23 +80,37 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s não deve ter mais de %(max_length)s caracteres."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s não pode ser uma função implícita"
+
+#, python-format
msgid "%s cannot be empty."
-msgstr "%s não pode estar vazio."
+msgstr "%s não pode ficar vazio."
#, python-format
msgid "%s extension does not exist."
-msgstr "Extensão %s não existe."
+msgstr "A extensão %s não existe."
#, python-format
msgid "%s field is required and cannot be empty"
-msgstr "campo %s é obrigatório e não pode estar vazio"
+msgstr "O campo %s é obrigatório e não pode ficar vazio"
#, python-format
msgid "%s field(s) cannot be empty"
-msgstr "%s campo(s) não podem estar vazios"
+msgstr "Os campos %s não podem ficar vazios"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Desative o modo de depuração para suprimir esses detalhes.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"%s para o backend de identidade LDAP foi descontinuado na liberação do "
+"Mitaka a favor do acesso LDAP de identidade somente leitura. Ele será "
+"removido na liberação \"O\"."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(Desative o modo insecure_debug para suprimir esses detalhes)."
msgid "--all option cannot be mixed with other options"
msgstr "A opção --all não pode ser combinada com outras opções"
@@ -89,13 +121,13 @@ msgstr ""
"serviços."
msgid "Access token is expired"
-msgstr "Token de acesso expirou"
+msgstr "O token de acesso expirou"
msgid "Access token not found"
msgstr "Token de acesso não encontrado"
msgid "Additional authentications steps required."
-msgstr "Passos de autenticação adicionais requeridos."
+msgstr "Passos de autenticação adicionais necessários."
msgid "An unexpected error occurred when retrieving domain configs"
msgstr "Ocorreu um erro inesperado ao recuperar as configurações de domínio"
@@ -116,7 +148,8 @@ msgstr ""
"%(exception)s"
msgid "An unhandled exception has occurred: Could not find metadata."
-msgstr "Uma exceção não tratada ocorreu: Não foi possível encontrar metadados."
+msgstr ""
+"Ocorreu uma exceção não manipulada: Não foi possível encontrar metadados."
msgid "At least one option must be provided"
msgstr "Pelo menos uma opção deve ser fornecida"
@@ -125,7 +158,18 @@ msgid "At least one option must be provided, use either --all or --domain-name"
msgstr "Pelo menos uma opção deve ser fornecida, use --all ou --domain-name"
msgid "At least one role should be specified."
-msgstr "Pelo menos uma função deve ser especificada."
+msgstr "Pelo menos um papel deve ser especificado."
+
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"Uma tentativa de seleção de driver automática para designação com base na "
+"opção [identity]\\driver falhou porque o driver %s não foi localizado. "
+"Configure o [assignment]/driver para um driver válido na configuração do "
+"keystone."
msgid "Attempted to authenticate with an unsupported method."
msgstr "Tentativa de autenticação com um método não suportado."
@@ -140,6 +184,14 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "Erro do plugin de autenticação."
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"O backend `%(backend)s`não é um backend memcached válido. Backends válidos: "
+"%(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
"Não é possível autorizar um token de solicitação com um token emitido por "
@@ -152,9 +204,6 @@ msgstr "Não é possível alterar %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "Não é possível alterar o ID do Domínio"
-msgid "Cannot change consumer secret"
-msgstr "Não é possível alterar segredo do consumidor"
-
msgid "Cannot change user ID"
msgstr "Não é possível alterar o ID do usuário"
@@ -162,12 +211,68 @@ msgid "Cannot change user name"
msgstr "Não é possível alterar o nome de usuário"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "Não é possível criar um terminal com uma URL inválida: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "Não é possível criar o projeto com o pai: %(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "Não é possível duplicar o nome %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"Não é possível criar o projeto porque ele especifica seu proprietário como "
+"domínio %(domain_id)s, mas especifica um pai em um domínio diferente "
+"(%(parent_domain_id)s)."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"Não é possível criar um projeto porque seu pai (%(domain_id)s) está agindo "
+"como um domínio, mas o parent_id (%(parent_id)s) especificado do projeto não "
+"corresponde com esse domain_id."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr ""
+"Não é possível excluir um domínio que esteja ativado, desative-o primeiro."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Não é possível excluir o projeto%(project_id)s porque sua subárvore contém "
+"projetos ativados."
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"Não é possível excluir o projeto %s porque ele não é uma folha na "
+"hierarquia. Use a opção em cascata se desejar excluir uma subárvore inteira."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Não é possível desativar o projeto%(project_id)s porque sua subárvore "
+"contém projetos ativados."
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "Não é possível ativar o projeto %s porque ele possui pais desativados"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"Não é possível listar designações originadas a partir de grupos e filtradas "
+"pelo ID do usuário."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
@@ -180,15 +285,18 @@ msgstr "Não é possível abrir o certificado %(cert_file)s. Motivo: %(reason)s"
#, python-format
msgid "Cannot remove role that has not been granted, %s"
-msgstr "Não é possível remover role que não foi concedido, %s"
+msgstr "Não é possível remover a função que não foi concedida, %s"
msgid ""
"Cannot truncate a driver call without hints list as first parameter after "
"self "
msgstr ""
-"Não é possível truncar uma chamada de driver sem lista de sugestões como "
+"Não é possível truncar uma chamada de driver sem uma lista de sugestões como "
"primeiro parâmetro após self "
+msgid "Cannot update domain_id of a project that has children."
+msgstr "Não é possível atualizar domain_id de um projeto que possua filhos."
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -201,17 +309,24 @@ msgstr ""
"Não é possível usar parâmetros de consulta subtree_as_list e subtree_as_ids "
"ao mesmo tempo."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "A atualização em cascata é permitida somente para atributo ativado."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
-"Combinar efetivo e filtro de grupo sempre resultará em uma lista vazia."
+"Combinar filtros efetivos e de grupo sempre resultará em uma lista vazia."
msgid ""
"Combining effective, domain and inherited filters will always result in an "
"empty list."
msgstr ""
-"Combinar efetivo, domínio e filtros herdados sempre resultará em uma lista "
-"vazia."
+"Combinar filtros efetivos, de domínio e herdados sempre resultará em uma "
+"lista vazia."
+
+#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "Entidade de API de configuração em /domains/%s/config"
#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
@@ -220,7 +335,7 @@ msgstr "Ocorreu um conflito ao tentar armazenar %(type)s -%(details)s"
#, python-format
msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
msgstr ""
-"IDs de região de conflito especificados: \"%(url_id)s\" != \"%(ref_id)s\""
+"IDs de região conflitantes especificados: \"%(url_id)s\" != \"%(ref_id)s\""
msgid "Consumer not found"
msgstr "Consumidor não encontrado"
@@ -229,11 +344,20 @@ msgstr "Consumidor não encontrado"
msgid ""
"Could not change immutable attribute(s) '%(attributes)s' in target %(target)s"
msgstr ""
-"Não foi possível alterar o atributo imutável '%(attributes)s' no destino "
+"Não foi possível alterar os atributos imutáveis '%(attributes)s' no destino "
"%(target)s"
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"Não foi possível determinar o ID do Provedor de Identidade. A opção de "
+"configuração %(issuer_attribute)s não foi encontrada no ambiente da "
+"solicitação."
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -300,9 +424,6 @@ msgstr "Não foi possível localizar o projeto: %(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "Não foi possível localizar a região: %(region_id)s"
-msgid "Could not find role"
-msgstr "Não é possível encontrar role"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -339,20 +460,54 @@ msgstr "Não foi possível localizar a versão: %(version)s"
msgid "Could not find: %(target)s"
msgstr "Não foi possível localizar: %(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"Não foi possível mapear nenhuma propriedade do usuário federado para valores "
+"de identidade. Verifique os logs de depuração ou o mapeamento usado para "
+"obter detalhes adicionais"
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"Não foi possível mapear o usuário ao configurar a identidade do usuário "
+"efêmera. As regras de mapeamento devem especificar o ID/nome do usuário ou "
+"a variável de ambiente REMOTE_USER deve ser configurada."
+
msgid "Could not validate the access token"
msgstr "Não foi possível validar o token de acesso"
msgid "Credential belongs to another user"
-msgstr "A credencial pertence à outro usuário"
+msgstr "A credencial pertence a outro usuário"
+
+msgid "Credential signature mismatch"
+msgstr "Incompatibilidade de assinatura de credencial"
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "Banco de dados em /domains/%s/config"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"A importação direta de um plug-in de autoria %(name)r foi descontinuada a "
+"partir do Liberty a favor de seu ponto de entrada de %(namespace)r e pode "
+"ser removida no N."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"A importação direta de um driver %(name)r foi descontinuada a partir do "
+"Liberty a favor de seu ponto de entrada de %(namespace)r e pode ser removida "
+"no N."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
msgstr ""
-"A desativação de uma entidade em que o atributo ‘enable' é ignorado pelo "
+"A desativação de uma entidade em que o atributo ‘enable' é ignorado pela "
"configuração."
#, python-format
@@ -371,22 +526,23 @@ msgstr "O domínio não pode ter o ID de %s"
msgid "Domain is disabled: %s"
msgstr "O domínio está desativado: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Metadados de domínio não suportados por LDAP"
+msgid "Domain name cannot contain reserved characters."
+msgstr "O nome do domínio não pode conter caracteres reservados."
msgid "Domain scoped token is not supported"
msgstr "O token de escopo de domínio não é suportado"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr ""
+"Funções específicas de domínio não são suportadas no driver de função da V8"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
msgstr ""
-"Domínio: %(domain)s já possui uma configuração definida - ignorando arquivo: "
-"%(file)s."
-
-msgid "Domains are read-only against LDAP"
-msgstr "Domínios são somente leitura no LDAP"
+"O domínio: %(domain)s já possui uma configuração definida - ignorando "
+"arquivo: %(file)s."
msgid "Duplicate Entry"
msgstr "Entrada Duplicada"
@@ -396,14 +552,34 @@ msgid "Duplicate ID, %s."
msgstr "ID duplicado, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "Entrada duplicada: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "Nome duplicado, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ID remoto duplicado: %s"
+
+msgid "EC2 access key not found."
+msgstr "Chave de acesso EC2 não encontrada."
+
+msgid "EC2 signature not supplied."
+msgstr "Assinatura EC2 não fornecida."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"O argumento de senha de autoinicialização ou OS_BOOTSTRAP_PASSWORD deve ser "
+"configurado."
+
msgid "Enabled field must be a boolean"
-msgstr "Campo habilitado precisa ser um booleano"
+msgstr "O campo habilitado precisa ser um booleano"
msgid "Enabled field should be a boolean"
-msgstr "Campo habilitado deve ser um booleano"
+msgstr "O campo habilitado deve ser um booleano"
#, python-format
msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
@@ -413,10 +589,12 @@ msgid "Endpoint Group Project Association not found"
msgstr "Associação de Projeto do Grupo do Terminal não localizada"
msgid "Ensure configuration option idp_entity_id is set."
-msgstr "Assegure que a opção de configuração idp_entity_id esteja definida."
+msgstr ""
+"Assegure-se de que a opção de configuração idp_entity_id esteja definida."
msgid "Ensure configuration option idp_sso_endpoint is set."
-msgstr "Assegure que a opção de configuração idp_sso_endpoint esteja definida."
+msgstr ""
+"Assegure-se de que a opção de configuração idp_sso_endpoint esteja definida."
#, python-format
msgid ""
@@ -426,10 +604,30 @@ msgstr ""
"arquivo: %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Erro ao abrir arquivo %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Erro ao analisar a linha %(line)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Erro ao analisar regras %(path)s: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "Erro ao ler arquivo de metadados, %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Tentativas excedidas de registrar o domínio %(domain)s para usar SQL driver, "
+"o ultimo domínio que parece ter tido foi %(last_domain)s, desistindo"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "Esperado dict ou list: %s"
@@ -438,7 +636,7 @@ msgid ""
"Keystone configuration."
msgstr ""
"Certificados de assinatura esperados não estão disponíveis no servidor. "
-"Verifique configuração de Keystone."
+"Verifique a configuração de Keystone."
#, python-format
msgid ""
@@ -447,8 +645,8 @@ msgid ""
"client is assumed to be in error."
msgstr ""
"Esperando localizar %(attribute)s em %(target)s - o servidor não pôde "
-"obedecer à solicitação porque ela está malformada ou de alguma maneira "
-"incorreta. O cliente deve estar em erro."
+"atender à solicitação porque ela está malformada ou de outra maneira "
+"incorreta. Supõe-se que o cliente está em erro."
#, python-format
msgid "Failed to start the %(name)s server"
@@ -469,7 +667,11 @@ msgstr ""
"deve ser configurado para delegar novamente uma confiança"
msgid "Found invalid token: scoped to both project and domain."
-msgstr "Token inválido encontrado: escopo para ambos o projeto e o domínio."
+msgstr "Token inválido encontrado: escopo definido para o projeto e o domínio."
+
+#, python-format
+msgid "Group %s not found in config"
+msgstr "Grupo %s não localizado na configuração"
#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
@@ -481,8 +683,8 @@ msgid ""
"Group %(group_id)s returned by mapping %(mapping_id)s was not found in the "
"backend."
msgstr ""
-"Grupo %(group_id)s retornou mapeando %(mapping_id)s não foi localizado no "
-"backend."
+"O grupo %(group_id)s retornado pelo mapeamento %(mapping_id)s não foi "
+"localizado no backend."
#, python-format
msgid ""
@@ -507,10 +709,13 @@ msgstr ""
"O identificador do provedor de identidade recebido não está incluído entre "
"os identificadores aceitos."
+msgid "Invalid EC2 signature."
+msgstr "Assinatura EC2 inválida."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
-"Opção de certificado LADP TLS inválida: %(option)s. Escolha uma de: "
+"Opção de certificados LADP TLS inválida: %(option)s. Escolha uma de: "
"%(options)s"
#, python-format
@@ -576,23 +781,18 @@ msgstr ""
msgid "Invalid signature"
msgstr "Assinatura inválida"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"valor ssl_cert_reqs inválido de %s, deve ser um de \"NONE\", \"OPTIMAL\", "
-"\"REQUIRED\""
-
msgid "Invalid user / password"
msgstr "Usuário / senha inválido"
+msgid "Invalid username or TOTP passcode"
+msgstr "Nome de usuário ou passcode TOTP inválido"
+
msgid "Invalid username or password"
msgstr "Nome de usuário ou senha inválidos"
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr "Região KVS %s já está configurado. Não é possível reconfigurar."
+msgstr "A região KVS %s já está configurada. Não é possível reconfigurar."
#, python-format
msgid "Key Value Store not configured: %s"
@@ -610,6 +810,20 @@ msgstr "Exclusão de LDAP %s"
msgid "LDAP %s update"
msgstr "Atualização de LDAP %s"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"O comprimento do recurso transformável id > 64, que é o máximo de caracteres "
+"permitidos"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"A seção local no mapeamento %(mapping_id)s refere-se a uma correspondência "
+"remota que não existe (por exemplo, '{0}' em uma seção local)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "Ocorreu um tempo limite de bloqueio para a chave, %(target)s"
@@ -617,16 +831,21 @@ msgstr "Ocorreu um tempo limite de bloqueio para a chave, %(target)s"
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
msgstr ""
-"Chave de bloqueio deve corresponder à chave de destino: %(lock)s !=%(target)s"
+"A chave de bloqueio deve corresponder à chave de destino: %(lock)s !="
+"%(target)s"
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
msgstr ""
-"URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para "
+"URL de terminal mal-formada (%(endpoint)s), consulte o log ERROR para obter "
"detalhes."
msgid "Marker could not be found"
-msgstr "Marcador não pôde ser encontrado"
+msgstr "O marcador não pôde ser encontrado"
+
+#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "Profundidade máx. de hierarquia atingida para a ramificação %s."
#, python-format
msgid "Maximum lock attempts on %s occurred."
@@ -638,7 +857,7 @@ msgstr "O membro %(member)s já é membro do grupo %(group)s"
#, python-format
msgid "Method not callable: %s"
-msgstr "Método não pode ser chamado: %s"
+msgstr "O método não pode ser chamado: %s"
msgid "Missing entity ID from environment"
msgstr "ID da entidade ausente a partir do ambiente"
@@ -647,8 +866,8 @@ msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
"this parameter is advised."
msgstr ""
-"A modificação de \"redelegation_count\" é proibida. É recomendado omitir "
-"este parâmetro."
+"A modificação de \"redelegation_count\" na nova delegação é proibida. É "
+"recomendado omitir este parâmetro."
msgid "Multiple domains are not supported"
msgstr "Múltiplos domínios não são suportados"
@@ -660,15 +879,20 @@ msgid "Must specify either domain or project"
msgstr "Deve especificar o domínio ou projeto"
msgid "Name field is required and cannot be empty"
-msgstr "Campo nome é requerido e não pode ser vazio"
+msgstr "O campo Nome é obrigatório e não pode ficar vazio"
+
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr ""
+"Nem o ID do Domínio do Projeto nem o Nome do Domínio do Projeto foi "
+"fornecido."
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
msgstr ""
-"Nenhum cabeçalho de autorização foi localizado, não é possível continuar com "
-"chamadas relacionadas OAuth, se estiver executando sob HTTPd ou Apache, se "
-"WSGIPassAuthorization for configurado para Ligado."
+"Nenhum cabeçalho de autorização foi localizado; não é possível continuar com "
+"chamadas relacionadas OAuth. Se estiver executando sob HTTPd ou Apache, "
+"certifique-se de que WSGIPassAuthorization esteja configurado para Ligado."
msgid "No authenticated user"
msgstr "Nenhum usuário autenticado"
@@ -676,8 +900,8 @@ msgstr "Nenhum usuário autenticado"
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
msgstr ""
-"Nenhuma chave de criptografia foi localizada; execute keystone-manage "
-"fernet_setup para autoinicialização um."
+"Nenhuma chave de criptografia localizada; execute keystone-manage "
+"fernet_setup para autoinicializar uma."
msgid "No options specified"
msgstr "Nenhuma opção especificada"
@@ -690,6 +914,9 @@ msgstr "Nenhuma política associada ao terminal %(endpoint_id)s."
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "Nenhum uso restante para confiança: %(trust_id)s"
+msgid "No token in the request"
+msgstr "Não existe token na solicitação."
+
msgid "Non-default domain is not supported"
msgstr "O domínio não padrão não é suportado"
@@ -701,8 +928,8 @@ msgid ""
"Option %(option)s found with no group specified while checking domain "
"configuration request"
msgstr ""
-"A opção %(option)s localizada sem grupo especificado durante a verificação "
-"de domínio solicitação de configuração"
+"Opção %(option)s localizada sem grupo especificado durante a verificação de "
+"solicitação de configuração de domínio"
#, python-format
msgid ""
@@ -717,22 +944,112 @@ msgid "Project (%s)"
msgstr "Projeto (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "ID de projeto não encontrado: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "O campo projeto é necessário e não pode ficar vazio."
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "O projeto está desativado: %s"
+msgid "Project name cannot contain reserved characters."
+msgstr "O nome do projeto não pode conter caracteres reservados."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "A sequência de consulta não está codificada em UTF-8 "
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"Não é suportado ler o padrão para a opção %(option)s no grupo %(group)s"
+
msgid "Redelegation allowed for delegated by trust only"
-msgstr "Nova delegação permitida para delegado pela confiança somente"
+msgstr "Nova delegação permitida para delegado somente pelo fiador"
#, python-format
msgid ""
"Remaining redelegation depth of %(redelegation_depth)d out of allowed range "
"of [0..%(max_count)d]"
msgstr ""
-"Profundidade da redelegação restante do %(redelegation_depth)d fora do "
+"Profundidade da nova delegação restante do %(redelegation_depth)d fora do "
"intervalo permitido de [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova a admin_crud_extension do pipeline de colagem, já que a extensão "
+"admin_crud agora está sempre disponível. Atualize a seção [pipeline:"
+"admin_api] no keystone-paste.ini de acordo, já que ela será removida da "
+"liberação O."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Remova a endpoint_filter_extension do pipeline de colagem, já que a extensão "
+"de filtro de terminal agora está sempre está disponível. Atualize a seção "
+"[pipeline:api_v3] no keystone-paste.ini de acordo, já que ela será removida "
+"da liberação O."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova a federation_extension do pipeline de colagem, já que a extensão de "
+"federação agora está sempre está disponível. Atualize a seção [pipeline:"
+"api_v3] no keystone-paste.ini de acordo, já que ela será removida da "
+"liberação O."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova oauth1_extension do pipeline de colagem, já que a extensão oauth1 "
+"agora está sempre está disponível. Atualize a seção [pipeline:api_v3] no "
+"keystone-paste.ini de acordo, já que ela será removida da liberação O."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova revoke_extension do pipeline de colagem, já que a extensão de "
+"revogação agora está sempre está disponível. Atualize a seção [pipeline:"
+"api_v3] no keystone-paste.ini de acordo, já que ela será removida da "
+"liberação O."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova simple_cert do pipeline de colagem, já que os provedores PKI e PKIz "
+"estão agora descontinuados e simple_cert era usado somente em suporte a "
+"esses provedores de token. Atualize a seção [pipeline:api_v3] no keystone-"
+"paste.ini de acordo, já que ela será removida da liberação O."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Remova a user_crud_extension do pipeline de colagem, já que a extensão "
+"user_crud agora está sempre disponível. Atualize a seção [pipeline:"
+"public_api] no keystone-paste.ini de acordo, já que ela será removida da "
+"liberação O."
+
msgid "Request Token does not have an authorizing user id"
-msgstr "Token de Requisição não possui um ID de usuário autorizado"
+msgstr "O Token de Solicitação não possui um ID de usuário autorizado"
#, python-format
msgid ""
@@ -740,18 +1057,18 @@ msgid ""
"server could not comply with the request because the attribute size is "
"invalid (too large). The client is assumed to be in error."
msgstr ""
-"Atributo de requisição %(attribute)s deve ser menor ou igual a %(size)i. O "
-"servidor não pôde atender a requisição porque o tamanho do atributo é "
-"inválido (muito grande). Assume-se que o cliente está em erro."
+"O atributo de solicitação %(attribute)s deve ser menor ou igual a %(size)i. "
+"O servidor não pôde atender à solicitação porque o tamanho do atributo é "
+"inválido (muito grande). Supõe-se que o cliente está em erro."
msgid "Request must have an origin query parameter"
msgstr "A solicitação deve ter um parâmetro de consulta de origem"
msgid "Request token is expired"
-msgstr "Token de requisição expirou"
+msgstr "O token de solicitação expirou"
msgid "Request token not found"
-msgstr "Token de requisição não encontrado"
+msgstr "Token de solicitação não encontrado"
msgid "Requested expiration time is more than redelegated trust can provide"
msgstr ""
@@ -766,40 +1083,36 @@ msgstr ""
"Profundidade da nova delegação solicitada de %(requested_count)d é maior que "
"a %(max_count)d permitida"
-#, python-format
-msgid "Role %s not found"
-msgstr "Role %s não localizada"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
"be removed in the \"M\"-Release."
msgstr ""
-"Executar o keystone via eventlet foi descontinuado como Kilo em favor de "
-"executar em um servidor WSGI (por exemplo, mod_wsgi). Suporte para o "
-"keystone sob eventlet será removida no \"M\"-Release."
+"Executar o keystone via eventlet foi descontinuado a partir do Kilo a favor "
+"de executar em um servidor WSGI (por exemplo, mod_wsgi). Suporte para o "
+"keystone sob eventlet será removido no \"M\"-Release."
msgid "Scoping to both domain and project is not allowed"
msgstr "A definição de escopo para o domínio e o projeto não é permitida"
msgid "Scoping to both domain and trust is not allowed"
-msgstr "A definição de escopo para o domínio e a trust não é permitida"
+msgstr "A definição de escopo para o domínio e confiança não é permitida"
msgid "Scoping to both project and trust is not allowed"
-msgstr "A definição de escopo para o projeto e a trust não é permitida"
+msgstr "A definição de escopo para o projeto e a confiança não é permitida"
#, python-format
msgid "Service Provider %(sp)s is disabled"
msgstr "O Provedor de Serviços %(sp)s está desativado"
msgid "Some of requested roles are not in redelegated trust"
-msgstr "Algumas funções de confiança não estão na confiança da nova delegação"
+msgstr "Algumas funções solicitadas não estão na confiança da nova delegação"
msgid "Specify a domain or project, not both"
msgstr "Especifique um domínio ou projeto, não ambos"
msgid "Specify a user or group, not both"
-msgstr "Epecifique um usuário ou grupo, não ambos"
+msgstr "Especifique um usuário ou grupo, não ambos"
msgid "Specify one of domain or project"
msgstr "Especifique um domínio ou projeto"
@@ -812,8 +1125,30 @@ msgid ""
"String length exceeded.The length of string '%(string)s' exceeded the limit "
"of column %(type)s(CHAR(%(length)d))."
msgstr ""
-"Comprimento de string excedido. O comprimento de string '%(string)s' excedeu "
-"o limite da coluna %(type)s(CHAR(%(length)d))."
+"Comprimento de sequência excedido. O comprimento de sequência '%(string)s' "
+"excedeu o limite da coluna %(type)s(CHAR(%(length)d))."
+
+msgid "Tenant name cannot contain reserved characters."
+msgstr "O nome do locatário não pode conter caracteres reservados."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"A extensão %s foi movida para o núcleo do keystone e, com isso, suas "
+"migrações são mantidas pelo controle de banco de dados keystone principal. "
+"Use o comando: keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"O 'expires_at' não deve ser anterior a agora. O servidor não pôde atender à "
+"solicitação porque ela está malformada ou de outra maneira incorreta. Supõe-"
+"se que o cliente está em erro erro."
msgid "The --all option cannot be used with the --domain-name option"
msgstr "A opção --all não pode ser usada com a opção --domain-name"
@@ -836,7 +1171,7 @@ msgid "The action you have requested has not been implemented."
msgstr "A ação que você solicitou não foi implementada."
msgid "The authenticated user should match the trustor."
-msgstr "O usuário autenticado deve corresponder à confiança."
+msgstr "O usuário autenticado deve corresponder ao fideicomitente."
msgid ""
"The certificates you requested are not available. It is likely that this "
@@ -847,16 +1182,22 @@ msgstr ""
"esse servidor não utiliza tokens PKI, caso contrário, este é o resultado de "
"configuração incorreta."
+msgid "The configured token provider does not support bind authentication."
+msgstr "O provedor de token configurado não suporta autenticação de ligação."
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "A criação de projetos agindo como domínios não é permitida na v2."
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
"not comply with the request because the password is invalid."
msgstr ""
"O comprimento da senha deve ser menor ou igual a %(size)i. O servidor não "
-"pôde obedecer à solicitação porque a senha é inválida."
+"pôde atender à solicitação porque a senha é inválida."
msgid "The request you have made requires authentication."
-msgstr "A requisição que você fez requer autenticação."
+msgstr "A solicitação que você fez requer autenticação."
msgid "The resource could not be found."
msgstr "O recurso não pôde ser localizado."
@@ -865,8 +1206,8 @@ msgid ""
"The revoke call must not have both domain_id and project_id. This is a bug "
"in the Keystone server. The current request is aborted."
msgstr ""
-"A chamada de revogação não deve ter ambos domain_id e project_id. Esse é um "
-"erro no servidor do Keystone. A solicitação atual foi interrompida."
+"A chamada de revogação não deve ter domain_id e project_id. Esse é um erro "
+"no servidor do Keystone. A solicitação atual foi interrompida."
msgid "The service you have requested is no longer available on this server."
msgstr "O serviço que você solicitou não está mais disponível neste servidor."
@@ -894,21 +1235,18 @@ msgstr "Não deve haver nenhum parâmetro não oauth"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Esta não é uma versão de carga útil do Fernet reconhecida: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Este não é um token Fernet v2.0. Use v3 para tokens de confiança, domínio, "
-"ou federados."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Este não é um token Fernet %s reconhecido"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client is "
"assumed to be in error."
msgstr ""
-"A data não está no formato especificado. O servidor não pôde realizar a "
-"requisição pois ela está mal formada ou incorreta. Assume-se que o cliente "
-"está com erro."
+"O registro de data e hora não está no formato especificado. O servidor não "
+"pôde atender à solicitação porque ela está mal formada ou de outra maneira "
+"incorreta. Supõe-se que o cliente está em erro."
#, python-format
msgid ""
@@ -916,21 +1254,24 @@ msgid ""
"the specific domain, i.e.: keystone-manage domain_config_upload --domain-"
"name %s"
msgstr ""
-"Para obter uma obter informações mais detalhadas sobre este erro, execute "
-"novamente este comando para o domínio específico, ou seja: keystone-manage "
+"Para obter informações mais detalhadas sobre este erro, execute novamente "
+"este comando para o domínio específico, ou seja: keystone-manage "
"domain_config_upload --domain-name %s"
msgid "Token belongs to another user"
-msgstr "O token pertence à outro usuário"
+msgstr "O token pertence a outro usuário"
msgid "Token does not belong to specified tenant."
-msgstr "O token não pertence ao tenant especificado."
+msgstr "O token não pertence ao locatário especificado."
+
+msgid "Token version is unrecognizable or unsupported."
+msgstr "A versão de Token é irreconhecida ou não suportada"
msgid "Trustee has no delegated roles."
-msgstr "Fiador não possui roles delegados."
+msgstr "O fiduciário não possui funções delegadas."
msgid "Trustor is disabled."
-msgstr "O fiador está desativado."
+msgstr "O fideicomitente está desativado."
#, python-format
msgid ""
@@ -946,7 +1287,7 @@ msgid ""
"contains option %(option_other)s instead"
msgstr ""
"Tentando atualizar a opção %(option)s no grupo %(group)s, mas a configuração "
-"fornecida contém %(option_other)s ao invés"
+"fornecida contém %(option_other)s "
#, python-format
msgid ""
@@ -974,9 +1315,12 @@ msgid ""
"Unable to delete region %(region_id)s because it or its child regions have "
"associated endpoints."
msgstr ""
-"Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas "
+"Não é possível excluir a região %(region_id)s porque uma ou mais de suas "
"regiões filhas possuem terminais associados."
+msgid "Unable to downgrade schema"
+msgstr "Não é possível fazer downgrade do esquema"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
@@ -984,15 +1328,8 @@ msgstr ""
"%(mapping_id)s"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"Não é possível obter uma conexão do ID do conjunto %(id)s após %(seconds)s "
-"segundos."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
-msgstr "Não é possível localizar diretório de configuração de domínio: %s"
+msgstr "Não é possível localizar o diretório de configuração de domínio: %s"
#, python-format
msgid "Unable to lookup user %s"
@@ -1021,7 +1358,7 @@ msgstr "Não é possível assinar o token."
#, python-format
msgid "Unexpected assignment type encountered, %s"
-msgstr "Tipo de designação inesperada encontrada, %s"
+msgstr "Tipo de designação inesperado encontrado, %s"
#, python-format
msgid ""
@@ -1050,21 +1387,40 @@ msgstr "Versão de token desconhecida %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "Dependência não registrada: %(name)s para %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "Atualização de `domain_id` não é permitida."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "Atualização de `is_domain` não é permitida."
+
msgid "Update of `parent_id` is not allowed."
msgstr "Atualização de ‘parent_id’ não é permitida."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "A atualização de domain_id é permitida somente para projetos raízes."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"Não é permitido atualizar domain_id de projetos que agem como domínios."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr ""
"Use um token com escopo definido do projeto ao tentar criar uma asserção SAML"
-#, python-format
-msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
-msgstr "Usuário %(u_id)s não está autorizado para o tenant %(t_id)s"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"O uso da configuração do driver de identidade para configurar "
+"automaticamente o mesmo driver de designação foi descontinuado. Na liberação "
+"\"O\", o driver de designação precisará ser configurado explicitamente caso "
+"seja diferente do padrão (SQL)."
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"Usuário %(user_id)s já possui a função %(role_id)s no locatário %(tenant_id)s"
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "O usuário %(u_id)s não está autorizado para o locatário %(t_id)s"
#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
@@ -1076,28 +1432,42 @@ msgstr "O usuário %(user_id)s não tem acesso ao projeto %(project_id)s"
#, python-format
msgid "User %(user_id)s is already a member of group %(group_id)s"
-msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s"
+msgstr "O usuário %(user_id)s já é membro do grupo %(group_id)s"
#, python-format
msgid "User '%(user_id)s' not found in group '%(group_id)s'"
msgstr "Usuário '%(user_id)s' não localizado no grupo '%(group_id)s'"
msgid "User IDs do not match"
-msgstr "ID de usuário não confere"
+msgstr "O ID de usuário não corresponde"
+
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"A autenticação do usuário não pode ser construída porque está faltando o ID "
+"ou o nome do usuário com o ID do domínio ou o nome do usuário com o nome do "
+"domínio."
#, python-format
msgid "User is disabled: %s"
msgstr "O usuário está desativado: %s"
msgid "User is not a member of the requested project"
-msgstr "Usuário não é um membro do projeto requisitado"
+msgstr "O usuário não é membro do projeto solicitado"
msgid "User is not a trustee."
-msgstr "Usuário não é confiável."
+msgstr "Usuário não é um fiduciário."
msgid "User not found"
msgstr "Usuário não localizado"
+msgid "User not valid for tenant."
+msgstr "O usuário não é válido para o locatário."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Papéis de usuários não suportados: necessário tenant_id"
+
#, python-format
msgid "User type %s not supported"
msgstr "Tipo de usuário %s não suportado"
@@ -1109,11 +1479,20 @@ msgstr "Você não está autorizado à realizar a ação solicitada."
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "Você não está autorizado a executar a ação solicitada: %(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Você tentou criar um recurso usando o token de administração. Como esse "
+"token não está dentro de um domínio, deve-se incluir explicitamente um "
+"domínio ao qual esse recurso possa pertencer."
+
msgid "`key_mangler` functions must be callable."
msgstr "Funções `key_mangler` devem ser chamáveis."
msgid "`key_mangler` option must be a function reference"
-msgstr "opção `key_mangler` deve ser uma referência de função"
+msgstr "A opção `key_mangler` deve ser uma referência de função"
msgid "any options"
msgstr "quaisquer opções"
@@ -1122,48 +1501,21 @@ msgid "auth_type is not Negotiate"
msgstr "auth_type não é Negotiate"
msgid "authorizing user does not have role required"
-msgstr "Usuário autorizado não possui o role necessário"
-
-msgid "cache_collection name is required"
-msgstr "nome cache_collection é necessário"
+msgstr "O usuário autorizado não possui a função necessária"
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr ""
-"não é possível criar um projeto em uma ramificação que contém um projeto "
+"Não é possível criar um projeto em uma ramificação que contém um projeto "
"desativado: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "não é possível criar um projeto em um domínio diferente de seus pais."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr ""
-"não é possível excluir um domínio que esteja ativado, desative-o primeiro."
-
#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"não é possível excluir o projeto %s, pois ele não é uma folha na hierarquia."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr ""
-"não é possível desativar o projeto %s desde que sua subárvore contenha "
-"projetos ativados"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"não é possível ativar o projeto %s desde que ele tenha pais desativados"
-
-msgid "database db_name is required"
-msgstr "banco de dados db_name é necessário"
-
-msgid "db_hosts value is required"
-msgstr "valor db_hosts é necessário"
-
-msgid "delete the default domain"
-msgstr "excluir o domínio padrão"
+"Não é possível excluir um projeto ativado que age como um domínio. Desative "
+"o projeto %s primeiro."
#, python-format
msgid "group %(group)s"
@@ -1176,48 +1528,43 @@ msgstr ""
"idp_contact_type deve ser uma dessas opções: [técnico, outro, suporte, "
"administrativo ou faturamento."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "valor de número inteiro esperado para mongo_ttl_seconds"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "valor inteiro esperado para w (atributo relativo a gravação)"
-
#, python-format
msgid "invalid date format %s"
msgstr "formato de data inválido %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "profundidade máx. de hierarquia atingida para a ramificação %s."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"Não é permitido ter dois projetos agindo como domínios com o mesmo nome: %s"
-msgid "no ssl support available"
-msgstr "suporte ssl não disponível"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"Não é permitido ter dois projetos dentro de um domínio com o mesmo nome: %s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "Somente projetos raízes são permitidos para agirem como domínios. "
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "opção %(option)s no grupo %(group)s"
-msgid "pad must be single character"
-msgstr "preenchimento deve ser caractere único"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "texto base64url preenchido deve ser múltiplo de 4 caracteres"
-
msgid "provided consumer key does not match stored consumer key"
msgstr ""
-"Chave de consumidor fornecida não confere com a chave de consumidor "
+"A chave de consumidor fornecida não confere com a chave de consumidor "
"armazenada"
msgid "provided request key does not match stored request key"
msgstr ""
-"Chave de requisição do provedor não confere com a chave de requisição "
+"A chave de solicitação fornecida não confere com a chave de solicitação "
"armazenada"
msgid "provided verifier does not match stored verifier"
-msgstr "Verificador fornecido não confere com o verificador armazenado"
-
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "região não é do tipo dogpile.cache.CacheRegion"
+msgstr "O verificador fornecido não confere com o verificador armazenado"
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses deve ser um número inteiro positivo ou nulo."
@@ -1226,35 +1573,25 @@ msgid "remaining_uses must not be set if redelegation is allowed"
msgstr ""
"remaining_uses não deverá ser definido se a nova delegação for permitida"
-msgid "replicaset_name required when use_replica is True"
-msgstr "replicaset_name necessário quando use_replica for True"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
"%(group_other)s instead"
msgstr ""
-"solicite atualizar o grupo %(group)s, mas a configuração fornecida contém o "
-"grupo %(group_other)s ao invés"
+"solicitação para atualizar o grupo %(group)s, mas a configuração fornecida "
+"contém o grupo %(group_other)s"
msgid "rescope a scoped token"
msgstr "Defina novamente um escopo de um token com escopo"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"texto é múltiplo de 4, mas preenchimento \"%s\" ocorre antes do penúltimo "
-"caractere"
+msgid "role %s is not defined"
+msgstr "O papel %s não foi definido"
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
+msgid "scope.project.id must be specified if include_subtree is also specified"
msgstr ""
-"texto é múltiplo de 4, mas preenchimento \"%s\" ocorre antes do último "
-"caractere de não preenchimento"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "texto não é um múltiplo de 4, mas contém preenchimento \"%s\""
+"scope.project.id deverá ser especificado se include_subtree também for "
+"especificado"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1262,8 +1599,22 @@ msgstr "tls_cacertdir %s não encontrado ou não é um diretório"
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
-msgstr "tls_cacertfile %s não encontrada ou não é um arquivo"
+msgstr "tls_cacertfile %s não encontrado ou não é um arquivo"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
-msgstr "referência de token deve ser um tipo KeystoneToken, obteve: %s"
+msgstr "A referência de token deve ser um tipo KeystoneToken, obteve: %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr ""
+"A atualização de domain_id foi descontinuada a partir do Mitaka e será "
+"removida na liberação O."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"O validado esperava localizar %(param_name)r na assinatura da função para "
+"%(func_name)r."
diff --git a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
index 9435231b..b60e4349 100644
--- a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone-log-critical.po
@@ -6,21 +6,21 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Russian\n"
-"Language: ru\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: ru\n"
"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
"%100>=11 && n%100<=14)? 2 : 3);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Russian\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone.po
index 1188d316..205a3e53 100644
--- a/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/ru/LC_MESSAGES/keystone.po
@@ -1,34 +1,49 @@
-# Russian translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
# kogamatranslator49 <r.podarov@yandex.ru>, 2015
# sher <sher@online.ua>, 2013
+# sher <sher@online.ua>, 2013
# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Grigory Mokhin <mokhin@gmail.com>, 2016. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-03-18 10:16+0000\n"
+"Last-Translator: Grigory Mokhin <mokhin@gmail.com>\n"
"Language: ru\n"
-"Language-Team: Russian\n"
"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
-"%100>=11 && n%100<=14)? 2 : 3)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"%100>=11 && n%100<=14)? 2 : 3);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Russian\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "ВерÑÐ¸Ñ Ð´Ñ€Ð°Ð¹Ð²ÐµÑ€Ð° %(driver)s не поддерживаетÑÑ"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr ""
+"Ð˜Ð¼Ñ %(entity)s не может Ñодержать Ñледующие зарезервированные Ñимволы: "
+"%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr ""
@@ -52,6 +67,10 @@ msgstr ""
"%(path)s не ÑущеÑтвует или не ÑвлÑетÑÑ ÐºÐ°Ñ‚Ð°Ð»Ð¾Ð³Ð¾Ð¼."
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s не подразумевает %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s не может быть короче %(min_length)s Ñимволов."
@@ -64,6 +83,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s не должен быть длинее %(max_length)s Ñимволов."
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s не может быть подразумеваемой ролью"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s не может быть пуÑÑ‚."
@@ -79,10 +102,18 @@ msgstr "Поле %s ÑвлÑетÑÑ Ð¾Ð±Ñзательным и не может
msgid "%s field(s) cannot be empty"
msgstr "Поле %s не может быть пуÑтым"
-msgid "(Disable debug mode to suppress these details.)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
msgstr ""
-" \t\n"
-"(Отключить режим отладки, чтобы подавить Ñти детали.)"
+"%s Ð´Ð»Ñ ÑиÑтемы идентификации LDAP уÑтарело Mitaka, вмеÑто него иÑпользуетÑÑ "
+"Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ñ LDAP Ñ Ð´Ð¾Ñтупом только Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ. Эта Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ удалена в "
+"выпуÑке \"O\"."
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(Выключите режим insecure_debug, чтобы не показывать Ñти подробноÑти.)"
msgid "--all option cannot be mixed with other options"
msgstr "опцию --all Ð½ÐµÐ»ÑŒÐ·Ñ ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°Ñ‚ÑŒ вмеÑте Ñ Ð´Ñ€ÑƒÐ³Ð¸Ð¼Ð¸ опциÑми"
@@ -131,6 +162,16 @@ msgstr ""
msgid "At least one role should be specified."
msgstr "Ðеобходимо указать по крайней мере одну роль."
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"Ðе удалоÑÑŒ автоматичеÑки выбрать драйвер на оÑнове опции [identity]\\driver, "
+"так как драйвер %s не найден. Укажите требуемый драйвер в [assignment]/"
+"driver в конфигурации keystone."
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "Попытка идентификации Ñ Ð¸Ñпользованием неподдерживаемого метода."
@@ -144,6 +185,14 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "Ошибка Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ð¸."
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"Ð‘Ð°Ð·Ð¾Ð²Ð°Ñ ÑиÑтема `%(backend)s` не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимой базовой ÑиÑтемой в кÑше "
+"памÑти. ДопуÑтимые базовые ÑиÑтемы: %(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr ""
"ПредоÑтавить права доÑтупа маркеру запроÑа Ñ Ð¼Ð°Ñ€ÐºÐµÑ€Ð¾Ð¼, выданным поÑредÑтвом "
@@ -156,9 +205,6 @@ msgstr "Ðевозможно изменить %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "Ðевозможно изменить ИД домена"
-msgid "Cannot change consumer secret"
-msgstr "Ðевозможно изменить Ñекретный ключ приемника"
-
msgid "Cannot change user ID"
msgstr "Ðевозможно изменить ИД пользователÑ"
@@ -166,12 +212,68 @@ msgid "Cannot change user name"
msgstr "Ðевозможно изменить Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "Ðе удаетÑÑ Ñоздать конечную точку Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ недопуÑтимого URL: %(url)s"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "Ðе удаетÑÑ Ñоздать проект Ñ Ñ€Ð¾Ð´Ð¸Ñ‚ÐµÐ»ÑŒÑким объектом: %(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "Ðевозможно копировать Ð¸Ð¼Ñ %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"Ðе удаетÑÑ Ñоздать проект, так как его владелец указан как домен "
+"%(domain_id)s, но его родительÑкий объект задан в другом домене "
+"(%(parent_domain_id)s)."
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"Ðе удаетÑÑ Ñоздать проект, так как его родительÑкий Ñлемент (%(domain_id)s) "
+"работает в качеÑтве домена, но parent_id (%(parent_id)s), указанный Ð´Ð»Ñ "
+"проекта, не ÑоответÑтвует данному domain_id."
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr "Ðевозможно удалить включенный домен, Ñначала выключите его."
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"Ðевозможно удалить проект %(project_id)s, так как его поддерево Ñодержит "
+"включенные проекты"
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"Ðевозможно удалить проект %s, так как он не ÑвлÑетÑÑ ÐºÐ¾Ð½ÐµÑ‡Ð½Ñ‹Ð¼ объектом в "
+"Ñтруктуре. ИÑпользуйте каÑкадную опцию Ð´Ð»Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð²Ñего поддерева."
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr ""
+"ÐÐµÐ»ÑŒÐ·Ñ Ð¾Ñ‚ÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÑŒ проект %(project_id)s, так как его поддерево Ñодержит "
+"включенные проекты"
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr ""
+"Ðе удаетÑÑ Ð²ÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÑŒ проект %s, так как у него отключены родительÑкие объекты"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr ""
+"Ðе удаетÑÑ Ð¿Ð¾ÐºÐ°Ð·Ð°Ñ‚ÑŒ ÑпиÑок приÑвоений, полученных из групп и отфильтрованных "
+"по ИД пользователÑ."
msgid "Cannot list request tokens with a token issued via delegation."
msgstr ""
@@ -193,6 +295,11 @@ msgstr ""
"Ðевозможно отÑечь вызов драйвера без ÑпиÑка подÑказок в качеÑтве первого "
"параметра поÑле Ñамого ÑÐµÐ±Ñ "
+msgid "Cannot update domain_id of a project that has children."
+msgstr ""
+"Ðе разрешено обновлÑÑ‚ÑŒ domain_id Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð°, у которого еÑÑ‚ÑŒ дочерние "
+"объекты."
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr ""
@@ -205,6 +312,9 @@ msgstr ""
"ÐÐµÐ»ÑŒÐ·Ñ Ð¸Ñпользовать параметры запроÑа subtree_as_list и subtree_as_ids "
"одновременно."
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "КаÑкадное обновление разрешено только Ð´Ð»Ñ Ð²ÐºÐ»ÑŽÑ‡ÐµÐ½Ð½Ñ‹Ñ… атрибутов."
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr ""
@@ -218,6 +328,10 @@ msgstr ""
"вÑегда дает пуÑтой ÑпиÑок."
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "ÐаÑтроить Ñлемент API в /domains/%s/config"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "При попытке Ñохранить %(type)s возник конфликт - %(details)s"
@@ -236,6 +350,14 @@ msgstr ""
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"Ðе удалоÑÑŒ определить ИД поÑтавщика идентификации. ÐžÐ¿Ñ†Ð¸Ñ ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ð¸ "
+"%(issuer_attribute)s не найдена в Ñреде запроÑа."
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr ""
@@ -300,9 +422,6 @@ msgstr "Проект %(project_id)s не найден"
msgid "Could not find region: %(region_id)s"
msgstr "Регион %(region_id)s не найден"
-msgid "Could not find role"
-msgstr "Ðе удалоÑÑŒ найти роль"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -339,15 +458,47 @@ msgstr "Версия %(version)s не найдена"
msgid "Could not find: %(target)s"
msgstr "%(target)s не найдена"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"Ðе удаетÑÑ ÑвÑзать объединенные ÑвойÑтва Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€Ð°Ð¼Ð¸. "
+"Дополнительные ÑÐ²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾ ÑвÑзывании приведены в протоколе отладки."
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"Ðе удалоÑÑŒ привÑзать Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð²Ð¾ Ð²Ñ€ÐµÐ¼Ñ Ð½Ð°Ñтройки временного "
+"идентификатора пользователÑ. Правила привÑзка должны указывать имÑ/ИД "
+"пользователÑ, либо должна быть задана Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ Ñреды REMOTE_USER."
+
msgid "Could not validate the access token"
msgstr "Ðе удалоÑÑŒ проверить ключ доÑтупа"
msgid "Credential belongs to another user"
msgstr "Разрешение принадлежит другому пользователю"
+msgid "Credential signature mismatch"
+msgstr "ÐеÑовпадение подпиÑи идентификационных данных"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "База данных в каталоге /domains/%s/config"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"ПрÑмой импорт Ð¼Ð¾Ð´ÑƒÐ»Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ð¸ %(name)r уÑтарел в Liberty и может быть "
+"удален в выпуÑке N. ВмеÑто Ñтого иÑпользуетÑÑ ÐµÐ³Ð¾ точка входа из "
+"%(namespace)r."
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"ПрÑмой импорт драйвера %(name)r уÑтарел в Liberty и может быть удален в "
+"выпуÑке N. ВмеÑто Ñтого иÑпользуетÑÑ ÐµÐ³Ð¾ точка входа из %(namespace)r."
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -371,12 +522,15 @@ msgstr "Домен не может иметь идентификатор %s"
msgid "Domain is disabled: %s"
msgstr "Домен отключен: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Метаданные домена не поддерживаютÑÑ LDAP"
+msgid "Domain name cannot contain reserved characters."
+msgstr "Ð˜Ð¼Ñ Ð´Ð¾Ð¼ÐµÐ½Ð° не может Ñодержать зарезервированные Ñимволы."
msgid "Domain scoped token is not supported"
msgstr "Маркер, облаÑÑ‚ÑŒ которого - домен, не поддерживаетÑÑ"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "ОÑобые роли домена не поддерживаютÑÑ Ð² драйвере ролей V8"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
@@ -384,9 +538,6 @@ msgid ""
msgstr ""
"У домена %(domain)s уже определена ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ - файл пропущен: %(file)s."
-msgid "Domains are read-only against LDAP"
-msgstr "Домены доÑтупны только Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð² LDAP"
-
msgid "Duplicate Entry"
msgstr "Дубликат запиÑи"
@@ -395,9 +546,28 @@ msgid "Duplicate ID, %s."
msgstr "ПовторÑющийÑÑ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ‚Ð¾Ñ€, %s."
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "ПовторÑющаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ: %s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "ПовторÑющееÑÑ Ð¸Ð¼Ñ, %s."
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "ПовторÑющийÑÑ ÑƒÐ´Ð°Ð»ÐµÐ½Ð½Ñ‹Ð¹ ИД: %s"
+
+msgid "EC2 access key not found."
+msgstr "Ключ доÑтупа EC2 не найден."
+
+msgid "EC2 signature not supplied."
+msgstr "Ðе указана подпиÑÑŒ EC2."
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr ""
+"Ðеобходимо указать аргумент --bootstrap-password или OS_BOOTSTRAP_PASSWORD."
+
msgid "Enabled field must be a boolean"
msgstr "Ðктивное поле должно быть булевÑким значением"
@@ -424,10 +594,31 @@ msgstr ""
"Ошибка анализа файла конфигурации Ð´Ð»Ñ Ð´Ð¾Ð¼ÐµÐ½Ð° %(domain)s, файл: %(file)s."
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "Ошибка при открытии файла %(path)s: %(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "Ошибка при анализе Ñтроки: '%(line)s': %(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "Ошибка при анализе правил %(path)s: %(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° метаданных: %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"Превышено чиÑло попыток региÑтрации домена %(domain)s Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ "
+"драйвера SQL. ПоÑледний домен, Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ð¾Ð³Ð¾ Ñто было Ñделано - "
+"%(last_domain)s. Больше попыток не будет"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "ОжидаетÑÑ dict или list: %s"
@@ -472,6 +663,10 @@ msgstr ""
"домена."
#, python-format
+msgid "Group %s not found in config"
+msgstr "Группа %s не найдена в конфигурации"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr ""
"Группа %(group)s не поддерживаетÑÑ Ð´Ð»Ñ Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð½Ñ‹Ñ… конфигураций домена"
@@ -507,6 +702,9 @@ msgstr ""
"ВходÑщий идентификатор поÑтавщика идентификаторов не включен в принÑтые "
"идентификаторы."
+msgid "Invalid EC2 signature."
+msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¿Ð¾Ð´Ð¿Ð¸ÑÑŒ EC2."
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr ""
@@ -579,17 +777,12 @@ msgstr ""
msgid "Invalid signature"
msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¿Ð¾Ð´Ð¿Ð¸ÑÑŒ"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"ÐедопуÑтимое значение ssl_cert_reqs, %s, необходимо указать одно из "
-"значений: \"NONE\", \"OPTIONAL\", \"REQUIRED\""
-
msgid "Invalid user / password"
msgstr "ÐедопуÑтимый пользователь / пароль"
+msgid "Invalid username or TOTP passcode"
+msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð¸Ð»Ð¸ пароль TOTP"
+
msgid "Invalid username or password"
msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð¸Ð»Ð¸ пароль"
@@ -613,6 +806,20 @@ msgstr "LDAP %s удаление"
msgid "LDAP %s update"
msgstr "LDAP %s обновление"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr ""
+"Длина ИД преобразуемого реÑурÑа > 64 Ñимволов, то еÑÑ‚ÑŒ превышает макÑимально "
+"допуÑтимую"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"Локальный раздел в преобразовании %(mapping_id)s указывает на удаленное "
+"Ñовпадение, которое не ÑущеÑтвует (например, {0} в локальном разделе)."
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "ÐаÑтупил тайм-аут блокировки Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð°, %(target)s"
@@ -632,6 +839,10 @@ msgid "Marker could not be found"
msgstr "Ðе удалоÑÑŒ найти маркер"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "Ð”Ð»Ñ Ð²ÐµÑ‚Ð²Ð¸ %s доÑтигнута макÑÐ¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð³Ð»ÑƒÐ±Ð¸Ð½Ð° иерархии."
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "Выполнено макÑимальное чиÑло попыток блокировки в %s."
@@ -665,6 +876,9 @@ msgstr "Ðеобходимо указать домен или проект"
msgid "Name field is required and cannot be empty"
msgstr "Поле имени ÑвлÑетÑÑ Ð¾Ð±Ñзательным и не может быть пуÑтым"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "Ðе указаны ни ИД домена проекта, ни Ð¸Ð¼Ñ Ð´Ð¾Ð¼ÐµÐ½Ð° проекта."
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
@@ -693,6 +907,9 @@ msgstr "С конечной точкой %(endpoint_id)s не связано ни
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "Вариантов использования группы доверия %(trust_id)s не осталось"
+msgid "No token in the request"
+msgstr "В запросе отсутствует маркер"
+
msgid "Non-default domain is not supported"
msgstr "Домен, отличный от применяемого по умолчанию, не поддерживается"
@@ -720,9 +937,29 @@ msgid "Project (%s)"
msgstr "Проект (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "Ðе найден ИД проекта: %(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "Поле проекта ÑвлÑетÑÑ Ð¾Ð±Ñзательным и не может быть пуÑтым."
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "Проект отключен: %s"
+msgid "Project name cannot contain reserved characters."
+msgstr "Ð˜Ð¼Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° не может Ñодержать зарезервированные Ñимволы."
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "Строка запроÑа указана в кодировке, отличной от UTF-8"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr ""
+"Чтение Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾ умолчанию Ð´Ð»Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð° %(option)s в группе %(group)s не "
+"поддерживаетÑÑ"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "Изменение Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ€Ð°Ð·Ñ€ÐµÑˆÐµÐ½Ð¾ только Ð´Ð»Ñ Ð´Ð¾Ð²ÐµÑ€ÐµÐ½Ð½Ð¾Ð³Ð¾ пользователÑ"
@@ -734,6 +971,72 @@ msgstr ""
"ОÑтавшаÑÑÑ Ð³Ð»ÑƒÐ±Ð¸Ð½Ð° Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %(redelegation_depth)d выходит за "
"пределы разрешенного диапазона [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите admin_crud_extension из конвейера вÑтавки, раÑширение admin_crud "
+"теперь доÑтупно вÑегда. Обновите раздел [pipeline:admin_api] в файле "
+"keystone-paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"Удалите endpoint_filter_extension из конвейера вÑтавки, раÑширение фильтра "
+"конечной точки теперь доÑтупно вÑегда. Обновите раздел [pipeline:api_v3] в "
+"файле keystone-paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите federation_filter_extension из конвейера вÑтавки, раÑширение "
+"Ð¾Ð±ÑŠÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ‚ÐµÐ¿ÐµÑ€ÑŒ доÑтупно вÑегда. Обновите раздел [pipeline:api_v3] в "
+"файле keystone-paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите oauth1_filter_extension из конвейера вÑтавки, раÑширение oauth1 "
+"теперь доÑтупно вÑегда. Обновите раздел [pipeline:api_v3] в файле keystone-"
+"paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите revoke_filter_extension из конвейера вÑтавки, раÑширение отзыва "
+"теперь доÑтупно вÑегда. Обновите раздел [pipeline:api_v3] в файле keystone-"
+"paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите simple_cert из конвейера вÑтавки, теперь поÑтавщики ключей PKI и "
+"PKIz уÑтарели, а simple_cert иÑпользовалÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ Ð´Ð»Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶ÐºÐ¸ Ñтих "
+"поÑтавщиков. Обновите раздел [pipeline:api_v3] в файле keystone-paste.ini "
+"ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"Удалите user_crud_extension из конвейера вÑтавки, раÑширение user_crud "
+"теперь доÑтупно вÑегда. Обновите раздел [pipeline:public_api] в файле "
+"keystone-paste.ini ÑоответÑтвенно, так как он будет удален в выпуÑке O."
+
msgid "Request Token does not have an authorizing user id"
msgstr ""
"Маркер запроÑа не Ñодержит ИД Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ð´Ð»Ñ Ð¿Ñ€ÐµÐ´Ð¾ÑÑ‚Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð°Ð² доÑтупа"
@@ -771,10 +1074,6 @@ msgstr ""
"Ð—Ð°Ð¿Ñ€Ð¾ÑˆÐµÐ½Ð½Ð°Ñ Ð³Ð»ÑƒÐ±Ð¸Ð½Ð° Ð¸Ð·Ð¼ÐµÐ½ÐµÐ½Ð¸Ñ Ð´ÐµÐ»ÐµÐ³Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ %(requested_count)d превышает "
"разрешенную %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "Роль %s не найдена"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -822,6 +1121,28 @@ msgstr ""
"Превышена длина Ñтроки. Длина Ñтроки '%(string)s' превышает ограничение "
"Ñтолбца %(type)s(CHAR(%(length)d))."
+msgid "Tenant name cannot contain reserved characters."
+msgstr "Ð˜Ð¼Ñ Ð°Ñ€ÐµÐ½Ð´Ð°Ñ‚Ð¾Ñ€Ð° не может Ñодержать зарезервированные Ñимволы."
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"РаÑширение %s было перемещено в Ñдро keystone, и его Ð¿ÐµÑ€ÐµÐ½Ð¾Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ÑÑ "
+"оÑновной ÑиÑтемой ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð±Ð°Ð·Ñ‹ данных keystone. ИÑпользуйте команду: "
+"keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"Значение параметра 'expires_at' не должно быть меньше наÑтоÑщего времени. "
+"Серверу не удалоÑÑŒ иÑполнить запроÑ, так как он поврежден или неправильно "
+"Ñформирован. Предположительно, клиент находитÑÑ Ð² ÑоÑтоÑнии ошибки."
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr "Параметр --all Ð½ÐµÐ»ÑŒÐ·Ñ ÑƒÐºÐ°Ð·Ñ‹Ð²Ð°Ñ‚ÑŒ вмеÑте Ñ Ð¿Ð°Ñ€Ð°Ð¼ÐµÑ‚Ñ€Ð¾Ð¼ --domain-name"
@@ -852,6 +1173,12 @@ msgstr ""
"маркеры PKI, в противном Ñлучае, Ñто ÑвлÑетÑÑ ÑледÑтвием ошибки в "
"конфигурации."
+msgid "The configured token provider does not support bind authentication."
+msgstr "ÐаÑтроенный модуль маркера не поддерживает идентификацию привÑзки."
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "Создание проектов, работающих в качеÑтве доменов, не разрешено в v2."
+
#, python-format
msgid ""
"The password length must be less than or equal to %(size)i. The server could "
@@ -899,12 +1226,9 @@ msgstr "Не допускаются параметры, отличные от oa
msgid "This is not a recognized Fernet payload version: %s"
msgstr "Это не распознанная версия полезной нагрузки Fernet: %s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Это не маркер Fernet верÑии 2.0. Следует иÑпользовать верÑию 3 Ð´Ð»Ñ Ð½Ð°Ð´ÐµÐ¶Ð½Ñ‹Ñ…, "
-"доменных или объединенных маркеров."
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "Это не маркер Fernet: %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -930,6 +1254,9 @@ msgstr "Маркер принадлежит другому пользователю
msgid "Token does not belong to specified tenant."
msgstr "Маркер не принадлежит указанному арендатору."
+msgid "Token version is unrecognizable or unsupported."
+msgstr "Версия маркера не распознана либо не поддерживается."
+
msgid "Trustee has no delegated roles."
msgstr "У доверенного лица нет делегированных ролей."
@@ -978,6 +1305,9 @@ msgstr ""
"Ðе удалоÑÑŒ удалить регион %(region_id)s: регион или его дочерние регионы "
"имеют ÑвÑзанные конечные точки."
+msgid "Unable to downgrade schema"
+msgstr "Ðе удаетÑÑ Ð¿Ð¾Ð½Ð¸Ð·Ð¸Ñ‚ÑŒ верÑию Ñхемы"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr ""
@@ -985,12 +1315,6 @@ msgstr ""
"%(mapping_id)s"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr ""
-"Ðе удалоÑÑŒ получить Ñоединение из пула Ñ Ð˜Ð” %(id)s за %(seconds)s Ñекунд."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Ðе удалоÑÑŒ найти каталог конфигурации домена: %s"
@@ -1051,23 +1375,41 @@ msgstr "ÐеизвеÑÑ‚Ð½Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ Ð¼Ð°Ñ€ÐºÐµÑ€Ð° %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "ÐезарегиÑÑ‚Ñ€Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð½Ð°Ñ Ð·Ð°Ð²Ð¸ÑимоÑÑ‚ÑŒ %(name)s Ð´Ð»Ñ %(targets)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "Обновление `domain_id` не разрешено."
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "Обновление `is_domain` не разрешено."
+
msgid "Update of `parent_id` is not allowed."
msgstr "Обновление `parent_id` не разрешено."
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "Обновление domain_id разрешено только Ð´Ð»Ñ ÐºÐ¾Ñ€Ð½ÐµÐ²Ñ‹Ñ… проектов."
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr ""
+"Ðе разрешено обновлÑÑ‚ÑŒ domain_id Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð¾Ð², работающих в качеÑтве доменов."
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr "ИÑпользовать локальный ключ проекта при Ñоздании ÑƒÑ‚Ð²ÐµÑ€Ð¶Ð´ÐµÐ½Ð¸Ñ SAML"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"ИÑпользование конфигурации драйвера идентификатора Ð´Ð»Ñ Ð°Ð²Ñ‚Ð¾Ð¼Ð°Ñ‚Ð¸Ñ‡ÐµÑкой "
+"наÑтройки такого же драйвера приÑÐ²Ð¾ÐµÐ½Ð¸Ñ ÑƒÑтарело. Ð’ выпуÑке \"O\" драйвер "
+"приÑÐ²Ð¾ÐµÐ½Ð¸Ñ Ð´Ð¾Ð»Ð¶ÐµÐ½ будет наÑтраиватьÑÑ Ñвным образом, еÑли он не Ñовпадает Ñ "
+"драйвером по умолчанию (SQL)."
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "У Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %(u_id)s нет доÑтупа к арендатору %(t_id)s"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"Пользователю %(user_id)s уже приÑвоена роль %(role_id)s в арендаторе "
-"%(tenant_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr "У Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %(user_id)s нет доÑтупа к домену %(domain_id)s"
@@ -1086,6 +1428,14 @@ msgstr "Пользователь '%(user_id)s' не найден в группе
msgid "User IDs do not match"
msgstr "ИД пользователей не Ñовпадают"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"Ðе удалоÑÑŒ Ñкомпоновать идентификацию пользователÑ, так как отÑутÑтвует ИД "
+"пользователÑ, Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñ Ð˜Ð” домена либо Ð¸Ð¼Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ "
+"домена."
+
#, python-format
msgid "User is disabled: %s"
msgstr "Пользователь отключен: %s"
@@ -1099,6 +1449,12 @@ msgstr "Пользователь не является доверенным лицом
msgid "User not found"
msgstr "Пользователь не найден"
+msgid "User not valid for tenant."
+msgstr "Недопустимый пользователь для арендатора."
+
+msgid "User roles not supported: tenant_id required"
+msgstr "Роли пользователей не поддерживаются, требуется tenant_id"
+
#, python-format
msgid "User type %s not supported"
msgstr "Тип Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %s не поддерживаетÑÑ"
@@ -1110,6 +1466,15 @@ msgstr "У Ð²Ð°Ñ Ð½ÐµÑ‚ прав на выполнение запрашивае
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "У Ð²Ð°Ñ Ð½ÐµÑ‚ прав на выполнение запрошенного дейÑтвиÑ: %(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"Попытка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€ÐµÑурÑа Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ админиÑтративного маркера. Так как Ñтот "
+"маркер не принадлежит домену, необходимо Ñвно указать домен, которому будет "
+"принадлежать реÑурÑ."
+
msgid "`key_mangler` functions must be callable."
msgstr "Функции `key_mangler` должны быть доÑтупны Ð´Ð»Ñ Ð²Ñ‹Ð·Ð¾Ð²Ð°."
@@ -1126,45 +1491,17 @@ msgid "authorizing user does not have role required"
msgstr ""
"пользователю, предоÑтавлÑющему права доÑтупа, не приÑвоена Ñ‚Ñ€ÐµÐ±ÑƒÐµÐ¼Ð°Ñ Ñ€Ð¾Ð»ÑŒ"
-msgid "cache_collection name is required"
-msgstr "Ð¸Ð¼Ñ cache_collection ÑвлÑетÑÑ Ð¾Ð±Ñзательным"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr "ÐÐµÐ»ÑŒÐ·Ñ Ñоздать проект в ветви, Ñодержащей отключенный проект: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr ""
-"ÐÐµÐ»ÑŒÐ·Ñ Ñоздать проект в домене, отличном от домена, в котором находÑÑ‚ÑÑ ÐµÐ³Ð¾ "
-"родительÑкие объекты."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr "невозможно удалить работающий домен, вначале отключите его."
-
#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr ""
-"ÐÐµÐ»ÑŒÐ·Ñ ÑƒÐ´Ð°Ð»Ð¸Ñ‚ÑŒ проект %s, так как он не ÑвлÑетÑÑ ÐºÐ¾Ð½ÐµÑ‡Ð½Ñ‹Ð¼ объектом в "
-"Ñтруктуре."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
msgstr ""
-"ÐÐµÐ»ÑŒÐ·Ñ Ð¾Ñ‚ÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÑŒ проект %s, так как его поддерево Ñодержит включенные проекты"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr ""
-"ÐÐµÐ»ÑŒÐ·Ñ Ð²ÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÑŒ проект %s, так как у него отключены родительÑкие объекты"
-
-msgid "database db_name is required"
-msgstr "db_name базы данных ÑвлÑетÑÑ Ð¾Ð±Ñзательным"
-
-msgid "db_hosts value is required"
-msgstr "Значение db_hosts ÑвлÑетÑÑ Ð¾Ð±Ñзательным"
-
-msgid "delete the default domain"
-msgstr "удалить домен по умолчанию"
+"Ðевозможно удалить включенный проект, работающий как домен. Сначала "
+"выключите проект %s."
#, python-format
msgid "group %(group)s"
@@ -1177,33 +1514,33 @@ msgstr ""
"Значение idp_contact_type должно быть одним из Ñледующих: technical, other, "
"support, administrative или billing."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "Ð´Ð»Ñ Ð°Ñ‚Ñ€Ð¸Ð±ÑƒÑ‚Ð° mongo_ttl_seconds ожидаетÑÑ Ñ†ÐµÐ»Ð¾Ñ‡Ð¸Ñленное значение"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "Ð´Ð»Ñ w (атрибут учаÑÑ‚Ð¸Ñ Ð² запиÑи) ожидаетÑÑ Ñ†ÐµÐ»Ð¾Ñ‡Ð¸Ñленное значение"
-
#, python-format
msgid "invalid date format %s"
msgstr "ÐедопуÑтимый формат даты %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "Ð”Ð»Ñ Ð²ÐµÑ‚Ð²Ð¸ %s доÑтигнута макÑÐ¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð³Ð»ÑƒÐ±Ð¸Ð½Ð° иерархии."
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr ""
+"Ðе разрешено иÑпользовать два проекта в качеÑтве доменов Ñ Ð¾Ð´Ð¸Ð½Ð°ÐºÐ¾Ð²Ñ‹Ð¼ "
+"именем: %s"
-msgid "no ssl support available"
-msgstr "отÑутÑтвует поддержка ssl"
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr ""
+"Ðе разрешено иÑпользовать два проекта в одном домене Ñ Ð¾Ð´Ð¸Ð½Ð°ÐºÐ¾Ð²Ñ‹Ð¼Ð¸ именами: "
+"%s"
+
+msgid "only root projects are allowed to act as domains."
+msgstr "Только корневые проекты могут работать в качеÑтве доменов."
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "параметр %(option)s в группе %(group)s"
-msgid "pad must be single character"
-msgstr "заполнитель должен ÑоответÑтвовать одному Ñимволу"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "размер текÑта base64url Ñ Ð·Ð°Ð¿Ð¾Ð»Ð½Ð¸Ñ‚ÐµÐ»Ñми должен быть кратен 4 Ñимволам"
-
msgid "provided consumer key does not match stored consumer key"
msgstr "переданный ключ приемника не Ñовпадает Ñ Ñохраненным"
@@ -1213,9 +1550,6 @@ msgstr "переданный ключ запроÑа не Ñовпадает Ñ
msgid "provided verifier does not match stored verifier"
msgstr "Ð¿ÐµÑ€ÐµÐ´Ð°Ð½Ð½Ð°Ñ Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ¸ не Ñовпадает Ñ Ñохраненной"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "регион не отноÑитÑÑ Ðº типу dogpile.cache.CacheRegion"
-
msgid "remaining_uses must be a positive integer or null."
msgstr ""
"Значение remaining_uses должно быть положительным целым чиÑлом или равным "
@@ -1226,11 +1560,6 @@ msgstr ""
"ЕÑли включено изменение делегированиÑ, параметр remaining_uses не должен "
"быть задан"
-msgid "replicaset_name required when use_replica is True"
-msgstr ""
-"replicaset_name ÑвлÑетÑÑ Ð¾Ð±Ñзательным, еÑли Ð´Ð»Ñ use_replica задано значение "
-"True"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1243,20 +1572,11 @@ msgid "rescope a scoped token"
msgstr "Изменить облаÑÑ‚ÑŒ помещенного в облаÑÑ‚ÑŒ ключа"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr ""
-"размер текÑта кратен 4, но заполнитель \"%s\" вÑтречаетÑÑ Ð¿ÐµÑ€ÐµÐ´ 2-м и до "
-"поÑледнего Ñимвола"
-
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
-"размер текÑта кратен 4, но заполнитель \"%s\" вÑтречаетÑÑ Ð¿ÐµÑ€ÐµÐ´ поÑледним "
-"Ñимволом без заполнителÑ"
+msgid "role %s is not defined"
+msgstr "роль %s не определена"
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "размер текÑта не кратен 4, но Ñодержит заполнитель \"%s\""
+msgid "scope.project.id must be specified if include_subtree is also specified"
+msgstr "scope.project.id необходимо указать, еÑли указан include_subtree"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1269,3 +1589,15 @@ msgstr "tls_cacertfile %s не найден или не является файлом
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "Ссылка на маркер должна относиться к типу KeystoneToken, а получено %s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr "обновление domain_id уÑтарело в Mitaka и будет удалено в O."
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr ""
+"ожидалоÑÑŒ найти проверенный параметр %(param_name)r в подпиÑи функции "
+"%(func_name)r."
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po
index 4c59ad09..2dc7345d 100644
--- a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-08-04 01:49+0000\n"
-"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-04 01:49+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po
index de599919..18bc9fa2 100644
--- a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-error.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-08-04 01:50+0000\n"
-"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-04 01:50+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
msgid "Cannot retrieve Authorization headers"
msgstr "Yetkilendirme başlıkları alınamıyor"
@@ -47,10 +47,6 @@ msgstr ""
"%(project_id)s."
#, python-format
-msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
-msgstr "%(to_exec)s komutu %(retcode)s ile çıktı- %(output)s"
-
-#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr "%(host)s:%(port)s adresine bağlanılamadı"
@@ -133,14 +129,6 @@ msgid "Server error"
msgstr "Sunucu hatası"
#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr ""
-"Zula yapılandırma anahtarı inşa edilemiyor. Beklenen biçim \"<değişken ismi>:"
-"<değer>\". Bilinmeyen biçim atlanıyor: %s"
-
-#, python-format
msgid "Unable to convert Keystone user or group ID. Error: %s"
msgstr "Keystone kullanıcı veya grup kimliği dönüştürülemiyor. Hata: %s"
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po
index aeae0585..9d1cd41a 100644
--- a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone-log-warning.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-09-03 12:54+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid "%s is not a dogpile.proxy.ProxyBackend"
@@ -135,15 +135,6 @@ msgstr ""
"feshetme listesinden kaldırılıyor."
#, python-format
-msgid ""
-"TTL index already exists on db collection <%(c_name)s>, remove index <"
-"%(indx_name)s> first to make updated mongo_ttl_seconds value to be effective"
-msgstr ""
-"TTL indisi zaten <%(c_name)s> db koleksiyonunda mevcut, güncellenmiş "
-"mongo_ttl_seconds değerini etkin yapmak için önce <%(indx_name)s> indisini "
-"kaldırın"
-
-#, python-format
msgid "Token `%s` is expired, not adding to the revocation list."
msgstr "`%s` jetonunun süresi dolmuş, feshetme listesine eklenmiyor."
diff --git a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
index 3ded8a93..91bc5d15 100644
--- a/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
@@ -1,4 +1,4 @@
-# Turkish (Turkey) translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
@@ -6,21 +6,23 @@
# Alper Çiftçi <alprciftci@gmail.com>, 2015
# Andreas Jaeger <jaegerandi@gmail.com>, 2015
# catborise <muhammetalisag@gmail.com>, 2013
+# catborise <muhammetalisag@gmail.com>, 2013
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-03 12:54+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: tr_TR\n"
+"Language: tr-TR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
"Language-Team: Turkish (Turkey)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
#, python-format
msgid "%(detail)s"
@@ -77,9 +79,6 @@ msgstr "%s alanı gerekli ve boş olamaz"
msgid "%s field(s) cannot be empty"
msgstr "%s alan(lar)ı boş olamaz"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(Bu detayları gizlemek için hata ayıklama kipini kapatın.)"
-
msgid "--all option cannot be mixed with other options"
msgstr "--all seçeneği diğer seçeneklerle birleştirilemez"
@@ -156,9 +155,6 @@ msgstr "%(option_name)s %(attr)s değiştirilemiyor"
msgid "Cannot change Domain ID"
msgstr "Alan ID'si deÄŸiÅŸtirilemez"
-msgid "Cannot change consumer secret"
-msgstr "Tüketici sırrı değiştirilemez"
-
msgid "Cannot change user ID"
msgstr "Kullanıcı ID'si değiştirilemiyor"
@@ -173,10 +169,6 @@ msgstr "%(url)s geçersiz URL' si ile bir bitiş noktası yaratılamıyor"
msgid "Cannot create project with parent: %(project_id)s"
msgstr "Üst proje %(project_id)s ye sahip proje oluşturulamıyor"
-#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "%s ismi kopyalanamaz"
-
msgid "Cannot list request tokens with a token issued via delegation."
msgstr "Vekalet ile sağlanan bir jeton ile istek jetonları listelenemez."
@@ -301,9 +293,6 @@ msgstr "Proje bulunamadı: %(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "Bölge bulunamadı: %(region_id)s"
-msgid "Could not find role"
-msgstr "Rol bulunamadı"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -346,10 +335,6 @@ msgstr "Erişim jetonu doğrulanamadı"
msgid "Credential belongs to another user"
msgstr "Kimlik bilgisi başka bir kullanıcıya ait"
-#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "/domains/%s/config konumundaki veri tabanı"
-
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
msgstr ""
@@ -372,9 +357,6 @@ msgstr "Alan %s ID'sine sahip olamaz"
msgid "Domain is disabled: %s"
msgstr "Alan kapalı: %s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "Alan metadata'sı LDAP tarafından desteklenmiyor"
-
msgid "Domain scoped token is not supported"
msgstr "Alan kapsamlı jeton desteklenmiyor"
@@ -386,12 +368,6 @@ msgstr ""
"Alan: %(domain)s zaten tanımlanmış bir yapılandırmaya sahip - dosya "
"atlanıyor: %(file)s."
-msgid "Domains are not supported by the v2 API. Please use the v3 API instead."
-msgstr "v2 API alanları desteklemiyor. Bunun yerine lütfen v3 API kullanın"
-
-msgid "Domains are read-only against LDAP"
-msgstr "Alanlar LDAP'a karşı yalnızca-okunur"
-
msgid "Duplicate Entry"
msgstr "Kopya Girdi"
@@ -592,14 +568,6 @@ msgstr ""
msgid "Invalid signature"
msgstr "Geçersiz imza"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"%s değerinde geçersiz ssl_cert_reqs, \"HİÇBİRİ\", \"İSTEĞE BAĞLI\", \"GEREKLİ"
-"\" den biri olmalı"
-
msgid "Invalid user / password"
msgstr "Geçersiz kullanıcı / parola"
@@ -790,10 +758,6 @@ msgstr ""
"%(requested_count)d istenen tekrar yetki verme derinliÄŸi izin verilen "
"%(max_count)d den fazla"
-#, python-format
-msgid "Role %s not found"
-msgstr "%s rolü bulunamadı"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -924,13 +888,6 @@ msgid "This is not a recognized Fernet payload version: %s"
msgstr "Bu bilinen bir Fernet faydalı yük sürümü değil: %s"
msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr ""
-"Bu v2.0 Fernet jetonu değil. Güven, alan, veya federasyon jetonları için v3 "
-"kullanın."
-
-msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client is "
"assumed to be in error."
@@ -1009,11 +966,6 @@ msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr "Eşleştirme %(mapping_id)s kullanırken geçerli gruplar bulunamadı"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr "%(seconds)s saniye sonra havuz %(id)s'den bağlantı alınamadı."
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Alan yapılandırma dizini bulunamıyor: %s"
@@ -1084,12 +1036,6 @@ msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "%(u_id)s kullanıcısı %(t_id)s kiracısı için yetkilendirilmemiş"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr ""
-"Kullanıcı %(user_id)s zaten %(tenant_id)s kiracısı içinde bir %(role_id)s "
-"rolüne sahip"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
msgstr "%(user_id)s kullanıcısının %(domain_id)s alanına erişimi yok"
@@ -1150,40 +1096,10 @@ msgstr "auth_type Negotiate değil"
msgid "authorizing user does not have role required"
msgstr "yetkilendiren kullanıcı gerekli role sahip değil"
-msgid "cache_collection name is required"
-msgstr "cache_collection ismi gerekli"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr "kapalı bir proje içeren bir alt grupta proje oluşturulamaz: %s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "üst projelerinden farklı alanda bir proje oluşturulamaz."
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr "etkin alan silinemez, lütfen önce kapatın."
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr "%s projesi silinemiyor çünkü sıradüzen içindeki bir yaprak değil."
-
-#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr "proje %s kapatılamıyor çünkü alt ağacında etkin projeler var"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "proje %s etkinleştirilemiyor çünkü üstleri kapatılmış"
-
-msgid "database db_name is required"
-msgstr "veri tabanı db_name gerekli"
-
-msgid "db_hosts value is required"
-msgstr "db_hosts deÄŸeri gerekli"
-
-msgid "delete the default domain"
-msgstr "varsayılan alanı sil"
-
#, python-format
msgid "group %(group)s"
msgstr "grup %(group)s"
@@ -1195,33 +1111,14 @@ msgstr ""
"idp_contact_type şunlardan biri olmalı: [teknik, diğer, destek, idari veya "
"faturalama."
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "mongo_ttl_seconds için tam sayı değer bekleniyor"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "w için tam sayı değer bekleniyor (yazma ilgisi özniteliği)"
-
#, python-format
msgid "invalid date format %s"
msgstr "geçersiz tarih biçimi %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "%s alt grubu için azami sıralı dizi derinliğine ulaşıldı."
-
-msgid "no ssl support available"
-msgstr "ssl desteÄŸi yok"
-
-#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "%(group)s grubundaki %(option)s seçeneği"
-msgid "pad must be single character"
-msgstr "dolgu tek bir karakter olmalı"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "dolgulanmış base64url metni 4 karakterin katı olmalı"
-
msgid "provided consumer key does not match stored consumer key"
msgstr "sağlanan tüketici anahtarı depolanan tüketici anahtarıyla eşleşmiyor"
@@ -1231,18 +1128,12 @@ msgstr "sağlanan istek anahtarı depolanan istek anahtarıyla eşleşmiyor"
msgid "provided verifier does not match stored verifier"
msgstr "sağlanan doğrulayıcı depolanan doğrulayıcı ile eşleşmiyor"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "bölge dogpile.cache.CacheRegion türünde değil"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses pozitif bir değer ya da null olmalı."
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "tekrar yetkilendirmeye izin veriliyorsa remaining_uses ayarlanmamalı"
-msgid "replicaset_name required when use_replica is True"
-msgstr "use_replica True olduÄŸunda replicaset_name gereklidir"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1255,20 +1146,6 @@ msgid "rescope a scoped token"
msgstr "kapsamlı bir jeton tekrar kapsamlandı"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr "metin 4'ün katı, ama dolgu \"%s\" son karaktere 2 önceden önce"
-
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr ""
-"metin 4'ün katı, ama doldurma \"%s\" doldurma karakteri olmayan son "
-"karakterden önce"
-
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "metin 4'ün katı değil, ama \"%s\" dolgusu içeriyor"
-
-#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
msgstr "tls_cacertdir %s bulunamadı ya da bir dizin"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
index 44dbbe37..d645e82c 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-critical.po
@@ -3,22 +3,22 @@
# This file is distributed under the same license as the keystone project.
#
# Translators:
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Linda <duleish@cn.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
index 791681d7..b3df3b82 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone-log-error.po
@@ -6,21 +6,23 @@
# Xiao Xi LIU <liuxx@cn.ibm.com>, 2014
# 刘俊朋 <liujunpeng@inspur.com>, 2015
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
+# Gaoxiao Zhu <zhu.gaoxiao@h3c.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-06-26 05:13+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-03-15 10:40+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
msgid "Cannot retrieve Authorization headers"
msgstr "无法获å–认è¯å¤´ä¿¡æ¯"
@@ -43,10 +45,6 @@ msgid ""
msgstr "在项目树-%(project_id)s 中å‘现循环引用或é‡å¤é¡¹ã€‚"
#, python-format
-msgid "Command %(to_exec)s exited with %(retcode)s- %(output)s"
-msgstr "命令 %(to_exec)s 已退出,退出ç åŠè¾“出为 %(retcode)s- %(output)s"
-
-#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr "无法绑定至 %(host)s:%(port)s"
@@ -64,6 +62,10 @@ msgstr ""
"设置调试环境出错。请确ä¿é€‰é¡¹--debug-url çš„æ ¼å¼æ˜¯è¿™æ ·çš„<host>:<port> ,和确ä¿"
"有一个调试进程正在监å¬é‚£ä¸ªç«¯å£"
+#, python-format
+msgid "Error when signing assertion, reason: %(reason)s%(output)s"
+msgstr "对断言进行签å时出错,原因:%(reason)s%(output)s"
+
msgid "Failed to construct notifier"
msgstr "构造通知器失败"
@@ -72,6 +74,9 @@ msgid ""
"you don't have sufficient permissions to create it"
msgstr "创建[Fernet_tokens] 键仓库失败:它已存在或你没有足够的æƒé™åŽ»åˆ›å»ºå®ƒã€‚"
+msgid "Failed to create the default domain."
+msgstr "无法创建默认域。"
+
#, python-format
msgid "Failed to remove file %(file_path)r: %(error)s"
msgstr "无法删除文件%(file_path)r: %(error)s"
@@ -119,12 +124,6 @@ msgstr ""
msgid "Server error"
msgstr "æœåŠ¡å™¨æŠ¥é”™"
-#, python-format
-msgid ""
-"Unable to build cache config-key. Expected format \"<argname>:<value>\". "
-"Skipping unknown format: %s"
-msgstr "无法构建缓存é…置键值对。期望格å¼â€œ<å‚æ•°>:<值>â€ã€‚跳过未知的格å¼: %s"
-
msgid "Unable to sign token"
msgstr "无法签å令牌"
diff --git a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
index 6479984c..c20b31f0 100644
--- a/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
@@ -1,4 +1,4 @@
-# Chinese (Simplified, China) translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
@@ -6,29 +6,40 @@
# Zhong Chaoliang <charliezon@gmail.com>, 2013
# Dongliang Yu <yudl.nju@gmail.com>, 2013
# Lee Yao <yaoli111144@gmail.com>, 2013
+# Lee Yao <yaoli111144@gmail.com>, 2013
+# Zhong Chaoliang <charliezon@gmail.com>, 2013
# 颜海峰 <yanheven@gmail.com>, 2014
-# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Linda <duleish@cn.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hans_CN\n"
-"Language-Team: Chinese (China)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-27 05:34+0000\n"
+"Last-Translator: Linda <duleish@cn.ibm.com>\n"
+"Language: zh-CN\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s ä¸æ˜¯å—支æŒçš„驱动程åºç‰ˆæœ¬"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr "%(entity)s å称ä¸èƒ½åŒ…å«ä»¥ä¸‹ä¿ç•™å­—符:%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
msgstr "%(event)s ä¸æ˜¯æœ‰æ•ˆé€šçŸ¥äº‹ä»¶ï¼Œå¿…须是下列其中一项:%(actions)s"
@@ -50,6 +61,10 @@ msgstr ""
"录。"
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s 并未暗示 %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s ä¸èƒ½å°‘于 %(min_length)s 个字符。"
@@ -62,6 +77,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s ä¸åº”该超过 %(max_length)s 个字符。"
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s ä¸èƒ½æ˜¯æš—示角色"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s ä¸èƒ½ä¸ºç©ºã€‚"
@@ -77,8 +96,17 @@ msgstr "%s 字段是必填字段,ä¸èƒ½ä¸ºç©º"
msgid "%s field(s) cannot be empty"
msgstr "%s 字段ä¸èƒ½ä¸ºç©º"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(ç¦ç”¨è°ƒè¯•æ–¹å¼ä»¥é¿å…显示这些详细信æ¯ã€‚)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"在 Mitaka å‘行版中,已ä¸æŽ¨è使用 LDAP 身份åŽç«¯çš„ %s (以支æŒåªè¯»èº«ä»½ LDAP 访"
+"问)。它将在“Oâ€å‘行版中移除。"
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(ç¦ç”¨ insecure_debug æ–¹å¼ä»¥é¿å…这些详细信æ¯ã€‚)"
msgid "--all option cannot be mixed with other options"
msgstr "--all 选项ä¸èƒ½ä¸Žå…¶ä»–选项一起使用"
@@ -93,7 +121,7 @@ msgid "Access token not found"
msgstr "找ä¸åˆ°è®¿é—®ä»¤ç‰Œ"
msgid "Additional authentications steps required."
-msgstr "需è¦é¢å¤–身份验è¯"
+msgstr "需è¦é¢å¤–的认è¯æ­¥éª¤ã€‚"
msgid "An unexpected error occurred when retrieving domain configs"
msgstr "检索域é…置时å‘生æ„外错误"
@@ -109,10 +137,10 @@ msgstr "æ„外错误阻止了æœåŠ¡å™¨å®Œæˆæ‚¨çš„请求。"
msgid ""
"An unexpected error prevented the server from fulfilling your request: "
"%(exception)s"
-msgstr "æ„外错误导致æœåŠ¡å™¨æ— æ³•å®Œæˆæ‚¨çš„请求:%(exception)s"
+msgstr "æ„外错误阻止了æœåŠ¡å™¨å®Œæˆæ‚¨çš„请求:%(exception)s"
msgid "An unhandled exception has occurred: Could not find metadata."
-msgstr "存在无法处ç†çš„异常:找ä¸åˆ°å…ƒæ•°æ®ã€‚"
+msgstr "å‘é€äº†æ— æ³•å¤„ç†çš„异常:找ä¸åˆ°å…ƒæ•°æ®ã€‚"
msgid "At least one option must be provided"
msgstr "必须至少æ供一个选项"
@@ -123,8 +151,17 @@ msgstr "必须至少æ供一个选项,请使用 --all 或 --domain-name"
msgid "At least one role should be specified."
msgstr "应该至少指定一个角色。"
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"å°è¯•æ ¹æ® [identity]\\driver 选项为分é…自动选择驱动程åºå¤±è´¥ï¼Œå› ä¸ºæ‰¾ä¸åˆ°é©±åŠ¨ç¨‹"
+"åº %s。请在 keystone é…置中将 [assignment]/driver 设置为有效驱动程åºã€‚"
+
msgid "Attempted to authenticate with an unsupported method."
-msgstr "å°è¯•ä½¿ç”¨æœªæ”¯æŒçš„方法进行验è¯"
+msgstr "å°è¯•ä½¿ç”¨ä¸å—支æŒçš„方法进行验è¯ã€‚"
msgid ""
"Attempting to use OS-FEDERATION token with V2 Identity Service, use V3 "
@@ -132,7 +169,13 @@ msgid ""
msgstr "正在å°è¯•å°† OS-FEDERATION 令牌与 V2 身份æœåŠ¡é…åˆä½¿ç”¨ï¼Œè¯·ä½¿ç”¨ V3 认è¯"
msgid "Authentication plugin error."
-msgstr "认è¯æ’件错误"
+msgstr "认è¯æ’件错误。"
+
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr "åŽç«¯â€œ%(backend)sâ€ä¸æ˜¯æœ‰æ•ˆçš„ memcached åŽç«¯ã€‚有效åŽç«¯ï¼š%(backend_list)s"
msgid "Cannot authorize a request token with a token issued via delegation."
msgstr "无法对带有通过代ç†å‘出的令牌的请求令牌授æƒã€‚"
@@ -144,9 +187,6 @@ msgstr "无法更改 %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "无法更改域标识"
-msgid "Cannot change consumer secret"
-msgstr "ä¸èƒ½æ”¹å˜ç”¨æˆ·å¯†ç "
-
msgid "Cannot change user ID"
msgstr "无法更改用户标识"
@@ -154,12 +194,59 @@ msgid "Cannot change user name"
msgstr "无法更改用户å"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "无法创建具有无效 URL %(url)s 的端点"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "无法创建具有父代的项目:%(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "ä¸èƒ½é‡å¤å称 %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"无法创建项目,因为它将其所有者指定为域 %(domain_id)s,但在å¦ä¸€ä¸ªåŸŸ "
+"(%(parent_domain_id)s) 中指定了父代。"
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"无法创建项目,因为其父代 (%(domain_id)s) 正充当域,但该项目的指定 parent_id "
+"(%(parent_id)s) 与此 domain_id ä¸åŒ¹é…。"
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr "无法删除已å¯ç”¨çš„域,请先ç¦ç”¨è¯¥åŸŸã€‚"
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr "无法删除项目 %(project_id)s,因为其å­æ ‘包å«å·²å¯ç”¨çš„项目。"
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"无法删除项目 %s,因为它ä¸æ˜¯è¯¥å±‚次结构中的支å¶ã€‚如果è¦åˆ é™¤æ•´ä¸ªå­æ ‘,请使用级è”"
+"选项。"
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr "无法ç¦ç”¨é¡¹ç›® %(project_id)s,因为它的å­æ ‘包å«å·²å¯ç”¨çš„项目。"
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "无法å¯ç”¨é¡¹ç›® %s,因为它具有已ç¦ç”¨çš„父代"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr "无法列示æºè‡ªè‹¥å¹²ç»„并按用户标识过滤的分é…。"
msgid "Cannot list request tokens with a token issued via delegation."
msgstr "无法列示带有通过代ç†å‘出的令牌的请求令牌。"
@@ -178,6 +265,9 @@ msgid ""
msgstr ""
"在没有将 hints list 用作 self åŽé¢çš„第一个å‚数的情况下,无法截断驱动程åºè°ƒç”¨"
+msgid "Cannot update domain_id of a project that has children."
+msgstr "无法更新具有å­ä»£çš„项目的 domain_id。"
+
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
msgstr "无法åŒæ—¶ä½¿ç”¨ parents_as_list å’Œ parents_as_ids 查询å‚数。"
@@ -186,6 +276,9 @@ msgid ""
"Cannot use subtree_as_list and subtree_as_ids query params at the same time."
msgstr "无法åŒæ—¶ä½¿ç”¨ subtree_as_list å’Œ subtree_as_ids 查询å‚数。"
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "åªå…许对已å¯ç”¨çš„属性执行级è”更新。"
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr "将有效过滤器与组过滤器进行组åˆå°†å§‹ç»ˆäº§ç”Ÿç©ºåˆ—表。"
@@ -196,6 +289,10 @@ msgid ""
msgstr "将有效过滤器ã€åŸŸè¿‡æ»¤å™¨å’Œç»§æ‰¿çš„过滤器进行组åˆå°†å§‹ç»ˆäº§ç”Ÿç©ºåˆ—表。"
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "在 /domains/%s/config 处é…ç½® API 实体"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "å°è¯•å­˜å‚¨ %(type)s æ—¶å‘ç”Ÿå†²çª - %(details)s"
@@ -204,12 +301,19 @@ msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
msgstr "指定的区域标识有冲çªï¼šâ€œ%(url_id)sâ€ä¸ç­‰äºŽâ€œ%(ref_id)sâ€"
msgid "Consumer not found"
-msgstr "找ä¸åˆ°ä½¿ç”¨è€…"
+msgstr "找ä¸åˆ°ç”¨æˆ·"
#, python-format
msgid ""
"Could not change immutable attribute(s) '%(attributes)s' in target %(target)s"
-msgstr "未能更改目标 %(target)s 中的ä¸å¯å˜å±žæ€§ %(attributes)s "
+msgstr "无法更改目标 %(target)s 中的ä¸å¯å˜å±žæ€§ %(attributes)s "
+
+#, python-format
+msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"未能确定身份æ供者标识。在请求环境中找ä¸åˆ°é…置选项 %(issuer_attribute)s。"
#, python-format
msgid ""
@@ -222,7 +326,7 @@ msgid "Could not find Endpoint Group: %(endpoint_group_id)s"
msgstr "找ä¸åˆ°ç«¯ç‚¹ç»„:%(endpoint_group_id)s"
msgid "Could not find Identity Provider identifier in environment"
-msgstr "在环境中,找ä¸åˆ°â€œèº«ä»½æ供者â€æ ‡è¯†"
+msgstr "为在环境中找到“身份æ供者â€æ ‡è¯†"
#, python-format
msgid "Could not find Identity Provider: %(idp_id)s"
@@ -273,9 +377,6 @@ msgstr "找ä¸åˆ°é¡¹ç›®ï¼š%(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "找ä¸åˆ°åŒºåŸŸï¼š%(region_id)s"
-msgid "Could not find role"
-msgstr "找ä¸åˆ°è§’色"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -312,19 +413,49 @@ msgstr "找ä¸åˆ°ç‰ˆæœ¬ï¼š%(version)s"
msgid "Could not find: %(target)s"
msgstr "找ä¸åˆ° %(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"无法将任何联合用户属性映射至身份值。请检查调试日志或所使用的映射以获取其他详"
+"细信息。"
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"设置临时用户身份时未能映射用户。映射规则必须指定用户标识/用户名,或者必须设"
+"置 REMOTE_USER 环境变量。"
+
msgid "Could not validate the access token"
msgstr "未能验证访问令牌"
msgid "Credential belongs to another user"
msgstr "凭证属于另一用户"
+msgid "Credential signature mismatch"
+msgstr "凭据签名不匹配"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "位于 /domains/%s/config 处的数据库"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"自 Liberty 开始,已不推荐直接导入认证插件 %(name)r(为了支持它在 "
+"%(namespace)r 中的入口点),并且可能在 N 中移除。"
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"自 Liberty 开始,已不推荐直接导入驱动程序 %(name)r(为了支持它在 "
+"%(namespace)r 中的入口点),并且可能在 N 中移除。"
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
-msgstr "正在禁用实体,在此情况下,“enable”属性已由配置忽略。"
+msgstr "正在禁用实体,在此情况下,配置已忽略“enable”属性。"
#, python-format
msgid "Domain (%s)"
@@ -342,20 +473,20 @@ msgstr "域不能具有标识 %s"
msgid "Domain is disabled: %s"
msgstr "域已禁用:%s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "LDAP 不支持域元数据"
+msgid "Domain name cannot contain reserved characters."
+msgstr "域名不能包含保留字符。"
msgid "Domain scoped token is not supported"
-msgstr "作用域限定到域的令牌不受支持"
+msgstr "作用域限定的令牌不受支持"
+
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "V8 角色驱动程序中不支持特定于域的角色"
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
-msgstr "域 %(domain)s 已定义配置 - 正在忽略以下文件:%(file)s。"
-
-msgid "Domains are read-only against LDAP"
-msgstr "对于 LDAP,域为只读"
+msgstr "域 %(domain)s 已定义配置 - 忽略以下文件:%(file)s。"
msgid "Duplicate Entry"
msgstr "重复条目"
@@ -365,9 +496,27 @@ msgid "Duplicate ID, %s."
msgstr "标识 %s 重复。"
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "重复条目:%s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "名称 %s 重复。"
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "重复远程标识:%s"
+
+msgid "EC2 access key not found."
+msgstr "找不到 EC2 访问密钥。"
+
+msgid "EC2 signature not supplied."
+msgstr "未提供 EC2 签名。"
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr "必须设置 --bootstrap-password 自变量或 OS_BOOTSTRAP_PASSWORD。"
+
msgid "Enabled field must be a boolean"
msgstr "已启用的字段必须为布尔值"
@@ -393,12 +542,32 @@ msgid ""
msgstr "解析域 %(domain)s 的配置文件时出错,文件为 %(file)s。"
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "打开文件 %(path)s 时出错:%(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "解析行“%(line)s”时出错:%(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "解析规则 %(path)s 时出错:%(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "读取元数据文件时出错,原因为 %(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"注册域 %(domain)s 以使用 SQL 驱动程序的尝试次数已超出限制,显示为进行此尝试的"
+"最后一个域为 %(last_domain)s,正在放弃"
+
+#, python-format
msgid "Expected dict or list: %s"
-msgstr "期望字典或者列表: %s"
+msgstr "期望的字典或者列表:%s"
msgid ""
"Expected signing certificates are not available on the server. Please check "
@@ -419,7 +588,7 @@ msgid "Failed to start the %(name)s server"
msgstr "未能启动 %(name)s 服务器"
msgid "Failed to validate token"
-msgstr "token验证失败"
+msgstr "验证令牌失败"
msgid "Federation token is expired"
msgstr "联合令牌已到期"
@@ -435,6 +604,10 @@ msgid "Found invalid token: scoped to both project and domain."
msgstr "发现无效令牌:范围同时为项目和域。"
#, python-format
+msgid "Group %s not found in config"
+msgstr "在配置中找不到组 %s。"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
msgstr "特定于域的配置不支持组 %(group)s"
@@ -454,7 +627,7 @@ msgstr ""
#, python-format
msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
-msgstr "在 LDAP 对象 %(dn)s 中,找不到标识属性 %(id_attr)s"
+msgstr "未在 LDAP 对象 %(dn)s 中找到标识属性 %(id_attr)s"
#, python-format
msgid "Identity Provider %(idp)s is disabled"
@@ -465,13 +638,16 @@ msgid ""
"identifiers."
msgstr "新的“身份提供者”标识未包含在已接受的标识中。"
+msgid "Invalid EC2 signature."
+msgstr "无效 EC2 签名。"
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
msgstr "LDAP TLS 证书选项 %(option)s 无效。请选择下列其中一项:%(options)s"
#, python-format
msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "无效的LDAP TLS_AVAIL 选项: %s.TLS无效"
+msgstr "无效的 LDAP TLS_AVAIL 选项:%s。TLS 不可用"
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
@@ -479,7 +655,7 @@ msgstr "LDAP deref 选项 %(option)s 无效。请选择下列其中一项:%(op
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
-msgstr "无效的 LDAP作用域: %(scope)s. 选择以下选项之一: %(options)s"
+msgstr "无效的 LDAP 作用域:%(scope)s。请选择下列其中一项:%(options)s"
msgid "Invalid TLS / LDAPS combination"
msgstr "无效的 TLS / LDAPS 组合"
@@ -495,7 +671,7 @@ msgstr "凭证中的 BLOB 无效"
msgid ""
"Invalid domain name: %(domain)s found in config file name: %(file)s - "
"ignoring this file."
-msgstr "在配置文件名 %(file)s 中找到的域名 %(domain)s 无效 - 正在忽略此文件。"
+msgstr "在配置文件名 %(file)s 中找到的域名 %(domain)s 无效 - 忽略此文件。"
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
@@ -503,7 +679,7 @@ msgstr "特定于域的配置无效:%(reason)s"
#, python-format
msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
-msgstr "对字段“%(path)s”的输入无效。值为“%(value)s”。"
+msgstr "对字段“%(path)s”的输入无效。该值为“%(value)s”。"
msgid "Invalid limit value"
msgstr "限制值无效"
@@ -526,22 +702,18 @@ msgstr "规则 %(identity_value)s 无效。必须同时指定关键字“groups
msgid "Invalid signature"
msgstr "签名无效"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"ssl_cert_reqs 值 %s 无效,必须是下列其中一项:“NONE”、“OPTIONAL”和“REQUIRED”"
-
msgid "Invalid user / password"
msgstr "用户/密码无效"
+msgid "Invalid username or TOTP passcode"
+msgstr "无效用户名或 TOTP 密码"
+
msgid "Invalid username or password"
msgstr "无效用户名或密码"
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
-msgstr "KVS 区域 %s 已配置。无法重新配置。"
+msgstr "已配置 KVS 区域 %s。无法重新配置。"
#, python-format
msgid "Key Value Store not configured: %s"
@@ -559,6 +731,18 @@ msgstr "LDAP %s 删除"
msgid "LDAP %s update"
msgstr "LDAP %s 更新"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr "可变换资源标识的长度超过 64 个字符(允许的最大字符数)"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"映射 %(mapping_id)s 中的本地节引用不存在的远程匹配(例如,本地节中的 "
+"'{0}')。"
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
msgstr "对于键 %(target)s,发生锁定超时"
@@ -569,12 +753,16 @@ msgstr "锁定键必须与目标键匹配:%(lock)s != %(target)s"
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-msgstr "不正确的端点URL(%(endpoint)s), 查看错误日志获取详情"
+msgstr "端点 URL (%(endpoint)s) 的格式不正确,请查看错误日志获取详细信息。"
msgid "Marker could not be found"
msgstr "找不到标记符"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "已达到 %s 分支的最大层深度。"
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "已达到对 %s 的最大锁定尝试次数。"
@@ -592,7 +780,7 @@ msgstr "环境中缺少实体标识"
msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
"this parameter is advised."
-msgstr "正在修改“redelegation_count”(当禁止重新委派时)。建议省略此参数。"
+msgstr "禁止修改针对重新授权的“redelegation_count”。建议省略此参数。"
msgid "Multiple domains are not supported"
msgstr "多个域不受支持"
@@ -601,11 +789,14 @@ msgid "Must be called within an active lock context."
msgstr "必须在处于活动状态的锁定上下文内调用。"
msgid "Must specify either domain or project"
-msgstr "必须指定 domain 或 project"
+msgstr "必须指定域或项目"
msgid "Name field is required and cannot be empty"
msgstr "名称字段是必填字段,不能为空"
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "既未提供项目域标识,也未提供项目域名。"
+
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
"running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On."
@@ -622,7 +813,7 @@ msgstr ""
"找不到任何加密密钥;请针对引导程序 1 运行 keystone-manage fernet_setup。"
msgid "No options specified"
-msgstr "无选项指定"
+msgstr "未指定选项"
#, python-format
msgid "No policy is associated with endpoint %(endpoint_id)s."
@@ -632,6 +823,9 @@ msgstr "没有任何策略与端点 %(endpoint_id)s 关联。"
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "对于信任 %(trust_id)s,不存在其余使用"
+msgid "No token in the request"
+msgstr "请求中没有令牌。"
+
msgid "Non-default domain is not supported"
msgstr "非缺省域不受支持"
@@ -655,9 +849,27 @@ msgid "Project (%s)"
msgstr "项目 (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "找不到项目标识:%(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "项目字段是必填字段,不能为空。"
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "项目已禁用:%s"
+msgid "Project name cannot contain reserved characters."
+msgstr "项目名称不能包含保留字符。"
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "查询字符串不是采用 UTF-8 编码"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr "系统不支持读取组 %(group)s 中的选项 %(option)s 的缺省值。"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "仅允许对“委派者”信任进行重新委派"
@@ -668,6 +880,68 @@ msgid ""
msgstr ""
"其余重新委派深度 %(redelegation_depth)d 超出允许的范围 [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 admin_crud_extension,admin_crud 扩展现在始终可用。对 "
+"keystone-paste.ini 中的 [pipeline:admin_api] 节进行相应更新,因为它将会在 O "
+"发行版中移除。"
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"从粘贴管道移除 endpoint_filter_extension,端点过滤器扩展现在始终可用。对 "
+"keystone-paste.ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发"
+"行版中移除。"
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 federation_extension,联合扩展现在始终可用。对 keystone-paste."
+"ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。"
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 oauth1_extension,oauth1 扩展现在始终可用。对 keystone-paste."
+"ini 中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。"
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 revoke_extension,撤销扩展现在始终可用。对 keystone-paste.ini "
+"中的 [pipeline:api_v3] 节进行相应更新,因为它将会在 O 发行版中移除。"
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 simple_cert,现在已不推荐使用 PKI 和 PKIz 令牌,simple_cert 仅"
+"用于支持这些令牌提供程序。对 keystone-paste.ini 中的 [pipeline:api_v3] 节进行"
+"相应更新,因为它将会在 O 发行版中移除。"
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"从粘贴管道移除 user_crud_extension,user_crud 扩展现在始终可用。对 keystone-"
+"paste.ini 中的 [pipeline:admin_api] 节进行相应更新,因为它将会在 O 发行版中移"
+"除。"
+
msgid "Request Token does not have an authorizing user id"
msgstr "请求令牌没有授权用户标识"
@@ -698,10 +972,6 @@ msgid ""
"%(max_count)d"
msgstr "请求的重新委派深度 %(requested_count)d 超过允许的 %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "找不到角色 %s"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
@@ -733,18 +1003,38 @@ msgid "Specify a user or group, not both"
msgstr "请指定用户或组,但不是同时指定这两者"
msgid "Specify one of domain or project"
-msgstr "请指定域或项目"
+msgstr "请指定其中一个域或项目"
msgid "Specify one of user or group"
-msgstr "请指定用户或组"
+msgstr "请指定其中一个用户或组"
#, python-format
msgid ""
"String length exceeded.The length of string '%(string)s' exceeded the limit "
"of column %(type)s(CHAR(%(length)d))."
msgstr ""
-"字符串长度过长.字符串'%(string)s' 的长度超过列限制 %(type)s(字符"
-"(%(length)d))."
+"字符串长度过长。字符串“%(string)s”的长度超过列限制 %(type)s(字符"
+"(%(length)d))。"
+
+msgid "Tenant name cannot contain reserved characters."
+msgstr "租户名称不能包含保留字符。"
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"%s 扩展已移至 keystone 核心,因此,其迁移由主 keystone 数据库控件维护。使用以"
+"下命令:keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"“expires_at”不得早于现在。服务器未能遵从请求,因为它的格式不正确,或者其他方"
+"面不正确。客户机被认为发生错误。"
msgid "The --all option cannot be used with the --domain-name option"
msgstr "--all 选项不能与 --domain-name 选项配合使用"
@@ -758,11 +1048,11 @@ msgid ""
"The Keystone domain-specific configuration has specified more than one SQL "
"driver (only one is permitted): %(source)s."
msgstr ""
-"特定于 Keystone 域的é…置已指定多个 SQL 驱动程åºï¼ˆä»…å…许指定一个):"
+"特定于 Keystone 域的é…置指定了多个 SQL 驱动程åºï¼ˆä»…å…许指定一个):"
"%(source)s。"
msgid "The action you have requested has not been implemented."
-msgstr "您请求的操作暂未被执行"
+msgstr "暂未执行您请求的操作。"
msgid "The authenticated user should match the trustor."
msgstr "认证用户应匹配信任者。"
@@ -771,8 +1061,13 @@ msgid ""
"The certificates you requested are not available. It is likely that this "
"server does not use PKI tokens otherwise this is the result of "
"misconfiguration."
-msgstr ""
-"已请求的证书不可用。可能此服务器未使用 PKI 令牌,或者这是因为配置错误。"
+msgstr "请求的证书不可用。可能此服务器未使用 PKI 令牌,或者这是因为配置错误。"
+
+msgid "The configured token provider does not support bind authentication."
+msgstr "所配置的令牌提供程序不支持绑定认证。"
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "不允许在 V2 中创建充当域的项目。"
#, python-format
msgid ""
@@ -781,7 +1076,7 @@ msgid ""
msgstr "密码长度必须小于或等于 %(size)i。服务器未能遵照请求,因为密码无效。"
msgid "The request you have made requires authentication."
-msgstr "你的请求需要先授权"
+msgstr "您的请求需要先授权。"
msgid "The resource could not be found."
msgstr "找不到该资源。"
@@ -815,10 +1110,9 @@ msgstr "不应该存在任何非 oauth 参数"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "这不是可识别的 Fernet 有效内容版本:%s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr "这不是 V2.0 Fernet 令牌。请将 V3 用于信任、域或联合的令牌。"
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "这不是可识别的 Fernet 令牌 %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
@@ -843,11 +1137,14 @@ msgstr "令牌属于另一用户"
msgid "Token does not belong to specified tenant."
msgstr "令牌不属于指定的租户。"
+msgid "Token version is unrecognizable or unsupported."
+msgstr "令牌版本不可识别或者不受支持。"
+
msgid "Trustee has no delegated roles."
-msgstr "托管人没有任何已委派的角色。"
+msgstr "托管人没有委派的角色。"
msgid "Trustor is disabled."
-msgstr "Trustor被禁用"
+msgstr "Trustor 已禁用"
#, python-format
msgid ""
@@ -886,18 +1183,16 @@ msgid ""
"associated endpoints."
msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。"
+msgid "Unable to downgrade schema"
+msgstr "无法对模式进行降级"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr "使用映射 %(mapping_id)s 时,找不到有效组"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr "在 %(seconds)s 秒之后,无法根据池标识 %(id)s 获取连接。"
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
-msgstr "找不到指定的域配置目录:%s"
+msgstr "找不到域配置目录:%s"
#, python-format
msgid "Unable to lookup user %s"
@@ -907,7 +1202,7 @@ msgstr "无法查找用户 %s"
msgid ""
"Unable to reconcile identity attribute %(attribute)s as it has conflicting "
"values %(new)s and %(old)s"
-msgstr "无法协调身份属性 %(attribute)s,因为它具有冲突值%(new)s 和 %(old)s"
+msgstr "无法协调身份属性 %(attribute)s,因为它具有冲突值 %(new)s 和 %(old)s"
#, python-format
msgid ""
@@ -950,40 +1245,63 @@ msgstr "令牌版本 %s 未知"
#, python-format
msgid "Unregistered dependency: %(name)s for %(targets)s"
-msgstr "已针对 %(targets)s 注销依赖关系 %(name)s"
+msgstr "已撤销注册 %(targets)s 的依赖关系 %(name)s"
+
+msgid "Update of `domain_id` is not allowed."
+msgstr "不允许更新“domain_id”。"
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "不允许更新“is_domain”。"
msgid "Update of `parent_id` is not allowed."
msgstr "不允许更新“parent_id”。"
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "只允许更新根项目的 domain_id。"
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr "不允许更新充当域的项目的 domain_id。"
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
-msgstr "当尝试创建 SAML 断言时,请使用项目范围的令牌"
+msgstr "尝试创建 SAML 断言时,请使用项目范围的令牌"
-#, python-format
-msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
-msgstr "没有授权给用户%(u_id)s项目%(t_id)s的权限"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"已不推荐使用标识驱动程序配置来自动配置同一分配驱动程序,在“O”发行版中,如果不"
+"同于缺省值 (SQL),那么需要显式配置分配驱动程序。"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr "在租户 %(tenant_id)s 中,用户 %(user_id)s 已具有角色 %(role_id)s"
+msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
+msgstr "用户 %(u_id)s 没有授权给租户 %(t_id)s"
#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "用户%(user_id)s对域%(domain_id)s没有任何访问权限"
+msgstr "用户 %(user_id)s 没有访问域 %(domain_id)s 的权限"
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
-msgstr "用户%(user_id)s 没有访问项目 %(project_id)s的权限"
+msgstr "用户 %(user_id)s 没有访问项目 %(project_id)s 的权限"
#, python-format
msgid "User %(user_id)s is already a member of group %(group_id)s"
-msgstr "用户%(user_id)s 已是组 %(group_id)s 的成员"
+msgstr "用户 %(user_id)s 已是组 %(group_id)s 的成员"
#, python-format
msgid "User '%(user_id)s' not found in group '%(group_id)s'"
msgstr "在组“%(group_id)s”中找不到用户“%(user_id)s”"
msgid "User IDs do not match"
-msgstr "用户ID不匹配"
+msgstr "用户 ID 不匹配"
+
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"由于缺少用户标识、具有域的用户名或者具有域名的用户名,因此无法构建用户认证。"
#, python-format
msgid "User is disabled: %s"
@@ -998,17 +1316,31 @@ msgstr "用户不是受托人。"
msgid "User not found"
msgstr "找不到用户"
+msgid "User not valid for tenant."
+msgstr "用户做为租户是无效的。"
+
+msgid "User roles not supported: tenant_id required"
+msgstr "用户角色不受支持:需要 tenant_id"
+
#, python-format
msgid "User type %s not supported"
msgstr "用户类型 %s 不受支持"
msgid "You are not authorized to perform the requested action."
-msgstr "您没有授权完成所请求的操作。"
+msgstr "您无权执行请求的操作。"
#, python-format
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "您无权执行请求的操作:%(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"您已尝试使用管理员令牌创建资源。因为此令牌不在域中,所以您必须显式添加域以使"
+"此资源成为其成员。"
+
msgid "`key_mangler` functions must be callable."
msgstr "“key_mangler”函数必须可调用。"
@@ -1024,39 +1356,15 @@ msgstr "auth_type 不是“Negotiate”"
msgid "authorizing user does not have role required"
msgstr "授权用户没有必需的角色"
-msgid "cache_collection name is required"
-msgstr "需要 cache_collection 名称"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr "无法在包含已禁用项目的分支中创建项目:%s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "如果某个域不是项目的父代所在的域,那么无法在该域内创建该项目。"
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr "无法删除已启用的域,请先禁用该域。"
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr "无法删除项目 %s,因为它不是层次结构中的叶。"
-
#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr "无法禁用项目 %s,因为其子树包含已启用的项目"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "无法启用项目 %s,因为它具有已禁用的父代"
-
-msgid "database db_name is required"
-msgstr "需要数据库 db_name"
-
-msgid "db_hosts value is required"
-msgstr "需要 db_hosts 值"
-
-msgid "delete the default domain"
-msgstr "请删除缺省域"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
+msgstr "无法删除充当域的已启用项目。请先禁用项目 %s。"
#, python-format
msgid "group %(group)s"
@@ -1069,35 +1377,31 @@ msgstr ""
"idp_contact_type 必须是下列其中一项:technical、other、support、"
"administrative 或 billing。"
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "mongo_ttl_seconds 需要整数值"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "w(写相关属性)需要整数值"
-
#, python-format
msgid "invalid date format %s"
msgstr "日期格式 %s 无效"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "对于 %s 分支,已达到最大层次结构深度。"
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr "不允许两个同名项目充当域:%s"
+
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr "不允许一个域的两个项目具有相同名称:%s"
-msgid "no ssl support available"
-msgstr "未提供 ssl 支持"
+msgid "only root projects are allowed to act as domains."
+msgstr "只允许根项目充当域。"
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "组 %(group)s 中的选项 %(option)s"
-msgid "pad must be single character"
-msgstr "填充项必须是单个字符"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "填充的 base64url 文本的字符数必须是 4 的倍数"
-
msgid "provided consumer key does not match stored consumer key"
-msgstr "提供的用户密钥与存储的用户密钥不符"
+msgstr "提供的用户密钥与存储的用户密钥不匹配"
msgid "provided request key does not match stored request key"
msgstr "提供的请求密钥与存储的请求密钥不匹配"
@@ -1105,38 +1409,27 @@ msgstr "提供的请求密钥与存储的请求密钥不匹配"
msgid "provided verifier does not match stored verifier"
msgstr "提供的验证器与存储的验证器不匹配"
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "区域的类型不是 dogpile.cache.CacheRegion"
-
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses 必须为正整数或 Null。"
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "如果允许重新委派,那么不能设置 remaining_uses"
-msgid "replicaset_name required when use_replica is True"
-msgstr "当 use_replica 为 True 时,需要 replicaset_name"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
"%(group_other)s instead"
-msgstr "请求更新组 %(group)s,但所提供配置反而包含组 %(group_other)s"
+msgstr "请求更新组 %(group)s,但所提供的配置反而包含组 %(group_other)s"
msgid "rescope a scoped token"
msgstr "请重新确定带范围的令牌的范围"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr "文本的字符数是 4 的倍数,但填充项“%s”出现在倒数第 2 个字符前面"
-
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr "文本的字符数是 4 的倍数,但填充项“%s”出现在非填充的最后一个字符前面"
+msgid "role %s is not defined"
+msgstr "未定义角色 %s"
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "文本的字符数不是 4 的倍数,但文本包含填充项“%s”"
+msgid "scope.project.id must be specified if include_subtree is also specified"
+msgstr "如果还指定了 include_subtree,那么必须指定 scope.project.id"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1149,3 +1442,13 @@ msgstr "tls_cacertfile %s 未找到或者不是一个文件"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "令牌引用必须为 KeystoneToken 类型,但收到:%s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr "从 Mitaka 开始,已不推荐更新 domain_id,它将在 O 发行版中移除。"
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+"已验证期望在 %(func_name)r 的函数签名中查找 %(param_name)r"
diff --git a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
index 0b7082e0..3c4e36e8 100644
--- a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
+++ b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone-log-critical.po
@@ -3,22 +3,22 @@
# This file is distributed under the same license as the keystone project.
#
# Translators:
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Jennifer <cristxu@tw.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2014-08-31 03:19+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (Taiwan)\n"
-"Language: zh-TW\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-08-31 03:19+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: zh-TW\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (Taiwan)\n"
#, python-format
msgid "Unable to open template file %s"
diff --git a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone.po b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
index 0c01497a..3f4a798e 100644
--- a/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
+++ b/keystone-moon/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
@@ -1,33 +1,42 @@
-# Chinese (Traditional, Taiwan) translations for keystone.
+# Translations template for keystone.
# Copyright (C) 2015 OpenStack Foundation
# This file is distributed under the same license as the keystone project.
#
# Translators:
-# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
-# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Jennifer <cristxu@tw.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: keystone 8.0.1.dev11\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/keystone\n"
-"POT-Creation-Date: 2015-11-05 06:13+0000\n"
-"PO-Revision-Date: 2015-09-03 12:54+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hant_TW\n"
-"Language-Team: Chinese (Taiwan)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"Project-Id-Version: keystone 9.0.1.dev10\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
+"POT-Creation-Date: 2016-05-03 20:05+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.1.1\n"
+"PO-Revision-Date: 2016-04-19 03:26+0000\n"
+"Last-Translator: Jennifer <cristxu@tw.ibm.com>\n"
+"Language: zh-TW\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (Taiwan)\n"
#, python-format
msgid "%(detail)s"
msgstr "%(detail)s"
#, python-format
+msgid "%(driver)s is not supported driver version"
+msgstr "%(driver)s 不是受支援的驅動程式版本"
+
+#, python-format
+msgid ""
+"%(entity)s name cannot contain the following reserved characters: %(chars)s"
+msgstr "%(entity)s 名稱不能包含下列保留字元:%(chars)s"
+
+#, python-format
msgid ""
"%(event)s is not a valid notification event, must be one of: %(actions)s"
-msgstr "%(event)s 不是有效的通知事件,必須是 %(actions)s 的其中之一"
+msgstr "%(event)s 不是有效的通知事件,必須是下列其中一個:%(actions)s"
#, python-format
msgid "%(host)s is not a trusted dashboard host"
@@ -46,6 +55,10 @@ msgstr ""
"錄。"
#, python-format
+msgid "%(prior_role_id)s does not imply %(implied_role_id)s"
+msgstr "%(prior_role_id)s 不暗示 %(implied_role_id)s"
+
+#, python-format
msgid "%(property_name)s cannot be less than %(min_length)s characters."
msgstr "%(property_name)s 不能少於 %(min_length)s 個字元。"
@@ -58,6 +71,10 @@ msgid "%(property_name)s should not be greater than %(max_length)s characters."
msgstr "%(property_name)s 不應超過 %(max_length)s 個字元。"
#, python-format
+msgid "%(role_id)s cannot be an implied roles"
+msgstr "%(role_id)s 不能是隱含角色"
+
+#, python-format
msgid "%s cannot be empty."
msgstr "%s 不能是空的。"
@@ -73,8 +90,17 @@ msgstr "%s 欄位是必要欄位,因此不能是空的"
msgid "%s field(s) cannot be empty"
msgstr "%s 欄位不能是空的"
-msgid "(Disable debug mode to suppress these details.)"
-msgstr "(停用除錯模式,以暫停這些詳細資料。)"
+#, python-format
+msgid ""
+"%s for the LDAP identity backend has been deprecated in the Mitaka release "
+"in favor of read-only identity LDAP access. It will be removed in the \"O\" "
+"release."
+msgstr ""
+"LDAP 身分後端的 %s 在 Mitaka 版本中已遭到淘汰,以支援唯讀身分 LDAP 存取。它將"
+"在 \"O\" 版本中予以移除。"
+
+msgid "(Disable insecure_debug mode to suppress these details.)"
+msgstr "(停用 insecure_debug 模式,以暫停這些詳細資料。)"
msgid "--all option cannot be mixed with other options"
msgstr "--all 選項不能與其他選項混合"
@@ -83,7 +109,7 @@ msgid "A project-scoped token is required to produce a service catalog."
msgstr "需要專案範圍的記號來產生服務型錄。"
msgid "Access token is expired"
-msgstr "存取記號過期"
+msgstr "存取記號已過期"
msgid "Access token not found"
msgstr "找不到存取記號"
@@ -108,7 +134,7 @@ msgid ""
msgstr "發生非預期的錯誤,造成伺服器無法履行要求:%(exception)s"
msgid "An unhandled exception has occurred: Could not find metadata."
-msgstr "發生無法處理的異常狀況:找不到 meta 資料。"
+msgstr "發生未處理的異常狀況:找不到 meta 資料。"
msgid "At least one option must be provided"
msgstr "必須提供至少一個選項"
@@ -119,6 +145,16 @@ msgstr "必須提供至少一個選項,請使用 --all 或 --domain-name"
msgid "At least one role should be specified."
msgstr "應該至少指定一個角色。"
+#, python-format
+msgid ""
+"Attempted automatic driver selection for assignment based upon "
+"[identity]\\driver option failed since driver %s is not found. Set "
+"[assignment]/driver to a valid driver in keystone config."
+msgstr ""
+"針對基於 [identity]\\driver 選項的指派,嘗試自動選取驅動程式失敗,因為找不到"
+"驅動程式 %s。請在 Keystone 配置中,將 [assignment]/driver 設為有效的驅動程"
+"式。"
+
msgid "Attempted to authenticate with an unsupported method."
msgstr "已嘗試使用不支援的方法進行鑑別。"
@@ -131,8 +167,15 @@ msgstr ""
msgid "Authentication plugin error."
msgstr "鑑別外掛程式錯誤。"
+#, python-format
+msgid ""
+"Backend `%(backend)s` is not a valid memcached backend. Valid backends: "
+"%(backend_list)s"
+msgstr ""
+"後端 `%(backend)s` 不是有效的 Memcached 後端。有效後端:%(backend_list)s"
+
msgid "Cannot authorize a request token with a token issued via delegation."
-msgstr "無法使用透過委派發出之記號授權要求記號。"
+msgstr "無法對含有透過委派發出之記號的要求記號進行授權。"
#, python-format
msgid "Cannot change %(option_name)s %(attr)s"
@@ -141,9 +184,6 @@ msgstr "無法變更 %(option_name)s %(attr)s"
msgid "Cannot change Domain ID"
msgstr "無法變更網域 ID"
-msgid "Cannot change consumer secret"
-msgstr "無法變更消費者密碼"
-
msgid "Cannot change user ID"
msgstr "無法變更使用者 ID"
@@ -151,12 +191,59 @@ msgid "Cannot change user name"
msgstr "無法變更使用者名稱"
#, python-format
+msgid "Cannot create an endpoint with an invalid URL: %(url)s"
+msgstr "無法使用無效 URL %(url)s 來建立端點"
+
+#, python-format
msgid "Cannot create project with parent: %(project_id)s"
msgstr "無法建立具有母項的專案:%(project_id)s"
#, python-format
-msgid "Cannot duplicate name %s"
-msgstr "無法複製名稱 %s"
+msgid ""
+"Cannot create project, since it specifies its owner as domain %(domain_id)s, "
+"but specifies a parent in a different domain (%(parent_domain_id)s)."
+msgstr ""
+"無法建立專案,因為它指定自己的擁有者作為網域 %(domain_id)s,但卻指定了位於不"
+"同網域 (%(parent_domain_id)s) 中的母項。"
+
+#, python-format
+msgid ""
+"Cannot create project, since its parent (%(domain_id)s) is acting as a "
+"domain, but project's specified parent_id (%(parent_id)s) does not match "
+"this domain_id."
+msgstr ""
+"無法建立專案,因為它的母項 (%(domain_id)s) 正在充當網域,但專案的指定 "
+"parent_id (%(parent_id)s) 與此 domain_id 不符。"
+
+msgid "Cannot delete a domain that is enabled, please disable it first."
+msgstr "無法刪除已啟用的網域,請先停用該網域。"
+
+#, python-format
+msgid ""
+"Cannot delete project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr "無法刪除專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。"
+
+#, python-format
+msgid ""
+"Cannot delete the project %s since it is not a leaf in the hierarchy. Use "
+"the cascade option if you want to delete a whole subtree."
+msgstr ""
+"無法刪除專案 %s,因為它不是階層中的葉節點。如果要刪除整個子樹狀結構,請使用重"
+"疊顯示選項。"
+
+#, python-format
+msgid ""
+"Cannot disable project %(project_id)s since its subtree contains enabled "
+"projects."
+msgstr "無法停用專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。"
+
+#, python-format
+msgid "Cannot enable project %s since it has disabled parents"
+msgstr "無法啟用專案 %s,因為它具有已停用的母項"
+
+msgid "Cannot list assignments sourced from groups and filtered by user ID."
+msgstr "無法列出由群組提供且依使用者 ID 進行過濾的指派。"
msgid "Cannot list request tokens with a token issued via delegation."
msgstr "無法列出含有透過委派發出之記號的要求記號。"
@@ -172,16 +259,22 @@ msgstr "無法移除尚未授權的角色,%s"
msgid ""
"Cannot truncate a driver call without hints list as first parameter after "
"self "
-msgstr "屬性 limit 不在 hints 清單時,無法截斷驅動程式呼叫"
+msgstr "如果提示清單不是 self 後面的第一個參數,則無法截斷驅動程式呼叫"
+
+msgid "Cannot update domain_id of a project that has children."
+msgstr "無法更新包含子項之專案的 domain_id。"
msgid ""
"Cannot use parents_as_list and parents_as_ids query params at the same time."
-msgstr "無法同時使用 parents_as_list 與 parents_as_ids查詢參數。"
+msgstr "無法同時使用 parents_as_list 與 parents_as_ids 查詢參數。"
msgid ""
"Cannot use subtree_as_list and subtree_as_ids query params at the same time."
msgstr "無法同時使用 subtree_as_list 與 subtree_as_ids 查詢參數。"
+msgid "Cascade update is only allowed for enabled attribute."
+msgstr "只容許對已啟用的屬性進行重疊顯示更新。"
+
msgid ""
"Combining effective and group filter will always result in an empty list."
msgstr "結合作用中的過濾器和群組過濾器將一律導致空清單。"
@@ -192,6 +285,10 @@ msgid ""
msgstr "結合作用中的過濾器、網域過濾器及繼承的過濾器將一律導致空清單。"
#, python-format
+msgid "Config API entity at /domains/%s/config"
+msgstr "在 /domains/%s/config 處配置 API 實體"
+
+#, python-format
msgid "Conflict occurred attempting to store %(type)s - %(details)s"
msgstr "嘗試儲存 %(type)s 時發生衝突 - %(details)s"
@@ -209,6 +306,13 @@ msgstr "無法變更目標 %(target)s 中固定不變的屬性 '%(attributes)s'"
#, python-format
msgid ""
+"Could not determine Identity Provider ID. The configuration option "
+"%(issuer_attribute)s was not found in the request environment."
+msgstr ""
+"無法判定身分提供者 ID。在要求環境中,找不到配置選項 %(issuer_attribute)s。"
+
+#, python-format
+msgid ""
"Could not find %(group_or_option)s in domain configuration for domain "
"%(domain_id)s"
msgstr "在下列網域的網域配置中找不到 %(group_or_option)s:%(domain_id)s"
@@ -269,9 +373,6 @@ msgstr "找不到專案:%(project_id)s"
msgid "Could not find region: %(region_id)s"
msgstr "找不到區域:%(region_id)s"
-msgid "Could not find role"
-msgstr "找不到角色"
-
#, python-format
msgid ""
"Could not find role assignment with role: %(role_id)s, user or group: "
@@ -308,15 +409,45 @@ msgstr "找不到版本:%(version)s"
msgid "Could not find: %(target)s"
msgstr "找不到:%(target)s"
+msgid ""
+"Could not map any federated user properties to identity values. Check debug "
+"logs or the mapping used for additional details."
+msgstr ""
+"無法將任何聯合使用者內容對映至身分值。如需其他詳細資料,請檢查除錯日誌或使用"
+"的對映。"
+
+msgid ""
+"Could not map user while setting ephemeral user identity. Either mapping "
+"rules must specify user id/name or REMOTE_USER environment variable must be "
+"set."
+msgstr ""
+"設定暫時使用者身分時,無法對映使用者。對映規則必須指定使用者 ID/名稱,或者必"
+"須設定 REMOTE_USER 環境變數。"
+
msgid "Could not validate the access token"
msgstr "無法驗證存取記號"
msgid "Credential belongs to another user"
msgstr "認證屬於另一個使用者"
+msgid "Credential signature mismatch"
+msgstr "認證簽章不符"
+
#, python-format
-msgid "Database at /domains/%s/config"
-msgstr "位於 /domains/%s/config 中的資料庫"
+msgid ""
+"Direct import of auth plugin %(name)r is deprecated as of Liberty in favor "
+"of its entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"不建議直接匯入鑑別外掛程式 %(name)r,因為 Liberty 支援它在 %(namespace)r 中的"
+"進入點且可能在 N 中予以移除。"
+
+#, python-format
+msgid ""
+"Direct import of driver %(name)r is deprecated as of Liberty in favor of its "
+"entrypoint from %(namespace)r and may be removed in N."
+msgstr ""
+"不建議直接匯入驅動程式 %(name)r,因為 Liberty 支援它在 %(namespace)r 中的進入"
+"點且可能在 N 中予以移除。"
msgid ""
"Disabling an entity where the 'enable' attribute is ignored by configuration."
@@ -328,7 +459,7 @@ msgstr "網域 (%s)"
#, python-format
msgid "Domain cannot be named %s"
-msgstr "網域不能命名為 %s"
+msgstr "無法將網域命名為 %s"
#, python-format
msgid "Domain cannot have ID %s"
@@ -338,32 +469,50 @@ msgstr "網域不能具有 ID %s"
msgid "Domain is disabled: %s"
msgstr "已停用網域:%s"
-msgid "Domain metadata not supported by LDAP"
-msgstr "LDAP 不支援網域 meta 資料"
+msgid "Domain name cannot contain reserved characters."
+msgstr "網域名稱不能包含保留字元。"
msgid "Domain scoped token is not supported"
msgstr "不支援網域範圍的記號"
+msgid "Domain specific roles are not supported in the V8 role driver"
+msgstr "網域專屬角色在第 8 版角色驅動程式中不受支援"
+
#, python-format
msgid ""
"Domain: %(domain)s already has a configuration defined - ignoring file: "
"%(file)s."
msgstr "網域 %(domain)s 已定義配置 - 正在忽略檔案 %(file)s。"
-msgid "Domains are read-only against LDAP"
-msgstr "網域對於 LDAP 而言是唯讀的"
-
msgid "Duplicate Entry"
-msgstr "項目重複"
+msgstr "重複的項目"
#, python-format
msgid "Duplicate ID, %s."
msgstr "重複的 ID,%s。"
#, python-format
+msgid "Duplicate entry: %s"
+msgstr "重複的項目:%s"
+
+#, python-format
msgid "Duplicate name, %s."
msgstr "重複的名稱,%s。"
+#, python-format
+msgid "Duplicate remote ID: %s"
+msgstr "重複的遠端 ID:%s"
+
+msgid "EC2 access key not found."
+msgstr "找不到 EC2 存取金鑰。"
+
+msgid "EC2 signature not supplied."
+msgstr "未提供 EC2 簽章。"
+
+msgid ""
+"Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be set."
+msgstr "必須設定 --bootstrap-password 引數或 OS_BOOTSTRAP_PASSWORD。"
+
msgid "Enabled field must be a boolean"
msgstr "「已啟用」欄位必須是布林值"
@@ -389,17 +538,37 @@ msgid ""
msgstr "剖析網域 %(domain)s 的配置檔時發生錯誤,檔案:%(file)s。"
#, python-format
+msgid "Error while opening file %(path)s: %(err)s"
+msgstr "開啟檔案 %(path)s 時發生錯誤:%(err)s"
+
+#, python-format
+msgid "Error while parsing line: '%(line)s': %(err)s"
+msgstr "剖析行 '%(line)s' 時發生錯誤:%(err)s"
+
+#, python-format
+msgid "Error while parsing rules %(path)s: %(err)s"
+msgstr "剖析規則 %(path)s 時發生錯誤:%(err)s"
+
+#, python-format
msgid "Error while reading metadata file, %(reason)s"
msgstr "讀取 meta 資料檔時發生錯誤,%(reason)s"
#, python-format
+msgid ""
+"Exceeded attempts to register domain %(domain)s to use the SQL driver, the "
+"last domain that appears to have had it is %(last_domain)s, giving up"
+msgstr ""
+"已超過嘗試登錄網域 %(domain)s 以使用 SQL 驅動程式的次數,似乎已經具有它的最後"
+"一個網域是 %(last_domain)s,將放棄"
+
+#, python-format
msgid "Expected dict or list: %s"
msgstr "預期字典或清單:%s"
msgid ""
"Expected signing certificates are not available on the server. Please check "
"Keystone configuration."
-msgstr "伺服器上無法使用預期的簽署憑證。請檢查 Keystone 配置。"
+msgstr "在伺服器上,無法使用預期的簽署憑證。請檢查 Keystone 配置。"
#, python-format
msgid ""
@@ -407,8 +576,8 @@ msgid ""
"with the request since it is either malformed or otherwise incorrect. The "
"client is assumed to be in error."
msgstr ""
-"預期在 %(target)s 中找到 %(attribute)s - 伺服器無法遵守要求,因為它的格式不正"
-"確。系統會假定用戶端處於錯誤狀態。"
+"預期在 %(target)s 中找到 %(attribute)s - 伺服器無法遵守要求,因為它的形態異"
+"常,或者在其他方面發生錯誤。系統會假定用戶端處於錯誤狀態。"
#, python-format
msgid "Failed to start the %(name)s server"
@@ -425,14 +594,18 @@ msgid ""
"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
"order to redelegate a trust"
msgstr ""
-"欄位 \"remaining_uses\" 設定為 %(value)s,但為了重新委派信任,不能設定該欄位"
+"欄位 \"remaining_uses\" 設定為 %(value)s,但為了重新委派信任,不得設定該欄位"
msgid "Found invalid token: scoped to both project and domain."
msgstr "找到無效記號:已將範圍限定為專案及網域。"
#, python-format
+msgid "Group %s not found in config"
+msgstr "在配置中找不到群組 %s"
+
+#, python-format
msgid "Group %(group)s is not supported for domain specific configurations"
-msgstr "網域特定配置不支援群組 %(group)s"
+msgstr "網域專屬配置不支援群組 %(group)s"
#, python-format
msgid ""
@@ -445,7 +618,7 @@ msgid ""
"Group membership across backend boundaries is not allowed, group in question "
"is %(group_id)s, user is %(user_id)s"
msgstr ""
-"不容許後端界限之間的群組成員資格,有問題的群組為%(group_id)s,使用者為 "
+"不容許跨後端界限的群組成員資格,有問題的群組為 %(group_id)s,使用者為 "
"%(user_id)s"
#, python-format
@@ -461,21 +634,24 @@ msgid ""
"identifiers."
msgstr "送入的身分提供者 ID 未包括在接受的 ID 中。"
+msgid "Invalid EC2 signature."
+msgstr "無效的 EC2 簽章。"
+
#, python-format
msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
-msgstr "無效的 LDAP TLS 憑證選項:%(option)s。請選擇 %(options)s 的其中之一"
+msgstr "無效的 LDAP TLS 憑證選項:%(option)s。請選擇下列其中一個:%(options)s"
#, python-format
msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "無效的 LDAP TLS_AVAIL 選項:%s。TLS 無法使用"
+msgstr "無效的 LDAP TLS_AVAIL 選項:%s。無法使用 TLS"
#, python-format
msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
-msgstr "無效的 LDAP deref 選項:%(option)s。請選擇 %(options)s 的其中之一"
+msgstr "無效的 LDAP deref 選項:%(option)s。請選擇下列其中一個:%(options)s"
#, python-format
msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
-msgstr "無效的 LDAP 範圍:%(scope)s。請選擇 %(options)s 的其中之一"
+msgstr "無效的 LDAP 範圍:%(scope)s。請選擇下列其中一個:%(options)s"
msgid "Invalid TLS / LDAPS combination"
msgstr "無效的 TLS/LDAPS 組合"
@@ -496,7 +672,7 @@ msgstr ""
#, python-format
msgid "Invalid domain specific configuration: %(reason)s"
-msgstr "網域特定配置無效:%(reason)s"
+msgstr "網域專屬配置無效:%(reason)s"
#, python-format
msgid "Invalid input for field '%(path)s'. The value is '%(value)s'."
@@ -523,19 +699,14 @@ msgstr "規則 %(identity_value)s 無效。必須指定 'groups' 及 'domain' 關
msgid "Invalid signature"
msgstr "無效的簽章"
-#, python-format
-msgid ""
-"Invalid ssl_cert_reqs value of %s, must be one of \"NONE\", \"OPTIONAL\", "
-"\"REQUIRED\""
-msgstr ""
-"%s 的 ssl_cert_reqs 值無效,必須是 \"NONE\"、\"OPTIONAL\" 及\"REQUIRED\" 的其"
-"中之一"
-
msgid "Invalid user / password"
msgstr "無效的使用者/密碼"
+msgid "Invalid username or TOTP passcode"
+msgstr "使用者å稱或 TOTP 密碼無效"
+
msgid "Invalid username or password"
-msgstr "無效的使用者å稱或密碼"
+msgstr "使用者å稱或密碼無效"
#, python-format
msgid "KVS region %s is already configured. Cannot reconfigure."
@@ -543,7 +714,7 @@ msgstr "KVS å€åŸŸ %s å·²é…置。無法é‡æ–°é…置。"
#, python-format
msgid "Key Value Store not configured: %s"
-msgstr "未é…置金鑰值儲存庫:%s"
+msgstr "未é…置「金鑰值儲存庫ã€ï¼š%s"
#, python-format
msgid "LDAP %s create"
@@ -557,22 +728,38 @@ msgstr "LDAP %s 刪除"
msgid "LDAP %s update"
msgstr "LDAP %s æ›´æ–°"
+msgid ""
+"Length of transformable resource id > 64, which is max allowed characters"
+msgstr "å¯è½‰æ›è³‡æº ID 的長度大於 64(這是所容許的字元數目上é™ï¼‰"
+
+#, python-format
+msgid ""
+"Local section in mapping %(mapping_id)s refers to a remote match that "
+"doesn't exist (e.g. {0} in a local section)."
+msgstr ""
+"å°æ˜  %(mapping_id)s 中的本端å€æ®µåƒç…§äº†ä¸€å€‹ä¸å­˜åœ¨çš„é ç«¯ç›¸ç¬¦é …(例如,本端å€æ®µ"
+"中的 '{0}')。"
+
#, python-format
msgid "Lock Timeout occurred for key, %(target)s"
-msgstr "金鑰 %(target)s 發生鎖定逾時"
+msgstr "金鑰 %(target)s 發生「鎖定逾時」"
#, python-format
msgid "Lock key must match target key: %(lock)s != %(target)s"
-msgstr "鎖定金鑰必須與目標金鑰相符:%(lock)s 不等於 %(target)s"
+msgstr "鎖定金鑰必須與目標金鑰相符:%(lock)s != %(target)s"
#, python-format
msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-msgstr "端點 URL (%(endpoint)s) 的格式不正確,請參閱錯誤日誌以取得詳細資料。"
+msgstr "端點 URL (%(endpoint)s) 的形態異常,請參閱錯誤日誌以取得詳細資料。"
msgid "Marker could not be found"
msgstr "找不到標記"
#, python-format
+msgid "Max hierarchy depth reached for %s branch."
+msgstr "已達到 %s 分支的階層深度上限。"
+
+#, python-format
msgid "Maximum lock attempts on %s occurred."
msgstr "已達到 %s 的鎖定嘗試次數上限。"
@@ -585,7 +772,7 @@ msgid "Method not callable: %s"
msgstr "方法不可呼叫:%s"
msgid "Missing entity ID from environment"
-msgstr "環境中遺漏實體 ID"
+msgstr "環境中遺漏了實體 ID"
msgid ""
"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
@@ -596,13 +783,16 @@ msgid "Multiple domains are not supported"
msgstr "不支援多個網域"
msgid "Must be called within an active lock context."
-msgstr "必須在作用中鎖定環境定義內呼叫。"
+msgstr "必須在作用中鎖定環境定義內予以呼叫。"
msgid "Must specify either domain or project"
-msgstr "必須指定 Domain 或 Project"
+msgstr "必須指定網域或專案"
msgid "Name field is required and cannot be empty"
-msgstr "名稱欄位是必要欄位,因此不能是空的"
+msgstr "「名稱」欄位是必要欄位,因此不能是空的"
+
+msgid "Neither Project Domain ID nor Project Domain Name was provided."
+msgstr "既未提供「專案網域 ID」,也未提供「專案網域名稱」。"
msgid ""
"No Authorization headers found, cannot proceed with OAuth related calls, if "
@@ -612,7 +802,7 @@ msgstr ""
"請確保 WSGIPassAuthorization 設定為 On。"
msgid "No authenticated user"
-msgstr "沒有已鑑別使用者"
+msgstr "沒有已鑑別的使用者"
msgid ""
"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
@@ -629,6 +819,9 @@ msgstr "沒有原則與端點 %(endpoint_id)s 相關聯。"
msgid "No remaining uses for trust: %(trust_id)s"
msgstr "沒有信任 %(trust_id)s 的剩餘使用情形"
+msgid "No token in the request"
+msgstr "要求中沒有記號"
+
msgid "Non-default domain is not supported"
msgstr "不支援非預設網域"
@@ -645,16 +838,34 @@ msgstr "檢查網域配置要求時,發現選項 %(option)s 未指定任何群
msgid ""
"Option %(option)s in group %(group)s is not supported for domain specific "
"configurations"
-msgstr "網域特定配置不支援群組 %(group)s 中的選項 %(option)s"
+msgstr "網域專屬配置不支援群組 %(group)s 中的選項 %(option)s"
#, python-format
msgid "Project (%s)"
msgstr "專案 (%s)"
#, python-format
+msgid "Project ID not found: %(t_id)s"
+msgstr "找不到專案 ID:%(t_id)s"
+
+msgid "Project field is required and cannot be empty."
+msgstr "「專案」欄位是必要的,因此不能是空的。"
+
+#, python-format
msgid "Project is disabled: %s"
msgstr "已停用專案:%s"
+msgid "Project name cannot contain reserved characters."
+msgstr "專案名稱不能包含保留字元。"
+
+msgid "Query string is not UTF-8 encoded"
+msgstr "查詢字串未使用 UTF-8 進行編碼"
+
+#, python-format
+msgid ""
+"Reading the default for option %(option)s in group %(group)s is not supported"
+msgstr "不支援讀取群組 %(group)s 中選項 %(option)s 的預設值"
+
msgid "Redelegation allowed for delegated by trust only"
msgstr "僅委派為信任時,才容許重新委派"
@@ -665,6 +876,70 @@ msgid ""
msgstr ""
"剩餘的重新委派深度 %(redelegation_depth)d 超出容許的範圍 [0..%(max_count)d]"
+msgid ""
+"Remove admin_crud_extension from the paste pipeline, the admin_crud "
+"extension is now always available. Updatethe [pipeline:admin_api] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 admin_crud_extension,admin_crud 延伸現在將一律可用。"
+"相應地更新 keystone-paste.ini 中的 [pipeline:admin_api] 區段,因為它在 O 版本"
+"中將予以移除。"
+
+msgid ""
+"Remove endpoint_filter_extension from the paste pipeline, the endpoint "
+"filter extension is now always available. Update the [pipeline:api_v3] "
+"section in keystone-paste.ini accordingly as it will be removed in the O "
+"release."
+msgstr ""
+"從貼上 Pipeline 中移除 endpoint_filter_extension,端點過濾器延伸現在將一律可"
+"用。相應地更新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版"
+"本中將予以移除。"
+
+msgid ""
+"Remove federation_extension from the paste pipeline, the federation "
+"extension is now always available. Update the [pipeline:api_v3] section in "
+"keystone-paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 federation_extension,聯合延伸現在將一律可用。相應地更"
+"新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移"
+"除。"
+
+msgid ""
+"Remove oauth1_extension from the paste pipeline, the oauth1 extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 oauth1_extension,oauth1 延伸現在將一律可用。相應地更"
+"新 keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移"
+"除。"
+
+msgid ""
+"Remove revoke_extension from the paste pipeline, the revoke extension is now "
+"always available. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 revoke_extension,撤銷延伸現在將一律可用。相應地更新 "
+"keystone-paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移除。"
+
+msgid ""
+"Remove simple_cert from the paste pipeline, the PKI and PKIz token providers "
+"are now deprecated and simple_cert was only used insupport of these token "
+"providers. Update the [pipeline:api_v3] section in keystone-paste.ini "
+"accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 simple_cert,PKI 和 PKIz 記號提供者現在已遭到淘汰,並"
+"且使用 simple_cert 的目的只是為了支援這些記號提供者。相應地更新 keystone-"
+"paste.ini 中的 [pipeline:api_v3] 區段,因為它在 O 版本中將予以移除。"
+
+msgid ""
+"Remove user_crud_extension from the paste pipeline, the user_crud extension "
+"is now always available. Updatethe [pipeline:public_api] section in keystone-"
+"paste.ini accordingly, as it will be removed in the O release."
+msgstr ""
+"從貼上 Pipeline 中移除 user_crud_extension,user_crud 延伸現在將一律可用。相"
+"應地更新 keystone-paste.ini 中的 [pipeline:public_api] 區段,因為它在 O 版本"
+"中將予以移除。"
+
msgid "Request Token does not have an authorizing user id"
msgstr "要求記號不具有授權使用者 ID"
@@ -674,14 +949,14 @@ msgid ""
"server could not comply with the request because the attribute size is "
"invalid (too large). The client is assumed to be in error."
msgstr ""
-"要求屬性 %(attribute)s 必須少於或等於 %(size)i。伺服器無法遵守要求,因為屬性"
+"要求屬性 %(attribute)s 必須小於或等於 %(size)i。伺服器無法遵守要求,因為屬性"
"大小無效(太大)。系統會假定用戶端處於錯誤狀態。"
msgid "Request must have an origin query parameter"
msgstr "要求必須具有原始查詢參數"
msgid "Request token is expired"
-msgstr "要求記號過期"
+msgstr "要求記號已過期"
msgid "Request token not found"
msgstr "找不到要求記號"
@@ -695,17 +970,13 @@ msgid ""
"%(max_count)d"
msgstr "所要求的重新委派深度 %(requested_count)d 大於容許的 %(max_count)d"
-#, python-format
-msgid "Role %s not found"
-msgstr "找不到角色 %s"
-
msgid ""
"Running keystone via eventlet is deprecated as of Kilo in favor of running "
"in a WSGI server (e.g. mod_wsgi). Support for keystone under eventlet will "
"be removed in the \"M\"-Release."
msgstr ""
-"透過 eventlet 執行 Keystone 這一做法已遭淘汰,因為 Kilo 偏好在 WSGI 伺服器"
-"(例如,mod_wsgi)中執行 Keystone。將在\"M\" 版本中移除對在 eventlet 下執行 "
+"透過 eventlet 執行 Keystone 這一做法已遭到淘汰,因為 Kilo 支援在 WSGI 伺服器"
+"(例如,mod_wsgi)中執行 Keystone。將在 \"M\" 版本中移除對在 eventlet 下執行 "
"Keystone 的支援。"
msgid "Scoping to both domain and project is not allowed"
@@ -725,10 +996,10 @@ msgid "Some of requested roles are not in redelegated trust"
msgstr "所要求的部分角色不在重新委派的信任中"
msgid "Specify a domain or project, not both"
-msgstr "指定網域或專案,但不要同時指定兩者"
+msgstr "指定網域或專案,但不能同時指定這兩者"
msgid "Specify a user or group, not both"
-msgstr "指定使用者或群組,但不要同時指定兩者"
+msgstr "指定使用者或群組,但不能同時指定這兩者"
msgid "Specify one of domain or project"
msgstr "指定網域或專案"
@@ -744,6 +1015,26 @@ msgstr ""
"已超出字串長度。字串 '%(string)s' 的長度已超出直欄 %(type)s 的限制 "
"(CHAR(%(length)d))。"
+msgid "Tenant name cannot contain reserved characters."
+msgstr "承租人名稱不能包含保留字元。"
+
+#, python-format
+msgid ""
+"The %s extension has been moved into keystone core and as such its "
+"migrations are maintained by the main keystone database control. Use the "
+"command: keystone-manage db_sync"
+msgstr ""
+"%s 延伸已移到 Keystone 核心內,因此它的移轉將由主要 Keystone 資料庫控制進行維"
+"護。請使用指令:keystone-manage db_sync"
+
+msgid ""
+"The 'expires_at' must not be before now. The server could not comply with "
+"the request since it is either malformed or otherwise incorrect. The client "
+"is assumed to be in error."
+msgstr ""
+"'expires_at' 不得早於現在。伺服器無法遵守要求,因為它的形態異常,或者在其他方"
+"面發生錯誤。系統會假定用戶端處於錯誤狀態。"
+
msgid "The --all option cannot be used with the --domain-name option"
msgstr "--all 選項不能與 --domain-name 選項搭配使用"
@@ -756,7 +1047,7 @@ msgid ""
"The Keystone domain-specific configuration has specified more than one SQL "
"driver (only one is permitted): %(source)s."
msgstr ""
-"Keystone 網域特定配置指定了多個SQL 驅動程式(僅允許一個):%(source)s。"
+"Keystone 網域專屬配置指定了多個 SQL 驅動程式(僅允許一個):%(source)s。"
msgid "The action you have requested has not been implemented."
msgstr "尚未實作所要求的動作。"
@@ -769,8 +1060,14 @@ msgid ""
"server does not use PKI tokens otherwise this is the result of "
"misconfiguration."
msgstr ""
-"所è¦æ±‚的憑證無法使用。å¯èƒ½æ˜¯æ­¤ä¼ºæœå™¨æ²’有使用 PKI 記號,å¦å‰‡ï¼Œé€™æ˜¯ç”±æ–¼é…置錯誤"
-"所造æˆã€‚"
+"無法使用所è¦æ±‚的憑證。å¯èƒ½æ˜¯æ­¤ä¼ºæœå™¨æ²’有使用 PKI 記號,å¦å‰‡ï¼Œé€™æ˜¯é…置錯誤的çµ"
+"果。"
+
+msgid "The configured token provider does not support bind authentication."
+msgstr "所é…置的記號æ供者ä¸æ”¯æ´é€£çµé‘‘別。"
+
+msgid "The creation of projects acting as domains is not allowed in v2."
+msgstr "在第 2 版中,ä¸å®¹è¨±å»ºç«‹å°ˆæ¡ˆä»¥å……當網域。"
#, python-format
msgid ""
@@ -788,11 +1085,11 @@ msgid ""
"The revoke call must not have both domain_id and project_id. This is a bug "
"in the Keystone server. The current request is aborted."
msgstr ""
-"撤銷呼å«ä¸å¾—åŒæ™‚具有 domain_id å’Œ project_id。這是Keystone 伺æœå™¨ä¸­çš„錯誤。已"
-"中斷ç¾è¡Œè¦æ±‚。"
+"撤銷呼å«ä¸å¾—åŒæ™‚具有 domain_id å’Œ project_id。這是 Keystone 伺æœå™¨ä¸­çš„錯誤。"
+"已中斷ç¾è¡Œè¦æ±‚。"
msgid "The service you have requested is no longer available on this server."
-msgstr "此伺æœå™¨ä¸Šç„¡æ³•å†ä½¿ç”¨æ‰€è¦æ±‚çš„æœå‹™ã€‚"
+msgstr "在此伺æœå™¨ä¸Šï¼Œç„¡æ³•å†ä½¿ç”¨æ‰€è¦æ±‚çš„æœå‹™ã€‚"
#, python-format
msgid ""
@@ -804,7 +1101,7 @@ msgstr "指定的æ¯é …å€åŸŸ %(parent_region_id)s 會建立循環å€åŸŸéšŽå±¤ã€
msgid ""
"The value of group %(group)s specified in the config should be a dictionary "
"of options"
-msgstr "在é…置中指定的群組 %(group)s 的值應該為é¸é …å­—å…¸"
+msgstr "在é…置中指定之群組 %(group)s 的值應該為é¸é …å­—å…¸"
msgid "There should not be any non-oauth parameters"
msgstr "ä¸æ‡‰è©²å…·æœ‰ä»»ä½• non-oauth åƒæ•¸"
@@ -813,18 +1110,17 @@ msgstr "ä¸æ‡‰è©²å…·æœ‰ä»»ä½• non-oauth åƒæ•¸"
msgid "This is not a recognized Fernet payload version: %s"
msgstr "這ä¸æ˜¯å·²è¾¨è­˜çš„ Fernet 內容版本:%s"
-msgid ""
-"This is not a v2.0 Fernet token. Use v3 for trust, domain, or federated "
-"tokens."
-msgstr "這ä¸æ˜¯ 2.0 版 Fernet 記號。請å°ä¿¡ä»»ã€ç¶²åŸŸæˆ–è¯åˆè¨˜è™Ÿä½¿ç”¨ç¬¬ 3 版。"
+#, python-format
+msgid "This is not a recognized Fernet token %s"
+msgstr "這ä¸æ˜¯å·²è¾¨è­˜çš„ Fernet 記號 %s"
msgid ""
"Timestamp not in expected format. The server could not comply with the "
"request since it is either malformed or otherwise incorrect. The client is "
"assumed to be in error."
msgstr ""
-"時間戳記的格å¼ä¸ç¬¦åˆé æœŸã€‚伺æœå™¨ç„¡æ³•éµå®ˆè¦æ±‚,因為它的格å¼ä¸æ­£ç¢ºã€‚系統會å‡å®š"
-"用戶端處於錯誤狀態。"
+"時間戳記的格å¼ä¸ç¬¦åˆé æœŸã€‚伺æœå™¨ç„¡æ³•éµå®ˆè¦æ±‚,因為它的形態異常,或者在其他方"
+"é¢ç™¼ç”ŸéŒ¯èª¤ã€‚系統會å‡å®šç”¨æˆ¶ç«¯è™•æ–¼éŒ¯èª¤ç‹€æ…‹ã€‚"
#, python-format
msgid ""
@@ -832,14 +1128,17 @@ msgid ""
"the specific domain, i.e.: keystone-manage domain_config_upload --domain-"
"name %s"
msgstr ""
-"è‹¥è¦å–得此錯誤的更詳細資訊,請é‡å°ç‰¹å®šçš„網域é‡æ–°åŸ·è¡Œæ­¤æŒ‡ä»¤ï¼Œä¾‹å¦‚:keystone-"
+"如果è¦å–得此錯誤的更詳細資訊,請é‡å°ç‰¹å®šçš„網域é‡æ–°åŸ·è¡Œæ­¤æŒ‡ä»¤ï¼Œä¾‹å¦‚:keystone-"
"manage domain_config_upload --domain-name %s"
msgid "Token belongs to another user"
msgstr "記號屬於å¦ä¸€å€‹ä½¿ç”¨è€…"
msgid "Token does not belong to specified tenant."
-msgstr "記號ä¸å±¬æ–¼æ‰€æŒ‡å®šçš„ Tenant。"
+msgstr "記號ä¸å±¬æ–¼æ‰€æŒ‡å®šçš„承租人。"
+
+msgid "Token version is unrecognizable or unsupported."
+msgstr "無法辨識或ä¸æ”¯æ´è¨˜è™Ÿç‰ˆæœ¬ã€‚"
msgid "Trustee has no delegated roles."
msgstr "å—託人沒有委派的角色。"
@@ -885,16 +1184,14 @@ msgid ""
"associated endpoints."
msgstr "無法刪除å€åŸŸ %(region_id)s,因為此å€åŸŸæˆ–å…¶å­å€åŸŸå…·æœ‰ç›¸é—œè¯çš„端點。"
+msgid "Unable to downgrade schema"
+msgstr "無法將綱目é™ç´š"
+
#, python-format
msgid "Unable to find valid groups while using mapping %(mapping_id)s"
msgstr "使用å°æ˜  %(mapping_id)s 時找ä¸åˆ°æœ‰æ•ˆçš„群組"
#, python-format
-msgid ""
-"Unable to get a connection from pool id %(id)s after %(seconds)s seconds."
-msgstr "在 %(seconds)s ç§’ä¹‹å¾Œï¼Œç„¡æ³•å¾žå„²å­˜å€ ID %(id)s å–得連線。"
-
-#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找ä¸åˆ°ç¶²åŸŸé…置目錄:%s"
@@ -915,15 +1212,15 @@ msgid ""
"xmlsec1 installed, or this is the result of misconfiguration. Reason "
"%(reason)s"
msgstr ""
-"無法簽署 SAML 主張。此伺æœå™¨å¯èƒ½æœªå®‰è£xmlsec1,或者這是é…置錯誤的çµæžœã€‚原"
-"因: %(reason)s"
+"無法簽署 SAML 主張。此伺æœå™¨å¯èƒ½æœªå®‰è£ xmlsec1,或者這是é…置錯誤的çµæžœã€‚原"
+"因:%(reason)s"
msgid "Unable to sign token."
msgstr "無法簽署記號。"
#, python-format
msgid "Unexpected assignment type encountered, %s"
-msgstr "發ç¾éžé æœŸçš„指派類型:%s"
+msgstr "發ç¾éžé æœŸçš„指派類型,%s"
#, python-format
msgid ""
@@ -935,7 +1232,7 @@ msgstr ""
#, python-format
msgid "Unexpected status requested for JSON Home response, %s"
-msgstr "é‡å°ã€ŒJSON 起始目錄ã€å›žæ‡‰è¦æ±‚了éžé æœŸç‹€æ…‹ %s"
+msgstr "é‡å°ã€ŒJSON 起始目錄ã€å›žæ‡‰è¦æ±‚了éžé æœŸç‹€æ…‹ï¼Œ%s"
msgid "Unknown Target"
msgstr "ä¸æ˜Žçš„目標"
@@ -952,27 +1249,44 @@ msgstr "ä¸æ˜Žçš„記號版本 %s"
msgid "Unregistered dependency: %(name)s for %(targets)s"
msgstr "å·²å–消登錄 %(targets)s 的相ä¾é—œä¿‚:%(name)s"
+msgid "Update of `domain_id` is not allowed."
+msgstr "ä¸å®¹è¨±æ›´æ–° 'domain_id'。"
+
+msgid "Update of `is_domain` is not allowed."
+msgstr "ä¸å®¹è¨±æ›´æ–° `is_domain`。"
+
msgid "Update of `parent_id` is not allowed."
msgstr "ä¸å®¹è¨±æ›´æ–° 'parent_id'。"
+msgid "Update of domain_id is only allowed for root projects."
+msgstr "åªå®¹è¨±æ›´æ–°æ ¹å°ˆæ¡ˆçš„ domain_id。"
+
+msgid "Update of domain_id of projects acting as domains is not allowed."
+msgstr "ä¸å®¹è¨±æ›´æ–°æ­£åœ¨å……當網域之專案的 domain_id。"
+
msgid "Use a project scoped token when attempting to create a SAML assertion"
msgstr "嘗試建立 SAML 主張時,使用專案範åœçš„記號"
+msgid ""
+"Use of the identity driver config to automatically configure the same "
+"assignment driver has been deprecated, in the \"O\" release, the assignment "
+"driver will need to be expicitly configured if different than the default "
+"(SQL)."
+msgstr ""
+"ä¸å»ºè­°ä½¿ç”¨èº«åˆ†é©…動程å¼é…置來自動é…置相åŒçš„指派驅動程å¼ï¼Œåœ¨ \"O\" 版本中,如果"
+"指派驅動程å¼èˆ‡é è¨­å€¼ (SQL) ä¸åŒï¼Œå‰‡éœ€è¦æ˜Žç¢ºé…置指派驅動程å¼ã€‚"
+
#, python-format
msgid "User %(u_id)s is unauthorized for tenant %(t_id)s"
msgstr "使用者 %(u_id)s 未ç²æ‰¿ç§Ÿäºº %(t_id)s 的授權"
#, python-format
-msgid "User %(user_id)s already has role %(role_id)s in tenant %(tenant_id)s"
-msgstr "使用者 %(user_id)s 在承租人 %(tenant_id)s 中已經具有角色 %(role_id)s"
-
-#, python-format
msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "使用者 %(user_id)s 無法存å–網域 %(domain_id)s"
+msgstr "使用者 %(user_id)s 無權存å–網域 %(domain_id)s"
#, python-format
msgid "User %(user_id)s has no access to project %(project_id)s"
-msgstr "使用者 %(user_id)s 無法存å–專案 %(project_id)s"
+msgstr "使用者 %(user_id)s 無權存å–專案 %(project_id)s"
#, python-format
msgid "User %(user_id)s is already a member of group %(group_id)s"
@@ -985,6 +1299,13 @@ msgstr "在群組 '%(group_id)s' 中找不到使用者 '%(user_id)s'"
msgid "User IDs do not match"
msgstr "使用者 ID 不符"
+msgid ""
+"User auth cannot be built due to missing either user id, or user name with "
+"domain id, or user name with domain name."
+msgstr ""
+"無法建置使用者鑑別,因為éºæ¼äº†ä½¿ç”¨è€… IDã€å…·æœ‰ç¶²åŸŸ ID 的使用者å稱或具有網域å"
+"稱的使用者å稱。"
+
#, python-format
msgid "User is disabled: %s"
msgstr "å·²åœç”¨ä½¿ç”¨è€…:%s"
@@ -998,6 +1319,12 @@ msgstr "使用者ä¸æ˜¯å—託人。"
msgid "User not found"
msgstr "找ä¸åˆ°ä½¿ç”¨è€…"
+msgid "User not valid for tenant."
+msgstr "使用者ä¸æ˜¯æœ‰æ•ˆçš„承租人。"
+
+msgid "User roles not supported: tenant_id required"
+msgstr "使用者角色ä¸å—支æ´ï¼šéœ€è¦ tenant_id"
+
#, python-format
msgid "User type %s not supported"
msgstr "使用者類型 %s ä¸å—支æ´"
@@ -1009,6 +1336,14 @@ msgstr "您未ç²æŽˆæ¬Šä¾†åŸ·è¡Œæ‰€è¦æ±‚的動作。"
msgid "You are not authorized to perform the requested action: %(action)s"
msgstr "您未ç²æŽˆæ¬Šä¾†åŸ·è¡Œæ‰€è¦æ±‚的動作:%(action)s"
+msgid ""
+"You have tried to create a resource using the admin token. As this token is "
+"not within a domain you must explicitly include a domain for this resource "
+"to belong to."
+msgstr ""
+"您已嘗試使用管ç†è€…記號建立資æºã€‚因為此記號ä¸åœ¨ç¶²åŸŸå…§ï¼Œæ‰€ä»¥æ‚¨å¿…須明確包å«æŸå€‹"
+"網域,以讓此資æºå±¬æ–¼è©²ç¶²åŸŸã€‚"
+
msgid "`key_mangler` functions must be callable."
msgstr "`key_mangler` 函數必須å¯å‘¼å«ã€‚"
@@ -1024,39 +1359,15 @@ msgstr "auth_type ä¸æ˜¯ Negotiate"
msgid "authorizing user does not have role required"
msgstr "授權使用者ä¸å…·æœ‰å¿…è¦çš„角色"
-msgid "cache_collection name is required"
-msgstr "éœ€è¦ cache_collection å稱"
-
#, python-format
msgid "cannot create a project in a branch containing a disabled project: %s"
msgstr "無法在包å«å·²åœç”¨å°ˆæ¡ˆçš„分支中建立專案:%s"
-msgid "cannot create a project within a different domain than its parents."
-msgstr "無法在ä¸åŒæ–¼å…¶æ¯é …的網域內建立專案。"
-
-msgid "cannot delete a domain that is enabled, please disable it first."
-msgstr "無法刪除已啟用的網域,請先åœç”¨è©²ç¶²åŸŸã€‚"
-
-#, python-format
-msgid "cannot delete the project %s since it is not a leaf in the hierarchy."
-msgstr "無法刪除專案 %s,因為它ä¸æ˜¯éšŽå±¤ä¸­çš„葉節點。"
-
#, python-format
-msgid "cannot disable project %s since its subtree contains enabled projects"
-msgstr "無法åœç”¨å°ˆæ¡ˆ %s,因為其å­æ¨¹ç‹€çµæ§‹åŒ…å«å·²å•Ÿç”¨çš„專案"
-
-#, python-format
-msgid "cannot enable project %s since it has disabled parents"
-msgstr "無法啟用專案 %s,因為它具有已åœç”¨çš„æ¯é …"
-
-msgid "database db_name is required"
-msgstr "需è¦è³‡æ–™åº« db_name"
-
-msgid "db_hosts value is required"
-msgstr "éœ€è¦ db_hosts 值"
-
-msgid "delete the default domain"
-msgstr "刪除é è¨­ç¶²åŸŸ"
+msgid ""
+"cannot delete an enabled project acting as a domain. Please disable the "
+"project %s first."
+msgstr "無法刪除已啟用且正在充當網域的專案。請先åœç”¨å°ˆæ¡ˆ %s。"
#, python-format
msgid "group %(group)s"
@@ -1067,44 +1378,37 @@ msgid ""
"or billing."
msgstr "idp_contact_type 必須是下列其中一個:技術ã€å…¶ä»–ã€æ”¯æ´ã€ç®¡ç†æˆ–計費。"
-msgid "integer value expected for mongo_ttl_seconds"
-msgstr "mongo_ttl_seconds é æœŸæ•´æ•¸å€¼"
-
-msgid "integer value expected for w (write concern attribute)"
-msgstr "w(WriteConcern 屬性)é æœŸæ•´æ•¸å€¼"
-
#, python-format
msgid "invalid date format %s"
msgstr "ç„¡æ•ˆçš„æ—¥æœŸæ ¼å¼ %s"
#, python-format
-msgid "max hierarchy depth reached for %s branch."
-msgstr "å·²é”到 %s 分支的最大階層深度。"
+msgid ""
+"it is not permitted to have two projects acting as domains with the same "
+"name: %s"
+msgstr "ä¸å…許包å«å…©å€‹å…·æœ‰ç›¸åŒå稱且充當網域的專案:%s"
+
+#, python-format
+msgid ""
+"it is not permitted to have two projects within a domain with the same "
+"name : %s"
+msgstr "在一個網域內,ä¸å…許包å«å…©å€‹å…·æœ‰ç›¸åŒå稱的專案:%s"
-msgid "no ssl support available"
-msgstr "無法使用 SSL 支æ´"
+msgid "only root projects are allowed to act as domains."
+msgstr "åªå®¹è¨±æ ¹å°ˆæ¡ˆå……當網域。"
#, python-format
msgid "option %(option)s in group %(group)s"
msgstr "群組 %(group)s 中的é¸é … %(option)s"
-msgid "pad must be single character"
-msgstr "填補必須是單一字元"
-
-msgid "padded base64url text must be multiple of 4 characters"
-msgstr "填補 base64url 文字必須是 4 個字元的å€æ•¸"
-
msgid "provided consumer key does not match stored consumer key"
-msgstr "æ供的消費者金鑰,與儲存的消費者金鑰ä¸ç¬¦"
+msgstr "所æ供的消費者金鑰與儲存的消費者金鑰ä¸ç¬¦"
msgid "provided request key does not match stored request key"
-msgstr "æ供的è¦æ±‚金鑰,與儲存的è¦æ±‚金鑰ä¸ç¬¦"
+msgstr "所æ供的è¦æ±‚金鑰與儲存的è¦æ±‚金鑰ä¸ç¬¦"
msgid "provided verifier does not match stored verifier"
-msgstr "æ供的驗證器,與儲存的驗證器ä¸ç¬¦"
-
-msgid "region not type dogpile.cache.CacheRegion"
-msgstr "å€åŸŸä¸æ˜¯ dogpile.cache.CacheRegion é¡žåž‹"
+msgstr "所æ供的驗證器與儲存的驗證器ä¸ç¬¦"
msgid "remaining_uses must be a positive integer or null."
msgstr "remaining_uses 必須是正整數或空值。"
@@ -1112,9 +1416,6 @@ msgstr "remaining_uses 必須是正整數或空值。"
msgid "remaining_uses must not be set if redelegation is allowed"
msgstr "如果容許é‡æ–°å§”派,則ä¸å¾—設定 remaining_uses"
-msgid "replicaset_name required when use_replica is True"
-msgstr "use_replica 為 True æ™‚éœ€è¦ replicaset_name"
-
#, python-format
msgid ""
"request to update group %(group)s, but config provided contains group "
@@ -1125,16 +1426,11 @@ msgid "rescope a scoped token"
msgstr "é‡æ–°åŠƒå®šå·²é™å®šç¯„åœä¹‹è¨˜è™Ÿçš„範åœ"
#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before 2nd to last char"
-msgstr "文字是 4 çš„å€æ•¸ï¼Œä½†å¡«è£œ \"%s\" 出ç¾æ–¼å€’數第二個字元之å‰"
-
-#, python-format
-msgid "text is multiple of 4, but pad \"%s\" occurs before non-pad last char"
-msgstr "文字是 4 çš„å€æ•¸ï¼Œä½†å¡«è£œ \"%s\" 出ç¾æ–¼æœ€å¾Œä¸€å€‹å­—元(ä¸å¯å¡«è£œï¼‰ä¹‹å‰"
+msgid "role %s is not defined"
+msgstr "未定義角色 %s"
-#, python-format
-msgid "text is not a multiple of 4, but contains pad \"%s\""
-msgstr "文字ä¸æ˜¯ 4 çš„å€æ•¸ï¼Œä½†åŒ…å«å¡«è£œ \"%s\""
+msgid "scope.project.id must be specified if include_subtree is also specified"
+msgstr "如果也指定了 include_subtree,則必須指定 scope.project.id"
#, python-format
msgid "tls_cacertdir %s not found or is not a directory"
@@ -1147,3 +1443,13 @@ msgstr "tls_cacertfile %s 找不到,或者不是檔案"
#, python-format
msgid "token reference must be a KeystoneToken type, got: %s"
msgstr "記號åƒç…§å¿…須是 KeystoneToken 類型,但å»å–得:%s"
+
+msgid ""
+"update of domain_id is deprecated as of Mitaka and will be removed in O."
+msgstr "ä¸å»ºè­°æ›´æ–° domain_id,因為 Mitaka 將在 O 版本中予以移除。"
+
+#, python-format
+msgid ""
+"validated expected to find %(param_name)r in function signature for "
+"%(func_name)r."
+msgstr "在 %(func_name)r 的函數簽章中,驗證é æœŸå°‹æ‰¾ %(param_name)r。"
diff --git a/keystone-moon/keystone/middleware/__init__.py b/keystone-moon/keystone/middleware/__init__.py
index efbaa7c9..4325d946 100644
--- a/keystone-moon/keystone/middleware/__init__.py
+++ b/keystone-moon/keystone/middleware/__init__.py
@@ -12,4 +12,5 @@
# License for the specific language governing permissions and limitations
# under the License.
+from keystone.middleware.auth import * # noqa
from keystone.middleware.core import * # noqa
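Re-exporting keystone.middleware.auth here keeps old import paths working: with the wildcard import plus the module's __all__ (shown in the new file below), both of the following resolve to the same class. A two-line check, assuming the package installs as laid out above:

    from keystone.middleware import AuthContextMiddleware as from_package
    from keystone.middleware.auth import AuthContextMiddleware as from_module

    assert from_package is from_module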
diff --git a/keystone-moon/keystone/middleware/auth.py b/keystone-moon/keystone/middleware/auth.py
new file mode 100644
index 00000000..cc7d0ecc
--- /dev/null
+++ b/keystone-moon/keystone/middleware/auth.py
@@ -0,0 +1,222 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_context import context as oslo_context
+from oslo_log import log
+from oslo_log import versionutils
+
+from keystone.common import authorization
+from keystone.common import tokenless_auth
+from keystone.common import wsgi
+from keystone import exception
+from keystone.federation import constants as federation_constants
+from keystone.federation import utils
+from keystone.i18n import _, _LI, _LW
+from keystone.middleware import core
+from keystone.models import token_model
+from keystone.token.providers import common
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+__all__ = ('AuthContextMiddleware',)
+
+
+class AuthContextMiddleware(wsgi.Middleware):
+ """Build the authentication context from the request auth token."""
+
+ def _build_auth_context(self, request):
+
+ # NOTE(gyee): token takes precedence over SSL client certificates.
+ # This will preserve backward compatibility with the existing
+ # behavior. Tokenless authorization with X.509 SSL client
+ # certificate is effectively disabled if no trusted issuers are
+ # provided.
+
+ token_id = None
+ if core.AUTH_TOKEN_HEADER in request.headers:
+ token_id = request.headers[core.AUTH_TOKEN_HEADER].strip()
+
+ is_admin = request.environ.get(core.CONTEXT_ENV, {}).get('is_admin',
+ False)
+ if is_admin:
+ # NOTE(gyee): no need to proceed any further as we already know
+ # this is an admin request.
+ auth_context = {}
+ return auth_context, token_id, is_admin
+
+ if token_id:
+ # In this case the client sent in a token.
+ auth_context, is_admin = self._build_token_auth_context(
+ request, token_id)
+ return auth_context, token_id, is_admin
+
+ # No token, maybe the client presented an X.509 certificate.
+
+ if self._validate_trusted_issuer(request.environ):
+ auth_context = self._build_tokenless_auth_context(
+ request.environ)
+ return auth_context, None, False
+
+ LOG.debug('There is either no auth token in the request or '
+ 'the certificate issuer is not trusted. No auth '
+ 'context will be set.')
+
+ return None, None, False
+
+ def _build_token_auth_context(self, request, token_id):
+ if CONF.admin_token and token_id == CONF.admin_token:
+ versionutils.report_deprecated_feature(
+ LOG,
+ _LW('build_auth_context middleware checking for the admin '
+ 'token is deprecated as of the Mitaka release and will be '
+ 'removed in the O release. If your deployment requires '
+ 'use of the admin token, update keystone-paste.ini so '
+ 'that admin_token_auth is before build_auth_context in '
+ 'the paste pipelines, otherwise remove the '
+ 'admin_token_auth middleware from the paste pipelines.'))
+ return {}, True
+
+ context = {'token_id': token_id}
+ context['environment'] = request.environ
+
+ try:
+ token_ref = token_model.KeystoneToken(
+ token_id=token_id,
+ token_data=self.token_provider_api.validate_token(token_id))
+ # TODO(gyee): validate_token_bind should really be its own
+ # middleware
+ wsgi.validate_token_bind(context, token_ref)
+ return authorization.token_to_auth_context(token_ref), False
+ except exception.TokenNotFound:
+ LOG.warning(_LW('RBAC: Invalid token'))
+ raise exception.Unauthorized()
+
+ def _build_tokenless_auth_context(self, env):
+ """Build the authentication context.
+
+ The context is built from the attributes provided in the env,
+ such as certificate and scope attributes.
+ """
+ tokenless_helper = tokenless_auth.TokenlessAuthHelper(env)
+
+ (domain_id, project_id, trust_ref, unscoped) = (
+ tokenless_helper.get_scope())
+ user_ref = tokenless_helper.get_mapped_user(
+ project_id,
+ domain_id)
+
+ # NOTE(gyee): if it is an ephemeral user, the
+ # given X.509 SSL client cert does not need to map to
+ # an existing user.
+ if user_ref['type'] == utils.UserType.EPHEMERAL:
+ auth_context = {}
+ auth_context['group_ids'] = user_ref['group_ids']
+ auth_context[federation_constants.IDENTITY_PROVIDER] = (
+ user_ref[federation_constants.IDENTITY_PROVIDER])
+ auth_context[federation_constants.PROTOCOL] = (
+ user_ref[federation_constants.PROTOCOL])
+ if domain_id and project_id:
+ msg = _('Scoping to both domain and project is not allowed')
+ raise ValueError(msg)
+ if domain_id:
+ auth_context['domain_id'] = domain_id
+ if project_id:
+ auth_context['project_id'] = project_id
+ auth_context['roles'] = user_ref['roles']
+ else:
+ # it's the local user, so token data is needed.
+ token_helper = common.V3TokenDataHelper()
+ token_data = token_helper.get_token_data(
+ user_id=user_ref['id'],
+ method_names=[CONF.tokenless_auth.protocol],
+ domain_id=domain_id,
+ project_id=project_id)
+
+ auth_context = {'user_id': user_ref['id']}
+ auth_context['is_delegated_auth'] = False
+ if domain_id:
+ auth_context['domain_id'] = domain_id
+ if project_id:
+ auth_context['project_id'] = project_id
+ auth_context['roles'] = [role['name'] for role
+ in token_data['token']['roles']]
+ return auth_context
+
+ def _validate_trusted_issuer(self, env):
+ """To further filter the certificates that are trusted.
+
+ If the config option 'trusted_issuer' is absent or does
+ not contain the trusted issuer DN, no certificates
+ will be allowed in tokenless authorization.
+
+ :param env: The env contains the client issuer's attributes
+ :type env: dict
+ :returns: True if client_issuer is trusted; otherwise False
+ """
+ if not CONF.tokenless_auth.trusted_issuer:
+ return False
+
+ client_issuer = env.get(CONF.tokenless_auth.issuer_attribute)
+ if not client_issuer:
+ msg = _LI('Cannot find client issuer in env by the '
+ 'issuer attribute - %s.')
+ LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
+ return False
+
+ if client_issuer in CONF.tokenless_auth.trusted_issuer:
+ return True
+
+ msg = _LI('The client issuer %(client_issuer)s does not match with '
+ 'the trusted issuer %(trusted_issuer)s')
+ LOG.info(
+ msg, {'client_issuer': client_issuer,
+ 'trusted_issuer': CONF.tokenless_auth.trusted_issuer})
+
+ return False
+
+ def process_request(self, request):
+
+ # The request context stores itself in thread-local memory for logging.
+ request_context = oslo_context.RequestContext(
+ request_id=request.environ.get('openstack.request_id'))
+
+ if authorization.AUTH_CONTEXT_ENV in request.environ:
+ msg = _LW('Auth context already exists in the request '
+ 'environment; it will be used for authorization '
+ 'instead of creating a new one.')
+ LOG.warning(msg)
+ return
+
+ auth_context, token_id, is_admin = self._build_auth_context(request)
+
+ request_context.auth_token = token_id
+ request_context.is_admin = is_admin
+
+ if auth_context is None:
+ # The client didn't send any auth info, so don't set auth context.
+ return
+
+ # The attributes of request_context are put into the logs. This is a
+ # common pattern for all the OpenStack services. In all the other
+ # projects these are IDs, so set the attributes to IDs here rather than
+ # the name.
+ request_context.user = auth_context.get('user_id')
+ request_context.tenant = auth_context.get('project_id')
+ request_context.domain = auth_context.get('domain_id')
+ request_context.user_domain = auth_context.get('user_domain_id')
+ request_context.project_domain = auth_context.get('project_domain_id')
+ request_context.update_store()
+
+ LOG.debug('RBAC: auth_context: %s', auth_context)
+ request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
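The control flow of _build_auth_context above reduces to a fixed precedence: the is_admin flag set by AdminTokenAuthMiddleware wins, then an X-Auth-Token header, then a trusted X.509 issuer; otherwise no auth context is set. A standalone sketch of just that ordering (plain Python with stand-in arguments, not the keystone API):

    AUTH_TOKEN_HEADER = 'X-Auth-Token'

    def pick_auth_strategy(headers, is_admin=False, issuer_trusted=False):
        # Mirrors _build_auth_context: admin flag, then token, then
        # tokenless X.509; tokens take precedence over client certs.
        if is_admin:
            return 'admin'
        if headers.get(AUTH_TOKEN_HEADER):
            return 'token'
        if issuer_trusted:
            return 'tokenless-x509'
        return 'none'  # no auth context will be set

    assert pick_auth_strategy({AUTH_TOKEN_HEADER: 'abc'}, issuer_trusted=True) == 'token'
    assert pick_auth_strategy({}, issuer_trusted=True) == 'tokenless-x509'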
diff --git a/keystone-moon/keystone/middleware/core.py b/keystone-moon/keystone/middleware/core.py
index 75be5b27..245b9e67 100644
--- a/keystone-moon/keystone/middleware/core.py
+++ b/keystone-moon/keystone/middleware/core.py
@@ -13,27 +13,17 @@
# under the License.
from oslo_config import cfg
-from oslo_context import context as oslo_context
from oslo_log import log
-from oslo_log import versionutils
-from oslo_middleware import sizelimit
from oslo_serialization import jsonutils
-from keystone.common import authorization
-from keystone.common import tokenless_auth
from keystone.common import wsgi
-from keystone.contrib.federation import constants as federation_constants
-from keystone.contrib.federation import utils
from keystone import exception
-from keystone.i18n import _, _LI, _LW
-from keystone.models import token_model
-from keystone.token.providers import common
+from keystone.i18n import _LW
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
@@ -68,34 +58,21 @@ class AdminTokenAuthMiddleware(wsgi.Middleware):
"""
+ def __init__(self, application):
+ super(AdminTokenAuthMiddleware, self).__init__(application)
+ LOG.warning(_LW("The admin_token_auth middleware presents a security "
+ "risk and should be removed from the "
+ "[pipeline:api_v3], [pipeline:admin_api], and "
+ "[pipeline:public_api] sections of your paste ini "
+ "file."))
+
def process_request(self, request):
token = request.headers.get(AUTH_TOKEN_HEADER)
context = request.environ.get(CONTEXT_ENV, {})
- context['is_admin'] = (token == CONF.admin_token)
+ context['is_admin'] = CONF.admin_token and (token == CONF.admin_token)
request.environ[CONTEXT_ENV] = context
-class PostParamsMiddleware(wsgi.Middleware):
- """Middleware to allow method arguments to be passed as POST parameters.
-
- Filters out the parameters `self`, `context` and anything beginning with
- an underscore.
-
- """
-
- def process_request(self, request):
- params_parsed = request.params
- params = {}
- for k, v in params_parsed.items():
- if k in ('self', 'context'):
- continue
- if k.startswith('_'):
- continue
- params[k] = v
-
- request.environ[PARAMS_ENV] = params
-
-
class JsonBodyMiddleware(wsgi.Middleware):
"""Middleware to allow method arguments to be passed as serialized JSON.
@@ -106,6 +83,7 @@ class JsonBodyMiddleware(wsgi.Middleware):
an underscore.
"""
+
def process_request(self, request):
# Abort early if we don't have any work to do
params_json = request.body
@@ -158,158 +136,3 @@ class NormalizingFilter(wsgi.Middleware):
# Rewrites path to root if no path is given.
elif not request.environ['PATH_INFO']:
request.environ['PATH_INFO'] = '/'
-
-
-class RequestBodySizeLimiter(sizelimit.RequestBodySizeLimiter):
- @versionutils.deprecated(
- versionutils.deprecated.KILO,
- in_favor_of='oslo_middleware.sizelimit.RequestBodySizeLimiter',
- remove_in=+1,
- what='keystone.middleware.RequestBodySizeLimiter')
- def __init__(self, *args, **kwargs):
- super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
-
-
-class AuthContextMiddleware(wsgi.Middleware):
- """Build the authentication context from the request auth token."""
-
- def _build_auth_context(self, request):
- token_id = request.headers.get(AUTH_TOKEN_HEADER).strip()
-
- if token_id == CONF.admin_token:
- # NOTE(gyee): no need to proceed any further as the special admin
- # token is being handled by AdminTokenAuthMiddleware. This code
- # will not be impacted even if AdminTokenAuthMiddleware is removed
- # from the pipeline as "is_admin" is default to "False". This code
- # is independent of AdminTokenAuthMiddleware.
- return {}
-
- context = {'token_id': token_id}
- context['environment'] = request.environ
-
- try:
- token_ref = token_model.KeystoneToken(
- token_id=token_id,
- token_data=self.token_provider_api.validate_token(token_id))
- # TODO(gyee): validate_token_bind should really be its own
- # middleware
- wsgi.validate_token_bind(context, token_ref)
- return authorization.token_to_auth_context(token_ref)
- except exception.TokenNotFound:
- LOG.warning(_LW('RBAC: Invalid token'))
- raise exception.Unauthorized()
-
- def _build_tokenless_auth_context(self, env):
- """Build the authentication context.
-
- The context is built from the attributes provided in the env,
- such as certificate and scope attributes.
- """
- tokenless_helper = tokenless_auth.TokenlessAuthHelper(env)
-
- (domain_id, project_id, trust_ref, unscoped) = (
- tokenless_helper.get_scope())
- user_ref = tokenless_helper.get_mapped_user(
- project_id,
- domain_id)
-
- # NOTE(gyee): if it is an ephemeral user, the
- # given X.509 SSL client cert does not need to map to
- # an existing user.
- if user_ref['type'] == utils.UserType.EPHEMERAL:
- auth_context = {}
- auth_context['group_ids'] = user_ref['group_ids']
- auth_context[federation_constants.IDENTITY_PROVIDER] = (
- user_ref[federation_constants.IDENTITY_PROVIDER])
- auth_context[federation_constants.PROTOCOL] = (
- user_ref[federation_constants.PROTOCOL])
- if domain_id and project_id:
- msg = _('Scoping to both domain and project is not allowed')
- raise ValueError(msg)
- if domain_id:
- auth_context['domain_id'] = domain_id
- if project_id:
- auth_context['project_id'] = project_id
- auth_context['roles'] = user_ref['roles']
- else:
- # it's the local user, so token data is needed.
- token_helper = common.V3TokenDataHelper()
- token_data = token_helper.get_token_data(
- user_id=user_ref['id'],
- method_names=[CONF.tokenless_auth.protocol],
- domain_id=domain_id,
- project_id=project_id)
-
- auth_context = {'user_id': user_ref['id']}
- auth_context['is_delegated_auth'] = False
- if domain_id:
- auth_context['domain_id'] = domain_id
- if project_id:
- auth_context['project_id'] = project_id
- auth_context['roles'] = [role['name'] for role
- in token_data['token']['roles']]
- return auth_context
-
- def _validate_trusted_issuer(self, env):
- """To further filter the certificates that are trusted.
-
- If the config option 'trusted_issuer' is absent or does
- not contain the trusted issuer DN, no certificates
- will be allowed in tokenless authorization.
-
- :param env: The env contains the client issuer's attributes
- :type env: dict
- :returns: True if client_issuer is trusted; otherwise False
- """
-
- if not CONF.tokenless_auth.trusted_issuer:
- return False
-
- client_issuer = env.get(CONF.tokenless_auth.issuer_attribute)
- if not client_issuer:
- msg = _LI('Cannot find client issuer in env by the '
- 'issuer attribute - %s.')
- LOG.info(msg, CONF.tokenless_auth.issuer_attribute)
- return False
-
- if client_issuer in CONF.tokenless_auth.trusted_issuer:
- return True
-
- msg = _LI('The client issuer %(client_issuer)s does not match with '
- 'the trusted issuer %(trusted_issuer)s')
- LOG.info(
- msg, {'client_issuer': client_issuer,
- 'trusted_issuer': CONF.tokenless_auth.trusted_issuer})
-
- return False
-
- def process_request(self, request):
-
- # The request context stores itself in thread-local memory for logging.
- oslo_context.RequestContext(
- request_id=request.environ.get('openstack.request_id'))
-
- if authorization.AUTH_CONTEXT_ENV in request.environ:
- msg = _LW('Auth context already exists in the request '
- 'environment; it will be used for authorization '
- 'instead of creating a new one.')
- LOG.warning(msg)
- return
-
- # NOTE(gyee): token takes precedence over SSL client certificates.
- # This will preserve backward compatibility with the existing
- # behavior. Tokenless authorization with X.509 SSL client
- # certificate is effectively disabled if no trusted issuers are
- # provided.
- if AUTH_TOKEN_HEADER in request.headers:
- auth_context = self._build_auth_context(request)
- elif self._validate_trusted_issuer(request.environ):
- auth_context = self._build_tokenless_auth_context(
- request.environ)
- else:
- LOG.debug('There is either no auth token in the request or '
- 'the certificate issuer is not trusted. No auth '
- 'context will be set.')
- return
- LOG.debug('RBAC: auth_context: %s', auth_context)
- request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context
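The one-line change to AdminTokenAuthMiddleware.process_request earlier in this file's diff is easy to miss but security-relevant: with the old expression, a deployment that leaves admin_token unset (None) and a request that omits X-Auth-Token (headers.get() returns None) satisfy token == CONF.admin_token, so every anonymous request would be flagged as admin. A small sketch of the difference, using hypothetical values rather than a real CONF object:

    def is_admin_old(admin_token, header_token):
        return header_token == admin_token            # None == None -> True

    def is_admin_new(admin_token, header_token):
        # The new guard: a falsy admin_token disables the feature entirely.
        return bool(admin_token) and header_token == admin_token

    assert is_admin_old(None, None)                   # accidental admin
    assert not is_admin_new(None, None)               # guarded
    assert is_admin_new('s3cr3t', 's3cr3t')           # explicit match still works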
diff --git a/keystone-moon/keystone/models/revoke_model.py b/keystone-moon/keystone/models/revoke_model.py
new file mode 100644
index 00000000..0fc3e628
--- /dev/null
+++ b/keystone-moon/keystone/models/revoke_model.py
@@ -0,0 +1,373 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import timeutils
+from six.moves import map
+
+from keystone.common import utils
+
+
+# The set of attributes common between the RevokeEvent
+# and the dictionaries created from the token Data.
+_NAMES = ['trust_id',
+ 'consumer_id',
+ 'access_token_id',
+ 'audit_id',
+ 'audit_chain_id',
+ 'expires_at',
+ 'domain_id',
+ 'project_id',
+ 'user_id',
+ 'role_id']
+
+
+# Additional arguments for creating a RevokeEvent
+_EVENT_ARGS = ['issued_before', 'revoked_at']
+
+# Names of attributes in the RevocationEvent, including "virtual" attributes.
+# Virtual attributes are those added based on other values.
+_EVENT_NAMES = _NAMES + ['domain_scope_id']
+
+# Values that will be in the token data but not in the event.
+# These will compared with event values that have different names.
+# For example: both trustor_id and trustee_id are compared against user_id
+_TOKEN_KEYS = ['identity_domain_id',
+ 'assignment_domain_id',
+ 'issued_at',
+ 'trustor_id',
+ 'trustee_id']
+
+# Alternative names to be checked in token for every field in
+# revoke tree.
+ALTERNATIVES = {
+ 'user_id': ['user_id', 'trustor_id', 'trustee_id'],
+ 'domain_id': ['identity_domain_id', 'assignment_domain_id'],
+ # For a domain-scoped token, the domain is in assignment_domain_id.
+ 'domain_scope_id': ['assignment_domain_id', ],
+}
+
+
+REVOKE_KEYS = _NAMES + _EVENT_ARGS
+
+
+def blank_token_data(issued_at):
+ token_data = dict()
+ for name in _NAMES:
+ token_data[name] = None
+ for name in _TOKEN_KEYS:
+ token_data[name] = None
+ # required field
+ token_data['issued_at'] = issued_at
+ return token_data
+
+
+class RevokeEvent(object):
+ def __init__(self, **kwargs):
+ for k in REVOKE_KEYS:
+ v = kwargs.get(k)
+ setattr(self, k, v)
+
+ if self.domain_id and self.expires_at:
+ # This is revoking a domain-scoped token.
+ self.domain_scope_id = self.domain_id
+ self.domain_id = None
+ else:
+ # This is revoking all tokens for a domain.
+ self.domain_scope_id = None
+
+ if self.expires_at is not None:
+ # Trim off the expiration time because MySQL timestamps are only
+ # accurate to the second.
+ self.expires_at = self.expires_at.replace(microsecond=0)
+
+ if self.revoked_at is None:
+ self.revoked_at = timeutils.utcnow()
+ if self.issued_before is None:
+ self.issued_before = self.revoked_at
+
+ def to_dict(self):
+ keys = ['user_id',
+ 'role_id',
+ 'domain_id',
+ 'domain_scope_id',
+ 'project_id',
+ 'audit_id',
+ 'audit_chain_id',
+ ]
+ event = {key: self.__dict__[key] for key in keys
+ if self.__dict__[key] is not None}
+ if self.trust_id is not None:
+ event['OS-TRUST:trust_id'] = self.trust_id
+ if self.consumer_id is not None:
+ event['OS-OAUTH1:consumer_id'] = self.consumer_id
+        if self.access_token_id is not None:
+ event['OS-OAUTH1:access_token_id'] = self.access_token_id
+ if self.expires_at is not None:
+ event['expires_at'] = utils.isotime(self.expires_at)
+ if self.issued_before is not None:
+ event['issued_before'] = utils.isotime(self.issued_before,
+ subsecond=True)
+ return event
+
+ def key_for_name(self, name):
+ return "%s=%s" % (name, getattr(self, name) or '*')
+
+
+def attr_keys(event):
+ return list(map(event.key_for_name, _EVENT_NAMES))
+
+
+class RevokeTree(object):
+ """Fast Revocation Checking Tree Structure
+
+ The Tree is an index to quickly match tokens against events.
+ Each node is a hashtable of key=value combinations from revocation events.
+ The
+
+ """
+
+ def __init__(self, revoke_events=None):
+ self.revoke_map = dict()
+ self.add_events(revoke_events)
+
+ def add_event(self, event):
+ """Updates the tree based on a revocation event.
+
+ Creates any necessary internal nodes in the tree corresponding to the
+ fields of the revocation event. The leaf node will always be set to
+ the latest 'issued_before' for events that are otherwise identical.
+
+ :param: Event to add to the tree
+
+ :returns: the event that was passed in.
+
+ """
+ revoke_map = self.revoke_map
+ for key in attr_keys(event):
+ revoke_map = revoke_map.setdefault(key, {})
+ revoke_map['issued_before'] = max(
+ event.issued_before, revoke_map.get(
+ 'issued_before', event.issued_before))
+ return event
+
+ def remove_event(self, event):
+ """Update the tree based on the removal of a Revocation Event
+
+ Removes empty nodes from the tree from the leaf back to the root.
+
+ If multiple events trace the same path, but have different
+ 'issued_before' values, only the last is ever stored in the tree.
+ So only an exact match on 'issued_before' ever triggers a removal
+
+ :param: Event to remove from the tree
+
+ """
+ stack = []
+ revoke_map = self.revoke_map
+ for name in _EVENT_NAMES:
+ key = event.key_for_name(name)
+ nxt = revoke_map.get(key)
+ if nxt is None:
+ break
+ stack.append((revoke_map, key, nxt))
+ revoke_map = nxt
+ else:
+ if event.issued_before == revoke_map['issued_before']:
+ revoke_map.pop('issued_before')
+ for parent, key, child in reversed(stack):
+ if not any(child):
+ del parent[key]
+
+ def add_events(self, revoke_events):
+ return list(map(self.add_event, revoke_events or []))
+
+ @staticmethod
+ def _next_level_keys(name, token_data):
+ """Generate keys based on current field name and token data
+
+ Generate all keys to look for in the next iteration of revocation
+ event tree traversal.
+ """
+ yield '*'
+ if name == 'role_id':
+ # Roles are very special since a token has a list of them.
+ # If the revocation event matches any one of them,
+ # revoke the token.
+ for role_id in token_data.get('roles', []):
+ yield role_id
+ else:
+ # For other fields we try to get any branch that concur
+ # with any alternative field in the token.
+ for alt_name in ALTERNATIVES.get(name, [name]):
+ yield token_data[alt_name]
+
+ def _search(self, revoke_map, names, token_data):
+ """Search for revocation event by token_data
+
+ Traverse the revocation events tree looking for event matching token
+ data issued after the token.
+ """
+ if not names:
+ # The last (leaf) level is checked in a special way because we
+ # verify issued_at field differently.
+ try:
+ return revoke_map['issued_before'] >= token_data['issued_at']
+ except KeyError:
+ return False
+
+ name, remaining_names = names[0], names[1:]
+
+ for key in self._next_level_keys(name, token_data):
+ subtree = revoke_map.get('%s=%s' % (name, key))
+ if subtree and self._search(subtree, remaining_names, token_data):
+ return True
+
+ # If we made it out of the loop then no element in revocation tree
+ # corresponds to our token and it is good.
+ return False
+
+ def is_revoked(self, token_data):
+ """Check if a token matches the revocation event
+
+ Compare the values for each level of the tree with the values from
+ the token, accounting for attributes that have alternative
+ keys, and for wildcard matches.
+ if there is a match, continue down the tree.
+ if there is no match, exit early.
+
+ token_data is a map based on a flattened view of token.
+ The required fields are:
+
+ 'expires_at','user_id', 'project_id', 'identity_domain_id',
+ 'assignment_domain_id', 'trust_id', 'trustor_id', 'trustee_id'
+ 'consumer_id', 'access_token_id'
+
+ """
+ return self._search(self.revoke_map, _EVENT_NAMES, token_data)
+
+
+def build_token_values_v2(access, default_domain_id):
+ token_data = access['token']
+
+ token_expires_at = timeutils.parse_isotime(token_data['expires'])
+
+ # Trim off the microseconds because the revocation event only has
+ # expirations accurate to the second.
+ token_expires_at = token_expires_at.replace(microsecond=0)
+
+ token_values = {
+ 'expires_at': timeutils.normalize_time(token_expires_at),
+ 'issued_at': timeutils.normalize_time(
+ timeutils.parse_isotime(token_data['issued_at'])),
+ 'audit_id': token_data.get('audit_ids', [None])[0],
+ 'audit_chain_id': token_data.get('audit_ids', [None])[-1],
+ }
+
+ token_values['user_id'] = access.get('user', {}).get('id')
+
+ project = token_data.get('tenant')
+ if project is not None:
+ token_values['project_id'] = project['id']
+ else:
+ token_values['project_id'] = None
+
+ token_values['identity_domain_id'] = default_domain_id
+ token_values['assignment_domain_id'] = default_domain_id
+
+ trust = token_data.get('trust')
+ if trust is None:
+ token_values['trust_id'] = None
+ token_values['trustor_id'] = None
+ token_values['trustee_id'] = None
+ else:
+ token_values['trust_id'] = trust['id']
+ token_values['trustor_id'] = trust['trustor_id']
+ token_values['trustee_id'] = trust['trustee_id']
+
+ token_values['consumer_id'] = None
+ token_values['access_token_id'] = None
+
+ role_list = []
+ # Roles are by ID in metadata and by name in the user section
+ roles = access.get('metadata', {}).get('roles', [])
+ for role in roles:
+ role_list.append(role)
+ token_values['roles'] = role_list
+ return token_values
+
+
+def build_token_values(token_data):
+
+ token_expires_at = timeutils.parse_isotime(token_data['expires_at'])
+
+ # Trim off the microseconds because the revocation event only has
+ # expirations accurate to the second.
+ token_expires_at = token_expires_at.replace(microsecond=0)
+
+ token_values = {
+ 'expires_at': timeutils.normalize_time(token_expires_at),
+ 'issued_at': timeutils.normalize_time(
+ timeutils.parse_isotime(token_data['issued_at'])),
+ 'audit_id': token_data.get('audit_ids', [None])[0],
+ 'audit_chain_id': token_data.get('audit_ids', [None])[-1],
+ }
+
+ user = token_data.get('user')
+ if user is not None:
+ token_values['user_id'] = user['id']
+        # Federated users do not have a domain; be defensive and set the
+        # user domain to None in the federated case.
+ token_values['identity_domain_id'] = user.get('domain', {}).get('id')
+ else:
+ token_values['user_id'] = None
+ token_values['identity_domain_id'] = None
+
+ project = token_data.get('project', token_data.get('tenant'))
+ if project is not None:
+ token_values['project_id'] = project['id']
+ # The domain_id of projects acting as domains is None
+ token_values['assignment_domain_id'] = (
+ project['domain']['id'] if project['domain'] else None)
+ else:
+ token_values['project_id'] = None
+
+ domain = token_data.get('domain')
+ if domain is not None:
+ token_values['assignment_domain_id'] = domain['id']
+ else:
+ token_values['assignment_domain_id'] = None
+
+ role_list = []
+ roles = token_data.get('roles')
+ if roles is not None:
+ for role in roles:
+ role_list.append(role['id'])
+ token_values['roles'] = role_list
+
+ trust = token_data.get('OS-TRUST:trust')
+ if trust is None:
+ token_values['trust_id'] = None
+ token_values['trustor_id'] = None
+ token_values['trustee_id'] = None
+ else:
+ token_values['trust_id'] = trust['id']
+ token_values['trustor_id'] = trust['trustor_user']['id']
+ token_values['trustee_id'] = trust['trustee_user']['id']
+
+ oauth1 = token_data.get('OS-OAUTH1')
+ if oauth1 is None:
+ token_values['consumer_id'] = None
+ token_values['access_token_id'] = None
+ else:
+ token_values['consumer_id'] = oauth1['consumer_id']
+ token_values['access_token_id'] = oauth1['access_token_id']
+ return token_values
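Everything needed to exercise the matcher is defined in this new module, so the revocation tree can be sanity-checked end to end: index one event, then probe it with tokens issued before and after the revocation. A quick sketch, assuming the file above is importable as keystone.models.revoke_model:

    from datetime import datetime, timedelta

    from keystone.models import revoke_model

    now = datetime.utcnow()
    tree = revoke_model.RevokeTree()
    tree.add_event(revoke_model.RevokeEvent(user_id='u1', revoked_at=now))

    # blank_token_data() pre-fills every attribute the matcher consults.
    old_token = revoke_model.blank_token_data(issued_at=now - timedelta(seconds=5))
    old_token['user_id'] = 'u1'
    assert tree.is_revoked(old_token)       # issued before the event: revoked

    new_token = revoke_model.blank_token_data(issued_at=now + timedelta(seconds=5))
    new_token['user_id'] = 'u1'
    assert not tree.is_revoked(new_token)   # issued after the event: still valid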
diff --git a/keystone-moon/keystone/models/token_model.py b/keystone-moon/keystone/models/token_model.py
index 2032fd19..32e6b365 100644
--- a/keystone-moon/keystone/models/token_model.py
+++ b/keystone-moon/keystone/models/token_model.py
@@ -14,14 +14,14 @@
from keystoneclient.common import cms
from oslo_config import cfg
+from oslo_utils import reflection
from oslo_utils import timeutils
import six
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants
from keystone.i18n import _
-
CONF = cfg.CONF
# supported token versions
V2 = 'v2.0'
@@ -37,6 +37,7 @@ def _parse_and_normalize_time(time_data):
class KeystoneToken(dict):
"""An in-memory representation that unifies v2 and v3 tokens."""
+
# TODO(morganfainberg): Align this in-memory representation with the
# objects in keystoneclient. This object should be eventually updated
# to be the source of token data with the ability to emit any version
@@ -64,7 +65,9 @@ class KeystoneToken(dict):
def __repr__(self):
desc = ('<%(type)s (audit_id=%(audit_id)s, '
'audit_chain_id=%(audit_chain_id)s) at %(loc)s>')
- return desc % {'type': self.__class__.__name__,
+ self_cls_name = reflection.get_class_name(self,
+ fully_qualified=False)
+ return desc % {'type': self_cls_name,
'audit_id': self.audit_id,
'audit_chain_id': self.audit_chain_id,
'loc': hex(id(self))}
@@ -116,7 +119,7 @@ class KeystoneToken(dict):
return self['user']['domain']['name']
elif 'user' in self:
return "Default"
- except KeyError:
+ except KeyError: # nosec
# Do not raise KeyError, raise UnexpectedError
pass
raise exception.UnexpectedError()
@@ -128,7 +131,7 @@ class KeystoneToken(dict):
return self['user']['domain']['id']
elif 'user' in self:
return CONF.identity.default_domain_id
- except KeyError:
+ except KeyError: # nosec
# Do not raise KeyError, raise UnexpectedError
pass
raise exception.UnexpectedError()
@@ -184,7 +187,7 @@ class KeystoneToken(dict):
return self['project']['domain']['id']
elif 'tenant' in self['token']:
return CONF.identity.default_domain_id
- except KeyError:
+ except KeyError: # nosec
# Do not raise KeyError, raise UnexpectedError
pass
@@ -197,7 +200,7 @@ class KeystoneToken(dict):
return self['project']['domain']['name']
if 'tenant' in self['token']:
return 'Default'
- except KeyError:
+ except KeyError: # nosec
# Do not raise KeyError, raise UnexpectedError
pass
@@ -297,7 +300,7 @@ class KeystoneToken(dict):
def is_federated_user(self):
try:
return (self.version is V3 and
- federation_constants.FEDERATION in self['user'])
+ constants.FEDERATION in self['user'])
except KeyError:
raise exception.UnexpectedError()
@@ -306,7 +309,7 @@ class KeystoneToken(dict):
if self.is_federated_user:
if self.version is V3:
try:
- groups = self['user'][federation_constants.FEDERATION].get(
+ groups = self['user'][constants.FEDERATION].get(
'groups', [])
return [g['id'] for g in groups]
except KeyError:
@@ -317,15 +320,12 @@ class KeystoneToken(dict):
def federation_idp_id(self):
if self.version is not V3 or not self.is_federated_user:
return None
- return (
- self['user'][federation_constants.FEDERATION]
- ['identity_provider']['id'])
+ return self['user'][constants.FEDERATION]['identity_provider']['id']
@property
def federation_protocol_id(self):
if self.version is V3 and self.is_federated_user:
- return (self['user'][federation_constants.FEDERATION]['protocol']
- ['id'])
+ return self['user'][constants.FEDERATION]['protocol']['id']
return None
@property
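The token_model change above swaps self.__class__.__name__ for oslo.utils' reflection helper, which resolves a printable class name uniformly whether it is handed an instance or a class. A minimal standalone illustration (not keystone code):

    from oslo_utils import reflection

    class KeystoneToken(dict):
        pass

    token = KeystoneToken()
    # Prints 'KeystoneToken'; with the default fully_qualified=True the
    # module path is included as well.
    print(reflection.get_class_name(token, fully_qualified=False))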
diff --git a/keystone-moon/keystone/notifications.py b/keystone-moon/keystone/notifications.py
index bea09d3c..30d1713c 100644
--- a/keystone-moon/keystone/notifications.py
+++ b/keystone-moon/keystone/notifications.py
@@ -22,8 +22,8 @@ import socket
from oslo_config import cfg
from oslo_log import log
-from oslo_log import versionutils
import oslo_messaging
+from oslo_utils import reflection
import pycadf
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
@@ -32,6 +32,7 @@ from pycadf import eventfactory
from pycadf import resource
from keystone.i18n import _, _LE
+from keystone.common import utils
notifier_opts = [
@@ -44,6 +45,14 @@ notifier_opts = [
'the resource being operated on. A "cadf" notification '
'has the same information, as well as information about '
'the initiator of the event.'),
+ cfg.MultiStrOpt('notification_opt_out', default=[],
+ help='Define the notification options to opt-out from. '
+ 'The value expected is: '
+ 'identity.<resource_type>.<operation>. This field '
+ 'can be set multiple times in order to add more '
+ 'notifications to opt-out from. For example:\n '
+ 'notification_opt_out=identity.user.created\n '
+ 'notification_opt_out=identity.authenticate.success'),
]
config_section = None
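The new notification_opt_out option feeds the _check_notification_opt_out() helper added near the bottom of this file; authentication events are special-cased because their event_type is the bare identity.authenticate until the outcome is appended. A standalone sketch of the check under that assumption (the opt_out list stands in for CONF.notification_opt_out):

    opt_out = ['identity.user.created', 'identity.authenticate.success']

    def is_opted_out(event_type, outcome=None):
        # authenticate events are filtered per outcome, e.g.
        # identity.authenticate.success vs identity.authenticate.failure
        if event_type == 'identity.authenticate':
            event_type = '%s.%s' % (event_type, outcome)
        return event_type in opt_out

    assert is_opted_out('identity.user.created')
    assert is_opted_out('identity.authenticate', 'success')
    assert not is_opted_out('identity.role_assignment.created')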
@@ -100,7 +109,8 @@ class Audit(object):
"""
@classmethod
- def _emit(cls, operation, resource_type, resource_id, initiator, public):
+ def _emit(cls, operation, resource_type, resource_id, initiator, public,
+ actor_dict=None):
"""Directly send an event notification.
:param operation: one of the values from ACTIONS
@@ -111,6 +121,8 @@ class Audit(object):
:param public: If True (default), the event will be sent to the
notifier API. If False, the event will only be sent via
notify_event_callbacks to in process listeners
+ :param actor_dict: dictionary of actor information in the event of
+ assignment notification
"""
# NOTE(stevemar): the _send_notification function is
# overloaded, it's used to register callbacks and to actually
@@ -121,6 +133,7 @@ class Audit(object):
operation,
resource_type,
resource_id,
+ actor_dict,
public=public)
if CONF.notification_format == 'cadf' and public:
@@ -152,91 +165,35 @@ class Audit(object):
cls._emit(ACTIONS.deleted, resource_type, resource_id, initiator,
public)
+ @classmethod
+ def added_to(cls, target_type, target_id, actor_type, actor_id,
+ initiator=None, public=True):
+ actor_dict = {'id': actor_id,
+ 'type': actor_type,
+ 'actor_operation': 'added'}
+ cls._emit(ACTIONS.updated, target_type, target_id, initiator, public,
+ actor_dict=actor_dict)
-class ManagerNotificationWrapper(object):
- """Send event notifications for ``Manager`` methods.
-
- Sends a notification if the wrapped Manager method does not raise an
- ``Exception`` (such as ``keystone.exception.NotFound``).
-
- :param operation: one of the values from ACTIONS
- :param resource_type: type of resource being affected
- :param public: If True (default), the event will be sent to the notifier
- API. If False, the event will only be sent via
- notify_event_callbacks to in process listeners
-
- """
- def __init__(self, operation, resource_type, public=True,
- resource_id_arg_index=1, result_id_arg_attr=None):
- self.operation = operation
- self.resource_type = resource_type
- self.public = public
- self.resource_id_arg_index = resource_id_arg_index
- self.result_id_arg_attr = result_id_arg_attr
-
- def __call__(self, f):
- def wrapper(*args, **kwargs):
- """Send a notification if the wrapped callable is successful."""
- try:
- result = f(*args, **kwargs)
- except Exception:
- raise
- else:
- if self.result_id_arg_attr is not None:
- resource_id = result[self.result_id_arg_attr]
- else:
- resource_id = args[self.resource_id_arg_index]
-
- # NOTE(stevemar): the _send_notification function is
- # overloaded, it's used to register callbacks and to actually
- # send the notification externally. Thus, we should check
- # the desired notification format in the function instead
- # of before it.
- _send_notification(
- self.operation,
- self.resource_type,
- resource_id,
- public=self.public)
-
- # Only emit CADF notifications for public events
- if CONF.notification_format == 'cadf' and self.public:
- outcome = taxonomy.OUTCOME_SUCCESS
- # NOTE(morganfainberg): The decorator form will always use
- # a 'None' initiator, since we do not pass context around
- # in a manner that allows the decorator to inspect context
- # and extract the needed information.
- initiator = None
- _create_cadf_payload(self.operation, self.resource_type,
- resource_id, outcome, initiator)
- return result
-
- return wrapper
-
-
-def created(*args, **kwargs):
- """Decorator to send notifications for ``Manager.create_*`` methods."""
- return ManagerNotificationWrapper(ACTIONS.created, *args, **kwargs)
-
-
-def updated(*args, **kwargs):
- """Decorator to send notifications for ``Manager.update_*`` methods."""
- return ManagerNotificationWrapper(ACTIONS.updated, *args, **kwargs)
-
-
-def disabled(*args, **kwargs):
- """Decorator to send notifications when an object is disabled."""
- return ManagerNotificationWrapper(ACTIONS.disabled, *args, **kwargs)
-
-
-def deleted(*args, **kwargs):
- """Decorator to send notifications for ``Manager.delete_*`` methods."""
- return ManagerNotificationWrapper(ACTIONS.deleted, *args, **kwargs)
-
+ @classmethod
+ def removed_from(cls, target_type, target_id, actor_type, actor_id,
+ initiator=None, public=True):
+ actor_dict = {'id': actor_id,
+ 'type': actor_type,
+ 'actor_operation': 'removed'}
+ cls._emit(ACTIONS.updated, target_type, target_id, initiator, public,
+ actor_dict=actor_dict)
-def internal(*args, **kwargs):
- """Decorator to send notifications for internal notifications only."""
- kwargs['public'] = False
- return ManagerNotificationWrapper(ACTIONS.internal, *args, **kwargs)
+ @classmethod
+ def internal(cls, resource_type, resource_id):
+ # NOTE(lbragstad): Internal notifications are never public and have
+ # never used the initiator variable, but the _emit() method expects
+ # them. Let's set them here but not expose them through the method
+    # signature - that way someone cannot do something like send an
+ # internal notification publicly.
+ initiator = None
+ public = False
+ cls._emit(ACTIONS.internal, resource_type, resource_id, initiator,
+ public)
def _get_callback_info(callback):
@@ -252,7 +209,8 @@ def _get_callback_info(callback):
module_name = getattr(callback, '__module__', None)
func_name = callback.__name__
if inspect.ismethod(callback):
- class_name = callback.__self__.__class__.__name__
+ class_name = reflection.get_class_name(callback.__self__,
+ fully_qualified=False)
return [module_name, class_name, func_name]
else:
return [module_name, func_name]
@@ -326,7 +284,6 @@ def listener(cls):
}
"""
-
def init_wrapper(init):
@functools.wraps(init)
def __new_init__(self, *args, **kwargs):
@@ -354,7 +311,7 @@ def notify_event_callbacks(service, resource_type, operation, payload):
'operation': operation,
'payload': payload}
LOG.debug('Invoking callback %(cb_name)s for event '
- '%(service)s %(resource_type)s %(operation)s for'
+ '%(service)s %(resource_type)s %(operation)s for '
'%(payload)s', subst_dict)
cb(service, resource_type, operation, payload)
@@ -424,7 +381,6 @@ def _create_cadf_payload(operation, resource_type, resource_id,
:param outcome: outcomes of the operation (SUCCESS, FAILURE, etc)
:param initiator: CADF representation of the user that created the request
"""
-
if resource_type not in CADF_TYPE_MAP:
target_uri = taxonomy.UNKNOWN
else:
@@ -440,7 +396,8 @@ def _create_cadf_payload(operation, resource_type, resource_id,
target, event_type, **audit_kwargs)
-def _send_notification(operation, resource_type, resource_id, public=True):
+def _send_notification(operation, resource_type, resource_id, actor_dict=None,
+ public=True):
"""Send notification to inform observers about the affected resource.
This method doesn't raise an exception when sending the notification fails.
@@ -448,6 +405,7 @@ def _send_notification(operation, resource_type, resource_id, public=True):
:param operation: operation being performed (created, updated, or deleted)
:param resource_type: type of resource being operated on
:param resource_id: ID of resource being operated on
+ :param actor_dict: a dictionary containing the actor's ID and type
:param public: if True (default), the event will be sent
to the notifier API.
if False, the event will only be sent via
@@ -455,6 +413,11 @@ def _send_notification(operation, resource_type, resource_id, public=True):
"""
payload = {'resource_info': resource_id}
+ if actor_dict:
+ payload['actor_id'] = actor_dict['id']
+ payload['actor_type'] = actor_dict['type']
+ payload['actor_operation'] = actor_dict['actor_operation']
+
notify_event_callbacks(SERVICE, resource_type, operation, payload)
# Only send this notification if the 'basic' format is used, otherwise
@@ -468,6 +431,8 @@ def _send_notification(operation, resource_type, resource_id, public=True):
'service': SERVICE,
'resource_type': resource_type,
'operation': operation}
+ if _check_notification_opt_out(event_type, outcome=None):
+ return
try:
notifier.info(context, event_type, payload)
except Exception:
@@ -484,7 +449,6 @@ def _get_request_audit_info(context, user_id=None):
:returns: Auditing data about the request
:rtype: :class:`pycadf.Resource`
"""
-
remote_addr = None
http_user_agent = None
project_id = None
@@ -503,8 +467,12 @@ def _get_request_audit_info(context, user_id=None):
{}).get('domain_id')
host = pycadf.host.Host(address=remote_addr, agent=http_user_agent)
- initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER,
- id=user_id, host=host)
+ initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER, host=host)
+
+ if user_id:
+ initiator.user_id = user_id
+ initiator.id = utils.resource_uuid(user_id)
+
if project_id:
initiator.project_id = project_id
if domain_id:
@@ -519,8 +487,8 @@ class CadfNotificationWrapper(object):
This function is only used for Authentication events. Its ``action`` and
``event_type`` are dictated below.
- - action: authenticate
- - event_type: identity.authenticate
+ - action: ``authenticate``
+ - event_type: ``identity.authenticate``
Sends CADF notifications for events such as whether an authentication was
successful or not.
@@ -534,9 +502,9 @@ class CadfNotificationWrapper(object):
self.event_type = '%s.%s' % (SERVICE, operation)
def __call__(self, f):
+ @functools.wraps(f)
def wrapper(wrapped_self, context, user_id, *args, **kwargs):
"""Always send a notification."""
-
initiator = _get_request_audit_info(context, user_id)
target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
try:
@@ -562,42 +530,44 @@ class CadfRoleAssignmentNotificationWrapper(object):
This function is only used for role assignment events. Its ``action`` and
``event_type`` are dictated below.
- - action: created.role_assignment or deleted.role_assignment
- - event_type: identity.role_assignment.created or
- identity.role_assignment.deleted
+ - action: ``created.role_assignment`` or ``deleted.role_assignment``
+ - event_type: ``identity.role_assignment.created`` or
+ ``identity.role_assignment.deleted``
Sends a CADF notification if the wrapped method does not raise an
- ``Exception`` (such as ``keystone.exception.NotFound``).
+ :class:`Exception` (such as :class:`keystone.exception.NotFound`).
- :param operation: one of the values from ACTIONS (create or delete)
+ :param operation: one of the values from ACTIONS (created or deleted)
"""
ROLE_ASSIGNMENT = 'role_assignment'
def __init__(self, operation):
self.action = '%s.%s' % (operation, self.ROLE_ASSIGNMENT)
- self.deprecated_event_type = '%s.%s.%s' % (SERVICE, operation,
- self.ROLE_ASSIGNMENT)
self.event_type = '%s.%s.%s' % (SERVICE, self.ROLE_ASSIGNMENT,
operation)
def __call__(self, f):
+ @functools.wraps(f)
def wrapper(wrapped_self, role_id, *args, **kwargs):
- """Send a notification if the wrapped callable is successful."""
+ """Send a notification if the wrapped callable is successful.
- """ NOTE(stevemar): The reason we go through checking kwargs
+ NOTE(stevemar): The reason we go through checking kwargs
and args for possible target and actor values is because the
create_grant() (and delete_grant()) methods are called
differently in various tests.
- Using named arguments, i.e.:
+ Using named arguments, i.e.::
+
create_grant(user_id=user['id'], domain_id=domain['id'],
role_id=role['id'])
- Or, using positional arguments, i.e.:
+ Or, using positional arguments, i.e.::
+
create_grant(role['id'], user['id'], None, domain['id'],
None)
- Or, both, i.e.:
+ Or, both, i.e.::
+
create_grant(role['id'], user_id=user['id'],
domain_id=domain['id'])
@@ -605,6 +575,9 @@ class CadfRoleAssignmentNotificationWrapper(object):
in as a dictionary
The actual method signature is
+
+ ::
+
create_grant(role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False)
@@ -635,30 +608,19 @@ class CadfRoleAssignmentNotificationWrapper(object):
audit_kwargs['inherited_to_projects'] = inherited
audit_kwargs['role'] = role_id
- # For backward compatibility, send both old and new event_type.
- # Deprecate old format and remove it in the next release.
- event_types = [self.deprecated_event_type, self.event_type]
- versionutils.deprecated(
- as_of=versionutils.deprecated.KILO,
- remove_in=+1,
- what=('sending duplicate %s notification event type' %
- self.deprecated_event_type),
- in_favor_of='%s notification event type' % self.event_type)
try:
result = f(wrapped_self, role_id, *args, **kwargs)
except Exception:
- for event_type in event_types:
- _send_audit_notification(self.action, initiator,
- taxonomy.OUTCOME_FAILURE,
- target, event_type,
- **audit_kwargs)
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_FAILURE,
+ target, self.event_type,
+ **audit_kwargs)
raise
else:
- for event_type in event_types:
- _send_audit_notification(self.action, initiator,
- taxonomy.OUTCOME_SUCCESS,
- target, event_type,
- **audit_kwargs)
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_SUCCESS,
+ target, self.event_type,
+ **audit_kwargs)
return result
return wrapper
@@ -686,7 +648,6 @@ def send_saml_audit_notification(action, context, user_id, group_ids,
:param outcome: One of :class:`pycadf.cadftaxonomy`
:type outcome: str
"""
-
initiator = _get_request_audit_info(context)
target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER)
audit_type = SAML_AUDIT_TYPE
@@ -718,6 +679,8 @@ def _send_audit_notification(action, initiator, outcome, target,
key-value pairs to the CADF event.
"""
+ if _check_notification_opt_out(event_type, outcome):
+ return
event = eventfactory.EventFactory().new_event(
eventType=cadftype.EVENTTYPE_ACTIVITY,
@@ -745,6 +708,33 @@ def _send_audit_notification(action, initiator, outcome, target,
{'action': action, 'event_type': event_type})
+def _check_notification_opt_out(event_type, outcome):
+ """Check if a particular event_type has been opted-out of.
+
+ This method checks to see if an event should be sent to the messaging
+ service. Any event specified in the opt-out list will not be transmitted.
+
+ :param event_type: This is the meter name that Ceilometer uses to poll
+ events. For example: identity.user.created, or
+ identity.authenticate.success, or identity.role_assignment.created
+ :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
+ taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
+
+ """
+ # NOTE(stevemar): Special handling for authenticate: we also look at the
+ # outcome when evaluating. For authN events, event_type is just
+ # identity.authenticate, which isn't fine-grained enough to provide any
+ # opt-out value, so we attach the outcome to re-create the meter name
+ # used in ceilometer.
+ if 'authenticate' in event_type:
+ event_type = event_type + "." + outcome
+
+ if event_type in CONF.notification_opt_out:
+ return True
+
+ return False
+
+
emit_event = CadfNotificationWrapper
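A usage sketch of the new opt-out hook: the operator lists meter names under
notification_opt_out (assumed here to be a list-valued option, consistent
with the membership test above), and _check_notification_opt_out then
suppresses both basic and CADF emission for them. The mini re-implementation
below mirrors the logic above with the config plumbing stubbed out::

    opt_out = ['identity.authenticate.success',
               'identity.role_assignment.created']

    def check_opt_out(event_type, outcome=None):
        # authN events carry their outcome in the meter name.
        if 'authenticate' in event_type:
            event_type = event_type + '.' + outcome
        return event_type in opt_out

    assert check_opt_out('identity.authenticate', 'success')
    assert not check_opt_out('identity.user.created')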
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py b/keystone-moon/keystone/oauth1/__init__.py
index caf4d66f..ea011f6b 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/054_add_actor_id_index.py
+++ b/keystone-moon/keystone/oauth1/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2014 IBM Corp.
+# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -12,16 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
-
-
-ASSIGNMENT_TABLE = 'assignment'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
- idx = sql.Index('ix_actor_id', assignment.c.actor_id)
- idx.create(migrate_engine)
+from keystone.oauth1.core import * # noqa
diff --git a/keystone-moon/keystone/oauth1/backends/__init__.py b/keystone-moon/keystone/oauth1/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/backends/__init__.py
diff --git a/keystone-moon/keystone/oauth1/backends/sql.py b/keystone-moon/keystone/oauth1/backends/sql.py
new file mode 100644
index 00000000..c5da7873
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/backends/sql.py
@@ -0,0 +1,258 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import random as _random
+import uuid
+
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+
+from keystone.common import sql
+from keystone.common import utils
+from keystone import exception
+from keystone.i18n import _
+from keystone.oauth1 import core
+
+
+random = _random.SystemRandom()
+
+
+class Consumer(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'consumer'
+ attributes = ['id', 'description', 'secret']
+ id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+ description = sql.Column(sql.String(64), nullable=True)
+ secret = sql.Column(sql.String(64), nullable=False)
+ extra = sql.Column(sql.JsonBlob(), nullable=False)
+
+
+class RequestToken(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'request_token'
+ attributes = ['id', 'request_secret',
+ 'verifier', 'authorizing_user_id', 'requested_project_id',
+ 'role_ids', 'consumer_id', 'expires_at']
+ id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+ request_secret = sql.Column(sql.String(64), nullable=False)
+ verifier = sql.Column(sql.String(64), nullable=True)
+ authorizing_user_id = sql.Column(sql.String(64), nullable=True)
+ requested_project_id = sql.Column(sql.String(64), nullable=False)
+ role_ids = sql.Column(sql.Text(), nullable=True)
+ consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
+ nullable=False, index=True)
+ expires_at = sql.Column(sql.String(64), nullable=True)
+
+ @classmethod
+ def from_dict(cls, user_dict):
+ return cls(**user_dict)
+
+ def to_dict(self):
+ return dict(self.items())
+
+
+class AccessToken(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'access_token'
+ attributes = ['id', 'access_secret', 'authorizing_user_id',
+ 'project_id', 'role_ids', 'consumer_id',
+ 'expires_at']
+ id = sql.Column(sql.String(64), primary_key=True, nullable=False)
+ access_secret = sql.Column(sql.String(64), nullable=False)
+ authorizing_user_id = sql.Column(sql.String(64), nullable=False,
+ index=True)
+ project_id = sql.Column(sql.String(64), nullable=False)
+ role_ids = sql.Column(sql.Text(), nullable=False)
+ consumer_id = sql.Column(sql.String(64), sql.ForeignKey('consumer.id'),
+ nullable=False)
+ expires_at = sql.Column(sql.String(64), nullable=True)
+
+ @classmethod
+ def from_dict(cls, user_dict):
+ return cls(**user_dict)
+
+ def to_dict(self):
+ return dict(self.items())
+
+
+class OAuth1(core.Oauth1DriverV8):
+ def _get_consumer(self, session, consumer_id):
+ consumer_ref = session.query(Consumer).get(consumer_id)
+ if consumer_ref is None:
+ raise exception.NotFound(_('Consumer not found'))
+ return consumer_ref
+
+ def get_consumer_with_secret(self, consumer_id):
+ with sql.session_for_read() as session:
+ consumer_ref = self._get_consumer(session, consumer_id)
+ return consumer_ref.to_dict()
+
+ def get_consumer(self, consumer_id):
+ return core.filter_consumer(
+ self.get_consumer_with_secret(consumer_id))
+
+ def create_consumer(self, consumer_ref):
+ with sql.session_for_write() as session:
+ consumer = Consumer.from_dict(consumer_ref)
+ session.add(consumer)
+ return consumer.to_dict()
+
+ def _delete_consumer(self, session, consumer_id):
+ consumer_ref = self._get_consumer(session, consumer_id)
+ session.delete(consumer_ref)
+
+ def _delete_request_tokens(self, session, consumer_id):
+ q = session.query(RequestToken)
+ req_tokens = q.filter_by(consumer_id=consumer_id)
+ req_tokens_list = set([x.id for x in req_tokens])
+ for token_id in req_tokens_list:
+ token_ref = self._get_request_token(session, token_id)
+ session.delete(token_ref)
+
+ def _delete_access_tokens(self, session, consumer_id):
+ q = session.query(AccessToken)
+ acc_tokens = q.filter_by(consumer_id=consumer_id)
+ acc_tokens_list = set([x.id for x in acc_tokens])
+ for token_id in acc_tokens_list:
+ token_ref = self._get_access_token(session, token_id)
+ session.delete(token_ref)
+
+ def delete_consumer(self, consumer_id):
+ with sql.session_for_write() as session:
+ self._delete_request_tokens(session, consumer_id)
+ self._delete_access_tokens(session, consumer_id)
+ self._delete_consumer(session, consumer_id)
+
+ def list_consumers(self):
+ with sql.session_for_read() as session:
+ cons = session.query(Consumer)
+ return [core.filter_consumer(x.to_dict()) for x in cons]
+
+ def update_consumer(self, consumer_id, consumer_ref):
+ with sql.session_for_write() as session:
+ consumer = self._get_consumer(session, consumer_id)
+ old_consumer_dict = consumer.to_dict()
+ old_consumer_dict.update(consumer_ref)
+ new_consumer = Consumer.from_dict(old_consumer_dict)
+ consumer.description = new_consumer.description
+ consumer.extra = new_consumer.extra
+ return core.filter_consumer(consumer.to_dict())
+
+ def create_request_token(self, consumer_id, requested_project,
+ request_token_duration):
+ request_token_id = uuid.uuid4().hex
+ request_token_secret = uuid.uuid4().hex
+ expiry_date = None
+ if request_token_duration:
+ now = timeutils.utcnow()
+ future = now + datetime.timedelta(seconds=request_token_duration)
+ expiry_date = utils.isotime(future, subsecond=True)
+
+ ref = {}
+ ref['id'] = request_token_id
+ ref['request_secret'] = request_token_secret
+ ref['verifier'] = None
+ ref['authorizing_user_id'] = None
+ ref['requested_project_id'] = requested_project
+ ref['role_ids'] = None
+ ref['consumer_id'] = consumer_id
+ ref['expires_at'] = expiry_date
+ with sql.session_for_write() as session:
+ token_ref = RequestToken.from_dict(ref)
+ session.add(token_ref)
+ return token_ref.to_dict()
+
+ def _get_request_token(self, session, request_token_id):
+ token_ref = session.query(RequestToken).get(request_token_id)
+ if token_ref is None:
+ raise exception.NotFound(_('Request token not found'))
+ return token_ref
+
+ def get_request_token(self, request_token_id):
+ with sql.session_for_read() as session:
+ token_ref = self._get_request_token(session, request_token_id)
+ return token_ref.to_dict()
+
+ def authorize_request_token(self, request_token_id, user_id,
+ role_ids):
+ with sql.session_for_write() as session:
+ token_ref = self._get_request_token(session, request_token_id)
+ token_dict = token_ref.to_dict()
+ token_dict['authorizing_user_id'] = user_id
+ token_dict['verifier'] = ''.join(random.sample(core.VERIFIER_CHARS,
+ 8))
+ token_dict['role_ids'] = jsonutils.dumps(role_ids)
+
+ new_token = RequestToken.from_dict(token_dict)
+ for attr in RequestToken.attributes:
+ if (attr == 'authorizing_user_id' or attr == 'verifier'
+ or attr == 'role_ids'):
+ setattr(token_ref, attr, getattr(new_token, attr))
+
+ return token_ref.to_dict()
+
+ def create_access_token(self, request_id, access_token_duration):
+ access_token_id = uuid.uuid4().hex
+ access_token_secret = uuid.uuid4().hex
+ with sql.session_for_write() as session:
+ req_token_ref = self._get_request_token(session, request_id)
+ token_dict = req_token_ref.to_dict()
+
+ expiry_date = None
+ if access_token_duration:
+ now = timeutils.utcnow()
+ future = (now +
+ datetime.timedelta(seconds=access_token_duration))
+ expiry_date = utils.isotime(future, subsecond=True)
+
+ # add Access Token
+ ref = {}
+ ref['id'] = access_token_id
+ ref['access_secret'] = access_token_secret
+ ref['authorizing_user_id'] = token_dict['authorizing_user_id']
+ ref['project_id'] = token_dict['requested_project_id']
+ ref['role_ids'] = token_dict['role_ids']
+ ref['consumer_id'] = token_dict['consumer_id']
+ ref['expires_at'] = expiry_date
+ token_ref = AccessToken.from_dict(ref)
+ session.add(token_ref)
+
+ # remove request token, it's been used
+ session.delete(req_token_ref)
+
+ return token_ref.to_dict()
+
+ def _get_access_token(self, session, access_token_id):
+ token_ref = session.query(AccessToken).get(access_token_id)
+ if token_ref is None:
+ raise exception.NotFound(_('Access token not found'))
+ return token_ref
+
+ def get_access_token(self, access_token_id):
+ with sql.session_for_read() as session:
+ token_ref = self._get_access_token(session, access_token_id)
+ return token_ref.to_dict()
+
+ def list_access_tokens(self, user_id):
+ with sql.session_for_read() as session:
+ q = session.query(AccessToken)
+ user_auths = q.filter_by(authorizing_user_id=user_id)
+ return [core.filter_token(x.to_dict()) for x in user_auths]
+
+ def delete_access_token(self, user_id, access_token_id):
+ with sql.session_for_write() as session:
+ token_ref = self._get_access_token(session, access_token_id)
+ token_dict = token_ref.to_dict()
+ if token_dict['authorizing_user_id'] != user_id:
+ raise exception.Unauthorized(_('User IDs do not match'))
+
+ session.delete(token_ref)
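One detail worth calling out from authorize_request_token() above: the
verifier is eight distinct characters sampled from VERIFIER_CHARS with a
CSPRNG. A self-contained sketch of just that step (constants mirrored from
keystone/oauth1/core.py later in this diff)::

    import random
    import string

    # Alphanumerics minus commonly confused characters, as in core.py.
    VERIFIER_CHARS = ''.join(
        c for c in string.ascii_letters + string.digits
        if c not in 'jiIl1oO0')

    _sysrand = random.SystemRandom()

    def make_verifier():
        # random.sample() draws 8 distinct characters, matching
        # ''.join(random.sample(core.VERIFIER_CHARS, 8)) above.
        return ''.join(_sysrand.sample(VERIFIER_CHARS, 8))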
diff --git a/keystone-moon/keystone/oauth1/controllers.py b/keystone-moon/keystone/oauth1/controllers.py
new file mode 100644
index 00000000..489bb4c7
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/controllers.py
@@ -0,0 +1,409 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Extensions supporting OAuth1."""
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import utils
+from keystone.common import validation
+from keystone.common import wsgi
+from keystone import exception
+from keystone.i18n import _
+from keystone import notifications
+from keystone.oauth1 import core as oauth1
+from keystone.oauth1 import schema
+from keystone.oauth1 import validator
+
+
+CONF = cfg.CONF
+
+
+def _emit_user_oauth_consumer_token_invalidate(payload):
+ # This is a special-case notification that expects the payload to be a
+ # dict containing the user_id and the consumer_id, so that the token
+ # provider can invalidate any tokens in token persistence if token
+ # persistence is enabled.
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
+ payload,
+ )
+
+
+@dependency.requires('oauth_api', 'token_provider_api')
+class ConsumerCrudV3(controller.V3Controller):
+ collection_name = 'consumers'
+ member_name = 'consumer'
+
+ @classmethod
+ def base_url(cls, context, path=None):
+ """Construct a path and pass it to V3Controller.base_url method."""
+ # NOTE(stevemar): Overriding path to /OS-OAUTH1/consumers so that
+ # V3Controller.base_url handles setting the self link correctly.
+ path = '/OS-OAUTH1/' + cls.collection_name
+ return controller.V3Controller.base_url(context, path=path)
+
+ @controller.protected()
+ @validation.validated(schema.consumer_create, 'consumer')
+ def create_consumer(self, context, consumer):
+ ref = self._assign_unique_id(self._normalize_dict(consumer))
+ initiator = notifications._get_request_audit_info(context)
+ consumer_ref = self.oauth_api.create_consumer(ref, initiator)
+ return ConsumerCrudV3.wrap_member(context, consumer_ref)
+
+ @controller.protected()
+ @validation.validated(schema.consumer_update, 'consumer')
+ def update_consumer(self, context, consumer_id, consumer):
+ self._require_matching_id(consumer_id, consumer)
+ ref = self._normalize_dict(consumer)
+ initiator = notifications._get_request_audit_info(context)
+ ref = self.oauth_api.update_consumer(consumer_id, ref, initiator)
+ return ConsumerCrudV3.wrap_member(context, ref)
+
+ @controller.protected()
+ def list_consumers(self, context):
+ ref = self.oauth_api.list_consumers()
+ return ConsumerCrudV3.wrap_collection(context, ref)
+
+ @controller.protected()
+ def get_consumer(self, context, consumer_id):
+ ref = self.oauth_api.get_consumer(consumer_id)
+ return ConsumerCrudV3.wrap_member(context, ref)
+
+ @controller.protected()
+ def delete_consumer(self, context, consumer_id):
+ user_token_ref = utils.get_token_ref(context)
+ payload = {'user_id': user_token_ref.user_id,
+ 'consumer_id': consumer_id}
+ _emit_user_oauth_consumer_token_invalidate(payload)
+ initiator = notifications._get_request_audit_info(context)
+ self.oauth_api.delete_consumer(consumer_id, initiator)
+
+
+@dependency.requires('oauth_api')
+class AccessTokenCrudV3(controller.V3Controller):
+ collection_name = 'access_tokens'
+ member_name = 'access_token'
+
+ @classmethod
+ def _add_self_referential_link(cls, context, ref):
+ # NOTE(lwolf): overriding method to add proper path to self link
+ ref.setdefault('links', {})
+ path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
+ 'user_id': cls._get_user_id(ref)
+ }
+ ref['links']['self'] = cls.base_url(context, path) + '/' + ref['id']
+
+ @controller.protected()
+ def get_access_token(self, context, user_id, access_token_id):
+ access_token = self.oauth_api.get_access_token(access_token_id)
+ if access_token['authorizing_user_id'] != user_id:
+ raise exception.NotFound()
+ access_token = self._format_token_entity(context, access_token)
+ return AccessTokenCrudV3.wrap_member(context, access_token)
+
+ @controller.protected()
+ def list_access_tokens(self, context, user_id):
+ auth_context = context.get('environment',
+ {}).get('KEYSTONE_AUTH_CONTEXT', {})
+ if auth_context.get('is_delegated_auth'):
+ raise exception.Forbidden(
+ _('Cannot list request tokens'
+ ' with a token issued via delegation.'))
+ refs = self.oauth_api.list_access_tokens(user_id)
+ formatted_refs = ([self._format_token_entity(context, x)
+ for x in refs])
+ return AccessTokenCrudV3.wrap_collection(context, formatted_refs)
+
+ @controller.protected()
+ def delete_access_token(self, context, user_id, access_token_id):
+ access_token = self.oauth_api.get_access_token(access_token_id)
+ consumer_id = access_token['consumer_id']
+ payload = {'user_id': user_id, 'consumer_id': consumer_id}
+ _emit_user_oauth_consumer_token_invalidate(payload)
+ initiator = notifications._get_request_audit_info(context)
+ return self.oauth_api.delete_access_token(
+ user_id, access_token_id, initiator)
+
+ @staticmethod
+ def _get_user_id(entity):
+ return entity.get('authorizing_user_id', '')
+
+ def _format_token_entity(self, context, entity):
+ formatted_entity = entity.copy()
+ access_token_id = formatted_entity['id']
+ user_id = self._get_user_id(formatted_entity)
+ if 'role_ids' in entity:
+ formatted_entity.pop('role_ids')
+ if 'access_secret' in entity:
+ formatted_entity.pop('access_secret')
+
+ url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
+ '/roles' % {'user_id': user_id,
+ 'access_token_id': access_token_id})
+
+ formatted_entity.setdefault('links', {})
+ formatted_entity['links']['roles'] = (self.base_url(context, url))
+
+ return formatted_entity
+
+
+@dependency.requires('oauth_api', 'role_api')
+class AccessTokenRolesV3(controller.V3Controller):
+ collection_name = 'roles'
+ member_name = 'role'
+
+ @controller.protected()
+ def list_access_token_roles(self, context, user_id, access_token_id):
+ access_token = self.oauth_api.get_access_token(access_token_id)
+ if access_token['authorizing_user_id'] != user_id:
+ raise exception.NotFound()
+ authed_role_ids = access_token['role_ids']
+ authed_role_ids = jsonutils.loads(authed_role_ids)
+ refs = ([self._format_role_entity(x) for x in authed_role_ids])
+ return AccessTokenRolesV3.wrap_collection(context, refs)
+
+ @controller.protected()
+ def get_access_token_role(self, context, user_id,
+ access_token_id, role_id):
+ access_token = self.oauth_api.get_access_token(access_token_id)
+ if access_token['authorizing_user_id'] != user_id:
+ raise exception.Unauthorized(_('User IDs do not match'))
+ authed_role_ids = access_token['role_ids']
+ authed_role_ids = jsonutils.loads(authed_role_ids)
+ for authed_role_id in authed_role_ids:
+ if authed_role_id == role_id:
+ role = self._format_role_entity(role_id)
+ return AccessTokenRolesV3.wrap_member(context, role)
+ raise exception.RoleNotFound(role_id=role_id)
+
+ def _format_role_entity(self, role_id):
+ role = self.role_api.get_role(role_id)
+ formatted_entity = role.copy()
+ if 'description' in role:
+ formatted_entity.pop('description')
+ if 'enabled' in role:
+ formatted_entity.pop('enabled')
+ return formatted_entity
+
+
+@dependency.requires('assignment_api', 'oauth_api',
+ 'resource_api', 'token_provider_api')
+class OAuthControllerV3(controller.V3Controller):
+ collection_name = 'not_used'
+ member_name = 'not_used'
+
+ def create_request_token(self, context):
+ headers = context['headers']
+ oauth_headers = oauth1.get_oauth_headers(headers)
+ consumer_id = oauth_headers.get('oauth_consumer_key')
+ requested_project_id = headers.get('Requested-Project-Id')
+
+ if not consumer_id:
+ raise exception.ValidationError(
+ attribute='oauth_consumer_key', target='request')
+ if not requested_project_id:
+ raise exception.ValidationError(
+ attribute='requested_project_id', target='request')
+
+ # NOTE(stevemar): Ensure consumer and requested project exist
+ self.resource_api.get_project(requested_project_id)
+ self.oauth_api.get_consumer(consumer_id)
+
+ url = self.base_url(context, context['path'])
+
+ req_headers = {'Requested-Project-Id': requested_project_id}
+ req_headers.update(headers)
+ request_verifier = oauth1.RequestTokenEndpoint(
+ request_validator=validator.OAuthValidator(),
+ token_generator=oauth1.token_generator)
+ h, b, s = request_verifier.create_request_token_response(
+ url,
+ http_method='POST',
+ body=context['query_string'],
+ headers=req_headers)
+
+ if (not b) or int(s) > 399:
+ msg = _('Invalid signature')
+ raise exception.Unauthorized(message=msg)
+
+ request_token_duration = CONF.oauth1.request_token_duration
+ initiator = notifications._get_request_audit_info(context)
+ token_ref = self.oauth_api.create_request_token(consumer_id,
+ requested_project_id,
+ request_token_duration,
+ initiator)
+
+ result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
+ % {'key': token_ref['id'],
+ 'secret': token_ref['request_secret']})
+
+ if CONF.oauth1.request_token_duration:
+ expiry_bit = '&oauth_expires_at=%s' % token_ref['expires_at']
+ result += expiry_bit
+
+ headers = [('Content-Type', 'application/x-www-form-urlencoded')]
+ response = wsgi.render_response(result,
+ status=(201, 'Created'),
+ headers=headers)
+
+ return response
+
+ def create_access_token(self, context):
+ headers = context['headers']
+ oauth_headers = oauth1.get_oauth_headers(headers)
+ consumer_id = oauth_headers.get('oauth_consumer_key')
+ request_token_id = oauth_headers.get('oauth_token')
+ oauth_verifier = oauth_headers.get('oauth_verifier')
+
+ if not consumer_id:
+ raise exception.ValidationError(
+ attribute='oauth_consumer_key', target='request')
+ if not request_token_id:
+ raise exception.ValidationError(
+ attribute='oauth_token', target='request')
+ if not oauth_verifier:
+ raise exception.ValidationError(
+ attribute='oauth_verifier', target='request')
+
+ req_token = self.oauth_api.get_request_token(
+ request_token_id)
+
+ expires_at = req_token['expires_at']
+ if expires_at:
+ now = timeutils.utcnow()
+ expires = timeutils.normalize_time(
+ timeutils.parse_isotime(expires_at))
+ if now > expires:
+ raise exception.Unauthorized(_('Request token is expired'))
+
+ url = self.base_url(context, context['path'])
+
+ access_verifier = oauth1.AccessTokenEndpoint(
+ request_validator=validator.OAuthValidator(),
+ token_generator=oauth1.token_generator)
+ h, b, s = access_verifier.create_access_token_response(
+ url,
+ http_method='POST',
+ body=context['query_string'],
+ headers=headers)
+ params = oauth1.extract_non_oauth_params(b)
+ if params:
+ msg = _('There should not be any non-oauth parameters')
+ raise exception.Unauthorized(message=msg)
+
+ if req_token['consumer_id'] != consumer_id:
+ msg = _('provided consumer key does not match stored consumer key')
+ raise exception.Unauthorized(message=msg)
+
+ if req_token['verifier'] != oauth_verifier:
+ msg = _('provided verifier does not match stored verifier')
+ raise exception.Unauthorized(message=msg)
+
+ if req_token['id'] != request_token_id:
+ msg = _('provided request key does not match stored request key')
+ raise exception.Unauthorized(message=msg)
+
+ if not req_token.get('authorizing_user_id'):
+ msg = _('Request Token does not have an authorizing user id')
+ raise exception.Unauthorized(message=msg)
+
+ access_token_duration = CONF.oauth1.access_token_duration
+ initiator = notifications._get_request_audit_info(context)
+ token_ref = self.oauth_api.create_access_token(request_token_id,
+ access_token_duration,
+ initiator)
+
+ result = ('oauth_token=%(key)s&oauth_token_secret=%(secret)s'
+ % {'key': token_ref['id'],
+ 'secret': token_ref['access_secret']})
+
+ if CONF.oauth1.access_token_duration:
+ expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at'])
+ result += expiry_bit
+
+ headers = [('Content-Type', 'application/x-www-form-urlencoded')]
+ response = wsgi.render_response(result,
+ status=(201, 'Created'),
+ headers=headers)
+
+ return response
+
+ @controller.protected()
+ def authorize_request_token(self, context, request_token_id, roles):
+ """An authenticated user is going to authorize a request token.
+
+ As a security precaution, the requested roles must match those in
+ the request token. Because this is in a CLI-only world at the moment,
+ there is not another easy way to make sure the user knows which roles
+ are being requested before authorizing.
+ """
+ auth_context = context.get('environment',
+ {}).get('KEYSTONE_AUTH_CONTEXT', {})
+ if auth_context.get('is_delegated_auth'):
+ raise exception.Forbidden(
+ _('Cannot authorize a request token'
+ ' with a token issued via delegation.'))
+
+ req_token = self.oauth_api.get_request_token(request_token_id)
+
+ expires_at = req_token['expires_at']
+ if expires_at:
+ now = timeutils.utcnow()
+ expires = timeutils.normalize_time(
+ timeutils.parse_isotime(expires_at))
+ if now > expires:
+ raise exception.Unauthorized(_('Request token is expired'))
+
+ # put the roles in a set for easy comparison
+ authed_roles = set()
+ for role in roles:
+ authed_roles.add(role['id'])
+
+ # verify the authorizing user has the roles
+ user_token = utils.get_token_ref(context)
+ user_id = user_token.user_id
+ project_id = req_token['requested_project_id']
+ user_roles = self.assignment_api.get_roles_for_user_and_project(
+ user_id, project_id)
+ cred_set = set(user_roles)
+
+ if not cred_set.issuperset(authed_roles):
+ msg = _('authorizing user does not have the required role')
+ raise exception.Unauthorized(message=msg)
+
+ # create list of just the id's for the backend
+ role_ids = list(authed_roles)
+
+ # verify the user has the project too
+ req_project_id = req_token['requested_project_id']
+ user_projects = self.assignment_api.list_projects_for_user(user_id)
+ for user_project in user_projects:
+ if user_project['id'] == req_project_id:
+ break
+ else:
+ msg = _("User is not a member of the requested project")
+ raise exception.Unauthorized(message=msg)
+
+ # finally authorize the token
+ authed_token = self.oauth_api.authorize_request_token(
+ request_token_id, user_id, role_ids)
+
+ to_return = {'token': {'oauth_verifier': authed_token['verifier']}}
+ return to_return
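Both token endpoints above return a form-encoded body rather than JSON. A
hedged client-side sketch of consuming such a response with only the standard
library (the body string is illustrative)::

    from six.moves.urllib import parse

    body = ('oauth_token=0123456789abcdef0123456789abcdef'
            '&oauth_token_secret=fedcba9876543210fedcba9876543210'
            '&oauth_expires_at=2016-06-09T08:12:34.000000Z')

    params = dict(parse.parse_qsl(body))
    token_id = params['oauth_token']
    token_secret = params['oauth_token_secret']
    # Only present when a token duration is configured:
    expires_at = params.get('oauth_expires_at')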
diff --git a/keystone-moon/keystone/oauth1/core.py b/keystone-moon/keystone/oauth1/core.py
new file mode 100644
index 00000000..2e52aefe
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/core.py
@@ -0,0 +1,367 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the OAuth1 service."""
+
+from __future__ import absolute_import
+
+import abc
+import string
+import uuid
+
+import oauthlib.common
+from oauthlib import oauth1
+from oslo_config import cfg
+from oslo_log import log
+import six
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _LE
+from keystone import notifications
+
+
+RequestValidator = oauth1.RequestValidator
+Client = oauth1.Client
+AccessTokenEndpoint = oauth1.AccessTokenEndpoint
+ResourceEndpoint = oauth1.ResourceEndpoint
+AuthorizationEndpoint = oauth1.AuthorizationEndpoint
+SIG_HMAC = oauth1.SIGNATURE_HMAC
+RequestTokenEndpoint = oauth1.RequestTokenEndpoint
+oRequest = oauthlib.common.Request
+# The characters used to generate verifiers are limited to alphanumerical
+# values for ease of manual entry. Commonly confused characters are omitted.
+VERIFIER_CHARS = string.ascii_letters + string.digits
+CONFUSED_CHARS = 'jiIl1oO0'
+VERIFIER_CHARS = ''.join(c for c in VERIFIER_CHARS if c not in CONFUSED_CHARS)
+
+
+class Token(object):
+ def __init__(self, key, secret):
+ self.key = key
+ self.secret = secret
+ self.verifier = None
+
+ def set_verifier(self, verifier):
+ self.verifier = verifier
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+def token_generator(*args, **kwargs):
+ return uuid.uuid4().hex
+
+
+EXTENSION_DATA = {
+ 'name': 'OpenStack OAUTH1 API',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-OAUTH1/v1.0',
+ 'alias': 'OS-OAUTH1',
+ 'updated': '2013-07-07T12:00:00-00:00',
+ 'description': 'OpenStack OAuth 1.0a Delegated Auth Mechanism.',
+ 'links': [
+ {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://specs.openstack.org/openstack/keystone-specs/api/'
+ 'v3/identity-api-v3-os-oauth1-ext.html',
+ }
+ ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+
+def filter_consumer(consumer_ref):
+ """Filter out private items in a consumer dict.
+
+ 'secret' is never returned.
+
+ :returns: consumer_ref
+
+ """
+ if consumer_ref:
+ consumer_ref = consumer_ref.copy()
+ consumer_ref.pop('secret', None)
+ return consumer_ref
+
+
+def filter_token(access_token_ref):
+ """Filter out private items in an access token dict.
+
+ 'access_secret' is never returned.
+
+ :returns: access_token_ref
+
+ """
+ if access_token_ref:
+ access_token_ref = access_token_ref.copy()
+ access_token_ref.pop('access_secret', None)
+ return access_token_ref
+
+
+def get_oauth_headers(headers):
+ parameters = {}
+
+ # The incoming headers variable is the usual headers dict from the
+ # request context. In an OAuth-signed request the oauth variables are
+ # carried in the header under the key 'Authorization'.
+
+ if headers and 'Authorization' in headers:
+ # A typical Authorization value looks like
+ # 'OAuth realm="", oauth_body_hash="2jm%3D", oauth_nonce="14475435", ...'
+ # The leading 'OAuth ' prefix is stripped before the remaining
+ # parameters are parsed out.
+
+ auth_header = headers['Authorization']
+ params = oauth1.rfc5849.utils.parse_authorization_header(auth_header)
+ parameters.update(dict(params))
+ return parameters
+ else:
+ msg = _LE('Cannot retrieve Authorization headers')
+ LOG.error(msg)
+ raise exception.OAuthHeadersMissingError()
+
+
+def extract_non_oauth_params(query_string):
+ params = oauthlib.common.extract_params(query_string)
+ return {k: v for k, v in params if not k.startswith('oauth_')}
+
+
+@dependency.provider('oauth_api')
+class Manager(manager.Manager):
+ """Default pivot point for the OAuth1 backend.
+
+ See :class:`keystone.common.manager.Manager` for more details on how this
+ dynamically calls the backend.
+
+ """
+
+ driver_namespace = 'keystone.oauth1'
+
+ _ACCESS_TOKEN = "OS-OAUTH1:access_token"
+ _REQUEST_TOKEN = "OS-OAUTH1:request_token"
+ _CONSUMER = "OS-OAUTH1:consumer"
+
+ def __init__(self):
+ super(Manager, self).__init__(CONF.oauth1.driver)
+
+ def create_consumer(self, consumer_ref, initiator=None):
+ consumer_ref = consumer_ref.copy()
+ consumer_ref['secret'] = uuid.uuid4().hex
+ ret = self.driver.create_consumer(consumer_ref)
+ notifications.Audit.created(self._CONSUMER, ret['id'], initiator)
+ return ret
+
+ def update_consumer(self, consumer_id, consumer_ref, initiator=None):
+ ret = self.driver.update_consumer(consumer_id, consumer_ref)
+ notifications.Audit.updated(self._CONSUMER, consumer_id, initiator)
+ return ret
+
+ def delete_consumer(self, consumer_id, initiator=None):
+ ret = self.driver.delete_consumer(consumer_id)
+ notifications.Audit.deleted(self._CONSUMER, consumer_id, initiator)
+ return ret
+
+ def create_access_token(self, request_id, access_token_duration,
+ initiator=None):
+ ret = self.driver.create_access_token(request_id,
+ access_token_duration)
+ notifications.Audit.created(self._ACCESS_TOKEN, ret['id'], initiator)
+ return ret
+
+ def delete_access_token(self, user_id, access_token_id, initiator=None):
+ ret = self.driver.delete_access_token(user_id, access_token_id)
+ notifications.Audit.deleted(self._ACCESS_TOKEN, access_token_id,
+ initiator)
+ return ret
+
+ def create_request_token(self, consumer_id, requested_project,
+ request_token_duration, initiator=None):
+ ret = self.driver.create_request_token(
+ consumer_id, requested_project, request_token_duration)
+ notifications.Audit.created(self._REQUEST_TOKEN, ret['id'],
+ initiator)
+ return ret
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Oauth1DriverV8(object):
+ """Interface description for an OAuth1 driver."""
+
+ @abc.abstractmethod
+ def create_consumer(self, consumer_ref):
+ """Create consumer.
+
+ :param consumer_ref: consumer ref with consumer name
+ :type consumer_ref: dict
+ :returns: consumer_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_consumer(self, consumer_id, consumer_ref):
+ """Update consumer.
+
+ :param consumer_id: id of consumer to update
+ :type consumer_id: string
+ :param consumer_ref: new consumer ref with consumer name
+ :type consumer_ref: dict
+ :returns: consumer_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_consumers(self):
+ """List consumers.
+
+ :returns: list of consumers
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_consumer(self, consumer_id):
+ """Get consumer, returns the consumer id (key) and description.
+
+ :param consumer_id: id of consumer to get
+ :type consumer_id: string
+ :returns: consumer_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_consumer_with_secret(self, consumer_id):
+ """Like get_consumer(), but also returns consumer secret.
+
+ The returned consumer_ref dictionary includes the consumer secret.
+ Secrets should only be shared upon consumer creation; the consumer
+ secret is required to verify incoming OAuth requests.
+
+ :param consumer_id: id of consumer to get
+ :type consumer_id: string
+ :returns: consumer_ref containing consumer secret
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_consumer(self, consumer_id):
+ """Delete consumer.
+
+ :param consumer_id: id of consumer to delete
+ :type consumer_id: string
+ :returns: None.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_access_tokens(self, user_id):
+ """List access tokens.
+
+ :param user_id: search for access tokens authorized by given user id
+ :type user_id: string
+ :returns: list of access tokens the user has authorized
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_access_token(self, user_id, access_token_id):
+ """Delete access token.
+
+ :param user_id: authorizing user id
+ :type user_id: string
+ :param access_token_id: access token to delete
+ :type access_token_id: string
+ :returns: None
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_request_token(self, consumer_id, requested_project,
+ request_token_duration):
+ """Create request token.
+
+ :param consumer_id: the id of the consumer
+ :type consumer_id: string
+ :param requested_project: requested project id
+ :type requested_project: string
+ :param request_token_duration: duration of request token
+ :type request_token_duration: string
+ :returns: request_token_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_request_token(self, request_token_id):
+ """Get request token.
+
+ :param request_token_id: the id of the request token
+ :type request_token_id: string
+ :returns: request_token_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_access_token(self, access_token_id):
+ """Get access token.
+
+ :param access_token_id: the id of the access token
+ :type access_token_id: string
+ :returns: access_token_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def authorize_request_token(self, request_token_id, user_id, role_ids):
+ """Authorize request token.
+
+ :param request_token_id: the id of the request token, to be authorized
+ :type request_token_id: string
+ :param user_id: the id of the authorizing user
+ :type user_id: string
+ :param role_ids: list of role ids to authorize
+ :type role_ids: list
+ :returns: verifier
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def create_access_token(self, request_id, access_token_duration):
+ """Create access token.
+
+ :param request_id: the id of the request token (deleted once consumed)
+ :type request_id: string
+ :param access_token_duration: duration of an access token
+ :type access_token_duration: string
+ :returns: access_token_ref
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+Driver = manager.create_legacy_driver(Oauth1DriverV8)
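Since create_access_token() in the controller rejects any non-oauth
parameters, extract_non_oauth_params() above is the gatekeeper. A quick
sketch of its behaviour (the body string is illustrative;
oauthlib.common.extract_params parses a urlencoded string into key/value
pairs)::

    import oauthlib.common

    body = 'oauth_token=abcd&oauth_verifier=12345678&foo=bar'
    params = oauthlib.common.extract_params(body)
    non_oauth = {k: v for k, v in params if not k.startswith('oauth_')}
    assert non_oauth == {'foo': 'bar'}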
diff --git a/keystone-moon/keystone/oauth1/routers.py b/keystone-moon/keystone/oauth1/routers.py
new file mode 100644
index 00000000..0575b107
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/routers.py
@@ -0,0 +1,154 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.oauth1 import controllers
+
+
+build_resource_relation = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-OAUTH1', extension_version='1.0')
+
+build_parameter_relation = functools.partial(
+ json_home.build_v3_extension_parameter_relation,
+ extension_name='OS-OAUTH1', extension_version='1.0')
+
+ACCESS_TOKEN_ID_PARAMETER_RELATION = build_parameter_relation(
+ parameter_name='access_token_id')
+
+
+class Routers(wsgi.RoutersBase):
+ """API Endpoints for the OAuth1 extension.
+
+ The goal of this extension is to allow third-party service providers
+ to acquire tokens with a limited subset of a user's roles for acting
+ on behalf of that user. This is done using an OAuth-like flow and
+ API.
+
+ The API looks like::
+
+ # Basic admin-only consumer crud
+ POST /OS-OAUTH1/consumers
+ GET /OS-OAUTH1/consumers
+ PATCH /OS-OAUTH1/consumers/{consumer_id}
+ GET /OS-OAUTH1/consumers/{consumer_id}
+ DELETE /OS-OAUTH1/consumers/{consumer_id}
+
+ # User access token crud
+ GET /users/{user_id}/OS-OAUTH1/access_tokens
+ GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
+ GET /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles
+ GET /users/{user_id}/OS-OAUTH1/access_tokens
+ /{access_token_id}/roles/{role_id}
+ DELETE /users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
+
+ # OAuth interfaces
+ POST /OS-OAUTH1/request_token # create a request token
+ PUT /OS-OAUTH1/authorize # authorize a request token
+ POST /OS-OAUTH1/access_token # create an access token
+
+ """
+
+ def append_v3_routers(self, mapper, routers):
+ consumer_controller = controllers.ConsumerCrudV3()
+ access_token_controller = controllers.AccessTokenCrudV3()
+ access_token_roles_controller = controllers.AccessTokenRolesV3()
+ oauth_controller = controllers.OAuthControllerV3()
+
+ # basic admin-only consumer crud
+ self._add_resource(
+ mapper, consumer_controller,
+ path='/OS-OAUTH1/consumers',
+ get_action='list_consumers',
+ post_action='create_consumer',
+ rel=build_resource_relation(resource_name='consumers'))
+ self._add_resource(
+ mapper, consumer_controller,
+ path='/OS-OAUTH1/consumers/{consumer_id}',
+ get_action='get_consumer',
+ patch_action='update_consumer',
+ delete_action='delete_consumer',
+ rel=build_resource_relation(resource_name='consumer'),
+ path_vars={
+ 'consumer_id':
+ build_parameter_relation(parameter_name='consumer_id'),
+ })
+
+ # user access token crud
+ self._add_resource(
+ mapper, access_token_controller,
+ path='/users/{user_id}/OS-OAUTH1/access_tokens',
+ get_action='list_access_tokens',
+ rel=build_resource_relation(resource_name='user_access_tokens'),
+ path_vars={
+ 'user_id': json_home.Parameters.USER_ID,
+ })
+ self._add_resource(
+ mapper, access_token_controller,
+ path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}',
+ get_action='get_access_token',
+ delete_action='delete_access_token',
+ rel=build_resource_relation(resource_name='user_access_token'),
+ path_vars={
+ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+ 'user_id': json_home.Parameters.USER_ID,
+ })
+ self._add_resource(
+ mapper, access_token_roles_controller,
+ path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
+ 'roles',
+ get_action='list_access_token_roles',
+ rel=build_resource_relation(
+ resource_name='user_access_token_roles'),
+ path_vars={
+ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+ 'user_id': json_home.Parameters.USER_ID,
+ })
+ self._add_resource(
+ mapper, access_token_roles_controller,
+ path='/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/'
+ 'roles/{role_id}',
+ get_action='get_access_token_role',
+ rel=build_resource_relation(
+ resource_name='user_access_token_role'),
+ path_vars={
+ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
+ 'role_id': json_home.Parameters.ROLE_ID,
+ 'user_id': json_home.Parameters.USER_ID,
+ })
+
+ # oauth flow calls
+ self._add_resource(
+ mapper, oauth_controller,
+ path='/OS-OAUTH1/request_token',
+ post_action='create_request_token',
+ rel=build_resource_relation(resource_name='request_tokens'))
+ self._add_resource(
+ mapper, oauth_controller,
+ path='/OS-OAUTH1/access_token',
+ post_action='create_access_token',
+ rel=build_resource_relation(resource_name='access_tokens'))
+ self._add_resource(
+ mapper, oauth_controller,
+ path='/OS-OAUTH1/authorize/{request_token_id}',
+ path_vars={
+ 'request_token_id':
+ build_parameter_relation(parameter_name='request_token_id')
+ },
+ put_action='authorize_request_token',
+ rel=build_resource_relation(
+ resource_name='authorize_request_token'))
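A hedged sketch of the client side of the first flow step
(POST /OS-OAUTH1/request_token), built from the same oauthlib primitives that
core.py re-exports; the URL and credentials are placeholders, and any HTTP
library can carry the signed request::

    from oauthlib import oauth1

    consumer_key = '0123456789abcdef0123456789abcdef'     # placeholder
    consumer_secret = 'fedcba9876543210fedcba9876543210'  # placeholder
    url = 'https://keystone.example.com:5000/v3/OS-OAUTH1/request_token'

    client = oauth1.Client(consumer_key,
                           client_secret=consumer_secret,
                           signature_method=oauth1.SIGNATURE_HMAC)
    uri, headers, body = client.sign(
        url, http_method='POST',
        headers={'Requested-Project-Id': 'project-id-placeholder'})
    # 'headers' now includes the Authorization header that
    # get_oauth_headers() parses on the server side.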
diff --git a/keystone-moon/keystone/oauth1/schema.py b/keystone-moon/keystone/oauth1/schema.py
new file mode 100644
index 00000000..51c11afe
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/schema.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+
+_consumer_properties = {
+ 'description': validation.nullable(parameter_types.description)
+}
+
+consumer_create = {
+ 'type': 'object',
+ 'properties': _consumer_properties,
+ 'additionalProperties': True
+}
+
+consumer_update = {
+ 'type': 'object',
+ 'properties': _consumer_properties,
+ 'not': {
+ 'required': ['secret']
+ },
+ 'minProperties': 1,
+ 'additionalProperties': True
+}
diff --git a/keystone-moon/keystone/oauth1/validator.py b/keystone-moon/keystone/oauth1/validator.py
new file mode 100644
index 00000000..f21a02d7
--- /dev/null
+++ b/keystone-moon/keystone/oauth1/validator.py
@@ -0,0 +1,177 @@
+# Copyright 2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oAuthlib request validator."""
+
+import six
+
+from keystone.common import dependency
+from keystone import exception
+from keystone.oauth1 import core as oauth1
+
+
+METHOD_NAME = 'oauth_validator'
+
+
+@dependency.requires('oauth_api')
+class OAuthValidator(oauth1.RequestValidator):
+
+ # TODO(mhu) set as option probably?
+ @property
+ def enforce_ssl(self):
+ return False
+
+ @property
+ def safe_characters(self):
+ # oauth tokens are generated from a uuid hex value
+ return set("abcdef0123456789")
+
+ def _check_token(self, token):
+ # generic verification for tokens generated from a uuid hex value
+ return (set(token) <= self.safe_characters and
+ len(token) == 32)
+
+ def check_client_key(self, client_key):
+ return self._check_token(client_key)
+
+ def check_request_token(self, request_token):
+ return self._check_token(request_token)
+
+ def check_access_token(self, access_token):
+ return self._check_token(access_token)
+
+ def check_nonce(self, nonce):
+ # Assuming length is not a concern
+ return set(nonce) <= self.safe_characters
+
+ def check_verifier(self, verifier):
+ return (all(i in oauth1.VERIFIER_CHARS for i in verifier) and
+ len(verifier) == 8)
+
+ def get_client_secret(self, client_key, request):
+ client = self.oauth_api.get_consumer_with_secret(client_key)
+ return client['secret']
+
+ def get_request_token_secret(self, client_key, token, request):
+ token_ref = self.oauth_api.get_request_token(token)
+ return token_ref['request_secret']
+
+ def get_access_token_secret(self, client_key, token, request):
+ access_token = self.oauth_api.get_access_token(token)
+ return access_token['access_secret']
+
+ def get_default_realms(self, client_key, request):
+ # realms weren't implemented with the previous library
+ return []
+
+ def get_realms(self, token, request):
+ return []
+
+ def get_redirect_uri(self, token, request):
+ # OOB (out of band) is supposed to be the default value to use
+ return 'oob'
+
+ def get_rsa_key(self, client_key, request):
+ # HMAC signing is used, so return a dummy value
+ return ''
+
+ def invalidate_request_token(self, client_key, request_token, request):
+ # this method is invoked when an access token is generated out of a
+ # request token, to make sure that request token cannot be consumed
+ # anymore. This is done in the backend, so we do nothing here.
+ pass
+
+ def validate_client_key(self, client_key, request):
+ try:
+ return self.oauth_api.get_consumer(client_key) is not None
+ except exception.NotFound:
+ return False
+
+ def validate_request_token(self, client_key, token, request):
+ try:
+ return self.oauth_api.get_request_token(token) is not None
+ except exception.NotFound:
+ return False
+
+ def validate_access_token(self, client_key, token, request):
+ try:
+ return self.oauth_api.get_access_token(token) is not None
+ except exception.NotFound:
+ return False
+
+ def validate_timestamp_and_nonce(self,
+ client_key,
+ timestamp,
+ nonce,
+ request,
+ request_token=None,
+ access_token=None):
+ return True
+
+ def validate_redirect_uri(self, client_key, redirect_uri, request):
+ # we expect OOB, we don't really care
+ return True
+
+ def validate_requested_realms(self, client_key, realms, request):
+ # realms are not used
+ return True
+
+ def validate_realms(self,
+ client_key,
+ token,
+ request,
+ uri=None,
+ realms=None):
+ return True
+
+ def validate_verifier(self, client_key, token, verifier, request):
+ try:
+ req_token = self.oauth_api.get_request_token(token)
+ return req_token['verifier'] == verifier
+ except exception.NotFound:
+ return False
+
+ def verify_request_token(self, token, request):
+ # there aren't strong expectations on the request token format
+ return isinstance(token, six.string_types)
+
+ def verify_realms(self, token, realms, request):
+ return True
+
+ # The following save_XXX methods are called to create tokens. I chose to
+ # keep the original logic, but the comments below show how that could be
+ # implemented. The real implementation logic is in the backend.
+ def save_access_token(self, token, request):
+ pass
+# token_duration = CONF.oauth1.request_token_duration
+# request_token_id = request.client_key
+# self.oauth_api.create_access_token(request_token_id,
+# token_duration,
+# token["oauth_token"],
+# token["oauth_token_secret"])
+
+ def save_request_token(self, token, request):
+ pass
+# project_id = request.headers.get('Requested-Project-Id')
+# token_duration = CONF.oauth1.request_token_duration
+# self.oauth_api.create_request_token(request.client_key,
+# project_id,
+# token_duration,
+# token["oauth_token"],
+# token["oauth_token_secret"])
+
+ def save_verifier(self, token, verifier, request):
+ # keep the old logic for this, as it is done in two steps and requires
+ # information that the request validator has no access to
+ pass
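All of the token checks in this validator reduce to "is this a 32-character
lowercase hex string", since keystone mints its tokens from uuid4().hex; a
quick sketch::

    import uuid

    SAFE_CHARACTERS = set('abcdef0123456789')

    def check_token(token):
        return set(token) <= SAFE_CHARACTERS and len(token) == 32

    assert check_token(uuid.uuid4().hex)
    assert not check_token('not-a-token')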
diff --git a/keystone-moon/keystone/policy/__init__.py b/keystone-moon/keystone/policy/__init__.py
index 4cd96793..a95aac1f 100644
--- a/keystone-moon/keystone/policy/__init__.py
+++ b/keystone-moon/keystone/policy/__init__.py
@@ -14,4 +14,3 @@
from keystone.policy import controllers # noqa
from keystone.policy.core import * # noqa
-from keystone.policy import routers # noqa
diff --git a/keystone-moon/keystone/policy/backends/rules.py b/keystone-moon/keystone/policy/backends/rules.py
index a4150575..5a13287d 100644
--- a/keystone-moon/keystone/policy/backends/rules.py
+++ b/keystone-moon/keystone/policy/backends/rules.py
@@ -44,18 +44,18 @@ def init():
def enforce(credentials, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context.
- :param credentials: user credentials
- :param action: string representing the action to be checked, which
- should be colon separated for clarity.
- :param target: dictionary representing the object of the action
- for object creation this should be a dictionary
- representing the location of the object e.g.
- {'project_id': object.project_id}
- :raises: `exception.Forbidden` if verification fails.
-
- Actions should be colon separated for clarity. For example:
-
- * identity:list_users
+ :param credentials: user credentials
+ :param action: string representing the action to be checked, which should
+ be colon separated for clarity.
+ :param target: dictionary representing the object of the action; for
+ object creation this should be a dictionary representing the
+ location of the object, e.g. {'project_id':
+ object.project_id}
+ :raises keystone.exception.Forbidden: If verification fails.
+
+ Actions should be colon separated for clarity. For example:
+
+ * identity:list_users
"""
init()
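A call-site sketch matching the reflowed docstring (credential and target
contents are illustrative; enforce is the function defined above)::

    credentials = {'user_id': 'u1', 'roles': ['admin']}
    target = {'project_id': 'p1'}

    # With do_raise=True (the default) a failure raises
    # keystone.exception.Forbidden; with do_raise=False it returns a bool.
    allowed = enforce(credentials, 'identity:list_users', target,
                      do_raise=False)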
diff --git a/keystone-moon/keystone/policy/backends/sql.py b/keystone-moon/keystone/policy/backends/sql.py
index b2cccd01..94763f0d 100644
--- a/keystone-moon/keystone/policy/backends/sql.py
+++ b/keystone-moon/keystone/policy/backends/sql.py
@@ -30,19 +30,16 @@ class Policy(rules.Policy):
@sql.handle_conflicts(conflict_type='policy')
def create_policy(self, policy_id, policy):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = PolicyModel.from_dict(policy)
session.add(ref)
- return ref.to_dict()
+ return ref.to_dict()
def list_policies(self):
- session = sql.get_session()
-
- refs = session.query(PolicyModel).all()
- return [ref.to_dict() for ref in refs]
+ with sql.session_for_read() as session:
+ refs = session.query(PolicyModel).all()
+ return [ref.to_dict() for ref in refs]
def _get_policy(self, session, policy_id):
"""Private method to get a policy model object (NOT a dictionary)."""
@@ -52,15 +49,12 @@ class Policy(rules.Policy):
return ref
def get_policy(self, policy_id):
- session = sql.get_session()
-
- return self._get_policy(session, policy_id).to_dict()
+ with sql.session_for_read() as session:
+ return self._get_policy(session, policy_id).to_dict()
@sql.handle_conflicts(conflict_type='policy')
def update_policy(self, policy_id, policy):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_policy(session, policy_id)
old_dict = ref.to_dict()
old_dict.update(policy)
@@ -72,8 +66,6 @@ class Policy(rules.Policy):
return ref.to_dict()
def delete_policy(self, policy_id):
- session = sql.get_session()
-
- with session.begin():
+ with sql.session_for_write() as session:
ref = self._get_policy(session, policy_id)
session.delete(ref)
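The pattern this file migrates to, in isolation: session_for_read() and
session_for_write() are context managers that commit on success and roll back
on error, so the explicit get_session()/session.begin() bookkeeping disappears
from every driver method. A schematic sketch, not keystone's actual
implementation::

    import contextlib

    @contextlib.contextmanager
    def session_for_write():
        session = get_session()  # assumed factory, as in the old code
        with session.begin():    # commit on exit, rollback on exception
            yield session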
diff --git a/keystone-moon/keystone/policy/core.py b/keystone-moon/keystone/policy/core.py
index dfd6ff2d..f52795a5 100644
--- a/keystone-moon/keystone/policy/core.py
+++ b/keystone-moon/keystone/policy/core.py
@@ -100,7 +100,7 @@ class PolicyDriverV8(object):
def create_policy(self, policy_id, policy):
"""Store a policy blob.
- :raises: keystone.exception.Conflict
+ :raises keystone.exception.Conflict: If a duplicate policy exists.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -114,7 +114,7 @@ class PolicyDriverV8(object):
def get_policy(self, policy_id):
"""Retrieve a specific policy blob.
- :raises: keystone.exception.PolicyNotFound
+ :raises keystone.exception.PolicyNotFound: If the policy doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -123,7 +123,7 @@ class PolicyDriverV8(object):
def update_policy(self, policy_id, policy):
"""Update a policy blob.
- :raises: keystone.exception.PolicyNotFound
+ :raises keystone.exception.PolicyNotFound: If the policy doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -132,7 +132,7 @@ class PolicyDriverV8(object):
def delete_policy(self, policy_id):
"""Remove a policy blob.
- :raises: keystone.exception.PolicyNotFound
+ :raises keystone.exception.PolicyNotFound: If the policy doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/resource/V8_backends/__init__.py b/keystone-moon/keystone/resource/V8_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/resource/V8_backends/__init__.py
diff --git a/keystone-moon/keystone/resource/V8_backends/sql.py b/keystone-moon/keystone/resource/V8_backends/sql.py
new file mode 100644
index 00000000..6c9b7912
--- /dev/null
+++ b/keystone-moon/keystone/resource/V8_backends/sql.py
@@ -0,0 +1,260 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+
+from keystone.common import clean
+from keystone.common import driver_hints
+from keystone.common import sql
+from keystone import exception
+from keystone.i18n import _LE
+from keystone import resource as keystone_resource
+
+
+LOG = log.getLogger(__name__)
+
+
+class Resource(keystone_resource.ResourceDriverV8):
+
+ def default_assignment_driver(self):
+ return 'sql'
+
+ def _get_project(self, session, project_id):
+ project_ref = session.query(Project).get(project_id)
+ if project_ref is None:
+ raise exception.ProjectNotFound(project_id=project_id)
+ return project_ref
+
+ def get_project(self, tenant_id):
+ with sql.session_for_read() as session:
+ return self._get_project(session, tenant_id).to_dict()
+
+ def get_project_by_name(self, tenant_name, domain_id):
+ with sql.session_for_read() as session:
+ query = session.query(Project)
+ query = query.filter_by(name=tenant_name)
+ query = query.filter_by(domain_id=domain_id)
+ try:
+ project_ref = query.one()
+ except sql.NotFound:
+ raise exception.ProjectNotFound(project_id=tenant_name)
+ return project_ref.to_dict()
+
+ @driver_hints.truncated
+ def list_projects(self, hints):
+ with sql.session_for_read() as session:
+ query = session.query(Project)
+ project_refs = sql.filter_limit_query(Project, query, hints)
+ return [project_ref.to_dict() for project_ref in project_refs]
+
+ def list_projects_from_ids(self, ids):
+ if not ids:
+ return []
+ else:
+ with sql.session_for_read() as session:
+ query = session.query(Project)
+ query = query.filter(Project.id.in_(ids))
+ return [project_ref.to_dict() for project_ref in query.all()]
+
+ def list_project_ids_from_domain_ids(self, domain_ids):
+ if not domain_ids:
+ return []
+ else:
+ with sql.session_for_read() as session:
+ query = session.query(Project.id)
+ query = (
+ query.filter(Project.domain_id.in_(domain_ids)))
+ return [x.id for x in query.all()]
+
+ def list_projects_in_domain(self, domain_id):
+ with sql.session_for_read() as session:
+ self._get_domain(session, domain_id)
+ query = session.query(Project)
+ project_refs = query.filter_by(domain_id=domain_id)
+ return [project_ref.to_dict() for project_ref in project_refs]
+
+ def _get_children(self, session, project_ids):
+ query = session.query(Project)
+ query = query.filter(Project.parent_id.in_(project_ids))
+ project_refs = query.all()
+ return [project_ref.to_dict() for project_ref in project_refs]
+
+ def list_projects_in_subtree(self, project_id):
+ with sql.session_for_read() as session:
+ children = self._get_children(session, [project_id])
+ subtree = []
+ examined = set([project_id])
+ while children:
+ children_ids = set()
+ for ref in children:
+ if ref['id'] in examined:
+ msg = _LE('Circular reference or a repeated '
+ 'entry found in projects hierarchy - '
+ '%(project_id)s.')
+ LOG.error(msg, {'project_id': ref['id']})
+ return
+ children_ids.add(ref['id'])
+
+ examined.update(children_ids)
+ subtree += children
+ children = self._get_children(session, children_ids)
+ return subtree
+
+ def list_project_parents(self, project_id):
+ with sql.session_for_read() as session:
+ project = self._get_project(session, project_id).to_dict()
+ parents = []
+ examined = set()
+ while project.get('parent_id') is not None:
+ if project['id'] in examined:
+ msg = _LE('Circular reference or a repeated '
+ 'entry found in projects hierarchy - '
+ '%(project_id)s.')
+ LOG.error(msg, {'project_id': project['id']})
+ return
+
+ examined.add(project['id'])
+ parent_project = self._get_project(
+ session, project['parent_id']).to_dict()
+ parents.append(parent_project)
+ project = parent_project
+ return parents
+
+ def is_leaf_project(self, project_id):
+ with sql.session_for_read() as session:
+ project_refs = self._get_children(session, [project_id])
+ return not project_refs
+
+ # CRUD
+ @sql.handle_conflicts(conflict_type='project')
+ def create_project(self, tenant_id, tenant):
+ tenant['name'] = clean.project_name(tenant['name'])
+ with sql.session_for_write() as session:
+ tenant_ref = Project.from_dict(tenant)
+ session.add(tenant_ref)
+ return tenant_ref.to_dict()
+
+ @sql.handle_conflicts(conflict_type='project')
+ def update_project(self, tenant_id, tenant):
+ if 'name' in tenant:
+ tenant['name'] = clean.project_name(tenant['name'])
+
+ with sql.session_for_write() as session:
+ tenant_ref = self._get_project(session, tenant_id)
+ old_project_dict = tenant_ref.to_dict()
+ for k in tenant:
+ old_project_dict[k] = tenant[k]
+ new_project = Project.from_dict(old_project_dict)
+ for attr in Project.attributes:
+ if attr != 'id':
+ setattr(tenant_ref, attr, getattr(new_project, attr))
+ tenant_ref.extra = new_project.extra
+ return tenant_ref.to_dict(include_extra_dict=True)
+
+ @sql.handle_conflicts(conflict_type='project')
+ def delete_project(self, tenant_id):
+ with sql.session_for_write() as session:
+ tenant_ref = self._get_project(session, tenant_id)
+ session.delete(tenant_ref)
+
+ # domain crud
+
+ @sql.handle_conflicts(conflict_type='domain')
+ def create_domain(self, domain_id, domain):
+ with sql.session_for_write() as session:
+ ref = Domain.from_dict(domain)
+ session.add(ref)
+ return ref.to_dict()
+
+ @driver_hints.truncated
+ def list_domains(self, hints):
+ with sql.session_for_read() as session:
+ query = session.query(Domain)
+ refs = sql.filter_limit_query(Domain, query, hints)
+ return [ref.to_dict() for ref in refs]
+
+ def list_domains_from_ids(self, ids):
+ if not ids:
+ return []
+ else:
+ with sql.session_for_read() as session:
+ query = session.query(Domain)
+ query = query.filter(Domain.id.in_(ids))
+ domain_refs = query.all()
+ return [domain_ref.to_dict() for domain_ref in domain_refs]
+
+ def _get_domain(self, session, domain_id):
+ ref = session.query(Domain).get(domain_id)
+ if ref is None:
+ raise exception.DomainNotFound(domain_id=domain_id)
+ return ref
+
+ def get_domain(self, domain_id):
+ with sql.session_for_read() as session:
+ return self._get_domain(session, domain_id).to_dict()
+
+ def get_domain_by_name(self, domain_name):
+ with sql.session_for_read() as session:
+ try:
+ ref = (session.query(Domain).
+ filter_by(name=domain_name).one())
+ except sql.NotFound:
+ raise exception.DomainNotFound(domain_id=domain_name)
+ return ref.to_dict()
+
+ @sql.handle_conflicts(conflict_type='domain')
+ def update_domain(self, domain_id, domain):
+ with sql.session_for_write() as session:
+ ref = self._get_domain(session, domain_id)
+ old_dict = ref.to_dict()
+ for k in domain:
+ old_dict[k] = domain[k]
+ new_domain = Domain.from_dict(old_dict)
+ for attr in Domain.attributes:
+ if attr != 'id':
+ setattr(ref, attr, getattr(new_domain, attr))
+ ref.extra = new_domain.extra
+ return ref.to_dict()
+
+ def delete_domain(self, domain_id):
+ with sql.session_for_write() as session:
+ ref = self._get_domain(session, domain_id)
+ session.delete(ref)
+
+
+class Domain(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'domain'
+ attributes = ['id', 'name', 'enabled']
+ id = sql.Column(sql.String(64), primary_key=True)
+ name = sql.Column(sql.String(64), nullable=False)
+ enabled = sql.Column(sql.Boolean, default=True, nullable=False)
+ extra = sql.Column(sql.JsonBlob())
+ __table_args__ = (sql.UniqueConstraint('name'),)
+
+
+class Project(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'project'
+ attributes = ['id', 'name', 'domain_id', 'description', 'enabled',
+ 'parent_id', 'is_domain']
+ id = sql.Column(sql.String(64), primary_key=True)
+ name = sql.Column(sql.String(64), nullable=False)
+ domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
+ nullable=False)
+ description = sql.Column(sql.Text())
+ enabled = sql.Column(sql.Boolean)
+ extra = sql.Column(sql.JsonBlob())
+ parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'))
+ is_domain = sql.Column(sql.Boolean, default=False, nullable=False,
+ server_default='0')
+ # Unique constraint across two columns to create the separation
+ # rather than just only 'name' being unique
+ __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),)
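
This new file appears to freeze the pre-Mitaka (V8) SQL resource driver under V8_backends/ so deployments pinning the old interface can keep loading it while the live backend moves to V9 (see the following hunks). One detail worth noting is the subtree walk in list_projects_in_subtree: a breadth-first descent guarded by an ``examined`` set so a corrupted, circular hierarchy cannot loop forever. A self-contained sketch of the same traversal over a plain dict, raising instead of logging for brevity:

    children_by_parent = {
        'root': ['a', 'b'],
        'a': ['c'],
        'b': [],
        'c': [],
    }

    def list_subtree(project_id):
        subtree, examined = [], {project_id}
        frontier = list(children_by_parent.get(project_id, []))
        while frontier:
            next_frontier = []
            for child in frontier:
                if child in examined:
                    raise ValueError('circular reference at %s' % child)
                examined.add(child)
                subtree.append(child)
                next_frontier.extend(children_by_parent.get(child, []))
            frontier = next_frontier
        return subtree

    assert list_subtree('root') == ['a', 'b', 'c']
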
diff --git a/keystone-moon/keystone/resource/__init__.py b/keystone-moon/keystone/resource/__init__.py
index c0070a12..7f879f4b 100644
--- a/keystone-moon/keystone/resource/__init__.py
+++ b/keystone-moon/keystone/resource/__init__.py
@@ -12,4 +12,3 @@
from keystone.resource import controllers # noqa
from keystone.resource.core import * # noqa
-from keystone.resource import routers # noqa
diff --git a/keystone-moon/keystone/resource/backends/sql.py b/keystone-moon/keystone/resource/backends/sql.py
index 59bab372..39bb4f3b 100644
--- a/keystone-moon/keystone/resource/backends/sql.py
+++ b/keystone-moon/keystone/resource/backends/sql.py
@@ -10,87 +10,123 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_log import log
from keystone.common import clean
+from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
-from keystone.i18n import _LE
+from keystone.i18n import _LE, _LW
from keystone import resource as keystone_resource
-CONF = cfg.CONF
LOG = log.getLogger(__name__)
-class Resource(keystone_resource.ResourceDriverV8):
+class Resource(keystone_resource.ResourceDriverV9):
def default_assignment_driver(self):
return 'sql'
+ def _encode_domain_id(self, ref):
+ if 'domain_id' in ref and ref['domain_id'] is None:
+ new_ref = ref.copy()
+ new_ref['domain_id'] = keystone_resource.NULL_DOMAIN_ID
+ return new_ref
+ else:
+ return ref
+
+ def _is_hidden_ref(self, ref):
+ return ref.id == keystone_resource.NULL_DOMAIN_ID
+
def _get_project(self, session, project_id):
project_ref = session.query(Project).get(project_id)
- if project_ref is None:
+ if project_ref is None or self._is_hidden_ref(project_ref):
raise exception.ProjectNotFound(project_id=project_id)
return project_ref
- def get_project(self, tenant_id):
- with sql.transaction() as session:
- return self._get_project(session, tenant_id).to_dict()
+ def get_project(self, project_id):
+ with sql.session_for_read() as session:
+ return self._get_project(session, project_id).to_dict()
- def get_project_by_name(self, tenant_name, domain_id):
- with sql.transaction() as session:
+ def get_project_by_name(self, project_name, domain_id):
+ with sql.session_for_read() as session:
query = session.query(Project)
- query = query.filter_by(name=tenant_name)
- query = query.filter_by(domain_id=domain_id)
+ query = query.filter_by(name=project_name)
+ if domain_id is None:
+ query = query.filter_by(
+ domain_id=keystone_resource.NULL_DOMAIN_ID)
+ else:
+ query = query.filter_by(domain_id=domain_id)
try:
project_ref = query.one()
except sql.NotFound:
- raise exception.ProjectNotFound(project_id=tenant_name)
+ raise exception.ProjectNotFound(project_id=project_name)
+
+ if self._is_hidden_ref(project_ref):
+ raise exception.ProjectNotFound(project_id=project_name)
return project_ref.to_dict()
- @sql.truncated
+ @driver_hints.truncated
def list_projects(self, hints):
- with sql.transaction() as session:
+ # If there is a filter on domain_id and the value is None, then to
+ # ensure that the sql filtering works correctly, we need to patch
+ # the value to be NULL_DOMAIN_ID. This is safe to do here since we
+ # know we are able to satisfy any filter of this type in the call to
+ # filter_limit_query() below, which will remove the filter from the
+ # hints (hence ensuring our substitution is not exposed to the caller).
+ for f in hints.filters:
+ if (f['name'] == 'domain_id' and f['value'] is None):
+ f['value'] = keystone_resource.NULL_DOMAIN_ID
+ with sql.session_for_read() as session:
query = session.query(Project)
project_refs = sql.filter_limit_query(Project, query, hints)
- return [project_ref.to_dict() for project_ref in project_refs]
+ return [project_ref.to_dict() for project_ref in project_refs
+ if not self._is_hidden_ref(project_ref)]
def list_projects_from_ids(self, ids):
if not ids:
return []
else:
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Project)
query = query.filter(Project.id.in_(ids))
- return [project_ref.to_dict() for project_ref in query.all()]
+ return [project_ref.to_dict() for project_ref in query.all()
+ if not self._is_hidden_ref(project_ref)]
def list_project_ids_from_domain_ids(self, domain_ids):
if not domain_ids:
return []
else:
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
query = session.query(Project.id)
query = (
query.filter(Project.domain_id.in_(domain_ids)))
- return [x.id for x in query.all()]
+ return [x.id for x in query.all()
+ if not self._is_hidden_ref(x)]
def list_projects_in_domain(self, domain_id):
- with sql.transaction() as session:
- self._get_domain(session, domain_id)
+ with sql.session_for_read() as session:
+ try:
+ self._get_project(session, domain_id)
+ except exception.ProjectNotFound:
+ raise exception.DomainNotFound(domain_id=domain_id)
query = session.query(Project)
- project_refs = query.filter_by(domain_id=domain_id)
+ project_refs = query.filter(Project.domain_id == domain_id)
return [project_ref.to_dict() for project_ref in project_refs]
- def _get_children(self, session, project_ids):
+ def list_projects_acting_as_domain(self, hints):
+ hints.add_filter('is_domain', True)
+ return self.list_projects(hints)
+
+ def _get_children(self, session, project_ids, domain_id=None):
query = session.query(Project)
query = query.filter(Project.parent_id.in_(project_ids))
project_refs = query.all()
return [project_ref.to_dict() for project_ref in project_refs]
def list_projects_in_subtree(self, project_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
children = self._get_children(session, [project_id])
subtree = []
examined = set([project_id])
@@ -111,7 +147,7 @@ class Resource(keystone_resource.ResourceDriverV8):
return subtree
def list_project_parents(self, project_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
project = self._get_project(session, project_id).to_dict()
parents = []
examined = set()
@@ -131,105 +167,61 @@ class Resource(keystone_resource.ResourceDriverV8):
return parents
def is_leaf_project(self, project_id):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
project_refs = self._get_children(session, [project_id])
return not project_refs
# CRUD
@sql.handle_conflicts(conflict_type='project')
- def create_project(self, tenant_id, tenant):
- tenant['name'] = clean.project_name(tenant['name'])
- with sql.transaction() as session:
- tenant_ref = Project.from_dict(tenant)
- session.add(tenant_ref)
- return tenant_ref.to_dict()
+ def create_project(self, project_id, project):
+ project['name'] = clean.project_name(project['name'])
+ new_project = self._encode_domain_id(project)
+ with sql.session_for_write() as session:
+ project_ref = Project.from_dict(new_project)
+ session.add(project_ref)
+ return project_ref.to_dict()
@sql.handle_conflicts(conflict_type='project')
- def update_project(self, tenant_id, tenant):
- if 'name' in tenant:
- tenant['name'] = clean.project_name(tenant['name'])
-
- with sql.transaction() as session:
- tenant_ref = self._get_project(session, tenant_id)
- old_project_dict = tenant_ref.to_dict()
- for k in tenant:
- old_project_dict[k] = tenant[k]
+ def update_project(self, project_id, project):
+ if 'name' in project:
+ project['name'] = clean.project_name(project['name'])
+
+ update_project = self._encode_domain_id(project)
+ with sql.session_for_write() as session:
+ project_ref = self._get_project(session, project_id)
+ old_project_dict = project_ref.to_dict()
+ for k in update_project:
+ old_project_dict[k] = update_project[k]
+ # When we read the old_project_dict, any "null" domain_id will have
+ # been decoded, so we need to re-encode it
+ old_project_dict = self._encode_domain_id(old_project_dict)
new_project = Project.from_dict(old_project_dict)
for attr in Project.attributes:
if attr != 'id':
- setattr(tenant_ref, attr, getattr(new_project, attr))
- tenant_ref.extra = new_project.extra
- return tenant_ref.to_dict(include_extra_dict=True)
+ setattr(project_ref, attr, getattr(new_project, attr))
+ project_ref.extra = new_project.extra
+ return project_ref.to_dict(include_extra_dict=True)
@sql.handle_conflicts(conflict_type='project')
- def delete_project(self, tenant_id):
- with sql.transaction() as session:
- tenant_ref = self._get_project(session, tenant_id)
- session.delete(tenant_ref)
-
- # domain crud
-
- @sql.handle_conflicts(conflict_type='domain')
- def create_domain(self, domain_id, domain):
- with sql.transaction() as session:
- ref = Domain.from_dict(domain)
- session.add(ref)
- return ref.to_dict()
-
- @sql.truncated
- def list_domains(self, hints):
- with sql.transaction() as session:
- query = session.query(Domain)
- refs = sql.filter_limit_query(Domain, query, hints)
- return [ref.to_dict() for ref in refs]
-
- def list_domains_from_ids(self, ids):
- if not ids:
- return []
- else:
- with sql.transaction() as session:
- query = session.query(Domain)
- query = query.filter(Domain.id.in_(ids))
- domain_refs = query.all()
- return [domain_ref.to_dict() for domain_ref in domain_refs]
-
- def _get_domain(self, session, domain_id):
- ref = session.query(Domain).get(domain_id)
- if ref is None:
- raise exception.DomainNotFound(domain_id=domain_id)
- return ref
-
- def get_domain(self, domain_id):
- with sql.transaction() as session:
- return self._get_domain(session, domain_id).to_dict()
-
- def get_domain_by_name(self, domain_name):
- with sql.transaction() as session:
- try:
- ref = (session.query(Domain).
- filter_by(name=domain_name).one())
- except sql.NotFound:
- raise exception.DomainNotFound(domain_id=domain_name)
- return ref.to_dict()
-
- @sql.handle_conflicts(conflict_type='domain')
- def update_domain(self, domain_id, domain):
- with sql.transaction() as session:
- ref = self._get_domain(session, domain_id)
- old_dict = ref.to_dict()
- for k in domain:
- old_dict[k] = domain[k]
- new_domain = Domain.from_dict(old_dict)
- for attr in Domain.attributes:
- if attr != 'id':
- setattr(ref, attr, getattr(new_domain, attr))
- ref.extra = new_domain.extra
- return ref.to_dict()
+ def delete_project(self, project_id):
+ with sql.session_for_write() as session:
+ project_ref = self._get_project(session, project_id)
+ session.delete(project_ref)
- def delete_domain(self, domain_id):
- with sql.transaction() as session:
- ref = self._get_domain(session, domain_id)
- session.delete(ref)
+ @sql.handle_conflicts(conflict_type='project')
+ def delete_projects_from_ids(self, project_ids):
+ if not project_ids:
+ return
+ with sql.session_for_write() as session:
+ query = session.query(Project).filter(Project.id.in_(
+ project_ids))
+ project_ids_from_db = [p['id'] for p in query.all()]
+ for project_id in project_ids:
+ if (project_id not in project_ids_from_db or
+ project_id == keystone_resource.NULL_DOMAIN_ID):
+ LOG.warning(_LW('Project %s does not exist and was not '
+ 'deleted.'), project_id)
+ query.delete(synchronize_session=False)
class Domain(sql.ModelBase, sql.DictBase):
@@ -239,22 +231,37 @@ class Domain(sql.ModelBase, sql.DictBase):
name = sql.Column(sql.String(64), nullable=False)
enabled = sql.Column(sql.Boolean, default=True, nullable=False)
extra = sql.Column(sql.JsonBlob())
- __table_args__ = (sql.UniqueConstraint('name'), {})
+ __table_args__ = (sql.UniqueConstraint('name'),)
class Project(sql.ModelBase, sql.DictBase):
+ # NOTE(henry-nash): From the manager and above perspective, the domain_id
+ # is nullable. However, to ensure uniqueness in multi-process
+ # configurations, it is better to still use the sql uniqueness constraint.
+ # Since the support for a nullable component of a uniqueness constraint
+ # across different sql databases is mixed, we instead store a special value
+ # to represent null, as defined in NULL_DOMAIN_ID above.
+
+ def to_dict(self, include_extra_dict=False):
+ d = super(Project, self).to_dict(
+ include_extra_dict=include_extra_dict)
+ if d['domain_id'] == keystone_resource.NULL_DOMAIN_ID:
+ d['domain_id'] = None
+ return d
+
__tablename__ = 'project'
attributes = ['id', 'name', 'domain_id', 'description', 'enabled',
'parent_id', 'is_domain']
id = sql.Column(sql.String(64), primary_key=True)
name = sql.Column(sql.String(64), nullable=False)
- domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
+ domain_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'),
nullable=False)
description = sql.Column(sql.Text())
enabled = sql.Column(sql.Boolean)
extra = sql.Column(sql.JsonBlob())
parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'))
- is_domain = sql.Column(sql.Boolean, default=False, nullable=False)
+ is_domain = sql.Column(sql.Boolean, default=False, nullable=False,
+ server_default='0')
# Unique constraint across two columns to create the separation
# rather than just only 'name' being unique
- __table_args__ = (sql.UniqueConstraint('domain_id', 'name'), {})
+ __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),)
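
The rewritten driver above introduces the NULL_DOMAIN_ID sentinel: most SQL backends treat NULLs as distinct in UNIQUE constraints, so a genuinely nullable domain_id would let two domain-less projects share a name. Storing a well-known sentinel instead keeps the (domain_id, name) constraint effective, and explains why the domain_id foreign key is retargeted at project.id: once domains are projects, the sentinel row lives in the project table itself. A minimal sketch of the encode/decode pair (the exact sentinel value is defined in keystone's resource core; treat the one below as illustrative):

    NULL_DOMAIN_ID = '<<keystone.domain.root>>'  # illustrative sentinel value

    def encode_domain_id(ref):
        # Replace a None domain_id with the sentinel before writing.
        if 'domain_id' in ref and ref['domain_id'] is None:
            return dict(ref, domain_id=NULL_DOMAIN_ID)
        return ref

    def decode_domain_id(ref):
        # Turn the sentinel back into None when reading.
        if ref.get('domain_id') == NULL_DOMAIN_ID:
            return dict(ref, domain_id=None)
        return ref

    assert decode_domain_id(encode_domain_id({'domain_id': None})) == \
        {'domain_id': None}
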
diff --git a/keystone-moon/keystone/resource/config_backends/sql.py b/keystone-moon/keystone/resource/config_backends/sql.py
index 7c296074..6413becc 100644
--- a/keystone-moon/keystone/resource/config_backends/sql.py
+++ b/keystone-moon/keystone/resource/config_backends/sql.py
@@ -59,12 +59,12 @@ class DomainConfig(resource.DomainConfigDriverV8):
@sql.handle_conflicts(conflict_type='domain_config')
def create_config_option(self, domain_id, group, option, value,
sensitive=False):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
config_table = self.choose_table(sensitive)
ref = config_table(domain_id=domain_id, group=group,
option=option, value=value)
session.add(ref)
- return ref.to_dict()
+ return ref.to_dict()
def _get_config_option(self, session, domain_id, group, option, sensitive):
try:
@@ -80,14 +80,14 @@ class DomainConfig(resource.DomainConfigDriverV8):
return ref
def get_config_option(self, domain_id, group, option, sensitive=False):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
ref = self._get_config_option(session, domain_id, group, option,
sensitive)
- return ref.to_dict()
+ return ref.to_dict()
def list_config_options(self, domain_id, group=None, option=None,
sensitive=False):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
config_table = self.choose_table(sensitive)
query = session.query(config_table)
query = query.filter_by(domain_id=domain_id)
@@ -99,11 +99,11 @@ class DomainConfig(resource.DomainConfigDriverV8):
def update_config_option(self, domain_id, group, option, value,
sensitive=False):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = self._get_config_option(session, domain_id, group, option,
sensitive)
ref.value = value
- return ref.to_dict()
+ return ref.to_dict()
def delete_config_options(self, domain_id, group=None, option=None,
sensitive=False):
@@ -114,7 +114,7 @@ class DomainConfig(resource.DomainConfigDriverV8):
if there was nothing to delete.
"""
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
config_table = self.choose_table(sensitive)
query = session.query(config_table)
query = query.filter_by(domain_id=domain_id)
@@ -126,25 +126,25 @@ class DomainConfig(resource.DomainConfigDriverV8):
def obtain_registration(self, domain_id, type):
try:
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = ConfigRegister(type=type, domain_id=domain_id)
session.add(ref)
return True
- except sql.DBDuplicateEntry:
+ except sql.DBDuplicateEntry: # nosec
+ # Continue on and return False to indicate failure.
pass
return False
def read_registration(self, type):
- with sql.transaction() as session:
+ with sql.session_for_read() as session:
ref = session.query(ConfigRegister).get(type)
if not ref:
raise exception.ConfigRegistrationNotFound()
- return ref.domain_id
+ return ref.domain_id
def release_registration(self, domain_id, type=None):
"""Silently delete anything registered for the domain specified."""
-
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
query = session.query(ConfigRegister)
if type:
query = query.filter_by(type=type)
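
obtain_registration above is a small cross-process election: the INSERT either succeeds (this process owns the registration) or raises DBDuplicateEntry (another process got there first), so the database's uniqueness guarantee doubles as the lock; the new ``# nosec`` marker just tells bandit that the try/except/pass is intentional. A self-contained sketch of the pattern, with sqlite3 standing in for the real backend:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute(
        'CREATE TABLE config_register (type TEXT PRIMARY KEY, domain_id TEXT)')

    def obtain_registration(domain_id, type_):
        try:
            with conn:  # commits on success, rolls back on error
                conn.execute('INSERT INTO config_register VALUES (?, ?)',
                             (type_, domain_id))
            return True
        except sqlite3.IntegrityError:
            # Another writer already holds this registration.
            return False

    assert obtain_registration('domain-1', 'default') is True
    assert obtain_registration('domain-2', 'default') is False
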
diff --git a/keystone-moon/keystone/resource/controllers.py b/keystone-moon/keystone/resource/controllers.py
index 4fbeb715..5cabe064 100644
--- a/keystone-moon/keystone/resource/controllers.py
+++ b/keystone-moon/keystone/resource/controllers.py
@@ -18,7 +18,6 @@
import uuid
from oslo_config import cfg
-from oslo_log import log
from keystone.common import controller
from keystone.common import dependency
@@ -31,7 +30,6 @@ from keystone.resource import schema
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
@dependency.requires('resource_api')
@@ -40,13 +38,18 @@ class Tenant(controller.V2Controller):
@controller.v2_deprecated
def get_all_projects(self, context, **kw):
"""Gets a list of all tenants for an admin user."""
+ self.assert_admin(context)
+
if 'name' in context['query_string']:
- return self.get_project_by_name(
- context, context['query_string'].get('name'))
+ return self._get_project_by_name(context['query_string']['name'])
- self.assert_admin(context)
- tenant_refs = self.resource_api.list_projects_in_domain(
- CONF.identity.default_domain_id)
+ try:
+ tenant_refs = self.resource_api.list_projects_in_domain(
+ CONF.identity.default_domain_id)
+ except exception.DomainNotFound:
+ # If the default domain doesn't exist then there are no V2
+ # projects.
+ tenant_refs = []
tenant_refs = [self.v3_to_v2_project(tenant_ref)
for tenant_ref in tenant_refs
if not tenant_ref.get('is_domain')]
@@ -71,12 +74,11 @@ class Tenant(controller.V2Controller):
self._assert_not_is_domain_project(tenant_id, ref)
return {'tenant': self.v3_to_v2_project(ref)}
- @controller.v2_deprecated
- def get_project_by_name(self, context, tenant_name):
- self.assert_admin(context)
+ def _get_project_by_name(self, tenant_name):
# Projects acting as a domain should not be visible via v2
ref = self.resource_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
+ self._assert_not_is_domain_project(ref['id'], ref)
return {'tenant': self.v3_to_v2_project(ref)}
# CRUD Extension
@@ -88,7 +90,15 @@ class Tenant(controller.V2Controller):
msg = _('Name field is required and cannot be empty')
raise exception.ValidationError(message=msg)
+ if 'is_domain' in tenant_ref:
+ msg = _('The creation of projects acting as domains is not '
+ 'allowed in v2.')
+ raise exception.ValidationError(message=msg)
+
self.assert_admin(context)
+
+ self.resource_api.ensure_default_domain_exists()
+
tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
initiator = notifications._get_request_audit_info(context)
tenant = self.resource_api.create_project(
@@ -162,11 +172,13 @@ class DomainV3(controller.V3Controller):
@dependency.requires('domain_config_api')
+@dependency.requires('resource_api')
class DomainConfigV3(controller.V3Controller):
member_name = 'config'
@controller.protected()
def create_domain_config(self, context, domain_id, config):
+ self.resource_api.get_domain(domain_id)
original_config = (
self.domain_config_api.get_config_with_sensitive_info(domain_id))
ref = self.domain_config_api.create_config(domain_id, config)
@@ -179,29 +191,39 @@ class DomainConfigV3(controller.V3Controller):
@controller.protected()
def get_domain_config(self, context, domain_id, group=None, option=None):
+ self.resource_api.get_domain(domain_id)
ref = self.domain_config_api.get_config(domain_id, group, option)
return {self.member_name: ref}
@controller.protected()
def update_domain_config(
self, context, domain_id, config, group, option):
+ self.resource_api.get_domain(domain_id)
ref = self.domain_config_api.update_config(
domain_id, config, group, option)
return wsgi.render_response(body={self.member_name: ref})
def update_domain_config_group(self, context, domain_id, group, config):
+ self.resource_api.get_domain(domain_id)
return self.update_domain_config(
context, domain_id, config, group, option=None)
def update_domain_config_only(self, context, domain_id, config):
+ self.resource_api.get_domain(domain_id)
return self.update_domain_config(
context, domain_id, config, group=None, option=None)
@controller.protected()
def delete_domain_config(
self, context, domain_id, group=None, option=None):
+ self.resource_api.get_domain(domain_id)
self.domain_config_api.delete_config(domain_id, group, option)
+ @controller.protected()
+ def get_domain_config_default(self, context, group=None, option=None):
+ ref = self.domain_config_api.get_config_default(group, option)
+ return {self.member_name: ref}
+
@dependency.requires('resource_api')
class ProjectV3(controller.V3Controller):
@@ -216,25 +238,31 @@ class ProjectV3(controller.V3Controller):
@validation.validated(schema.project_create, 'project')
def create_project(self, context, project):
ref = self._assign_unique_id(self._normalize_dict(project))
- ref = self._normalize_domain_id(context, ref)
- if ref.get('is_domain'):
- msg = _('The creation of projects acting as domains is not '
- 'allowed yet.')
- raise exception.NotImplemented(msg)
+ if not ref.get('is_domain'):
+ ref = self._normalize_domain_id(context, ref)
+ # Our API requires that you specify the location in the hierarchy
+ # unambiguously. This could be by parent_id or, if it is a top level
+ # project, just by providing a domain_id.
+ if not ref.get('parent_id'):
+ ref['parent_id'] = ref.get('domain_id')
initiator = notifications._get_request_audit_info(context)
try:
ref = self.resource_api.create_project(ref['id'], ref,
initiator=initiator)
- except exception.DomainNotFound as e:
+ except (exception.DomainNotFound, exception.ProjectNotFound) as e:
raise exception.ValidationError(e)
return ProjectV3.wrap_member(context, ref)
@controller.filterprotected('domain_id', 'enabled', 'name',
- 'parent_id')
+ 'parent_id', 'is_domain')
def list_projects(self, context, filters):
hints = ProjectV3.build_driver_hints(context, filters)
+ # If 'is_domain' has not been included as a query, we default it to
+ # False (which in query terms means '0'
+ if 'is_domain' not in context['query_string']:
+ hints.add_filter('is_domain', '0')
refs = self.resource_api.list_projects(hints=hints)
return ProjectV3.wrap_collection(context, refs, hints=hints)
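
The list_projects change above hides projects acting as domains from v3 project listings unless the caller filters on is_domain explicitly, by injecting a default ``is_domain = '0'`` filter into the driver hints ('0' being false in query-string terms). A minimal sketch with a stand-in Hints class (the real one lives in keystone.common.driver_hints):

    class Hints(object):
        def __init__(self):
            self.filters = []

        def add_filter(self, name, value):
            self.filters.append({'name': name, 'value': value})

    def build_hints(query_string):
        hints = Hints()
        # Unless the caller asked about is_domain explicitly, default the
        # filter to false so projects acting as domains stay hidden.
        if 'is_domain' not in query_string:
            hints.add_filter('is_domain', '0')
        return hints

    assert build_hints({}).filters == [{'name': 'is_domain', 'value': '0'}]
    assert build_hints({'is_domain': 'true'}).filters == []
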
diff --git a/keystone-moon/keystone/resource/core.py b/keystone-moon/keystone/resource/core.py
index 6891c572..f8d72e91 100644
--- a/keystone-moon/keystone/resource/core.py
+++ b/keystone-moon/keystone/resource/core.py
@@ -13,16 +13,20 @@
"""Main entry point into the Resource service."""
import abc
+import copy
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
import six
+from keystone import assignment
from keystone.common import cache
from keystone.common import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
+from keystone.common import utils
from keystone import exception
from keystone.i18n import _, _LE, _LW
from keystone import notifications
@@ -30,18 +34,27 @@ from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='resource')
+MEMOIZE = cache.get_memoization_decorator(group='resource')
def calc_default_domain():
return {'description':
- (u'Owns users and tenants (i.e. projects)'
- ' available on Identity API v2.'),
+ (u'The default domain'),
'enabled': True,
'id': CONF.identity.default_domain_id,
'name': u'Default'}
+def _get_project_from_domain(domain_ref):
+ """Creates a project ref from the provided domain ref."""
+ project_ref = domain_ref.copy()
+ project_ref['is_domain'] = True
+ project_ref['domain_id'] = None
+ project_ref['parent_id'] = None
+
+ return project_ref
+
+
@dependency.provider('resource_api')
@dependency.requires('assignment_api', 'credential_api', 'domain_config_api',
'identity_api', 'revoke_api')
@@ -69,48 +82,171 @@ class Manager(manager.Manager):
super(Manager, self).__init__(resource_driver)
+ # Make sure it is a driver version we support, and if it is a legacy
+ # driver, then wrap it.
+ if isinstance(self.driver, ResourceDriverV8):
+ self.driver = V9ResourceWrapperForV8Driver(self.driver)
+ elif not isinstance(self.driver, ResourceDriverV9):
+ raise exception.UnsupportedDriverVersion(driver=resource_driver)
+
def _get_hierarchy_depth(self, parents_list):
return len(parents_list) + 1
def _assert_max_hierarchy_depth(self, project_id, parents_list=None):
if parents_list is None:
parents_list = self.list_project_parents(project_id)
- max_depth = CONF.max_project_tree_depth
+ # NOTE(henry-nash): In upgrading to a scenario where domains are
+ # represented as projects acting as domains, we will effectively
+ # increase the depth of any existing project hierarchy by one. To avoid
+ # pushing any existing hierarchies over the limit, we add one to the
+ # maximum depth allowed, as specified in the configuration file.
+ max_depth = CONF.max_project_tree_depth + 1
if self._get_hierarchy_depth(parents_list) > max_depth:
- raise exception.ForbiddenAction(
- action=_('max hierarchy depth reached for '
- '%s branch.') % project_id)
-
- def create_project(self, tenant_id, tenant, initiator=None):
- tenant = tenant.copy()
- tenant.setdefault('enabled', True)
- tenant['enabled'] = clean.project_enabled(tenant['enabled'])
- tenant.setdefault('description', '')
- tenant.setdefault('parent_id', None)
- tenant.setdefault('is_domain', False)
-
- self.get_domain(tenant.get('domain_id'))
- if tenant.get('parent_id') is not None:
- parent_ref = self.get_project(tenant.get('parent_id'))
- parents_list = self.list_project_parents(parent_ref['id'])
+ raise exception.ForbiddenNotSecurity(
+ _('Max hierarchy depth reached for %s branch.') % project_id)
+
+ def _assert_is_domain_project_constraints(self, project_ref):
+ """Enforces specific constraints of projects that act as domains
+
+ Called when is_domain is true, this method ensures that:
+
+ * multiple domains are enabled
+ * the project name is not the reserved name for a federated domain
+ * the project is a root project
+
+ :raises keystone.exception.ValidationError: If one of the constraints
+ was not satisfied.
+ """
+ if (not self.identity_api.multiple_domains_supported and
+ project_ref['id'] != CONF.identity.default_domain_id):
+ raise exception.ValidationError(
+ message=_('Multiple domains are not supported'))
+
+ self.assert_domain_not_federated(project_ref['id'], project_ref)
+
+ if project_ref['parent_id']:
+ raise exception.ValidationError(
+ message=_('only root projects are allowed to act as '
+ 'domains.'))
+
+ def _assert_regular_project_constraints(self, project_ref):
+ """Enforces regular project hierarchy constraints
+
+ Called when is_domain is false. The project must contain a valid
+ domain_id and parent_id. The goal of this method is to check
+ that the domain_id specified is consistent with the domain of its
+ parent.
+
+ :raises keystone.exception.ValidationError: If one of the constraints
+ was not satisfied.
+ :raises keystone.exception.DomainNotFound: In case the domain is not
+ found.
+ """
+ # Ensure domain_id is valid, and by inference will not be None.
+ domain = self.get_domain(project_ref['domain_id'])
+ parent_ref = self.get_project(project_ref['parent_id'])
+
+ if parent_ref['is_domain']:
+ if parent_ref['id'] != domain['id']:
+ raise exception.ValidationError(
+ message=_('Cannot create project, since its parent '
+ '(%(domain_id)s) is acting as a domain, '
+ 'but project\'s specified parent_id '
+ '(%(parent_id)s) does not match '
+ 'this domain_id.')
+ % {'domain_id': domain['id'],
+ 'parent_id': parent_ref['id']})
+ else:
+ parent_domain_id = parent_ref.get('domain_id')
+ if parent_domain_id != domain['id']:
+ raise exception.ValidationError(
+ message=_('Cannot create project, since it specifies '
+ 'its owner as domain %(domain_id)s, but '
+ 'specifies a parent in a different domain '
+ '(%(parent_domain_id)s).')
+ % {'domain_id': domain['id'],
+ 'parent_domain_id': parent_domain_id})
+
+ def _enforce_project_constraints(self, project_ref):
+ if project_ref.get('is_domain'):
+ self._assert_is_domain_project_constraints(project_ref)
+ else:
+ self._assert_regular_project_constraints(project_ref)
+ # The whole hierarchy (upwards) must be enabled
+ parent_id = project_ref['parent_id']
+ parents_list = self.list_project_parents(parent_id)
+ parent_ref = self.get_project(parent_id)
parents_list.append(parent_ref)
for ref in parents_list:
- if ref.get('domain_id') != tenant.get('domain_id'):
- raise exception.ValidationError(
- message=_('cannot create a project within a different '
- 'domain than its parents.'))
if not ref.get('enabled', True):
raise exception.ValidationError(
message=_('cannot create a project in a '
'branch containing a disabled '
'project: %s') % ref['id'])
- self._assert_max_hierarchy_depth(tenant.get('parent_id'),
+
+ self._assert_max_hierarchy_depth(project_ref.get('parent_id'),
parents_list)
- ret = self.driver.create_project(tenant_id, tenant)
- notifications.Audit.created(self._PROJECT, tenant_id, initiator)
+ def _raise_reserved_character_exception(self, entity_type, name):
+ msg = _('%(entity)s name cannot contain the following reserved '
+ 'characters: %(chars)s')
+ raise exception.ValidationError(
+ message=msg % {
+ 'entity': entity_type,
+ 'chars': utils.list_url_unsafe_chars(name)
+ })
+
+ def _generate_project_name_conflict_msg(self, project):
+ if project['is_domain']:
+ return _('it is not permitted to have two projects '
+ 'acting as domains with the same name: %s'
+ ) % project['name']
+ else:
+ return _('it is not permitted to have two projects '
+ 'within a domain with the same name: %s'
+ ) % project['name']
+
+ def create_project(self, project_id, project, initiator=None):
+ project = project.copy()
+
+ if (CONF.resource.project_name_url_safe != 'off' and
+ utils.is_not_url_safe(project['name'])):
+ self._raise_reserved_character_exception('Project',
+ project['name'])
+
+ project.setdefault('enabled', True)
+ project['enabled'] = clean.project_enabled(project['enabled'])
+ project.setdefault('description', '')
+
+ # For regular projects, the controller will ensure we have a valid
+ # domain_id. For projects acting as a domain, the project_id
+ # is, effectively, the domain_id - and for such projects we don't
+ # bother to store a copy of it in the domain_id attribute.
+ project.setdefault('domain_id', None)
+ project.setdefault('parent_id', None)
+ if not project['parent_id']:
+ project['parent_id'] = project['domain_id']
+ project.setdefault('is_domain', False)
+
+ self._enforce_project_constraints(project)
+
+ # We leave enforcing name uniqueness to the underlying driver (instead
+ # of doing it in code in the project_constraints above), so as to allow
+ # this check to be done at the storage level, avoiding race conditions
+ # in multi-process keystone configurations.
+ try:
+ ret = self.driver.create_project(project_id, project)
+ except exception.Conflict:
+ raise exception.Conflict(
+ type='project',
+ details=self._generate_project_name_conflict_msg(project))
+
+ if project.get('is_domain'):
+ notifications.Audit.created(self._DOMAIN, project_id, initiator)
+ else:
+ notifications.Audit.created(self._PROJECT, project_id, initiator)
if MEMOIZE.should_cache(ret):
- self.get_project.set(ret, self, tenant_id)
+ self.get_project.set(ret, self, project_id)
self.get_project_by_name.set(ret, self, ret['name'],
ret['domain_id'])
return ret
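
The manager constructor above gates on the driver version: a legacy V8 driver is wrapped so the rest of the code can assume the V9 interface, and anything else is rejected. A minimal sketch of that adapt-or-reject pattern with stand-in classes (V9ResourceWrapperForV8Driver's internals are not part of this hunk):

    class ResourceDriverV8(object):
        pass

    class ResourceDriverV9(object):
        pass

    class V9ResourceWrapperForV8Driver(ResourceDriverV9):
        def __init__(self, wrapped):
            self.wrapped = wrapped  # delegate V9 calls to the V8 driver

    def load_driver(driver):
        if isinstance(driver, ResourceDriverV8):
            return V9ResourceWrapperForV8Driver(driver)
        if not isinstance(driver, ResourceDriverV9):
            raise TypeError('unsupported driver version: %r' % driver)
        return driver

    assert isinstance(load_driver(ResourceDriverV8()), ResourceDriverV9)
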
@@ -153,95 +289,257 @@ class Manager(manager.Manager):
"""
if project is None:
project = self.get_project(project_id)
- self.assert_domain_enabled(domain_id=project['domain_id'])
+ # If it's a regular project (i.e. it has a domain_id), we need to make
+ # sure the domain itself is not disabled
+ if project['domain_id']:
+ self.assert_domain_enabled(domain_id=project['domain_id'])
if not project.get('enabled', True):
raise AssertionError(_('Project is disabled: %s') % project_id)
- @notifications.disabled(_PROJECT, public=False)
- def _disable_project(self, project_id):
- """Emit a notification to the callback system project is been disabled.
-
- This method, and associated callback listeners, removes the need for
- making direct calls to other managers to take action (e.g. revoking
- project scoped tokens) when a project is disabled.
-
- :param project_id: project identifier
- :type project_id: string
- """
- pass
-
def _assert_all_parents_are_enabled(self, project_id):
parents_list = self.list_project_parents(project_id)
for project in parents_list:
if not project.get('enabled', True):
- raise exception.ForbiddenAction(
- action=_('cannot enable project %s since it has '
- 'disabled parents') % project_id)
-
- def _assert_whole_subtree_is_disabled(self, project_id):
- subtree_list = self.list_projects_in_subtree(project_id)
- for ref in subtree_list:
- if ref.get('enabled', True):
- raise exception.ForbiddenAction(
- action=_('cannot disable project %s since '
- 'its subtree contains enabled '
- 'projects') % project_id)
-
- def update_project(self, tenant_id, tenant, initiator=None):
- original_tenant = self.driver.get_project(tenant_id)
- tenant = tenant.copy()
-
- parent_id = original_tenant.get('parent_id')
- if 'parent_id' in tenant and tenant.get('parent_id') != parent_id:
- raise exception.ForbiddenAction(
- action=_('Update of `parent_id` is not allowed.'))
-
- if ('is_domain' in tenant and
- tenant['is_domain'] != original_tenant['is_domain']):
+ raise exception.ForbiddenNotSecurity(
+ _('Cannot enable project %s since it has disabled '
+ 'parents') % project_id)
+
+ def _check_whole_subtree_is_disabled(self, project_id, subtree_list=None):
+ if not subtree_list:
+ subtree_list = self.list_projects_in_subtree(project_id)
+ subtree_enabled = [ref.get('enabled', True) for ref in subtree_list]
+ return (not any(subtree_enabled))
+
+ def _update_project(self, project_id, project, initiator=None,
+ cascade=False):
+ # Use the driver directly to prevent using old cached value.
+ original_project = self.driver.get_project(project_id)
+ project = project.copy()
+
+ if original_project['is_domain']:
+ domain = self._get_domain_from_project(original_project)
+ self.assert_domain_not_federated(project_id, domain)
+ if 'enabled' in domain:
+ domain['enabled'] = clean.domain_enabled(domain['enabled'])
+ url_safe_option = CONF.resource.domain_name_url_safe
+ exception_entity = 'Domain'
+ else:
+ url_safe_option = CONF.resource.project_name_url_safe
+ exception_entity = 'Project'
+
+ if (url_safe_option != 'off' and
+ 'name' in project and
+ project['name'] != original_project['name'] and
+ utils.is_not_url_safe(project['name'])):
+ self._raise_reserved_character_exception(exception_entity,
+ project['name'])
+
+ parent_id = original_project.get('parent_id')
+ if 'parent_id' in project and project.get('parent_id') != parent_id:
+ raise exception.ForbiddenNotSecurity(
+ _('Update of `parent_id` is not allowed.'))
+
+ if ('is_domain' in project and
+ project['is_domain'] != original_project['is_domain']):
raise exception.ValidationError(
message=_('Update of `is_domain` is not allowed.'))
- if 'enabled' in tenant:
- tenant['enabled'] = clean.project_enabled(tenant['enabled'])
-
- # NOTE(rodrigods): for the current implementation we only allow to
- # disable a project if all projects below it in the hierarchy are
- # already disabled. This also means that we can not enable a
- # project that has disabled parents.
- original_tenant_enabled = original_tenant.get('enabled', True)
- tenant_enabled = tenant.get('enabled', True)
- if not original_tenant_enabled and tenant_enabled:
- self._assert_all_parents_are_enabled(tenant_id)
- if original_tenant_enabled and not tenant_enabled:
- self._assert_whole_subtree_is_disabled(tenant_id)
- self._disable_project(tenant_id)
-
- ret = self.driver.update_project(tenant_id, tenant)
- notifications.Audit.updated(self._PROJECT, tenant_id, initiator)
- self.get_project.invalidate(self, tenant_id)
- self.get_project_by_name.invalidate(self, original_tenant['name'],
- original_tenant['domain_id'])
+ update_domain = ('domain_id' in project and
+ project['domain_id'] != original_project['domain_id'])
+
+ # NOTE(htruta): Even if we are allowing domain_ids to be
+ # modified (i.e. 'domain_id_immutable' is set False),
+ # a project.domain_id can only be updated for root projects
+ # that have no children. The update of domain_id of a project in
+ # the middle of the hierarchy creates an inconsistent project
+ # hierarchy.
+ if update_domain:
+ if original_project['is_domain']:
+ raise exception.ValidationError(
+ message=_('Update of domain_id of projects acting as '
+ 'domains is not allowed.'))
+ parent_project = (
+ self.driver.get_project(original_project['parent_id']))
+ is_root_project = parent_project['is_domain']
+ if not is_root_project:
+ raise exception.ValidationError(
+ message=_('Update of domain_id is only allowed for '
+ 'root projects.'))
+ subtree_list = self.list_projects_in_subtree(project_id)
+ if subtree_list:
+ raise exception.ValidationError(
+ message=_('Cannot update domain_id of a project that '
+ 'has children.'))
+ versionutils.report_deprecated_feature(
+ LOG,
+ _('update of domain_id is deprecated as of Mitaka '
+ 'and will be removed in O.')
+ )
+
+ if 'enabled' in project:
+ project['enabled'] = clean.project_enabled(project['enabled'])
+
+ original_project_enabled = original_project.get('enabled', True)
+ project_enabled = project.get('enabled', True)
+ if not original_project_enabled and project_enabled:
+ self._assert_all_parents_are_enabled(project_id)
+ if original_project_enabled and not project_enabled:
+ # NOTE(htruta): In order to disable a regular project, all its
+ # children must already be disabled. However, to keep
+ # compatibility with the existing domain behaviour, we allow a
+ # project acting as a domain to be disabled irrespective of the
+ # state of its children. Disabling a project acting as domain
+ # effectively disables its children.
+ if (not original_project.get('is_domain') and not cascade and not
+ self._check_whole_subtree_is_disabled(project_id)):
+ raise exception.ForbiddenNotSecurity(
+ _('Cannot disable project %(project_id)s since its '
+ 'subtree contains enabled projects.')
+ % {'project_id': project_id})
+
+ notifications.Audit.disabled(self._PROJECT, project_id,
+ public=False)
+ if cascade:
+ self._only_allow_enabled_to_update_cascade(project,
+ original_project)
+ self._update_project_enabled_cascade(project_id, project_enabled)
+
+ try:
+ project['is_domain'] = (project.get('is_domain') or
+ original_project['is_domain'])
+ ret = self.driver.update_project(project_id, project)
+ except exception.Conflict:
+ raise exception.Conflict(
+ type='project',
+ details=self._generate_project_name_conflict_msg(project))
+
+ notifications.Audit.updated(self._PROJECT, project_id, initiator)
+ if original_project['is_domain']:
+ notifications.Audit.updated(self._DOMAIN, project_id, initiator)
+ # If the domain is being disabled, issue the disable notification
+ # as well
+ if original_project_enabled and not project_enabled:
+ notifications.Audit.disabled(self._DOMAIN, project_id,
+ public=False)
+
+ self.get_project.invalidate(self, project_id)
+ self.get_project_by_name.invalidate(self, original_project['name'],
+ original_project['domain_id'])
+
+ if ('domain_id' in project and
+ project['domain_id'] != original_project['domain_id']):
+ # If the project's domain_id has been updated, invalidate user
+ # role assignments cache region, as it may be caching inherited
+ # assignments from the old domain to the specified project
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
return ret
- def delete_project(self, tenant_id, initiator=None):
- if not self.driver.is_leaf_project(tenant_id):
- raise exception.ForbiddenAction(
- action=_('cannot delete the project %s since it is not '
- 'a leaf in the hierarchy.') % tenant_id)
+ def _only_allow_enabled_to_update_cascade(self, project, original_project):
+ for attr in project:
+ if attr != 'enabled':
+ if project.get(attr) != original_project.get(attr):
+ raise exception.ValidationError(
+ message=_('Cascade update is only allowed for '
+ 'enabled attribute.'))
+
+ def _update_project_enabled_cascade(self, project_id, enabled):
+ subtree = self.list_projects_in_subtree(project_id)
+ # Update enabled only if different from original value
+ subtree_to_update = [child for child in subtree
+ if child['enabled'] != enabled]
+ for child in subtree_to_update:
+ child['enabled'] = enabled
+
+ if not enabled:
+ # This does not in fact disable the project; it only emits a
+ # notification that it was disabled. The actual disablement is
+ # done by the update_project() call below.
+ notifications.Audit.disabled(self._PROJECT, child['id'],
+ public=False)
+
+ self.driver.update_project(child['id'], child)
+
+ def update_project(self, project_id, project, initiator=None,
+ cascade=False):
+ ret = self._update_project(project_id, project, initiator, cascade)
+ if ret['is_domain']:
+ self.get_domain.invalidate(self, project_id)
+ self.get_domain_by_name.invalidate(self, ret['name'])
- project = self.driver.get_project(tenant_id)
+ return ret
+
+ def _pre_delete_cleanup_project(self, project_id, project, initiator=None):
project_user_ids = (
- self.assignment_api.list_user_ids_for_project(tenant_id))
+ self.assignment_api.list_user_ids_for_project(project_id))
for user_id in project_user_ids:
- payload = {'user_id': user_id, 'project_id': tenant_id}
- self._emit_invalidate_user_project_tokens_notification(payload)
- ret = self.driver.delete_project(tenant_id)
- self.assignment_api.delete_project_assignments(tenant_id)
- self.get_project.invalidate(self, tenant_id)
+ payload = {'user_id': user_id, 'project_id': project_id}
+ notifications.Audit.internal(
+ notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
+ payload
+ )
+
+ def _post_delete_cleanup_project(self, project_id, project,
+ initiator=None):
+ self.assignment_api.delete_project_assignments(project_id)
+ self.get_project.invalidate(self, project_id)
self.get_project_by_name.invalidate(self, project['name'],
project['domain_id'])
- self.credential_api.delete_credentials_for_project(tenant_id)
- notifications.Audit.deleted(self._PROJECT, tenant_id, initiator)
+ self.credential_api.delete_credentials_for_project(project_id)
+ notifications.Audit.deleted(self._PROJECT, project_id, initiator)
+ # Invalidate user role assignments cache region, as it may
+ # be caching role assignments where the target is
+ # the specified project
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
+ def delete_project(self, project_id, initiator=None, cascade=False):
+ project = self.driver.get_project(project_id)
+ if project.get('is_domain'):
+ self.delete_domain(project_id, initiator)
+ else:
+ self._delete_project(project_id, initiator, cascade)
+
+ def _delete_project(self, project_id, initiator=None, cascade=False):
+ # Use the driver directly to prevent using old cached value.
+ project = self.driver.get_project(project_id)
+ if project['is_domain'] and project['enabled']:
+ raise exception.ValidationError(
+ message=_('cannot delete an enabled project acting as a '
+ 'domain. Please disable the project %s first.')
+ % project.get('id'))
+
+ if not self.is_leaf_project(project_id) and not cascade:
+ raise exception.ForbiddenNotSecurity(
+ _('Cannot delete the project %s since it is not a leaf in the '
+ 'hierarchy. Use the cascade option if you want to delete a '
+ 'whole subtree.')
+ % project_id)
+
+ if cascade:
+ # Get the project's subtree list in reverse order, i.e. from the
+ # leaves to the root, so we do not break the parent_id FK.
+ subtree_list = self.list_projects_in_subtree(project_id)
+ subtree_list.reverse()
+ if not self._check_whole_subtree_is_disabled(
+ project_id, subtree_list=subtree_list):
+ raise exception.ForbiddenNotSecurity(
+ _('Cannot delete project %(project_id)s since its subtree '
+ 'contains enabled projects.')
+ % {'project_id': project_id})
+
+ project_list = subtree_list + [project]
+ projects_ids = [x['id'] for x in project_list]
+
+ for prj in project_list:
+ self._pre_delete_cleanup_project(prj['id'], prj, initiator)
+ ret = self.driver.delete_projects_from_ids(projects_ids)
+ for prj in project_list:
+ self._post_delete_cleanup_project(prj['id'], prj, initiator)
+ else:
+ self._pre_delete_cleanup_project(project_id, project, initiator)
+ ret = self.driver.delete_project(project_id)
+ self._post_delete_cleanup_project(project_id, project, initiator)
+
return ret
def _filter_projects_list(self, projects_list, user_id):
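
Two details in the delete path above are easy to miss: the whole subtree must already be disabled before a cascade delete is allowed, and the subtree list is reversed so children are removed before their parents, keeping the parent_id foreign key satisfied if the driver deletes row by row. A small sketch of the ordering:

    # root -> child -> grandchild, listed top-down by
    # list_projects_in_subtree() (assumption for this sketch).
    subtree_list = ['child', 'grandchild']
    subtree_list.reverse()                  # leaves first
    delete_order = subtree_list + ['root']  # the target project goes last
    assert delete_order == ['grandchild', 'child', 'root']
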
@@ -378,85 +676,127 @@ class Manager(manager.Manager):
project_id, _projects_indexed_by_parent(subtree_list))
return subtree_as_ids
+ def list_domains_from_ids(self, domain_ids):
+ """List domains for the provided list of ids.
+
+ :param domain_ids: list of ids
+
+ :returns: a list of domain_refs.
+
+ This method is used internally by the assignment manager to bulk read
+ a set of domains given their ids.
+
+ """
+ # Retrieve the projects acting as domains and get their
+ # corresponding domains
+ projects = self.list_projects_from_ids(domain_ids)
+ domains = [self._get_domain_from_project(project)
+ for project in projects]
+
+ return domains
+
@MEMOIZE
def get_domain(self, domain_id):
- return self.driver.get_domain(domain_id)
+ try:
+ # Retrieve the corresponding project that acts as a domain
+ project = self.driver.get_project(domain_id)
+ except exception.ProjectNotFound:
+ raise exception.DomainNotFound(domain_id=domain_id)
+
+ # Return its corresponding domain
+ return self._get_domain_from_project(project)
@MEMOIZE
def get_domain_by_name(self, domain_name):
- return self.driver.get_domain_by_name(domain_name)
-
- def create_domain(self, domain_id, domain, initiator=None):
- if (not self.identity_api.multiple_domains_supported and
- domain_id != CONF.identity.default_domain_id):
- raise exception.Forbidden(_('Multiple domains are not supported'))
- self.assert_domain_not_federated(domain_id, domain)
- domain.setdefault('enabled', True)
- domain['enabled'] = clean.domain_enabled(domain['enabled'])
- ret = self.driver.create_domain(domain_id, domain)
+ try:
+ # Retrieve the corresponding project that acts as a domain
+ project = self.driver.get_project_by_name(domain_name,
+ domain_id=None)
+ except exception.ProjectNotFound:
+ raise exception.DomainNotFound(domain_id=domain_name)
- notifications.Audit.created(self._DOMAIN, domain_id, initiator)
+ # Return its corresponding domain
+ return self._get_domain_from_project(project)
- if MEMOIZE.should_cache(ret):
- self.get_domain.set(ret, self, domain_id)
- self.get_domain_by_name.set(ret, self, ret['name'])
- return ret
+ def _get_domain_from_project(self, project_ref):
+ """Creates a domain ref from a project ref.
- @manager.response_truncated
- def list_domains(self, hints=None):
- return self.driver.list_domains(hints or driver_hints.Hints())
+ Based on the provided project ref, create a domain ref, so that the
+ result can be returned in response to a domain API call.
+ """
+ if not project_ref['is_domain']:
+ LOG.error(_LE('Asked to convert a non-domain project into a '
+ 'domain - Domain: %(domain_id)s, Project ID: '
+ '%(id)s, Project Name: %(project_name)s'),
+ {'domain_id': project_ref['domain_id'],
+ 'id': project_ref['id'],
+ 'project_name': project_ref['name']})
+ raise exception.DomainNotFound(domain_id=project_ref['id'])
+
+ domain_ref = project_ref.copy()
+ # As well as the project-specific attributes that we need to remove,
+ # there is an old compatibility issue: update_project (as well as
+ # extracting the extra attributes) also includes a copy of the actual
+ # extra dict - something that update_domain does not do.
+ for k in ['parent_id', 'domain_id', 'is_domain', 'extra']:
+ domain_ref.pop(k, None)
+
+ return domain_ref
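
For illustration, the ref conversion above amounts to the following
stand-alone transformation (the attribute values are made up):

    project_ref = {'id': 'd1', 'name': 'engineering', 'enabled': True,
                   'is_domain': True, 'domain_id': None,
                   'parent_id': None, 'extra': {}}
    domain_ref = project_ref.copy()
    for k in ('parent_id', 'domain_id', 'is_domain', 'extra'):
        domain_ref.pop(k, None)   # strip the project-only attributes
    assert domain_ref == {'id': 'd1', 'name': 'engineering', 'enabled': True}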
- @notifications.disabled(_DOMAIN, public=False)
- def _disable_domain(self, domain_id):
- """Emit a notification to the callback system domain is been disabled.
+ def create_domain(self, domain_id, domain, initiator=None):
+ if (CONF.resource.domain_name_url_safe != 'off' and
+ utils.is_not_url_safe(domain['name'])):
+ self._raise_reserved_character_exception('Domain', domain['name'])
+ project_from_domain = _get_project_from_domain(domain)
+ is_domain_project = self.create_project(
+ domain_id, project_from_domain, initiator)
- This method, and associated callback listeners, removes the need for
- making direct calls to other managers to take action (e.g. revoking
- domain scoped tokens) when a domain is disabled.
+ return self._get_domain_from_project(is_domain_project)
- :param domain_id: domain identifier
- :type domain_id: string
- """
- pass
+ @manager.response_truncated
+ def list_domains(self, hints=None):
+ projects = self.list_projects_acting_as_domain(hints)
+ domains = [self._get_domain_from_project(project)
+ for project in projects]
+ return domains
def update_domain(self, domain_id, domain, initiator=None):
+ # TODO(henry-nash): We shouldn't have to check for the federated domain
+ # here as well as _update_project, but currently our tests assume the
+ # checks are done in a specific order. The tests should be refactored.
self.assert_domain_not_federated(domain_id, domain)
- original_domain = self.driver.get_domain(domain_id)
- if 'enabled' in domain:
- domain['enabled'] = clean.domain_enabled(domain['enabled'])
- ret = self.driver.update_domain(domain_id, domain)
- notifications.Audit.updated(self._DOMAIN, domain_id, initiator)
- # disable owned users & projects when the API user specifically set
- # enabled=False
- if (original_domain.get('enabled', True) and
- not domain.get('enabled', True)):
- notifications.Audit.disabled(self._DOMAIN, domain_id, initiator,
- public=False)
+ project = _get_project_from_domain(domain)
+ try:
+ original_domain = self.driver.get_project(domain_id)
+ project = self._update_project(domain_id, project, initiator)
+ except exception.ProjectNotFound:
+ raise exception.DomainNotFound(domain_id=domain_id)
+ domain_from_project = self._get_domain_from_project(project)
self.get_domain.invalidate(self, domain_id)
self.get_domain_by_name.invalidate(self, original_domain['name'])
- return ret
- def delete_domain(self, domain_id, initiator=None):
- # explicitly forbid deleting the default domain (this should be a
- # carefully orchestrated manual process involving configuration
- # changes, etc)
- if domain_id == CONF.identity.default_domain_id:
- raise exception.ForbiddenAction(action=_('delete the default '
- 'domain'))
+ return domain_from_project
- domain = self.driver.get_domain(domain_id)
+ def delete_domain(self, domain_id, initiator=None):
+ # Use the driver directly to get the project that acts as a domain,
+ # and to avoid using a stale cached value.
+ try:
+ domain = self.driver.get_project(domain_id)
+ except exception.ProjectNotFound:
+ raise exception.DomainNotFound(domain_id=domain_id)
# To help avoid inadvertent deletes, we insist that the domain
# has been previously disabled. This also prevents a user deleting
# their own domain since, once it is disabled, they won't be able
# to get a valid token to issue this delete.
if domain['enabled']:
- raise exception.ForbiddenAction(
- action=_('cannot delete a domain that is enabled, '
- 'please disable it first.'))
+ raise exception.ForbiddenNotSecurity(
+ _('Cannot delete a domain that is enabled, please disable it '
+ 'first.'))
self._delete_domain_contents(domain_id)
+ self._delete_project(domain_id, initiator)
# Delete any database stored domain config
self.domain_config_api.delete_config_options(domain_id)
self.domain_config_api.delete_config_options(domain_id, sensitive=True)
@@ -468,11 +808,14 @@ class Manager(manager.Manager):
# other domains - so we should delete these here by making a call
# to the backend to delete all assignments for this domain.
# (see Bug #1277847)
- self.driver.delete_domain(domain_id)
notifications.Audit.deleted(self._DOMAIN, domain_id, initiator)
self.get_domain.invalidate(self, domain_id)
self.get_domain_by_name.invalidate(self, domain['name'])
+ # Invalidate user role assignments cache region, as it may be caching
+ # role assignments where the target is the specified domain
+ assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
+
def _delete_domain_contents(self, domain_id):
"""Delete the contents of a domain.
@@ -483,7 +826,6 @@ class Manager(manager.Manager):
associated with them as well as revoking any relevant tokens.
"""
-
def _delete_projects(project, projects, examined):
if project['id'] in examined:
msg = _LE('Circular reference or a repeated entry found '
@@ -498,7 +840,7 @@ class Manager(manager.Manager):
_delete_projects(proj, projects, examined)
try:
- self.delete_project(project['id'])
+ self.delete_project(project['id'], initiator=None)
except exception.ProjectNotFound:
LOG.debug(('Project %(projectid)s not found when '
'deleting domain contents for %(domainid)s, '
@@ -509,7 +851,7 @@ class Manager(manager.Manager):
proj_refs = self.list_projects_in_domain(domain_id)
# Deleting projects recursively
- roots = [x for x in proj_refs if x.get('parent_id') is None]
+ roots = [x for x in proj_refs if x.get('parent_id') == domain_id]
examined = set()
for project in roots:
_delete_projects(project, proj_refs, examined)
@@ -524,29 +866,258 @@ class Manager(manager.Manager):
def list_projects_in_domain(self, domain_id):
return self.driver.list_projects_in_domain(domain_id)
+ def list_projects_acting_as_domain(self, hints=None):
+ return self.driver.list_projects_acting_as_domain(
+ hints or driver_hints.Hints())
+
@MEMOIZE
def get_project(self, project_id):
return self.driver.get_project(project_id)
@MEMOIZE
- def get_project_by_name(self, tenant_name, domain_id):
- return self.driver.get_project_by_name(tenant_name, domain_id)
+ def get_project_by_name(self, project_name, domain_id):
+ return self.driver.get_project_by_name(project_name, domain_id)
- @notifications.internal(
- notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE)
- def _emit_invalidate_user_project_tokens_notification(self, payload):
- # This notification's payload is a dict of user_id and
- # project_id so the token provider can invalidate the tokens
- # from persistence if persistence is enabled.
- pass
+ def ensure_default_domain_exists(self):
+ """Creates the default domain if it doesn't exist.
+
+ This is only used for the v2 API and can go away when v2 does.
+
+ """
+ try:
+ default_domain_attrs = {
+ 'name': 'Default',
+ 'id': CONF.identity.default_domain_id,
+ 'description': 'Domain created automatically to support V2.0 '
+ 'operations.',
+ }
+ self.create_domain(CONF.identity.default_domain_id,
+ default_domain_attrs)
+ LOG.warning(_LW(
+ 'The default domain was created automatically to contain V2 '
+ 'resources. This is deprecated in the M release and will not '
+ 'be supported in the O release. Create the default domain '
+ 'manually or use the keystone-manage bootstrap command.'))
+ except exception.Conflict:
+ LOG.debug('The default domain already exists.')
+ except Exception:
+ LOG.error(_LE('Failed to create the default domain.'))
+ raise
+
+
+# The ResourceDriverBase class is the set of driver methods from earlier
+# drivers that we still support and that have not been removed or modified.
+# This class is then used to create the augmented V8 and V9 abstract driver
+# classes, without having to duplicate a lot of abstract method signatures.
+# If you remove a method from V9, then move the abstract methods from this
+# Base class to the V8 class. Do not modify any of the method signatures in
+# the Base class - changes should only be made in the V8 and later classes.
+
+# Starting with V9, some drivers use a special value to represent a domain_id
+# of None. See comment in Project class of resource/backends/sql.py for more
+# details.
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
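
A sketch of how such a sentinel typically round-trips at the SQL layer; the
helper names here are invented (see the Project class in
resource/backends/sql.py for the real mapping):

    NULL_DOMAIN_ID = '<<keystone.domain.root>>'  # same value as above

    def to_db(domain_id):
        # NULL handling in unique constraints varies across databases, so a
        # real value is stored in place of None.
        return NULL_DOMAIN_ID if domain_id is None else domain_id

    def from_db(domain_id):
        return None if domain_id == NULL_DOMAIN_ID else domain_id

    assert from_db(to_db(None)) is None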
@six.add_metaclass(abc.ABCMeta)
-class ResourceDriverV8(object):
+class ResourceDriverBase(object):
def _get_list_limit(self):
return CONF.resource.list_limit or CONF.list_limit
+ # project crud
+ @abc.abstractmethod
+ def list_projects(self, hints):
+ """List projects in the system.
+
+ :param hints: filter hints which the driver should
+ implement if at all possible.
+
+ :returns: a list of project_refs or an empty list.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_projects_from_ids(self, project_ids):
+ """List projects for the provided list of ids.
+
+ :param project_ids: list of ids
+
+ :returns: a list of project_refs.
+
+ This method is used internally by the assignment manager to bulk read
+ a set of projects given their ids.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_project_ids_from_domain_ids(self, domain_ids):
+ """List project ids for the provided list of domain ids.
+
+ :param domain_ids: list of domain ids
+
+ :returns: a list of project ids owned by the specified domain ids.
+
+ This method is used internally by the assignment manager to bulk read
+ a set of project ids given a list of domain ids.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_projects_in_domain(self, domain_id):
+ """List projects in the domain.
+
+ :param domain_id: the driver MUST only return projects
+ within this domain.
+
+ :returns: a list of project_refs or an empty list.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def get_project(self, project_id):
+ """Get a project by ID.
+
+ :returns: project_ref
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def update_project(self, project_id, project):
+ """Updates an existing project.
+
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+ :raises keystone.exception.Conflict: if project name already exists
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def delete_project(self, project_id):
+ """Deletes an existing project.
+
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def list_project_parents(self, project_id):
+ """List all parents from a project by its ID.
+
+ :param project_id: the driver will list the parents of this
+ project.
+
+ :returns: a list of project_refs or an empty list.
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def list_projects_in_subtree(self, project_id):
+ """List all projects in the subtree of a given project.
+
+ :param project_id: the driver will get the subtree under
+ this project.
+
+ :returns: a list of project_refs or an empty list
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+
+ """
+ raise exception.NotImplemented()
+
+ @abc.abstractmethod
+ def is_leaf_project(self, project_id):
+ """Checks if a project is a leaf in the hierarchy.
+
+ :param project_id: the driver will check if this project
+ is a leaf in the hierarchy.
+
+ :raises keystone.exception.ProjectNotFound: if project_id does not
+ exist
+
+ """
+ raise exception.NotImplemented()
+
+ def _validate_default_domain(self, ref):
+ """Validate that either the default domain or nothing is specified.
+
+ Also removes the domain from the ref so that LDAP doesn't have to
+ persist the attribute.
+
+ """
+ ref = ref.copy()
+ domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
+ self._validate_default_domain_id(domain_id)
+ return ref
+
+ def _validate_default_domain_id(self, domain_id):
+ """Validate that the domain ID belongs to the default domain."""
+ if domain_id != CONF.identity.default_domain_id:
+ raise exception.DomainNotFound(domain_id=domain_id)
+
+
+class ResourceDriverV8(ResourceDriverBase):
+ """Removed or redefined methods from V8.
+
+ Move the abstract methods of any methods removed or modified in later
+ versions of the driver from ResourceDriverBase to here. We maintain this
+ so that legacy drivers, which will be a subclass of ResourceDriverV8, can
+ still reference them.
+
+ """
+
+ @abc.abstractmethod
+ def create_project(self, tenant_id, tenant):
+ """Creates a new project.
+
+ :param tenant_id: This parameter can be ignored.
+ :param dict tenant: The new project
+
+ Project schema::
+
+ type: object
+ properties:
+ id:
+ type: string
+ name:
+ type: string
+ domain_id:
+ type: string
+ description:
+ type: string
+ enabled:
+ type: boolean
+ parent_id:
+ type: string
+ is_domain:
+ type: boolean
+ required: [id, name, domain_id]
+ additionalProperties: true
+
+ If the project doesn't match the schema the behavior is undefined.
+
+ The driver can impose requirements such as the maximum length of a
+ field. If these requirements are not met the behavior is undefined.
+
+ :raises keystone.exception.Conflict: if the project id already exists
+ or the name already exists for the domain_id.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
@abc.abstractmethod
def get_project_by_name(self, tenant_name, domain_id):
"""Get a tenant by name.
@@ -558,6 +1129,21 @@ class ResourceDriverV8(object):
"""
raise exception.NotImplemented() # pragma: no cover
+ # Domain management functions for backends that only allow a single
+ # domain. Although we no longer use this, a custom legacy driver might
+ # have made use of it, so keep it here in case.
+ def _set_default_domain(self, ref):
+ """If the domain ID has not been set, set it to the default."""
+ if isinstance(ref, dict):
+ if 'domain_id' not in ref:
+ ref = ref.copy()
+ ref['domain_id'] = CONF.identity.default_domain_id
+ return ref
+ elif isinstance(ref, list):
+ return [self._set_default_domain(x) for x in ref]
+ else:
+ raise ValueError(_('Expected dict or list: %s') % type(ref))
+
# domain crud
@abc.abstractmethod
def create_domain(self, domain_id, domain):
@@ -635,182 +1221,288 @@ class ResourceDriverV8(object):
"""
raise exception.NotImplemented() # pragma: no cover
- # project crud
- @abc.abstractmethod
- def create_project(self, project_id, project):
- """Creates a new project.
- :raises keystone.exception.Conflict: if project_id or project name
- already exists
+class ResourceDriverV9(ResourceDriverBase):
+ """New or redefined methods from V8.
- """
- raise exception.NotImplemented() # pragma: no cover
+ Add any new V9 abstract methods (or those with modified signatures) to
+ this class.
- @abc.abstractmethod
- def list_projects(self, hints):
- """List projects in the system.
+ """
- :param hints: filter hints which the driver should
- implement if at all possible.
+ @abc.abstractmethod
+ def create_project(self, project_id, project):
+ """Creates a new project.
- :returns: a list of project_refs or an empty list.
+ :param project_id: This parameter can be ignored.
+ :param dict project: The new project
+
+ Project schema::
+
+ type: object
+ properties:
+ id:
+ type: string
+ name:
+ type: string
+ domain_id:
+ type: [string, null]
+ description:
+ type: string
+ enabled:
+ type: boolean
+ parent_id:
+ type: string
+ is_domain:
+ type: boolean
+ required: [id, name, domain_id]
+ additionalProperties: true
+
+ If the project doesn't match the schema the behavior is undefined.
+
+ The driver can impose requirements such as the maximum length of a
+ field. If these requirements are not met the behavior is undefined.
+
+ :raises keystone.exception.Conflict: if the project id already exists
+ or the name already exists for the domain_id.
"""
raise exception.NotImplemented() # pragma: no cover
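
For example, a ref like the following (all values invented) satisfies the V9
schema above; note the null domain_id, which is only legal for a project
acting as a domain:

    project = {'id': 'd1', 'name': 'engineering', 'domain_id': None,
               'is_domain': True, 'parent_id': None, 'enabled': True,
               'description': 'top-level project acting as a domain'}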
@abc.abstractmethod
- def list_projects_from_ids(self, project_ids):
- """List projects for the provided list of ids.
-
- :param project_ids: list of ids
-
- :returns: a list of project_refs.
+ def get_project_by_name(self, project_name, domain_id):
+ """Get a project by name.
- This method is used internally by the assignment manager to bulk read
- a set of projects given their ids.
+ :returns: project_ref
+ :raises keystone.exception.ProjectNotFound: if a project with the
+ project_name does not exist within the domain
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_project_ids_from_domain_ids(self, domain_ids):
- """List project ids for the provided list of domain ids.
-
- :param domain_ids: list of domain ids
-
- :returns: a list of project ids owned by the specified domain ids.
-
- This method is used internally by the assignment manager to bulk read
- a set of project ids given a list of domain ids.
-
+ def delete_projects_from_ids(self, project_ids):
+ """Deletes a given list of projects.
+
+ Deletes a list of projects. Ensures no project on the list exists
+ after this method is successfully called. If an empty list is
+ provided, it is silently ignored. In addition, if a project ID in
+ the list is not found in the backend, no exception is raised, but a
+ message is logged.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_projects_in_domain(self, domain_id):
- """List projects in the domain.
+ def list_projects_acting_as_domain(self, hints):
+ """List all projects acting as domains.
- :param domain_id: the driver MUST only return projects
- within this domain.
+ :param hints: filter hints which the driver should
+ implement if at all possible.
:returns: a list of project_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
- @abc.abstractmethod
- def get_project(self, project_id):
- """Get a project by ID.
- :returns: project_ref
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
+class V9ResourceWrapperForV8Driver(ResourceDriverV9):
+ """Wrapper class to support a V8 legacy driver.
- """
- raise exception.NotImplemented() # pragma: no cover
+ In order to support legacy drivers without having to make the manager code
+ driver-version aware, we wrap legacy drivers so that they look like the
+ latest version. For the various changes made in a new driver, here are the
+ actions needed in this wrapper:
- @abc.abstractmethod
- def update_project(self, project_id, project):
- """Updates an existing project.
+ Method removed from new driver - remove the call-through method from this
+ class, since the manager will no longer be
+ calling it.
+ Method signature (or meaning) changed - wrap the old method in a new
+ signature here, and munge the input
+ and output parameters accordingly.
+ New method added to new driver - add a method to implement the new
+ functionality here if possible. If that is
+ not possible, then return NotImplemented,
+ since we do not guarantee to support new
+ functionality with legacy drivers.
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
- :raises keystone.exception.Conflict: if project name already exists
+ This wrapper contains the following support for newer manager code:
- """
- raise exception.NotImplemented() # pragma: no cover
+ - The current manager code expects domains to be represented as projects
+ acting as domains, something that may not be possible in a legacy driver.
+ Hence the wrapper will map any calls for projects acting as a domain back
+ onto the driver domain methods. The caveat is that this assumes a
+ project_id can never clash with a domain_id; if they do clash, the
+ wrapper may not be able to locate the correct entry.
- @abc.abstractmethod
- def delete_project(self, project_id):
- """Deletes an existing project.
+ """
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
+ @versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='keystone.resource.ResourceDriverV8',
+ in_favor_of='keystone.resource.ResourceDriverV9',
+ remove_in=+2)
+ def __init__(self, wrapped_driver):
+ self.driver = wrapped_driver
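
The intended wiring, sketched with a hypothetical legacy driver class
(LegacyDriver and hints are assumptions, not part of the patch):

    legacy = LegacyDriver()                        # a ResourceDriverV8 subclass
    driver = V9ResourceWrapperForV8Driver(legacy)  # manager now sees V9
    # e.g. domains surface as projects acting as domains:
    refs = driver.list_projects_acting_as_domain(hints)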
+ def _get_domain_from_project(self, project_ref):
+ """Creates a domain ref from a project ref.
+
+ Based on the provided project ref (or partial ref), creates a
+ domain ref, so that the result can be passed to the driver
+ domain methods.
"""
- raise exception.NotImplemented() # pragma: no cover
+ domain_ref = project_ref.copy()
+ for k in ['parent_id', 'domain_id', 'is_domain']:
+ domain_ref.pop(k, None)
+ return domain_ref
- @abc.abstractmethod
- def list_project_parents(self, project_id):
- """List all parents from a project by its ID.
+ def get_project_by_name(self, project_name, domain_id):
+ if domain_id is None:
+ try:
+ domain_ref = self.driver.get_domain_by_name(project_name)
+ return _get_project_from_domain(domain_ref)
+ except exception.DomainNotFound:
+ raise exception.ProjectNotFound(project_id=project_name)
+ else:
+ return self.driver.get_project_by_name(project_name, domain_id)
- :param project_id: the driver will list the parents of this
- project.
+ def create_project(self, project_id, project):
+ if project['is_domain']:
+ new_domain = self._get_domain_from_project(project)
+ domain_ref = self.driver.create_domain(project_id, new_domain)
+ return _get_project_from_domain(domain_ref)
+ else:
+ return self.driver.create_project(project_id, project)
- :returns: a list of project_refs or an empty list.
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
+ def list_projects(self, hints):
+ """List projects and/or domains.
- """
- raise exception.NotImplemented()
+ We use the hints filter to determine whether we are listing projects,
+ domains or both.
- @abc.abstractmethod
- def list_projects_in_subtree(self, project_id):
- """List all projects in the subtree below the hierarchy of the
- given project.
+ If the filter includes domain_id==None, then we should only list
+ domains (converted to projects acting as domains) since regular
+ projects always have a non-None value for domain_id.
- :param project_id: the driver will get the subtree under
- this project.
+ Likewise, if the filter includes domain_id==<non-None value>, then we
+ should only list projects.
- :returns: a list of project_refs or an empty list
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
+ If there is no domain_id filter, then we need to do a combined listing
+ of domains and projects, converting domains to projects acting as a
+ domain.
"""
- raise exception.NotImplemented()
+ domain_listing_filter = None
+ for f in hints.filters:
+ if (f['name'] == 'domain_id'):
+ domain_listing_filter = f
+
+ if domain_listing_filter is not None:
+ if domain_listing_filter['value'] is not None:
+ proj_list = self.driver.list_projects(hints)
+ else:
+ domains = self.driver.list_domains(hints)
+ proj_list = [_get_project_from_domain(p) for p in domains]
+ hints.filters.remove(domain_listing_filter)
+ return proj_list
+ else:
+ # No domain_id filter, so combine domains and projects. Since any
+ # remaining filter might need to be applied more than once, we hand
+ # copies of the filters to each driver, allowing the originals to be
+ # passed back up to the controller level where a final filtering
+ # pass will occur.
+ local_hints = copy.deepcopy(hints)
+ proj_list = self.driver.list_projects(local_hints)
+ local_hints = copy.deepcopy(hints)
+ domains = self.driver.list_domains(local_hints)
+ for domain in domains:
+ proj_list.append(_get_project_from_domain(domain))
+ return proj_list
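
The filter dispatch above reduces to the following stand-alone logic, shown
here with plain dicts instead of real Hints objects:

    filters = [{'name': 'domain_id', 'value': None}]
    domain_filter = next(
        (f for f in filters if f['name'] == 'domain_id'), None)
    if domain_filter is None:
        listing = 'projects + domains'       # combined listing
    elif domain_filter['value'] is None:
        listing = 'domains only'             # projects acting as domains
    else:
        listing = 'projects only'
    assert listing == 'domains only'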
- @abc.abstractmethod
- def is_leaf_project(self, project_id):
- """Checks if a project is a leaf in the hierarchy.
+ def list_projects_from_ids(self, project_ids):
+ return [self.get_project(id) for id in project_ids]
- :param project_id: the driver will check if this project
- is a leaf in the hierarchy.
+ def list_project_ids_from_domain_ids(self, domain_ids):
+ return self.driver.list_project_ids_from_domain_ids(domain_ids)
- :raises keystone.exception.ProjectNotFound: if project_id does not
- exist
+ def list_projects_in_domain(self, domain_id):
+ return self.driver.list_projects_in_domain(domain_id)
- """
- raise exception.NotImplemented()
+ def get_project(self, project_id):
+ try:
+ domain_ref = self.driver.get_domain(project_id)
+ return _get_project_from_domain(domain_ref)
+ except exception.DomainNotFound:
+ return self.driver.get_project(project_id)
- # Domain management functions for backends that only allow a single
- # domain. Currently, this is only LDAP, but might be used by other
- # backends in the future.
- def _set_default_domain(self, ref):
- """If the domain ID has not been set, set it to the default."""
- if isinstance(ref, dict):
- if 'domain_id' not in ref:
- ref = ref.copy()
- ref['domain_id'] = CONF.identity.default_domain_id
- return ref
- elif isinstance(ref, list):
- return [self._set_default_domain(x) for x in ref]
+ def _is_domain(self, project_id):
+ ref = self.get_project(project_id)
+ return ref.get('is_domain', False)
+
+ def update_project(self, project_id, project):
+ if self._is_domain(project_id):
+ update_domain = self._get_domain_from_project(project)
+ domain_ref = self.driver.update_domain(project_id, update_domain)
+ return _get_project_from_domain(domain_ref)
else:
- raise ValueError(_('Expected dict or list: %s') % type(ref))
+ return self.driver.update_project(project_id, project)
- def _validate_default_domain(self, ref):
- """Validate that either the default domain or nothing is specified.
+ def delete_project(self, project_id):
+ if self._is_domain(project_id):
+ try:
+ self.driver.delete_domain(project_id)
+ except exception.DomainNotFound:
+ raise exception.ProjectNotFound(project_id=project_id)
+ else:
+ self.driver.delete_project(project_id)
- Also removes the domain from the ref so that LDAP doesn't have to
- persist the attribute.
+ def delete_projects_from_ids(self, project_ids):
+ raise exception.NotImplemented() # pragma: no cover
- """
- ref = ref.copy()
- domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
- self._validate_default_domain_id(domain_id)
- return ref
+ def list_project_parents(self, project_id):
+ """List a project's ancestors.
- def _validate_default_domain_id(self, domain_id):
- """Validate that the domain ID specified belongs to the default domain.
+ The current manager expects the ancestor tree to end with the project
+ acting as the domain (since that's now the top of the tree), but a
+ legacy driver will not have that top project in its projects table,
+ since it's still in the domain table. Hence we lift the algorithm for
+ traversing up the tree from the driver to here, so that our version of
+ get_project() is called, which will fetch the "project" from the right
+ table.
"""
- if domain_id != CONF.identity.default_domain_id:
- raise exception.DomainNotFound(domain_id=domain_id)
+ project = self.get_project(project_id)
+ parents = []
+ examined = set()
+ while project.get('parent_id') is not None:
+ if project['id'] in examined:
+ msg = _LE('Circular reference or a repeated '
+ 'entry found in projects hierarchy - '
+ '%(project_id)s.')
+ LOG.error(msg, {'project_id': project['id']})
+ return
+
+ examined.add(project['id'])
+ parent_project = self.get_project(project['parent_id'])
+ parents.append(parent_project)
+ project = parent_project
+ return parents
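
The upward walk with its cycle guard, extracted into a runnable miniature
over a hypothetical in-memory hierarchy:

    refs = {'leaf': {'id': 'leaf', 'parent_id': 'mid'},
            'mid': {'id': 'mid', 'parent_id': 'top'},
            'top': {'id': 'top', 'parent_id': None}}
    project, parents, examined = refs['leaf'], [], set()
    while project.get('parent_id') is not None:
        if project['id'] in examined:     # circular reference: give up
            parents = None
            break
        examined.add(project['id'])
        project = refs[project['parent_id']]
        parents.append(project)
    assert [p['id'] for p in parents] == ['mid', 'top']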
+
+ def list_projects_in_subtree(self, project_id):
+ return self.driver.list_projects_in_subtree(project_id)
+
+ def is_leaf_project(self, project_id):
+ return self.driver.is_leaf_project(project_id)
+
+ def list_projects_acting_as_domain(self, hints):
+ refs = self.driver.list_domains(hints)
+ return [_get_project_from_domain(p) for p in refs]
Driver = manager.create_legacy_driver(ResourceDriverV8)
-MEMOIZE_CONFIG = cache.get_memoization_decorator(section='domain_config')
+MEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config')
@dependency.provider('domain_config_api')
@@ -829,15 +1521,16 @@ class DomainConfigManager(manager.Manager):
driver_namespace = 'keystone.resource.domain_config'
whitelisted_options = {
- 'identity': ['driver'],
+ 'identity': ['driver', 'list_limit'],
'ldap': [
'url', 'user', 'suffix', 'use_dumb_member', 'dumb_member',
'allow_subtree_delete', 'query_scope', 'page_size',
'alias_dereferencing', 'debug_level', 'chase_referrals',
'user_tree_dn', 'user_filter', 'user_objectclass',
'user_id_attribute', 'user_name_attribute', 'user_mail_attribute',
- 'user_pass_attribute', 'user_enabled_attribute',
- 'user_enabled_invert', 'user_enabled_mask', 'user_enabled_default',
+ 'user_description_attribute', 'user_pass_attribute',
+ 'user_enabled_attribute', 'user_enabled_invert',
+ 'user_enabled_mask', 'user_enabled_default',
'user_attribute_ignore', 'user_default_project_id_attribute',
'user_allow_create', 'user_allow_update', 'user_allow_delete',
'user_enabled_emulation', 'user_enabled_emulation_dn',
@@ -928,7 +1621,6 @@ class DomainConfigManager(manager.Manager):
def _config_to_list(self, config):
"""Build whitelisted and sensitive lists for use by backend drivers."""
-
whitelisted = []
sensitive = []
for group in config:
@@ -1086,7 +1778,6 @@ class DomainConfigManager(manager.Manager):
"""
def _assert_valid_update(domain_id, config, group=None, option=None):
"""Ensure the combination of config, group and option is valid."""
-
self._assert_valid_config(config)
self._assert_valid_group_and_option(group, option)
@@ -1145,7 +1836,6 @@ class DomainConfigManager(manager.Manager):
def _update_or_create(domain_id, option, sensitive):
"""Update the option, if it doesn't exist then create it."""
-
try:
self.create_config_option(
domain_id, option['group'], option['option'],
@@ -1266,7 +1956,7 @@ class DomainConfigManager(manager.Manager):
'value: %(value)s.')
if warning_msg:
- LOG.warn(warning_msg % {
+ LOG.warning(warning_msg % {
'domain': domain_id,
'group': each_whitelisted['group'],
'option': each_whitelisted['option'],
@@ -1285,6 +1975,59 @@ class DomainConfigManager(manager.Manager):
"""
return self._get_config_with_sensitive_info(domain_id)
+ def get_config_default(self, group=None, option=None):
+ """Get default config, or partial default config.
+
+ :param group: an optional specific group of options
+ :param option: an optional specific option within the group
+
+ :returns: a dict of group dicts containing the default options,
+ filtered by group and option if specified
+ :raises keystone.exception.InvalidDomainConfig: when the config
+ and group/option parameters specify an option we do not
+ support (or one that is not whitelisted).
+
+ An example response::
+
+ {
+ 'ldap': {
+ 'url': 'myurl',
+ 'user_tree_dn': 'OU=myou',
+ ....},
+ 'identity': {
+ 'driver': 'ldap'}
+
+ }
+
+ """
+ def _option_dict(group, option):
+ group_attr = getattr(CONF, group)
+ if group_attr is None:
+ msg = _('Group %s not found in config') % group
+ raise exception.UnexpectedError(msg)
+ return {'group': group, 'option': option,
+ 'value': getattr(group_attr, option)}
+
+ self._assert_valid_group_and_option(group, option)
+ config_list = []
+ if group:
+ if option:
+ if option not in self.whitelisted_options[group]:
+ msg = _('Reading the default for option %(option)s in '
+ 'group %(group)s is not supported') % {
+ 'option': option, 'group': group}
+ raise exception.InvalidDomainConfig(reason=msg)
+ config_list.append(_option_dict(group, option))
+ else:
+ for each_option in self.whitelisted_options[group]:
+ config_list.append(_option_dict(group, each_option))
+ else:
+ for each_group in self.whitelisted_options:
+ for each_option in self.whitelisted_options[each_group]:
+ config_list.append(_option_dict(each_group, each_option))
+
+ return self._list_to_config(config_list, req_option=option)
+
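Assuming a loaded manager bound as domain_config_api (per the
@dependency.provider registration above), the new method could be exercised
like this sketch (not runnable standalone):

    defaults = domain_config_api.get_config_default()              # all groups
    ldap_defaults = domain_config_api.get_config_default('ldap')   # one group
    url_default = domain_config_api.get_config_default('ldap', 'url')
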
@six.add_metaclass(abc.ABCMeta)
class DomainConfigDriverV8(object):
@@ -1394,8 +2137,8 @@ class DomainConfigDriverV8(object):
:param type: type of registration
:returns: domain_id of who is registered.
- :raises: keystone.exception.ConfigRegistrationNotFound: nobody is
- registered.
+ :raises keystone.exception.ConfigRegistrationNotFound: If nobody is
+ registered.
"""
raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/resource/routers.py b/keystone-moon/keystone/resource/routers.py
index 8ccd10aa..d58474e2 100644
--- a/keystone-moon/keystone/resource/routers.py
+++ b/keystone-moon/keystone/resource/routers.py
@@ -88,6 +88,37 @@ class Routers(wsgi.RoutersBase):
'config_option')
})
+ self._add_resource(
+ mapper, config_controller,
+ path='/domains/config/default',
+ get_action='get_domain_config_default',
+ rel=json_home.build_v3_resource_relation('domain_config_default'),
+ status=json_home.Status.EXPERIMENTAL)
+
+ self._add_resource(
+ mapper, config_controller,
+ path='/domains/config/{group}/default',
+ get_action='get_domain_config_default',
+ rel=json_home.build_v3_resource_relation(
+ 'domain_config_default_group'),
+ status=json_home.Status.EXPERIMENTAL,
+ path_vars={
+ 'group': config_group_param
+ })
+
+ self._add_resource(
+ mapper, config_controller,
+ path='/domains/config/{group}/{option}/default',
+ get_action='get_domain_config_default',
+ rel=json_home.build_v3_resource_relation(
+ 'domain_config_default_option'),
+ status=json_home.Status.EXPERIMENTAL,
+ path_vars={
+ 'group': config_group_param,
+ 'option': json_home.build_v3_parameter_relation(
+ 'config_option')
+ })
+
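The three routes registered above could be exercised like this; the host,
port and token are placeholders, and the requests dependency is an
assumption:

    import requests

    base = 'http://keystone.example.com:5000/v3'
    headers = {'X-Auth-Token': 'ADMIN_TOKEN'}
    for path in ('/domains/config/default',
                 '/domains/config/ldap/default',
                 '/domains/config/ldap/url/default'):
        # each returns the whitelisted defaults described above
        print(requests.get(base + path, headers=headers).json())
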
routers.append(
router.Router(controllers.ProjectV3(),
'projects', 'project',
diff --git a/keystone-moon/keystone/resource/schema.py b/keystone-moon/keystone/resource/schema.py
index e26a9c4a..7e2cd667 100644
--- a/keystone-moon/keystone/resource/schema.py
+++ b/keystone-moon/keystone/resource/schema.py
@@ -16,10 +16,8 @@ from keystone.common.validation import parameter_types
_project_properties = {
'description': validation.nullable(parameter_types.description),
- # NOTE(lbragstad): domain_id isn't nullable according to some backends.
- # The identity-api should be updated to be consistent with the
- # implementation.
- 'domain_id': parameter_types.id_string,
+ # NOTE(htruta): domain_id is nullable for projects acting as a domain.
+ 'domain_id': validation.nullable(parameter_types.id_string),
'enabled': parameter_types.boolean,
'is_domain': parameter_types.boolean,
'parent_id': validation.nullable(parameter_types.id_string),
@@ -43,7 +41,7 @@ project_create = {
project_update = {
'type': 'object',
'properties': _project_properties,
- # NOTE(lbragstad) Make sure at least one property is being updated
+ # NOTE(lbragstad): Make sure at least one property is being updated
'minProperties': 1,
'additionalProperties': True
}
diff --git a/keystone-moon/keystone/revoke/__init__.py b/keystone-moon/keystone/revoke/__init__.py
new file mode 100644
index 00000000..6d4ee0bc
--- /dev/null
+++ b/keystone-moon/keystone/revoke/__init__.py
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.revoke.core import * # noqa
diff --git a/keystone-moon/keystone/revoke/backends/__init__.py b/keystone-moon/keystone/revoke/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/revoke/backends/__init__.py
diff --git a/keystone-moon/keystone/revoke/backends/sql.py b/keystone-moon/keystone/revoke/backends/sql.py
new file mode 100644
index 00000000..9f8a82db
--- /dev/null
+++ b/keystone-moon/keystone/revoke/backends/sql.py
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import sql
+from keystone.models import revoke_model
+from keystone import revoke
+
+
+class RevocationEvent(sql.ModelBase, sql.ModelDictMixin):
+ __tablename__ = 'revocation_event'
+ attributes = revoke_model.REVOKE_KEYS
+
+ # The id field is not going to be exposed to the outside world.
+ # It is, however, necessary for SQLAlchemy.
+ id = sql.Column(sql.Integer, primary_key=True, nullable=False)
+ domain_id = sql.Column(sql.String(64))
+ project_id = sql.Column(sql.String(64))
+ user_id = sql.Column(sql.String(64))
+ role_id = sql.Column(sql.String(64))
+ trust_id = sql.Column(sql.String(64))
+ consumer_id = sql.Column(sql.String(64))
+ access_token_id = sql.Column(sql.String(64))
+ issued_before = sql.Column(sql.DateTime(), nullable=False)
+ expires_at = sql.Column(sql.DateTime())
+ revoked_at = sql.Column(sql.DateTime(), nullable=False, index=True)
+ audit_id = sql.Column(sql.String(32))
+ audit_chain_id = sql.Column(sql.String(32))
+
+
+class Revoke(revoke.RevokeDriverV8):
+ def _flush_batch_size(self, dialect):
+ batch_size = 0
+ if dialect == 'ibm_db_sa':
+ # This functionality is limited to DB2, because
+ # it is necessary to prevent the transaction log
+ # from filling up, whereas at least some of the
+ # other supported databases do not support update
+ # queries with LIMIT subqueries nor do they appear
+ # to require the use of such queries when deleting
+ # large numbers of records at once.
+ batch_size = 100
+ # Limit of 100 is known to not fill a transaction log
+ # of default maximum size while not significantly
+ # impacting the performance of large token purges on
+ # systems where the maximum transaction log size has
+ # been increased beyond the default.
+ return batch_size
+
+ def _prune_expired_events(self):
+ oldest = revoke.revoked_before_cutoff_time()
+
+ with sql.session_for_write() as session:
+ dialect = session.bind.dialect.name
+ batch_size = self._flush_batch_size(dialect)
+ if batch_size > 0:
+ query = session.query(RevocationEvent.id)
+ query = query.filter(RevocationEvent.revoked_at < oldest)
+ query = query.limit(batch_size).subquery()
+ delete_query = (session.query(RevocationEvent).
+ filter(RevocationEvent.id.in_(query)))
+ while True:
+ rowcount = delete_query.delete(synchronize_session=False)
+ if rowcount == 0:
+ break
+ else:
+ query = session.query(RevocationEvent)
+ query = query.filter(RevocationEvent.revoked_at < oldest)
+ query.delete(synchronize_session=False)
+
+ session.flush()
+
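The batched-delete loop above, reduced to plain Python so the exit condition
is visible; the list stands in for the DB2 revocation_event rows:

    rows = list(range(250))          # 250 hypothetical expired events
    batch_size, passes = 100, 0
    while True:
        rowcount = len(rows[:batch_size])
        del rows[:batch_size]        # one LIMITed delete per pass
        passes += 1
        if rowcount == 0:            # same exit condition as above
            break
    assert passes == 4 and not rows  # three real batches plus the empty probe
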
+ def list_events(self, last_fetch=None):
+ with sql.session_for_read() as session:
+ query = session.query(RevocationEvent).order_by(
+ RevocationEvent.revoked_at)
+
+ if last_fetch:
+ query = query.filter(RevocationEvent.revoked_at > last_fetch)
+
+ events = [revoke_model.RevokeEvent(**e.to_dict()) for e in query]
+
+ return events
+
+ def revoke(self, event):
+ kwargs = dict()
+ for attr in revoke_model.REVOKE_KEYS:
+ kwargs[attr] = getattr(event, attr)
+ record = RevocationEvent(**kwargs)
+ with sql.session_for_write() as session:
+ session.add(record)
+ self._prune_expired_events()
diff --git a/keystone-moon/keystone/revoke/controllers.py b/keystone-moon/keystone/revoke/controllers.py
new file mode 100644
index 00000000..40151bae
--- /dev/null
+++ b/keystone-moon/keystone/revoke/controllers.py
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import timeutils
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone import exception
+from keystone.i18n import _
+
+
+@dependency.requires('revoke_api')
+class RevokeController(controller.V3Controller):
+ @controller.protected()
+ def list_revoke_events(self, context):
+ since = context['query_string'].get('since')
+ last_fetch = None
+ if since:
+ try:
+ last_fetch = timeutils.normalize_time(
+ timeutils.parse_isotime(since))
+ except ValueError:
+ raise exception.ValidationError(
+ message=_('invalid date format %s') % since)
+ events = self.revoke_api.list_events(last_fetch=last_fetch)
+ # Build the links by hand as the standard controller calls require ids
+ response = {'events': [event.to_dict() for event in events],
+ 'links': {
+ 'next': None,
+ 'self': RevokeController.base_url(
+ context,
+ path=context['path']),
+ 'previous': None}
+ }
+ return response
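
Assuming oslo_utils is available, the ?since= handling above boils down to:

    from oslo_utils import timeutils

    since = '2016-06-09T08:12:34Z'   # hypothetical query-string value
    last_fetch = timeutils.normalize_time(timeutils.parse_isotime(since))
    # last_fetch is a naive UTC datetime suitable for the revoked_at filter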
diff --git a/keystone-moon/keystone/revoke/core.py b/keystone-moon/keystone/revoke/core.py
new file mode 100644
index 00000000..64d2e998
--- /dev/null
+++ b/keystone-moon/keystone/revoke/core.py
@@ -0,0 +1,261 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Main entry point into the Revoke service."""
+
+import abc
+import datetime
+
+from oslo_config import cfg
+from oslo_log import versionutils
+from oslo_utils import timeutils
+import six
+
+from keystone.common import cache
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import manager
+from keystone import exception
+from keystone.i18n import _
+from keystone.models import revoke_model
+from keystone import notifications
+
+
+CONF = cfg.CONF
+
+
+EXTENSION_DATA = {
+ 'name': 'OpenStack Revoke API',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-REVOKE/v1.0',
+ 'alias': 'OS-REVOKE',
+ 'updated': '2014-02-24T20:51:0-00:00',
+ 'description': 'OpenStack revoked token reporting mechanism.',
+ 'links': [
+ {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://specs.openstack.org/openstack/keystone-specs/api/'
+ 'v3/identity-api-v3-os-revoke-ext.html',
+ }
+ ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+MEMOIZE = cache.get_memoization_decorator(group='revoke')
+
+
+def revoked_before_cutoff_time():
+ expire_delta = datetime.timedelta(
+ seconds=CONF.token.expiration + CONF.revoke.expiration_buffer)
+ oldest = timeutils.utcnow() - expire_delta
+ return oldest
+
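Worked with hypothetical config values (a one-hour token lifetime and a
30-minute buffer), the cutoff computes as:

    import datetime

    expiration, expiration_buffer = 3600, 1800   # assumed CONF values
    delta = datetime.timedelta(seconds=expiration + expiration_buffer)
    now = datetime.datetime(2016, 6, 9, 12, 0)   # fixed for the example
    assert now - delta == datetime.datetime(2016, 6, 9, 10, 30)
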
+
+@dependency.provider('revoke_api')
+class Manager(manager.Manager):
+ """Default pivot point for the Revoke backend.
+
+ Performs common logic for recording revocations.
+
+ See :mod:`keystone.common.manager.Manager` for more details on
+ how this dynamically calls the backend.
+
+ """
+
+ driver_namespace = 'keystone.revoke'
+
+ def __init__(self):
+ super(Manager, self).__init__(CONF.revoke.driver)
+ self._register_listeners()
+ self.model = revoke_model
+
+ def _user_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke_by_user(payload['resource_info'])
+
+ def _role_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(role_id=payload['resource_info']))
+
+ def _project_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(project_id=payload['resource_info']))
+
+ def _domain_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(domain_id=payload['resource_info']))
+
+ def _trust_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(trust_id=payload['resource_info']))
+
+ def _consumer_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(consumer_id=payload['resource_info']))
+
+ def _access_token_callback(self, service, resource_type, operation,
+ payload):
+ self.revoke(
+ revoke_model.RevokeEvent(access_token_id=payload['resource_info']))
+
+ def _role_assignment_callback(self, service, resource_type, operation,
+ payload):
+ info = payload['resource_info']
+ self.revoke_by_grant(role_id=info['role_id'], user_id=info['user_id'],
+ domain_id=info.get('domain_id'),
+ project_id=info.get('project_id'))
+
+ def _register_listeners(self):
+ callbacks = {
+ notifications.ACTIONS.deleted: [
+ ['OS-TRUST:trust', self._trust_callback],
+ ['OS-OAUTH1:consumer', self._consumer_callback],
+ ['OS-OAUTH1:access_token', self._access_token_callback],
+ ['role', self._role_callback],
+ ['user', self._user_callback],
+ ['project', self._project_callback],
+ ['role_assignment', self._role_assignment_callback]
+ ],
+ notifications.ACTIONS.disabled: [
+ ['user', self._user_callback],
+ ['project', self._project_callback],
+ ['domain', self._domain_callback],
+ ],
+ notifications.ACTIONS.internal: [
+ [notifications.INVALIDATE_USER_TOKEN_PERSISTENCE,
+ self._user_callback],
+ ]
+ }
+
+ for event, cb_info in callbacks.items():
+ for resource_type, callback_fns in cb_info:
+ notifications.register_event_callback(event, resource_type,
+ callback_fns)
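
A stand-alone miniature of the dispatch table built above; the handler and
payload are invented stand-ins for the real notification callbacks:

    def on_user_deleted(payload):
        return ('revoke-by-user', payload['resource_info'])

    callbacks = {'deleted': [['user', on_user_deleted]]}
    for event, cb_info in callbacks.items():
        for resource_type, callback_fn in cb_info:
            assert callback_fn({'resource_info': 'u1'}) == ('revoke-by-user',
                                                            'u1')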
+
+ def revoke_by_user(self, user_id):
+ return self.revoke(revoke_model.RevokeEvent(user_id=user_id))
+
+ def _assert_not_domain_and_project_scoped(self, domain_id=None,
+ project_id=None):
+ if domain_id is not None and project_id is not None:
+ msg = _('The revoke call must not have both domain_id and '
+ 'project_id. This is a bug in the Keystone server. The '
+ 'current request is aborted.')
+ raise exception.UnexpectedError(exception=msg)
+
+ @versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
+ remove_in=0)
+ def revoke_by_expiration(self, user_id, expires_at,
+ domain_id=None, project_id=None):
+
+ self._assert_not_domain_and_project_scoped(domain_id=domain_id,
+ project_id=project_id)
+
+ self.revoke(
+ revoke_model.RevokeEvent(user_id=user_id,
+ expires_at=expires_at,
+ domain_id=domain_id,
+ project_id=project_id))
+
+ def revoke_by_audit_id(self, audit_id):
+ self.revoke(revoke_model.RevokeEvent(audit_id=audit_id))
+
+ def revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
+ domain_id=None):
+
+ self._assert_not_domain_and_project_scoped(domain_id=domain_id,
+ project_id=project_id)
+
+ self.revoke(revoke_model.RevokeEvent(audit_chain_id=audit_chain_id,
+ domain_id=domain_id,
+ project_id=project_id))
+
+ def revoke_by_grant(self, role_id, user_id=None,
+ domain_id=None, project_id=None):
+ self.revoke(
+ revoke_model.RevokeEvent(user_id=user_id,
+ role_id=role_id,
+ domain_id=domain_id,
+ project_id=project_id))
+
+ def revoke_by_user_and_project(self, user_id, project_id):
+ self.revoke(
+ revoke_model.RevokeEvent(project_id=project_id, user_id=user_id))
+
+ def revoke_by_project_role_assignment(self, project_id, role_id):
+ self.revoke(revoke_model.RevokeEvent(project_id=project_id,
+ role_id=role_id))
+
+ def revoke_by_domain_role_assignment(self, domain_id, role_id):
+ self.revoke(revoke_model.RevokeEvent(domain_id=domain_id,
+ role_id=role_id))
+
+ @MEMOIZE
+ def _get_revoke_tree(self):
+ events = self.driver.list_events()
+ revoke_tree = revoke_model.RevokeTree(revoke_events=events)
+
+ return revoke_tree
+
+ def check_token(self, token_values):
+ """Check the values from a token against the revocation list.
+
+ :param token_values: dictionary of values from a token, normalized for
+ differences between v2 and v3. The checked values
+ are a subset of the attributes of model.TokenEvent
+
+ :raises keystone.exception.TokenNotFound: If the token is invalid.
+
+ """
+ if self._get_revoke_tree().is_revoked(token_values):
+ raise exception.TokenNotFound(_('Failed to validate token'))
+
+ def revoke(self, event):
+ self.driver.revoke(event)
+ self._get_revoke_tree.invalidate(self)
+
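The cache-then-invalidate pattern used by revoke() and _get_revoke_tree()
above, modelled with a hand-rolled memoizer instead of the real decorator:

    _cache, events = {}, []

    def get_tree():
        if 'tree' not in _cache:
            _cache['tree'] = set(events)   # rebuild from all stored events
        return _cache['tree']

    def revoke(event):
        events.append(event)
        _cache.pop('tree', None)           # invalidate; next read rebuilds

    revoke('token-1')
    assert 'token-1' in get_tree()
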
+
+@six.add_metaclass(abc.ABCMeta)
+class RevokeDriverV8(object):
+ """Interface for recording and reporting revocation events."""
+
+ @abc.abstractmethod
+ def list_events(self, last_fetch=None):
+ """Return the revocation events, as a list of objects.
+
+ :param last_fetch: Time of last fetch. Return all events newer.
+ :returns: A list of keystone.revoke.model.RevokeEvent
+ newer than `last_fetch`.
+ If no last_fetch is specified, returns all events
+ for tokens issued after the expiration cutoff.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+ @abc.abstractmethod
+ def revoke(self, event):
+ """Register a revocation event.
+
+ :param event: An instance of
+ keystone.revoke.model.RevocationEvent
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+
+
+Driver = manager.create_legacy_driver(RevokeDriverV8)
diff --git a/keystone-moon/keystone/revoke/model.py b/keystone-moon/keystone/revoke/model.py
new file mode 100644
index 00000000..28a8d07f
--- /dev/null
+++ b/keystone-moon/keystone/revoke/model.py
@@ -0,0 +1,13 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.models.revoke_model import * # noqa
diff --git a/keystone-moon/keystone/revoke/routers.py b/keystone-moon/keystone/revoke/routers.py
new file mode 100644
index 00000000..aab78493
--- /dev/null
+++ b/keystone-moon/keystone/revoke/routers.py
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone.revoke import controllers
+
+
+class Routers(wsgi.RoutersBase):
+
+ PATH_PREFIX = '/OS-REVOKE'
+
+ def append_v3_routers(self, mapper, routers):
+ revoke_controller = controllers.RevokeController()
+ self._add_resource(
+ mapper, revoke_controller,
+ path=self.PATH_PREFIX + '/events',
+ get_action='list_revoke_events',
+ rel=json_home.build_v3_extension_resource_relation(
+ 'OS-REVOKE', '1.0', 'events'))
diff --git a/keystone-moon/keystone/server/backends.py b/keystone-moon/keystone/server/backends.py
index ebe00a81..a518e777 100644
--- a/keystone-moon/keystone/server/backends.py
+++ b/keystone-moon/keystone/server/backends.py
@@ -14,15 +14,14 @@ from keystone import assignment
from keystone import auth
from keystone import catalog
from keystone.common import cache
-from keystone.contrib import endpoint_filter
-from keystone.contrib import federation
-from keystone.contrib import oauth1
-from keystone.contrib import revoke
from keystone import credential
from keystone import endpoint_policy
+from keystone import federation
from keystone import identity
+from keystone import oauth1
from keystone import policy
from keystone import resource
+from keystone import revoke
from keystone import token
from keystone import trust
@@ -30,12 +29,23 @@ from keystone import trust
def load_backends():
# Configure and build the cache
- cache.configure_cache_region(cache.REGION)
+ cache.configure_cache()
+ cache.configure_cache(region=catalog.COMPUTED_CATALOG_REGION)
+ cache.apply_invalidation_patch(
+ region=catalog.COMPUTED_CATALOG_REGION,
+ region_name=catalog.COMPUTED_CATALOG_REGION.name)
+ cache.configure_cache(region=assignment.COMPUTED_ASSIGNMENTS_REGION)
+ cache.apply_invalidation_patch(
+ region=assignment.COMPUTED_ASSIGNMENTS_REGION,
+ region_name=assignment.COMPUTED_ASSIGNMENTS_REGION.name)
# Ensure that the identity driver is created before the assignment manager
# and that the assignment driver is created before the resource manager.
# The default resource driver depends on assignment, which in turn
# depends on identity - hence we need to ensure the chain is available.
+ # TODO(morganfainberg): In "O" release move _IDENTITY_API to be directly
+ # instantiated in the DRIVERS dict once assignment driver being selected
+ # based upon [identity]/driver is removed.
_IDENTITY_API = identity.Manager()
_ASSIGNMENT_API = assignment.Manager()
@@ -44,12 +54,12 @@ def load_backends():
catalog_api=catalog.Manager(),
credential_api=credential.Manager(),
domain_config_api=resource.DomainConfigManager(),
- endpoint_filter_api=endpoint_filter.Manager(),
endpoint_policy_api=endpoint_policy.Manager(),
federation_api=federation.Manager(),
id_generator_api=identity.generator.Manager(),
id_mapping_api=identity.MappingManager(),
identity_api=_IDENTITY_API,
+ shadow_users_api=identity.ShadowUsersManager(),
oauth_api=oauth1.Manager(),
policy_api=policy.Manager(),
resource_api=resource.Manager(),
diff --git a/keystone-moon/keystone/server/common.py b/keystone-moon/keystone/server/common.py
index 7bc5958e..4b1ee469 100644
--- a/keystone-moon/keystone/server/common.py
+++ b/keystone-moon/keystone/server/common.py
@@ -15,9 +15,9 @@
from oslo_config import cfg
from oslo_log import log
+from keystone.common import config
from keystone.common import dependency
from keystone.common import sql
-from keystone import config
from keystone.i18n import _LW
from keystone.server import backends
@@ -30,7 +30,7 @@ def configure(version=None, config_files=None,
pre_setup_logging_fn=lambda: None):
config.configure()
sql.initialize()
- config.set_default_for_default_log_levels()
+ config.set_config_defaults()
CONF(project='keystone', version=version,
default_config_files=config_files)
@@ -38,9 +38,9 @@ def configure(version=None, config_files=None,
pre_setup_logging_fn()
config.setup_logging()
- if CONF.debug:
- LOG.warn(_LW(
- 'debug is enabled so responses may include sensitive '
+ if CONF.insecure_debug:
+ LOG.warning(_LW(
+ 'insecure_debug is enabled so responses may include sensitive '
'information.'))
diff --git a/keystone-moon/keystone/server/eventlet.py b/keystone-moon/keystone/server/eventlet.py
index 243f0234..e688baed 100644
--- a/keystone-moon/keystone/server/eventlet.py
+++ b/keystone-moon/keystone/server/eventlet.py
@@ -32,12 +32,12 @@ import pbr.version
oslo_i18n.enable_lazy()
+from keystone.common import config
from keystone.common import environment
from keystone.common import utils
-from keystone import config
from keystone.i18n import _
from keystone.server import common
-from keystone import service as keystone_service
+from keystone.version import service as keystone_service
CONF = cfg.CONF
diff --git a/keystone-moon/keystone/server/wsgi.py b/keystone-moon/keystone/server/wsgi.py
index ae24c48e..a62a8460 100644
--- a/keystone-moon/keystone/server/wsgi.py
+++ b/keystone-moon/keystone/server/wsgi.py
@@ -16,7 +16,6 @@ import logging
from oslo_config import cfg
import oslo_i18n
-import oslo_middleware.cors as cors
# NOTE(dstanek): i18n.enable_lazy() must be called before
@@ -26,28 +25,16 @@ import oslo_middleware.cors as cors
oslo_i18n.enable_lazy()
+from keystone.common import config
from keystone.common import environment
-from keystone import config
-import keystone.middleware.core as middleware_core
from keystone.server import common
-from keystone import service as keystone_service
+from keystone.version import service as keystone_service
CONF = cfg.CONF
-KEYSTONE_HEADERS = [
- middleware_core.AUTH_TOKEN_HEADER,
- middleware_core.SUBJECT_TOKEN_HEADER,
- 'X-Project-Id',
- 'X-Project-Name',
- 'X-Project-Domain-Id',
- 'X-Project-Domain-Name',
- 'X-Domain-Id',
- 'X-Domain-Name'
-]
-
-def initialize_application(name):
+def initialize_application(name, post_log_configured_function=lambda: None):
common.configure()
# Log the options used when starting if we're in debug mode...
@@ -56,21 +43,14 @@ def initialize_application(name):
environment.use_stdlib()
+ post_log_configured_function()
+
def loadapp():
return keystone_service.loadapp(
'config:%s' % config.find_paste_config(), name)
_unused, application = common.setup_backends(
startup_application_fn=loadapp)
-
- # Create a CORS wrapper, and attach keystone-specific defaults that must be
- # included in all CORS responses
- application = cors.CORS(application, CONF)
- application.set_latent(
- allow_headers=KEYSTONE_HEADERS,
- allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
- expose_headers=KEYSTONE_HEADERS
- )
return application
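initialize_application() now accepts an optional post_log_configured_function hook, invoked once logging is set up, and the hard-coded CORS wrapper is removed from this entry point (in Mitaka, CORS is wired up as middleware in the paste pipeline instead). A hedged sketch of a WSGI script using the new hook; the app name and callback body are illustrative:

    import logging

    from keystone.server import wsgi as keystone_wsgi

    def _announce():
        logging.getLogger(__name__).info('keystone WSGI application ready')

    application = keystone_wsgi.initialize_application(
        'admin', post_log_configured_function=_announce)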
diff --git a/keystone-moon/keystone/service.py b/keystone-moon/keystone/service.py
index 35b548fa..20869731 100644
--- a/keystone-moon/keystone/service.py
+++ b/keystone-moon/keystone/service.py
@@ -12,120 +12,50 @@
# License for the specific language governing permissions and limitations
# under the License.
-import functools
-import sys
-
-from oslo_config import cfg
-from oslo_log import log
-from paste import deploy
-import routes
-
-from keystone import assignment
-from keystone import auth
-from keystone import catalog
-from keystone.common import wsgi
-from keystone import controllers
-from keystone import credential
-from keystone import endpoint_policy
-from keystone import identity
-from keystone import policy
-from keystone import resource
-from keystone import routers
-from keystone import token
-from keystone import trust
-
-
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-
-def loadapp(conf, name):
- # NOTE(blk-u): Save the application being loaded in the controllers module.
- # This is similar to how public_app_factory() and v3_app_factory()
- # register the version with the controllers module.
- controllers.latest_app = deploy.loadapp(conf, name=name)
- return controllers.latest_app
-
-
-def fail_gracefully(f):
- """Logs exceptions and aborts."""
- @functools.wraps(f)
- def wrapper(*args, **kw):
- try:
- return f(*args, **kw)
- except Exception as e:
- LOG.debug(e, exc_info=True)
-
- # exception message is printed to all logs
- LOG.critical(e)
- sys.exit(1)
-
- return wrapper
-
-
-@fail_gracefully
+from oslo_log import versionutils
+import six
+
+from keystone.version import service
+
+
+def deprecated_to_version(f):
+ """Specialized deprecation wrapper for service module.
+
+ This wraps the standard deprecation wrapper and fills in the method
+ names automatically.
+
+ """
+ @six.wraps(f)
+ def wrapper(*args, **kwargs):
+ x = versionutils.deprecated(
+ what='keystone.service.' + f.__name__ + '()',
+ as_of=versionutils.deprecated.MITAKA,
+ remove_in=+2,
+ in_favor_of='keystone.version.service.' + f.__name__ + '()')
+ return x(f)
+ return wrapper()
+
+
+@deprecated_to_version
def public_app_factory(global_conf, **local_conf):
- controllers.register_version('v2.0')
- return wsgi.ComposingRouter(routes.Mapper(),
- [assignment.routers.Public(),
- token.routers.Router(),
- routers.VersionV2('public'),
- routers.Extension(False)])
+ return service.public_app_factory(global_conf, **local_conf)
-@fail_gracefully
+@deprecated_to_version
def admin_app_factory(global_conf, **local_conf):
- controllers.register_version('v2.0')
- return wsgi.ComposingRouter(routes.Mapper(),
- [identity.routers.Admin(),
- assignment.routers.Admin(),
- token.routers.Router(),
- resource.routers.Admin(),
- routers.VersionV2('admin'),
- routers.Extension()])
+ return service.admin_app_factory(global_conf, **local_conf)
-@fail_gracefully
+@deprecated_to_version
def public_version_app_factory(global_conf, **local_conf):
- return wsgi.ComposingRouter(routes.Mapper(),
- [routers.Versions('public')])
+ return service.public_version_app_factory(global_conf, **local_conf)
-@fail_gracefully
+@deprecated_to_version
def admin_version_app_factory(global_conf, **local_conf):
- return wsgi.ComposingRouter(routes.Mapper(),
- [routers.Versions('admin')])
+ return service.admin_version_app_factory(global_conf, **local_conf)
-@fail_gracefully
+@deprecated_to_version
def v3_app_factory(global_conf, **local_conf):
- controllers.register_version('v3')
- mapper = routes.Mapper()
- sub_routers = []
- _routers = []
-
- # NOTE(dstanek): Routers should be ordered by their frequency of use in
- # a live system. This is due to the routes implementation. The most
- # frequently used routers should appear first.
- router_modules = [auth,
- assignment,
- catalog,
- credential,
- identity,
- policy,
- resource]
-
- if CONF.trust.enabled:
- router_modules.append(trust)
-
- if CONF.endpoint_policy.enabled:
- router_modules.append(endpoint_policy)
-
- for module in router_modules:
- routers_instance = module.routers.Routers()
- _routers.append(routers_instance)
- routers_instance.append_v3_routers(mapper, sub_routers)
-
- # Add in the v3 version api
- sub_routers.append(routers.VersionV3('public', _routers))
- return wsgi.ComposingRouter(mapper, sub_routers)
+ return service.v3_app_factory(global_conf, **local_conf)
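keystone/service.py is now a thin shim: each factory logs a deprecation warning and delegates to keystone.version.service. A minimal sketch of the same pattern using oslo.log's versionutils directly (function names are illustrative):

    from oslo_log import versionutils

    def new_factory():
        return 'application'

    @versionutils.deprecated(
        what='old_factory()',
        as_of=versionutils.deprecated.MITAKA,
        remove_in=+2,
        in_favor_of='new_factory()')
    def old_factory():
        return new_factory()

    old_factory()  # emits a deprecation warning, then delegates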
diff --git a/keystone-moon/keystone/tests/common/__init__.py b/keystone-moon/keystone/tests/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/common/__init__.py
diff --git a/keystone-moon/keystone/tests/common/auth.py b/keystone-moon/keystone/tests/common/auth.py
new file mode 100644
index 00000000..547418cf
--- /dev/null
+++ b/keystone-moon/keystone/tests/common/auth.py
@@ -0,0 +1,109 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class AuthTestMixin(object):
+ """To hold auth building helper functions."""
+
+ def _build_auth_scope(self, project_id=None, project_name=None,
+ project_domain_id=None, project_domain_name=None,
+ domain_id=None, domain_name=None, trust_id=None,
+ unscoped=None):
+ scope_data = {}
+ if unscoped:
+ scope_data['unscoped'] = {}
+ if project_id or project_name:
+ scope_data['project'] = {}
+ if project_id:
+ scope_data['project']['id'] = project_id
+ else:
+ scope_data['project']['name'] = project_name
+ if project_domain_id or project_domain_name:
+ project_domain_json = {}
+ if project_domain_id:
+ project_domain_json['id'] = project_domain_id
+ else:
+ project_domain_json['name'] = project_domain_name
+ scope_data['project']['domain'] = project_domain_json
+ if domain_id or domain_name:
+ scope_data['domain'] = {}
+ if domain_id:
+ scope_data['domain']['id'] = domain_id
+ else:
+ scope_data['domain']['name'] = domain_name
+ if trust_id:
+ scope_data['OS-TRUST:trust'] = {}
+ scope_data['OS-TRUST:trust']['id'] = trust_id
+ return scope_data
+
+ def _build_auth(self, user_id=None, username=None, user_domain_id=None,
+ user_domain_name=None, **kwargs):
+
+ # NOTE(dstanek): just to ensure sanity in the tests
+ self.assertEqual(1, len(kwargs),
+ message='_build_auth requires 1 (and only 1) '
+ 'secret type and value')
+
+ secret_type, secret_value = list(kwargs.items())[0]
+
+ # NOTE(dstanek): just to ensure sanity in the tests
+ self.assertIn(secret_type, ('passcode', 'password'),
+ message="_build_auth only supports 'passcode' "
+ "and 'password' secret types")
+
+ data = {'user': {}}
+ if user_id:
+ data['user']['id'] = user_id
+ else:
+ data['user']['name'] = username
+ if user_domain_id or user_domain_name:
+ data['user']['domain'] = {}
+ if user_domain_id:
+ data['user']['domain']['id'] = user_domain_id
+ else:
+ data['user']['domain']['name'] = user_domain_name
+ data['user'][secret_type] = secret_value
+ return data
+
+ def _build_token_auth(self, token):
+ return {'id': token}
+
+ def build_authentication_request(self, token=None, user_id=None,
+ username=None, user_domain_id=None,
+ user_domain_name=None, password=None,
+ kerberos=False, passcode=None, **kwargs):
+ """Build auth dictionary.
+
+ It will create an auth dictionary based on all the arguments
+ that it receives.
+ """
+ auth_data = {}
+ auth_data['identity'] = {'methods': []}
+ if kerberos:
+ auth_data['identity']['methods'].append('kerberos')
+ auth_data['identity']['kerberos'] = {}
+ if token:
+ auth_data['identity']['methods'].append('token')
+ auth_data['identity']['token'] = self._build_token_auth(token)
+ if password and (user_id or username):
+ auth_data['identity']['methods'].append('password')
+ auth_data['identity']['password'] = self._build_auth(
+ user_id, username, user_domain_id, user_domain_name,
+ password=password)
+ if passcode and (user_id or username):
+ auth_data['identity']['methods'].append('totp')
+ auth_data['identity']['totp'] = self._build_auth(
+ user_id, username, user_domain_id, user_domain_name,
+ passcode=passcode)
+ if kwargs:
+ auth_data['scope'] = self._build_auth_scope(**kwargs)
+ return {'auth': auth_data}
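Tracing build_authentication_request() for a password authentication scoped to a project yields the standard v3 auth body. A worked example (the IDs are made up; testtools.TestCase is used because the mixin's sanity assertions rely on testtools' message= keyword):

    import testtools

    from keystone.tests.common.auth import AuthTestMixin

    class DemoAuth(testtools.TestCase, AuthTestMixin):
        def test_password_project_scope(self):
            body = self.build_authentication_request(
                user_id='u1', password='secret', project_id='p1')
            self.assertEqual(
                {'auth': {
                    'identity': {
                        'methods': ['password'],
                        'password': {
                            'user': {'id': 'u1', 'password': 'secret'}}},
                    'scope': {'project': {'id': 'p1'}}}},
                body)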
diff --git a/keystone-moon/keystone/tests/functional/core.py b/keystone-moon/keystone/tests/functional/core.py
new file mode 100644
index 00000000..2759412b
--- /dev/null
+++ b/keystone-moon/keystone/tests/functional/core.py
@@ -0,0 +1,85 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import requests
+import testtools
+
+from keystone.tests.common import auth as common_auth
+
+
+class BaseTestCase(testtools.TestCase, common_auth.AuthTestMixin):
+
+ request_headers = {'content-type': 'application/json'}
+
+ def setUp(self):
+ self.ADMIN_URL = os.environ.get('KSTEST_ADMIN_URL',
+ 'http://localhost:35357')
+ self.PUBLIC_URL = os.environ.get('KSTEST_PUBLIC_URL',
+ 'http://localhost:5000')
+ self.admin = {
+ 'name': os.environ.get('KSTEST_ADMIN_USERNAME', 'admin'),
+ 'password': os.environ.get('KSTEST_ADMIN_PASSWORD', ''),
+ 'domain_id': os.environ.get('KSTEST_ADMIN_DOMAIN_ID', 'default')
+ }
+
+ self.user = {
+ 'name': os.environ.get('KSTEST_USER_USERNAME', 'demo'),
+ 'password': os.environ.get('KSTEST_USER_PASSWORD', ''),
+ 'domain_id': os.environ.get('KSTEST_USER_DOMAIN_ID', 'default')
+ }
+
+ self.project_id = os.environ.get('KSTEST_PROJECT_ID')
+
+ super(BaseTestCase, self).setUp()
+
+ def _http_headers(self, token=None):
+ headers = {'content-type': 'application/json'}
+ if token:
+ headers['X-Auth-Token'] = token
+ return headers
+
+ def get_scoped_token_response(self, user):
+ """Convenience method so that we can test authenticated requests
+
+ :param user: A dictionary with user information like 'username',
+ 'password', 'domain_id'
+ :returns: urllib3.Response object
+
+ """
+ body = self.build_authentication_request(
+ username=user['name'], user_domain_name=user['domain_id'],
+ password=user['password'], project_id=self.project_id)
+ return requests.post(self.PUBLIC_URL + '/v3/auth/tokens',
+ headers=self.request_headers,
+ json=body)
+
+ def get_scoped_token(self, user):
+ """Convenience method for getting scoped token
+
+ This method doesn't do any token validaton.
+
+ :param user: A dictionary with user information like 'username',
+ 'password', 'domain_id'
+ :returns: An OpenStack token for further use
+ :rtype: str
+
+ """
+ r = self.get_scoped_token_response(user)
+ return r.headers.get('X-Subject-Token')
+
+ def get_scoped_admin_token(self):
+ return self.get_scoped_token(self.admin)
+
+ def get_scoped_user_token(self):
+ return self.get_scoped_token(self.user)
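Everything the functional base class needs (endpoints, credentials, project) comes from KSTEST_* environment variables, so the same tests can target any running keystone. A hedged usage sketch, assuming a deployment reachable on the default ports:

    # export KSTEST_PUBLIC_URL=http://localhost:5000
    # export KSTEST_ADMIN_USERNAME=admin
    # export KSTEST_ADMIN_PASSWORD=<password>
    # export KSTEST_PROJECT_ID=<project id>

    from keystone.tests.functional import core as functests

    class TestAdminToken(functests.BaseTestCase):
        def test_token_issued(self):
            self.assertIsNotNone(self.get_scoped_admin_token())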
diff --git a/keystone-moon/keystone/tests/functional/shared/test_running.py b/keystone-moon/keystone/tests/functional/shared/test_running.py
index aed48ac2..1b46b32d 100644
--- a/keystone-moon/keystone/tests/functional/shared/test_running.py
+++ b/keystone-moon/keystone/tests/functional/shared/test_running.py
@@ -13,38 +13,46 @@
import requests
import testtools.matchers
+from keystone.tests.functional import core as functests
+
is_multiple_choices = testtools.matchers.Equals(
requests.status_codes.codes.multiple_choices)
is_ok = testtools.matchers.Equals(requests.status_codes.codes.ok)
-admin_url = 'http://localhost:35357'
-public_url = 'http://localhost:5000'
versions = ('v2.0', 'v3')
-class TestServerRunning(testtools.TestCase):
+class TestServerRunning(functests.BaseTestCase):
def test_admin_responds_with_multiple_choices(self):
- resp = requests.get(admin_url)
+ resp = requests.get(self.ADMIN_URL)
self.assertThat(resp.status_code, is_multiple_choices)
def test_admin_versions(self):
for version in versions:
- resp = requests.get(admin_url + '/' + version)
+ resp = requests.get(self.ADMIN_URL + '/' + version)
self.assertThat(
resp.status_code,
testtools.matchers.Annotate(
'failed for version %s' % version, is_ok))
def test_public_responds_with_multiple_choices(self):
- resp = requests.get(public_url)
+ resp = requests.get(self.PUBLIC_URL)
self.assertThat(resp.status_code, is_multiple_choices)
def test_public_versions(self):
for version in versions:
- resp = requests.get(public_url + '/' + version)
+ resp = requests.get(self.PUBLIC_URL + '/' + version)
self.assertThat(
resp.status_code,
testtools.matchers.Annotate(
'failed for version %s' % version, is_ok))
+
+ def test_get_user_token(self):
+ token = self.get_scoped_user_token()
+ self.assertIsNotNone(token)
+
+ def test_get_admin_token(self):
+ token = self.get_scoped_admin_token()
+ self.assertIsNotNone(token)
diff --git a/keystone-moon/keystone/tests/hacking/checks.py b/keystone-moon/keystone/tests/hacking/checks.py
index 17bafff3..581dbcf9 100644
--- a/keystone-moon/keystone/tests/hacking/checks.py
+++ b/keystone-moon/keystone/tests/hacking/checks.py
@@ -126,14 +126,21 @@ class CheckForAssertingNoneEquality(BaseASTChecker):
# NOTE(dstanek): I wrote this in a verbose way to make it easier to
# read for those that have little experience with Python's AST.
+ def _is_None(node):
+ if six.PY3:
+ return (isinstance(node, ast.NameConstant)
+ and node.value is None)
+ else:
+ return isinstance(node, ast.Name) and node.id == 'None'
+
if isinstance(node.func, ast.Attribute):
if node.func.attr == 'assertEqual':
for arg in node.args:
- if isinstance(arg, ast.Name) and arg.id == 'None':
+ if _is_None(arg):
self.add_error(node, message=self.CHECK_DESC_IS)
elif node.func.attr == 'assertNotEqual':
for arg in node.args:
- if isinstance(arg, ast.Name) and arg.id == 'None':
+ if _is_None(arg):
self.add_error(node, message=self.CHECK_DESC_ISNOT)
super(CheckForAssertingNoneEquality, self).generic_visit(node)
@@ -144,6 +151,7 @@ class CheckForLoggingIssues(BaseASTChecker):
DEBUG_CHECK_DESC = 'K005 Using translated string in debug logging'
NONDEBUG_CHECK_DESC = 'K006 Not using translating helper for logging'
EXCESS_HELPER_CHECK_DESC = 'K007 Using hints when _ is necessary'
+ USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn'
LOG_MODULES = ('logging', 'oslo_log.log')
I18N_MODULES = (
'keystone.i18n._',
@@ -155,7 +163,6 @@ class CheckForLoggingIssues(BaseASTChecker):
TRANS_HELPER_MAP = {
'debug': None,
'info': '_LI',
- 'warn': '_LW',
'warning': '_LW',
'error': '_LE',
'exception': '_LE',
@@ -186,9 +193,7 @@ class CheckForLoggingIssues(BaseASTChecker):
self.visit(value)
def _filter_imports(self, module_name, alias):
- """Keeps lists of logging and i18n imports
-
- """
+ """Keeps lists of logging and i18n imports."""
if module_name in self.LOG_MODULES:
self.logger_module_names.append(alias.asname or alias.name)
elif module_name in self.I18N_MODULES:
@@ -284,10 +289,7 @@ class CheckForLoggingIssues(BaseASTChecker):
return super(CheckForLoggingIssues, self).generic_visit(node)
def visit_Call(self, node):
- """Look for the 'LOG.*' calls.
-
- """
-
+ """Look for the 'LOG.*' calls."""
# obj.method
if isinstance(node.func, ast.Attribute):
obj_name = self._find_name(node.func.value)
@@ -299,13 +301,18 @@ class CheckForLoggingIssues(BaseASTChecker):
else: # could be Subscript, Call or many more
return super(CheckForLoggingIssues, self).generic_visit(node)
+ # if this is a logger call, flag use of the deprecated "warn" method
+ if obj_name in self.logger_names and method_name == 'warn':
+ msg = node.args[0] # first arg to a logging method is the msg
+ self.add_error(msg, message=self.USING_DEPRECATED_WARN)
+
# must be a logger instance and one of the support logging methods
if (obj_name not in self.logger_names
or method_name not in self.TRANS_HELPER_MAP):
return super(CheckForLoggingIssues, self).generic_visit(node)
# the call must have arguments
- if not len(node.args):
+ if not node.args:
return super(CheckForLoggingIssues, self).generic_visit(node)
if method_name == 'debug':
@@ -364,7 +371,7 @@ class CheckForLoggingIssues(BaseASTChecker):
# because:
# 1. We have code like this that we'll fix when dealing with the %:
# msg = _('....') % {}
- # LOG.warn(msg)
+ # LOG.warning(msg)
# 2. We also do LOG.exception(e) in several places. I'm not sure
# exactly what we should be doing about that.
if msg.id not in self.assignments:
@@ -391,15 +398,19 @@ class CheckForLoggingIssues(BaseASTChecker):
peers = find_peers(node)
for peer in peers:
if isinstance(peer, ast.Raise):
- if (isinstance(peer.type, ast.Call) and
- len(peer.type.args) > 0 and
- isinstance(peer.type.args[0], ast.Name) and
- name in (a.id for a in peer.type.args)):
+ if six.PY3:
+ exc = peer.exc
+ else:
+ exc = peer.type
+ if (isinstance(exc, ast.Call) and
+ len(exc.args) > 0 and
+ isinstance(exc.args[0], ast.Name) and
+ name in (a.id for a in exc.args)):
return True
else:
return False
elif isinstance(peer, ast.Assign):
- if name in (t.id for t in peer.targets):
+ if name in (t.id for t in peer.targets if hasattr(t, 'id')):
return False
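The new K009 check flags the deprecated Logger.warn spelling while leaving Logger.warning alone; 'warn' was also dropped from TRANS_HELPER_MAP so it is no longer treated as a supported logging method. Illustrative input for the checker (not keystone code):

    from oslo_log import log

    from keystone.i18n import _LW

    LOG = log.getLogger(__name__)

    LOG.warn(_LW('token has expired'))     # flagged: K009 deprecated Logger.warn
    LOG.warning(_LW('token has expired'))  # accepted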
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
index 9775047d..59eb3d25 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_configuration.py
@@ -57,7 +57,7 @@ class TestConfigurationManager(tests.TestCase):
self.config_fixture.config(
group='moon',
tenant_driver='keystone.contrib.moon.backends.sql.TenantConnector')
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
index ca6ef93e..f32df5dd 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_admin.py
@@ -65,7 +65,7 @@ class TestIntraExtensionAdminManagerOK(tests.TestCase):
def config_overrides(self):
super(TestIntraExtensionAdminManagerOK, self).config_overrides()
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
@@ -989,7 +989,7 @@ class TestIntraExtensionAdminManagerKO(tests.TestCase):
def config_overrides(self):
super(TestIntraExtensionAdminManagerKO, self).config_overrides()
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
index 8fa46268..13d9dcd1 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_intra_extension_authz.py
@@ -64,7 +64,7 @@ class TestIntraExtensionAuthzManagerAuthzOK(tests.TestCase):
def config_overrides(self):
super(TestIntraExtensionAuthzManagerAuthzOK, self).config_overrides()
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
@@ -975,7 +975,7 @@ class TestIntraExtensionAuthzManagerAuthzKO(tests.TestCase):
def config_overrides(self):
super(TestIntraExtensionAuthzManagerAuthzKO, self).config_overrides()
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.root_policy_directory = 'policy_root'
self.config_fixture.config(
group='moon',
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
index 143e7c8b..49886d32 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_log.py
@@ -66,7 +66,7 @@ class TestIntraExtensionAdminManager(tests.TestCase):
def config_overrides(self):
super(TestIntraExtensionAdminManager, self).config_overrides()
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
diff --git a/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py b/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
index c2f60424..47b0df8f 100644
--- a/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
+++ b/keystone-moon/keystone/tests/moon/unit/test_unit_core_tenant.py
@@ -66,7 +66,7 @@ class TestTenantManager(tests.TestCase):
self.config_fixture.config(
group='moon',
tenant_driver='keystone.contrib.moon.backends.sql.TenantConnector')
- self.policy_directory = 'examples/moon/policies'
+ self.policy_directory = '/etc/keystone/policies'
self.config_fixture.config(
group='moon',
intraextension_driver='keystone.contrib.moon.backends.sql.IntraExtensionConnector')
diff --git a/keystone-moon/keystone/tests/unit/__init__.py b/keystone-moon/keystone/tests/unit/__init__.py
index 52af8dfc..0e92ca65 100644
--- a/keystone-moon/keystone/tests/unit/__init__.py
+++ b/keystone-moon/keystone/tests/unit/__init__.py
@@ -13,6 +13,25 @@
# under the License.
import oslo_i18n
+import six
+
+
+if six.PY3:
+ # NOTE(dstanek): This block will monkey patch libraries that are not
+ # yet supported in Python3. We do this that that it is possible to
+ # execute any tests at all. Without monkey patching modules the
+ # tests will fail with import errors.
+
+ import sys
+ from unittest import mock # noqa: our import detection is naive?
+
+ sys.modules['ldap'] = mock.Mock()
+ sys.modules['ldap.controls'] = mock.Mock()
+ sys.modules['ldap.dn'] = mock.Mock()
+ sys.modules['ldap.filter'] = mock.Mock()
+ sys.modules['ldap.modlist'] = mock.Mock()
+ sys.modules['ldappool'] = mock.Mock()
+
# NOTE(dstanek): oslo_i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
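The Python 3 block above stubs out the C-based LDAP modules before anything imports them, so the rest of the unit test suite can at least be collected without python-ldap. A sketch of the observable effect (illustrative only):

    import sys

    import six

    import keystone.tests.unit  # noqa: installs the mocks as a side effect

    if six.PY3:
        # 'import ldap' now resolves to a mock.Mock instead of failing
        import ldap
        assert 'ldap' in sys.modules and 'ldappool' in sys.modules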
diff --git a/keystone-moon/keystone/tests/unit/assignment/__init__.py b/keystone-moon/keystone/tests/unit/assignment/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/assignment/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/assignment/role_backends/__init__.py b/keystone-moon/keystone/tests/unit/assignment/role_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/assignment/role_backends/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/assignment/role_backends/test_sql.py b/keystone-moon/keystone/tests/unit/assignment/role_backends/test_sql.py
new file mode 100644
index 00000000..37e2d924
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/assignment/role_backends/test_sql.py
@@ -0,0 +1,112 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone.common import sql
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit.assignment import test_core
+from keystone.tests.unit.backend import core_sql
+
+
+class SqlRoleModels(core_sql.BaseBackendSqlModels):
+
+ def test_role_model(self):
+ cols = (('id', sql.String, 64),
+ ('name', sql.String, 255),
+ ('domain_id', sql.String, 64))
+ self.assertExpectedSchema('role', cols)
+
+
+class SqlRole(core_sql.BaseBackendSqlTests, test_core.RoleTests):
+
+ def test_create_null_role_name(self):
+ role = unit.new_role_ref(name=None)
+ self.assertRaises(exception.UnexpectedError,
+ self.role_api.create_role,
+ role['id'],
+ role)
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ role['id'])
+
+ def test_create_duplicate_role_domain_specific_name_fails(self):
+ domain = unit.new_domain_ref()
+ role1 = unit.new_role_ref(domain_id=domain['id'])
+ self.role_api.create_role(role1['id'], role1)
+ role2 = unit.new_role_ref(name=role1['name'],
+ domain_id=domain['id'])
+ self.assertRaises(exception.Conflict,
+ self.role_api.create_role,
+ role2['id'],
+ role2)
+
+ def test_update_domain_id_of_role_fails(self):
+ # Create a global role
+ role1 = unit.new_role_ref()
+ role1 = self.role_api.create_role(role1['id'], role1)
+ # Try and update it to be domain specific
+ domainA = unit.new_domain_ref()
+ role1['domain_id'] = domainA['id']
+ self.assertRaises(exception.ValidationError,
+ self.role_api.update_role,
+ role1['id'],
+ role1)
+
+ # Create a domain specific role from scratch
+ role2 = unit.new_role_ref(domain_id=domainA['id'])
+ self.role_api.create_role(role2['id'], role2)
+ # Try to "move" it to another domain
+ domainB = unit.new_domain_ref()
+ role2['domain_id'] = domainB['id']
+ self.assertRaises(exception.ValidationError,
+ self.role_api.update_role,
+ role2['id'],
+ role2)
+ # Now try to make it global
+ role2['domain_id'] = None
+ self.assertRaises(exception.ValidationError,
+ self.role_api.update_role,
+ role2['id'],
+ role2)
+
+ def test_domain_specific_separation(self):
+ domain1 = unit.new_domain_ref()
+ role1 = unit.new_role_ref(domain_id=domain1['id'])
+ role_ref1 = self.role_api.create_role(role1['id'], role1)
+ self.assertDictEqual(role1, role_ref1)
+ # Check we can have the same named role in a different domain
+ domain2 = unit.new_domain_ref()
+ role2 = unit.new_role_ref(name=role1['name'], domain_id=domain2['id'])
+ role_ref2 = self.role_api.create_role(role2['id'], role2)
+ self.assertDictEqual(role2, role_ref2)
+ # ...and in fact that you can have the same named role as a global role
+ role3 = unit.new_role_ref(name=role1['name'])
+ role_ref3 = self.role_api.create_role(role3['id'], role3)
+ self.assertDictEqual(role3, role_ref3)
+ # Check that updating one doesn't change the others
+ role1['name'] = uuid.uuid4().hex
+ self.role_api.update_role(role1['id'], role1)
+ role_ref1 = self.role_api.get_role(role1['id'])
+ self.assertDictEqual(role1, role_ref1)
+ role_ref2 = self.role_api.get_role(role2['id'])
+ self.assertDictEqual(role2, role_ref2)
+ role_ref3 = self.role_api.get_role(role3['id'])
+ self.assertDictEqual(role3, role_ref3)
+ # Check that deleting one of these, doesn't affect the others
+ self.role_api.delete_role(role1['id'])
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ role1['id'])
+ self.role_api.get_role(role2['id'])
+ self.role_api.get_role(role3['id'])
diff --git a/keystone-moon/keystone/tests/unit/assignment/test_backends.py b/keystone-moon/keystone/tests/unit/assignment/test_backends.py
new file mode 100644
index 00000000..eb40e569
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/assignment/test_backends.py
@@ -0,0 +1,3755 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo_config import cfg
+from six.moves import range
+from testtools import matchers
+
+from keystone import exception
+from keystone.tests import unit
+
+
+CONF = cfg.CONF
+
+
+class AssignmentTestHelperMixin(object):
+ """Mixin class to aid testing of assignments.
+
+ This class supports data driven test plans that enable:
+
+ - Creation of initial entities, such as domains, users, groups, projects
+ and roles
+ - Creation of assignments referencing the above entities
+ - A set of input parameters and expected outputs to list_role_assignments
+ based on the above test data
+
+ A test plan is a dict of the form:
+
+ test_plan = {
+ entities: details and number of entities,
+ group_memberships: group-user entity memberships,
+ assignments: list of assignments to create,
+ tests: list of pairs of input params and expected outputs}
+
+ An example test plan:
+
+ test_plan = {
+ # First, create the entities required. Entities are specified by
+ # a dict with the key being the entity type and the value an
+ # entity specification which can be one of:
+ #
+ # - a simple number, e.g. {'users': 3} creates 3 users
+ # - a dict where more information regarding the contents of the entity
+ # is required, e.g. {'domains': {'users': 3}} creates a domain
+ # with three users
+ # - a list of entity specifications if multiple are required
+ #
+ # The following creates a domain that contains a single user, group and
+ # project, as well as creating three roles.
+
+ 'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1},
+ 'roles': 3},
+
+ # If it is required that an existing domain be used for the new
+ # entities, then the id of that domain can be included in the
+ # domain dict. For example, if alternatively we wanted to add 3 users
+ # to the default domain, add a second domain containing 3 projects as
+ # well as 5 additional empty domains, the entities would be defined as:
+ #
+ # 'entities': {'domains': [{'id': DEFAULT_DOMAIN, 'users': 3},
+ # {'projects': 3}, 5]},
+ #
+ # A project hierarchy can be specified within the 'projects' section by
+ # nesting the 'project' key, for example to create a project with three
+ # sub-projects you would use:
+
+ 'projects': {'project': 3}
+
+ # A more complex hierarchy can also be defined, for example the
+ # following would define three projects each containing a
+ # sub-project, each of which contain a further three sub-projects.
+
+ 'projects': [{'project': {'project': 3}},
+ {'project': {'project': 3}},
+ {'project': {'project': 3}}]
+
+ # If the 'roles' entity count is defined as a top-level key in the
+ # 'entities' dict then these are global roles. If it is placed within
+ # the 'domain' dict, then they will be domain specific roles. A mix of
+ # domain specific and global roles is allowed, with the role index
+ # being calculated in the order they are defined in the 'entities'
+ # dict.
+
+ # A set of implied role specifications. In this case, prior role
+ # index 0 implies role index 1, and role 1 implies roles 2 and 3.
+
+ 'roles': [{'role': 0, 'implied_roles': [1]},
+ {'role': 1, 'implied_roles': [2, 3]}]
+
+ # A list of groups and their members. In this case make users with
+ # index 0 and 1 members of group with index 0. Users and Groups are
+ # indexed in the order they appear in the 'entities' key above.
+
+ 'group_memberships': [{'group': 0, 'users': [0, 1]}]
+
+ # Next, create assignments between the entities, referencing the
+ # entities by index, i.e. 'user': 0 refers to user[0]. Entities are
+ # indexed in the order they appear in the 'entities' key above within
+ # their entity type.
+
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 2, 'domain': 0},
+ {'user': 0, 'role': 2, 'project': 0}],
+
+ # Finally, define an array of tests where list_role_assignment() is
+ # called with the given input parameters and the results are then
+ # confirmed to be as given in 'results'. Again, all entities are
+ # referenced by index.
+
+ 'tests': [
+ {'params': {},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 2, 'domain': 0},
+ {'user': 0, 'role': 2, 'project': 0}]},
+ {'params': {'role': 2},
+ 'results': [{'group': 0, 'role': 2, 'domain': 0},
+ {'user': 0, 'role': 2, 'project': 0}]}]
+
+ # The 'params' key also supports the 'effective',
+ # 'inherited_to_projects' and 'source_from_group_ids' options to
+ # list_role_assignments.}
+
+ """
+
+ def _handle_project_spec(self, test_data, domain_id, project_spec,
+ parent_id=None):
+ """Handle the creation of a project or hierarchy of projects.
+
+ project_spec may either be a count of the number of projects to
+ create, or it may be a list of the form:
+
+ [{'project': project_spec}, {'project': project_spec}, ...]
+
+ This method is called recursively to handle the creation of a
+ hierarchy of projects.
+
+ """
+ def _create_project(domain_id, parent_id):
+ new_project = unit.new_project_ref(domain_id=domain_id,
+ parent_id=parent_id)
+ new_project = self.resource_api.create_project(new_project['id'],
+ new_project)
+ return new_project
+
+ if isinstance(project_spec, list):
+ for this_spec in project_spec:
+ self._handle_project_spec(
+ test_data, domain_id, this_spec, parent_id=parent_id)
+ elif isinstance(project_spec, dict):
+ new_proj = _create_project(domain_id, parent_id)
+ test_data['projects'].append(new_proj)
+ self._handle_project_spec(
+ test_data, domain_id, project_spec['project'],
+ parent_id=new_proj['id'])
+ else:
+ for _ in range(project_spec):
+ test_data['projects'].append(
+ _create_project(domain_id, parent_id))
+
+ def _create_role(self, domain_id=None):
+ new_role = unit.new_role_ref(domain_id=domain_id)
+ return self.role_api.create_role(new_role['id'], new_role)
+
+ def _handle_domain_spec(self, test_data, domain_spec):
+ """Handle the creation of domains and their contents.
+
+ domain_spec may either be a count of the number of empty domains to
+ create, a dict describing the domain contents, or a list of
+ domain_specs.
+
+ In the case when a list is provided, this method calls itself
+ recursively to handle the list elements.
+
+ This method will insert any entities created into test_data
+
+ """
+ def _create_domain(domain_id=None):
+ if domain_id is None:
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'],
+ new_domain)
+ return new_domain
+ else:
+ # The test plan specified an existing domain to use
+ return self.resource_api.get_domain(domain_id)
+
+ def _create_entity_in_domain(entity_type, domain_id):
+ """Create a user or group entity in the domain."""
+ if entity_type == 'users':
+ new_entity = unit.new_user_ref(domain_id=domain_id)
+ new_entity = self.identity_api.create_user(new_entity)
+ elif entity_type == 'groups':
+ new_entity = unit.new_group_ref(domain_id=domain_id)
+ new_entity = self.identity_api.create_group(new_entity)
+ elif entity_type == 'roles':
+ new_entity = self._create_role(domain_id=domain_id)
+ else:
+ # Must be a bad test plan
+ raise exception.NotImplemented()
+ return new_entity
+
+ if isinstance(domain_spec, list):
+ for x in domain_spec:
+ self._handle_domain_spec(test_data, x)
+ elif isinstance(domain_spec, dict):
+ # If there is a domain ID specified, then use it
+ the_domain = _create_domain(domain_spec.get('id'))
+ test_data['domains'].append(the_domain)
+ for entity_type, value in domain_spec.items():
+ if entity_type == 'id':
+ # We already used this above to determine whether to
+ # use an existing domain
+ continue
+ if entity_type == 'projects':
+ # If it's projects, we need to handle the potential
+ # specification of a project hierarchy
+ self._handle_project_spec(
+ test_data, the_domain['id'], value)
+ else:
+ # It's a count of number of entities
+ for _ in range(value):
+ test_data[entity_type].append(
+ _create_entity_in_domain(
+ entity_type, the_domain['id']))
+ else:
+ for _ in range(domain_spec):
+ test_data['domains'].append(_create_domain())
+
+ def create_entities(self, entity_pattern):
+ """Create the entities specified in the test plan.
+
+ Process the 'entities' key in the test plan, creating the requested
+ entities. Each created entity will be added to the array of entities
+ stored in the returned test_data object, e.g.:
+
+ test_data['users'] = [user[0], user[1]....]
+
+ """
+ test_data = {}
+ for entity in ['users', 'groups', 'domains', 'projects', 'roles']:
+ test_data[entity] = []
+
+ # Create any domains requested and, if specified, any entities within
+ # those domains
+ if 'domains' in entity_pattern:
+ self._handle_domain_spec(test_data, entity_pattern['domains'])
+
+ # Create any roles requested
+ if 'roles' in entity_pattern:
+ for _ in range(entity_pattern['roles']):
+ test_data['roles'].append(self._create_role())
+
+ return test_data
+
+ def _convert_entity_shorthand(self, key, shorthand_data, reference_data):
+ """Convert a shorthand entity description into a full ID reference.
+
+ In test plan definitions, we allow a shorthand for referencing an
+ entity of the form:
+
+ 'user': 0
+
+ which is actually shorthand for:
+
+ 'user_id': reference_data['users'][0]['id']
+
+ This method converts the shorthand version into the full reference.
+
+ """
+ expanded_key = '%s_id' % key
+ reference_index = '%ss' % key
+ index_value = (
+ reference_data[reference_index][shorthand_data[key]]['id'])
+ return expanded_key, index_value
+
+ def create_implied_roles(self, implied_pattern, test_data):
+ """Create the implied roles specified in the test plan."""
+ for implied_spec in implied_pattern:
+ # Each implied role specification is a dict of the form:
+ #
+ # {'role': 0, 'implied_roles': list of roles}
+
+ prior_role = test_data['roles'][implied_spec['role']]['id']
+ if isinstance(implied_spec['implied_roles'], list):
+ for this_role in implied_spec['implied_roles']:
+ implied_role = test_data['roles'][this_role]['id']
+ self.role_api.create_implied_role(prior_role, implied_role)
+ else:
+ implied_role = (
+ test_data['roles'][implied_spec['implied_roles']]['id'])
+ self.role_api.create_implied_role(prior_role, implied_role)
+
+ def create_group_memberships(self, group_pattern, test_data):
+ """Create the group memberships specified in the test plan."""
+ for group_spec in group_pattern:
+ # Each membership specification is a dict of the form:
+ #
+ # {'group': 0, 'users': [list of user indexes]}
+ #
+ # Add all users in the list to the specified group, first
+ # converting from index to full entity ID.
+ group_value = test_data['groups'][group_spec['group']]['id']
+ for user_index in group_spec['users']:
+ user_value = test_data['users'][user_index]['id']
+ self.identity_api.add_user_to_group(user_value, group_value)
+ return test_data
+
+ def create_assignments(self, assignment_pattern, test_data):
+ """Create the assignments specified in the test plan."""
+ # First store how many assignments are already in the system,
+ # so during the tests we can check the number of new assignments
+ # created.
+ test_data['initial_assignment_count'] = (
+ len(self.assignment_api.list_role_assignments()))
+
+ # Now create the new assignments in the test plan
+ for assignment in assignment_pattern:
+ # Each assignment is a dict of the form:
+ #
+ # { 'user': 0, 'project':1, 'role': 6}
+ #
+ # where the value of each item is the index into the array of
+ # entities created earlier.
+ #
+ # We process the assignment dict to create the args required to
+ # make the create_grant() call.
+ args = {}
+ for param in assignment:
+ if param == 'inherited_to_projects':
+ args[param] = assignment[param]
+ else:
+ # Turn 'entity : 0' into 'entity_id = ac6736ba873d'
+ # where entity is one of user, group, project or domain
+ key, value = self._convert_entity_shorthand(
+ param, assignment, test_data)
+ args[key] = value
+ self.assignment_api.create_grant(**args)
+ return test_data
+
+ def execute_assignment_cases(self, test_plan, test_data):
+ """Execute the test plan, based on the created test_data."""
+ def check_results(expected, actual, param_arg_count):
+ if param_arg_count == 0:
+ # It was an unfiltered call, so default fixture assignments
+ # might be polluting our answer - so we take into account
+ # how many assignments there were before the test.
+ self.assertEqual(
+ len(expected) + test_data['initial_assignment_count'],
+ len(actual))
+ else:
+ self.assertThat(actual, matchers.HasLength(len(expected)))
+
+ for each_expected in expected:
+ expected_assignment = {}
+ for param in each_expected:
+ if param == 'inherited_to_projects':
+ expected_assignment[param] = each_expected[param]
+ elif param == 'indirect':
+ # We're expecting the result to contain an indirect
+ # dict with the details how the role came to be placed
+ # on this entity - so convert the key/value pairs of
+ # that dict into real entity references.
+ indirect_term = {}
+ for indirect_param in each_expected[param]:
+ key, value = self._convert_entity_shorthand(
+ indirect_param, each_expected[param],
+ test_data)
+ indirect_term[key] = value
+ expected_assignment[param] = indirect_term
+ else:
+ # Convert a simple shorthand entry into a full
+ # entity reference
+ key, value = self._convert_entity_shorthand(
+ param, each_expected, test_data)
+ expected_assignment[key] = value
+ self.assertIn(expected_assignment, actual)
+
+ def convert_group_ids_sourced_from_list(index_list, reference_data):
+ value_list = []
+ for group_index in index_list:
+ value_list.append(
+ reference_data['groups'][group_index]['id'])
+ return value_list
+
+ # Go through each test in the array, processing the input params, which
+ # we build into an args dict, and then call list_role_assignments. Then
+ # check the results against those specified in the test plan.
+ for test in test_plan.get('tests', []):
+ args = {}
+ for param in test['params']:
+ if param in ['effective', 'inherited', 'include_subtree']:
+ # Just pass the value into the args
+ args[param] = test['params'][param]
+ elif param == 'source_from_group_ids':
+ # Convert the list of indexes into a list of IDs
+ args[param] = convert_group_ids_sourced_from_list(
+ test['params']['source_from_group_ids'], test_data)
+ else:
+ # Turn 'entity : 0' into 'entity_id = ac6736ba873d'
+ # where entity is one of user, group, project or domain
+ key, value = self._convert_entity_shorthand(
+ param, test['params'], test_data)
+ args[key] = value
+ results = self.assignment_api.list_role_assignments(**args)
+ check_results(test['results'], results, len(args))
+
+ def execute_assignment_plan(self, test_plan):
+ """Create entities, assignments and execute the test plan.
+
+ The standard method to call to create entities and assignments and
+ execute the tests as specified in the test_plan. The test_data
+ dict is returned so that, if required, the caller can execute
+ additional manual tests with the entities and assignments created.
+
+ """
+ test_data = self.create_entities(test_plan['entities'])
+ if 'implied_roles' in test_plan:
+ self.create_implied_roles(test_plan['implied_roles'], test_data)
+ if 'group_memberships' in test_plan:
+ self.create_group_memberships(test_plan['group_memberships'],
+ test_data)
+ if 'assignments' in test_plan:
+ test_data = self.create_assignments(test_plan['assignments'],
+ test_data)
+ self.execute_assignment_cases(test_plan, test_data)
+ return test_data
+
+
+class AssignmentTests(AssignmentTestHelperMixin):
+
+ def _get_domain_fixture(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ return domain
+
+ def test_project_add_and_remove_user_role(self):
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ self.tenant_bar['id'])
+ self.assertNotIn(self.user_two['id'], user_ids)
+
+ self.assignment_api.add_role_to_user_and_project(
+ tenant_id=self.tenant_bar['id'],
+ user_id=self.user_two['id'],
+ role_id=self.role_other['id'])
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ self.tenant_bar['id'])
+ self.assertIn(self.user_two['id'], user_ids)
+
+ self.assignment_api.remove_role_from_user_and_project(
+ tenant_id=self.tenant_bar['id'],
+ user_id=self.user_two['id'],
+ role_id=self.role_other['id'])
+
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ self.tenant_bar['id'])
+ self.assertNotIn(self.user_two['id'], user_ids)
+
+ def test_remove_user_role_not_assigned(self):
+ # Expect failure when attempting to remove a role that was never
+ # assigned to the user.
+ self.assertRaises(exception.RoleNotFound,
+ self.assignment_api.
+ remove_role_from_user_and_project,
+ tenant_id=self.tenant_bar['id'],
+ user_id=self.user_two['id'],
+ role_id=self.role_other['id'])
+
+ def test_list_user_ids_for_project(self):
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ self.tenant_baz['id'])
+ self.assertEqual(2, len(user_ids))
+ self.assertIn(self.user_two['id'], user_ids)
+ self.assertIn(self.user_badguy['id'], user_ids)
+
+ def test_list_user_ids_for_project_no_duplicates(self):
+ # Create user
+ user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user_ref = self.identity_api.create_user(user_ref)
+ # Create project
+ project_ref = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(
+ project_ref['id'], project_ref)
+ # Create 2 roles and give user each role in project
+ for i in range(2):
+ role_ref = unit.new_role_ref()
+ self.role_api.create_role(role_ref['id'], role_ref)
+ self.assignment_api.add_role_to_user_and_project(
+ user_id=user_ref['id'],
+ tenant_id=project_ref['id'],
+ role_id=role_ref['id'])
+ # Get the list of user_ids in project
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ project_ref['id'])
+ # Ensure the user is only returned once
+ self.assertEqual(1, len(user_ids))
+
+ def test_get_project_user_ids_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.list_user_ids_for_project,
+ uuid.uuid4().hex)
+
+ def test_list_role_assignments_unfiltered(self):
+ """Test unfiltered listing of role assignments."""
+ test_plan = {
+ # Create a domain, with a user, group & project
+ 'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1},
+ 'roles': 3},
+ # Create a grant of each type (user/group on project/domain)
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 2, 'domain': 0},
+ {'group': 0, 'role': 2, 'project': 0}],
+ 'tests': [
+ # Check that we get back the 4 assignments
+ {'params': {},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 2, 'domain': 0},
+ {'group': 0, 'role': 2, 'project': 0}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignments_filtered_by_role(self):
+ """Test listing of role assignments filtered by role ID."""
+ test_plan = {
+ # Create a user, group & project in the default domain
+ 'entities': {'domains': {'id': CONF.identity.default_domain_id,
+ 'users': 1, 'groups': 1, 'projects': 1},
+ 'roles': 3},
+ # Create a grant of each type (user/group on project/domain)
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 2, 'domain': 0},
+ {'group': 0, 'role': 2, 'project': 0}],
+ 'tests': [
+ # Check that when filtering by role, we only get back those
+ # that match
+ {'params': {'role': 2},
+ 'results': [{'group': 0, 'role': 2, 'domain': 0},
+ {'group': 0, 'role': 2, 'project': 0}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_group_role_assignment(self):
+ # When a group role assignment is created and the role assignments are
+ # listed then the group role assignment is included in the list.
+
+ test_plan = {
+ 'entities': {'domains': {'id': CONF.identity.default_domain_id,
+ 'groups': 1, 'projects': 1},
+ 'roles': 1},
+ 'assignments': [{'group': 0, 'role': 0, 'project': 0}],
+ 'tests': [
+ {'params': {},
+ 'results': [{'group': 0, 'role': 0, 'project': 0}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignments_bad_role(self):
+ assignment_list = self.assignment_api.list_role_assignments(
+ role_id=uuid.uuid4().hex)
+ self.assertEqual([], assignment_list)
+
+ def test_add_duplicate_role_grant(self):
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertNotIn(self.role_admin['id'], roles_ref)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id'])
+ self.assertRaises(exception.Conflict,
+ self.assignment_api.add_role_to_user_and_project,
+ self.user_foo['id'],
+ self.tenant_bar['id'],
+ self.role_admin['id'])
+
+ def test_get_role_by_user_and_project_with_user_in_group(self):
+ """Test for get role by user and project, user was added into a group.
+
+ Test Plan:
+
+ - Create a user, a project & a group, add this user to group
+ - Create roles and grant them to user and project
+ - Check the role list get by the user and project was as expected
+
+ """
+ user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user_ref = self.identity_api.create_user(user_ref)
+
+ project_ref = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project_ref['id'], project_ref)
+
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group_id = self.identity_api.create_group(group)['id']
+ self.identity_api.add_user_to_group(user_ref['id'], group_id)
+
+ role_ref_list = []
+ for i in range(2):
+ role_ref = unit.new_role_ref()
+ self.role_api.create_role(role_ref['id'], role_ref)
+ role_ref_list.append(role_ref)
+
+ self.assignment_api.add_role_to_user_and_project(
+ user_id=user_ref['id'],
+ tenant_id=project_ref['id'],
+ role_id=role_ref['id'])
+
+ role_list = self.assignment_api.get_roles_for_user_and_project(
+ user_ref['id'],
+ project_ref['id'])
+
+ self.assertEqual(set([r['id'] for r in role_ref_list]),
+ set(role_list))
+
+ def test_get_role_by_user_and_project(self):
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertNotIn(self.role_admin['id'], roles_ref)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], self.role_admin['id'])
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertIn(self.role_admin['id'], roles_ref)
+ self.assertNotIn('member', roles_ref)
+
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], 'member')
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertIn(self.role_admin['id'], roles_ref)
+ self.assertIn('member', roles_ref)
+
+ def test_get_roles_for_user_and_domain(self):
+ """Test for getting roles for user on a domain.
+
+ Test Plan:
+
+ - Create a domain, with 2 users
+ - Check no roles yet exist
+ - Give user1 two roles on the domain, user2 one role
+ - Get roles on user1 and the domain - make sure we only
+ get back the 2 roles on user1
+ - Delete both roles from user1
+ - Check we get no roles back for user1 on domain
+
+ """
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user1 = self.identity_api.create_user(new_user1)
+ new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user2 = self.identity_api.create_user(new_user2)
+ roles_ref = self.assignment_api.list_grants(
+ user_id=new_user1['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ # Now create the grants (roles are defined in default_fixtures)
+ self.assignment_api.create_grant(user_id=new_user1['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ self.assignment_api.create_grant(user_id=new_user1['id'],
+ domain_id=new_domain['id'],
+ role_id='other')
+ self.assignment_api.create_grant(user_id=new_user2['id'],
+ domain_id=new_domain['id'],
+ role_id='admin')
+ # Read back the roles for user1 on domain
+ roles_ids = self.assignment_api.get_roles_for_user_and_domain(
+ new_user1['id'], new_domain['id'])
+ self.assertEqual(2, len(roles_ids))
+ self.assertIn(self.role_member['id'], roles_ids)
+ self.assertIn(self.role_other['id'], roles_ids)
+
+ # Now delete both grants for user1
+ self.assignment_api.delete_grant(user_id=new_user1['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ self.assignment_api.delete_grant(user_id=new_user1['id'],
+ domain_id=new_domain['id'],
+ role_id='other')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=new_user1['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ def test_get_roles_for_user_and_domain_returns_not_found(self):
+ """Test errors raised when getting roles for user on a domain.
+
+ Test Plan:
+
+ - Check non-existing user gives UserNotFound
+ - Check non-existing domain gives DomainNotFound
+
+ """
+ new_domain = self._get_domain_fixture()
+ new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user1 = self.identity_api.create_user(new_user1)
+
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.get_roles_for_user_and_domain,
+ uuid.uuid4().hex,
+ new_domain['id'])
+
+ self.assertRaises(exception.DomainNotFound,
+ self.assignment_api.get_roles_for_user_and_domain,
+ new_user1['id'],
+ uuid.uuid4().hex)
+
+ def test_get_roles_for_user_and_project_returns_not_found(self):
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.get_roles_for_user_and_project,
+ uuid.uuid4().hex,
+ self.tenant_bar['id'])
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.get_roles_for_user_and_project,
+ self.user_foo['id'],
+ uuid.uuid4().hex)
+
+ def test_add_role_to_user_and_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.add_role_to_user_and_project,
+ self.user_foo['id'],
+ uuid.uuid4().hex,
+ self.role_admin['id'])
+
+ self.assertRaises(exception.RoleNotFound,
+ self.assignment_api.add_role_to_user_and_project,
+ self.user_foo['id'],
+ self.tenant_bar['id'],
+ uuid.uuid4().hex)
+
+ def test_add_role_to_user_and_project_no_user(self):
+ # If add_role_to_user_and_project is called with a user that doesn't
+ # exist, no error is raised.
+ user_id_not_exist = uuid.uuid4().hex
+ self.assignment_api.add_role_to_user_and_project(
+ user_id_not_exist, self.tenant_bar['id'], self.role_admin['id'])
+
+ def test_remove_role_from_user_and_project(self):
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], 'member')
+ self.assignment_api.remove_role_from_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], 'member')
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertNotIn('member', roles_ref)
+ self.assertRaises(exception.NotFound,
+ self.assignment_api.
+ remove_role_from_user_and_project,
+ self.user_foo['id'],
+ self.tenant_bar['id'],
+ 'member')
+
+ def test_get_role_grant_by_user_and_project(self):
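+ # Note: list_grants returns full role refs (dicts), so the assertions
+ # below check membership by extracting each ref's 'id';
+ # get_roles_for_user_and_project, used in the tests above, returns bare
+ # role IDs instead.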
+ roles_ref = self.assignment_api.list_grants(
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_bar['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.assignment_api.create_grant(user_id=self.user_foo['id'],
+ project_id=self.tenant_bar['id'],
+ role_id=self.role_admin['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_bar['id'])
+ self.assertIn(self.role_admin['id'],
+ [role_ref['id'] for role_ref in roles_ref])
+
+ self.assignment_api.create_grant(user_id=self.user_foo['id'],
+ project_id=self.tenant_bar['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_bar['id'])
+
+ roles_ref_ids = []
+ for ref in roles_ref:
+ roles_ref_ids.append(ref['id'])
+ self.assertIn(self.role_admin['id'], roles_ref_ids)
+ self.assertIn('member', roles_ref_ids)
+
+ def test_remove_role_grant_from_user_and_project(self):
+ self.assignment_api.create_grant(user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+
+ def test_get_role_assignment_by_project_not_found(self):
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.check_grant_role_id,
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.check_grant_role_id,
+ group_id=uuid.uuid4().hex,
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+
+ def test_get_role_assignment_by_domain_not_found(self):
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.check_grant_role_id,
+ user_id=self.user_foo['id'],
+ domain_id=self.domain_default['id'],
+ role_id='member')
+
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.check_grant_role_id,
+ group_id=uuid.uuid4().hex,
+ domain_id=self.domain_default['id'],
+ role_id='member')
+
+ def test_del_role_assignment_by_project_not_found(self):
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ user_id=self.user_foo['id'],
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=uuid.uuid4().hex,
+ project_id=self.tenant_baz['id'],
+ role_id='member')
+
+ def test_del_role_assignment_by_domain_not_found(self):
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ user_id=self.user_foo['id'],
+ domain_id=self.domain_default['id'],
+ role_id='member')
+
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=uuid.uuid4().hex,
+ domain_id=self.domain_default['id'],
+ role_id='member')
+
+ def test_get_and_remove_role_grant_by_group_and_project(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ project_id=self.tenant_bar['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ project_id=self.tenant_bar['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ project_id=self.tenant_bar['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(group_id=new_group['id'],
+ project_id=self.tenant_bar['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ project_id=self.tenant_bar['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=new_group['id'],
+ project_id=self.tenant_bar['id'],
+ role_id='member')
+
+ def test_get_and_remove_role_grant_by_group_and_domain(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ def test_get_and_remove_correct_role_grant_from_a_mix(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ new_project = unit.new_project_ref(domain_id=new_domain['id'])
+ self.resource_api.create_project(new_project['id'], new_project)
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_group2 = unit.new_group_ref(domain_id=new_domain['id'])
+ new_group2 = self.identity_api.create_group(new_group2)
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user2 = self.identity_api.create_user(new_user2)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ # First check we have no grants
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ # Now add the grant we are going to test for, and some others as
+ # well just to make sure we get back the right one
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ self.assignment_api.create_grant(group_id=new_group2['id'],
+ domain_id=new_domain['id'],
+ role_id=self.role_admin['id'])
+ self.assignment_api.create_grant(user_id=new_user2['id'],
+ domain_id=new_domain['id'],
+ role_id=self.role_admin['id'])
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ project_id=new_project['id'],
+ role_id=self.role_admin['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ def test_get_and_remove_role_grant_by_user_and_domain(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ roles_ref = self.assignment_api.list_grants(
+ user_id=new_user['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(user_id=new_user['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=new_user['id'],
+ domain_id=new_domain['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(user_id=new_user['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ user_id=new_user['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ user_id=new_user['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
+ group1_domain1_role = unit.new_role_ref()
+ self.role_api.create_role(group1_domain1_role['id'],
+ group1_domain1_role)
+ group1_domain2_role = unit.new_role_ref()
+ self.role_api.create_role(group1_domain2_role['id'],
+ group1_domain2_role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(0, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain2['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=group1_domain1_role['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain2['id'],
+ role_id=group1_domain2_role['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertDictEqual(group1_domain1_role, roles_ref[0])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain2['id'])
+ self.assertDictEqual(group1_domain2_role, roles_ref[0])
+
+ self.assignment_api.delete_grant(group_id=group1['id'],
+ domain_id=domain2['id'],
+ role_id=group1_domain2_role['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain2['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ group_id=group1['id'],
+ domain_id=domain2['id'],
+ role_id=group1_domain2_role['id'])
+
+ def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
+ user1_domain1_role = unit.new_role_ref()
+ self.role_api.create_role(user1_domain1_role['id'], user1_domain1_role)
+ user1_domain2_role = unit.new_role_ref()
+ self.role_api.create_role(user1_domain2_role['id'], user1_domain2_role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(0, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain2['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=user1_domain1_role['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain2['id'],
+ role_id=user1_domain2_role['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertDictEqual(user1_domain1_role, roles_ref[0])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain2['id'])
+ self.assertDictEqual(user1_domain2_role, roles_ref[0])
+
+ self.assignment_api.delete_grant(user_id=user1['id'],
+ domain_id=domain2['id'],
+ role_id=user1_domain2_role['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain2['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.RoleAssignmentNotFound,
+ self.assignment_api.delete_grant,
+ user_id=user1['id'],
+ domain_id=domain2['id'],
+ role_id=user1_domain2_role['id'])
+
+ def test_role_grant_by_group_and_cross_domain_project(self):
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ role2 = unit.new_role_ref()
+ self.role_api.create_role(role2['id'], role2)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ project1 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role2['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+
+ roles_ref_ids = []
+ for ref in roles_ref:
+ roles_ref_ids.append(ref['id'])
+ self.assertIn(role1['id'], roles_ref_ids)
+ self.assertIn(role2['id'], roles_ref_ids)
+
+ self.assignment_api.delete_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.assertDictEqual(role2, roles_ref[0])
+
+ def test_role_grant_by_user_and_cross_domain_project(self):
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ role2 = unit.new_role_ref()
+ self.role_api.create_role(role2['id'], role2)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ project1 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role2['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+
+ roles_ref_ids = []
+ for ref in roles_ref:
+ roles_ref_ids.append(ref['id'])
+ self.assertIn(role1['id'], roles_ref_ids)
+ self.assertIn(role2['id'], roles_ref_ids)
+
+ self.assignment_api.delete_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.assertDictEqual(role2, roles_ref[0])
+
+ def test_delete_user_grant_no_user(self):
+ # Can delete a grant where the user doesn't exist.
+ role = unit.new_role_ref()
+ role_id = role['id']
+ self.role_api.create_role(role_id, role)
+
+ user_id = uuid.uuid4().hex
+
+ self.assignment_api.create_grant(role_id, user_id=user_id,
+ project_id=self.tenant_bar['id'])
+
+ self.assignment_api.delete_grant(role_id, user_id=user_id,
+ project_id=self.tenant_bar['id'])
+
+ def test_delete_group_grant_no_group(self):
+ # Can delete a grant where the group doesn't exist.
+ role = unit.new_role_ref()
+ role_id = role['id']
+ self.role_api.create_role(role_id, role)
+
+ group_id = uuid.uuid4().hex
+
+ self.assignment_api.create_grant(role_id, group_id=group_id,
+ project_id=self.tenant_bar['id'])
+
+ self.assignment_api.delete_grant(role_id, group_id=group_id,
+ project_id=self.tenant_bar['id'])
+
+ def test_grant_crud_throws_exception_if_invalid_role(self):
+ """Ensure RoleNotFound thrown if role does not exist."""
+ def assert_role_not_found_exception(f, **kwargs):
+ self.assertRaises(exception.RoleNotFound, f,
+ role_id=uuid.uuid4().hex, **kwargs)
+
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user_resp = self.identity_api.create_user(user)
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group_resp = self.identity_api.create_group(group)
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project_resp = self.resource_api.create_project(project['id'], project)
+
+ for manager_call in [self.assignment_api.create_grant,
+ self.assignment_api.get_grant,
+ self.assignment_api.delete_grant]:
+ assert_role_not_found_exception(
+ manager_call,
+ user_id=user_resp['id'], project_id=project_resp['id'])
+ assert_role_not_found_exception(
+ manager_call,
+ group_id=group_resp['id'], project_id=project_resp['id'])
+ assert_role_not_found_exception(
+ manager_call,
+ user_id=user_resp['id'],
+ domain_id=CONF.identity.default_domain_id)
+ assert_role_not_found_exception(
+ manager_call,
+ group_id=group_resp['id'],
+ domain_id=CONF.identity.default_domain_id)
+
+ def test_multi_role_grant_by_user_group_on_project_domain(self):
+ role_list = []
+ for _ in range(10):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ group2 = unit.new_group_ref(domain_id=domain1['id'])
+ group2 = self.identity_api.create_group(group2)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+
+ self.identity_api.add_user_to_group(user1['id'],
+ group1['id'])
+ self.identity_api.add_user_to_group(user1['id'],
+ group2['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[2]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[3]['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role_list[4]['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role_list[5]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role_list[6]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role_list[7]['id'])
+ roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(2, len(roles_ref))
+ self.assertIn(role_list[0], roles_ref)
+ self.assertIn(role_list[1], roles_ref)
+ roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(2, len(roles_ref))
+ self.assertIn(role_list[2], roles_ref)
+ self.assertIn(role_list[3], roles_ref)
+ roles_ref = self.assignment_api.list_grants(user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(2, len(roles_ref))
+ self.assertIn(role_list[4], roles_ref)
+ self.assertIn(role_list[5], roles_ref)
+ roles_ref = self.assignment_api.list_grants(group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(2, len(roles_ref))
+ self.assertIn(role_list[6], roles_ref)
+ self.assertIn(role_list[7], roles_ref)
+
+ # Now test the alternate way of getting back lists of grants,
+ # where user and group roles are combined. These should match
+ # the above results.
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(4, len(combined_list))
+ self.assertIn(role_list[4]['id'], combined_list)
+ self.assertIn(role_list[5]['id'], combined_list)
+ self.assertIn(role_list[6]['id'], combined_list)
+ self.assertIn(role_list[7]['id'], combined_list)
+
+ combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+ user1['id'], domain1['id'])
+ self.assertEqual(4, len(combined_role_list))
+ self.assertIn(role_list[0]['id'], combined_role_list)
+ self.assertIn(role_list[1]['id'], combined_role_list)
+ self.assertIn(role_list[2]['id'], combined_role_list)
+ self.assertIn(role_list[3]['id'], combined_role_list)
+
+ def test_multi_group_grants_on_project_domain(self):
+ """Test multiple group roles for user on project and domain.
+
+ Test Plan:
+
+ - Create 6 roles
+ - Create a domain, with a project, user and two groups
+ - Make the user a member of both groups
+ - Check no roles exist yet
+ - Assign a role to the user and to both groups on both the
+ project and the domain
+ - Get a list of effective roles for the user on both the
+ project and domain, checking we get back the correct three
+ roles
+
+ """
+ role_list = []
+ for _ in range(6):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ group2 = unit.new_group_ref(domain_id=domain1['id'])
+ group2 = self.identity_api.create_group(group2)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+
+ self.identity_api.add_user_to_group(user1['id'],
+ group1['id'])
+ self.identity_api.add_user_to_group(user1['id'],
+ group2['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'])
+ self.assignment_api.create_grant(group_id=group2['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[2]['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role_list[3]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role_list[4]['id'])
+ self.assignment_api.create_grant(group_id=group2['id'],
+ project_id=project1['id'],
+ role_id=role_list[5]['id'])
+
+ # Read back the roles, ensuring we get the correct 3 roles for
+ # both the project and the domain
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(3, len(combined_list))
+ self.assertIn(role_list[3]['id'], combined_list)
+ self.assertIn(role_list[4]['id'], combined_list)
+ self.assertIn(role_list[5]['id'], combined_list)
+
+ combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+ user1['id'], domain1['id'])
+ self.assertEqual(3, len(combined_role_list))
+ self.assertIn(role_list[0]['id'], combined_role_list)
+ self.assertIn(role_list[1]['id'], combined_role_list)
+ self.assertIn(role_list[2]['id'], combined_role_list)
+
+ def test_delete_role_with_user_and_group_grants(self):
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(1, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.role_api.delete_role(role1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(0, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ def test_list_role_assignment_by_domain(self):
+ """Test listing of role assignment filtered by domain."""
+ test_plan = {
+ # A domain with 3 users, 1 group, a spoiler domain and 2 roles.
+ 'entities': {'domains': [{'users': 3, 'groups': 1}, 1],
+ 'roles': 2},
+ # Users 1 & 2 are in the group
+ 'group_memberships': [{'group': 0, 'users': [1, 2]}],
+ # Assign a role to user 0 and the group
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'group': 0, 'role': 1, 'domain': 0}],
+ 'tests': [
+ # List all effective assignments for domain[0].
+ # Should get one direct user role and user roles for each of
+ # the users in the group.
+ {'params': {'domain': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 1, 'role': 1, 'domain': 0,
+ 'indirect': {'group': 0}},
+ {'user': 2, 'role': 1, 'domain': 0,
+ 'indirect': {'group': 0}}
+ ]},
+ # Using domain[1] should return nothing
+ {'params': {'domain': 1, 'effective': True},
+ 'results': []},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignment_by_user_with_domain_group_roles(self):
+ """Test listing assignments by user, with group roles on a domain."""
+ test_plan = {
+ # A domain with 3 users, 3 groups, a spoiler domain
+ # plus 3 roles.
+ 'entities': {'domains': [{'users': 3, 'groups': 3}, 1],
+ 'roles': 3},
+ # Users 1 & 2 are in the group 0, User 1 also in group 1
+ 'group_memberships': [{'group': 0, 'users': [0, 1]},
+ {'group': 1, 'users': [0]}],
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'group': 0, 'role': 1, 'domain': 0},
+ {'group': 1, 'role': 2, 'domain': 0},
+ # ...and two spoiler assignments
+ {'user': 1, 'role': 1, 'domain': 0},
+ {'group': 2, 'role': 2, 'domain': 0}],
+ 'tests': [
+ # List all effective assignments for user[0].
+ # Should get one direct user role and a user role for each of
+ # groups 0 and 1
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'domain': 0,
+ 'indirect': {'group': 0}},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'indirect': {'group': 1}}
+ ]},
+ # Adding domain[0] as a filter should return the same data
+ {'params': {'user': 0, 'domain': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'domain': 0,
+ 'indirect': {'group': 0}},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'indirect': {'group': 1}}
+ ]},
+ # Using domain[1] should return nothing
+ {'params': {'user': 0, 'domain': 1, 'effective': True},
+ 'results': []},
+ # Using user[2] should return nothing
+ {'params': {'user': 2, 'domain': 0, 'effective': True},
+ 'results': []},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignment_using_sourced_groups(self):
+ """Test listing assignments when restricted by source groups."""
+ test_plan = {
+ # The default domain with 3 users, 3 groups, 3 projects,
+ # plus 3 roles.
+ 'entities': {'domains': {'id': CONF.identity.default_domain_id,
+ 'users': 3, 'groups': 3, 'projects': 3},
+ 'roles': 3},
+ # Users 0 & 1 are in the group 0, User 0 also in group 1
+ 'group_memberships': [{'group': 0, 'users': [0, 1]},
+ {'group': 1, 'users': [0]}],
+ # Spread the assignments around - we want to be able to show that
+ # if sourced by group, assignments from other sources are excluded
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'group': 0, 'role': 1, 'project': 1},
+ {'group': 1, 'role': 2, 'project': 0},
+ {'group': 1, 'role': 2, 'project': 1},
+ {'user': 2, 'role': 1, 'project': 1},
+ {'group': 2, 'role': 2, 'project': 2}
+ ],
+ 'tests': [
+ # List all effective assignments sourced from groups 0 and 1
+ {'params': {'source_from_group_ids': [0, 1],
+ 'effective': True},
+ 'results': [{'group': 0, 'role': 1, 'project': 1},
+ {'group': 1, 'role': 2, 'project': 0},
+ {'group': 1, 'role': 2, 'project': 1}
+ ]},
+ # Adding a role as a filter should further restrict the entries
+ {'params': {'source_from_group_ids': [0, 1], 'role': 2,
+ 'effective': True},
+ 'results': [{'group': 1, 'role': 2, 'project': 0},
+ {'group': 1, 'role': 2, 'project': 1}
+ ]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignment_using_sourced_groups_with_domains(self):
+ """Test listing domain assignments when restricted by source groups."""
+ test_plan = {
+ # A domain with 3 users, 3 groups, 3 projects, a second domain,
+ # plus 3 roles.
+ 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},
+ 1],
+ 'roles': 3},
+ # Users 0 & 1 are in the group 0, User 0 also in group 1
+ 'group_memberships': [{'group': 0, 'users': [0, 1]},
+ {'group': 1, 'users': [0]}],
+ # Spread the assignments around - we want to be able to show that
+ # if sourced by group, assignments from other sources are excluded
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'group': 0, 'role': 1, 'domain': 1},
+ {'group': 1, 'role': 2, 'project': 0},
+ {'group': 1, 'role': 2, 'project': 1},
+ {'user': 2, 'role': 1, 'project': 1},
+ {'group': 2, 'role': 2, 'project': 2}
+ ],
+ 'tests': [
+ # List all effective assignments sourced from groups 0 and 1
+ {'params': {'source_from_group_ids': [0, 1],
+ 'effective': True},
+ 'results': [{'group': 0, 'role': 1, 'domain': 1},
+ {'group': 1, 'role': 2, 'project': 0},
+ {'group': 1, 'role': 2, 'project': 1}
+ ]},
+ # Adding a role as a filter should further restrict the entries
+ {'params': {'source_from_group_ids': [0, 1], 'role': 1,
+ 'effective': True},
+ 'results': [{'group': 0, 'role': 1, 'domain': 1},
+ ]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_role_assignment_fails_with_userid_and_source_groups(self):
+ """Show we trap this unsupported internal combination of params."""
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+ self.assertRaises(exception.UnexpectedError,
+ self.assignment_api.list_role_assignments,
+ effective=True,
+ user_id=self.user_foo['id'],
+ source_from_group_ids=[group['id']])
+
+ def test_add_user_to_project(self):
+ self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ tenants = self.assignment_api.list_projects_for_user(
+ self.user_foo['id'])
+ self.assertIn(self.tenant_baz, tenants)
+
+ def test_add_user_to_project_missing_default_role(self):
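+ # The default member role (CONF.member_role_id) is deleted up front;
+ # add_user_to_project is then expected to recreate it on demand, which
+ # the final get_role call verifies.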
+ self.role_api.delete_role(CONF.member_role_id)
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ CONF.member_role_id)
+ self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ tenants = (
+ self.assignment_api.list_projects_for_user(self.user_foo['id']))
+ self.assertIn(self.tenant_baz, tenants)
+ default_role = self.role_api.get_role(CONF.member_role_id)
+ self.assertIsNotNone(default_role)
+
+ def test_add_user_to_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.add_user_to_project,
+ uuid.uuid4().hex,
+ self.user_foo['id'])
+
+ def test_add_user_to_project_no_user(self):
+ # If add_user_to_project is called with a user that doesn't exist,
+ # no error is raised.
+ user_id_not_exist = uuid.uuid4().hex
+ self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+ user_id_not_exist)
+
+ def test_remove_user_from_project(self):
+ self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ tenants = self.assignment_api.list_projects_for_user(
+ self.user_foo['id'])
+ self.assertNotIn(self.tenant_baz, tenants)
+
+ def test_remove_user_from_project_race_delete_role(self):
+ self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ self.assignment_api.add_role_to_user_and_project(
+ tenant_id=self.tenant_baz['id'],
+ user_id=self.user_foo['id'],
+ role_id=self.role_other['id'])
+
+ # Mock a race condition: delete a role after
+ # get_roles_for_user_and_project() is called in
+ # remove_user_from_project().
+ roles = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_baz['id'])
+ self.role_api.delete_role(self.role_other['id'])
+ self.assignment_api.get_roles_for_user_and_project = mock.Mock(
+ return_value=roles)
+ self.assignment_api.remove_user_from_project(self.tenant_baz['id'],
+ self.user_foo['id'])
+ tenants = self.assignment_api.list_projects_for_user(
+ self.user_foo['id'])
+ self.assertNotIn(self.tenant_baz, tenants)
+
+ def test_remove_user_from_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.remove_user_from_project,
+ uuid.uuid4().hex,
+ self.user_foo['id'])
+
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.remove_user_from_project,
+ self.tenant_bar['id'],
+ uuid.uuid4().hex)
+
+ self.assertRaises(exception.NotFound,
+ self.assignment_api.remove_user_from_project,
+ self.tenant_baz['id'],
+ self.user_foo['id'])
+
+ def test_list_user_project_ids_returns_not_found(self):
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.list_projects_for_user,
+ uuid.uuid4().hex)
+
+ def test_delete_user_with_project_association(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+ user['id'])
+ self.identity_api.delete_user(user['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.list_projects_for_user,
+ user['id'])
+
+ def test_delete_user_with_project_roles(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.assignment_api.add_role_to_user_and_project(
+ user['id'],
+ self.tenant_bar['id'],
+ self.role_member['id'])
+ self.identity_api.delete_user(user['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.assignment_api.list_projects_for_user,
+ user['id'])
+
+ def test_delete_role_returns_not_found(self):
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.delete_role,
+ uuid.uuid4().hex)
+
+ def test_delete_project_with_role_assignments(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], project['id'], 'member')
+ self.resource_api.delete_project(project['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.list_user_ids_for_project,
+ project['id'])
+
+ def test_delete_role_check_role_grant(self):
+ role = unit.new_role_ref()
+ alt_role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ self.role_api.create_role(alt_role['id'], alt_role)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], role['id'])
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'], alt_role['id'])
+ self.role_api.delete_role(role['id'])
+ roles_ref = self.assignment_api.get_roles_for_user_and_project(
+ self.user_foo['id'], self.tenant_bar['id'])
+ self.assertNotIn(role['id'], roles_ref)
+ self.assertIn(alt_role['id'], roles_ref)
+
+ def test_list_projects_for_user(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ user1 = unit.new_user_ref(domain_id=domain['id'])
+ user1 = self.identity_api.create_user(user1)
+ user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+ self.assertEqual(0, len(user_projects))
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=self.tenant_bar['id'],
+ role_id=self.role_member['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=self.tenant_baz['id'],
+ role_id=self.role_member['id'])
+ user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+ self.assertEqual(2, len(user_projects))
+
+ def test_list_projects_for_user_with_grants(self):
+ # Create two groups each with a role on a different project, and
+ # make user1 a member of both groups. Both these new projects
+ # should now be included, along with any direct user grants.
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ user1 = unit.new_user_ref(domain_id=domain['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain['id'])
+ group1 = self.identity_api.create_group(group1)
+ group2 = unit.new_group_ref(domain_id=domain['id'])
+ group2 = self.identity_api.create_group(group2)
+ project1 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ self.identity_api.add_user_to_group(user1['id'], group1['id'])
+ self.identity_api.add_user_to_group(user1['id'], group2['id'])
+
+ # Create 3 grants: one user grant, the other two as group grants
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=self.tenant_bar['id'],
+ role_id=self.role_member['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=self.role_admin['id'])
+ self.assignment_api.create_grant(group_id=group2['id'],
+ project_id=project2['id'],
+ role_id=self.role_admin['id'])
+ user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+ self.assertEqual(3, len(user_projects))
+
+ def test_create_grant_no_user(self):
+ # Calling create_grant with a user that doesn't exist doesn't fail.
+ self.assignment_api.create_grant(
+ self.role_other['id'],
+ user_id=uuid.uuid4().hex,
+ project_id=self.tenant_bar['id'])
+
+ def test_create_grant_no_group(self):
+ # Calling create_grant with a group that doesn't exist doesn't fail.
+ self.assignment_api.create_grant(
+ self.role_other['id'],
+ group_id=uuid.uuid4().hex,
+ project_id=self.tenant_bar['id'])
+
+ def test_delete_group_removes_role_assignments(self):
+ # When a group is deleted, any role assignments for the group are
+ # removed.
+
+ MEMBER_ROLE_ID = 'member'
+
+ def get_member_assignments():
+ assignments = self.assignment_api.list_role_assignments()
+ return [x for x in assignments if x['role_id'] == MEMBER_ROLE_ID]
+
+ orig_member_assignments = get_member_assignments()
+
+ # Create a group.
+ new_group = unit.new_group_ref(
+ domain_id=CONF.identity.default_domain_id)
+ new_group = self.identity_api.create_group(new_group)
+
+ # Create a project.
+ new_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(new_project['id'], new_project)
+
+ # Assign a role to the group.
+ self.assignment_api.create_grant(
+ group_id=new_group['id'], project_id=new_project['id'],
+ role_id=MEMBER_ROLE_ID)
+
+ # Delete the group.
+ self.identity_api.delete_group(new_group['id'])
+
+ # Check that the role assignment for the group is gone
+ member_assignments = get_member_assignments()
+
+ self.assertThat(member_assignments,
+ matchers.Equals(orig_member_assignments))
+
+ def test_get_roles_for_groups_on_domain(self):
+ """Test retrieving group domain roles.
+
+ Test Plan:
+
+ - Create a domain, three groups and three roles
+ - Assign an inherited group role to one group, and non-inherited
+ group roles to the others, on the domain
+ - Ensure that only the non-inherited roles are returned on the domain
+
+ """
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ group_list = []
+ group_id_list = []
+ role_list = []
+ for _ in range(3):
+ group = unit.new_group_ref(domain_id=domain1['id'])
+ group = self.identity_api.create_group(group)
+ group_list.append(group)
+ group_id_list.append(group['id'])
+
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+
+ # Assign the roles - one is inherited
+ self.assignment_api.create_grant(group_id=group_list[0]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(group_id=group_list[1]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'])
+ self.assignment_api.create_grant(group_id=group_list[2]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[2]['id'],
+ inherited_to_projects=True)
+
+ # Now get the effective roles for the groups on the domain. We
+ # shouldn't get back the inherited role.
+
+ role_refs = self.assignment_api.get_roles_for_groups(
+ group_id_list, domain_id=domain1['id'])
+
+ self.assertThat(role_refs, matchers.HasLength(2))
+ self.assertIn(role_list[0], role_refs)
+ self.assertIn(role_list[1], role_refs)
+
+ def test_get_roles_for_groups_on_project(self):
+ """Test retrieving group project roles.
+
+ Test Plan:
+
+ - Create two domains, two projects, six groups and six roles
+ - Project1 is in Domain1, Project2 is in Domain2
+ - Domain2/Project2 are spoilers
+ - Assign a different direct group role to each project as well
+ as both an inherited and non-inherited role to each domain
+ - Get the group roles for Project 1 - depending on whether we have
+ enabled inheritance, we should either get back just the direct role
+ or both the direct one plus the inherited domain role from Domain 1
+
+ """
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ group_list = []
+ group_id_list = []
+ role_list = []
+ for _ in range(6):
+ group = unit.new_group_ref(domain_id=domain1['id'])
+ group = self.identity_api.create_group(group)
+ group_list.append(group)
+ group_id_list.append(group['id'])
+
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+
+ # Assign the roles - one inherited and one non-inherited on Domain1,
+ # plus one on Project1
+ self.assignment_api.create_grant(group_id=group_list[0]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(group_id=group_list[1]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group_list[2]['id'],
+ project_id=project1['id'],
+ role_id=role_list[2]['id'])
+
+ # ...and a duplicate set of spoiler assignments to Domain2/Project2
+ self.assignment_api.create_grant(group_id=group_list[3]['id'],
+ domain_id=domain2['id'],
+ role_id=role_list[3]['id'])
+ self.assignment_api.create_grant(group_id=group_list[4]['id'],
+ domain_id=domain2['id'],
+ role_id=role_list[4]['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group_list[5]['id'],
+ project_id=project2['id'],
+ role_id=role_list[5]['id'])
+
+ # Now get the effective roles for all groups on Project1. With
+ # inheritance off, we should only get back the direct role.
+
+ self.config_fixture.config(group='os_inherit', enabled=False)
+ role_refs = self.assignment_api.get_roles_for_groups(
+ group_id_list, project_id=project1['id'])
+
+ self.assertThat(role_refs, matchers.HasLength(1))
+ self.assertIn(role_list[2], role_refs)
+
+ # With inheritance on, we should also get back the inherited role from
+ # its owning domain.
+
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ role_refs = self.assignment_api.get_roles_for_groups(
+ group_id_list, project_id=project1['id'])
+
+ self.assertThat(role_refs, matchers.HasLength(2))
+ self.assertIn(role_list[1], role_refs)
+ self.assertIn(role_list[2], role_refs)
+
+ def test_list_domains_for_groups(self):
+ """Test retrieving domains for a list of groups.
+
+ Test Plan:
+
+ - Create three domains, three groups and one role
+ - Assign a non-inherited group role to two domains, and an inherited
+ group role to the third
+ - Ensure only the domains with non-inherited roles are returned
+
+ """
+ domain_list = []
+ group_list = []
+ group_id_list = []
+ for _ in range(3):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ domain_list.append(domain)
+
+ group = unit.new_group_ref(domain_id=domain['id'])
+ group = self.identity_api.create_group(group)
+ group_list.append(group)
+ group_id_list.append(group['id'])
+
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+
+ # Assign the roles - one is inherited
+ self.assignment_api.create_grant(group_id=group_list[0]['id'],
+ domain_id=domain_list[0]['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group_list[1]['id'],
+ domain_id=domain_list[1]['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group_list[2]['id'],
+ domain_id=domain_list[2]['id'],
+ role_id=role1['id'],
+ inherited_to_projects=True)
+
+ # Now list the domains that have roles for any of the 3 groups
+ # We shouldn't get back domain[2] since that had an inherited role.
+
+ domain_refs = (
+ self.assignment_api.list_domains_for_groups(group_id_list))
+
+ self.assertThat(domain_refs, matchers.HasLength(2))
+ self.assertIn(domain_list[0], domain_refs)
+ self.assertIn(domain_list[1], domain_refs)
+
+ def test_list_projects_for_groups(self):
+ """Test retrieving projects for a list of groups.
+
+ Test Plan:
+
+ - Create two domains, four projects, seven groups and seven roles
+ - Project1-3 are in Domain1, Project4 is in Domain2
+ - Domain2/Project4 are spoilers
+ - Project1 and 2 have direct group roles, Project3 has no direct
+ roles but should inherit a group role from Domain1
+ - Get the projects for the group roles that are assigned to Project1,
+ Project2 and the inherited one on Domain1. Depending on whether we
+ have enabled inheritance, we should either get back just the projects
+ with direct roles (Project 1 and 2) or also Project3 due to its
+ inherited role from Domain1.
+
+ """
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ project1 = self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain1['id'])
+ project2 = self.resource_api.create_project(project2['id'], project2)
+ project3 = unit.new_project_ref(domain_id=domain1['id'])
+ project3 = self.resource_api.create_project(project3['id'], project3)
+ project4 = unit.new_project_ref(domain_id=domain2['id'])
+ project4 = self.resource_api.create_project(project4['id'], project4)
+ group_list = []
+ role_list = []
+ for _ in range(7):
+ group = unit.new_group_ref(domain_id=domain1['id'])
+ group = self.identity_api.create_group(group)
+ group_list.append(group)
+
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+
+ # Assign the roles - one inherited and one non-inherited on Domain1,
+ # plus one on Project1 and Project2
+ self.assignment_api.create_grant(group_id=group_list[0]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(group_id=group_list[1]['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group_list[2]['id'],
+ project_id=project1['id'],
+ role_id=role_list[2]['id'])
+ self.assignment_api.create_grant(group_id=group_list[3]['id'],
+ project_id=project2['id'],
+ role_id=role_list[3]['id'])
+
+ # ...and a few spoiler assignments to Domain2/Project4
+ self.assignment_api.create_grant(group_id=group_list[4]['id'],
+ domain_id=domain2['id'],
+ role_id=role_list[4]['id'])
+ self.assignment_api.create_grant(group_id=group_list[5]['id'],
+ domain_id=domain2['id'],
+ role_id=role_list[5]['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group_list[6]['id'],
+ project_id=project4['id'],
+ role_id=role_list[6]['id'])
+
+ # Now get the projects for the groups that have roles on Project1,
+ # Project2 and the inherited role on Domain1. With inheritance off,
+ # we should only get back the projects with direct roles.
+
+ self.config_fixture.config(group='os_inherit', enabled=False)
+ group_id_list = [group_list[1]['id'], group_list[2]['id'],
+ group_list[3]['id']]
+ project_refs = (
+ self.assignment_api.list_projects_for_groups(group_id_list))
+
+ self.assertThat(project_refs, matchers.HasLength(2))
+ self.assertIn(project1, project_refs)
+ self.assertIn(project2, project_refs)
+
+ # With inheritance on, we should also get back Project3 due to the
+ # inherited role from its owning domain.
+
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ project_refs = (
+ self.assignment_api.list_projects_for_groups(group_id_list))
+
+ self.assertThat(project_refs, matchers.HasLength(3))
+ self.assertIn(project1, project_refs)
+ self.assertIn(project2, project_refs)
+ self.assertIn(project3, project_refs)
+
+ def test_update_role_no_name(self):
+ # A user can update a role and not include the name.
+
+ # 'description' is picked just because it's not 'name'.
+ self.role_api.update_role(self.role_member['id'],
+ {'description': uuid.uuid4().hex})
+ # If the previous line didn't raise an exception then the test passes.
+
+ def test_update_role_same_name(self):
+ # A user can update a role and set the name to be the same as it was.
+
+ self.role_api.update_role(self.role_member['id'],
+ {'name': self.role_member['name']})
+ # If the previous line didn't raise an exception then the test passes.
+
+ def test_list_role_assignment_containing_names(self):
+ # Create Refs
+ new_role = unit.new_role_ref()
+ new_domain = self._get_domain_fixture()
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_project = unit.new_project_ref(domain_id=new_domain['id'])
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
+ # Create entities
+ new_role = self.role_api.create_role(new_role['id'], new_role)
+ new_user = self.identity_api.create_user(new_user)
+ new_group = self.identity_api.create_group(new_group)
+ self.resource_api.create_project(new_project['id'], new_project)
+ self.assignment_api.create_grant(user_id=new_user['id'],
+ project_id=new_project['id'],
+ role_id=new_role['id'])
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ project_id=new_project['id'],
+ role_id=new_role['id'])
+ self.assignment_api.create_grant(domain_id=new_domain['id'],
+ user_id=new_user['id'],
+ role_id=new_role['id'])
+ # Get the created assignments with the include_names flag
+ _asgmt_prj = self.assignment_api.list_role_assignments(
+ user_id=new_user['id'],
+ project_id=new_project['id'],
+ include_names=True)
+ _asgmt_grp = self.assignment_api.list_role_assignments(
+ group_id=new_group['id'],
+ project_id=new_project['id'],
+ include_names=True)
+ _asgmt_dmn = self.assignment_api.list_role_assignments(
+ domain_id=new_domain['id'],
+ user_id=new_user['id'],
+ include_names=True)
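+ # A sketch of the expected shape of one returned assignment with
+ # include_names=True (keys inferred from the assertions below; the raw
+ # ID keys are assumed to be present as well but are not checked here):
+ # {'user_name': ..., 'user_domain_id': ...,
+ # 'project_name': ..., 'project_domain_id': ..., 'role_name': ...}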
+ # Make sure we can get back the correct number of assignments
+ self.assertThat(_asgmt_prj, matchers.HasLength(1))
+ self.assertThat(_asgmt_grp, matchers.HasLength(1))
+ self.assertThat(_asgmt_dmn, matchers.HasLength(1))
+ # get the first assignment
+ first_asgmt_prj = _asgmt_prj[0]
+ first_asgmt_grp = _asgmt_grp[0]
+ first_asgmt_dmn = _asgmt_dmn[0]
+ # Assert the names are correct in the project response
+ self.assertEqual(new_project['name'],
+ first_asgmt_prj['project_name'])
+ self.assertEqual(new_project['domain_id'],
+ first_asgmt_prj['project_domain_id'])
+ self.assertEqual(new_user['name'],
+ first_asgmt_prj['user_name'])
+ self.assertEqual(new_user['domain_id'],
+ first_asgmt_prj['user_domain_id'])
+ self.assertEqual(new_role['name'],
+ first_asgmt_prj['role_name'])
+ # Assert the names are correct in the group response
+ self.assertEqual(new_group['name'],
+ first_asgmt_grp['group_name'])
+ self.assertEqual(new_group['domain_id'],
+ first_asgmt_grp['group_domain_id'])
+ self.assertEqual(new_project['name'],
+ first_asgmt_grp['project_name'])
+ self.assertEqual(new_project['domain_id'],
+ first_asgmt_grp['project_domain_id'])
+ self.assertEqual(new_role['name'],
+ first_asgmt_grp['role_name'])
+ # Assert the names are correct in the domain response
+ self.assertEqual(new_domain['name'],
+ first_asgmt_dmn['domain_name'])
+ self.assertEqual(new_user['name'],
+ first_asgmt_dmn['user_name'])
+ self.assertEqual(new_user['domain_id'],
+ first_asgmt_dmn['user_domain_id'])
+ self.assertEqual(new_role['name'],
+ first_asgmt_dmn['role_name'])
+
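
The include_names flag asserted above simply decorates each returned assignment with the display names (and owning domains) of the entities it references. A hedged sketch of that enrichment, assuming plain lookup tables keyed by id (illustrative shapes, not keystone's internals):

    def add_names(assignment, users, projects, roles):
        # Copy the referenced entities' names alongside their ids.
        enriched = dict(assignment)
        if 'user_id' in assignment:
            user = users[assignment['user_id']]
            enriched['user_name'] = user['name']
            enriched['user_domain_id'] = user['domain_id']
        if 'project_id' in assignment:
            project = projects[assignment['project_id']]
            enriched['project_name'] = project['name']
            enriched['project_domain_id'] = project['domain_id']
        enriched['role_name'] = roles[assignment['role_id']]['name']
        return enriched
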
+ def test_list_role_assignment_does_not_contain_names(self):
+ """Test names are not included with list role assignments.
+
+ Scenario:
+ - names are NOT included by default
+ - names are NOT included when include_names=False
+
+ """
+ def assert_does_not_contain_names(assignment):
+ first_asgmt_prj = assignment[0]
+ self.assertNotIn('project_name', first_asgmt_prj)
+ self.assertNotIn('project_domain_id', first_asgmt_prj)
+ self.assertNotIn('user_name', first_asgmt_prj)
+ self.assertNotIn('user_domain_id', first_asgmt_prj)
+ self.assertNotIn('role_name', first_asgmt_prj)
+
+ # Create Refs
+ new_role = unit.new_role_ref()
+ new_domain = self._get_domain_fixture()
+ new_user = unit.new_user_ref(domain_id=new_domain['id'])
+ new_project = unit.new_project_ref(domain_id=new_domain['id'])
+ # Create entities
+ new_role = self.role_api.create_role(new_role['id'], new_role)
+ new_user = self.identity_api.create_user(new_user)
+ self.resource_api.create_project(new_project['id'], new_project)
+ self.assignment_api.create_grant(user_id=new_user['id'],
+ project_id=new_project['id'],
+ role_id=new_role['id'])
+ # Get the created assignments with NO include_names flag
+ role_assign_without_names = self.assignment_api.list_role_assignments(
+ user_id=new_user['id'],
+ project_id=new_project['id'])
+ assert_does_not_contain_names(role_assign_without_names)
+ # Get the created assignments with include_names=False
+ role_assign_without_names = self.assignment_api.list_role_assignments(
+ user_id=new_user['id'],
+ project_id=new_project['id'],
+ include_names=False)
+ assert_does_not_contain_names(role_assign_without_names)
+
+ def test_delete_user_assignments_user_same_id_as_group(self):
+ """Test deleting user assignments when user_id == group_id.
+
+ In this scenario, only user assignments must be deleted (i.e.
+ USER_DOMAIN or USER_PROJECT).
+
+ Test plan:
+ * Create a user and a group with the same ID;
+ * Create four roles and assign them to both user and group;
+ * Delete all user assignments;
+ * Group assignments must stay intact.
+ """
+ # Create a common ID
+ common_id = uuid.uuid4().hex
+ # Create a project
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project = self.resource_api.create_project(project['id'], project)
+ # Create a user
+ user = unit.new_user_ref(id=common_id,
+ domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.driver.create_user(common_id, user)
+ self.assertEqual(common_id, user['id'])
+ # Create a group
+ group = unit.new_group_ref(id=common_id,
+ domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.driver.create_group(common_id, group)
+ self.assertEqual(common_id, group['id'])
+ # Create four roles
+ roles = []
+ for _ in range(4):
+ role = unit.new_role_ref()
+ roles.append(self.role_api.create_role(role['id'], role))
+ # Assign roles for user
+ self.assignment_api.driver.create_grant(
+ user_id=user['id'], domain_id=CONF.identity.default_domain_id,
+ role_id=roles[0]['id'])
+ self.assignment_api.driver.create_grant(user_id=user['id'],
+ project_id=project['id'],
+ role_id=roles[1]['id'])
+ # Assign roles for group
+ self.assignment_api.driver.create_grant(
+ group_id=group['id'], domain_id=CONF.identity.default_domain_id,
+ role_id=roles[2]['id'])
+ self.assignment_api.driver.create_grant(group_id=group['id'],
+ project_id=project['id'],
+ role_id=roles[3]['id'])
+ # Make sure they were assigned
+ user_assignments = self.assignment_api.list_role_assignments(
+ user_id=user['id'])
+ self.assertThat(user_assignments, matchers.HasLength(2))
+ group_assignments = self.assignment_api.list_role_assignments(
+ group_id=group['id'])
+ self.assertThat(group_assignments, matchers.HasLength(2))
+ # Delete user assignments
+ self.assignment_api.delete_user_assignments(user_id=user['id'])
+ # Assert only user assignments were deleted
+ user_assignments = self.assignment_api.list_role_assignments(
+ user_id=user['id'])
+ self.assertThat(user_assignments, matchers.HasLength(0))
+ group_assignments = self.assignment_api.list_role_assignments(
+ group_id=group['id'])
+ self.assertThat(group_assignments, matchers.HasLength(2))
+ # Make sure these remaining assignments are group-related
+ for assignment in group_assignments:
+ self.assertThat(assignment.keys(), matchers.Contains('group_id'))
+
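
This test and its twin below pin down that deletion is keyed on the assignment type (user vs. group), not merely on the actor's id. A rough sketch of the rule, assuming assignments are plain dicts keyed by 'user_id' or 'group_id' (not keystone's actual storage model):

    def delete_user_assignments(assignments, actor_id):
        # Only user assignments for this actor are dropped; group
        # assignments survive even when the group shares the same id.
        return [a for a in assignments if a.get('user_id') != actor_id]

    def delete_group_assignments(assignments, actor_id):
        # The mirror image: only group assignments are removed.
        return [a for a in assignments if a.get('group_id') != actor_id]
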
+ def test_delete_group_assignments_group_same_id_as_user(self):
+ """Test deleting group assignments when group_id == user_id.
+
+ In this scenario, only group assignments must be deleted (i.e.
+ GROUP_DOMAIN or GROUP_PROJECT).
+
+ Test plan:
+ * Create a group and a user with the same ID;
+ * Create four roles and assign them to both group and user;
+ * Delete all group assignments;
+ * User assignments must stay intact.
+ """
+ # Create a common ID
+ common_id = uuid.uuid4().hex
+ # Create a project
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project = self.resource_api.create_project(project['id'], project)
+ # Create a user
+ user = unit.new_user_ref(id=common_id,
+ domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.driver.create_user(common_id, user)
+ self.assertEqual(common_id, user['id'])
+ # Create a group
+ group = unit.new_group_ref(id=common_id,
+ domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.driver.create_group(common_id, group)
+ self.assertEqual(common_id, group['id'])
+ # Create four roles
+ roles = []
+ for _ in range(4):
+ role = unit.new_role_ref()
+ roles.append(self.role_api.create_role(role['id'], role))
+ # Assign roles for user
+ self.assignment_api.driver.create_grant(
+ user_id=user['id'], domain_id=CONF.identity.default_domain_id,
+ role_id=roles[0]['id'])
+ self.assignment_api.driver.create_grant(user_id=user['id'],
+ project_id=project['id'],
+ role_id=roles[1]['id'])
+ # Assign roles for group
+ self.assignment_api.driver.create_grant(
+ group_id=group['id'], domain_id=CONF.identity.default_domain_id,
+ role_id=roles[2]['id'])
+ self.assignment_api.driver.create_grant(group_id=group['id'],
+ project_id=project['id'],
+ role_id=roles[3]['id'])
+ # Make sure they were assigned
+ user_assignments = self.assignment_api.list_role_assignments(
+ user_id=user['id'])
+ self.assertThat(user_assignments, matchers.HasLength(2))
+ group_assignments = self.assignment_api.list_role_assignments(
+ group_id=group['id'])
+ self.assertThat(group_assignments, matchers.HasLength(2))
+ # Delete group assignments
+ self.assignment_api.delete_group_assignments(group_id=group['id'])
+ # Assert only group assignments were deleted
+ group_assignments = self.assignment_api.list_role_assignments(
+ group_id=group['id'])
+ self.assertThat(group_assignments, matchers.HasLength(0))
+ user_assignments = self.assignment_api.list_role_assignments(
+ user_id=user['id'])
+ self.assertThat(user_assignments, matchers.HasLength(2))
+ # Make sure these remaining assignments are user-related
+        for assignment in user_assignments:
+ self.assertThat(assignment.keys(), matchers.Contains('user_id'))
+
+ def test_remove_foreign_assignments_when_deleting_a_domain(self):
+        # A user and a group in the default domain are each assigned a role
+        # on two new domains. This test makes sure that when one of the new
+        # domains is deleted, only the role assignments of the user and the
+        # group on that domain are deleted; those on the other domain remain.
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+
+ role = unit.new_role_ref()
+ role = self.role_api.create_role(role['id'], role)
+
+ new_domains = [unit.new_domain_ref(), unit.new_domain_ref()]
+ for new_domain in new_domains:
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+
+ self.assignment_api.create_grant(group_id=group['id'],
+ domain_id=new_domain['id'],
+ role_id=role['id'])
+ self.assignment_api.create_grant(user_id=self.user_two['id'],
+ domain_id=new_domain['id'],
+ role_id=role['id'])
+
+ # Check there are 4 role assignments for that role
+ role_assignments = self.assignment_api.list_role_assignments(
+ role_id=role['id'])
+ self.assertThat(role_assignments, matchers.HasLength(4))
+
+ # Delete first new domain and check only 2 assignments were left
+ self.resource_api.update_domain(new_domains[0]['id'],
+ {'enabled': False})
+ self.resource_api.delete_domain(new_domains[0]['id'])
+
+ role_assignments = self.assignment_api.list_role_assignments(
+ role_id=role['id'])
+ self.assertThat(role_assignments, matchers.HasLength(2))
+
+ # Delete second new domain and check no assignments were left
+ self.resource_api.update_domain(new_domains[1]['id'],
+ {'enabled': False})
+ self.resource_api.delete_domain(new_domains[1]['id'])
+
+ role_assignments = self.assignment_api.list_role_assignments(
+ role_id=role['id'])
+ self.assertEqual([], role_assignments)
+
+
+class InheritanceTests(AssignmentTestHelperMixin):
+
+ def test_role_assignments_user_domain_to_project_inheritance(self):
+ test_plan = {
+ 'entities': {'domains': {'users': 2, 'projects': 1},
+ 'roles': 3},
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': True},
+ {'user': 1, 'role': 1, 'project': 0}],
+ 'tests': [
+ # List all direct assignments for user[0]
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': 'projects'}]},
+ # Now the effective ones - so the domain role should turn into
+ # a project role
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'domain': 0}}]},
+ # Narrow down to effective roles for user[0] and project[0]
+ {'params': {'user': 0, 'project': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'domain': 0}}]}
+ ]
+ }
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ self.execute_assignment_plan(test_plan)
+
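
The plans in this class are interpreted by execute_assignment_plan() from AssignmentTestHelperMixin. As a deliberately simplified, hypothetical sketch of the DSL's idea - create entities, then express assignments and expected results by index into those entities - something like the following captures the shape (the factory callables are assumed to return dicts with an 'id' key; this is not the real helper):

    def run_plan(plan, make_role, make_user, grant):
        roles = [make_role() for _ in range(plan['entities']['roles'])]
        users = [make_user()
                 for _ in range(plan['entities']['domains']['users'])]
        for a in plan.get('assignments', []):
            # Index-based references keep the plans readable and
            # independent of the generated uuids.
            grant(user_id=users[a['user']]['id'],
                  role_id=roles[a['role']]['id'],
                  inherited=a.get('inherited_to_projects', False))
        return {'roles': roles, 'users': users}
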
+ def test_inherited_role_assignments_excluded_if_os_inherit_false(self):
+ test_plan = {
+ 'entities': {'domains': {'users': 2, 'groups': 1, 'projects': 1},
+ 'roles': 4},
+ 'group_memberships': [{'group': 0, 'users': [0]}],
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': True},
+ {'user': 1, 'role': 1, 'project': 0},
+ {'group': 0, 'role': 3, 'project': 0}],
+ 'tests': [
+                # List all direct assignments for user[0]; since os-inherit
+                # is disabled, we should not see the inherited role
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0}]},
+ # Same in effective mode - inherited roles should not be
+ # included or expanded...but the group role should now
+ # turn up as a user role, since group expansion is not
+ # part of os-inherit.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'project': 0},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'group': 0}}]},
+ ]
+ }
+ self.config_fixture.config(group='os_inherit', enabled=False)
+ self.execute_assignment_plan(test_plan)
+
+ def _test_crud_inherited_and_direct_assignment(self, **kwargs):
+ """Tests inherited and direct assignments for the actor and target
+
+ Ensure it is possible to create both inherited and direct role
+ assignments for the same actor on the same target. The actor and the
+ target are specified in the kwargs as ('user_id' or 'group_id') and
+ ('project_id' or 'domain_id'), respectively.
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ # Create a new role to avoid assignments loaded from default fixtures
+ role = unit.new_role_ref()
+ role = self.role_api.create_role(role['id'], role)
+
+ # Define the common assignment entity
+ assignment_entity = {'role_id': role['id']}
+ assignment_entity.update(kwargs)
+
+ # Define assignments under test
+ direct_assignment_entity = assignment_entity.copy()
+ inherited_assignment_entity = assignment_entity.copy()
+ inherited_assignment_entity['inherited_to_projects'] = 'projects'
+
+ # Create direct assignment and check grants
+ self.assignment_api.create_grant(inherited_to_projects=False,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments(role_id=role['id'])
+ self.assertThat(grants, matchers.HasLength(1))
+ self.assertIn(direct_assignment_entity, grants)
+
+ # Now add inherited assignment and check grants
+ self.assignment_api.create_grant(inherited_to_projects=True,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments(role_id=role['id'])
+ self.assertThat(grants, matchers.HasLength(2))
+ self.assertIn(direct_assignment_entity, grants)
+ self.assertIn(inherited_assignment_entity, grants)
+
+ # Delete both and check grants
+ self.assignment_api.delete_grant(inherited_to_projects=False,
+ **assignment_entity)
+ self.assignment_api.delete_grant(inherited_to_projects=True,
+ **assignment_entity)
+
+ grants = self.assignment_api.list_role_assignments(role_id=role['id'])
+ self.assertEqual([], grants)
+
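
The helper above depends on a direct and an inherited grant being two distinct assignments even when actor, target and role are identical; the inherited flag is effectively part of the grant's identity. A tiny sketch of that keying, with an illustrative tuple key:

    def grant_key(actor_id, target_id, role_id, inherited):
        # The same (actor, target, role) triple yields two different
        # keys, one per value of the inherited flag.
        return (actor_id, target_id, role_id, bool(inherited))
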
+ def test_crud_inherited_and_direct_assignment_for_user_on_domain(self):
+ self._test_crud_inherited_and_direct_assignment(
+ user_id=self.user_foo['id'],
+ domain_id=CONF.identity.default_domain_id)
+
+ def test_crud_inherited_and_direct_assignment_for_group_on_domain(self):
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+
+ self._test_crud_inherited_and_direct_assignment(
+ group_id=group['id'], domain_id=CONF.identity.default_domain_id)
+
+ def test_crud_inherited_and_direct_assignment_for_user_on_project(self):
+ self._test_crud_inherited_and_direct_assignment(
+ user_id=self.user_foo['id'], project_id=self.tenant_baz['id'])
+
+ def test_crud_inherited_and_direct_assignment_for_group_on_project(self):
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+
+ self._test_crud_inherited_and_direct_assignment(
+ group_id=group['id'], project_id=self.tenant_baz['id'])
+
+ def test_inherited_role_grants_for_user(self):
+ """Test inherited user roles.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create 3 roles
+ - Create a domain, with a project and a user
+        - Check no roles yet exist
+ - Assign a direct user role to the project and a (non-inherited)
+ user role to the domain
+ - Get a list of effective roles - should only get the one direct role
+ - Now add an inherited user role to the domain
+ - Get a list of effective roles - should have two roles, one
+ direct and one by virtue of the inherited user role
+ - Also get effective roles for the domain - the role marked as
+ inherited should not show up
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ role_list = []
+ for _ in range(3):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ # Create the first two roles - the domain one is not inherited
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'])
+
+        # Now get the effective roles for the user and project; this
+        # should only include the direct role assignment on the project
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(1, len(combined_list))
+ self.assertIn(role_list[0]['id'], combined_list)
+
+ # Now add an inherited role on the domain
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[2]['id'],
+ inherited_to_projects=True)
+
+        # Now get the effective roles for the user and project again; this
+        # should now include the inherited role on the domain
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(2, len(combined_list))
+ self.assertIn(role_list[0]['id'], combined_list)
+ self.assertIn(role_list[2]['id'], combined_list)
+
+ # Finally, check that the inherited role does not appear as a valid
+ # directly assigned role on the domain itself
+ combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
+ user1['id'], domain1['id'])
+ self.assertEqual(1, len(combined_role_list))
+ self.assertIn(role_list[1]['id'], combined_role_list)
+
+ # TODO(henry-nash): The test above uses get_roles_for_user_and_project
+ # and get_roles_for_user_and_domain, which will, in a subsequent patch,
+ # be re-implemented to simply call list_role_assignments (see blueprint
+ # remove-role-metadata).
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once get_roles_for_user_and
+ # project/domain have been re-implemented then the manual tests above
+ # can be refactored to simply ensure it gives the same answers.
+ test_plan = {
+ # A domain with a user & project, plus 3 roles.
+ 'entities': {'domains': {'users': 1, 'projects': 1},
+ 'roles': 3},
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'domain': 0},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0] on project[0].
+ # Should get one direct role and one inherited role.
+ {'params': {'user': 0, 'project': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'domain': 0}}]},
+ # Ensure effective mode on the domain does not list the
+ # inherited role on that domain
+ {'params': {'user': 0, 'domain': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 1, 'domain': 0}]},
+ # Ensure non-inherited mode also only returns the non-inherited
+ # role on the domain
+ {'params': {'user': 0, 'domain': 0, 'inherited': False},
+ 'results': [{'user': 0, 'role': 1, 'domain': 0}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
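
The behaviour asserted above - an inherited domain role becomes effective on every project in the domain but never on the domain itself - can be summarized in a short sketch. The data shapes are illustrative, not keystone's internal representation:

    def expand_effective(assignments, projects_by_domain):
        effective = []
        for a in assignments:
            if 'project_id' in a or not a.get('inherited_to_projects'):
                # Direct project roles and plain domain roles are
                # effective exactly where they were assigned.
                effective.append(a)
            else:
                # Inherited domain roles apply to each project in the
                # domain, recording where they came from, but are not
                # active on the domain itself.
                for project_id in projects_by_domain[a['domain_id']]:
                    effective.append({'user_id': a['user_id'],
                                      'project_id': project_id,
                                      'role_id': a['role_id'],
                                      'indirect': {'domain_id':
                                                   a['domain_id']}})
        return effective
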
+ def test_inherited_role_grants_for_group(self):
+ """Test inherited group roles.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create 4 roles
+ - Create a domain, with a project, user and two groups
+ - Make the user a member of both groups
+        - Check no roles yet exist
+ - Assign a direct user role to the project and a (non-inherited)
+ group role on the domain
+ - Get a list of effective roles - should only get the one direct role
+ - Now add two inherited group roles to the domain
+ - Get a list of effective roles - should have three roles, one
+ direct and two by virtue of inherited group roles
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ role_list = []
+ for _ in range(4):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_list.append(role)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ group2 = unit.new_group_ref(domain_id=domain1['id'])
+ group2 = self.identity_api.create_group(group2)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+
+ self.identity_api.add_user_to_group(user1['id'],
+ group1['id'])
+ self.identity_api.add_user_to_group(user1['id'],
+ group2['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ # Create two roles - the domain one is not inherited
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role_list[0]['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[1]['id'])
+
+        # Now get the effective roles for the user and project; this
+        # should only include the direct role assignment on the project
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(1, len(combined_list))
+ self.assertIn(role_list[0]['id'], combined_list)
+
+        # Now add two more group roles, both inherited, to the domain
+ self.assignment_api.create_grant(group_id=group2['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[2]['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group2['id'],
+ domain_id=domain1['id'],
+ role_id=role_list[3]['id'],
+ inherited_to_projects=True)
+
+        # Now get the effective roles for the user and project again; this
+        # should now include the inherited roles on the domain
+ combined_list = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'], project1['id'])
+ self.assertEqual(3, len(combined_list))
+ self.assertIn(role_list[0]['id'], combined_list)
+ self.assertIn(role_list[2]['id'], combined_list)
+ self.assertIn(role_list[3]['id'], combined_list)
+
+ # TODO(henry-nash): The test above uses get_roles_for_user_and_project
+ # which will, in a subsequent patch, be re-implemented to simply call
+ # list_role_assignments (see blueprint remove-role-metadata).
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once
+ # get_roles_for_user_and_project has been re-implemented then the
+ # manual tests above can be refactored to simply ensure it gives
+ # the same answers.
+ test_plan = {
+ # A domain with a user and project, 2 groups, plus 4 roles.
+ 'entities': {'domains': {'users': 1, 'projects': 1, 'groups': 2},
+ 'roles': 4},
+ 'group_memberships': [{'group': 0, 'users': [0]},
+ {'group': 1, 'users': [0]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'group': 0, 'role': 1, 'domain': 0},
+ {'group': 1, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': True},
+ {'group': 1, 'role': 3, 'domain': 0,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0] on project[0].
+ # Should get one direct role and both inherited roles, but
+ # not the direct one on domain[0], even though user[0] is
+ # in group[0].
+ {'params': {'user': 0, 'project': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'domain': 0, 'group': 1}},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'domain': 0, 'group': 1}}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_projects_for_user_with_inherited_grants(self):
+ """Test inherited user roles.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create a domain, with two projects and a user
+ - Assign an inherited user role on the domain, as well as a direct
+ user role to a separate project in a different domain
+ - Get a list of projects for user, should return all three projects
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ user1 = unit.new_user_ref(domain_id=domain['id'])
+ user1 = self.identity_api.create_user(user1)
+ project1 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project2['id'], project2)
+
+ # Create 2 grants, one on a project and one inherited grant
+ # on the domain
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=self.tenant_bar['id'],
+ role_id=self.role_member['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain['id'],
+ role_id=self.role_admin['id'],
+ inherited_to_projects=True)
+ # Should get back all three projects, one by virtue of the direct
+ # grant, plus both projects in the domain
+ user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+ self.assertEqual(3, len(user_projects))
+
+ # TODO(henry-nash): The test above uses list_projects_for_user
+ # which may, in a subsequent patch, be re-implemented to call
+ # list_role_assignments and then report only the distinct projects.
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once list_projects_for_user
+ # has been re-implemented then the manual tests above can be
+ # refactored.
+ test_plan = {
+ # A domain with 1 project, plus a second domain with 2 projects,
+ # as well as a user. Also, create 2 roles.
+ 'entities': {'domains': [{'projects': 1},
+ {'users': 1, 'projects': 2}],
+ 'roles': 2},
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'domain': 1,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0]
+ # Should get one direct role plus one inherited role for each
+ # project in domain
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'domain': 1}},
+ {'user': 0, 'role': 1, 'project': 2,
+ 'indirect': {'domain': 1}}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_projects_for_user_with_inherited_user_project_grants(self):
+ """Test inherited role assignments for users on nested projects.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create a hierarchy of projects with one root and one leaf project
+ - Assign an inherited user role on root project
+ - Assign a non-inherited user role on root project
+ - Get a list of projects for user, should return both projects
+ - Disable OS-INHERIT extension
+ - Get a list of projects for user, should return only root project
+
+ """
+ # Enable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ root_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ root_project = self.resource_api.create_project(root_project['id'],
+ root_project)
+ leaf_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=root_project['id'])
+ leaf_project = self.resource_api.create_project(leaf_project['id'],
+ leaf_project)
+
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+
+ # Grant inherited user role
+ self.assignment_api.create_grant(user_id=user['id'],
+ project_id=root_project['id'],
+ role_id=self.role_admin['id'],
+ inherited_to_projects=True)
+ # Grant non-inherited user role
+ self.assignment_api.create_grant(user_id=user['id'],
+ project_id=root_project['id'],
+ role_id=self.role_member['id'])
+        # Should get back both projects: the root project because of the
+        # direct role assignment and the leaf project because of the
+        # inherited one
+ user_projects = self.assignment_api.list_projects_for_user(user['id'])
+ self.assertEqual(2, len(user_projects))
+ self.assertIn(root_project, user_projects)
+ self.assertIn(leaf_project, user_projects)
+
+ # Disable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=False)
+        # Should get back just the root project, due to the direct role
+        # assignment
+ user_projects = self.assignment_api.list_projects_for_user(user['id'])
+ self.assertEqual(1, len(user_projects))
+ self.assertIn(root_project, user_projects)
+
+ # TODO(henry-nash): The test above uses list_projects_for_user
+ # which may, in a subsequent patch, be re-implemented to call
+ # list_role_assignments and then report only the distinct projects.
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once list_projects_for_user
+ # has been re-implemented then the manual tests above can be
+ # refactored.
+ test_plan = {
+ # A domain with a project and sub-project, plus a user.
+ # Also, create 2 roles.
+ 'entities': {
+ 'domains': {'id': CONF.identity.default_domain_id, 'users': 1,
+ 'projects': {'project': 1}},
+ 'roles': 2},
+ # A direct role and an inherited role on the parent
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 0,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0] - should get back
+ # one direct role plus one inherited role.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'project': 0}}]}
+ ]
+ }
+
+ test_plan_with_os_inherit_disabled = {
+ 'tests': [
+ # List all effective assignments for user[0] - should only get
+ # back the one direct role.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0}]}
+ ]
+ }
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ test_data = self.execute_assignment_plan(test_plan)
+ self.config_fixture.config(group='os_inherit', enabled=False)
+ # Pass the existing test data in to allow execution of 2nd test plan
+ self.execute_assignment_cases(
+ test_plan_with_os_inherit_disabled, test_data)
+
+ def test_list_projects_for_user_with_inherited_group_grants(self):
+ """Test inherited group roles.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create two domains, each with two projects
+ - Create a user and group
+ - Make the user a member of the group
+        - Assign a user role on two projects, an inherited user role on one
+          domain and an inherited group role on the other domain
+        - Get a list of projects for the user; should return both pairs of
+          projects from the two domains, plus the one separate project
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project1 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ project3 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project3['id'], project3)
+ project4 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project4['id'], project4)
+ user1 = unit.new_user_ref(domain_id=domain['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain['id'])
+ group1 = self.identity_api.create_group(group1)
+ self.identity_api.add_user_to_group(user1['id'], group1['id'])
+
+ # Create 4 grants:
+ # - one user grant on a project in domain2
+ # - one user grant on a project in the default domain
+ # - one inherited user grant on domain
+ # - one inherited group grant on domain2
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project3['id'],
+ role_id=self.role_member['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=self.tenant_bar['id'],
+ role_id=self.role_member['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain['id'],
+ role_id=self.role_admin['id'],
+ inherited_to_projects=True)
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain2['id'],
+ role_id=self.role_admin['id'],
+ inherited_to_projects=True)
+ # Should get back all five projects, but without a duplicate for
+ # project3 (since it has both a direct user role and an inherited role)
+ user_projects = self.assignment_api.list_projects_for_user(user1['id'])
+ self.assertEqual(5, len(user_projects))
+
+ # TODO(henry-nash): The test above uses list_projects_for_user
+ # which may, in a subsequent patch, be re-implemented to call
+ # list_role_assignments and then report only the distinct projects.
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once list_projects_for_user
+ # has been re-implemented then the manual tests above can be
+ # refactored.
+ test_plan = {
+            # A domain with 1 project, plus a second domain with 2 projects,
+            # as well as a user & group, and a 3rd domain with 2 projects.
+            # Also, create 2 roles.
+ 'entities': {'domains': [{'projects': 1},
+ {'users': 1, 'groups': 1, 'projects': 2},
+ {'projects': 2}],
+ 'roles': 2},
+ 'group_memberships': [{'group': 0, 'users': [0]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 0, 'project': 3},
+ {'user': 0, 'role': 1, 'domain': 1,
+ 'inherited_to_projects': True},
+ {'user': 0, 'role': 1, 'domain': 2,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0]
+ # Should get back both direct roles plus roles on both projects
+ # from each domain. Duplicates should not be filtered out.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 3},
+ {'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'domain': 1}},
+ {'user': 0, 'role': 1, 'project': 2,
+ 'indirect': {'domain': 1}},
+ {'user': 0, 'role': 1, 'project': 3,
+ 'indirect': {'domain': 2}},
+ {'user': 0, 'role': 1, 'project': 4,
+ 'indirect': {'domain': 2}}]}
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_projects_for_user_with_inherited_group_project_grants(self):
+ """Test inherited role assignments for groups on nested projects.
+
+ Test Plan:
+
+ - Enable OS-INHERIT extension
+ - Create a hierarchy of projects with one root and one leaf project
+ - Assign an inherited group role on root project
+ - Assign a non-inherited group role on root project
+ - Get a list of projects for user, should return both projects
+ - Disable OS-INHERIT extension
+ - Get a list of projects for user, should return only root project
+
+ """
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ root_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ root_project = self.resource_api.create_project(root_project['id'],
+ root_project)
+ leaf_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=root_project['id'])
+ leaf_project = self.resource_api.create_project(leaf_project['id'],
+ leaf_project)
+
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+ self.identity_api.add_user_to_group(user['id'], group['id'])
+
+ # Grant inherited group role
+ self.assignment_api.create_grant(group_id=group['id'],
+ project_id=root_project['id'],
+ role_id=self.role_admin['id'],
+ inherited_to_projects=True)
+ # Grant non-inherited group role
+ self.assignment_api.create_grant(group_id=group['id'],
+ project_id=root_project['id'],
+ role_id=self.role_member['id'])
+        # Should get back both projects: the root project because of the
+        # direct role assignment and the leaf project because of the
+        # inherited one
+ user_projects = self.assignment_api.list_projects_for_user(user['id'])
+ self.assertEqual(2, len(user_projects))
+ self.assertIn(root_project, user_projects)
+ self.assertIn(leaf_project, user_projects)
+
+ # Disable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=False)
+        # Should get back just the root project, due to the direct role
+        # assignment
+ user_projects = self.assignment_api.list_projects_for_user(user['id'])
+ self.assertEqual(1, len(user_projects))
+ self.assertIn(root_project, user_projects)
+
+ # TODO(henry-nash): The test above uses list_projects_for_user
+ # which may, in a subsequent patch, be re-implemented to call
+ # list_role_assignments and then report only the distinct projects.
+ #
+ # The test plan below therefore mirrors this test, to ensure that
+ # list_role_assignments works the same. Once list_projects_for_user
+ # has been re-implemented then the manual tests above can be
+ # refactored.
+ test_plan = {
+            # A domain with a project and sub-project, plus a user and a
+            # group. Also, create 2 roles.
+ 'entities': {
+ 'domains': {'id': CONF.identity.default_domain_id, 'users': 1,
+ 'groups': 1,
+ 'projects': {'project': 1}},
+ 'roles': 2},
+ 'group_memberships': [{'group': 0, 'users': [0]}],
+ # A direct role and an inherited role on the parent
+ 'assignments': [{'group': 0, 'role': 0, 'project': 0},
+ {'group': 0, 'role': 1, 'project': 0,
+ 'inherited_to_projects': True}],
+ 'tests': [
+ # List all effective assignments for user[0] - should get back
+ # one direct role plus one inherited role.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0,
+ 'indirect': {'group': 0}},
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'group': 0, 'project': 0}}]}
+ ]
+ }
+
+ test_plan_with_os_inherit_disabled = {
+ 'tests': [
+ # List all effective assignments for user[0] - should only get
+ # back the one direct role.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0,
+ 'indirect': {'group': 0}}]}
+ ]
+ }
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ test_data = self.execute_assignment_plan(test_plan)
+ self.config_fixture.config(group='os_inherit', enabled=False)
+ # Pass the existing test data in to allow execution of 2nd test plan
+ self.execute_assignment_cases(
+ test_plan_with_os_inherit_disabled, test_data)
+
+ def test_list_assignments_for_tree(self):
+ """Test we correctly list direct assignments for a tree"""
+ # Enable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+
+ test_plan = {
+ # Create a domain with a project hierarchy 3 levels deep:
+ #
+ # project 0
+ # ____________|____________
+ # | |
+ # project 1 project 4
+ # ______|_____ ______|_____
+ # | | | |
+ # project 2 project 3 project 5 project 6
+ #
+ # Also, create 1 user and 4 roles.
+ 'entities': {
+ 'domains': {
+ 'projects': {'project': [{'project': 2},
+ {'project': 2}]},
+ 'users': 1},
+ 'roles': 4},
+ 'assignments': [
+ # Direct assignment to projects 1 and 2
+ {'user': 0, 'role': 0, 'project': 1},
+ {'user': 0, 'role': 1, 'project': 2},
+ # Also an inherited assignment on project 1
+ {'user': 0, 'role': 2, 'project': 1,
+ 'inherited_to_projects': True},
+ # ...and two spoiler assignments, one to the root and one
+ # to project 4
+ {'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 3, 'project': 4}],
+ 'tests': [
+ # List all assignments for project 1 and its subtree.
+ {'params': {'project': 1, 'include_subtree': True},
+ 'results': [
+ # Only the actual assignments should be returned, no
+ # expansion of inherited assignments
+ {'user': 0, 'role': 0, 'project': 1},
+ {'user': 0, 'role': 1, 'project': 2},
+ {'user': 0, 'role': 2, 'project': 1,
+ 'inherited_to_projects': 'projects'}]}
+ ]
+ }
+
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_effective_assignments_for_tree(self):
+ """Test we correctly list effective assignments for a tree"""
+ # Enable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+
+ test_plan = {
+ # Create a domain with a project hierarchy 3 levels deep:
+ #
+ # project 0
+ # ____________|____________
+ # | |
+ # project 1 project 4
+ # ______|_____ ______|_____
+ # | | | |
+ # project 2 project 3 project 5 project 6
+ #
+ # Also, create 1 user and 4 roles.
+ 'entities': {
+ 'domains': {
+ 'projects': {'project': [{'project': 2},
+ {'project': 2}]},
+ 'users': 1},
+ 'roles': 4},
+ 'assignments': [
+ # An inherited assignment on project 1
+ {'user': 0, 'role': 1, 'project': 1,
+ 'inherited_to_projects': True},
+ # A direct assignment to project 2
+ {'user': 0, 'role': 2, 'project': 2},
+ # ...and two spoiler assignments, one to the root and one
+ # to project 4
+ {'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 3, 'project': 4}],
+ 'tests': [
+ # List all effective assignments for project 1 and its subtree.
+ {'params': {'project': 1, 'effective': True,
+ 'include_subtree': True},
+ 'results': [
+ # The inherited assignment on project 1 should appear only
+ # on its children
+ {'user': 0, 'role': 1, 'project': 2,
+ 'indirect': {'project': 1}},
+ {'user': 0, 'role': 1, 'project': 3,
+ 'indirect': {'project': 1}},
+ # And finally the direct assignment on project 2
+ {'user': 0, 'role': 2, 'project': 2}]}
+ ]
+ }
+
+ self.execute_assignment_plan(test_plan)
+
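
The subtree results above follow one rule: an inherited assignment anchored on a project is effective on its descendants only. A hedged sketch of that expansion, assuming a children_of mapping from project id to list of direct child ids (purely illustrative):

    def expand_inherited_on_subtree(assignment, children_of):
        # Depth-first walk over every descendant of the anchor project.
        anchor = assignment['project_id']
        stack = list(children_of.get(anchor, []))
        expanded = []
        while stack:
            child = stack.pop()
            entry = dict(assignment, project_id=child,
                         indirect={'project_id': anchor})
            entry.pop('inherited_to_projects', None)
            expanded.append(entry)
            stack.extend(children_of.get(child, []))
        return expanded
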
+ def test_list_effective_assignments_for_tree_with_mixed_assignments(self):
+ """Test that we correctly combine assignments for a tree.
+
+ In this test we want to ensure that when asking for a list of
+ assignments in a subtree, any assignments inherited from above the
+ subtree are correctly combined with any assignments within the subtree
+ itself.
+
+ """
+ # Enable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+
+ test_plan = {
+ # Create a domain with a project hierarchy 3 levels deep:
+ #
+ # project 0
+ # ____________|____________
+ # | |
+ # project 1 project 4
+ # ______|_____ ______|_____
+ # | | | |
+ # project 2 project 3 project 5 project 6
+ #
+ # Also, create 2 users, 1 group and 4 roles.
+ 'entities': {
+ 'domains': {
+ 'projects': {'project': [{'project': 2},
+ {'project': 2}]},
+ 'users': 2, 'groups': 1},
+ 'roles': 4},
+ # Both users are part of the same group
+ 'group_memberships': [{'group': 0, 'users': [0, 1]}],
+            # We are going to ask for a listing of assignments on project 1
+            # and its subtree. So first we'll add two inherited assignments
+            # above it (one for the user and one for a group that contains
+            # that user).
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0,
+ 'inherited_to_projects': True},
+ {'group': 0, 'role': 1, 'project': 0,
+ 'inherited_to_projects': True},
+ # Now an inherited assignment on project 1 itself,
+ # which should ONLY show up on its children
+ {'user': 0, 'role': 2, 'project': 1,
+ 'inherited_to_projects': True},
+ # ...and a direct assignment on one of those
+ # children
+ {'user': 0, 'role': 3, 'project': 2},
+ # The rest are spoiler assignments
+ {'user': 0, 'role': 2, 'project': 5},
+ {'user': 0, 'role': 3, 'project': 4}],
+ 'tests': [
+ # List all effective assignments for project 1 and its subtree.
+ {'params': {'project': 1, 'user': 0, 'effective': True,
+ 'include_subtree': True},
+ 'results': [
+ # First, we should see the inherited user assignment from
+ # project 0 on all projects in the subtree
+ {'user': 0, 'role': 0, 'project': 1,
+ 'indirect': {'project': 0}},
+ {'user': 0, 'role': 0, 'project': 2,
+ 'indirect': {'project': 0}},
+ {'user': 0, 'role': 0, 'project': 3,
+ 'indirect': {'project': 0}},
+ # Also the inherited group assignment from project 0 on
+ # the subtree
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'project': 0, 'group': 0}},
+ {'user': 0, 'role': 1, 'project': 2,
+ 'indirect': {'project': 0, 'group': 0}},
+ {'user': 0, 'role': 1, 'project': 3,
+ 'indirect': {'project': 0, 'group': 0}},
+ # The inherited assignment on project 1 should appear only
+ # on its children
+ {'user': 0, 'role': 2, 'project': 2,
+ 'indirect': {'project': 1}},
+ {'user': 0, 'role': 2, 'project': 3,
+ 'indirect': {'project': 1}},
+ # And finally the direct assignment on project 2
+ {'user': 0, 'role': 3, 'project': 2}]}
+ ]
+ }
+
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_effective_assignments_for_tree_with_domain_assignments(self):
+ """Test we correctly honor domain inherited assignments on the tree"""
+ # Enable OS-INHERIT extension
+ self.config_fixture.config(group='os_inherit', enabled=True)
+
+ test_plan = {
+ # Create a domain with a project hierarchy 3 levels deep:
+ #
+ # project 0
+ # ____________|____________
+ # | |
+ # project 1 project 4
+ # ______|_____ ______|_____
+ # | | | |
+ # project 2 project 3 project 5 project 6
+ #
+ # Also, create 1 user and 4 roles.
+ 'entities': {
+ 'domains': {
+ 'projects': {'project': [{'project': 2},
+ {'project': 2}]},
+ 'users': 1},
+ 'roles': 4},
+ 'assignments': [
+ # An inherited assignment on the domain (which should be
+ # applied to all the projects)
+ {'user': 0, 'role': 1, 'domain': 0,
+ 'inherited_to_projects': True},
+ # A direct assignment to project 2
+ {'user': 0, 'role': 2, 'project': 2},
+ # ...and two spoiler assignments, one to the root and one
+ # to project 4
+ {'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 3, 'project': 4}],
+ 'tests': [
+ # List all effective assignments for project 1 and its subtree.
+ {'params': {'project': 1, 'effective': True,
+ 'include_subtree': True},
+ 'results': [
+ # The inherited assignment from the domain should appear
+ # only on the part of the subtree we are interested in
+ {'user': 0, 'role': 1, 'project': 1,
+ 'indirect': {'domain': 0}},
+ {'user': 0, 'role': 1, 'project': 2,
+ 'indirect': {'domain': 0}},
+ {'user': 0, 'role': 1, 'project': 3,
+ 'indirect': {'domain': 0}},
+ # And finally the direct assignment on project 2
+ {'user': 0, 'role': 2, 'project': 2}]}
+ ]
+ }
+
+ self.execute_assignment_plan(test_plan)
+
+ def test_list_user_ids_for_project_with_inheritance(self):
+ test_plan = {
+ # A domain with a project and sub-project, plus four users,
+ # two groups, as well as 4 roles.
+ 'entities': {
+ 'domains': {'id': CONF.identity.default_domain_id, 'users': 4,
+ 'groups': 2,
+ 'projects': {'project': 1}},
+ 'roles': 4},
+ # Each group has a unique user member
+ 'group_memberships': [{'group': 0, 'users': [1]},
+ {'group': 1, 'users': [3]}],
+        # Set up assignments so that we should end up with four
+        # effective assignments on project 1 - one direct, one due to
+        # group membership, one user assignment inherited from the
+        # parent and one group assignment inherited from the parent.
+ 'assignments': [{'user': 0, 'role': 0, 'project': 1},
+ {'group': 0, 'role': 1, 'project': 1},
+ {'user': 2, 'role': 2, 'project': 0,
+ 'inherited_to_projects': True},
+ {'group': 1, 'role': 3, 'project': 0,
+ 'inherited_to_projects': True}],
+ }
+ # Use assignment plan helper to create all the entities and
+ # assignments - then we'll run our own tests using the data
+ test_data = self.execute_assignment_plan(test_plan)
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ user_ids = self.assignment_api.list_user_ids_for_project(
+ test_data['projects'][1]['id'])
+ self.assertThat(user_ids, matchers.HasLength(4))
+ for x in range(0, 4):
+ self.assertIn(test_data['users'][x]['id'], user_ids)
+
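
list_user_ids_for_project reduces the effective assignments on a project to a distinct set of user ids, whatever route (direct, group membership, inheritance) produced them. A one-line sketch over the illustrative assignment dicts used in the sketches above:

    def user_ids_for_project(effective_assignments, project_id):
        # Distinct user ids; duplicates from multiple routes collapse.
        return {a['user_id'] for a in effective_assignments
                if a.get('project_id') == project_id}
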
+ def test_list_role_assignment_using_inherited_sourced_groups(self):
+ """Test listing inherited assignments when restricted by groups."""
+ test_plan = {
+ # A domain with 3 users, 3 groups, 3 projects, a second domain,
+ # plus 3 roles.
+ 'entities': {'domains': [{'users': 3, 'groups': 3, 'projects': 3},
+ 1],
+ 'roles': 3},
+ # Users 0 & 1 are in the group 0, User 0 also in group 1
+ 'group_memberships': [{'group': 0, 'users': [0, 1]},
+ {'group': 1, 'users': [0]}],
+ # Spread the assignments around - we want to be able to show that
+ # if sourced by group, assignments from other sources are excluded
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0},
+ {'group': 0, 'role': 1, 'domain': 1},
+ {'group': 1, 'role': 2, 'domain': 0,
+ 'inherited_to_projects': True},
+ {'group': 1, 'role': 2, 'project': 1},
+ {'user': 2, 'role': 1, 'project': 1,
+ 'inherited_to_projects': True},
+ {'group': 2, 'role': 2, 'project': 2}
+ ],
+ 'tests': [
+ # List all effective assignments sourced from groups 0 and 1.
+                # We should see the inherited group assignment on the 3
+                # projects from domain 0, as well as the direct assignments.
+ {'params': {'source_from_group_ids': [0, 1],
+ 'effective': True},
+ 'results': [{'group': 0, 'role': 1, 'domain': 1},
+ {'group': 1, 'role': 2, 'project': 0,
+ 'indirect': {'domain': 0}},
+ {'group': 1, 'role': 2, 'project': 1,
+ 'indirect': {'domain': 0}},
+ {'group': 1, 'role': 2, 'project': 2,
+ 'indirect': {'domain': 0}},
+ {'group': 1, 'role': 2, 'project': 1}
+ ]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+
+class ImpliedRoleTests(AssignmentTestHelperMixin):
+
+ def test_implied_role_crd(self):
+ prior_role_ref = unit.new_role_ref()
+ self.role_api.create_role(prior_role_ref['id'], prior_role_ref)
+ implied_role_ref = unit.new_role_ref()
+ self.role_api.create_role(implied_role_ref['id'], implied_role_ref)
+
+ self.role_api.create_implied_role(
+ prior_role_ref['id'],
+ implied_role_ref['id'])
+ implied_role = self.role_api.get_implied_role(
+ prior_role_ref['id'],
+ implied_role_ref['id'])
+ expected_implied_role_ref = {
+ 'prior_role_id': prior_role_ref['id'],
+ 'implied_role_id': implied_role_ref['id']}
+ self.assertDictContainsSubset(
+ expected_implied_role_ref,
+ implied_role)
+
+ self.role_api.delete_implied_role(
+ prior_role_ref['id'],
+ implied_role_ref['id'])
+ self.assertRaises(exception.ImpliedRoleNotFound,
+ self.role_api.get_implied_role,
+                          prior_role_ref['id'],
+                          implied_role_ref['id'])
+
+ def test_delete_implied_role_returns_not_found(self):
+ self.assertRaises(exception.ImpliedRoleNotFound,
+ self.role_api.delete_implied_role,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex)
+
+ def test_role_assignments_simple_tree_of_implied_roles(self):
+ """Test that implied roles are expanded out."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 1},
+ 'roles': 4},
+ # Three level tree of implied roles
+ 'implied_roles': [{'role': 0, 'implied_roles': 1},
+ {'role': 1, 'implied_roles': [2, 3]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0}],
+ 'tests': [
+                # List all direct assignments for user[0]; this should just
+                # show the one top level role assignment
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'project': 0}]},
+ # Listing in effective mode should show the implied roles
+ # expanded out
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 0,
+ 'indirect': {'role': 0}},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'role': 1}}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_circular_inferences(self):
+ """Test that implied roles are expanded out."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 1},
+ 'roles': 4},
+ # Three level tree of implied roles
+ 'implied_roles': [{'role': 0, 'implied_roles': [1]},
+ {'role': 1, 'implied_roles': [2, 3]},
+ {'role': 3, 'implied_roles': [0]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0}],
+ 'tests': [
+                # List all direct assignments for user[0]; this should just
+                # show the one top level role assignment
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'project': 0}]},
+ # Listing in effective mode should show the implied roles
+ # expanded out
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 0, 'project': 0,
+ 'indirect': {'role': 3}},
+ {'user': 0, 'role': 1, 'project': 0,
+ 'indirect': {'role': 0}},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'role': 1}}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
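
The circular plan above (role 3 implying role 0 again) only terminates if expansion is cycle-safe. One conventional way to get exactly the expected results - every implication edge is recorded, but a role already seen is never re-expanded - is a breadth-first walk with a visited set. A sketch under those assumptions, not keystone's implementation:

    from collections import deque

    def expand_implied(root_role, implied_map):
        # implied_map: prior role -> list of implied roles, as in the
        # 'implied_roles' entries of these test plans.
        seen = {root_role}
        queue = deque([root_role])
        edges = []
        while queue:
            prior = queue.popleft()
            for implied in implied_map.get(prior, []):
                # The edge is always recorded (role 0 reappears above
                # with 'indirect': role 3), but a seen role is not
                # queued again, so the cycle cannot recurse.
                edges.append({'role': implied, 'indirect': {'role': prior}})
                if implied not in seen:
                    seen.add(implied)
                    queue.append(implied)
        return edges

    # expand_implied(0, {0: [1], 1: [2, 3], 3: [0]}) records roles 1, 2,
    # 3 and the back-edge to role 0, then stops.
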
+ def test_role_assignments_directed_graph_of_implied_roles(self):
+ """Test that a role can have multiple, different prior roles."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 1},
+ 'roles': 6},
+ # Three level tree of implied roles, where one of the roles at the
+ # bottom is implied by more than one top level role
+ 'implied_roles': [{'role': 0, 'implied_roles': [1, 2]},
+ {'role': 1, 'implied_roles': [3, 4]},
+ {'role': 5, 'implied_roles': 4}],
+ # The user gets both top level roles
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 5, 'project': 0}],
+ 'tests': [
+ # The implied roles should be expanded out and there should be
+ # two entries for the role that had two different prior roles.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 5, 'project': 0},
+ {'user': 0, 'role': 1, 'project': 0,
+ 'indirect': {'role': 0}},
+ {'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'role': 0}},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 4, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 4, 'project': 0,
+ 'indirect': {'role': 5}}]},
+ ]
+ }
+ test_data = self.execute_assignment_plan(test_plan)
+
+ # We should also be able to get a similar (yet summarized) answer to
+ # the above by calling get_roles_for_user_and_project(), which should
+ # list the role_ids, yet remove any duplicates
+ role_ids = self.assignment_api.get_roles_for_user_and_project(
+ test_data['users'][0]['id'], test_data['projects'][0]['id'])
+        # We should see 6 entries, not 7, since role index 4 appeared twice in
+        # the answer from list_role_assignments
+ self.assertThat(role_ids, matchers.HasLength(6))
+        for x in range(0, 6):
+ self.assertIn(test_data['roles'][x]['id'], role_ids)
+
+ def test_role_assignments_implied_roles_filtered_by_role(self):
+ """Test that you can filter by role even if roles are implied."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 2},
+ 'roles': 4},
+ # Three level tree of implied roles
+ 'implied_roles': [{'role': 0, 'implied_roles': 1},
+ {'role': 1, 'implied_roles': [2, 3]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0},
+ {'user': 0, 'role': 3, 'project': 1}],
+ 'tests': [
+            # List effective roles filtering by one of the implied roles,
+            # showing that the filter is applied post expansion of
+            # implied roles (and that non-implied roles are included in
+            # the filter)
+ {'params': {'role': 3, 'effective': True},
+ 'results': [{'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 3, 'project': 1}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
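
The ordering this test pins down - expand implied roles first, apply the role filter afterwards - means an assignment that only reaches the filtered role through an implication chain still matches. A trivial sketch over the same illustrative dicts:

    def filter_effective_by_role(effective_assignments, role_id):
        # Filtering after expansion keeps indirectly-reached roles.
        return [a for a in effective_assignments
                if a['role_id'] == role_id]
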
+ def test_role_assignments_simple_tree_of_implied_roles_on_domain(self):
+ """Test that implied roles are expanded out when placed on a domain."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1},
+ 'roles': 4},
+ # Three level tree of implied roles
+ 'implied_roles': [{'role': 0, 'implied_roles': 1},
+ {'role': 1, 'implied_roles': [2, 3]}],
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0}],
+ 'tests': [
+                # List all direct assignments for user[0]; this should just
+                # show the one top level role assignment
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0}]},
+                # Listing in effective mode should show the implied roles
+ # expanded out
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0},
+ {'user': 0, 'role': 1, 'domain': 0,
+ 'indirect': {'role': 0}},
+ {'user': 0, 'role': 2, 'domain': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 3, 'domain': 0,
+ 'indirect': {'role': 1}}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
+
+ def test_role_assignments_inherited_implied_roles(self):
+ """Test that you can intermix inherited and implied roles."""
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 1},
+ 'roles': 4},
+ # Simply one level of implied roles
+ 'implied_roles': [{'role': 0, 'implied_roles': 1}],
+ # Assign to top level role as an inherited assignment to the
+ # domain
+ 'assignments': [{'user': 0, 'role': 0, 'domain': 0,
+ 'inherited_to_projects': True}],
+ 'tests': [
+                # List all direct assignments for user[0]; this should just
+                # show the one top level role assignment
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'domain': 0,
+ 'inherited_to_projects': 'projects'}]},
+ # List in effective mode - we should only see the initial and
+ # implied role on the project (since inherited roles are not
+ # active on their anchor point).
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 0, 'project': 0,
+ 'indirect': {'domain': 0}},
+ {'user': 0, 'role': 1, 'project': 0,
+ 'indirect': {'domain': 0, 'role': 0}}]},
+ ]
+ }
+ self.config_fixture.config(group='os_inherit', enabled=True)
+ self.execute_assignment_plan(test_plan)
+
+ def test_role_assignments_domain_specific_with_implied_roles(self):
+ test_plan = {
+ 'entities': {'domains': {'users': 1, 'projects': 1, 'roles': 2},
+ 'roles': 2},
+            # Two level tree of implied roles, with the top and 1st level
+            # being domain specific roles, and the bottom level being
+            # inferred global roles.
+ 'implied_roles': [{'role': 0, 'implied_roles': [1]},
+ {'role': 1, 'implied_roles': [2, 3]}],
+ 'assignments': [{'user': 0, 'role': 0, 'project': 0}],
+ 'tests': [
+ # List all direct assignments for user[0], this should just
+ # show the one top level role assignment, even though this is a
+ # domain specific role (since we are in non-effective mode and
+ # we show any direct role assignment in that mode).
+ {'params': {'user': 0},
+ 'results': [{'user': 0, 'role': 0, 'project': 0}]},
+ # Now the effective ones - so the implied roles should be
+ # expanded out, as well as any domain specific roles should be
+ # removed.
+ {'params': {'user': 0, 'effective': True},
+ 'results': [{'user': 0, 'role': 2, 'project': 0,
+ 'indirect': {'role': 1}},
+ {'user': 0, 'role': 3, 'project': 0,
+ 'indirect': {'role': 1}}]},
+ ]
+ }
+ self.execute_assignment_plan(test_plan)
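
The plans above all exercise the same effective-mode rule: a direct
assignment of a "prior" role also yields an assignment for every role it
implies, transitively, with an 'indirect' marker naming the immediately
prior role. A rough standalone sketch of that expansion (the
expand_implied helper and its inputs are invented for illustration and
are not keystone internals):

# A sketch of effective-mode implied-role expansion, assuming the
# simplified in-memory structures below rather than keystone's backends.
def expand_implied(assignments, implied):
    effective = []
    for assignment in assignments:
        stack = [(assignment['role'], None)]
        while stack:
            role, via = stack.pop()
            entry = dict(assignment, role=role)
            if via is not None:
                # Record the immediately prior role, as the tests expect.
                entry['indirect'] = {'role': via}
            effective.append(entry)
            for child in implied.get(role, []):
                stack.append((child, role))
    return effective

# Example: role 0 implies 1, and 1 implies 2 and 3 (a three level tree).
implied = {0: [1], 1: [2, 3]}
direct = [{'user': 0, 'role': 0, 'project': 0}]
# Yields roles 0-3 on the project; all but role 0 carry an 'indirect' key.
print(expand_implied(direct, implied))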
diff --git a/keystone-moon/keystone/tests/unit/assignment/test_core.py b/keystone-moon/keystone/tests/unit/assignment/test_core.py
new file mode 100644
index 00000000..494e19c3
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/assignment/test_core.py
@@ -0,0 +1,123 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import default_fixtures
+
+
+class RoleTests(object):
+
+ def test_get_role_returns_not_found(self):
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ uuid.uuid4().hex)
+
+ def test_create_duplicate_role_name_fails(self):
+ role = unit.new_role_ref(id='fake1', name='fake1name')
+ self.role_api.create_role('fake1', role)
+ role['id'] = 'fake2'
+ self.assertRaises(exception.Conflict,
+ self.role_api.create_role,
+ 'fake2',
+ role)
+
+ def test_rename_duplicate_role_name_fails(self):
+ role1 = unit.new_role_ref(id='fake1', name='fake1name')
+ role2 = unit.new_role_ref(id='fake2', name='fake2name')
+ self.role_api.create_role('fake1', role1)
+ self.role_api.create_role('fake2', role2)
+ role1['name'] = 'fake2name'
+ self.assertRaises(exception.Conflict,
+ self.role_api.update_role,
+ 'fake1',
+ role1)
+
+ def test_role_crud(self):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ role_ref = self.role_api.get_role(role['id'])
+ role_ref_dict = {x: role_ref[x] for x in role_ref}
+ self.assertDictEqual(role, role_ref_dict)
+
+ role['name'] = uuid.uuid4().hex
+ updated_role_ref = self.role_api.update_role(role['id'], role)
+ role_ref = self.role_api.get_role(role['id'])
+ role_ref_dict = {x: role_ref[x] for x in role_ref}
+ self.assertDictEqual(role, role_ref_dict)
+ self.assertDictEqual(role_ref_dict, updated_role_ref)
+
+ self.role_api.delete_role(role['id'])
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ role['id'])
+
+ def test_update_role_returns_not_found(self):
+ role = unit.new_role_ref()
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.update_role,
+ role['id'],
+ role)
+
+ def test_list_roles(self):
+ roles = self.role_api.list_roles()
+ self.assertEqual(len(default_fixtures.ROLES), len(roles))
+ role_ids = set(role['id'] for role in roles)
+ expected_role_ids = set(role['id'] for role in default_fixtures.ROLES)
+ self.assertEqual(expected_role_ids, role_ids)
+
+ @unit.skip_if_cache_disabled('role')
+ def test_cache_layer_role_crud(self):
+ role = unit.new_role_ref()
+ role_id = role['id']
+ # Create role
+ self.role_api.create_role(role_id, role)
+ role_ref = self.role_api.get_role(role_id)
+ updated_role_ref = copy.deepcopy(role_ref)
+ updated_role_ref['name'] = uuid.uuid4().hex
+ # Update role, bypassing the role api manager
+ self.role_api.driver.update_role(role_id, updated_role_ref)
+ # Verify get_role still returns old ref
+ self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+ # Invalidate Cache
+ self.role_api.get_role.invalidate(self.role_api, role_id)
+ # Verify get_role returns the new role_ref
+ self.assertDictEqual(updated_role_ref,
+ self.role_api.get_role(role_id))
+        # Update role back to original via the role api manager
+ self.role_api.update_role(role_id, role_ref)
+ # Verify get_role returns the original role ref
+ self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+ # Delete role bypassing the role api manager
+ self.role_api.driver.delete_role(role_id)
+ # Verify get_role still returns the role_ref
+ self.assertDictEqual(role_ref, self.role_api.get_role(role_id))
+ # Invalidate cache
+ self.role_api.get_role.invalidate(self.role_api, role_id)
+ # Verify RoleNotFound is now raised
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ role_id)
+        # Recreate role
+        self.role_api.create_role(role_id, role)
+        self.role_api.get_role(role_id)
+        # Delete role via the role api manager
+        self.role_api.delete_role(role_id)
+        # Verify RoleNotFound is now raised
+ self.assertRaises(exception.RoleNotFound,
+ self.role_api.get_role,
+ role_id)
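
test_cache_layer_role_crud above follows a standard recipe for testing a
caching manager: mutate through the raw driver (bypassing the cache),
confirm the stale cached value is still served, invalidate explicitly,
then confirm the fresh value appears. A minimal sketch of that recipe
with a hand-rolled cache (keystone actually uses dogpile.cache; the toy
Driver/Manager pair here is purely illustrative):

class Driver(object):
    def __init__(self):
        self.store = {}

    def get_role(self, role_id):
        return self.store[role_id]


class Manager(object):
    def __init__(self, driver):
        self.driver = driver
        self._cache = {}

    def get_role(self, role_id):
        # Cached read: only hits the driver on a miss.
        if role_id not in self._cache:
            self._cache[role_id] = self.driver.get_role(role_id)
        return self._cache[role_id]

    def invalidate(self, role_id):
        self._cache.pop(role_id, None)


driver = Driver()
driver.store['r1'] = {'name': 'old'}
manager = Manager(driver)
assert manager.get_role('r1')['name'] == 'old'  # primes the cache
driver.store['r1'] = {'name': 'new'}            # update bypassing the manager
assert manager.get_role('r1')['name'] == 'old'  # stale value still served
manager.invalidate('r1')
assert manager.get_role('r1')['name'] == 'new'  # fresh value after invalidation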
diff --git a/keystone-moon/keystone/tests/unit/backend/core_ldap.py b/keystone-moon/keystone/tests/unit/backend/core_ldap.py
index 869bb620..8b72c62a 100644
--- a/keystone-moon/keystone/tests/unit/backend/core_ldap.py
+++ b/keystone-moon/keystone/tests/unit/backend/core_ldap.py
@@ -86,6 +86,7 @@ class BaseBackendLdapCommon(object):
class BaseBackendLdap(object):
"""Mixin class to set up an all-LDAP configuration."""
+
def setUp(self):
# NOTE(dstanek): The database must be setup prior to calling the
# parent's setUp. The parent's setUp uses services (like
@@ -113,7 +114,7 @@ class BaseBackendLdapIdentitySqlEverythingElse(unit.SQLDriverOverrides):
super(BaseBackendLdapIdentitySqlEverythingElse, self).setUp()
self.clear_database()
self.load_backends()
- cache.configure_cache_region(cache.REGION)
+ cache.configure_cache()
sqldb.recreate()
self.load_fixtures(default_fixtures)
@@ -137,6 +138,7 @@ class BaseBackendLdapIdentitySqlEverythingElseWithMapping(object):
Setting backward_compatible_ids to False will enable this mapping.
"""
+
def config_overrides(self):
super(BaseBackendLdapIdentitySqlEverythingElseWithMapping,
self).config_overrides()
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py
new file mode 100644
index 00000000..da1490a7
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlIdentityV8(test_backend_sql.SqlIdentity):
+ """Test that a V8 driver still passes the same tests.
+
+ We use the SQL driver as an example of a V8 legacy driver.
+
+ """
+
+ def config_overrides(self):
+ super(SqlIdentityV8, self).config_overrides()
+ # V8 SQL specific driver overrides
+ self.config_fixture.config(
+ group='assignment',
+ driver='keystone.assignment.V8_backends.sql.Assignment')
+ self.use_specific_sql_driver_version(
+ 'keystone.assignment', 'backends', 'V8_')
+
+ def test_delete_project_assignments_same_id_as_domain(self):
+ self.skipTest("V8 doesn't support project acting as a domain.")
+
+ def test_delete_user_assignments_user_same_id_as_group(self):
+ self.skipTest("Groups and users with the same ID are not supported.")
+
+ def test_delete_group_assignments_group_same_id_as_user(self):
+ self.skipTest("Groups and users with the same ID are not supported.")
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/assignment/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py
new file mode 100644
index 00000000..d5469768
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py
@@ -0,0 +1,108 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from six.moves import http_client
+
+from keystone.tests.unit import test_v3_federation
+
+
+class FederatedSetupMixinV8(object):
+ def useV8driver(self):
+ # We use the SQL driver as an example V8 driver, so override
+ # the current driver with that version.
+ self.config_fixture.config(
+ group='federation',
+ driver='keystone.federation.V8_backends.sql.Federation')
+ self.use_specific_sql_driver_version(
+ 'keystone.federation', 'backends', 'V8_')
+
+
+class FederatedIdentityProviderTestsV8(
+ test_v3_federation.FederatedIdentityProviderTests,
+ FederatedSetupMixinV8):
+ """Test that a V8 driver still passes the same tests."""
+
+ def config_overrides(self):
+ super(FederatedIdentityProviderTestsV8, self).config_overrides()
+ self.useV8driver()
+
+ def test_create_idp_remote_repeated(self):
+ """Creates two IdentityProvider entities with some remote_ids
+
+ A remote_id is the same for both so the second IdP is not
+ created because of the uniqueness of the remote_ids
+
+ Expect HTTP 409 Conflict code for the latter call.
+
+ Note: V9 drivers and later augment the conflict message with
+ additional information, which won't be present if we are running
+ a V8 driver - so override the newer tests to just ensure a
+ conflict message is raised.
+ """
+ body = self.default_body.copy()
+ repeated_remote_id = uuid.uuid4().hex
+ body['remote_ids'] = [uuid.uuid4().hex,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex,
+ repeated_remote_id]
+ self._create_default_idp(body=body)
+
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ body['remote_ids'] = [uuid.uuid4().hex,
+ repeated_remote_id]
+ self.put(url, body={'identity_provider': body},
+ expected_status=http_client.CONFLICT)
+
+ def test_check_idp_uniqueness(self):
+ """Add same IdP twice.
+
+ Expect HTTP 409 Conflict code for the latter call.
+
+ Note: V9 drivers and later augment the conflict message with
+ additional information, which won't be present if we are running
+ a V8 driver - so override the newer tests to just ensure a
+ conflict message is raised.
+ """
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ body = self._http_idp_input()
+ self.put(url, body={'identity_provider': body},
+ expected_status=http_client.CREATED)
+ self.put(url, body={'identity_provider': body},
+ expected_status=http_client.CONFLICT)
+
+
+class MappingCRUDTestsV8(
+ test_v3_federation.MappingCRUDTests,
+ FederatedSetupMixinV8):
+ """Test that a V8 driver still passes the same tests."""
+
+ def config_overrides(self):
+ super(MappingCRUDTestsV8, self).config_overrides()
+ self.useV8driver()
+
+
+class ServiceProviderTestsV8(
+ test_v3_federation.ServiceProviderTests,
+ FederatedSetupMixinV8):
+ """Test that a V8 driver still passes the same tests."""
+
+ def config_overrides(self):
+ super(ServiceProviderTestsV8, self).config_overrides()
+ self.useV8driver()
+
+ def test_filter_list_sp_by_id(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+ def test_filter_list_sp_by_enabled(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
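
Both uniqueness tests above pin down the same HTTP contract regardless
of driver version: the first PUT creates the identity provider, and a
second PUT that collides (same URL, or an overlapping remote_id) must be
rejected with a conflict. The constants come from six.moves.http_client,
which maps to httplib on Python 2 and http.client on Python 3:

# The status codes behind the assertions above.
from six.moves import http_client

assert http_client.CREATED == 201   # first PUT of the identity provider
assert http_client.CONFLICT == 409  # colliding PUT is rejected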
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/federation/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py
new file mode 100644
index 00000000..16acbdc3
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py
@@ -0,0 +1,71 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from keystone.resource.V8_backends import sql
+from keystone.tests import unit
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit.resource import test_backends
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlIdentityV8(test_backend_sql.SqlIdentity):
+ """Test that a V8 driver still passes the same tests.
+
+ We use the SQL driver as an example of a V8 legacy driver.
+
+ """
+
+ def config_overrides(self):
+ super(SqlIdentityV8, self).config_overrides()
+ # V8 SQL specific driver overrides
+ self.config_fixture.config(
+ group='resource',
+ driver='keystone.resource.V8_backends.sql.Resource')
+ self.use_specific_sql_driver_version(
+ 'keystone.resource', 'backends', 'V8_')
+
+ def test_delete_projects_from_ids(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+ def test_delete_projects_from_ids_with_no_existing_project_id(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+ def test_delete_project_cascade(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+ def test_delete_large_project_cascade(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+ def test_hidden_project_domain_root_is_really_hidden(self):
+ self.skipTest('Operation not supported in v8 and earlier drivers')
+
+
+class TestSqlResourceDriverV8(unit.BaseTestCase,
+ test_backends.ResourceDriverTests):
+ def setUp(self):
+ super(TestSqlResourceDriverV8, self).setUp()
+
+ version_specifiers = {
+ 'keystone.resource': {
+ 'versionless_backend': 'backends',
+ 'versioned_backend': 'V8_backends'
+ }
+ }
+ self.useFixture(database.Database(version_specifiers))
+
+ self.driver = sql.Resource()
+
+ @unittest.skip('Null domain not allowed.')
+ def test_create_project_null_domain(self):
+ pass
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/resource/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py
new file mode 100644
index 00000000..d9378c30
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests.unit import test_backend_sql
+
+
+class SqlIdentityV8(test_backend_sql.SqlIdentity):
+ """Test that a V8 driver still passes the same tests.
+
+ We use the SQL driver as an example of a V8 legacy driver.
+
+ """
+
+ def config_overrides(self):
+ super(SqlIdentityV8, self).config_overrides()
+ # V8 SQL specific driver overrides
+ self.config_fixture.config(
+ group='role',
+ driver='keystone.assignment.V8_role_backends.sql.Role')
+ self.use_specific_sql_driver_version(
+ 'keystone.assignment', 'role_backends', 'V8_')
diff --git a/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/__init__.py b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/backend/legacy_drivers/role/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/catalog/test_backends.py b/keystone-moon/keystone/tests/unit/catalog/test_backends.py
new file mode 100644
index 00000000..55898015
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/catalog/test_backends.py
@@ -0,0 +1,588 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import mock
+from six.moves import range
+from testtools import matchers
+
+from keystone.catalog import core
+from keystone.common import driver_hints
+from keystone import exception
+from keystone.tests import unit
+
+
+class CatalogTests(object):
+
+ _legacy_endpoint_id_in_endpoint = True
+ _enabled_default_to_true_when_creating_endpoint = False
+
+ def test_region_crud(self):
+ # create
+ region_id = '0' * 255
+ new_region = unit.new_region_ref(id=region_id)
+ res = self.catalog_api.create_region(new_region)
+
+        # Ensure that we don't need to include a parent_region_id in
+        # the supplied ref dict, but that it is returned from the
+        # endpoint with a value of None.
+ expected_region = new_region.copy()
+ expected_region['parent_region_id'] = None
+ self.assertDictEqual(expected_region, res)
+
+ # Test adding another region with the one above
+ # as its parent. We will check below whether deleting
+ # the parent successfully deletes any child regions.
+ parent_region_id = region_id
+ new_region = unit.new_region_ref(parent_region_id=parent_region_id)
+ region_id = new_region['id']
+ res = self.catalog_api.create_region(new_region)
+ self.assertDictEqual(new_region, res)
+
+ # list
+ regions = self.catalog_api.list_regions()
+ self.assertThat(regions, matchers.HasLength(2))
+ region_ids = [x['id'] for x in regions]
+ self.assertIn(parent_region_id, region_ids)
+ self.assertIn(region_id, region_ids)
+
+ # update
+ region_desc_update = {'description': uuid.uuid4().hex}
+ res = self.catalog_api.update_region(region_id, region_desc_update)
+ expected_region = new_region.copy()
+ expected_region['description'] = region_desc_update['description']
+ self.assertDictEqual(expected_region, res)
+
+ # delete
+ self.catalog_api.delete_region(parent_region_id)
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.delete_region,
+ parent_region_id)
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ parent_region_id)
+ # Ensure the child is also gone...
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_id)
+
+ def _create_region_with_parent_id(self, parent_id=None):
+ new_region = unit.new_region_ref(parent_region_id=parent_id)
+ self.catalog_api.create_region(new_region)
+ return new_region
+
+ def test_list_regions_filtered_by_parent_region_id(self):
+ new_region = self._create_region_with_parent_id()
+ parent_id = new_region['id']
+ new_region = self._create_region_with_parent_id(parent_id)
+ new_region = self._create_region_with_parent_id(parent_id)
+
+ # filter by parent_region_id
+ hints = driver_hints.Hints()
+ hints.add_filter('parent_region_id', parent_id)
+ regions = self.catalog_api.list_regions(hints)
+ for region in regions:
+ self.assertEqual(parent_id, region['parent_region_id'])
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_cache_layer_region_crud(self):
+ new_region = unit.new_region_ref()
+ region_id = new_region['id']
+ self.catalog_api.create_region(new_region.copy())
+ updated_region = copy.deepcopy(new_region)
+ updated_region['description'] = uuid.uuid4().hex
+ # cache the result
+ self.catalog_api.get_region(region_id)
+ # update the region bypassing catalog_api
+ self.catalog_api.driver.update_region(region_id, updated_region)
+ self.assertDictContainsSubset(new_region,
+ self.catalog_api.get_region(region_id))
+ self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
+ self.assertDictContainsSubset(updated_region,
+ self.catalog_api.get_region(region_id))
+ # delete the region
+ self.catalog_api.driver.delete_region(region_id)
+ # still get the old region
+ self.assertDictContainsSubset(updated_region,
+ self.catalog_api.get_region(region_id))
+ self.catalog_api.get_region.invalidate(self.catalog_api, region_id)
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region, region_id)
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_region(self):
+ new_region = unit.new_region_ref()
+ region_id = new_region['id']
+ self.catalog_api.create_region(new_region)
+
+ # cache the region
+ self.catalog_api.get_region(region_id)
+
+ # update the region via catalog_api
+ new_description = {'description': uuid.uuid4().hex}
+ self.catalog_api.update_region(region_id, new_description)
+
+ # assert that we can get the new region
+ current_region = self.catalog_api.get_region(region_id)
+ self.assertEqual(new_description['description'],
+ current_region['description'])
+
+ def test_create_region_with_duplicate_id(self):
+ new_region = unit.new_region_ref()
+ self.catalog_api.create_region(new_region)
+ # Create region again with duplicate id
+ self.assertRaises(exception.Conflict,
+ self.catalog_api.create_region,
+ new_region)
+
+ def test_get_region_returns_not_found(self):
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ uuid.uuid4().hex)
+
+ def test_delete_region_returns_not_found(self):
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.delete_region,
+ uuid.uuid4().hex)
+
+ def test_create_region_invalid_parent_region_returns_not_found(self):
+ new_region = unit.new_region_ref(parent_region_id='nonexisting')
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.create_region,
+ new_region)
+
+ def test_avoid_creating_circular_references_in_regions_update(self):
+ region_one = self._create_region_with_parent_id()
+
+ # self circle: region_one->region_one
+ self.assertRaises(exception.CircularRegionHierarchyError,
+ self.catalog_api.update_region,
+ region_one['id'],
+ {'parent_region_id': region_one['id']})
+
+ # region_one->region_two->region_one
+ region_two = self._create_region_with_parent_id(region_one['id'])
+ self.assertRaises(exception.CircularRegionHierarchyError,
+ self.catalog_api.update_region,
+ region_one['id'],
+ {'parent_region_id': region_two['id']})
+
+        # region_two->region_three->region_four->region_two (region_one
+        # is not part of the cycle)
+ region_three = self._create_region_with_parent_id(region_two['id'])
+ region_four = self._create_region_with_parent_id(region_three['id'])
+ self.assertRaises(exception.CircularRegionHierarchyError,
+ self.catalog_api.update_region,
+ region_two['id'],
+ {'parent_region_id': region_four['id']})
+
+ @mock.patch.object(core.CatalogDriverV8,
+ "_ensure_no_circle_in_hierarchical_regions")
+ def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle):
+ # turn off the enforcement so that cycles can be created for the test
+ mock_ensure_on_circle.return_value = None
+
+ region_one = self._create_region_with_parent_id()
+
+ # self circle: region_one->region_one
+ self.catalog_api.update_region(
+ region_one['id'],
+ {'parent_region_id': region_one['id']})
+ self.catalog_api.delete_region(region_one['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_one['id'])
+
+ # region_one->region_two->region_one
+ region_one = self._create_region_with_parent_id()
+ region_two = self._create_region_with_parent_id(region_one['id'])
+ self.catalog_api.update_region(
+ region_one['id'],
+ {'parent_region_id': region_two['id']})
+ self.catalog_api.delete_region(region_one['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_one['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_two['id'])
+
+ # region_one->region_two->region_three->region_one
+ region_one = self._create_region_with_parent_id()
+ region_two = self._create_region_with_parent_id(region_one['id'])
+ region_three = self._create_region_with_parent_id(region_two['id'])
+ self.catalog_api.update_region(
+ region_one['id'],
+ {'parent_region_id': region_three['id']})
+ self.catalog_api.delete_region(region_two['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_two['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_one['id'])
+ self.assertRaises(exception.RegionNotFound,
+ self.catalog_api.get_region,
+ region_three['id'])
+
+ def test_service_crud(self):
+ # create
+ new_service = unit.new_service_ref()
+ service_id = new_service['id']
+ res = self.catalog_api.create_service(service_id, new_service)
+ self.assertDictEqual(new_service, res)
+
+ # list
+ services = self.catalog_api.list_services()
+ self.assertIn(service_id, [x['id'] for x in services])
+
+ # update
+ service_name_update = {'name': uuid.uuid4().hex}
+ res = self.catalog_api.update_service(service_id, service_name_update)
+ expected_service = new_service.copy()
+ expected_service['name'] = service_name_update['name']
+ self.assertDictEqual(expected_service, res)
+
+ # delete
+ self.catalog_api.delete_service(service_id)
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.delete_service,
+ service_id)
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.get_service,
+ service_id)
+
+ def _create_random_service(self):
+ new_service = unit.new_service_ref()
+ service_id = new_service['id']
+ return self.catalog_api.create_service(service_id, new_service)
+
+ def test_service_filtering(self):
+ target_service = self._create_random_service()
+ unrelated_service1 = self._create_random_service()
+ unrelated_service2 = self._create_random_service()
+
+ # filter by type
+ hint_for_type = driver_hints.Hints()
+ hint_for_type.add_filter(name="type", value=target_service['type'])
+ services = self.catalog_api.list_services(hint_for_type)
+
+ self.assertEqual(1, len(services))
+ filtered_service = services[0]
+ self.assertEqual(target_service['type'], filtered_service['type'])
+ self.assertEqual(target_service['id'], filtered_service['id'])
+
+ # filter should have been removed, since it was already used by the
+ # backend
+ self.assertEqual(0, len(hint_for_type.filters))
+
+ # the backend shouldn't filter by name, since this is handled by the
+ # front end
+ hint_for_name = driver_hints.Hints()
+ hint_for_name.add_filter(name="name", value=target_service['name'])
+ services = self.catalog_api.list_services(hint_for_name)
+
+ self.assertEqual(3, len(services))
+
+ # filter should still be there, since it wasn't used by the backend
+ self.assertEqual(1, len(hint_for_name.filters))
+
+ self.catalog_api.delete_service(target_service['id'])
+ self.catalog_api.delete_service(unrelated_service1['id'])
+ self.catalog_api.delete_service(unrelated_service2['id'])
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_cache_layer_service_crud(self):
+ new_service = unit.new_service_ref()
+ service_id = new_service['id']
+ res = self.catalog_api.create_service(service_id, new_service)
+ self.assertDictEqual(new_service, res)
+ self.catalog_api.get_service(service_id)
+ updated_service = copy.deepcopy(new_service)
+ updated_service['description'] = uuid.uuid4().hex
+ # update bypassing catalog api
+ self.catalog_api.driver.update_service(service_id, updated_service)
+ self.assertDictContainsSubset(new_service,
+ self.catalog_api.get_service(service_id))
+ self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
+ self.assertDictContainsSubset(updated_service,
+ self.catalog_api.get_service(service_id))
+
+ # delete bypassing catalog api
+ self.catalog_api.driver.delete_service(service_id)
+ self.assertDictContainsSubset(updated_service,
+ self.catalog_api.get_service(service_id))
+ self.catalog_api.get_service.invalidate(self.catalog_api, service_id)
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.delete_service,
+ service_id)
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.get_service,
+ service_id)
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_service(self):
+ new_service = unit.new_service_ref()
+ service_id = new_service['id']
+ self.catalog_api.create_service(service_id, new_service)
+
+ # cache the service
+ self.catalog_api.get_service(service_id)
+
+ # update the service via catalog api
+ new_type = {'type': uuid.uuid4().hex}
+ self.catalog_api.update_service(service_id, new_type)
+
+ # assert that we can get the new service
+ current_service = self.catalog_api.get_service(service_id)
+ self.assertEqual(new_type['type'], current_service['type'])
+
+ def test_delete_service_with_endpoint(self):
+ # create a service
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ # create an endpoint attached to the service
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ region_id=None)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+ # deleting the service should also delete the endpoint
+ self.catalog_api.delete_service(service['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.get_endpoint,
+ endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.delete_endpoint,
+ endpoint['id'])
+
+ def test_cache_layer_delete_service_with_endpoint(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ # create an endpoint attached to the service
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ region_id=None)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ # cache the result
+ self.catalog_api.get_service(service['id'])
+ self.catalog_api.get_endpoint(endpoint['id'])
+ # delete the service bypassing catalog api
+ self.catalog_api.driver.delete_service(service['id'])
+ self.assertDictContainsSubset(endpoint,
+ self.catalog_api.
+ get_endpoint(endpoint['id']))
+ self.assertDictContainsSubset(service,
+ self.catalog_api.
+ get_service(service['id']))
+ self.catalog_api.get_endpoint.invalidate(self.catalog_api,
+ endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.get_endpoint,
+ endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.delete_endpoint,
+ endpoint['id'])
+ # multiple endpoints associated with a service
+ second_endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ region_id=None)
+ self.catalog_api.create_service(service['id'], service)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.catalog_api.create_endpoint(second_endpoint['id'],
+ second_endpoint)
+ self.catalog_api.delete_service(service['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.get_endpoint,
+ endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.delete_endpoint,
+ endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.get_endpoint,
+ second_endpoint['id'])
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.delete_endpoint,
+ second_endpoint['id'])
+
+ def test_get_service_returns_not_found(self):
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.get_service,
+ uuid.uuid4().hex)
+
+ def test_delete_service_returns_not_found(self):
+ self.assertRaises(exception.ServiceNotFound,
+ self.catalog_api.delete_service,
+ uuid.uuid4().hex)
+
+ def test_create_endpoint_nonexistent_service(self):
+ endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex,
+ region_id=None)
+ self.assertRaises(exception.ValidationError,
+ self.catalog_api.create_endpoint,
+ endpoint['id'],
+ endpoint)
+
+ def test_update_endpoint_nonexistent_service(self):
+ dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
+ self._create_endpoints())
+ new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex)
+ self.assertRaises(exception.ValidationError,
+ self.catalog_api.update_endpoint,
+ enabled_endpoint['id'],
+ new_endpoint)
+
+ def test_create_endpoint_nonexistent_region(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ endpoint = unit.new_endpoint_ref(service_id=service['id'])
+ self.assertRaises(exception.ValidationError,
+ self.catalog_api.create_endpoint,
+ endpoint['id'],
+ endpoint)
+
+ def test_update_endpoint_nonexistent_region(self):
+ dummy_service, enabled_endpoint, dummy_disabled_endpoint = (
+ self._create_endpoints())
+ new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex)
+ self.assertRaises(exception.ValidationError,
+ self.catalog_api.update_endpoint,
+ enabled_endpoint['id'],
+ new_endpoint)
+
+ def test_get_endpoint_returns_not_found(self):
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.get_endpoint,
+ uuid.uuid4().hex)
+
+ def test_delete_endpoint_returns_not_found(self):
+ self.assertRaises(exception.EndpointNotFound,
+ self.catalog_api.delete_endpoint,
+ uuid.uuid4().hex)
+
+ def test_create_endpoint(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ region_id=None)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
+
+ def test_update_endpoint(self):
+ dummy_service_ref, endpoint_ref, dummy_disabled_endpoint_ref = (
+ self._create_endpoints())
+ res = self.catalog_api.update_endpoint(endpoint_ref['id'],
+ {'interface': 'private'})
+ expected_endpoint = endpoint_ref.copy()
+ expected_endpoint['enabled'] = True
+ expected_endpoint['interface'] = 'private'
+ if self._legacy_endpoint_id_in_endpoint:
+ expected_endpoint['legacy_endpoint_id'] = None
+ if self._enabled_default_to_true_when_creating_endpoint:
+ expected_endpoint['enabled'] = True
+ self.assertDictEqual(expected_endpoint, res)
+
+ def _create_endpoints(self):
+ # Creates a service and 2 endpoints for the service in the same region.
+ # The 'public' interface is enabled and the 'internal' interface is
+ # disabled.
+
+ def create_endpoint(service_id, region, **kwargs):
+ ref = unit.new_endpoint_ref(
+ service_id=service_id,
+ region_id=region,
+ url='http://localhost/%s' % uuid.uuid4().hex,
+ **kwargs)
+
+ self.catalog_api.create_endpoint(ref['id'], ref)
+ return ref
+
+ # Create a service for use with the endpoints.
+ service_ref = unit.new_service_ref()
+ service_id = service_ref['id']
+ self.catalog_api.create_service(service_id, service_ref)
+
+ region = unit.new_region_ref()
+ self.catalog_api.create_region(region)
+
+ # Create endpoints
+ enabled_endpoint_ref = create_endpoint(service_id, region['id'])
+ disabled_endpoint_ref = create_endpoint(
+ service_id, region['id'], enabled=False, interface='internal')
+
+ return service_ref, enabled_endpoint_ref, disabled_endpoint_ref
+
+ def test_list_endpoints(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ expected_ids = set([uuid.uuid4().hex for _ in range(3)])
+ for endpoint_id in expected_ids:
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ id=endpoint_id,
+ region_id=None)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+ endpoints = self.catalog_api.list_endpoints()
+ self.assertEqual(expected_ids, set(e['id'] for e in endpoints))
+
+ def test_get_catalog_endpoint_disabled(self):
+ """Get back only enabled endpoints when get the v2 catalog."""
+ service_ref, enabled_endpoint_ref, dummy_disabled_endpoint_ref = (
+ self._create_endpoints())
+
+ user_id = uuid.uuid4().hex
+ project_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_catalog(user_id, project_id)
+
+ exp_entry = {
+ 'id': enabled_endpoint_ref['id'],
+ 'name': service_ref['name'],
+ 'publicURL': enabled_endpoint_ref['url'],
+ }
+
+ region = enabled_endpoint_ref['region_id']
+ self.assertEqual(exp_entry, catalog[region][service_ref['type']])
+
+ def test_get_v3_catalog_endpoint_disabled(self):
+ """Get back only enabled endpoints when get the v3 catalog."""
+ enabled_endpoint_ref = self._create_endpoints()[1]
+
+ user_id = uuid.uuid4().hex
+ project_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_v3_catalog(user_id, project_id)
+
+ endpoint_ids = [x['id'] for x in catalog[0]['endpoints']]
+ self.assertEqual([enabled_endpoint_ref['id']], endpoint_ids)
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_invalidate_cache_when_updating_endpoint(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ # create an endpoint attached to the service
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ region_id=None)
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+
+ # cache the endpoint
+ self.catalog_api.get_endpoint(endpoint['id'])
+
+ # update the endpoint via catalog api
+ new_url = {'url': uuid.uuid4().hex}
+ self.catalog_api.update_endpoint(endpoint['id'], new_url)
+
+ # assert that we can get the new endpoint
+ current_endpoint = self.catalog_api.get_endpoint(endpoint['id'])
+ self.assertEqual(new_url['url'], current_endpoint['url'])
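
test_service_filtering above encodes the driver_hints contract: a
backend that can satisfy a filter consumes it, removing it from the
hints object, while filters it cannot handle are left in place for the
frontend to apply. A rough sketch of that contract (a simplified Hints
stand-in and toy backend, not keystone.common.driver_hints itself):

class Hints(object):
    def __init__(self):
        self.filters = []

    def add_filter(self, name, value):
        self.filters.append({'name': name, 'value': value})


def list_services(services, hints):
    # This toy backend only knows how to filter by 'type'; it consumes
    # that filter and leaves anything else for the frontend.
    results = services
    remaining = []
    for f in hints.filters:
        if f['name'] == 'type':
            results = [s for s in results if s['type'] == f['value']]
        else:
            remaining.append(f)
    hints.filters = remaining
    return results


services = [{'type': 'identity', 'name': 'keystone'},
            {'type': 'compute', 'name': 'nova'}]
hints = Hints()
hints.add_filter('type', 'identity')
assert len(list_services(services, hints)) == 1
assert len(hints.filters) == 0  # satisfied filter was removed

hints = Hints()
hints.add_filter('name', 'nova')  # the backend cannot handle 'name'
assert len(list_services(services, hints)) == 2
assert len(hints.filters) == 1  # left for the frontend to apply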
diff --git a/keystone-moon/keystone/tests/unit/catalog/test_core.py b/keystone-moon/keystone/tests/unit/catalog/test_core.py
index 2f334bb6..b04b0bb7 100644
--- a/keystone-moon/keystone/tests/unit/catalog/test_core.py
+++ b/keystone-moon/keystone/tests/unit/catalog/test_core.py
@@ -10,27 +10,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
+import uuid
from keystone.catalog import core
from keystone import exception
from keystone.tests import unit
-CONF = cfg.CONF
-
-
class FormatUrlTests(unit.BaseTestCase):
def test_successful_formatting(self):
url_template = ('http://$(public_bind_host)s:$(admin_port)d/'
- '$(tenant_id)s/$(user_id)s')
+ '$(tenant_id)s/$(user_id)s/$(project_id)s')
+ project_id = uuid.uuid4().hex
values = {'public_bind_host': 'server', 'admin_port': 9090,
- 'tenant_id': 'A', 'user_id': 'B'}
+ 'tenant_id': 'A', 'user_id': 'B', 'project_id': project_id}
actual_url = core.format_url(url_template, values)
- expected_url = 'http://server:9090/A/B'
- self.assertEqual(actual_url, expected_url)
+ expected_url = 'http://server:9090/A/B/%s' % (project_id,)
+ self.assertEqual(expected_url, actual_url)
def test_raises_malformed_on_missing_key(self):
self.assertRaises(exception.MalformedEndpoint,
@@ -73,7 +71,7 @@ class FormatUrlTests(unit.BaseTestCase):
url_template,
values)
- def test_substitution_with_allowed_keyerror(self):
+ def test_substitution_with_allowed_tenant_keyerror(self):
# No value of 'tenant_id' is passed into url_template.
# mod: format_url will return None instead of raising
# "MalformedEndpoint" exception.
@@ -86,3 +84,17 @@ class FormatUrlTests(unit.BaseTestCase):
'user_id': 'B'}
self.assertIsNone(core.format_url(url_template, values,
silent_keyerror_failures=['tenant_id']))
+
+ def test_substitution_with_allowed_project_keyerror(self):
+ # No value of 'project_id' is passed into url_template.
+ # mod: format_url will return None instead of raising
+ # "MalformedEndpoint" exception.
+    # This is intentional behavior, since we don't want to skip
+    # all the later endpoints just because one endpoint URL
+    # tries to substitute a 'project_id' that is missing.
+ url_template = ('http://$(public_bind_host)s:$(admin_port)d/'
+ '$(project_id)s/$(user_id)s')
+ values = {'public_bind_host': 'server', 'admin_port': 9090,
+ 'user_id': 'B'}
+ self.assertIsNone(core.format_url(url_template, values,
+ silent_keyerror_failures=['project_id']))
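
The format_url tests revolve around two behaviours: $(key)s templates
are filled in from the values dict, and a missing key that is listed in
silent_keyerror_failures yields None (so one bad endpoint does not abort
catalog construction) instead of raising MalformedEndpoint. A condensed
sketch of just the substitution path (the real
keystone.catalog.core.format_url also validates templates and logs):

def format_url(url, values, silent_keyerror_failures=None):
    silent = silent_keyerror_failures or []
    try:
        # Keystone templates use $(key)s; translate to %-formatting.
        return url.replace('$(', '%(') % values
    except KeyError as e:
        if e.args[0] in silent:
            return None  # skip this endpoint, keep the rest
        raise  # the real code raises MalformedEndpoint here


template = 'http://$(public_bind_host)s:$(admin_port)d/$(project_id)s'
values = {'public_bind_host': 'server', 'admin_port': 9090}
assert format_url(template, values,
                  silent_keyerror_failures=['project_id']) is None
values['project_id'] = 'A'
assert format_url(template, values) == 'http://server:9090/A'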
diff --git a/keystone-moon/keystone/tests/unit/common/test_authorization.py b/keystone-moon/keystone/tests/unit/common/test_authorization.py
new file mode 100644
index 00000000..73ddbc61
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/common/test_authorization.py
@@ -0,0 +1,161 @@
+# Copyright 2015 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import copy
+import uuid
+
+from keystone.common import authorization
+from keystone import exception
+from keystone.federation import constants as federation_constants
+from keystone.models import token_model
+from keystone.tests import unit
+from keystone.tests.unit import test_token_provider
+
+
+class TestTokenToAuthContext(unit.BaseTestCase):
+ def test_token_is_project_scoped_with_trust(self):
+ # Check auth_context result when the token is project-scoped and has
+ # trust info.
+
+ # SAMPLE_V3_TOKEN has OS-TRUST:trust in it.
+ token_data = test_token_provider.SAMPLE_V3_TOKEN
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertEqual(token, auth_context['token'])
+ self.assertTrue(auth_context['is_delegated_auth'])
+ self.assertEqual(token_data['token']['user']['id'],
+ auth_context['user_id'])
+ self.assertEqual(token_data['token']['user']['domain']['id'],
+ auth_context['user_domain_id'])
+ self.assertEqual(token_data['token']['project']['id'],
+ auth_context['project_id'])
+ self.assertEqual(token_data['token']['project']['domain']['id'],
+ auth_context['project_domain_id'])
+ self.assertNotIn('domain_id', auth_context)
+ self.assertNotIn('domain_name', auth_context)
+ self.assertEqual(token_data['token']['OS-TRUST:trust']['id'],
+ auth_context['trust_id'])
+ self.assertEqual(
+ token_data['token']['OS-TRUST:trust']['trustor_user_id'],
+ auth_context['trustor_id'])
+ self.assertEqual(
+ token_data['token']['OS-TRUST:trust']['trustee_user_id'],
+ auth_context['trustee_id'])
+ self.assertItemsEqual(
+ [r['name'] for r in token_data['token']['roles']],
+ auth_context['roles'])
+ self.assertIsNone(auth_context['consumer_id'])
+ self.assertIsNone(auth_context['access_token_id'])
+ self.assertNotIn('group_ids', auth_context)
+
+ def test_token_is_domain_scoped(self):
+ # Check contents of auth_context when token is domain-scoped.
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+ del token_data['token']['project']
+
+ domain_id = uuid.uuid4().hex
+ domain_name = uuid.uuid4().hex
+ token_data['token']['domain'] = {'id': domain_id, 'name': domain_name}
+
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertNotIn('project_id', auth_context)
+ self.assertNotIn('project_domain_id', auth_context)
+
+ self.assertEqual(domain_id, auth_context['domain_id'])
+ self.assertEqual(domain_name, auth_context['domain_name'])
+
+ def test_token_is_unscoped(self):
+ # Check contents of auth_context when the token is unscoped.
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+ del token_data['token']['project']
+
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertNotIn('project_id', auth_context)
+ self.assertNotIn('project_domain_id', auth_context)
+ self.assertNotIn('domain_id', auth_context)
+ self.assertNotIn('domain_name', auth_context)
+
+ def test_token_is_for_federated_user(self):
+ # When the token is for a federated user then group_ids is in
+ # auth_context.
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+
+ group_ids = [uuid.uuid4().hex for x in range(1, 5)]
+
+ federation_data = {'identity_provider': {'id': uuid.uuid4().hex},
+ 'protocol': {'id': 'saml2'},
+ 'groups': [{'id': gid} for gid in group_ids]}
+ token_data['token']['user'][federation_constants.FEDERATION] = (
+ federation_data)
+
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertItemsEqual(group_ids, auth_context['group_ids'])
+
+ def test_oauth_variables_set_for_oauth_token(self):
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+ access_token_id = uuid.uuid4().hex
+ consumer_id = uuid.uuid4().hex
+ token_data['token']['OS-OAUTH1'] = {'access_token_id': access_token_id,
+ 'consumer_id': consumer_id}
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertEqual(access_token_id, auth_context['access_token_id'])
+ self.assertEqual(consumer_id, auth_context['consumer_id'])
+
+ def test_oauth_variables_not_set(self):
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ auth_context = authorization.token_to_auth_context(token)
+
+ self.assertIsNone(auth_context['access_token_id'])
+ self.assertIsNone(auth_context['consumer_id'])
+
+ def test_token_is_not_KeystoneToken_raises_exception(self):
+ # If the token isn't a KeystoneToken then an UnexpectedError exception
+ # is raised.
+ self.assertRaises(exception.UnexpectedError,
+ authorization.token_to_auth_context, {})
+
+ def test_user_id_missing_in_token_raises_exception(self):
+ # If there's no user ID in the token then an Unauthorized
+ # exception is raised.
+ token_data = copy.deepcopy(test_token_provider.SAMPLE_V3_TOKEN)
+ del token_data['token']['user']['id']
+
+ token = token_model.KeystoneToken(token_id=uuid.uuid4().hex,
+ token_data=token_data)
+
+ self.assertRaises(exception.Unauthorized,
+ authorization.token_to_auth_context, token)
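
Taken together, these tests pin down the shape of the auth_context dict:
scope keys appear only for the matching scope, trust and OAuth fields
are copied through or set to None, and group_ids appears only for
federated users. Schematically (a sketch of the mapping the assertions
imply, operating on a bare token payload dict rather than the
KeystoneToken model, and not the function's actual body):

def token_to_auth_context(token):
    ctx = {
        'token': token,
        'user_id': token['user']['id'],  # absence raises Unauthorized
        'user_domain_id': token['user']['domain']['id'],
        'roles': [r['name'] for r in token.get('roles', [])],
    }
    if 'project' in token:  # project-scoped
        ctx['project_id'] = token['project']['id']
        ctx['project_domain_id'] = token['project']['domain']['id']
    elif 'domain' in token:  # domain-scoped
        ctx['domain_id'] = token['domain']['id']
        ctx['domain_name'] = token['domain']['name']
    trust = token.get('OS-TRUST:trust')
    # The real function also treats OAuth tokens as delegated auth.
    ctx['is_delegated_auth'] = trust is not None
    if trust:
        ctx['trust_id'] = trust['id']
        ctx['trustor_id'] = trust['trustor_user_id']
        ctx['trustee_id'] = trust['trustee_user_id']
    oauth = token.get('OS-OAUTH1', {})
    ctx['access_token_id'] = oauth.get('access_token_id')
    ctx['consumer_id'] = oauth.get('consumer_id')
    return ctx


ctx = token_to_auth_context({'user': {'id': 'u1', 'domain': {'id': 'd1'}}})
assert ctx['consumer_id'] is None and not ctx['is_delegated_auth']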
diff --git a/keystone-moon/keystone/tests/unit/common/test_ldap.py b/keystone-moon/keystone/tests/unit/common/test_ldap.py
index e6e2c732..eed77286 100644
--- a/keystone-moon/keystone/tests/unit/common/test_ldap.py
+++ b/keystone-moon/keystone/tests/unit/common/test_ldap.py
@@ -27,6 +27,7 @@ from keystone.common.ldap import core as common_ldap_core
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import fakeldap
+from keystone.tests.unit.ksfixtures import database
CONF = cfg.CONF
@@ -195,8 +196,8 @@ class DnCompareTest(unit.BaseTestCase):
def test_startswith_unicode(self):
# dn_startswith accepts unicode.
- child = u'cn=cn=fäké,ou=OpenStäck'
- parent = 'ou=OpenStäck'
+ child = u'cn=fäké,ou=OpenStäck'
+ parent = u'ou=OpenStäck'
self.assertTrue(ks_ldap.dn_startswith(child, parent))
@@ -207,6 +208,8 @@ class LDAPDeleteTreeTest(unit.TestCase):
ks_ldap.register_handler('fake://',
fakeldap.FakeLdapNoSubtreeDelete)
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
+
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -226,11 +229,11 @@ class LDAPDeleteTreeTest(unit.TestCase):
config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
return config_files
- def test_deleteTree(self):
+ def test_delete_tree(self):
"""Test manually deleting a tree.
Few LDAP servers support CONTROL_DELETETREE. This test
- exercises the alternate code paths in BaseLdap.deleteTree.
+ exercises the alternate code paths in BaseLdap.delete_tree.
"""
conn = self.identity_api.user.get_connection()
@@ -251,7 +254,7 @@ class LDAPDeleteTreeTest(unit.TestCase):
# cn=base
# cn=child,cn=base
# cn=grandchild,cn=child,cn=base
- # then attempt to deleteTree(cn=base)
+ # then attempt to delete_tree(cn=base)
base_id = 'base'
base_dn = create_entry(base_id)
child_dn = create_entry('child', base_dn)
@@ -273,8 +276,8 @@ class LDAPDeleteTreeTest(unit.TestCase):
self.assertRaises(ldap.NOT_ALLOWED_ON_NONLEAF,
conn.delete_s, child_dn)
- # call our deleteTree implementation
- self.identity_api.user.deleteTree(base_id)
+ # call our delete_tree implementation
+ self.identity_api.user.delete_tree(base_id)
self.assertRaises(ldap.NO_SUCH_OBJECT,
conn.search_s, base_dn, ldap.SCOPE_BASE)
self.assertRaises(ldap.NO_SUCH_OBJECT,
@@ -283,6 +286,24 @@ class LDAPDeleteTreeTest(unit.TestCase):
conn.search_s, grandchild_dn, ldap.SCOPE_BASE)
+class MultiURLTests(unit.TestCase):
+ """Tests for setting multiple LDAP URLs."""
+
+ def test_multiple_urls_with_comma_no_conn_pool(self):
+ urls = 'ldap://localhost,ldap://backup.localhost'
+ self.config_fixture.config(group='ldap', url=urls, use_pool=False)
+ base_ldap = ks_ldap.BaseLdap(CONF)
+ ldap_connection = base_ldap.get_connection()
+ self.assertEqual(urls, ldap_connection.conn.conn._uri)
+
+ def test_multiple_urls_with_comma_with_conn_pool(self):
+ urls = 'ldap://localhost,ldap://backup.localhost'
+ self.config_fixture.config(group='ldap', url=urls, use_pool=True)
+ base_ldap = ks_ldap.BaseLdap(CONF)
+ ldap_connection = base_ldap.get_connection()
+ self.assertEqual(urls, ldap_connection.conn.conn_pool.uri)
+
+
class SslTlsTest(unit.TestCase):
"""Tests for the SSL/TLS functionality in keystone.common.ldap.core."""
@@ -359,6 +380,7 @@ class LDAPPagedResultsTest(unit.TestCase):
ks_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.addCleanup(common_ldap_core._HANDLERS.clear)
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
self.load_backends()
self.load_fixtures(default_fixtures)
diff --git a/keystone-moon/keystone/tests/unit/common/test_manager.py b/keystone-moon/keystone/tests/unit/common/test_manager.py
index 1bc19763..7ef91e15 100644
--- a/keystone-moon/keystone/tests/unit/common/test_manager.py
+++ b/keystone-moon/keystone/tests/unit/common/test_manager.py
@@ -24,7 +24,7 @@ class TestCreateLegacyDriver(unit.BaseTestCase):
Driver = manager.create_legacy_driver(catalog.CatalogDriverV8)
# NOTE(dstanek): I want to subvert the requirement for this
- # class to implement all of the abstractmethods.
+ # class to implement all of the abstract methods.
Driver.__abstractmethods__ = set()
impl = Driver()
@@ -32,8 +32,9 @@ class TestCreateLegacyDriver(unit.BaseTestCase):
'as_of': 'Liberty',
'what': 'keystone.catalog.core.Driver',
'in_favor_of': 'keystone.catalog.core.CatalogDriverV8',
- 'remove_in': 'N',
+ 'remove_in': mock.ANY,
}
mock_reporter.assert_called_with(mock.ANY, mock.ANY, details)
+ self.assertEqual('N', mock_reporter.call_args[0][2]['remove_in'][0])
self.assertIsInstance(impl, catalog.CatalogDriverV8)
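
The test_manager change above swaps an exact 'remove_in': 'N'
expectation for mock.ANY plus a targeted check on call_args, so the test
keeps passing when the deprecation's removal release moves forward while
still verifying the release series starts with 'N'. The same two-step
pattern in isolation:

# Match loosely with mock.ANY, then pin down only the part that matters.
import mock  # available as unittest.mock on Python 3

reporter = mock.Mock()
reporter('subject', 'category', {'remove_in': 'Newton'})

reporter.assert_called_with('subject', 'category',
                            {'remove_in': mock.ANY})
details = reporter.call_args[0][2]
assert details['remove_in'][0] == 'N'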
diff --git a/keystone-moon/keystone/tests/unit/common/test_notifications.py b/keystone-moon/keystone/tests/unit/common/test_notifications.py
index 1ad8d50d..aa2e6f72 100644
--- a/keystone-moon/keystone/tests/unit/common/test_notifications.py
+++ b/keystone-moon/keystone/tests/unit/common/test_notifications.py
@@ -43,9 +43,7 @@ class ArbitraryException(Exception):
def register_callback(operation, resource_type=EXP_RESOURCE_TYPE):
- """Helper for creating and registering a mock callback.
-
- """
+ """Helper for creating and registering a mock callback."""
callback = mock.Mock(__name__='callback',
im_class=mock.Mock(__name__='class'))
notifications.register_event_callback(operation, resource_type, callback)
@@ -95,89 +93,14 @@ class AuditNotificationsTestCase(unit.BaseTestCase):
DISABLED_OPERATION)
-class NotificationsWrapperTestCase(unit.BaseTestCase):
- def create_fake_ref(self):
- resource_id = uuid.uuid4().hex
- return resource_id, {
- 'id': resource_id,
- 'key': uuid.uuid4().hex
- }
-
- @notifications.created(EXP_RESOURCE_TYPE)
- def create_resource(self, resource_id, data):
- return data
-
- def test_resource_created_notification(self):
- exp_resource_id, data = self.create_fake_ref()
- callback = register_callback(CREATED_OPERATION)
-
- self.create_resource(exp_resource_id, data)
- callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
- CREATED_OPERATION,
- {'resource_info': exp_resource_id})
-
- @notifications.updated(EXP_RESOURCE_TYPE)
- def update_resource(self, resource_id, data):
- return data
-
- def test_resource_updated_notification(self):
- exp_resource_id, data = self.create_fake_ref()
- callback = register_callback(UPDATED_OPERATION)
-
- self.update_resource(exp_resource_id, data)
- callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
- UPDATED_OPERATION,
- {'resource_info': exp_resource_id})
-
- @notifications.deleted(EXP_RESOURCE_TYPE)
- def delete_resource(self, resource_id):
- pass
-
- def test_resource_deleted_notification(self):
- exp_resource_id = uuid.uuid4().hex
- callback = register_callback(DELETED_OPERATION)
-
- self.delete_resource(exp_resource_id)
- callback.assert_called_with('identity', EXP_RESOURCE_TYPE,
- DELETED_OPERATION,
- {'resource_info': exp_resource_id})
-
- @notifications.created(EXP_RESOURCE_TYPE)
- def create_exception(self, resource_id):
- raise ArbitraryException()
-
- def test_create_exception_without_notification(self):
- callback = register_callback(CREATED_OPERATION)
- self.assertRaises(
- ArbitraryException, self.create_exception, uuid.uuid4().hex)
- self.assertFalse(callback.called)
-
- @notifications.created(EXP_RESOURCE_TYPE)
- def update_exception(self, resource_id):
- raise ArbitraryException()
-
- def test_update_exception_without_notification(self):
- callback = register_callback(UPDATED_OPERATION)
- self.assertRaises(
- ArbitraryException, self.update_exception, uuid.uuid4().hex)
- self.assertFalse(callback.called)
-
- @notifications.deleted(EXP_RESOURCE_TYPE)
- def delete_exception(self, resource_id):
- raise ArbitraryException()
-
- def test_delete_exception_without_notification(self):
- callback = register_callback(DELETED_OPERATION)
- self.assertRaises(
- ArbitraryException, self.delete_exception, uuid.uuid4().hex)
- self.assertFalse(callback.called)
-
-
class NotificationsTestCase(unit.BaseTestCase):
def test_send_notification(self):
- """Test the private method _send_notification to ensure event_type,
- payload, and context are built and passed properly.
+ """Test _send_notification.
+
+ Test the private method _send_notification to ensure event_type,
+ payload, and context are built and passed properly.
+
"""
resource = uuid.uuid4().hex
resource_type = EXP_RESOURCE_TYPE
@@ -203,6 +126,82 @@ class NotificationsTestCase(unit.BaseTestCase):
resource)
mocked.assert_called_once_with(*expected_args)
+ def test_send_notification_with_opt_out(self):
+ """Test the private method _send_notification with opt-out.
+
+ Test that _send_notification does not notify when a valid
+ notification_opt_out configuration is provided.
+ """
+ resource = uuid.uuid4().hex
+ resource_type = EXP_RESOURCE_TYPE
+ operation = CREATED_OPERATION
+ event_type = 'identity.%s.created' % resource_type
+
+ # NOTE(diazjf): Here we add notification_opt_out to the
+ # configuration so that we should return before _get_notifier is
+ # called. This is because we are opting out of notifications for the
+ # passed resource_type and operation.
+ conf = self.useFixture(config_fixture.Config(CONF))
+ conf.config(notification_opt_out=event_type)
+
+ with mock.patch.object(notifications._get_notifier(),
+ '_notify') as mocked:
+
+ notifications._send_notification(operation, resource_type,
+ resource)
+ mocked.assert_not_called()
+
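Outside the test fixture, the same opt-out would presumably be expressed in keystone.conf; a sketch using a hypothetical event type (the option name and its DEFAULT group are taken from the fixture call above):

    [DEFAULT]
    notification_opt_out = identity.user.created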
+ def test_send_audit_notification_with_opt_out(self):
+ """Test the private method _send_audit_notification with opt-out.
+
+ Test that _send_audit_notification does not notify when a valid
+ notification_opt_out configuration is provided.
+ """
+ resource_type = EXP_RESOURCE_TYPE
+
+ action = CREATED_OPERATION + '.' + resource_type
+ initiator = mock
+ target = mock
+ outcome = 'success'
+ event_type = 'identity.%s.created' % resource_type
+
+ conf = self.useFixture(config_fixture.Config(CONF))
+ conf.config(notification_opt_out=event_type)
+
+ with mock.patch.object(notifications._get_notifier(),
+ '_notify') as mocked:
+
+ notifications._send_audit_notification(action,
+ initiator,
+ outcome,
+ target,
+ event_type)
+ mocked.assert_not_called()
+
+ def test_opt_out_authenticate_event(self):
+ """Test that authenticate events are successfully opted out."""
+ resource_type = EXP_RESOURCE_TYPE
+
+ action = CREATED_OPERATION + '.' + resource_type
+ initiator = mock
+ target = mock
+ outcome = 'success'
+ event_type = 'identity.authenticate'
+ meter_name = '%s.%s' % (event_type, outcome)
+
+ conf = self.useFixture(config_fixture.Config(CONF))
+ conf.config(notification_opt_out=meter_name)
+
+ with mock.patch.object(notifications._get_notifier(),
+ '_notify') as mocked:
+
+ notifications._send_audit_notification(action,
+ initiator,
+ outcome,
+ target,
+ event_type)
+ mocked.assert_not_called()
+
class BaseNotificationTest(test_v3.RestfulTestCase):
@@ -213,13 +212,17 @@ class BaseNotificationTest(test_v3.RestfulTestCase):
self._audits = []
def fake_notify(operation, resource_type, resource_id,
- public=True):
+ actor_dict=None, public=True):
note = {
'resource_id': resource_id,
'operation': operation,
'resource_type': resource_type,
'send_notification_called': True,
'public': public}
+ if actor_dict:
+ note['actor_id'] = actor_dict.get('id')
+ note['actor_type'] = actor_dict.get('type')
+ note['actor_operation'] = actor_dict.get('actor_operation')
self._notifications.append(note)
self.useFixture(mockpatch.PatchObject(
@@ -249,17 +252,23 @@ class BaseNotificationTest(test_v3.RestfulTestCase):
self.useFixture(mockpatch.PatchObject(
notifications, '_send_audit_notification', fake_audit))
- def _assert_last_note(self, resource_id, operation, resource_type):
+ def _assert_last_note(self, resource_id, operation, resource_type,
+ actor_id=None, actor_type=None,
+ actor_operation=None):
# NOTE(stevemar): If 'basic' format is not used, then simply
# return since this assertion is not valid.
if CONF.notification_format != 'basic':
return
self.assertTrue(len(self._notifications) > 0)
note = self._notifications[-1]
- self.assertEqual(note['operation'], operation)
- self.assertEqual(note['resource_id'], resource_id)
- self.assertEqual(note['resource_type'], resource_type)
+ self.assertEqual(operation, note['operation'])
+ self.assertEqual(resource_id, note['resource_id'])
+ self.assertEqual(resource_type, note['resource_type'])
self.assertTrue(note['send_notification_called'])
+ if actor_id:
+ self.assertEqual(actor_id, note['actor_id'])
+ self.assertEqual(actor_type, note['actor_type'])
+ self.assertEqual(actor_operation, note['actor_operation'])
def _assert_last_audit(self, resource_id, operation, resource_type,
target_uri):
@@ -318,14 +327,14 @@ class BaseNotificationTest(test_v3.RestfulTestCase):
class NotificationsForEntities(BaseNotificationTest):
def test_create_group(self):
- group_ref = self.new_group_ref(domain_id=self.domain_id)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
group_ref = self.identity_api.create_group(group_ref)
self._assert_last_note(group_ref['id'], CREATED_OPERATION, 'group')
self._assert_last_audit(group_ref['id'], CREATED_OPERATION, 'group',
cadftaxonomy.SECURITY_GROUP)
def test_create_project(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self._assert_last_note(
project_ref['id'], CREATED_OPERATION, 'project')
@@ -333,27 +342,27 @@ class NotificationsForEntities(BaseNotificationTest):
'project', cadftaxonomy.SECURITY_PROJECT)
def test_create_role(self):
- role_ref = self.new_role_ref()
+ role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role')
self._assert_last_audit(role_ref['id'], CREATED_OPERATION, 'role',
cadftaxonomy.SECURITY_ROLE)
def test_create_user(self):
- user_ref = self.new_user_ref(domain_id=self.domain_id)
+ user_ref = unit.new_user_ref(domain_id=self.domain_id)
user_ref = self.identity_api.create_user(user_ref)
self._assert_last_note(user_ref['id'], CREATED_OPERATION, 'user')
self._assert_last_audit(user_ref['id'], CREATED_OPERATION, 'user',
cadftaxonomy.SECURITY_ACCOUNT_USER)
def test_create_trust(self):
- trustor = self.new_user_ref(domain_id=self.domain_id)
+ trustor = unit.new_user_ref(domain_id=self.domain_id)
trustor = self.identity_api.create_user(trustor)
- trustee = self.new_user_ref(domain_id=self.domain_id)
+ trustee = unit.new_user_ref(domain_id=self.domain_id)
trustee = self.identity_api.create_user(trustee)
- role_ref = self.new_role_ref()
+ role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
- trust_ref = self.new_trust_ref(trustor['id'],
+ trust_ref = unit.new_trust_ref(trustor['id'],
trustee['id'])
self.trust_api.create_trust(trust_ref['id'],
trust_ref,
@@ -364,7 +373,7 @@ class NotificationsForEntities(BaseNotificationTest):
'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST)
def test_delete_group(self):
- group_ref = self.new_group_ref(domain_id=self.domain_id)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
group_ref = self.identity_api.create_group(group_ref)
self.identity_api.delete_group(group_ref['id'])
self._assert_last_note(group_ref['id'], DELETED_OPERATION, 'group')
@@ -372,7 +381,7 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_GROUP)
def test_delete_project(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self.resource_api.delete_project(project_ref['id'])
self._assert_last_note(
@@ -381,7 +390,7 @@ class NotificationsForEntities(BaseNotificationTest):
'project', cadftaxonomy.SECURITY_PROJECT)
def test_delete_role(self):
- role_ref = self.new_role_ref()
+ role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
self.role_api.delete_role(role_ref['id'])
self._assert_last_note(role_ref['id'], DELETED_OPERATION, 'role')
@@ -389,7 +398,7 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_ROLE)
def test_delete_user(self):
- user_ref = self.new_user_ref(domain_id=self.domain_id)
+ user_ref = unit.new_user_ref(domain_id=self.domain_id)
user_ref = self.identity_api.create_user(user_ref)
self.identity_api.delete_user(user_ref['id'])
self._assert_last_note(user_ref['id'], DELETED_OPERATION, 'user')
@@ -397,14 +406,14 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_ACCOUNT_USER)
def test_create_domain(self):
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
self._assert_last_note(domain_ref['id'], CREATED_OPERATION, 'domain')
self._assert_last_audit(domain_ref['id'], CREATED_OPERATION, 'domain',
cadftaxonomy.SECURITY_DOMAIN)
def test_update_domain(self):
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['description'] = uuid.uuid4().hex
self.resource_api.update_domain(domain_ref['id'], domain_ref)
@@ -413,7 +422,7 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_DOMAIN)
def test_delete_domain(self):
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['enabled'] = False
self.resource_api.update_domain(domain_ref['id'], domain_ref)
@@ -423,12 +432,12 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_DOMAIN)
def test_delete_trust(self):
- trustor = self.new_user_ref(domain_id=self.domain_id)
+ trustor = unit.new_user_ref(domain_id=self.domain_id)
trustor = self.identity_api.create_user(trustor)
- trustee = self.new_user_ref(domain_id=self.domain_id)
+ trustee = unit.new_user_ref(domain_id=self.domain_id)
trustee = self.identity_api.create_user(trustee)
- role_ref = self.new_role_ref()
- trust_ref = self.new_trust_ref(trustor['id'], trustee['id'])
+ role_ref = unit.new_role_ref()
+ trust_ref = unit.new_trust_ref(trustor['id'], trustee['id'])
self.trust_api.create_trust(trust_ref['id'],
trust_ref,
[role_ref])
@@ -439,7 +448,9 @@ class NotificationsForEntities(BaseNotificationTest):
'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST)
def test_create_endpoint(self):
- endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+ endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
self._assert_notify_sent(endpoint_ref['id'], CREATED_OPERATION,
'endpoint')
@@ -447,7 +458,9 @@ class NotificationsForEntities(BaseNotificationTest):
'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
def test_update_endpoint(self):
- endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+ endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
self.catalog_api.update_endpoint(endpoint_ref['id'], endpoint_ref)
self._assert_notify_sent(endpoint_ref['id'], UPDATED_OPERATION,
@@ -456,7 +469,9 @@ class NotificationsForEntities(BaseNotificationTest):
'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
def test_delete_endpoint(self):
- endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+ endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
self.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref)
self.catalog_api.delete_endpoint(endpoint_ref['id'])
self._assert_notify_sent(endpoint_ref['id'], DELETED_OPERATION,
@@ -465,7 +480,7 @@ class NotificationsForEntities(BaseNotificationTest):
'endpoint', cadftaxonomy.SECURITY_ENDPOINT)
def test_create_service(self):
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
self.catalog_api.create_service(service_ref['id'], service_ref)
self._assert_notify_sent(service_ref['id'], CREATED_OPERATION,
'service')
@@ -473,7 +488,7 @@ class NotificationsForEntities(BaseNotificationTest):
'service', cadftaxonomy.SECURITY_SERVICE)
def test_update_service(self):
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
self.catalog_api.create_service(service_ref['id'], service_ref)
self.catalog_api.update_service(service_ref['id'], service_ref)
self._assert_notify_sent(service_ref['id'], UPDATED_OPERATION,
@@ -482,7 +497,7 @@ class NotificationsForEntities(BaseNotificationTest):
'service', cadftaxonomy.SECURITY_SERVICE)
def test_delete_service(self):
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
self.catalog_api.create_service(service_ref['id'], service_ref)
self.catalog_api.delete_service(service_ref['id'])
self._assert_notify_sent(service_ref['id'], DELETED_OPERATION,
@@ -491,7 +506,7 @@ class NotificationsForEntities(BaseNotificationTest):
'service', cadftaxonomy.SECURITY_SERVICE)
def test_create_region(self):
- region_ref = self.new_region_ref()
+ region_ref = unit.new_region_ref()
self.catalog_api.create_region(region_ref)
self._assert_notify_sent(region_ref['id'], CREATED_OPERATION,
'region')
@@ -499,7 +514,7 @@ class NotificationsForEntities(BaseNotificationTest):
'region', cadftaxonomy.SECURITY_REGION)
def test_update_region(self):
- region_ref = self.new_region_ref()
+ region_ref = unit.new_region_ref()
self.catalog_api.create_region(region_ref)
self.catalog_api.update_region(region_ref['id'], region_ref)
self._assert_notify_sent(region_ref['id'], UPDATED_OPERATION,
@@ -508,7 +523,7 @@ class NotificationsForEntities(BaseNotificationTest):
'region', cadftaxonomy.SECURITY_REGION)
def test_delete_region(self):
- region_ref = self.new_region_ref()
+ region_ref = unit.new_region_ref()
self.catalog_api.create_region(region_ref)
self.catalog_api.delete_region(region_ref['id'])
self._assert_notify_sent(region_ref['id'], DELETED_OPERATION,
@@ -517,7 +532,7 @@ class NotificationsForEntities(BaseNotificationTest):
'region', cadftaxonomy.SECURITY_REGION)
def test_create_policy(self):
- policy_ref = self.new_policy_ref()
+ policy_ref = unit.new_policy_ref()
self.policy_api.create_policy(policy_ref['id'], policy_ref)
self._assert_notify_sent(policy_ref['id'], CREATED_OPERATION,
'policy')
@@ -525,7 +540,7 @@ class NotificationsForEntities(BaseNotificationTest):
'policy', cadftaxonomy.SECURITY_POLICY)
def test_update_policy(self):
- policy_ref = self.new_policy_ref()
+ policy_ref = unit.new_policy_ref()
self.policy_api.create_policy(policy_ref['id'], policy_ref)
self.policy_api.update_policy(policy_ref['id'], policy_ref)
self._assert_notify_sent(policy_ref['id'], UPDATED_OPERATION,
@@ -534,7 +549,7 @@ class NotificationsForEntities(BaseNotificationTest):
'policy', cadftaxonomy.SECURITY_POLICY)
def test_delete_policy(self):
- policy_ref = self.new_policy_ref()
+ policy_ref = unit.new_policy_ref()
self.policy_api.create_policy(policy_ref['id'], policy_ref)
self.policy_api.delete_policy(policy_ref['id'])
self._assert_notify_sent(policy_ref['id'], DELETED_OPERATION,
@@ -543,7 +558,7 @@ class NotificationsForEntities(BaseNotificationTest):
'policy', cadftaxonomy.SECURITY_POLICY)
def test_disable_domain(self):
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
self.resource_api.create_domain(domain_ref['id'], domain_ref)
domain_ref['enabled'] = False
self.resource_api.update_domain(domain_ref['id'], domain_ref)
@@ -551,8 +566,7 @@ class NotificationsForEntities(BaseNotificationTest):
public=False)
def test_disable_of_disabled_domain_does_not_notify(self):
- domain_ref = self.new_domain_ref()
- domain_ref['enabled'] = False
+ domain_ref = unit.new_domain_ref(enabled=False)
self.resource_api.create_domain(domain_ref['id'], domain_ref)
# The domain_ref above is not changed during the create process. We
# can use the same ref to perform the update.
@@ -561,7 +575,7 @@ class NotificationsForEntities(BaseNotificationTest):
public=False)
def test_update_group(self):
- group_ref = self.new_group_ref(domain_id=self.domain_id)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
group_ref = self.identity_api.create_group(group_ref)
self.identity_api.update_group(group_ref['id'], group_ref)
self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group')
@@ -569,7 +583,7 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_GROUP)
def test_update_project(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self.resource_api.update_project(project_ref['id'], project_ref)
self._assert_notify_sent(
@@ -578,7 +592,7 @@ class NotificationsForEntities(BaseNotificationTest):
'project', cadftaxonomy.SECURITY_PROJECT)
def test_disable_project(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
project_ref['enabled'] = False
self.resource_api.update_project(project_ref['id'], project_ref)
@@ -586,8 +600,8 @@ class NotificationsForEntities(BaseNotificationTest):
public=False)
def test_disable_of_disabled_project_does_not_notify(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
- project_ref['enabled'] = False
+ project_ref = unit.new_project_ref(domain_id=self.domain_id,
+ enabled=False)
self.resource_api.create_project(project_ref['id'], project_ref)
# The project_ref above is not changed during the create process. We
# can use the same ref to perform the update.
@@ -596,7 +610,7 @@ class NotificationsForEntities(BaseNotificationTest):
public=False)
def test_update_project_does_not_send_disable(self):
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
project_ref['enabled'] = True
self.resource_api.update_project(project_ref['id'], project_ref)
@@ -605,7 +619,7 @@ class NotificationsForEntities(BaseNotificationTest):
self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project')
def test_update_role(self):
- role_ref = self.new_role_ref()
+ role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
self.role_api.update_role(role_ref['id'], role_ref)
self._assert_last_note(role_ref['id'], UPDATED_OPERATION, 'role')
@@ -613,7 +627,7 @@ class NotificationsForEntities(BaseNotificationTest):
cadftaxonomy.SECURITY_ROLE)
def test_update_user(self):
- user_ref = self.new_user_ref(domain_id=self.domain_id)
+ user_ref = unit.new_user_ref(domain_id=self.domain_id)
user_ref = self.identity_api.create_user(user_ref)
self.identity_api.update_user(user_ref['id'], user_ref)
self._assert_last_note(user_ref['id'], UPDATED_OPERATION, 'user')
@@ -622,7 +636,7 @@ class NotificationsForEntities(BaseNotificationTest):
def test_config_option_no_events(self):
self.config_fixture.config(notification_format='basic')
- role_ref = self.new_role_ref()
+ role_ref = unit.new_role_ref()
self.role_api.create_role(role_ref['id'], role_ref)
# The regular notifications will still be emitted, since they are
# used for callback handling.
@@ -630,6 +644,28 @@ class NotificationsForEntities(BaseNotificationTest):
# No audit event should have occurred
self.assertEqual(0, len(self._audits))
+ def test_add_user_to_group(self):
+ user_ref = unit.new_user_ref(domain_id=self.domain_id)
+ user_ref = self.identity_api.create_user(user_ref)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
+ group_ref = self.identity_api.create_group(group_ref)
+ self.identity_api.add_user_to_group(user_ref['id'], group_ref['id'])
+ self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group',
+ actor_id=user_ref['id'], actor_type='user',
+ actor_operation='added')
+
+ def test_remove_user_from_group(self):
+ user_ref = unit.new_user_ref(domain_id=self.domain_id)
+ user_ref = self.identity_api.create_user(user_ref)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
+ group_ref = self.identity_api.create_group(group_ref)
+ self.identity_api.add_user_to_group(user_ref['id'], group_ref['id'])
+ self.identity_api.remove_user_from_group(user_ref['id'],
+ group_ref['id'])
+ self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group',
+ actor_id=user_ref['id'], actor_type='user',
+ actor_operation='removed')
+
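Given the fake_notify shim installed earlier in this class, the note recorded for the membership change above presumably takes this shape (a sketch, not a captured payload):

    {
        'resource_id': group_ref['id'],
        'operation': 'updated',
        'resource_type': 'group',
        'send_notification_called': True,
        'public': True,
        'actor_id': user_ref['id'],
        'actor_type': 'user',
        'actor_operation': 'added',   # 'removed' for the second test
    }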
class CADFNotificationsForEntities(NotificationsForEntities):
@@ -638,7 +674,7 @@ class CADFNotificationsForEntities(NotificationsForEntities):
self.config_fixture.config(notification_format='cadf')
def test_initiator_data_is_set(self):
- ref = self.new_domain_ref()
+ ref = unit.new_domain_ref()
resp = self.post('/domains', body={'domain': ref})
resource_id = resp.result.get('domain').get('id')
self._assert_last_audit(resource_id, CREATED_OPERATION, 'domain',
@@ -809,7 +845,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
def test_notification_received(self):
callback = register_callback(CREATED_OPERATION, 'project')
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self.assertTrue(callback.called)
@@ -854,7 +890,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
callback_called.append(True)
Foo()
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self.assertEqual([True], callback_called)
@@ -877,7 +913,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
callback_called.append('cb1')
Foo()
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project_ref['id'], project_ref)
self.assertItemsEqual(['cb1', 'cb0'], callback_called)
@@ -919,7 +955,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
# something like:
# self.assertRaises(TypeError, Foo)
Foo()
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
self.assertRaises(TypeError, self.resource_api.create_project,
project_ref['id'], project_ref)
@@ -963,13 +999,13 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
def _assert_last_note(self, action, user_id, event_type=None):
self.assertTrue(self._notifications)
note = self._notifications[-1]
- self.assertEqual(note['action'], action)
+ self.assertEqual(action, note['action'])
initiator = note['initiator']
- self.assertEqual(initiator.id, user_id)
- self.assertEqual(initiator.host.address, self.LOCAL_HOST)
+ self.assertEqual(user_id, initiator.id)
+ self.assertEqual(self.LOCAL_HOST, initiator.host.address)
self.assertTrue(note['send_notification_called'])
if event_type:
- self.assertEqual(note['event_type'], event_type)
+ self.assertEqual(event_type, note['event_type'])
def _assert_event(self, role_id, project=None, domain=None,
user=None, group=None, inherit=False):
@@ -1006,7 +1042,6 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
'id': 'openstack:782689dd-f428-4f13-99c7-5c70f94a5ac1'
}
"""
-
note = self._notifications[-1]
event = note['event']
if project:
@@ -1073,7 +1108,7 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
user=self.user_id)
def test_group_domain_grant(self):
- group_ref = self.new_group_ref(domain_id=self.domain_id)
+ group_ref = unit.new_group_ref(domain_id=self.domain_id)
group = self.identity_api.create_group(group_ref)
self.identity_api.add_user_to_group(self.user_id, group['id'])
url = ('/domains/%s/groups/%s/roles/%s' %
@@ -1087,7 +1122,7 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
# A notification is sent when add_role_to_user_and_project is called on
# the assignment manager.
- project_ref = self.new_project_ref(self.domain_id)
+ project_ref = unit.new_project_ref(self.domain_id)
project = self.resource_api.create_project(
project_ref['id'], project_ref)
tenant_id = project['id']
@@ -1097,7 +1132,7 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
self.assertTrue(self._notifications)
note = self._notifications[-1]
- self.assertEqual(note['action'], 'created.role_assignment')
+ self.assertEqual('created.role_assignment', note['action'])
self.assertTrue(note['send_notification_called'])
self._assert_event(self.role_id, project=tenant_id, user=self.user_id)
@@ -1111,7 +1146,7 @@ class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
self.assertTrue(self._notifications)
note = self._notifications[-1]
- self.assertEqual(note['action'], 'deleted.role_assignment')
+ self.assertEqual('deleted.role_assignment', note['action'])
self.assertTrue(note['send_notification_called'])
self._assert_event(self.role_id, project=self.project_id,
@@ -1126,7 +1161,9 @@ class TestCallbackRegistration(unit.BaseTestCase):
self.mock_log.logger.getEffectiveLevel.return_value = logging.DEBUG
def verify_log_message(self, data):
- """Tests that use this are a little brittle because adding more
+ """Verify log message.
+
+ Tests that use this are a little brittle because adding more
logging can break them.
TODO(dstanek): remove the need for this in a future refactoring
diff --git a/keystone-moon/keystone/tests/unit/common/test_sql_core.py b/keystone-moon/keystone/tests/unit/common/test_sql_core.py
index b110ed08..7d20eb03 100644
--- a/keystone-moon/keystone/tests/unit/common/test_sql_core.py
+++ b/keystone-moon/keystone/tests/unit/common/test_sql_core.py
@@ -32,14 +32,14 @@ class TestModelDictMixin(unit.BaseTestCase):
def test_creating_a_model_instance_from_a_dict(self):
d = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
m = TestModel.from_dict(d)
- self.assertEqual(m.id, d['id'])
- self.assertEqual(m.text, d['text'])
+ self.assertEqual(d['id'], m.id)
+ self.assertEqual(d['text'], m.text)
def test_creating_a_dict_from_a_model_instance(self):
m = TestModel(id=utils.new_uuid(), text=utils.new_uuid())
d = m.to_dict()
- self.assertEqual(m.id, d['id'])
- self.assertEqual(m.text, d['text'])
+ self.assertEqual(d['id'], m.id)
+ self.assertEqual(d['text'], m.text)
def test_creating_a_model_instance_from_an_invalid_dict(self):
d = {'id': utils.new_uuid(), 'text': utils.new_uuid(), 'extra': None}
@@ -49,4 +49,4 @@ class TestModelDictMixin(unit.BaseTestCase):
expected = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
m = TestModel(id=expected['id'], text=expected['text'])
m.extra = 'this should not be in the dictionary'
- self.assertEqual(m.to_dict(), expected)
+ self.assertEqual(expected, m.to_dict())
diff --git a/keystone-moon/keystone/tests/unit/common/test_utils.py b/keystone-moon/keystone/tests/unit/common/test_utils.py
index d52eb729..3641aacd 100644
--- a/keystone-moon/keystone/tests/unit/common/test_utils.py
+++ b/keystone-moon/keystone/tests/unit/common/test_utils.py
@@ -1,3 +1,4 @@
+# encoding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -16,12 +17,13 @@ import uuid
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
+import six
from keystone.common import utils as common_utils
from keystone import exception
-from keystone import service
from keystone.tests import unit
from keystone.tests.unit import utils
+from keystone.version import service
CONF = cfg.CONF
@@ -36,6 +38,38 @@ class UtilsTestCase(unit.BaseTestCase):
super(UtilsTestCase, self).setUp()
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
+ def test_resource_uuid(self):
+ uuid_str = '536e28c2017e405e89b25a1ed777b952'
+ self.assertEqual(uuid_str, common_utils.resource_uuid(uuid_str))
+
+ # A string of exactly 64 characters.
+ uuid_str = ('536e28c2017e405e89b25a1ed777b952'
+ 'f13de678ac714bb1b7d1e9a007c10db5')
+ resource_id_namespace = common_utils.RESOURCE_ID_NAMESPACE
+ transformed_id = uuid.uuid5(resource_id_namespace, uuid_str).hex
+ self.assertEqual(transformed_id, common_utils.resource_uuid(uuid_str))
+
+ # Non-ASCII character test.
+ non_ascii_ = 'ß' * 32
+ transformed_id = uuid.uuid5(resource_id_namespace, non_ascii_).hex
+ self.assertEqual(transformed_id,
+ common_utils.resource_uuid(non_ascii_))
+
+ # This input is invalid because its length is more than 64.
+ invalid_input = 'x' * 65
+ self.assertRaises(ValueError, common_utils.resource_uuid,
+ invalid_input)
+
+ # A 64-character unicode string, to mimic what is returned from the
+ # mapping_id backend.
+ uuid_str = six.text_type('536e28c2017e405e89b25a1ed777b952'
+ 'f13de678ac714bb1b7d1e9a007c10db5')
+ resource_id_namespace = common_utils.RESOURCE_ID_NAMESPACE
+ if six.PY2:
+ uuid_str = uuid_str.encode('utf-8')
+ transformed_id = uuid.uuid5(resource_id_namespace, uuid_str).hex
+ self.assertEqual(transformed_id, common_utils.resource_uuid(uuid_str))
+
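Taken together, these assertions pin down behavior roughly like the following sketch; this is an inference from the test, not keystone's actual implementation, and the namespace is whatever RESOURCE_ID_NAMESPACE ships as:

    import uuid

    import six

    def resource_uuid_sketch(value, namespace):
        # Well-formed UUID strings pass through untouched.
        try:
            uuid.UUID(value)
            return value
        except ValueError:
            pass
        # Anything longer than 64 characters is rejected outright.
        if len(value) > 64:
            raise ValueError(value)
        # Everything else is deterministically hashed into the namespace.
        if six.PY2 and isinstance(value, six.text_type):
            value = value.encode('utf-8')   # uuid5 needs bytes on Python 2
        return uuid.uuid5(namespace, value).hex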
def test_hash(self):
password = 'right'
wrong = 'wrongwrong' # Two wrongs don't make a right
@@ -153,6 +187,18 @@ class UtilsTestCase(unit.BaseTestCase):
expected_json = '{"field":"value"}'
self.assertEqual(expected_json, json)
+ def test_url_safe_check(self):
+ base_str = 'i am safe'
+ self.assertFalse(common_utils.is_not_url_safe(base_str))
+ for i in common_utils.URL_RESERVED_CHARS:
+ self.assertTrue(common_utils.is_not_url_safe(base_str + i))
+
+ def test_url_safe_with_unicode_check(self):
+ base_str = u'i am \xe7afe'
+ self.assertFalse(common_utils.is_not_url_safe(base_str))
+ for i in common_utils.URL_RESERVED_CHARS:
+ self.assertTrue(common_utils.is_not_url_safe(base_str + i))
+
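The two tests above suggest a helper of roughly this shape (a sketch; the exact URL_RESERVED_CHARS value is an assumption based on RFC 3986's reserved set):

    URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;="

    def is_not_url_safe_sketch(name):
        # True when the name contains any URL-reserved character.
        return any(char in name for char in URL_RESERVED_CHARS)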
class ServiceHelperTests(unit.BaseTestCase):
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
index 2097b68b..96a0ffa9 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_ldap_sql.conf
@@ -1,5 +1,5 @@
[database]
-#For a specific location file based sqlite use:
+#For a file-based SQLite database at a specific location, use:
#connection = sqlite:////tmp/keystone.db
#To Test MySQL:
#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf
index 59cb8577..bb9ee08f 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_liveldap.conf
@@ -4,11 +4,7 @@ user = cn=Manager,dc=openstack,dc=org
password = test
suffix = dc=openstack,dc=org
group_tree_dn = ou=UserGroups,dc=openstack,dc=org
-role_tree_dn = ou=Roles,dc=openstack,dc=org
-project_tree_dn = ou=Projects,dc=openstack,dc=org
user_tree_dn = ou=Users,dc=openstack,dc=org
-project_enabled_emulation = True
user_enabled_emulation = True
user_mail_attribute = mail
use_dumb_member = True
-
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
index 142ca203..2495f036 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_mysql.conf
@@ -1,4 +1,4 @@
-#Used for running the Migrate tests against a live Mysql Server
+#Used for running the Migrate tests against a live MySQL Server
#See _sql_livetest.py
[database]
connection = mysql+pymysql://keystone:keystone@localhost/keystone_test?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf
index a85f5226..c36e05f9 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_pool_liveldap.conf
@@ -4,10 +4,7 @@ user = cn=Manager,dc=openstack,dc=org
password = test
suffix = dc=openstack,dc=org
group_tree_dn = ou=UserGroups,dc=openstack,dc=org
-role_tree_dn = ou=Roles,dc=openstack,dc=org
-project_tree_dn = ou=Projects,dc=openstack,dc=org
user_tree_dn = ou=Users,dc=openstack,dc=org
-project_enabled_emulation = True
user_enabled_emulation = True
user_mail_attribute = mail
use_dumb_member = True
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf b/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
index 063177bd..f2828e2e 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_sql.conf
@@ -1,5 +1,5 @@
[database]
-#For a specific location file based sqlite use:
+#For a file-based SQLite database at a specific location, use:
#connection = sqlite:////tmp/keystone.db
#To Test MySQL:
#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
diff --git a/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf b/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf
index d35b9139..b66044b7 100644
--- a/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/backend_tls_liveldap.conf
@@ -4,10 +4,7 @@ user = dc=Manager,dc=openstack,dc=org
password = test
suffix = dc=openstack,dc=org
group_tree_dn = ou=UserGroups,dc=openstack,dc=org
-role_tree_dn = ou=Roles,dc=openstack,dc=org
-project_tree_dn = ou=Projects,dc=openstack,dc=org
user_tree_dn = ou=Users,dc=openstack,dc=org
-project_enabled_emulation = True
user_enabled_emulation = True
user_mail_attribute = mail
use_dumb_member = True
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
index 2dd86c25..64d01d48 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf
@@ -11,4 +11,4 @@ password = password
suffix = cn=example,cn=com
[identity]
-driver = ldap \ No newline at end of file
+driver = ldap
diff --git a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
index ba22cdf9..af540537 100644
--- a/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
+++ b/keystone-moon/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf
@@ -8,4 +8,5 @@ password = password
suffix = cn=example,cn=com
[identity]
-driver = ldap \ No newline at end of file
+driver = ldap
+list_limit = 101
diff --git a/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py b/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py
index 5804f1c0..52a6095b 100644
--- a/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py
+++ b/keystone-moon/keystone/tests/unit/contrib/federation/test_utils.py
@@ -12,13 +12,20 @@
import uuid
+from oslo_config import cfg
+from oslo_config import fixture as config_fixture
+from oslo_serialization import jsonutils
+
from keystone.auth.plugins import mapped
-from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
+from keystone.federation import utils as mapping_utils
from keystone.tests import unit
from keystone.tests.unit import mapping_fixtures
+FAKE_MAPPING_ID = uuid.uuid4().hex
+
+
class MappingRuleEngineTests(unit.BaseTestCase):
"""A class for testing the mapping rule engine."""
@@ -50,10 +57,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
a direct mapping for the user's name.
"""
-
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.ADMIN_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('FirstName')
@@ -71,18 +77,15 @@ class MappingRuleEngineTests(unit.BaseTestCase):
This will not match since the email in the assertion will fail
the regex test. It is set to match any @example.com address.
But the incoming value is set to eviltester@example.org.
- RuleProcessor should return list of empty group_ids.
+ RuleProcessor should raise ValidationError.
"""
-
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.BAD_TESTER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
-
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ self.assertRaises(exception.ValidationError,
+ rp.process,
+ assertion)
def test_rule_engine_regex_many_groups(self):
"""Should return group CONTRACTOR_GROUP_ID.
@@ -93,10 +96,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
a match.
"""
-
mapping = mapping_fixtures.MAPPING_TESTER_REGEX
assertion = mapping_fixtures.TESTER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
@@ -116,10 +118,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
mapping.
"""
-
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
@@ -138,10 +139,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
has `not_any_of`, and direct mapping to a username, no group.
"""
-
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.CUSTOMER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
@@ -160,10 +160,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
rules must be matched, including a `not_any_of`.
"""
-
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
@@ -183,10 +182,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
regex set to True.
"""
-
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.DEVELOPER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
self.assertValidMappedUserObject(values)
@@ -203,18 +201,15 @@ class MappingRuleEngineTests(unit.BaseTestCase):
The email in the assertion will fail the regex test.
It is set to reject any @example.org address, but the
incoming value is set to evildeveloper@example.org.
- RuleProcessor should return list of empty group_ids.
+ RuleProcessor should raise ValidationError.
"""
-
mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX
assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- mapped_properties = rp.process(assertion)
-
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ self.assertRaises(exception.ValidationError,
+ rp.process,
+ assertion)
def _rule_engine_regex_match_and_many_groups(self, assertion):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
@@ -223,9 +218,8 @@ class MappingRuleEngineTests(unit.BaseTestCase):
Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
"""
-
mapping = mapping_fixtures.MAPPING_LARGE
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
@@ -265,16 +259,29 @@ class MappingRuleEngineTests(unit.BaseTestCase):
Expect RuleProcessor to discard a non-string object, which
is required for a correct rule match. RuleProcessor will result with
- empty list of groups.
+ ValidationError.
"""
mapping = mapping_fixtures.MAPPING_SMALL
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertValidMappedUserObject(mapped_properties)
- self.assertIsNone(mapped_properties['user'].get('name'))
- self.assertListEqual(list(), mapped_properties['group_ids'])
+ self.assertRaises(exception.ValidationError,
+ rp.process,
+ assertion)
+
+ def test_using_remote_direct_mapping_that_doesnt_exist_fails(self):
+ """Test for the correct error when referring to a bad remote match.
+
+ The remote match must exist in a rule when a local section refers to
+ a remote match using the format (e.g. {0} in a local section).
+ """
+ mapping = mapping_fixtures.MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ assertion = mapping_fixtures.CUSTOMER_ASSERTION
+
+ self.assertRaises(exception.DirectMappingError,
+ rp.process,
+ assertion)
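For context: a mapping's local section can interpolate remote matches positionally, with {0} standing for the first remote entry. A sketch of the shape involved (rule content hypothetical; the real fixture may differ):

    valid_rule = {
        'local': [{'user': {'name': '{0}'}}],   # {0} -> first remote match
        'remote': [{'type': 'UserName'}],       # supplies that match
    }
    # The fixture used above presumably references {0} without any remote
    # entry providing it, which is what triggers DirectMappingError.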
def test_rule_engine_returns_group_names(self):
"""Check whether RuleProcessor returns group names with their domains.
@@ -285,7 +292,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
"""
mapping = mapping_fixtures.MAPPING_GROUP_NAMES
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -317,10 +324,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
correctly filters out Manager and only allows Developer and Contractor.
"""
-
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -354,10 +360,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
correctly filters out Manager and Developer and only allows Contractor.
"""
-
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -383,10 +388,9 @@ class MappingRuleEngineTests(unit.BaseTestCase):
entry in the remote rules.
"""
-
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -412,7 +416,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self):
@@ -423,7 +427,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
"""
mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN
assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
self.assertRaises(exception.ValidationError, rp.process, assertion)
def test_rule_engine_no_groups_allowed(self):
@@ -436,7 +440,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
"""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertListEqual(mapped_properties['group_names'], [])
@@ -444,41 +448,19 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertEqual('tbo', mapped_properties['user']['name'])
def test_mapping_federated_domain_specified(self):
- """Test mapping engine when domain 'ephemeral' is explicitely set.
+ """Test mapping engine when domain 'ephemeral' is explicitly set.
For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion
EMPLOYEE_ASSERTION
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
- def test_create_user_object_with_bad_mapping(self):
- """Test if user object is created even with bad mapping.
-
- User objects will be created by mapping engine always as long as there
- is corresponding local rule. This test shows, that even with assertion
- where no group names nor ids are matched, but there is 'blind' rule for
- mapping user, such object will be created.
-
- In this test MAPPING_EHPEMERAL_USER expects UserName set to jsmith
- whereas value from assertion is 'tbo'.
-
- """
- mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
- rp = mapping_utils.RuleProcessor(mapping['rules'])
- assertion = mapping_fixtures.CONTRACTOR_ASSERTION
- mapped_properties = rp.process(assertion)
- self.assertIsNotNone(mapped_properties)
- self.assertValidMappedUserObject(mapped_properties)
-
- self.assertNotIn('id', mapped_properties['user'])
- self.assertNotIn('name', mapped_properties['user'])
-
def test_set_ephemeral_domain_to_ephemeral_users(self):
"""Test auto assigning service domain to ephemeral users.
@@ -488,7 +470,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
"""
mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -497,7 +479,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
def test_local_user_local_domain(self):
"""Test that local users can have non-service domains assigned."""
mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
@@ -514,19 +496,21 @@ class MappingRuleEngineTests(unit.BaseTestCase):
- Check if the user has proper domain ('federated') set
- Check if the user has property type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and equal to name, as it was not
- explicitely specified in the mapping.
+ - Check if unique_id is properly set and equal to display_name,
+ as it was not explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username({}, mapped_properties)
- self.assertEqual('jsmith', mapped_properties['user']['id'])
self.assertEqual('jsmith', mapped_properties['user']['name'])
+ unique_id, display_name = mapped.get_user_unique_id_and_display_name(
+ {}, mapped_properties)
+ self.assertEqual('jsmith', unique_id)
+ self.assertEqual('jsmith', display_name)
def test_user_identifications_name_and_federated_domain(self):
"""Test varius mapping options and how users are identified.
@@ -537,20 +521,19 @@ class MappingRuleEngineTests(unit.BaseTestCase):
- Check if the user has proper domain ('federated') set
- Check if the user has proper type set ('ephemeral')
- Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and equal to name, as it was not
- explicitely specified in the mapping.
+ - Check if the unique_id and display_name are properly set
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
mapped_properties = rp.process(assertion)
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username({}, mapped_properties)
- self.assertEqual('tbo', mapped_properties['user']['name'])
- self.assertEqual('abc123%40example.com',
- mapped_properties['user']['id'])
+ unique_id, display_name = mapped.get_user_unique_id_and_display_name(
+ {}, mapped_properties)
+ self.assertEqual('tbo', display_name)
+ self.assertEqual('abc123%40example.com', unique_id)
def test_user_identification_id(self):
"""Test varius mapping options and how users are identified.
@@ -560,21 +543,21 @@ class MappingRuleEngineTests(unit.BaseTestCase):
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has proper type set ('ephemeral')
- - Check if user's id is properly mapped from the assertion
- - Check if user's name is properly set and equal to id, as it was not
- explicitely specified in the mapping.
+ - Check if user's display_name is properly set and equal to unique_id,
+ as it was not explicitly specified in the mapping.
"""
mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.ADMIN_ASSERTION
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username(context, mapped_properties)
- self.assertEqual('bob', mapped_properties['user']['name'])
- self.assertEqual('bob', mapped_properties['user']['id'])
+ unique_id, display_name = mapped.get_user_unique_id_and_display_name(
+ context, mapped_properties)
+ self.assertEqual('bob', unique_id)
+ self.assertEqual('bob', display_name)
def test_user_identification_id_and_name(self):
"""Test varius mapping options and how users are identified.
@@ -584,8 +567,8 @@ class MappingRuleEngineTests(unit.BaseTestCase):
Test plan:
- Check if the user has proper domain ('federated') set
- Check if the user has proper type set ('ephemeral')
- - Check if user's name is properly mapped from the assertion
- - Check if user's id is properly set and and equal to value hardcoded
+ - Check if display_name is properly set from the assertion
+ - Check if unique_id is properly set and equal to the value hardcoded
in the mapping
This test does two iterations with different assertions used as input
@@ -601,19 +584,21 @@ class MappingRuleEngineTests(unit.BaseTestCase):
(mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo')]
for assertion, exp_user_name in testcases:
mapping = mapping_fixtures.MAPPING_USER_IDS
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
context = {'environment': {}}
self.assertIsNotNone(mapped_properties)
self.assertValidMappedUserObject(mapped_properties)
- mapped.setup_username(context, mapped_properties)
- self.assertEqual(exp_user_name, mapped_properties['user']['name'])
- self.assertEqual('abc123%40example.com',
- mapped_properties['user']['id'])
+ unique_id, display_name = (
+ mapped.get_user_unique_id_and_display_name(context,
+ mapped_properties)
+ )
+ self.assertEqual(exp_user_name, display_name)
+ self.assertEqual('abc123%40example.com', unique_id)
def test_whitelist_pass_through(self):
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = mapping_fixtures.DEVELOPER_ASSERTION
mapped_properties = rp.process(assertion)
self.assertValidMappedUserObject(mapped_properties)
@@ -622,13 +607,119 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertEqual('Developer',
mapped_properties['group_names'][0]['name'])
+ def test_mapping_with_incorrect_local_keys(self):
+ mapping = mapping_fixtures.MAPPING_BAD_LOCAL_SETUP
+ self.assertRaises(exception.ValidationError,
+ mapping_utils.validate_mapping_structure,
+ mapping)
+
+ def test_mapping_with_group_name_and_domain(self):
+ mapping = mapping_fixtures.MAPPING_GROUP_NAMES
+ mapping_utils.validate_mapping_structure(mapping)
+
def test_type_not_in_assertion(self):
"""Test that if the remote "type" is not in the assertion it fails."""
mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH
- rp = mapping_utils.RuleProcessor(mapping['rules'])
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
assertion = {uuid.uuid4().hex: uuid.uuid4().hex}
+ self.assertRaises(exception.ValidationError,
+ rp.process,
+ assertion)
+
+ def test_rule_engine_group_ids_mapping_whitelist(self):
+ """Test mapping engine when group_ids is explicitly set
+
+ Also test whitelists on group ids
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST
+ assertion = mapping_fixtures.GROUP_IDS_ASSERTION
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
mapped_properties = rp.process(assertion)
- self.assertValidMappedUserObject(mapped_properties)
+ self.assertIsNotNone(mapped_properties)
+ self.assertEqual('opilotte', mapped_properties['user']['name'])
+ self.assertListEqual([], mapped_properties['group_names'])
+ self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
+ mapped_properties['group_ids'])
- self.assertNotIn('id', mapped_properties['user'])
- self.assertNotIn('name', mapped_properties['user'])
+ def test_rule_engine_group_ids_mapping_blacklist(self):
+ """Test mapping engine when group_ids is explicitly set.
+
+ Also test blacklists on group ids
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_BLACKLIST
+ assertion = mapping_fixtures.GROUP_IDS_ASSERTION
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertEqual('opilotte', mapped_properties['user']['name'])
+ self.assertListEqual([], mapped_properties['group_names'])
+ self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
+ mapped_properties['group_ids'])
+
+ def test_rule_engine_group_ids_mapping_only_one_group(self):
+ """Test mapping engine when group_ids is explicitly set.
+
+ If the group ids list has only one group,
+ test if the transformation is done correctly
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST
+ assertion = mapping_fixtures.GROUP_IDS_ASSERTION_ONLY_ONE_GROUP
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertEqual('opilotte', mapped_properties['user']['name'])
+ self.assertListEqual([], mapped_properties['group_names'])
+ self.assertItemsEqual(['210mlk', '321cba'],
+ mapped_properties['group_ids'])
+
+
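For context, the whitelist/blacklist tests above operate on rules of roughly
the following shape (a sketch only; the real MAPPING_GROUPS_IDS_WHITELIST
fixture lives in mapping_fixtures and may differ in detail):

    # Sketch of a group_ids whitelist rule; the values mirror the
    # assertions above, but the exact fixture contents are an assumption.
    SKETCH_MAPPING_GROUPS_IDS_WHITELIST = {
        "rules": [{
            "local": [
                {"user": {"name": "{0}"}},
                {"group_ids": "{1}"},
            ],
            "remote": [
                {"type": "name"},
                {"type": "group_ids",
                 "whitelist": ["abc123", "ghi789", "klm012"]},
            ],
        }]
    }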
+class TestUnicodeAssertionData(unit.BaseTestCase):
+ """Ensure that unicode data in the assertion headers works.
+
+ Bug #1525250 reported that something was not getting correctly encoded
+ and/or decoded when assertion data contained non-ASCII characters.
+
+ This test class mimics what happens in a real HTTP request.
+ """
+
+ def setUp(self):
+ super(TestUnicodeAssertionData, self).setUp()
+ self.config_fixture = self.useFixture(config_fixture.Config(cfg.CONF))
+ self.config_fixture.config(group='federation',
+ assertion_prefix='PFX')
+
+ def _pull_mapping_rules_from_the_database(self):
+        # NOTE(dstanek): In a live system the rules are dumped into JSON bytes
+        # before being stored in the database. Upon retrieval the bytes are
+        # loaded and the resulting dictionary is full of unicode text strings.
+        # Most of the tests in this file incorrectly assume the mapping
+        # fixture dictionary is the same as what it would look like coming out
+        # of the database. The strings, when coming out of the database, are
+        # all text.
+ return jsonutils.loads(jsonutils.dumps(
+ mapping_fixtures.MAPPING_UNICODE))
+
+ def _pull_assertion_from_the_request_headers(self):
+ # NOTE(dstanek): In a live system the bytes for the assertion are
+ # pulled from the HTTP headers. These bytes may be decodable as
+ # ISO-8859-1 according to Section 3.2.4 of RFC 7230. Let's assume
+ # that our web server plugins are correctly encoding the data.
+ context = dict(environment=mapping_fixtures.UNICODE_NAME_ASSERTION)
+ data = mapping_utils.get_assertion_params_from_env(context)
+ # NOTE(dstanek): keystone.auth.plugins.mapped
+ return dict(data)
+
+ def test_unicode(self):
+ mapping = self._pull_mapping_rules_from_the_database()
+ assertion = self._pull_assertion_from_the_request_headers()
+
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ values = rp.process(assertion)
+
+ fn = assertion.get('PFX_FirstName')
+ ln = assertion.get('PFX_LastName')
+ full_name = '%s %s' % (fn, ln)
+ user_name = values.get('user', {}).get('name')
+ self.assertEqual(full_name, user_name)
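The database round-trip simulated by this test class is nothing more than a
serialize/deserialize cycle. A minimal standalone sketch of the same trick,
using oslo.serialization as the tests do:

    from oslo_serialization import jsonutils

    mapping = {'rules': [{'local': [], 'remote': []}]}
    # dumps/loads converts every str in the structure into unicode text,
    # which is what a real database read would hand back.
    mapping_as_if_from_db = jsonutils.loads(jsonutils.dumps(mapping))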
diff --git a/keystone-moon/keystone/tests/unit/core.py b/keystone-moon/keystone/tests/unit/core.py
index eb8b9f65..1054e131 100644
--- a/keystone-moon/keystone/tests/unit/core.py
+++ b/keystone-moon/keystone/tests/unit/core.py
@@ -14,8 +14,11 @@
from __future__ import absolute_import
import atexit
+import base64
import datetime
import functools
+import hashlib
+import json
import logging
import os
import re
@@ -28,14 +31,16 @@ import warnings
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
+from oslo_context import context as oslo_context
+from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
-import oslotest.base as oslotest
from oslotest import mockpatch
from paste.deploy import loadwsgi
import six
from sqlalchemy import exc
+import testtools
from testtools import testcase
# NOTE(ayoung)
@@ -45,24 +50,20 @@ from keystone.common import environment # noqa
environment.use_eventlet()
from keystone import auth
-from keystone.common import config as common_cfg
+from keystone.common import config
from keystone.common import dependency
-from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
-from keystone import config
-from keystone import controllers
from keystone import exception
from keystone import notifications
-from keystone.policy.backends import rules
from keystone.server import common
-from keystone import service
from keystone.tests.unit import ksfixtures
+from keystone.version import controllers
+from keystone.version import service
config.configure()
-LOG = log.getLogger(__name__)
PID = six.text_type(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
@@ -82,7 +83,6 @@ TMPDIR = _calc_tmpdir()
CONF = cfg.CONF
log.register_options(CONF)
-rules.init()
IN_MEM_DB_CONN_STRING = 'sqlite://'
@@ -208,6 +208,22 @@ def skip_if_cache_disabled(*sections):
return wrapper
+def skip_if_cache_is_enabled(*sections):
+ def wrapper(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ if CONF.cache.enabled:
+ for s in sections:
+ conf_sec = getattr(CONF, s, None)
+ if conf_sec is not None:
+ if getattr(conf_sec, 'caching', True):
+ raise testcase.TestSkipped('%s caching enabled.' %
+ s)
+ return f(*args, **kwargs)
+ return inner
+ return wrapper
+
+
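A hypothetical use of the new decorator (the test name is illustrative): a
test asserting that every read reaches the driver is only meaningful with
caching off, so it would be guarded like this:

    @skip_if_cache_is_enabled('identity')
    def test_reads_always_hit_the_driver(self):
        # Skipped whenever [cache]enabled=True and [identity]caching=True,
        # since a warm cache would hide the extra driver calls.
        ...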
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
@@ -223,113 +239,230 @@ class UnexpectedExit(Exception):
pass
-def new_ref():
- """Populates a ref with attributes common to some API entities."""
- return {
+def new_region_ref(parent_region_id=None, **kwargs):
+ ref = {
'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
- 'enabled': True}
-
+ 'parent_region_id': parent_region_id}
-def new_region_ref():
- ref = new_ref()
- # Region doesn't have name or enabled.
- del ref['name']
- del ref['enabled']
- ref['parent_region_id'] = None
+ ref.update(kwargs)
return ref
-def new_service_ref():
- ref = new_ref()
- ref['type'] = uuid.uuid4().hex
+def new_service_ref(**kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True,
+ 'type': uuid.uuid4().hex,
+ }
+ ref.update(kwargs)
return ref
-def new_endpoint_ref(service_id, interface='public', default_region_id=None,
- **kwargs):
- ref = new_ref()
- del ref['enabled'] # enabled is optional
- ref['interface'] = interface
- ref['service_id'] = service_id
- ref['url'] = 'https://' + uuid.uuid4().hex + '.com'
- ref['region_id'] = default_region_id
+NEEDS_REGION_ID = object()
+
+
+def new_endpoint_ref(service_id, interface='public',
+ region_id=NEEDS_REGION_ID, **kwargs):
+
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'interface': interface,
+ 'service_id': service_id,
+ 'url': 'https://' + uuid.uuid4().hex + '.com',
+ }
+
+ if region_id is NEEDS_REGION_ID:
+ ref['region_id'] = uuid.uuid4().hex
+ elif region_id is None and kwargs.get('region') is not None:
+ # pre-3.2 form endpoints are not supported by this function
+ raise NotImplementedError("use new_endpoint_ref_with_region")
+ else:
+ ref['region_id'] = region_id
ref.update(kwargs)
return ref
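NEEDS_REGION_ID is the classic sentinel-object idiom: since None is a legal
region_id, a fresh object() is needed to detect "argument not supplied". A
minimal self-contained sketch of the same idiom:

    STORE = {'a': 1}
    _NOT_PASSED = object()  # private sentinel; never equal to caller data

    def lookup(key, default=_NOT_PASSED):
        # default=None could not distinguish "no default supplied" from
        # "the default is None"; an identity check on the sentinel can.
        if default is _NOT_PASSED:
            return STORE[key]  # raises KeyError when absent
        return STORE.get(key, default)

    assert lookup('a') == 1
    assert lookup('missing', None) is None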
-def new_domain_ref():
- ref = new_ref()
+def new_endpoint_ref_with_region(service_id, region, interface='public',
+ **kwargs):
+ """Define an endpoint_ref having a pre-3.2 form.
+
+ Contains the deprecated 'region' instead of 'region_id'.
+ """
+ ref = new_endpoint_ref(service_id, interface, region=region,
+ region_id='invalid', **kwargs)
+ del ref['region_id']
return ref
-def new_project_ref(domain_id=None, parent_id=None, is_domain=False):
- ref = new_ref()
- ref['domain_id'] = domain_id
- ref['parent_id'] = parent_id
- ref['is_domain'] = is_domain
+def new_domain_ref(**kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True
+ }
+ ref.update(kwargs)
+ return ref
+
+
+def new_project_ref(domain_id=None, is_domain=False, **kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True,
+ 'domain_id': domain_id,
+ 'is_domain': is_domain,
+ }
+ # NOTE(henry-nash): We don't include parent_id in the initial list above
+ # since specifying it is optional depending on where the project sits in
+ # the hierarchy (and a parent_id of None has meaning - i.e. it's a top
+ # level project).
+ ref.update(kwargs)
return ref
-def new_user_ref(domain_id, project_id=None):
- ref = new_ref()
- ref['domain_id'] = domain_id
- ref['email'] = uuid.uuid4().hex
- ref['password'] = uuid.uuid4().hex
+def new_user_ref(domain_id, project_id=None, **kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': True,
+ 'domain_id': domain_id,
+ 'email': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex,
+ }
if project_id:
ref['default_project_id'] = project_id
+ ref.update(kwargs)
return ref
-def new_group_ref(domain_id):
- ref = new_ref()
- ref['domain_id'] = domain_id
+def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs):
+ ref = {
+ 'idp_id': idp_id or 'ORG_IDP',
+ 'protocol_id': protocol_id or 'saml2',
+ 'unique_id': uuid.uuid4().hex,
+ 'display_name': uuid.uuid4().hex,
+ }
+ ref.update(kwargs)
return ref
-def new_credential_ref(user_id, project_id=None, cred_type=None):
- ref = dict()
- ref['id'] = uuid.uuid4().hex
- ref['user_id'] = user_id
- if cred_type == 'ec2':
- ref['type'] = 'ec2'
- ref['blob'] = uuid.uuid4().hex
- else:
- ref['type'] = 'cert'
- ref['blob'] = uuid.uuid4().hex
+def new_group_ref(domain_id, **kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'domain_id': domain_id
+ }
+ ref.update(kwargs)
+ return ref
+
+
+def new_credential_ref(user_id, project_id=None, type='cert', **kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'user_id': user_id,
+ 'type': type,
+ }
+
if project_id:
ref['project_id'] = project_id
+ if 'blob' not in kwargs:
+ ref['blob'] = uuid.uuid4().hex
+
+ ref.update(kwargs)
return ref
-def new_role_ref():
- ref = new_ref()
- # Roles don't have a description or the enabled flag
- del ref['description']
- del ref['enabled']
+def new_cert_credential(user_id, project_id=None, blob=None, **kwargs):
+ if blob is None:
+ blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex}
+
+ credential = new_credential_ref(user_id=user_id,
+ project_id=project_id,
+ blob=json.dumps(blob),
+ type='cert',
+ **kwargs)
+ return blob, credential
+
+
+def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs):
+ if blob is None:
+ blob = {
+ 'access': uuid.uuid4().hex,
+ 'secret': uuid.uuid4().hex,
+ 'trust_id': None
+ }
+
+ if 'id' not in kwargs:
+ access = blob['access'].encode('utf-8')
+ kwargs['id'] = hashlib.sha256(access).hexdigest()
+
+ credential = new_credential_ref(user_id=user_id,
+ project_id=project_id,
+ blob=json.dumps(blob),
+ type='ec2',
+ **kwargs)
+ return blob, credential
+
+
+def new_totp_credential(user_id, project_id=None, blob=None):
+ if not blob:
+ blob = base64.b32encode(uuid.uuid4().hex).rstrip('=')
+ credential = new_credential_ref(user_id=user_id,
+ project_id=project_id,
+ blob=blob,
+ type='totp')
+ return credential
+
+
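Two details in the credential helpers are easy to miss: the EC2 helper
derives the credential id deterministically by hashing the access key, and
the TOTP helper generates a base32 secret, the alphabet TOTP requires. A
short sketch of both derivations (variable names are illustrative):

    import base64
    import hashlib
    import uuid

    access = uuid.uuid4().hex
    # EC2: the id is the SHA-256 hex digest of the access key, so a
    # credential can be located from the access key alone.
    ec2_credential_id = hashlib.sha256(access.encode('utf-8')).hexdigest()

    # TOTP: the shared secret must be base32 (RFC 4648); padding stripped.
    totp_secret = base64.b32encode(
        uuid.uuid4().hex.encode('utf-8')).rstrip(b'=')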
+def new_role_ref(**kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': None
+ }
+ ref.update(kwargs)
return ref
-def new_policy_ref():
- ref = new_ref()
- ref['blob'] = uuid.uuid4().hex
- ref['type'] = uuid.uuid4().hex
+def new_policy_ref(**kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True,
+ # Store serialized JSON data as the blob to mimic real world usage.
+ 'blob': json.dumps({'data': uuid.uuid4().hex, }),
+ 'type': uuid.uuid4().hex,
+ }
+
+ ref.update(kwargs)
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
- allow_redelegation=False):
- ref = dict()
- ref['id'] = uuid.uuid4().hex
- ref['trustor_user_id'] = trustor_user_id
- ref['trustee_user_id'] = trustee_user_id
- ref['impersonation'] = impersonation or False
- ref['project_id'] = project_id
- ref['remaining_uses'] = remaining_uses
- ref['allow_redelegation'] = allow_redelegation
+ allow_redelegation=False, redelegation_count=None, **kwargs):
+ ref = {
+ 'id': uuid.uuid4().hex,
+ 'trustor_user_id': trustor_user_id,
+ 'trustee_user_id': trustee_user_id,
+ 'impersonation': impersonation or False,
+ 'project_id': project_id,
+ 'remaining_uses': remaining_uses,
+ 'allow_redelegation': allow_redelegation,
+ }
+
+ if isinstance(redelegation_count, int):
+ ref.update(redelegation_count=redelegation_count)
if isinstance(expires, six.string_types):
ref['expires_at'] = expires
@@ -351,10 +484,25 @@ def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
for role_name in role_names:
ref['roles'].append({'name': role_name})
+ ref.update(kwargs)
return ref
-class BaseTestCase(oslotest.BaseTestCase):
+def create_user(api, domain_id, **kwargs):
+ """Create a user via the API. Keep the created password.
+
+    The password is saved before api.create_user() is called and restored
+    on the returned ref afterwards.
+ Only use this routine if there is a requirement for the user object to
+ have a valid password after api.create_user() is called.
+ """
+ user = new_user_ref(domain_id=domain_id, **kwargs)
+ password = user['password']
+ user = api.create_user(user)
+ user['password'] = password
+ return user
+
+
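A usage sketch for the helper above, assuming the identity_api attribute and
config that the surrounding test cases provide: because the backend scrubs
the password from the returned ref, tests that need to authenticate
afterwards go through create_user() rather than calling the API directly.

    user = create_user(self.identity_api,
                       domain_id=CONF.identity.default_domain_id)
    # The returned ref still carries the generated password, so an
    # immediate authenticate round-trip is possible:
    self.identity_api.authenticate(context={},
                                   user_id=user['id'],
                                   password=user['password'])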
+class BaseTestCase(testtools.TestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once the
@@ -365,6 +513,10 @@ class BaseTestCase(oslotest.BaseTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
+
+ self.useFixture(fixtures.NestedTempfile())
+ self.useFixture(fixtures.TempHomeDir())
+
self.useFixture(mockpatch.PatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
@@ -373,6 +525,10 @@ class BaseTestCase(oslotest.BaseTestCase):
module='^keystone\\.')
warnings.simplefilter('error', exc.SAWarning)
self.addCleanup(warnings.resetwarnings)
+ # Ensure we have an empty threadlocal context at the start of each
+ # test.
+ self.assertIsNone(oslo_context.get_current())
+ self.useFixture(oslo_ctx_fixture.ClearRequestContext())
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
@@ -395,6 +551,9 @@ class TestCase(BaseTestCase):
def config_files(self):
return []
+ def _policy_fixture(self):
+ return ksfixtures.Policy(dirs.etc('policy.json'), self.config_fixture)
+
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
@@ -403,18 +562,19 @@ class TestCase(BaseTestCase):
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
- self.config_fixture.config(group='oslo_policy',
- policy_file=dirs.etc('policy.json'))
+
+ self.useFixture(self._policy_fixture())
+
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
- proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
+ proxies=['oslo_cache.testing.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
- driver='templated',
+ driver='sql',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='kvs',
@@ -422,7 +582,6 @@ class TestCase(BaseTestCase):
('keystone.tests.unit.test_kvs.'
'KVSBackendForcedKeyMangleFixture'),
'keystone.tests.unit.test_kvs.KVSBackendFixture'])
- self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='signing', certfile=signing_certfile,
keyfile=signing_keyfile,
@@ -444,17 +603,15 @@ class TestCase(BaseTestCase):
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
- 'keystone.common._memcache_pool=INFO',
'keystone.common.ldap=INFO',
])
self.auth_plugin_config_override()
def auth_plugin_config_override(self, methods=None, **method_classes):
- if methods is not None:
- self.config_fixture.config(group='auth', methods=methods)
- common_cfg.setup_authentication()
- if method_classes:
- self.config_fixture.config(group='auth', **method_classes)
+ self.useFixture(
+ ksfixtures.ConfigAuthPlugins(self.config_fixture,
+ methods,
+ **method_classes))
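A sketch of how a test case is expected to use this override (class and
method names are illustrative only):

    class PasswordOnlyTestCase(TestCase):

        def config_overrides(self):
            super(PasswordOnlyTestCase, self).config_overrides()
            # Narrow the enabled auth methods for this case alone; the
            # ConfigAuthPlugins fixture undoes the change on cleanup.
            self.auth_plugin_config_override(methods=['password', 'token'])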
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
@@ -462,6 +619,7 @@ class TestCase(BaseTestCase):
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
+ self.__load_backends_called = False
self.addCleanup(CONF.reset)
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
@@ -473,9 +631,10 @@ class TestCase(BaseTestCase):
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(mockpatch.PatchObject(
- common_cfg, '_register_auth_plugin_opt',
+ config, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
+ self.sql_driver_version_overrides = {}
self.config_overrides()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
@@ -498,8 +657,6 @@ class TestCase(BaseTestCase):
# tests aren't used.
self.addCleanup(dependency.reset)
- self.addCleanup(kvs.INMEMDB.clear)
-
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
@@ -515,7 +672,6 @@ class TestCase(BaseTestCase):
def load_backends(self):
"""Initializes each manager and assigns them to an attribute."""
-
# TODO(blk-u): Shouldn't need to clear the registry here, but some
# tests call load_backends multiple times. These should be fixed to
# only call load_backends once.
@@ -541,7 +697,7 @@ class TestCase(BaseTestCase):
This is useful to load managers initialized by extensions. No extra
backends are loaded by default.
- :return: dict of name -> manager
+ :returns: dict of name -> manager
"""
return {}
@@ -573,7 +729,8 @@ class TestCase(BaseTestCase):
fixtures_to_cleanup.append(attrname)
for tenant in fixtures.TENANTS:
- if hasattr(self, 'tenant_%s' % tenant['id']):
+ tenant_attr_name = 'tenant_%s' % tenant['name'].lower()
+ if hasattr(self, tenant_attr_name):
try:
# This will clear out any roles on the project as well
self.resource_api.delete_project(tenant['id'])
@@ -582,9 +739,8 @@ class TestCase(BaseTestCase):
rv = self.resource_api.create_project(
tenant['id'], tenant)
- attrname = 'tenant_%s' % tenant['id']
- setattr(self, attrname, rv)
- fixtures_to_cleanup.append(attrname)
+ setattr(self, tenant_attr_name, rv)
+ fixtures_to_cleanup.append(tenant_attr_name)
for role in fixtures.ROLES:
try:
@@ -625,6 +781,17 @@ class TestCase(BaseTestCase):
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
+ for role_assignment in fixtures.ROLE_ASSIGNMENTS:
+ role_id = role_assignment['role_id']
+ user = role_assignment['user']
+ tenant_id = role_assignment['tenant_id']
+ user_id = getattr(self, 'user_%s' % user)['id']
+ try:
+ self.assignment_api.add_role_to_user_and_project(
+ user_id, tenant_id, role_id)
+ except exception.Conflict:
+ pass
+
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def _paste_config(self, config):
@@ -648,6 +815,10 @@ class TestCase(BaseTestCase):
:param delta: Maximum allowable time delta, defined in seconds.
"""
+ if a == b:
+ # Short-circuit if the values are the same.
+ return
+
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertTrue(abs(a - b).seconds <= delta, msg)
@@ -664,11 +835,11 @@ class TestCase(BaseTestCase):
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
- if isinstance(exc_value.args[0], unicode):
- if not expected_regexp.search(unicode(exc_value)):
+ if isinstance(exc_value.args[0], six.text_type):
+ if not expected_regexp.search(six.text_type(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
- (expected_regexp.pattern, unicode(exc_value)))
+ (expected_regexp.pattern, six.text_type(exc_value)))
else:
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
@@ -708,12 +879,29 @@ class TestCase(BaseTestCase):
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
+
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
- self.config_fixture.config(group='revoke', driver='sql')
self.config_fixture.config(group='token', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
+
+ def use_specific_sql_driver_version(self, driver_path,
+ versionless_backend, version_suffix):
+ """Add this versioned driver to the list that will be loaded.
+
+ :param driver_path: The path to the drivers, e.g. 'keystone.assignment'
+ :param versionless_backend: The name of the versionless drivers, e.g.
+ 'backends'
+        :param version_suffix: The suffix for the version, e.g. ``V8_``
+
+ This method assumes that versioned drivers are named:
+ <version_suffix><name of versionless driver>, e.g. 'V8_backends'.
+
+ """
+ self.sql_driver_version_overrides[driver_path] = {
+ 'versionless_backend': versionless_backend,
+ 'versioned_backend': version_suffix + versionless_backend}
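Given the V8_backends directories added earlier in this patch, a subclass
would pin the assignment driver to its version 8 implementation roughly as
follows (the class name is hypothetical; the arguments follow the docstring
above):

    class TestWithV8AssignmentDriver(SQLDriverOverrides, TestCase):

        def config_overrides(self):
            super(TestWithV8AssignmentDriver, self).config_overrides()
            # Loads keystone.assignment.V8_backends in place of
            # keystone.assignment.backends.
            self.use_specific_sql_driver_version(
                'keystone.assignment', 'backends', 'V8_')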
diff --git a/keystone-moon/keystone/tests/unit/default_fixtures.py b/keystone-moon/keystone/tests/unit/default_fixtures.py
index 80b0665f..7f661986 100644
--- a/keystone-moon/keystone/tests/unit/default_fixtures.py
+++ b/keystone-moon/keystone/tests/unit/default_fixtures.py
@@ -14,53 +14,67 @@
# NOTE(dolph): please try to avoid additional fixtures if possible; test suite
# performance may be negatively affected.
+import uuid
+BAR_TENANT_ID = uuid.uuid4().hex
+BAZ_TENANT_ID = uuid.uuid4().hex
+MTU_TENANT_ID = uuid.uuid4().hex
+SERVICE_TENANT_ID = uuid.uuid4().hex
DEFAULT_DOMAIN_ID = 'default'
TENANTS = [
{
- 'id': 'bar',
+ 'id': BAR_TENANT_ID,
'name': 'BAR',
'domain_id': DEFAULT_DOMAIN_ID,
'description': 'description',
'enabled': True,
- 'parent_id': None,
+ 'parent_id': DEFAULT_DOMAIN_ID,
'is_domain': False,
}, {
- 'id': 'baz',
+ 'id': BAZ_TENANT_ID,
'name': 'BAZ',
'domain_id': DEFAULT_DOMAIN_ID,
'description': 'description',
'enabled': True,
- 'parent_id': None,
+ 'parent_id': DEFAULT_DOMAIN_ID,
'is_domain': False,
}, {
- 'id': 'mtu',
+ 'id': MTU_TENANT_ID,
'name': 'MTU',
'description': 'description',
'enabled': True,
'domain_id': DEFAULT_DOMAIN_ID,
- 'parent_id': None,
+ 'parent_id': DEFAULT_DOMAIN_ID,
'is_domain': False,
}, {
- 'id': 'service',
+ 'id': SERVICE_TENANT_ID,
'name': 'service',
'description': 'description',
'enabled': True,
'domain_id': DEFAULT_DOMAIN_ID,
- 'parent_id': None,
+ 'parent_id': DEFAULT_DOMAIN_ID,
'is_domain': False,
}
]
# NOTE(ja): a role of keystone_admin is done in setUp
USERS = [
+ # NOTE(morganfainberg): Admin user for replacing admin_token_auth
+ {
+ 'id': 'reqadmin',
+ 'name': 'REQ_ADMIN',
+ 'domain_id': DEFAULT_DOMAIN_ID,
+ 'password': 'password',
+ 'tenants': [],
+ 'enabled': True
+ },
{
'id': 'foo',
'name': 'FOO',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'foo2',
- 'tenants': ['bar'],
+ 'tenants': [BAR_TENANT_ID],
'enabled': True,
'email': 'foo@bar.com',
}, {
@@ -69,8 +83,8 @@ USERS = [
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'two2',
'enabled': True,
- 'default_project_id': 'baz',
- 'tenants': ['baz'],
+ 'default_project_id': BAZ_TENANT_ID,
+ 'tenants': [BAZ_TENANT_ID],
'email': 'two@three.com',
}, {
'id': 'badguy',
@@ -78,8 +92,8 @@ USERS = [
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'bad',
'enabled': False,
- 'default_project_id': 'baz',
- 'tenants': ['baz'],
+ 'default_project_id': BAZ_TENANT_ID,
+ 'tenants': [BAZ_TENANT_ID],
'email': 'bad@guy.com',
}, {
'id': 'sna',
@@ -87,7 +101,7 @@ USERS = [
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'snafu',
'enabled': True,
- 'tenants': ['bar'],
+ 'tenants': [BAR_TENANT_ID],
'email': 'sna@snl.coom',
}
]
@@ -96,30 +110,45 @@ ROLES = [
{
'id': 'admin',
'name': 'admin',
+ 'domain_id': None,
}, {
'id': 'member',
'name': 'Member',
+ 'domain_id': None,
}, {
'id': '9fe2ff9ee4384b1894a90878d3e92bab',
'name': '_member_',
+ 'domain_id': None,
}, {
'id': 'other',
'name': 'Other',
+ 'domain_id': None,
}, {
'id': 'browser',
'name': 'Browser',
+ 'domain_id': None,
}, {
'id': 'writer',
'name': 'Writer',
+ 'domain_id': None,
}, {
'id': 'service',
'name': 'Service',
+ 'domain_id': None,
}
]
+# NOTE(morganfainberg): Admin assignment for replacing admin_token_auth
+ROLE_ASSIGNMENTS = [
+ {
+ 'user': 'reqadmin',
+ 'tenant_id': SERVICE_TENANT_ID,
+ 'role_id': 'admin'
+ },
+]
+
DOMAINS = [{'description':
- (u'Owns users and tenants (i.e. projects)'
- ' available on Identity API v2.'),
+ (u'The default domain'),
'enabled': True,
'id': DEFAULT_DOMAIN_ID,
'name': u'Default'}]
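With randomized ids, tests can no longer address fixtures through literal
ids such as 'bar'; the companion change in core.py derives the attribute
name from the stable tenant name instead. A small sketch of that lookup:

    from keystone.tests.unit import default_fixtures

    # Mirrors TestCase.load_fixtures() in core.py above: attributes are
    # keyed on the tenant name, the id stays behind a module constant.
    for tenant in default_fixtures.TENANTS:
        attr_name = 'tenant_%s' % tenant['name'].lower()  # 'tenant_bar'
        print(attr_name, '->', tenant['id'])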
diff --git a/keystone-moon/keystone/tests/unit/external/README.rst b/keystone-moon/keystone/tests/unit/external/README.rst
new file mode 100644
index 00000000..e8f9fa65
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/external/README.rst
@@ -0,0 +1,9 @@
+This directory contains interface tests for external libraries. The goal
+is not to test every possible path through a library's code and get 100%
+coverage. It's to give us a level of confidence that their general interface
+remains the same through version upgrades.
+
+This gives us a place to put these tests without having to litter our
+own tests with assertions that are not directly related to the code
+under test. The expectations for each external library are all in one
+place, which makes it easier to find out what they are.
diff --git a/keystone-moon/keystone/tests/unit/external/__init__.py b/keystone-moon/keystone/tests/unit/external/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/external/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/external/test_timeutils.py b/keystone-moon/keystone/tests/unit/external/test_timeutils.py
new file mode 100644
index 00000000..7fc72d58
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/external/test_timeutils.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_utils import timeutils
+
+import keystone.tests.unit as tests
+
+
+class TestTimeUtils(tests.BaseTestCase):
+
+ def test_parsing_date_strings_returns_a_datetime(self):
+ example_date_str = '2015-09-23T04:45:37.196621Z'
+ dt = timeutils.parse_strtime(example_date_str, fmt=tests.TIME_FORMAT)
+ self.assertIsInstance(dt, datetime.datetime)
+
+ def test_parsing_invalid_date_strings_raises_a_ValueError(self):
+ example_date_str = ''
+ simple_format = '%Y'
+ self.assertRaises(ValueError,
+ timeutils.parse_strtime,
+ example_date_str,
+ fmt=simple_format)
diff --git a/keystone-moon/keystone/tests/unit/fakeldap.py b/keystone-moon/keystone/tests/unit/fakeldap.py
index 2f1ebe57..9ad1f218 100644
--- a/keystone-moon/keystone/tests/unit/fakeldap.py
+++ b/keystone-moon/keystone/tests/unit/fakeldap.py
@@ -18,10 +18,11 @@
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
-library to work with nova.
+library to work with keystone.
"""
+import random
import re
import shelve
@@ -67,7 +68,13 @@ def _internal_attr(attr_name, value_or_values):
if dn == 'cn=Doe\\, John,ou=Users,cn=example,cn=com':
return 'CN=Doe\\2C John,OU=Users,CN=example,CN=com'
- dn = ldap.dn.str2dn(core.utf8_encode(dn))
+ try:
+ dn = ldap.dn.str2dn(core.utf8_encode(dn))
+ except ldap.DECODING_ERROR:
+ # NOTE(amakarov): In case of IDs instead of DNs in group members
+ # they must be handled as regular values.
+ return normalize_value(dn)
+
norm = []
for part in dn:
name, val, i = part[0]
@@ -132,7 +139,6 @@ def _paren_groups(source):
def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
-
def match_with_wildcards(norm_val, val_list):
# Case insensitive checking with wildcards
if norm_val.startswith('*'):
@@ -209,6 +215,7 @@ class FakeShelve(dict):
FakeShelves = {}
+PendingRequests = {}
class FakeLdap(core.LDAPHandler):
@@ -534,18 +541,60 @@ class FakeLdap(core.LDAPHandler):
self._ldap_options[option] = invalue
def get_option(self, option):
- value = self._ldap_options.get(option, None)
+ value = self._ldap_options.get(option)
return value
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
- raise exception.NotImplemented()
+ if clientctrls is not None or timeout != -1 or sizelimit != 0:
+ raise exception.NotImplemented()
+
+ # only passing a single server control is supported by this fake ldap
+ if len(serverctrls) > 1:
+ raise exception.NotImplemented()
+
+ # search_ext is async and returns an identifier used for
+ # retrieving the results via result3(). This will be emulated by
+ # storing the request in a variable with random integer key and
+ # performing the real lookup in result3()
+ msgid = random.randint(0, 1000)
+ PendingRequests[msgid] = (base, scope, filterstr, attrlist, attrsonly,
+ serverctrls)
+ return msgid
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
- raise exception.NotImplemented()
+        """Execute an async request.
+
+        Only the msgid param is supported. Request info is fetched from the
+        global variable `PendingRequests` by msgid, executed using search_s,
+        and limited if requested.
+        """
+ if all != 1 or timeout is not None or resp_ctrl_classes is not None:
+ raise exception.NotImplemented()
+
+ params = PendingRequests[msgid]
+ # search_s accepts a subset of parameters of search_ext,
+ # that's why we use only the first 5.
+ results = self.search_s(*params[:5])
+
+ # extract limit from serverctrl
+ serverctrls = params[5]
+ ctrl = serverctrls[0]
+
+ if ctrl.size:
+ rdata = results[:ctrl.size]
+ else:
+ rdata = results
+
+        # The real result3 returns various service info -- rtype, rmsgid,
+        # serverctrls. This info is currently unused, so it is returned as
+        # None.
+ rtype = None
+ rmsgid = None
+ serverctrls = None
+ return (rtype, rdata, rmsgid, serverctrls)
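The pair above emulates python-ldap's asynchronous API: search_ext() queues
the request and returns a message id, which result3() later resolves. A
sketch of the expected calling sequence (connection setup and the paged
control are illustrative assumptions):

    import ldap
    from ldap.controls import SimplePagedResultsControl

    conn = FakeLdap()  # stands in for a bound LDAP connection
    ctrl = SimplePagedResultsControl(True, size=100, cookie='')
    msgid = conn.search_ext('ou=Users,cn=example,cn=com',
                            ldap.SCOPE_SUBTREE,
                            serverctrls=[ctrl])
    # Later: resolve the async request; rdata is capped at ctrl.size rows.
    rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)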
class FakeLdapPool(FakeLdap):
diff --git a/keystone-moon/keystone/tests/unit/filtering.py b/keystone-moon/keystone/tests/unit/filtering.py
index 93e0bc28..59301299 100644
--- a/keystone-moon/keystone/tests/unit/filtering.py
+++ b/keystone-moon/keystone/tests/unit/filtering.py
@@ -49,7 +49,6 @@ class FilterTests(object):
one.
"""
-
f = getattr(self.identity_api, 'create_%s' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'create_%s' % entity_type, None)
@@ -65,7 +64,6 @@ class FilterTests(object):
one.
"""
-
f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'delete_%s' % entity_type, None)
@@ -81,7 +79,6 @@ class FilterTests(object):
one.
"""
-
f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
if f is None:
f = getattr(self.resource_api, 'list_%ss' % entity_type, None)
diff --git a/keystone-moon/keystone/tests/unit/identity/test_backends.py b/keystone-moon/keystone/tests/unit/identity/test_backends.py
new file mode 100644
index 00000000..8b5c0def
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/identity/test_backends.py
@@ -0,0 +1,1297 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo_config import cfg
+from six.moves import range
+from testtools import matchers
+
+from keystone.common import driver_hints
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import filtering
+
+
+CONF = cfg.CONF
+
+
+class IdentityTests(object):
+
+ def _get_domain_fixture(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ return domain
+
+ def _set_domain_scope(self, domain_id):
+ # We only provide a domain scope if we have multiple drivers
+ if CONF.identity.domain_specific_drivers_enabled:
+ return domain_id
+
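IdentityTests is a backend-agnostic mixin: it assumes self.identity_api and
friends are supplied by a concrete TestCase. A sketch of how a driver-
specific suite would consume it (the class name and fixture wiring are
illustrative):

    class SqlIdentityTests(IdentityTests, unit.TestCase):

        def setUp(self):
            super(SqlIdentityTests, self).setUp()
            self.useFixture(database.Database())
            self.load_backends()
            self.load_fixtures(default_fixtures)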
+ def test_authenticate_bad_user(self):
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=uuid.uuid4().hex,
+ password=self.user_foo['password'])
+
+ def test_authenticate_bad_password(self):
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=self.user_foo['id'],
+ password=uuid.uuid4().hex)
+
+ def test_authenticate(self):
+ user_ref = self.identity_api.authenticate(
+ context={},
+ user_id=self.user_sna['id'],
+ password=self.user_sna['password'])
+ # NOTE(termie): the password field is left in user_sna to make
+ # it easier to authenticate in tests, but should
+ # not be returned by the api
+ self.user_sna.pop('password')
+ self.user_sna['enabled'] = True
+ self.assertDictEqual(self.user_sna, user_ref)
+
+ def test_authenticate_and_get_roles_no_metadata(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+
+ # Remove user id. It is ignored by create_user() and will break the
+ # subset test below.
+ del user['id']
+
+ new_user = self.identity_api.create_user(user)
+ self.assignment_api.add_user_to_project(self.tenant_baz['id'],
+ new_user['id'])
+ user_ref = self.identity_api.authenticate(
+ context={},
+ user_id=new_user['id'],
+ password=user['password'])
+ self.assertNotIn('password', user_ref)
+ # NOTE(termie): the password field is left in user_sna to make
+ # it easier to authenticate in tests, but should
+ # not be returned by the api
+ user.pop('password')
+ self.assertDictContainsSubset(user, user_ref)
+ role_list = self.assignment_api.get_roles_for_user_and_project(
+ new_user['id'], self.tenant_baz['id'])
+ self.assertEqual(1, len(role_list))
+ self.assertIn(CONF.member_role_id, role_list)
+
+ def test_authenticate_if_no_password_set(self):
+ id_ = uuid.uuid4().hex
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ self.identity_api.create_user(user)
+
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=id_,
+ password='password')
+
+ def test_create_unicode_user_name(self):
+ unicode_name = u'name \u540d\u5b57'
+ user = unit.new_user_ref(name=unicode_name,
+ domain_id=CONF.identity.default_domain_id)
+ ref = self.identity_api.create_user(user)
+ self.assertEqual(unicode_name, ref['name'])
+
+ def test_get_user(self):
+ user_ref = self.identity_api.get_user(self.user_foo['id'])
+ # NOTE(termie): the password field is left in user_foo to make
+ # it easier to authenticate in tests, but should
+ # not be returned by the api
+ self.user_foo.pop('password')
+ self.assertDictEqual(self.user_foo, user_ref)
+
+ @unit.skip_if_cache_disabled('identity')
+ def test_cache_layer_get_user(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ self.identity_api.create_user(user)
+ ref = self.identity_api.get_user_by_name(user['name'],
+ user['domain_id'])
+ # cache the result.
+ self.identity_api.get_user(ref['id'])
+ # delete bypassing identity api
+ domain_id, driver, entity_id = (
+ self.identity_api._get_domain_driver_and_entity_id(ref['id']))
+ driver.delete_user(entity_id)
+
+ self.assertDictEqual(ref, self.identity_api.get_user(ref['id']))
+ self.identity_api.get_user.invalidate(self.identity_api, ref['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user, ref['id'])
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ ref = self.identity_api.get_user_by_name(user['name'],
+ user['domain_id'])
+ user['description'] = uuid.uuid4().hex
+ # cache the result.
+ self.identity_api.get_user(ref['id'])
+ # update using identity api and get back updated user.
+ user_updated = self.identity_api.update_user(ref['id'], user)
+ self.assertDictContainsSubset(self.identity_api.get_user(ref['id']),
+ user_updated)
+ self.assertDictContainsSubset(
+ self.identity_api.get_user_by_name(ref['name'], ref['domain_id']),
+ user_updated)
+
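The invalidate() call used in this test is the cache-decorator idiom relied
on throughout the suite: the memoized manager method exposes an invalidate()
hook taking the same arguments as the original call. In sketch form:

    user_id = self.user_foo['id']  # any fixture user
    self.identity_api.get_user(user_id)      # warms the cache
    self.identity_api.get_user.invalidate(self.identity_api, user_id)
    self.identity_api.get_user(user_id)      # reaches the driver again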
+ def test_get_user_returns_not_found(self):
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user,
+ uuid.uuid4().hex)
+
+ def test_get_user_by_name(self):
+ user_ref = self.identity_api.get_user_by_name(
+ self.user_foo['name'], CONF.identity.default_domain_id)
+ # NOTE(termie): the password field is left in user_foo to make
+ # it easier to authenticate in tests, but should
+ # not be returned by the api
+ self.user_foo.pop('password')
+ self.assertDictEqual(self.user_foo, user_ref)
+
+ @unit.skip_if_cache_disabled('identity')
+ def test_cache_layer_get_user_by_name(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ self.identity_api.create_user(user)
+ ref = self.identity_api.get_user_by_name(user['name'],
+ user['domain_id'])
+ # delete bypassing the identity api.
+ domain_id, driver, entity_id = (
+ self.identity_api._get_domain_driver_and_entity_id(ref['id']))
+ driver.delete_user(entity_id)
+
+ self.assertDictEqual(ref, self.identity_api.get_user_by_name(
+ user['name'], CONF.identity.default_domain_id))
+ self.identity_api.get_user_by_name.invalidate(
+ self.identity_api, user['name'], CONF.identity.default_domain_id)
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user_by_name,
+ user['name'], CONF.identity.default_domain_id)
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ ref = self.identity_api.get_user_by_name(user['name'],
+ user['domain_id'])
+ user['description'] = uuid.uuid4().hex
+ user_updated = self.identity_api.update_user(ref['id'], user)
+ self.assertDictContainsSubset(self.identity_api.get_user(ref['id']),
+ user_updated)
+ self.assertDictContainsSubset(
+ self.identity_api.get_user_by_name(ref['name'], ref['domain_id']),
+ user_updated)
+
+ def test_get_user_by_name_returns_not_found(self):
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user_by_name,
+ uuid.uuid4().hex,
+ CONF.identity.default_domain_id)
+
+ def test_create_duplicate_user_name_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.assertRaises(exception.Conflict,
+ self.identity_api.create_user,
+ user)
+
+ def test_create_duplicate_user_name_in_different_domains(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+
+ user2 = unit.new_user_ref(name=user1['name'],
+ domain_id=new_domain['id'])
+
+ self.identity_api.create_user(user1)
+ self.identity_api.create_user(user2)
+
+ def test_move_user_between_domains(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ user = unit.new_user_ref(domain_id=domain1['id'])
+ user = self.identity_api.create_user(user)
+ user['domain_id'] = domain2['id']
+ # Update the user asserting that a deprecation warning is emitted
+ with mock.patch(
+ 'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
+ self.identity_api.update_user(user['id'], user)
+ self.assertTrue(mock_dep.called)
+
+ updated_user_ref = self.identity_api.get_user(user['id'])
+ self.assertEqual(domain2['id'], updated_user_ref['domain_id'])
+
+ def test_move_user_between_domains_with_clashing_names_fails(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ # First, create a user in domain1
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ # Now create a user in domain2 with a potentially clashing
+ # name - which should work since we have domain separation
+ user2 = unit.new_user_ref(name=user1['name'],
+ domain_id=domain2['id'])
+ user2 = self.identity_api.create_user(user2)
+ # Now try and move user1 into the 2nd domain - which should
+ # fail since the names clash
+ user1['domain_id'] = domain2['id']
+ self.assertRaises(exception.Conflict,
+ self.identity_api.update_user,
+ user1['id'],
+ user1)
+
+ def test_rename_duplicate_user_name_fails(self):
+ user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user2 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ self.identity_api.create_user(user1)
+ user2 = self.identity_api.create_user(user2)
+ user2['name'] = user1['name']
+ self.assertRaises(exception.Conflict,
+ self.identity_api.update_user,
+ user2['id'],
+ user2)
+
+ def test_update_user_id_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ original_id = user['id']
+ user['id'] = 'fake2'
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ original_id,
+ user)
+ user_ref = self.identity_api.get_user(original_id)
+ self.assertEqual(original_id, user_ref['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user,
+ 'fake2')
+
+ def test_delete_user_with_group_project_domain_links(self):
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ self.assignment_api.create_grant(user_id=user1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(user_id=user1['id'],
+ domain_id=domain1['id'],
+ role_id=role1['id'])
+ self.identity_api.add_user_to_group(user_id=user1['id'],
+ group_id=group1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ user_id=user1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.identity_api.check_user_in_group(
+ user_id=user1['id'],
+ group_id=group1['id'])
+ self.identity_api.delete_user(user1['id'])
+ self.assertRaises(exception.NotFound,
+ self.identity_api.check_user_in_group,
+ user1['id'],
+ group1['id'])
+
+ def test_delete_group_with_user_project_domain_links(self):
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ user1 = unit.new_user_ref(domain_id=domain1['id'])
+ user1 = self.identity_api.create_user(user1)
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+
+ self.assignment_api.create_grant(group_id=group1['id'],
+ project_id=project1['id'],
+ role_id=role1['id'])
+ self.assignment_api.create_grant(group_id=group1['id'],
+ domain_id=domain1['id'],
+ role_id=role1['id'])
+ self.identity_api.add_user_to_group(user_id=user1['id'],
+ group_id=group1['id'])
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ project_id=project1['id'])
+ self.assertEqual(1, len(roles_ref))
+ roles_ref = self.assignment_api.list_grants(
+ group_id=group1['id'],
+ domain_id=domain1['id'])
+ self.assertEqual(1, len(roles_ref))
+ self.identity_api.check_user_in_group(
+ user_id=user1['id'],
+ group_id=group1['id'])
+ self.identity_api.delete_group(group1['id'])
+ self.identity_api.get_user(user1['id'])
+
+ def test_update_user_returns_not_found(self):
+ user_id = uuid.uuid4().hex
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.update_user,
+ user_id,
+ {'id': user_id,
+ 'domain_id': CONF.identity.default_domain_id})
+
+ def test_delete_user_returns_not_found(self):
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.delete_user,
+ uuid.uuid4().hex)
+
+ def test_create_user_long_name_fails(self):
+ user = unit.new_user_ref(name='a' * 256,
+ domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.create_user,
+ user)
+
+ def test_create_user_blank_name_fails(self):
+ user = unit.new_user_ref(name='',
+ domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.create_user,
+ user)
+
+ def test_create_user_missed_password(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.identity_api.get_user(user['id'])
+ # Make sure the user is not allowed to login
+ # with a password that is empty string or None
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=user['id'],
+ password='')
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=user['id'],
+ password=None)
+
+ def test_create_user_none_password(self):
+ user = unit.new_user_ref(password=None,
+ domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.identity_api.get_user(user['id'])
+ # Make sure the user is not allowed to login
+ # with a password that is empty string or None
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=user['id'],
+ password='')
+ self.assertRaises(AssertionError,
+ self.identity_api.authenticate,
+ context={},
+ user_id=user['id'],
+ password=None)
+
+ def test_create_user_invalid_name_fails(self):
+ user = unit.new_user_ref(name=None,
+ domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.create_user,
+ user)
+
+ user = unit.new_user_ref(name=123,
+ domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.create_user,
+ user)
+
+ def test_create_user_invalid_enabled_type_string(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id,
+ # invalid string value
+ enabled='true')
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.create_user,
+ user)
+
+ def test_update_user_long_name_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ user['name'] = 'a' * 256
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ user['id'],
+ user)
+
+ def test_update_user_blank_name_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ user['name'] = ''
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ user['id'],
+ user)
+
+ def test_update_user_invalid_name_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+
+ user['name'] = None
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ user['id'],
+ user)
+
+ user['name'] = 123
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ user['id'],
+ user)
+
+ def test_list_users(self):
+ users = self.identity_api.list_users(
+ domain_scope=self._set_domain_scope(
+ CONF.identity.default_domain_id))
+ self.assertEqual(len(default_fixtures.USERS), len(users))
+ user_ids = set(user['id'] for user in users)
+ expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
+ for user in default_fixtures.USERS)
+ for user_ref in users:
+ self.assertNotIn('password', user_ref)
+ self.assertEqual(expected_user_ids, user_ids)
+
+ def test_list_groups(self):
+ group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group1 = self.identity_api.create_group(group1)
+ group2 = self.identity_api.create_group(group2)
+ groups = self.identity_api.list_groups(
+ domain_scope=self._set_domain_scope(
+ CONF.identity.default_domain_id))
+ self.assertEqual(2, len(groups))
+ group_ids = []
+ for group in groups:
+ group_ids.append(group.get('id'))
+ self.assertIn(group1['id'], group_ids)
+ self.assertIn(group2['id'], group_ids)
+
+ def test_create_user_doesnt_modify_passed_in_dict(self):
+ new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ original_user = new_user.copy()
+ self.identity_api.create_user(new_user)
+ self.assertDictEqual(original_user, new_user)
+
+ def test_update_user_enable(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertTrue(user_ref['enabled'])
+
+ user['enabled'] = False
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertEqual(user['enabled'], user_ref['enabled'])
+
+ # If not present, enabled field should not be updated
+ del user['enabled']
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertFalse(user_ref['enabled'])
+
+ user['enabled'] = True
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertEqual(user['enabled'], user_ref['enabled'])
+
+ del user['enabled']
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertTrue(user_ref['enabled'])
+
+ # Integers are valid Python's booleans. Explicitly test it.
+ user['enabled'] = 0
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertFalse(user_ref['enabled'])
+
+ # Any integers other than 0 are interpreted as True
+ user['enabled'] = -42
+ self.identity_api.update_user(user['id'], user)
+ user_ref = self.identity_api.get_user(user['id'])
+ # NOTE(breton): below, attribute `enabled` is explicitly tested to be
+ # equal True. assertTrue should not be used, because it converts
+ # the passed value to bool().
+ self.assertIs(user_ref['enabled'], True)
+
+ def test_update_user_name(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertEqual(user['name'], user_ref['name'])
+
+ changed_name = user_ref['name'] + '_changed'
+ user_ref['name'] = changed_name
+ updated_user = self.identity_api.update_user(user_ref['id'], user_ref)
+
+ # NOTE(dstanek): the SQL backend adds an 'extra' field containing a
+ # dictionary of the extra fields in addition to the
+ # fields in the object. For the details see:
+ # SqlIdentity.test_update_project_returns_extra
+ updated_user.pop('extra', None)
+
+ self.assertDictEqual(user_ref, updated_user)
+
+ user_ref = self.identity_api.get_user(user_ref['id'])
+ self.assertEqual(changed_name, user_ref['name'])
+
+ def test_update_user_enable_fails(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertTrue(user_ref['enabled'])
+
+ # Strings are not valid boolean values
+ user['enabled'] = 'false'
+ self.assertRaises(exception.ValidationError,
+ self.identity_api.update_user,
+ user['id'],
+ user)
+
+ def test_add_user_to_group(self):
+ domain = self._get_domain_fixture()
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ groups = self.identity_api.list_groups_for_user(new_user['id'])
+
+ found = False
+ for x in groups:
+ if (x['id'] == new_group['id']):
+ found = True
+ self.assertTrue(found)
+
+ def test_add_user_to_group_returns_not_found(self):
+ domain = self._get_domain_fixture()
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.add_user_to_group,
+ new_user['id'],
+ uuid.uuid4().hex)
+
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.add_user_to_group,
+ uuid.uuid4().hex,
+ new_group['id'])
+
+ self.assertRaises(exception.NotFound,
+ self.identity_api.add_user_to_group,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex)
+
+ def test_check_user_in_group(self):
+ domain = self._get_domain_fixture()
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ self.identity_api.check_user_in_group(new_user['id'], new_group['id'])
+
+ def test_check_user_not_in_group(self):
+ new_group = unit.new_group_ref(
+ domain_id=CONF.identity.default_domain_id)
+ new_group = self.identity_api.create_group(new_group)
+
+ new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ new_user = self.identity_api.create_user(new_user)
+
+ self.assertRaises(exception.NotFound,
+ self.identity_api.check_user_in_group,
+ new_user['id'],
+ new_group['id'])
+
+ def test_check_user_in_group_returns_not_found(self):
+ new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ new_user = self.identity_api.create_user(new_user)
+
+ new_group = unit.new_group_ref(
+ domain_id=CONF.identity.default_domain_id)
+ new_group = self.identity_api.create_group(new_group)
+
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.check_user_in_group,
+ uuid.uuid4().hex,
+ new_group['id'])
+
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.check_user_in_group,
+ new_user['id'],
+ uuid.uuid4().hex)
+
+ self.assertRaises(exception.NotFound,
+ self.identity_api.check_user_in_group,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex)
+
+ def test_list_users_in_group(self):
+ domain = self._get_domain_fixture()
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ # Make sure we get an empty list back on a new group, not an error.
+ user_refs = self.identity_api.list_users_in_group(new_group['id'])
+ self.assertEqual([], user_refs)
+ # Make sure we get the correct users back once they have been added
+ # to the group.
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ user_refs = self.identity_api.list_users_in_group(new_group['id'])
+ found = False
+ for x in user_refs:
+ if (x['id'] == new_user['id']):
+ found = True
+ self.assertNotIn('password', x)
+ self.assertTrue(found)
+
+ def test_list_users_in_group_returns_not_found(self):
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.list_users_in_group,
+ uuid.uuid4().hex)
+
+ def test_list_groups_for_user(self):
+ domain = self._get_domain_fixture()
+ test_groups = []
+ test_users = []
+ GROUP_COUNT = 3
+ USER_COUNT = 2
+
+ for x in range(0, USER_COUNT):
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ test_users.append(new_user)
+ positive_user = test_users[0]
+ negative_user = test_users[1]
+
+ for x in range(0, USER_COUNT):
+ group_refs = self.identity_api.list_groups_for_user(
+ test_users[x]['id'])
+ self.assertEqual(0, len(group_refs))
+
+ for x in range(0, GROUP_COUNT):
+ before_count = x
+ after_count = x + 1
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ test_groups.append(new_group)
+
+ # add the user to the group and ensure that the
+ # group count increases by one for each
+ group_refs = self.identity_api.list_groups_for_user(
+ positive_user['id'])
+ self.assertEqual(before_count, len(group_refs))
+ self.identity_api.add_user_to_group(
+ positive_user['id'],
+ new_group['id'])
+ group_refs = self.identity_api.list_groups_for_user(
+ positive_user['id'])
+ self.assertEqual(after_count, len(group_refs))
+
+ # Make sure the group count for the unrelated user did not change
+ group_refs = self.identity_api.list_groups_for_user(
+ negative_user['id'])
+ self.assertEqual(0, len(group_refs))
+
+ # remove the user from each group and ensure that
+ # the group count reduces by one for each
+        for x in range(0, GROUP_COUNT):
+ before_count = GROUP_COUNT - x
+ after_count = GROUP_COUNT - x - 1
+ group_refs = self.identity_api.list_groups_for_user(
+ positive_user['id'])
+ self.assertEqual(before_count, len(group_refs))
+ self.identity_api.remove_user_from_group(
+ positive_user['id'],
+ test_groups[x]['id'])
+ group_refs = self.identity_api.list_groups_for_user(
+ positive_user['id'])
+ self.assertEqual(after_count, len(group_refs))
+ # Make sure the group count for the unrelated user
+ # did not change
+ group_refs = self.identity_api.list_groups_for_user(
+ negative_user['id'])
+ self.assertEqual(0, len(group_refs))
+
+ def test_remove_user_from_group(self):
+ domain = self._get_domain_fixture()
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+ groups = self.identity_api.list_groups_for_user(new_user['id'])
+ self.assertIn(new_group['id'], [x['id'] for x in groups])
+ self.identity_api.remove_user_from_group(new_user['id'],
+ new_group['id'])
+ groups = self.identity_api.list_groups_for_user(new_user['id'])
+ self.assertNotIn(new_group['id'], [x['id'] for x in groups])
+
+ def test_remove_user_from_group_returns_not_found(self):
+ domain = self._get_domain_fixture()
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.remove_user_from_group,
+ new_user['id'],
+ uuid.uuid4().hex)
+
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.remove_user_from_group,
+ uuid.uuid4().hex,
+ new_group['id'])
+
+ self.assertRaises(exception.NotFound,
+ self.identity_api.remove_user_from_group,
+ uuid.uuid4().hex,
+ uuid.uuid4().hex)
+
+ def test_group_crud(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ group = unit.new_group_ref(domain_id=domain['id'])
+ group = self.identity_api.create_group(group)
+ group_ref = self.identity_api.get_group(group['id'])
+ self.assertDictContainsSubset(group, group_ref)
+
+ group['name'] = uuid.uuid4().hex
+ self.identity_api.update_group(group['id'], group)
+ group_ref = self.identity_api.get_group(group['id'])
+ self.assertDictContainsSubset(group, group_ref)
+
+ self.identity_api.delete_group(group['id'])
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.get_group,
+ group['id'])
+
+ def test_get_group_by_name(self):
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group_name = group['name']
+ group = self.identity_api.create_group(group)
+ spoiler = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ self.identity_api.create_group(spoiler)
+
+ group_ref = self.identity_api.get_group_by_name(
+ group_name, CONF.identity.default_domain_id)
+ self.assertDictEqual(group, group_ref)
+
+ def test_get_group_by_name_returns_not_found(self):
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.get_group_by_name,
+ uuid.uuid4().hex,
+ CONF.identity.default_domain_id)
+
+ @unit.skip_if_cache_disabled('identity')
+ def test_cache_layer_group_crud(self):
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+ # cache the result
+ group_ref = self.identity_api.get_group(group['id'])
+ # delete the group bypassing identity api.
+ domain_id, driver, entity_id = (
+ self.identity_api._get_domain_driver_and_entity_id(group['id']))
+ driver.delete_group(entity_id)
+
+ self.assertEqual(group_ref, self.identity_api.get_group(group['id']))
+ self.identity_api.get_group.invalidate(self.identity_api, group['id'])
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.get_group, group['id'])
+
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+ # cache the result
+ self.identity_api.get_group(group['id'])
+ group['name'] = uuid.uuid4().hex
+ group_ref = self.identity_api.update_group(group['id'], group)
+ # after updating through identity api, get updated group
+ self.assertDictContainsSubset(self.identity_api.get_group(group['id']),
+ group_ref)
+
+ def test_create_duplicate_group_name_fails(self):
+ group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id,
+ name=group1['name'])
+ group1 = self.identity_api.create_group(group1)
+ self.assertRaises(exception.Conflict,
+ self.identity_api.create_group,
+ group2)
+
+ def test_create_duplicate_group_name_in_different_domains(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group2 = unit.new_group_ref(domain_id=new_domain['id'],
+ name=group1['name'])
+ group1 = self.identity_api.create_group(group1)
+ group2 = self.identity_api.create_group(group2)
+
+ def test_move_group_between_domains(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ group = unit.new_group_ref(domain_id=domain1['id'])
+ group = self.identity_api.create_group(group)
+ group['domain_id'] = domain2['id']
+ # Update the group asserting that a deprecation warning is emitted
+ with mock.patch(
+ 'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
+ self.identity_api.update_group(group['id'], group)
+ self.assertTrue(mock_dep.called)
+
+ updated_group_ref = self.identity_api.get_group(group['id'])
+ self.assertEqual(domain2['id'], updated_group_ref['domain_id'])
+
+ def test_move_group_between_domains_with_clashing_names_fails(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ # First, create a group in domain1
+ group1 = unit.new_group_ref(domain_id=domain1['id'])
+ group1 = self.identity_api.create_group(group1)
+ # Now create a group in domain2 with a potentially clashing
+ # name - which should work since we have domain separation
+ group2 = unit.new_group_ref(name=group1['name'],
+ domain_id=domain2['id'])
+ group2 = self.identity_api.create_group(group2)
+ # Now try and move group1 into the 2nd domain - which should
+ # fail since the names clash
+ group1['domain_id'] = domain2['id']
+ self.assertRaises(exception.Conflict,
+ self.identity_api.update_group,
+ group1['id'],
+ group1)
+
+ def test_user_crud(self):
+ user_dict = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
+ del user_dict['id']
+ user = self.identity_api.create_user(user_dict)
+ user_ref = self.identity_api.get_user(user['id'])
+ del user_dict['password']
+ user_ref_dict = {x: user_ref[x] for x in user_ref}
+ self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+ user_dict['password'] = uuid.uuid4().hex
+ self.identity_api.update_user(user['id'], user_dict)
+ user_ref = self.identity_api.get_user(user['id'])
+ del user_dict['password']
+ user_ref_dict = {x: user_ref[x] for x in user_ref}
+ self.assertDictContainsSubset(user_dict, user_ref_dict)
+
+ self.identity_api.delete_user(user['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user,
+ user['id'])
+
+ def test_arbitrary_attributes_are_returned_from_create_user(self):
+ attr_value = uuid.uuid4().hex
+ user_data = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id,
+ arbitrary_attr=attr_value)
+
+ user = self.identity_api.create_user(user_data)
+
+ self.assertEqual(attr_value, user['arbitrary_attr'])
+
+ def test_arbitrary_attributes_are_returned_from_get_user(self):
+ attr_value = uuid.uuid4().hex
+ user_data = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id,
+ arbitrary_attr=attr_value)
+
+ user_data = self.identity_api.create_user(user_data)
+
+ user = self.identity_api.get_user(user_data['id'])
+ self.assertEqual(attr_value, user['arbitrary_attr'])
+
+ def test_new_arbitrary_attributes_are_returned_from_update_user(self):
+ user_data = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
+
+ user = self.identity_api.create_user(user_data)
+ attr_value = uuid.uuid4().hex
+ user['arbitrary_attr'] = attr_value
+ updated_user = self.identity_api.update_user(user['id'], user)
+
+ self.assertEqual(attr_value, updated_user['arbitrary_attr'])
+
+ def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
+ attr_value = uuid.uuid4().hex
+ user_data = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id,
+ arbitrary_attr=attr_value)
+
+ new_attr_value = uuid.uuid4().hex
+ user = self.identity_api.create_user(user_data)
+ user['arbitrary_attr'] = new_attr_value
+ updated_user = self.identity_api.update_user(user['id'], user)
+
+ self.assertEqual(new_attr_value, updated_user['arbitrary_attr'])
+
+ def test_user_update_and_user_get_return_same_response(self):
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+
+ user = self.identity_api.create_user(user)
+
+ updated_user = {'enabled': False}
+ updated_user_ref = self.identity_api.update_user(
+ user['id'], updated_user)
+
+ # SQL backend adds 'extra' field
+ updated_user_ref.pop('extra', None)
+
+ self.assertIs(False, updated_user_ref['enabled'])
+
+ user_ref = self.identity_api.get_user(user['id'])
+ self.assertDictEqual(updated_user_ref, user_ref)
+
+
+class FilterTests(filtering.FilterTests):
+ def test_list_entities_filtered(self):
+ for entity in ['user', 'group', 'project']:
+ # Create 20 entities
+ entity_list = self._create_test_data(entity, 20)
+
+            # Try filtering to get an exact item out of the list
+ hints = driver_hints.Hints()
+ hints.add_filter('name', entity_list[10]['name'])
+ entities = self._list_entities(entity)(hints=hints)
+ self.assertEqual(1, len(entities))
+ self.assertEqual(entity_list[10]['id'], entities[0]['id'])
+ # Check the driver has removed the filter from the list hints
+ self.assertFalse(hints.get_exact_filter_by_name('name'))
+ self._delete_test_data(entity, entity_list)
+
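+    # Note: _list_entities(entity) is a filtering.FilterTests helper that
+    # returns the bound list call for the entity type (e.g.
+    # identity_api.list_users for 'user'), which is why it is invoked in the
+    # double-call style _list_entities(entity)(hints=hints).
+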
+ def test_list_users_inexact_filtered(self):
+ # Create 20 users, some with specific names. We set the names at create
+ # time (rather than updating them), since the LDAP driver does not
+ # support name updates.
+ user_name_data = {
+ # user index: name for user
+ 5: 'The',
+ 6: 'The Ministry',
+ 7: 'The Ministry of',
+ 8: 'The Ministry of Silly',
+ 9: 'The Ministry of Silly Walks',
+ # ...and one for useful case insensitivity testing
+ 10: 'The ministry of silly walks OF'
+ }
+ user_list = self._create_test_data(
+ 'user', 20, domain_id=CONF.identity.default_domain_id,
+ name_dict=user_name_data)
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'ministry', comparator='contains')
+ users = self.identity_api.list_users(hints=hints)
+ self.assertEqual(5, len(users))
+ self._match_with_list(users, user_list,
+ list_start=6, list_end=11)
+ # TODO(henry-nash) Check inexact filter has been removed.
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'The', comparator='startswith')
+ users = self.identity_api.list_users(hints=hints)
+ self.assertEqual(6, len(users))
+ self._match_with_list(users, user_list,
+ list_start=5, list_end=11)
+ # TODO(henry-nash) Check inexact filter has been removed.
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'of', comparator='endswith')
+ users = self.identity_api.list_users(hints=hints)
+ self.assertEqual(2, len(users))
+ # We can't assume we will get back the users in any particular order
+ self.assertIn(user_list[7]['id'], [users[0]['id'], users[1]['id']])
+ self.assertIn(user_list[10]['id'], [users[0]['id'], users[1]['id']])
+ # TODO(henry-nash) Check inexact filter has been removed.
+
+ # TODO(henry-nash): Add some case sensitive tests. However,
+ # these would be hard to validate currently, since:
+ #
+    # For SQL, the issue is that MySQL, by default, is installed in
+    # case-insensitive mode (which is how our SQL backend tests run by
+    # default). For production deployments, OpenStack assumes a
+    # case-sensitive database. For these tests, therefore, we need to be
+    # able to check the sensitivity of the database so as to know whether
+    # to run case-sensitive tests here.
+ #
+ # For LDAP/AD, although dependent on the schema being used, attributes
+ # are typically configured to be case aware, but not case sensitive.
+
+ self._delete_test_data('user', user_list)
+
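+    # As a sketch of what the driver saw in the test above: each add_filter
+    # call appends a dict to hints.filters, roughly (per driver_hints
+    # defaults):
+    #
+    #     {'name': 'name', 'value': 'ministry',
+    #      'comparator': 'contains', 'case_sensitive': False}
+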
+ def _groups_for_user_data(self):
+ number_of_groups = 10
+ group_name_data = {
+ # entity index: name for entity
+ 5: 'The',
+ 6: 'The Ministry',
+ 9: 'The Ministry of Silly Walks',
+ }
+ group_list = self._create_test_data(
+ 'group', number_of_groups,
+ domain_id=CONF.identity.default_domain_id,
+ name_dict=group_name_data)
+ user_list = self._create_test_data('user', 2)
+
+ for group in range(7):
+            # Create memberships, including two out of the three groups
+            # with well-known names
+ self.identity_api.add_user_to_group(user_list[0]['id'],
+ group_list[group]['id'])
+ # ...and some spoiler memberships
+ for group in range(7, number_of_groups):
+ self.identity_api.add_user_to_group(user_list[1]['id'],
+ group_list[group]['id'])
+
+ return group_list, user_list
+
+ def test_groups_for_user_inexact_filtered(self):
+ """Test use of filtering doesn't break groups_for_user listing.
+
+ Some backends may use filtering to achieve the list of groups for a
+ user, so test that it can combine a second filter.
+
+ Test Plan:
+
+ - Create 10 groups, some with names we can filter on
+ - Create 2 users
+ - Assign 1 of those users to most of the groups, including some of the
+ well known named ones
+ - Assign the other user to other groups as spoilers
+ - Ensure that when we list groups for users with a filter on the group
+ name, both restrictions have been enforced on what is returned.
+
+ """
+ group_list, user_list = self._groups_for_user_data()
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'Ministry', comparator='contains')
+ groups = self.identity_api.list_groups_for_user(
+ user_list[0]['id'], hints=hints)
+ # We should only get back one group, since of the two that contain
+ # 'Ministry' the user only belongs to one.
+ self.assertThat(len(groups), matchers.Equals(1))
+ self.assertEqual(group_list[6]['id'], groups[0]['id'])
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'The', comparator='startswith')
+ groups = self.identity_api.list_groups_for_user(
+ user_list[0]['id'], hints=hints)
+ # We should only get back 2 out of the 3 groups that start with 'The'
+ # hence showing that both "filters" have been applied
+ self.assertThat(len(groups), matchers.Equals(2))
+ self.assertIn(group_list[5]['id'], [groups[0]['id'], groups[1]['id']])
+ self.assertIn(group_list[6]['id'], [groups[0]['id'], groups[1]['id']])
+
+ hints.add_filter('name', 'The', comparator='endswith')
+ groups = self.identity_api.list_groups_for_user(
+ user_list[0]['id'], hints=hints)
+ # We should only get back one group since it is the only one that
+ # ends with 'The'
+ self.assertThat(len(groups), matchers.Equals(1))
+ self.assertEqual(group_list[5]['id'], groups[0]['id'])
+
+ self._delete_test_data('user', user_list)
+ self._delete_test_data('group', group_list)
+
+ def test_groups_for_user_exact_filtered(self):
+ """Test exact filters doesn't break groups_for_user listing."""
+ group_list, user_list = self._groups_for_user_data()
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'The Ministry', comparator='equals')
+ groups = self.identity_api.list_groups_for_user(
+ user_list[0]['id'], hints=hints)
+ # We should only get back 1 out of the 3 groups with name 'The
+ # Ministry' hence showing that both "filters" have been applied.
+ self.assertEqual(1, len(groups))
+ self.assertEqual(group_list[6]['id'], groups[0]['id'])
+ self._delete_test_data('user', user_list)
+ self._delete_test_data('group', group_list)
+
+ def _get_user_name_field_size(self):
+ """Return the size of the user name field for the backend.
+
+ Subclasses can override this method to indicate that the user name
+ field is limited in length. The user name is the field used in the test
+        that validates that a filter value works even if it's longer than
+        the field.
+
+ If the backend doesn't limit the value length then return None.
+
+ """
+ return None
+
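+    # Illustrative only: a subclass for a backend whose user name column is
+    # capped at, say, 64 characters could override the helper above as:
+    #
+    #     def _get_user_name_field_size(self):
+    #         return 64
+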
+ def test_filter_value_wider_than_field(self):
+ # If a filter value is given that's larger than the field in the
+ # backend then no values are returned.
+
+ user_name_field_size = self._get_user_name_field_size()
+
+ if user_name_field_size is None:
+ # The backend doesn't limit the size of the user name, so pass this
+ # test.
+ return
+
+        # Create some users just to make sure the list would return something
+        # if the filter were ignored.
+ self._create_test_data('user', 2)
+
+ hints = driver_hints.Hints()
+ value = 'A' * (user_name_field_size + 1)
+ hints.add_filter('name', value)
+ users = self.identity_api.list_users(hints=hints)
+ self.assertEqual([], users)
+
+ def _list_users_in_group_data(self):
+ number_of_users = 10
+ user_name_data = {
+ 1: 'Arthur Conan Doyle',
+ 3: 'Arthur Rimbaud',
+ 9: 'Arthur Schopenhauer',
+ }
+ user_list = self._create_test_data(
+ 'user', number_of_users,
+ domain_id=CONF.identity.default_domain_id,
+ name_dict=user_name_data)
+ group = self._create_one_entity(
+ 'group', CONF.identity.default_domain_id, 'Great Writers')
+ for i in range(7):
+ self.identity_api.add_user_to_group(user_list[i]['id'],
+ group['id'])
+
+ return user_list, group
+
+ def test_list_users_in_group_inexact_filtered(self):
+ user_list, group = self._list_users_in_group_data()
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'Arthur', comparator='contains')
+ users = self.identity_api.list_users_in_group(group['id'], hints=hints)
+ self.assertThat(len(users), matchers.Equals(2))
+ self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']])
+ self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']])
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'Arthur', comparator='startswith')
+ users = self.identity_api.list_users_in_group(group['id'], hints=hints)
+ self.assertThat(len(users), matchers.Equals(2))
+ self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']])
+ self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']])
+
+ hints = driver_hints.Hints()
+ hints.add_filter('name', 'Doyle', comparator='endswith')
+ users = self.identity_api.list_users_in_group(group['id'], hints=hints)
+ self.assertThat(len(users), matchers.Equals(1))
+ self.assertEqual(user_list[1]['id'], users[0]['id'])
+
+ self._delete_test_data('user', user_list)
+ self._delete_entity('group')(group['id'])
+
+ def test_list_users_in_group_exact_filtered(self):
+ hints = driver_hints.Hints()
+ user_list, group = self._list_users_in_group_data()
+ hints.add_filter('name', 'Arthur Rimbaud', comparator='equals')
+ users = self.identity_api.list_users_in_group(group['id'], hints=hints)
+ self.assertEqual(1, len(users))
+ self.assertEqual(user_list[3]['id'], users[0]['id'])
+ self._delete_test_data('user', user_list)
+ self._delete_entity('group')(group['id'])
+
+
+class LimitTests(filtering.FilterTests):
+ ENTITIES = ['user', 'group', 'project']
+
+ def setUp(self):
+ """Setup for Limit Test Cases."""
+ self.entity_lists = {}
+
+ for entity in self.ENTITIES:
+ # Create 20 entities
+ self.entity_lists[entity] = self._create_test_data(entity, 20)
+ self.addCleanup(self.clean_up_entities)
+
+ def clean_up_entities(self):
+ """Clean up entity test data from Limit Test Cases."""
+ for entity in self.ENTITIES:
+ self._delete_test_data(entity, self.entity_lists[entity])
+ del self.entity_lists
+
+ def _test_list_entity_filtered_and_limited(self, entity):
+ self.config_fixture.config(list_limit=10)
+ # Should get back just 10 entities
+ hints = driver_hints.Hints()
+ entities = self._list_entities(entity)(hints=hints)
+ self.assertEqual(hints.limit['limit'], len(entities))
+ self.assertTrue(hints.limit['truncated'])
+
+ # Override with driver specific limit
+ if entity == 'project':
+ self.config_fixture.config(group='resource', list_limit=5)
+ else:
+ self.config_fixture.config(group='identity', list_limit=5)
+
+        # Should get back just 5 entities
+ hints = driver_hints.Hints()
+ entities = self._list_entities(entity)(hints=hints)
+ self.assertEqual(hints.limit['limit'], len(entities))
+
+ # Finally, let's pretend we want to get the full list of entities,
+ # even with the limits set, as part of some internal calculation.
+ # Calling the API without a hints list should achieve this, and
+ # return at least the 20 entries we created (there may be other
+ # entities lying around created by other tests/setup).
+ entities = self._list_entities(entity)()
+        self.assertGreaterEqual(len(entities), 20)
+ self._match_with_list(self.entity_lists[entity], entities)
+
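+    # For reference, when a backend truncates a list the hints object carries
+    # the applied limit, matching the assertions above; a sketch of its shape:
+    #
+    #     hints.limit == {'limit': 10, 'truncated': True}
+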
+ def test_list_users_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('user')
+
+ def test_list_groups_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('group')
+
+ def test_list_projects_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('project')
diff --git a/keystone-moon/keystone/tests/unit/identity/test_controllers.py b/keystone-moon/keystone/tests/unit/identity/test_controllers.py
new file mode 100644
index 00000000..ed2fe3ff
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/identity/test_controllers.py
@@ -0,0 +1,65 @@
+# Copyright 2016 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.identity import controllers
+from keystone.tests import unit
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+
+_ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}}
+
+
+class UserTestCaseNoDefaultDomain(unit.TestCase):
+
+ def setUp(self):
+ super(UserTestCaseNoDefaultDomain, self).setUp()
+ self.useFixture(database.Database())
+ self.load_backends()
+ self.user_controller = controllers.User()
+
+ def test_setup(self):
+ # Other tests in this class assume there's no default domain, so make
+ # sure the setUp worked as expected.
+ self.assertRaises(
+ exception.DomainNotFound,
+ self.resource_api.get_domain, CONF.identity.default_domain_id)
+
+ def test_get_users(self):
+ # When list_users is done and there's no default domain, the result is
+ # an empty list.
+ res = self.user_controller.get_users(_ADMIN_CONTEXT)
+ self.assertEqual([], res['users'])
+
+ def test_get_user_by_name(self):
+ # When get_user_by_name is done and there's no default domain, the
+ # result is 404 Not Found
+ user_name = uuid.uuid4().hex
+ self.assertRaises(
+ exception.UserNotFound,
+ self.user_controller.get_user_by_name, _ADMIN_CONTEXT, user_name)
+
+ def test_create_user(self):
+        # When a user is created using the v2 controller and there's no
+        # default domain, it doesn't fail because the domain can't be found
+        # (a default domain is created).
+ user = {'name': uuid.uuid4().hex}
+ self.user_controller.create_user(_ADMIN_CONTEXT, user)
+ # If the above doesn't fail then this is successful.
diff --git a/keystone-moon/keystone/tests/unit/identity/test_core.py b/keystone-moon/keystone/tests/unit/identity/test_core.py
index e9845401..39f3c701 100644
--- a/keystone-moon/keystone/tests/unit/identity/test_core.py
+++ b/keystone-moon/keystone/tests/unit/identity/test_core.py
@@ -138,7 +138,7 @@ class TestDatabaseDomainConfigs(unit.TestCase):
def test_loading_config_from_database(self):
self.config_fixture.config(domain_configurations_from_database=True,
group='identity')
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
# Override two config options for our domain
conf = {'ldap': {'url': uuid.uuid4().hex,
@@ -165,7 +165,7 @@ class TestDatabaseDomainConfigs(unit.TestCase):
# Now turn off using database domain configuration and check that the
# default config file values are now seen instead of the overrides.
CONF.set_override('domain_configurations_from_database', False,
- 'identity')
+ 'identity', enforce_type=True)
domain_config = identity.DomainConfigs()
domain_config.setup_domain_drivers(fake_standard_driver,
self.resource_api)
diff --git a/keystone-moon/keystone/tests/unit/identity_mapping.py b/keystone-moon/keystone/tests/unit/identity_mapping.py
index 7fb8063f..4ba4f0c2 100644
--- a/keystone-moon/keystone/tests/unit/identity_mapping.py
+++ b/keystone-moon/keystone/tests/unit/identity_mapping.py
@@ -17,7 +17,6 @@ from keystone.identity.mapping_backends import sql as mapping_sql
def list_id_mappings():
"""List all id_mappings for testing purposes."""
-
- a_session = sql.get_session()
- refs = a_session.query(mapping_sql.IDMapping).all()
- return [x.to_dict() for x in refs]
+ with sql.session_for_read() as session:
+ refs = session.query(mapping_sql.IDMapping).all()
+ return [x.to_dict() for x in refs]
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py b/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py
index 81b80298..4b914752 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/__init__.py
@@ -11,5 +11,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from keystone.tests.unit.ksfixtures.auth_plugins import ConfigAuthPlugins # noqa
from keystone.tests.unit.ksfixtures.cache import Cache # noqa
from keystone.tests.unit.ksfixtures.key_repository import KeyRepository # noqa
+from keystone.tests.unit.ksfixtures.policy import Policy # noqa
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py b/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py
index ea1e6255..a23b804f 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/appserver.py
@@ -29,8 +29,7 @@ ADMIN = 'admin'
class AppServer(fixtures.Fixture):
- """A fixture for managing an application server instance.
- """
+ """A fixture for managing an application server instance."""
def __init__(self, config, name, cert=None, key=None, ca=None,
cert_required=False, host='127.0.0.1', port=0):
@@ -72,7 +71,8 @@ class AppServer(fixtures.Fixture):
def _update_config_opt(self):
"""Updates the config with the actual port used."""
opt_name = self._get_config_option_for_section_name()
- CONF.set_override(opt_name, self.port, group='eventlet_server')
+ CONF.set_override(opt_name, self.port, group='eventlet_server',
+ enforce_type=True)
def _get_config_option_for_section_name(self):
"""Maps Paster config section names to port option names."""
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/auth_plugins.py b/keystone-moon/keystone/tests/unit/ksfixtures/auth_plugins.py
new file mode 100644
index 00000000..68ba6f3a
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/auth_plugins.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+
+from keystone.common import config as common_cfg
+
+
+class ConfigAuthPlugins(fixtures.Fixture):
+ """A fixture for setting up and tearing down a auth plugins."""
+
+ def __init__(self, config_fixture, methods, **method_classes):
+ super(ConfigAuthPlugins, self).__init__()
+ self.methods = methods
+ self.config_fixture = config_fixture
+ self.method_classes = method_classes
+
+ def setUp(self):
+ super(ConfigAuthPlugins, self).setUp()
+ if self.methods:
+ self.config_fixture.config(group='auth', methods=self.methods)
+ common_cfg.setup_authentication()
+ if self.method_classes:
+ self.config_fixture.config(group='auth', **self.method_classes)
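+
+
+# Typical usage from a test might look like (illustrative, assuming the
+# standard config_fixture is in place):
+#
+#     self.useFixture(ConfigAuthPlugins(self.config_fixture,
+#                                       methods=['password', 'token']))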
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/cache.py b/keystone-moon/keystone/tests/unit/ksfixtures/cache.py
index 74566f1e..e0833ae2 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/cache.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/cache.py
@@ -13,11 +13,17 @@
import fixtures
+from keystone import catalog
from keystone.common import cache
+CACHE_REGIONS = (cache.CACHE_REGION, catalog.COMPUTED_CATALOG_REGION)
+
+
class Cache(fixtures.Fixture):
- """A fixture for setting up and tearing down the cache between test cases.
+ """A fixture for setting up the cache between test cases.
+
+ This will also tear down an existing cache if one is already configured.
"""
def setUp(self):
@@ -29,8 +35,9 @@ class Cache(fixtures.Fixture):
# NOTE(morganfainberg): The only way to reconfigure the CacheRegion
# object on each setUp() call is to remove the .backend property.
- if cache.REGION.is_configured:
- del cache.REGION.backend
+ for region in CACHE_REGIONS:
+ if region.is_configured:
+ del region.backend
- # ensure the cache region instance is setup
- cache.configure_cache_region(cache.REGION)
+            # ensure the cache region instance is set up
+ cache.configure_cache(region=region)
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/database.py b/keystone-moon/keystone/tests/unit/ksfixtures/database.py
index 6f23a99d..52c35cee 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/database.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/database.py
@@ -28,12 +28,13 @@ CONF = cfg.CONF
def run_once(f):
"""A decorator to ensure the decorated function is only executed once.
- The decorated function cannot expect any arguments.
+    The decorated function is assumed to take exactly one parameter.
+
"""
@functools.wraps(f)
- def wrapper():
+ def wrapper(one):
if not wrapper.already_ran:
- f()
+ f(one)
wrapper.already_ran = True
wrapper.already_ran = False
return wrapper
@@ -51,7 +52,7 @@ def initialize_sql_session():
@run_once
-def _load_sqlalchemy_models():
+def _load_sqlalchemy_models(version_specifiers):
"""Find all modules containing SQLAlchemy models and import them.
This creates more consistent, deterministic test runs because tables
@@ -66,6 +67,24 @@ def _load_sqlalchemy_models():
as more models are imported. Importing all models at the start of
the test run avoids this problem.
+ version_specifiers is a dict that contains any specific driver versions
+ that have been requested. The dict is of the form:
+
+ {<module_name> : {'versioned_backend' : <name of backend requested>,
+ 'versionless_backend' : <name of default backend>}
+ }
+
+ For example:
+
+ {'keystone.assignment': {'versioned_backend' : 'V8_backends',
+ 'versionless_backend' : 'backends'},
+ 'keystone.identity': {'versioned_backend' : 'V9_backends',
+ 'versionless_backend' : 'backends'}
+ }
+
+ The version_specifiers will be used to load the correct driver. The
+    algorithm for this assumes that versioned drivers begin with 'V'.
+
"""
keystone_root = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
@@ -78,25 +97,59 @@ def _load_sqlalchemy_models():
# The root will be prefixed with an instance of os.sep, which will
# make the root after replacement '.<root>', the 'keystone' part
# of the module path is always added to the front
- module_name = ('keystone.%s.sql' %
+ module_root = ('keystone.%s' %
root.replace(os.sep, '.').lstrip('.'))
+            module_components = module_root.split('.')
+            module_without_backends = '.'.join(module_components[:-1])
+            this_backend = module_components[-1]
+
+ # At this point module_without_backends might be something like
+ # 'keystone.assignment', while this_backend might be something
+            # like 'V8_backends'.
+
+ if module_without_backends.startswith('keystone.contrib'):
+ # All the sql modules have now been moved into the core tree
+ # so no point in loading these again here (and, in fact, doing
+                # so might break trying to load a versioned driver).
+ continue
+
+ if module_without_backends in version_specifiers:
+ # OK, so there is a request for a specific version of this one.
+ # We therefore should skip any other versioned backend as well
+ # as the non-versioned one.
+ version = version_specifiers[module_without_backends]
+ if ((this_backend != version['versioned_backend'] and
+ this_backend.startswith('V')) or
+ this_backend == version['versionless_backend']):
+ continue
+ else:
+ # No versioned driver requested, so ignore any that are
+ # versioned
+ if this_backend.startswith('V'):
+ continue
+
+ module_name = module_root + '.sql'
__import__(module_name)
class Database(fixtures.Fixture):
- """A fixture for setting up and tearing down a database.
-
- """
+ """A fixture for setting up and tearing down a database."""
- def __init__(self):
+ def __init__(self, version_specifiers=None):
super(Database, self).__init__()
initialize_sql_session()
- _load_sqlalchemy_models()
+ if version_specifiers is None:
+ version_specifiers = {}
+ _load_sqlalchemy_models(version_specifiers)
def setUp(self):
super(Database, self).setUp()
- self.engine = sql.get_engine()
+ with sql.session_for_write() as session:
+ self.engine = session.get_bind()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
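+        # Note: cleanups run in LIFO order, so the tables are dropped before
+        # sql.cleanup() tears down the global engine/session state.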
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py b/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
index 918087ad..9977b206 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/hacking.py
@@ -112,73 +112,6 @@ class HackingCode(fixtures.Fixture):
(8, 8, 'K004'),
]}
- assert_no_translations_for_debug_logging = {
- 'code': """
- import logging
- import logging as stlib_logging
- from keystone.i18n import _
- from keystone.i18n import _ as oslo_i18n
- from oslo_log import log
- from oslo_log import log as oslo_logging
-
- # stdlib logging
- L0 = logging.getLogger()
- L0.debug(_('text'))
- class C:
- def __init__(self):
- L0.debug(oslo_i18n('text', {}))
-
- # stdlib logging w/ alias and specifying a logger
- class C:
- def __init__(self):
- self.L1 = logging.getLogger(__name__)
- def m(self):
- self.L1.debug(
- _('text'), {}
- )
-
- # oslo logging and specifying a logger
- L2 = logging.getLogger(__name__)
- L2.debug(oslo_i18n('text'))
-
- # oslo logging w/ alias
- class C:
- def __init__(self):
- self.L3 = oslo_logging.getLogger()
- self.L3.debug(_('text'))
-
- # translation on a separate line
- msg = _('text')
- L2.debug(msg)
-
- # this should not fail
- if True:
- msg = _('message %s') % X
- L2.error(msg)
- raise TypeError(msg)
- if True:
- msg = 'message'
- L2.debug(msg)
-
- # this should not fail
- if True:
- if True:
- msg = _('message')
- else:
- msg = _('message')
- L2.debug(msg)
- raise Exception(msg)
- """,
- 'expected_errors': [
- (10, 9, 'K005'),
- (13, 17, 'K005'),
- (21, 12, 'K005'),
- (26, 9, 'K005'),
- (32, 22, 'K005'),
- (36, 9, 'K005'),
- ]
- }
-
dict_constructor = {
'code': """
lower_res = {k.lower(): v for k, v in six.iteritems(res[1])}
@@ -219,12 +152,12 @@ class HackingLogging(fixtures.Fixture):
LOG.info(_('text'))
class C:
def __init__(self):
- LOG.warn(oslo_i18n('text', {}))
- LOG.warn(_LW('text', {}))
+ LOG.warning(oslo_i18n('text', {}))
+ LOG.warning(_LW('text', {}))
""",
'expected_errors': [
(3, 9, 'K006'),
- (6, 17, 'K006'),
+ (6, 20, 'K006'),
],
},
{
@@ -287,13 +220,13 @@ class HackingLogging(fixtures.Fixture):
LOG = logging.getLogger()
# ensure the correct helper is being used
- LOG.warn(_LI('this should cause an error'))
+ LOG.warning(_LI('this should cause an error'))
# debug should not allow any helpers either
LOG.debug(_LI('this should cause an error'))
""",
'expected_errors': [
- (4, 9, 'K006'),
+ (4, 12, 'K006'),
(7, 10, 'K005'),
],
},
@@ -302,7 +235,7 @@ class HackingLogging(fixtures.Fixture):
# this should not be an error
L = log.getLogger(__name__)
msg = _('text')
- L.warn(msg)
+ L.warning(msg)
raise Exception(msg)
""",
'expected_errors': [],
@@ -312,7 +245,7 @@ class HackingLogging(fixtures.Fixture):
L = log.getLogger(__name__)
def f():
msg = _('text')
- L2.warn(msg)
+ L2.warning(msg)
something = True # add an extra statement here
raise Exception(msg)
""",
@@ -323,11 +256,11 @@ class HackingLogging(fixtures.Fixture):
LOG = log.getLogger(__name__)
def func():
msg = _('text')
- LOG.warn(msg)
+ LOG.warning(msg)
raise Exception('some other message')
""",
'expected_errors': [
- (4, 13, 'K006'),
+ (4, 16, 'K006'),
],
},
{
@@ -337,7 +270,7 @@ class HackingLogging(fixtures.Fixture):
msg = _('text')
else:
msg = _('text')
- LOG.warn(msg)
+ LOG.warning(msg)
raise Exception(msg)
""",
'expected_errors': [
@@ -350,28 +283,28 @@ class HackingLogging(fixtures.Fixture):
msg = _('text')
else:
msg = _('text')
- LOG.warn(msg)
+ LOG.warning(msg)
""",
'expected_errors': [
- (6, 9, 'K006'),
+ (6, 12, 'K006'),
],
},
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('text')
- LOG.warn(msg)
+ LOG.warning(msg)
raise Exception(msg)
""",
'expected_errors': [
- (3, 9, 'K007'),
+ (3, 12, 'K007'),
],
},
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('text')
- LOG.warn(msg)
+ LOG.warning(msg)
msg = _('something else')
raise Exception(msg)
""",
@@ -381,18 +314,18 @@ class HackingLogging(fixtures.Fixture):
'code': """
LOG = log.getLogger(__name__)
msg = _LW('hello %s') % 'world'
- LOG.warn(msg)
+ LOG.warning(msg)
raise Exception(msg)
""",
'expected_errors': [
- (3, 9, 'K007'),
+ (3, 12, 'K007'),
],
},
{
'code': """
LOG = log.getLogger(__name__)
msg = _LW('hello %s') % 'world'
- LOG.warn(msg)
+ LOG.warning(msg)
""",
'expected_errors': [],
},
@@ -409,3 +342,76 @@ class HackingLogging(fixtures.Fixture):
'expected_errors': [],
},
]
+
+ assert_not_using_deprecated_warn = {
+ 'code': """
+ # Logger.warn has been deprecated in Python3 in favor of
+ # Logger.warning
+ LOG = log.getLogger(__name__)
+ LOG.warn(_LW('text'))
+ """,
+ 'expected_errors': [
+ (4, 9, 'K009'),
+ ],
+ }
+
+ assert_no_translations_for_debug_logging = {
+ 'code': """
+ # stdlib logging
+ L0 = logging.getLogger()
+ L0.debug(_('text'))
+ class C:
+ def __init__(self):
+ L0.debug(oslo_i18n('text', {}))
+
+ # stdlib logging w/ alias and specifying a logger
+ class C:
+ def __init__(self):
+ self.L1 = logging.getLogger(__name__)
+ def m(self):
+ self.L1.debug(
+ _('text'), {}
+ )
+
+ # oslo logging and specifying a logger
+ L2 = logging.getLogger(__name__)
+ L2.debug(oslo_i18n('text'))
+
+ # oslo logging w/ alias
+ class C:
+ def __init__(self):
+ self.L3 = oslo_logging.getLogger()
+ self.L3.debug(_('text'))
+
+ # translation on a separate line
+ msg = _('text')
+ L2.debug(msg)
+
+ # this should not fail
+ if True:
+ msg = _('message %s') % X
+ L2.error(msg)
+ raise TypeError(msg)
+ if True:
+ msg = 'message'
+ L2.debug(msg)
+
+ # this should not fail
+ if True:
+ if True:
+ msg = _('message')
+ else:
+ msg = _('message')
+ L2.debug(msg)
+ raise Exception(msg)
+ """,
+ 'expected_errors': [
+ (3, 9, 'K005'),
+ (6, 17, 'K005'),
+ (14, 12, 'K005'),
+ (19, 9, 'K005'),
+ (25, 22, 'K005'),
+ (29, 9, 'K005'),
+ ]
+ }
+
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py b/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py
index b2cbe067..6cd8cc0b 100644
--- a/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/ldapdb.py
@@ -19,8 +19,7 @@ from keystone.tests.unit import fakeldap
class LDAPDatabase(fixtures.Fixture):
- """A fixture for setting up and tearing down an LDAP database.
- """
+ """A fixture for setting up and tearing down an LDAP database."""
def setUp(self):
super(LDAPDatabase, self).setUp()
diff --git a/keystone-moon/keystone/tests/unit/ksfixtures/policy.py b/keystone-moon/keystone/tests/unit/ksfixtures/policy.py
new file mode 100644
index 00000000..b883f980
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/ksfixtures/policy.py
@@ -0,0 +1,33 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+from oslo_policy import opts
+
+from keystone.policy.backends import rules
+
+
+class Policy(fixtures.Fixture):
+ """A fixture for working with policy configuration."""
+
+    def __init__(self, policy_file, config_fixture):
+        super(Policy, self).__init__()
+        self._policy_file = policy_file
+        self._config_fixture = config_fixture
+
+ def setUp(self):
+ super(Policy, self).setUp()
+ opts.set_defaults(self._config_fixture.conf)
+ self._config_fixture.config(group='oslo_policy',
+ policy_file=self._policy_file)
+ rules.init()
+ self.addCleanup(rules.reset)
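+
+
+# Typical usage might look like (illustrative; pass whatever policy file the
+# test needs enforced):
+#
+#     self.useFixture(Policy('/path/to/policy.json', self.config_fixture))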
diff --git a/keystone-moon/keystone/tests/unit/mapping_fixtures.py b/keystone-moon/keystone/tests/unit/mapping_fixtures.py
index 94b07133..9dc980aa 100644
--- a/keystone-moon/keystone/tests/unit/mapping_fixtures.py
+++ b/keystone-moon/keystone/tests/unit/mapping_fixtures.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -463,6 +465,30 @@ MAPPING_TESTER_REGEX = {
]
}
+
+MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD = {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": "{0}"
+ },
+ {
+ "group": TESTER_GROUP_ID
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName",
+ "any_one_of": [
+ "bwilliams"
+ ]
+ }
+ ]
+ }
+ ]
+}
+
MAPPING_DEVELOPER_REGEX = {
"rules": [
{
@@ -760,7 +786,7 @@ MAPPING_GROUPS_BLACKLIST = {
]
}
-# Excercise all possibilities of user identitfication. Values are hardcoded on
+# Exercise all possibilities of user identification. Values are hardcoded on
# purpose.
MAPPING_USER_IDS = {
"rules": [
@@ -1036,6 +1062,78 @@ MAPPING_WITH_DOMAINID_ONLY = {
]
}
+MAPPING_GROUPS_IDS_WHITELIST = {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ }
+ },
+ {
+ "group_ids": "{1}"
+ },
+ {
+ "group": {
+ "id": "{2}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "name"
+ },
+ {
+ "type": "group_ids",
+ "whitelist": [
+ "abc123", "ghi789", "321cba"
+ ]
+ },
+ {
+ "type": "group"
+ }
+ ]
+ }
+ ]
+}
+
+MAPPING_GROUPS_IDS_BLACKLIST = {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}"
+ }
+ },
+ {
+ "group_ids": "{1}"
+ },
+ {
+ "group": {
+ "id": "{2}"
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "name"
+ },
+ {
+ "type": "group_ids",
+ "blacklist": [
+ "def456"
+ ]
+ },
+ {
+ "type": "group"
+ }
+ ]
+ }
+ ]
+}
+
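+# Worked example: against GROUP_IDS_ASSERTION below (group_ids of
+# 'abc123;def456;ghi789'), the whitelist mapping above keeps abc123 and
+# ghi789, while the blacklist mapping drops only def456, yielding the same
+# pair of groups by different routes.
+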
# Mapping used by tokenless test cases, it maps the domain_name only.
MAPPING_WITH_DOMAINNAME_ONLY = {
'rules': [
@@ -1184,6 +1282,26 @@ MAPPING_GROUPS_WHITELIST_PASS_THROUGH = {
]
}
+MAPPING_BAD_LOCAL_SETUP = {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0}",
+ "domain": {"id": "default"}
+ },
+ "whatisthis": "local"
+ }
+ ],
+ "remote": [
+ {
+ "type": "UserName"
+ }
+ ]
+ }
+ ]
+}
EMPLOYEE_ASSERTION = {
'Email': 'tim@example.com',
@@ -1310,3 +1428,59 @@ UNMATCHED_GROUP_ASSERTION = {
'REMOTE_USER': 'Any Momoose',
'REMOTE_USER_GROUPS': 'EXISTS;NO_EXISTS'
}
+
+GROUP_IDS_ASSERTION = {
+ 'name': 'opilotte',
+ 'group_ids': 'abc123;def456;ghi789',
+ 'group': 'klm012'
+}
+
+GROUP_IDS_ASSERTION_ONLY_ONE_GROUP = {
+ 'name': 'opilotte',
+ 'group_ids': '321cba',
+ 'group': '210mlk'
+}
+
+UNICODE_NAME_ASSERTION = {
+ 'PFX_Email': 'jon@example.com',
+ 'PFX_UserName': 'jonkare',
+    'PFX_FirstName': 'Jon Kåre',
+ 'PFX_LastName': 'Hellån',
+ 'PFX_orgPersonType': 'Admin;Chief'
+}
+
+MAPPING_UNICODE = {
+ "rules": [
+ {
+ "local": [
+ {
+ "user": {
+ "name": "{0} {1}",
+ "email": "{2}"
+ },
+ "group": {
+ "id": EMPLOYEE_GROUP_ID
+ }
+ }
+ ],
+ "remote": [
+ {
+ "type": "PFX_FirstName"
+ },
+ {
+ "type": "PFX_LastName"
+ },
+ {
+ "type": "PFX_Email"
+ },
+ {
+ "type": "PFX_orgPersonType",
+ "any_one_of": [
+ "Admin",
+ "Big Cheese"
+ ]
+ }
+ ]
+ },
+ ],
+}
diff --git a/keystone-moon/keystone/tests/unit/policy/__init__.py b/keystone-moon/keystone/tests/unit/policy/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/policy/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/policy/test_backends.py b/keystone-moon/keystone/tests/unit/policy/test_backends.py
new file mode 100644
index 00000000..7b672420
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/policy/test_backends.py
@@ -0,0 +1,86 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystone import exception
+from keystone.tests import unit
+
+
+class PolicyTests(object):
+ def test_create(self):
+ ref = unit.new_policy_ref()
+ res = self.policy_api.create_policy(ref['id'], ref)
+ self.assertDictEqual(ref, res)
+
+ def test_get(self):
+ ref = unit.new_policy_ref()
+ res = self.policy_api.create_policy(ref['id'], ref)
+
+ res = self.policy_api.get_policy(ref['id'])
+ self.assertDictEqual(ref, res)
+
+ def test_list(self):
+ ref = unit.new_policy_ref()
+ self.policy_api.create_policy(ref['id'], ref)
+
+ res = self.policy_api.list_policies()
+ res = [x for x in res if x['id'] == ref['id']][0]
+ self.assertDictEqual(ref, res)
+
+ def test_update(self):
+ ref = unit.new_policy_ref()
+ self.policy_api.create_policy(ref['id'], ref)
+ orig = ref
+
+ ref = unit.new_policy_ref()
+
+ # (cannot change policy ID)
+ self.assertRaises(exception.ValidationError,
+ self.policy_api.update_policy,
+ orig['id'],
+ ref)
+
+ ref['id'] = orig['id']
+ res = self.policy_api.update_policy(orig['id'], ref)
+ self.assertDictEqual(ref, res)
+
+ def test_delete(self):
+ ref = unit.new_policy_ref()
+ self.policy_api.create_policy(ref['id'], ref)
+
+ self.policy_api.delete_policy(ref['id'])
+ self.assertRaises(exception.PolicyNotFound,
+ self.policy_api.delete_policy,
+ ref['id'])
+ self.assertRaises(exception.PolicyNotFound,
+ self.policy_api.get_policy,
+ ref['id'])
+ res = self.policy_api.list_policies()
+        self.assertEqual([], [x for x in res if x['id'] == ref['id']])
+
+ def test_get_policy_returns_not_found(self):
+ self.assertRaises(exception.PolicyNotFound,
+ self.policy_api.get_policy,
+ uuid.uuid4().hex)
+
+ def test_update_policy_returns_not_found(self):
+ ref = unit.new_policy_ref()
+ self.assertRaises(exception.PolicyNotFound,
+ self.policy_api.update_policy,
+ ref['id'],
+ ref)
+
+ def test_delete_policy_returns_not_found(self):
+ self.assertRaises(exception.PolicyNotFound,
+ self.policy_api.delete_policy,
+ uuid.uuid4().hex)
diff --git a/keystone-moon/keystone/tests/unit/resource/__init__.py b/keystone-moon/keystone/tests/unit/resource/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/resource/backends/__init__.py b/keystone-moon/keystone/tests/unit/resource/backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/backends/__init__.py
diff --git a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py b/keystone-moon/keystone/tests/unit/resource/backends/test_sql.py
index b9df1a55..79ad3df2 100644
--- a/keystone-moon/keystone/common/sql/migrate_repo/versions/067_drop_redundant_mysql_index.py
+++ b/keystone-moon/keystone/tests/unit/resource/backends/test_sql.py
@@ -10,16 +10,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy
+from keystone.resource.backends import sql
+from keystone.tests import unit
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit.resource import test_backends
-def upgrade(migrate_engine):
- # NOTE(viktors): Migration 062 removed FK from `assignment` table, but
- # MySQL silently creates indexes on FK constraints, so we should remove
- # this index manually.
- if migrate_engine.name == 'mysql':
- meta = sqlalchemy.MetaData(bind=migrate_engine)
- table = sqlalchemy.Table('assignment', meta, autoload=True)
- for index in table.indexes:
- if [c.name for c in index.columns] == ['role_id']:
- index.drop(migrate_engine)
+class TestSqlResourceDriver(unit.BaseTestCase,
+ test_backends.ResourceDriverTests):
+ def setUp(self):
+ super(TestSqlResourceDriver, self).setUp()
+ self.useFixture(database.Database())
+ self.driver = sql.Resource()
diff --git a/keystone-moon/keystone/tests/unit/resource/config_backends/__init__.py b/keystone-moon/keystone/tests/unit/resource/config_backends/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/config_backends/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/resource/config_backends/test_sql.py b/keystone-moon/keystone/tests/unit/resource/config_backends/test_sql.py
new file mode 100644
index 00000000..b4c5f262
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/config_backends/test_sql.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.common import sql
+from keystone.resource.config_backends import sql as config_sql
+from keystone.tests import unit
+from keystone.tests.unit.backend import core_sql
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit.resource import test_core
+
+
+class SqlDomainConfigModels(core_sql.BaseBackendSqlModels):
+
+ def test_whitelisted_model(self):
+ cols = (('domain_id', sql.String, 64),
+ ('group', sql.String, 255),
+ ('option', sql.String, 255),
+ ('value', sql.JsonBlob, None))
+ self.assertExpectedSchema('whitelisted_config', cols)
+
+ def test_sensitive_model(self):
+ cols = (('domain_id', sql.String, 64),
+ ('group', sql.String, 255),
+ ('option', sql.String, 255),
+ ('value', sql.JsonBlob, None))
+ self.assertExpectedSchema('sensitive_config', cols)
+
+
+class SqlDomainConfigDriver(unit.BaseTestCase,
+ test_core.DomainConfigDriverTests):
+ def setUp(self):
+ super(SqlDomainConfigDriver, self).setUp()
+ self.useFixture(database.Database())
+ self.driver = config_sql.DomainConfig()
+
+
+class SqlDomainConfig(core_sql.BaseBackendSqlTests,
+ test_core.DomainConfigTests):
+ def setUp(self):
+ super(SqlDomainConfig, self).setUp()
+ # test_core.DomainConfigTests is effectively a mixin class, so make
+ # sure we call its setup
+ test_core.DomainConfigTests.setUp(self)
diff --git a/keystone-moon/keystone/tests/unit/resource/test_backends.py b/keystone-moon/keystone/tests/unit/resource/test_backends.py
new file mode 100644
index 00000000..eed4c6ba
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/test_backends.py
@@ -0,0 +1,1669 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import mock
+from oslo_config import cfg
+from six.moves import range
+from testtools import matchers
+
+from keystone.common import driver_hints
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import utils as test_utils
+
+
+CONF = cfg.CONF
+
+
+class ResourceTests(object):
+
+ domain_count = len(default_fixtures.DOMAINS)
+
+ def test_get_project(self):
+ tenant_ref = self.resource_api.get_project(self.tenant_bar['id'])
+ self.assertDictEqual(self.tenant_bar, tenant_ref)
+
+ def test_get_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ uuid.uuid4().hex)
+
+ def test_get_project_by_name(self):
+ tenant_ref = self.resource_api.get_project_by_name(
+ self.tenant_bar['name'],
+ CONF.identity.default_domain_id)
+ self.assertDictEqual(self.tenant_bar, tenant_ref)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_get_project_by_name_for_project_acting_as_a_domain(self):
+        """Test that get_project_by_name works when the domain_id is None."""
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, is_domain=False)
+ project = self.resource_api.create_project(project['id'], project)
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project_by_name,
+ project['name'],
+ None)
+
+ # Test that querying with domain_id as None will find the project
+        # acting as a domain, even if its name is the same as the regular
+ # project above.
+ project2 = unit.new_project_ref(is_domain=True,
+ name=project['name'])
+ project2 = self.resource_api.create_project(project2['id'], project2)
+
+ project_ref = self.resource_api.get_project_by_name(
+ project2['name'], None)
+
+ self.assertEqual(project2, project_ref)
+
+ def test_get_project_by_name_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project_by_name,
+ uuid.uuid4().hex,
+ CONF.identity.default_domain_id)
+
+ def test_create_duplicate_project_id_fails(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project_id = project['id']
+ self.resource_api.create_project(project_id, project)
+ project['name'] = 'fake2'
+ self.assertRaises(exception.Conflict,
+ self.resource_api.create_project,
+ project_id,
+ project)
+
+ def test_create_duplicate_project_name_fails(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project_id = project['id']
+ self.resource_api.create_project(project_id, project)
+ project['id'] = 'fake2'
+ self.assertRaises(exception.Conflict,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_create_duplicate_project_name_in_different_domains(self):
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project2 = unit.new_project_ref(name=project1['name'],
+ domain_id=new_domain['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ self.resource_api.create_project(project2['id'], project2)
+
+ def test_move_project_between_domains(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project['id'], project)
+ project['domain_id'] = domain2['id']
+ # Update the project asserting that a deprecation warning is emitted
+ with mock.patch(
+ 'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
+ self.resource_api.update_project(project['id'], project)
+ self.assertTrue(mock_dep.called)
+
+ updated_project_ref = self.resource_api.get_project(project['id'])
+ self.assertEqual(domain2['id'], updated_project_ref['domain_id'])
+
+ def test_move_project_between_domains_with_clashing_names_fails(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ # First, create a project in domain1
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ # Now create a project in domain2 with a potentially clashing
+ # name - which should work since we have domain separation
+ project2 = unit.new_project_ref(name=project1['name'],
+ domain_id=domain2['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ # Now try and move project1 into the 2nd domain - which should
+ # fail since the names clash
+ project1['domain_id'] = domain2['id']
+ self.assertRaises(exception.Conflict,
+ self.resource_api.update_project,
+ project1['id'],
+ project1)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_move_project_with_children_between_domains_fails(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project['id'], project)
+ child_project = unit.new_project_ref(domain_id=domain1['id'],
+ parent_id=project['id'])
+ self.resource_api.create_project(child_project['id'], child_project)
+ project['domain_id'] = domain2['id']
+
+ # Update is not allowed, since updating the whole subtree would be
+ # necessary
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_move_project_not_root_between_domains_fails(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ project = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project['id'], project)
+ child_project = unit.new_project_ref(domain_id=domain1['id'],
+ parent_id=project['id'])
+ self.resource_api.create_project(child_project['id'], child_project)
+ child_project['domain_id'] = domain2['id']
+
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ child_project['id'],
+ child_project)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_move_root_project_between_domains_succeeds(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+ root_project = unit.new_project_ref(domain_id=domain1['id'])
+ root_project = self.resource_api.create_project(root_project['id'],
+ root_project)
+
+ root_project['domain_id'] = domain2['id']
+ self.resource_api.update_project(root_project['id'], root_project)
+ project_from_db = self.resource_api.get_project(root_project['id'])
+
+ self.assertEqual(domain2['id'], project_from_db['domain_id'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_update_domain_id_project_is_domain_fails(self):
+ other_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(other_domain['id'], other_domain)
+ project = unit.new_project_ref(is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+ project['domain_id'] = other_domain['id']
+
+ # Update of domain_id of projects acting as domains is not allowed
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ def test_rename_duplicate_project_name_fails(self):
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project2 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project1['id'], project1)
+ self.resource_api.create_project(project2['id'], project2)
+ project2['name'] = project1['name']
+ self.assertRaises(exception.Error,
+ self.resource_api.update_project,
+ project2['id'],
+ project2)
+
+ def test_update_project_id_does_nothing(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project_id = project['id']
+ self.resource_api.create_project(project['id'], project)
+ project['id'] = 'fake2'
+ self.resource_api.update_project(project_id, project)
+ project_ref = self.resource_api.get_project(project_id)
+ self.assertEqual(project_id, project_ref['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ 'fake2')
+
+ def test_delete_domain_with_user_group_project_links(self):
+        # TODO(chungg): add a test case once the expected behaviour is defined
+ pass
+
+ def test_update_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.update_project,
+ uuid.uuid4().hex,
+ dict())
+
+ def test_delete_project_returns_not_found(self):
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.delete_project,
+ uuid.uuid4().hex)
+
+ def test_create_update_delete_unicode_project(self):
+ unicode_project_name = u'name \u540d\u5b57'
+ project = unit.new_project_ref(
+ name=unicode_project_name,
+ domain_id=CONF.identity.default_domain_id)
+ project = self.resource_api.create_project(project['id'], project)
+ self.resource_api.update_project(project['id'], project)
+ self.resource_api.delete_project(project['id'])
+
+ def test_create_project_with_no_enabled_field(self):
+ ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
+ del ref['enabled']
+ self.resource_api.create_project(ref['id'], ref)
+
+ project = self.resource_api.get_project(ref['id'])
+ self.assertIs(project['enabled'], True)
+
+ def test_create_project_long_name_fails(self):
+ project = unit.new_project_ref(
+ name='a' * 65, domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_create_project_blank_name_fails(self):
+ project = unit.new_project_ref(
+ name='', domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_create_project_invalid_name_fails(self):
+ project = unit.new_project_ref(
+ name=None, domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+ project = unit.new_project_ref(
+ name=123, domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_update_project_blank_name_fails(self):
+ project = unit.new_project_ref(
+ name='fake1', domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ project['name'] = ''
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ def test_update_project_long_name_fails(self):
+ project = unit.new_project_ref(
+ name='fake1', domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ project['name'] = 'a' * 65
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ def test_update_project_invalid_name_fails(self):
+ project = unit.new_project_ref(
+ name='fake1', domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ project['name'] = None
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ project['name'] = 123
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ def test_update_project_invalid_enabled_type_string(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertTrue(project_ref['enabled'])
+
+ # Strings are not valid boolean values
+ project['enabled'] = "false"
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ project['id'],
+ project)
+
+ def test_create_project_invalid_enabled_type_string(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ # invalid string value
+ enabled="true")
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_create_project_invalid_domain_id(self):
+ project = unit.new_project_ref(domain_id=uuid.uuid4().hex)
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_list_domains(self):
+ domain1 = unit.new_domain_ref()
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ self.resource_api.create_domain(domain2['id'], domain2)
+ domains = self.resource_api.list_domains()
+ self.assertEqual(3, len(domains))
+        domain_ids = [domain.get('id') for domain in domains]
+ self.assertIn(CONF.identity.default_domain_id, domain_ids)
+ self.assertIn(domain1['id'], domain_ids)
+ self.assertIn(domain2['id'], domain_ids)
+
+ def test_list_projects(self):
+ project_refs = self.resource_api.list_projects()
+ project_count = len(default_fixtures.TENANTS) + self.domain_count
+ self.assertEqual(project_count, len(project_refs))
+ for project in default_fixtures.TENANTS:
+ self.assertIn(project, project_refs)
+
+ def test_list_projects_with_multiple_filters(self):
+ # Create a project
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project = self.resource_api.create_project(project['id'], project)
+
+        # Build driver hints with the project's name and a nonexistent
+        # description
+ hints = driver_hints.Hints()
+ hints.add_filter('name', project['name'])
+ hints.add_filter('description', uuid.uuid4().hex)
+
+ # Retrieve projects based on hints and check an empty list is returned
+ projects = self.resource_api.list_projects(hints)
+ self.assertEqual([], projects)
+
+ # Build correct driver hints
+ hints = driver_hints.Hints()
+ hints.add_filter('name', project['name'])
+ hints.add_filter('description', project['description'])
+
+ # Retrieve projects based on hints
+ projects = self.resource_api.list_projects(hints)
+
+ # Check that the returned list contains only the first project
+ self.assertEqual(1, len(projects))
+ self.assertEqual(project, projects[0])
+
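+    # driver_hints.Hints behaves as an AND-combined filter list in the test
+    # above; a minimal sketch of the pattern (hypothetical values):
+    #
+    #     hints = driver_hints.Hints()
+    #     hints.add_filter('name', 'demo')
+    #     hints.add_filter('enabled', True)
+    #     # both filters must match for a project to be returned
+    #     projects = self.resource_api.list_projects(hints)
+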
+ def test_list_projects_for_domain(self):
+ project_ids = ([x['id'] for x in
+ self.resource_api.list_projects_in_domain(
+ CONF.identity.default_domain_id)])
+ # Only the projects from the default fixtures are expected, since
+ # filtering by domain does not include any project that acts as a
+ # domain.
+ self.assertThat(
+ project_ids, matchers.HasLength(len(default_fixtures.TENANTS)))
+ self.assertIn(self.tenant_bar['id'], project_ids)
+ self.assertIn(self.tenant_baz['id'], project_ids)
+ self.assertIn(self.tenant_mtu['id'], project_ids)
+ self.assertIn(self.tenant_service['id'], project_ids)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_list_projects_acting_as_domain(self):
+ initial_domains = self.resource_api.list_domains()
+
+ # Creating 5 projects that act as domains
+ new_projects_acting_as_domains = []
+ for i in range(5):
+ project = unit.new_project_ref(is_domain=True)
+ project = self.resource_api.create_project(project['id'], project)
+ new_projects_acting_as_domains.append(project)
+
+        # Create a few regular projects to ensure they don't mess with the
+        # ones that act as domains
+ self._create_projects_hierarchy(hierarchy_size=2)
+
+ projects = self.resource_api.list_projects_acting_as_domain()
+ expected_number_projects = (
+ len(initial_domains) + len(new_projects_acting_as_domains))
+ self.assertEqual(expected_number_projects, len(projects))
+ for project in new_projects_acting_as_domains:
+ self.assertIn(project, projects)
+ for domain in initial_domains:
+ self.assertIn(domain['id'], [p['id'] for p in projects])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_list_projects_for_alternate_domain(self):
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain1['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ project_ids = ([x['id'] for x in
+ self.resource_api.list_projects_in_domain(
+ domain1['id'])])
+ self.assertEqual(2, len(project_ids))
+ self.assertIn(project1['id'], project_ids)
+ self.assertIn(project2['id'], project_ids)
+
+ def _create_projects_hierarchy(self, hierarchy_size=2,
+ domain_id=None,
+ is_domain=False,
+ parent_project_id=None):
+        """Create a project hierarchy with the specified size.
+
+ :param hierarchy_size: the desired hierarchy size, default is 2 -
+ a project with one child.
+ :param domain_id: domain where the projects hierarchy will be created.
+ :param is_domain: if the hierarchy will have the is_domain flag active
+ or not.
+ :param parent_project_id: if the intention is to create a
+ sub-hierarchy, sets the sub-hierarchy root. Defaults to creating
+ a new hierarchy, i.e. a new root project.
+
+        :returns: a list of the projects in the created hierarchy.
+
+ """
+ if domain_id is None:
+ domain_id = CONF.identity.default_domain_id
+ if parent_project_id:
+ project = unit.new_project_ref(parent_id=parent_project_id,
+ domain_id=domain_id,
+ is_domain=is_domain)
+ else:
+ project = unit.new_project_ref(domain_id=domain_id,
+ is_domain=is_domain)
+ project_id = project['id']
+ project = self.resource_api.create_project(project_id, project)
+
+ projects = [project]
+ for i in range(1, hierarchy_size):
+ new_project = unit.new_project_ref(parent_id=project_id,
+ domain_id=domain_id)
+
+ self.resource_api.create_project(new_project['id'], new_project)
+ projects.append(new_project)
+ project_id = new_project['id']
+
+ return projects
+
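+    # A quick sketch of how the helper above is typically used (illustrative
+    # only; the variable names are hypothetical):
+    #
+    #     hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+    #     root, leaf = hierarchy[0], hierarchy[-1]
+    #     # root is a new project in the requested domain; leaf sits at the
+    #     # bottom of the 3-level chain.
+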
+ @unit.skip_if_no_multiple_domains_support
+ def test_create_domain_with_project_api(self):
+ project = unit.new_project_ref(is_domain=True)
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertTrue(ref['is_domain'])
+ self.resource_api.get_domain(ref['id'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_project_as_a_domain_uniqueness_constraints(self):
+        """Test project name uniqueness for projects acting as domains.
+
+ If it is a project acting as a domain, we can't have two or more with
+ the same name.
+
+ """
+ # Create two projects acting as a domain
+ project = unit.new_project_ref(is_domain=True)
+ project = self.resource_api.create_project(project['id'], project)
+ project2 = unit.new_project_ref(is_domain=True)
+ project2 = self.resource_api.create_project(project2['id'], project2)
+
+ # All projects acting as domains have a null domain_id, so should not
+ # be able to create another with the same name but a different
+ # project ID.
+ new_project = project.copy()
+ new_project['id'] = uuid.uuid4().hex
+
+ self.assertRaises(exception.Conflict,
+ self.resource_api.create_project,
+ new_project['id'],
+ new_project)
+
+ # We also should not be able to update one to have a name clash
+ project2['name'] = project['name']
+ self.assertRaises(exception.Conflict,
+ self.resource_api.update_project,
+ project2['id'],
+ project2)
+
+ # But updating it to a unique name is OK
+ project2['name'] = uuid.uuid4().hex
+ self.resource_api.update_project(project2['id'], project2)
+
+        # Finally, it should be OK to create a project with the same name as
+        # one of these acting as a domain, as long as it is a regular project
+ project3 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, name=project2['name'])
+ self.resource_api.create_project(project3['id'], project3)
+ # In fact, it should be OK to create such a project in the domain which
+ # has the matching name.
+ # TODO(henry-nash): Once we fully support projects acting as a domain,
+ # add a test here to create a sub-project with a name that matches its
+ # project acting as a domain
+
+ @unit.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for sub projects acting as domains support')
+ def test_is_domain_sub_project_has_parent_domain_id(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+
+ sub_project = unit.new_project_ref(domain_id=project['id'],
+ parent_id=project['id'],
+ is_domain=True)
+
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+ self.assertTrue(ref['is_domain'])
+ self.assertEqual(project['id'], ref['parent_id'])
+ self.assertEqual(project['id'], ref['domain_id'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_delete_domain_with_project_api(self):
+ project = unit.new_project_ref(domain_id=None,
+ is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+
+ # Check that a corresponding domain was created
+ self.resource_api.get_domain(project['id'])
+
+ # Try to delete the enabled project that acts as a domain
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.delete_project,
+ project['id'])
+
+ # Disable the project
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+
+ # Successfully delete the project
+ self.resource_api.delete_project(project['id'])
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project['id'])
+
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ project['id'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_create_subproject_acting_as_domain_fails(self):
+ root_project = unit.new_project_ref(is_domain=True)
+ self.resource_api.create_project(root_project['id'], root_project)
+
+ sub_project = unit.new_project_ref(is_domain=True,
+ parent_id=root_project['id'])
+
+ # Creation of sub projects acting as domains is not allowed yet
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ sub_project['id'], sub_project)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_create_domain_under_regular_project_hierarchy_fails(self):
+ # Projects acting as domains can't have a regular project as parent
+ projects_hierarchy = self._create_projects_hierarchy()
+ parent = projects_hierarchy[1]
+ project = unit.new_project_ref(domain_id=parent['id'],
+ parent_id=parent['id'],
+ is_domain=True)
+
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project['id'], project)
+
+ @unit.skip_if_no_multiple_domains_support
+ @test_utils.wip('waiting for sub projects acting as domains support')
+ def test_create_project_under_domain_hierarchy(self):
+ projects_hierarchy = self._create_projects_hierarchy(is_domain=True)
+ parent = projects_hierarchy[1]
+ project = unit.new_project_ref(domain_id=parent['id'],
+ parent_id=parent['id'],
+ is_domain=False)
+
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertFalse(ref['is_domain'])
+ self.assertEqual(parent['id'], ref['parent_id'])
+ self.assertEqual(parent['id'], ref['domain_id'])
+
+ def test_create_project_without_is_domain_flag(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ del project['is_domain']
+ ref = self.resource_api.create_project(project['id'], project)
+ # The is_domain flag should be False by default
+ self.assertFalse(ref['is_domain'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_create_project_passing_is_domain_flag_true(self):
+ project = unit.new_project_ref(is_domain=True)
+
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertTrue(ref['is_domain'])
+
+ def test_create_project_passing_is_domain_flag_false(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, is_domain=False)
+
+ ref = self.resource_api.create_project(project['id'], project)
+ self.assertIs(False, ref['is_domain'])
+
+ @test_utils.wip('waiting for support for parent_id to imply domain_id')
+ def test_create_project_with_parent_id_and_without_domain_id(self):
+ # First create a domain
+ project = unit.new_project_ref(is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+ # Now create a child by just naming the parent_id
+ sub_project = unit.new_project_ref(parent_id=project['id'])
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+
+        # The domain_id should be set to the parent's domain_id
+ self.assertEqual(project['domain_id'], ref['domain_id'])
+
+ def test_create_project_with_domain_id_and_without_parent_id(self):
+ # First create a domain
+ project = unit.new_project_ref(is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+ # Now create a child by just naming the domain_id
+ sub_project = unit.new_project_ref(domain_id=project['id'])
+ ref = self.resource_api.create_project(sub_project['id'], sub_project)
+
+ # The parent_id and domain_id should be set to the id of the project
+ # acting as a domain
+ self.assertEqual(project['id'], ref['parent_id'])
+ self.assertEqual(project['id'], ref['domain_id'])
+
+ def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
+ # First create a domain
+ project = unit.new_project_ref(is_domain=True)
+ self.resource_api.create_project(project['id'], project)
+ # Now try to create a child with the above as its parent, but
+ # specifying a different domain.
+ sub_project = unit.new_project_ref(
+ parent_id=project['id'], domain_id=CONF.identity.default_domain_id)
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ sub_project['id'], sub_project)
+
+ def test_check_leaf_projects(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project = projects_hierarchy[0]
+ leaf_project = projects_hierarchy[1]
+
+ self.assertFalse(self.resource_api.is_leaf_project(
+ root_project['id']))
+ self.assertTrue(self.resource_api.is_leaf_project(
+ leaf_project['id']))
+
+ # Delete leaf_project
+ self.resource_api.delete_project(leaf_project['id'])
+
+        # Now, root_project should be a leaf
+ self.assertTrue(self.resource_api.is_leaf_project(
+ root_project['id']))
+
+ def test_list_projects_in_subtree(self):
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ project1 = projects_hierarchy[0]
+ project2 = projects_hierarchy[1]
+ project3 = projects_hierarchy[2]
+ project4 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=project2['id'])
+ self.resource_api.create_project(project4['id'], project4)
+
+ subtree = self.resource_api.list_projects_in_subtree(project1['id'])
+ self.assertEqual(3, len(subtree))
+ self.assertIn(project2, subtree)
+ self.assertIn(project3, subtree)
+ self.assertIn(project4, subtree)
+
+ subtree = self.resource_api.list_projects_in_subtree(project2['id'])
+ self.assertEqual(2, len(subtree))
+ self.assertIn(project3, subtree)
+ self.assertIn(project4, subtree)
+
+ subtree = self.resource_api.list_projects_in_subtree(project3['id'])
+ self.assertEqual(0, len(subtree))
+
+ def test_list_projects_in_subtree_with_circular_reference(self):
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project1 = self.resource_api.create_project(project1['id'], project1)
+
+ project2 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=project1['id'])
+ self.resource_api.create_project(project2['id'], project2)
+
+ project1['parent_id'] = project2['id'] # Adds cyclic reference
+
+        # NOTE(dstanek): The manager does not allow parent_id to be updated.
+        # Instead, we use the driver directly to create the cyclic
+        # reference.
+ self.resource_api.driver.update_project(project1['id'], project1)
+
+ subtree = self.resource_api.list_projects_in_subtree(project1['id'])
+
+ # NOTE(dstanek): If a cyclic reference is detected the code bails
+ # and returns None instead of falling into the infinite
+ # recursion trap.
+ self.assertIsNone(subtree)
+
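+    # A minimal sketch of the guard the NOTE above describes (assuming a
+    # visited-set walk; the actual manager logic may differ):
+    #
+    #     def _walk(project_id, examined):
+    #         if project_id in examined:
+    #             return None  # cycle detected: bail instead of recursing
+    #         examined.add(project_id)
+    #         ...
+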
+ def test_list_projects_in_subtree_invalid_project_id(self):
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.list_projects_in_subtree,
+ None)
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.list_projects_in_subtree,
+ uuid.uuid4().hex)
+
+ def test_list_project_parents(self):
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ project1 = projects_hierarchy[0]
+ project2 = projects_hierarchy[1]
+ project3 = projects_hierarchy[2]
+ project4 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=project2['id'])
+ self.resource_api.create_project(project4['id'], project4)
+
+ parents1 = self.resource_api.list_project_parents(project3['id'])
+ self.assertEqual(3, len(parents1))
+ self.assertIn(project1, parents1)
+ self.assertIn(project2, parents1)
+
+ parents2 = self.resource_api.list_project_parents(project4['id'])
+ self.assertEqual(parents1, parents2)
+
+ parents = self.resource_api.list_project_parents(project1['id'])
+ # It has the default domain as parent
+ self.assertEqual(1, len(parents))
+
+ def test_update_project_enabled_cascade(self):
+        """Test update_project with cascade=True.
+
+        Ensure the enabled attribute is correctly updated across
+        a simple 3-level project hierarchy.
+ """
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ parent = projects_hierarchy[0]
+
+        # Disabling the parent project disables the whole subtree
+ parent['enabled'] = False
+        # Store the ref from the backend in another variable so we don't have
+        # to remove attributes that were not originally provided and were set
+        # by the manager, like parent_id and domain_id.
+ parent_ref = self.resource_api.update_project(parent['id'],
+ parent,
+ cascade=True)
+
+ subtree = self.resource_api.list_projects_in_subtree(parent['id'])
+ self.assertEqual(2, len(subtree))
+ self.assertFalse(parent_ref['enabled'])
+ self.assertFalse(subtree[0]['enabled'])
+ self.assertFalse(subtree[1]['enabled'])
+
+        # Enabling the parent project enables the whole subtree
+ parent['enabled'] = True
+ parent_ref = self.resource_api.update_project(parent['id'],
+ parent,
+ cascade=True)
+
+ subtree = self.resource_api.list_projects_in_subtree(parent['id'])
+ self.assertEqual(2, len(subtree))
+ self.assertTrue(parent_ref['enabled'])
+ self.assertTrue(subtree[0]['enabled'])
+ self.assertTrue(subtree[1]['enabled'])
+
+ def test_cannot_enable_cascade_with_parent_disabled(self):
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ grandparent = projects_hierarchy[0]
+ parent = projects_hierarchy[1]
+
+ grandparent['enabled'] = False
+ self.resource_api.update_project(grandparent['id'],
+ grandparent,
+ cascade=True)
+ subtree = self.resource_api.list_projects_in_subtree(parent['id'])
+ self.assertFalse(subtree[0]['enabled'])
+
+ parent['enabled'] = True
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.update_project,
+ parent['id'],
+ parent,
+ cascade=True)
+
+ def test_update_cascade_only_accepts_enabled(self):
+        # Update cascade does not accept any attribute other than 'enabled'
+ new_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(new_project['id'], new_project)
+
+ new_project['name'] = 'project1'
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.update_project,
+ new_project['id'],
+ new_project,
+ cascade=True)
+
+ def test_list_project_parents_invalid_project_id(self):
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.list_project_parents,
+ None)
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.list_project_parents,
+ uuid.uuid4().hex)
+
+ def test_create_project_doesnt_modify_passed_in_dict(self):
+ new_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ original_project = new_project.copy()
+ self.resource_api.create_project(new_project['id'], new_project)
+ self.assertDictEqual(original_project, new_project)
+
+ def test_update_project_enable(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertTrue(project_ref['enabled'])
+
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertEqual(project['enabled'], project_ref['enabled'])
+
+        # If not present, the enabled field should not be updated
+ del project['enabled']
+ self.resource_api.update_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertFalse(project_ref['enabled'])
+
+ project['enabled'] = True
+ self.resource_api.update_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertEqual(project['enabled'], project_ref['enabled'])
+
+ del project['enabled']
+ self.resource_api.update_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertTrue(project_ref['enabled'])
+
+ def test_create_invalid_domain_fails(self):
+ new_group = unit.new_group_ref(domain_id="doesnotexist")
+ self.assertRaises(exception.DomainNotFound,
+ self.identity_api.create_group,
+ new_group)
+ new_user = unit.new_user_ref(domain_id="doesnotexist")
+ self.assertRaises(exception.DomainNotFound,
+ self.identity_api.create_user,
+ new_user)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_project_crud(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ project = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertDictContainsSubset(project, project_ref)
+
+ project['name'] = uuid.uuid4().hex
+ self.resource_api.update_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertDictContainsSubset(project, project_ref)
+
+ self.resource_api.delete_project(project['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project['id'])
+
+ def test_domain_delete_hierarchy(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+
+ # Creating a root and a leaf project inside the domain
+ projects_hierarchy = self._create_projects_hierarchy(
+ domain_id=domain['id'])
+ root_project = projects_hierarchy[0]
+        leaf_project = projects_hierarchy[1]
+
+ # Disable the domain
+ domain['enabled'] = False
+ self.resource_api.update_domain(domain['id'], domain)
+
+ # Delete the domain
+ self.resource_api.delete_domain(domain['id'])
+
+ # Make sure the domain no longer exists
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ domain['id'])
+
+ # Make sure the root project no longer exists
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ root_project['id'])
+
+ # Make sure the leaf project no longer exists
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ leaf_project['id'])
+
+ def test_delete_projects_from_ids(self):
+        """Test the resource backend call delete_projects_from_ids.
+
+        Tests the normal flow of the delete_projects_from_ids backend call,
+        ensuring that no project in the list exists after it is successfully
+        called.
+ """
+ project1_ref = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project2_ref = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ projects = (project1_ref, project2_ref)
+ for project in projects:
+ self.resource_api.create_project(project['id'], project)
+
+        # Set up the list of project IDs
+ projects_ids = [p['id'] for p in projects]
+ self.resource_api.driver.delete_projects_from_ids(projects_ids)
+
+        # Ensure the projects no longer exist at the backend level
+ for project_id in projects_ids:
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.driver.get_project,
+ project_id)
+
+ # Passing an empty list is silently ignored
+ self.resource_api.driver.delete_projects_from_ids([])
+
+ def test_delete_projects_from_ids_with_no_existing_project_id(self):
+        """Test that delete_projects_from_ids issues a warning if not found.
+
+        Tests the resource backend call delete_projects_from_ids passing a
+        non-existing ID in project_ids, which is logged and ignored by
+ the backend.
+ """
+ project_ref = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project_ref['id'], project_ref)
+
+        # Set up the list of project IDs
+ projects_ids = (project_ref['id'], uuid.uuid4().hex)
+ with mock.patch('keystone.resource.backends.sql.LOG') as mock_log:
+ self.resource_api.delete_projects_from_ids(projects_ids)
+ self.assertTrue(mock_log.warning.called)
+ # The existing project was deleted.
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.driver.get_project,
+ project_ref['id'])
+
+        # Even if the only project in the list does not exist, no error is
+        # returned.
+ self.resource_api.driver.delete_projects_from_ids([uuid.uuid4().hex])
+
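+    # The contract these two tests pin down, as driver-side pseudocode (an
+    # assumption about the shape of the logic, not the actual
+    # implementation):
+    #
+    #     for project_id in project_ids:
+    #         try:
+    #             delete(project_id)
+    #         except exception.ProjectNotFound:
+    #             LOG.warning(...)  # logged and ignored, per the tests above
+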
+ def test_delete_project_cascade(self):
+ # create a hierarchy with 3 levels
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ root_project = projects_hierarchy[0]
+ project1 = projects_hierarchy[1]
+ project2 = projects_hierarchy[2]
+
+ # Disabling all projects before attempting to delete
+ for project in (project2, project1, root_project):
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+
+ self.resource_api.delete_project(root_project['id'], cascade=True)
+
+ for project in projects_hierarchy:
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project['id'])
+
+ def test_delete_large_project_cascade(self):
+        """Try to delete a large project with cascade=True.
+
+ Tree we will create::
+
+ +-p1-+
+ | |
+ p5 p2
+ | |
+ p6 +-p3-+
+ | |
+ p7 p4
+ """
+ # create a hierarchy with 4 levels
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=4)
+ p1 = projects_hierarchy[0]
+ # Add the left branch to the hierarchy (p5, p6)
+ self._create_projects_hierarchy(hierarchy_size=2,
+ parent_project_id=p1['id'])
+ # Add p7 to the hierarchy
+ p3_id = projects_hierarchy[2]['id']
+ self._create_projects_hierarchy(hierarchy_size=1,
+ parent_project_id=p3_id)
+        # Reverse the hierarchy so the leaves are disabled first
+ prjs_hierarchy = ([p1] + self.resource_api.list_projects_in_subtree(
+ p1['id']))[::-1]
+
+ # Disabling all projects before attempting to delete
+ for project in prjs_hierarchy:
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+
+ self.resource_api.delete_project(p1['id'], cascade=True)
+ for project in prjs_hierarchy:
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project['id'])
+
+ def test_cannot_delete_project_cascade_with_enabled_child(self):
+ # create a hierarchy with 3 levels
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ root_project = projects_hierarchy[0]
+ project1 = projects_hierarchy[1]
+ project2 = projects_hierarchy[2]
+
+ project2['enabled'] = False
+ self.resource_api.update_project(project2['id'], project2)
+
+ # Cannot cascade delete root_project, since project1 is enabled
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.delete_project,
+ root_project['id'],
+ cascade=True)
+
+ # Ensuring no project was deleted, not even project2
+ self.resource_api.get_project(root_project['id'])
+ self.resource_api.get_project(project1['id'])
+ self.resource_api.get_project(project2['id'])
+
+ def test_hierarchical_projects_crud(self):
+ # create a hierarchy with just a root project (which is a leaf as well)
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=1)
+ root_project1 = projects_hierarchy[0]
+
+ # create a hierarchy with one root project and one leaf project
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project2 = projects_hierarchy[0]
+ leaf_project = projects_hierarchy[1]
+
+        # update the description of leaf_project
+ leaf_project['description'] = 'new description'
+ self.resource_api.update_project(leaf_project['id'], leaf_project)
+ proj_ref = self.resource_api.get_project(leaf_project['id'])
+ self.assertDictEqual(leaf_project, proj_ref)
+
+        # updating the parent_id is not allowed
+ leaf_project['parent_id'] = root_project1['id']
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.update_project,
+ leaf_project['id'],
+ leaf_project)
+
+ # delete root_project1
+ self.resource_api.delete_project(root_project1['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ root_project1['id'])
+
+        # deleting root_project2 is not allowed since it is not a leaf project
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.delete_project,
+ root_project2['id'])
+
+ def test_create_project_with_invalid_parent(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, parent_id='fake')
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_create_leaf_project_with_different_domain(self):
+ root_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(root_project['id'], root_project)
+
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ leaf_project = unit.new_project_ref(domain_id=domain['id'],
+ parent_id=root_project['id'])
+
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ leaf_project['id'],
+ leaf_project)
+
+ def test_delete_hierarchical_leaf_project(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project = projects_hierarchy[0]
+ leaf_project = projects_hierarchy[1]
+
+ self.resource_api.delete_project(leaf_project['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ leaf_project['id'])
+
+ self.resource_api.delete_project(root_project['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ root_project['id'])
+
+ def test_delete_hierarchical_not_leaf_project(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project = projects_hierarchy[0]
+
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.delete_project,
+ root_project['id'])
+
+ def test_update_project_parent(self):
+ projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
+ project1 = projects_hierarchy[0]
+ project2 = projects_hierarchy[1]
+ project3 = projects_hierarchy[2]
+
+        # project2 is the parent of project3
+ self.assertEqual(project3.get('parent_id'), project2['id'])
+
+        # try to update project3's parent to project1
+ project3['parent_id'] = project1['id']
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.update_project,
+ project3['id'],
+ project3)
+
+ def test_create_project_under_disabled_one(self):
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, enabled=False)
+ self.resource_api.create_project(project1['id'], project1)
+
+ project2 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=project1['id'])
+
+ # It's not possible to create a project under a disabled one in the
+ # hierarchy
+ self.assertRaises(exception.ValidationError,
+ self.resource_api.create_project,
+ project2['id'],
+ project2)
+
+ def test_disable_hierarchical_leaf_project(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ leaf_project = projects_hierarchy[1]
+
+ leaf_project['enabled'] = False
+ self.resource_api.update_project(leaf_project['id'], leaf_project)
+
+ project_ref = self.resource_api.get_project(leaf_project['id'])
+ self.assertEqual(leaf_project['enabled'], project_ref['enabled'])
+
+ def test_disable_hierarchical_not_leaf_project(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project = projects_hierarchy[0]
+
+ root_project['enabled'] = False
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.update_project,
+ root_project['id'],
+ root_project)
+
+ def test_enable_project_with_disabled_parent(self):
+ projects_hierarchy = self._create_projects_hierarchy()
+ root_project = projects_hierarchy[0]
+ leaf_project = projects_hierarchy[1]
+
+ # Disable leaf and root
+ leaf_project['enabled'] = False
+ self.resource_api.update_project(leaf_project['id'], leaf_project)
+ root_project['enabled'] = False
+ self.resource_api.update_project(root_project['id'], root_project)
+
+        # Try to enable the leaf project; it's not possible since it has
+        # a disabled parent
+ leaf_project['enabled'] = True
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.update_project,
+ leaf_project['id'],
+ leaf_project)
+
+ def _get_hierarchy_depth(self, project_id):
+ return len(self.resource_api.list_project_parents(project_id)) + 1
+
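+    # Worked example with a hypothetical three-level chain
+    # domain -> root -> leaf:
+    #
+    #     self.resource_api.list_project_parents(leaf_id)  # [root, domain]
+    #     self._get_hierarchy_depth(leaf_id)               # 2 + 1 == 3
+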
+ def test_check_hierarchy_depth(self):
+ # Should be allowed to have a hierarchy of the max depth specified
+ # in the config option plus one (to allow for the additional project
+ # acting as a domain after an upgrade)
+ projects_hierarchy = self._create_projects_hierarchy(
+ CONF.max_project_tree_depth)
+ leaf_project = projects_hierarchy[CONF.max_project_tree_depth - 1]
+
+ depth = self._get_hierarchy_depth(leaf_project['id'])
+ self.assertEqual(CONF.max_project_tree_depth + 1, depth)
+
+ # Creating another project in the hierarchy shouldn't be allowed
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id,
+ parent_id=leaf_project['id'])
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.create_project,
+ project['id'],
+ project)
+
+ def test_project_update_missing_attrs_with_a_value(self):
+ # Creating a project with no description attribute.
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ del project['description']
+ project = self.resource_api.create_project(project['id'], project)
+
+ # Add a description attribute.
+ project['description'] = uuid.uuid4().hex
+ self.resource_api.update_project(project['id'], project)
+
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertDictEqual(project, project_ref)
+
+ def test_project_update_missing_attrs_with_a_falsey_value(self):
+ # Creating a project with no description attribute.
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ del project['description']
+ project = self.resource_api.create_project(project['id'], project)
+
+ # Add a description attribute.
+ project['description'] = ''
+ self.resource_api.update_project(project['id'], project)
+
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertDictEqual(project, project_ref)
+
+ def test_domain_crud(self):
+ domain = unit.new_domain_ref()
+ domain_ref = self.resource_api.create_domain(domain['id'], domain)
+ self.assertDictEqual(domain, domain_ref)
+ domain_ref = self.resource_api.get_domain(domain['id'])
+ self.assertDictEqual(domain, domain_ref)
+
+ domain['name'] = uuid.uuid4().hex
+ domain_ref = self.resource_api.update_domain(domain['id'], domain)
+ self.assertDictEqual(domain, domain_ref)
+ domain_ref = self.resource_api.get_domain(domain['id'])
+ self.assertDictEqual(domain, domain_ref)
+
+ # Ensure an 'enabled' domain cannot be deleted
+ self.assertRaises(exception.ForbiddenNotSecurity,
+ self.resource_api.delete_domain,
+ domain_id=domain['id'])
+
+ # Disable the domain
+ domain['enabled'] = False
+ self.resource_api.update_domain(domain['id'], domain)
+
+ # Delete the domain
+ self.resource_api.delete_domain(domain['id'])
+
+ # Make sure the domain no longer exists
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ domain['id'])
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_domain_name_case_sensitivity(self):
+ # create a ref with a lowercase name
+ domain_name = 'test_domain'
+ ref = unit.new_domain_ref(name=domain_name)
+
+ lower_case_domain = self.resource_api.create_domain(ref['id'], ref)
+
+ # assign a new ID to the ref with the same name, but in uppercase
+ ref['id'] = uuid.uuid4().hex
+ ref['name'] = domain_name.upper()
+ upper_case_domain = self.resource_api.create_domain(ref['id'], ref)
+
+ # We can get each domain by name
+ lower_case_domain_ref = self.resource_api.get_domain_by_name(
+ domain_name)
+ self.assertDictEqual(lower_case_domain, lower_case_domain_ref)
+
+ upper_case_domain_ref = self.resource_api.get_domain_by_name(
+ domain_name.upper())
+ self.assertDictEqual(upper_case_domain, upper_case_domain_ref)
+
+ def test_project_attribute_update(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(project['id'], project)
+
+ # pick a key known to be non-existent
+ key = 'description'
+
+ def assert_key_equals(value):
+ project_ref = self.resource_api.update_project(
+ project['id'], project)
+ self.assertEqual(value, project_ref[key])
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertEqual(value, project_ref[key])
+
+ def assert_get_key_is(value):
+ project_ref = self.resource_api.update_project(
+ project['id'], project)
+ self.assertIs(project_ref.get(key), value)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertIs(project_ref.get(key), value)
+
+ # add an attribute that doesn't exist, set it to a falsey value
+ value = ''
+ project[key] = value
+ assert_key_equals(value)
+
+ # set an attribute with a falsey value to null
+ value = None
+ project[key] = value
+ assert_get_key_is(value)
+
+ # do it again, in case updating from this situation is handled oddly
+ value = None
+ project[key] = value
+ assert_get_key_is(value)
+
+ # set a possibly-null value to a falsey value
+ value = ''
+ project[key] = value
+ assert_key_equals(value)
+
+ # set a falsey value to a truthy value
+ value = uuid.uuid4().hex
+ project[key] = value
+ assert_key_equals(value)
+
+ @unit.skip_if_cache_disabled('resource')
+ @unit.skip_if_no_multiple_domains_support
+ def test_domain_rename_invalidates_get_domain_by_name_cache(self):
+ domain = unit.new_domain_ref()
+ domain_id = domain['id']
+ domain_name = domain['name']
+ self.resource_api.create_domain(domain_id, domain)
+ domain_ref = self.resource_api.get_domain_by_name(domain_name)
+ domain_ref['name'] = uuid.uuid4().hex
+ self.resource_api.update_domain(domain_id, domain_ref)
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain_by_name,
+ domain_name)
+
+ @unit.skip_if_cache_disabled('resource')
+ def test_cache_layer_domain_crud(self):
+ domain = unit.new_domain_ref()
+ domain_id = domain['id']
+ # Create Domain
+ self.resource_api.create_domain(domain_id, domain)
+ project_domain_ref = self.resource_api.get_project(domain_id)
+ domain_ref = self.resource_api.get_domain(domain_id)
+ updated_project_domain_ref = copy.deepcopy(project_domain_ref)
+ updated_project_domain_ref['name'] = uuid.uuid4().hex
+ updated_domain_ref = copy.deepcopy(domain_ref)
+ updated_domain_ref['name'] = updated_project_domain_ref['name']
+ # Update domain, bypassing resource api manager
+ self.resource_api.driver.update_project(domain_id,
+ updated_project_domain_ref)
+ # Verify get_domain still returns the domain
+ self.assertDictContainsSubset(
+ domain_ref, self.resource_api.get_domain(domain_id))
+ # Invalidate cache
+ self.resource_api.get_domain.invalidate(self.resource_api,
+ domain_id)
+ # Verify get_domain returns the updated domain
+ self.assertDictContainsSubset(
+ updated_domain_ref, self.resource_api.get_domain(domain_id))
+        # Update the domain back to the original ref, using the resource api
+        # manager
+ self.resource_api.update_domain(domain_id, domain_ref)
+ self.assertDictContainsSubset(
+ domain_ref, self.resource_api.get_domain(domain_id))
+        # Make sure the domain is 'disabled', bypassing the resource api
+        # manager
+ project_domain_ref_disabled = project_domain_ref.copy()
+ project_domain_ref_disabled['enabled'] = False
+ self.resource_api.driver.update_project(domain_id,
+ project_domain_ref_disabled)
+ self.resource_api.driver.update_project(domain_id, {'enabled': False})
+ # Delete domain, bypassing resource api manager
+ self.resource_api.driver.delete_project(domain_id)
+ # Verify get_domain still returns the domain
+ self.assertDictContainsSubset(
+ domain_ref, self.resource_api.get_domain(domain_id))
+ # Invalidate cache
+ self.resource_api.get_domain.invalidate(self.resource_api,
+ domain_id)
+ # Verify get_domain now raises DomainNotFound
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain, domain_id)
+ # Recreate Domain
+ self.resource_api.create_domain(domain_id, domain)
+ self.resource_api.get_domain(domain_id)
+        # Make sure the domain is 'disabled', bypassing the resource api
+        # manager
+ domain['enabled'] = False
+ self.resource_api.driver.update_project(domain_id, domain)
+ self.resource_api.driver.update_project(domain_id, {'enabled': False})
+ # Delete domain
+ self.resource_api.delete_domain(domain_id)
+ # verify DomainNotFound raised
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ domain_id)
+
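+    # The invalidate() calls above rely on the memoization decorator that
+    # the resource manager wraps get_domain/get_project with; roughly (a
+    # sketch of the assumed dogpile.cache-style pattern, not keystone's
+    # exact code):
+    #
+    #     @MEMOIZE
+    #     def get_domain(self, domain_id):
+    #         return self.driver.get_project(domain_id)
+    #
+    #     # later, to drop the cached entry:
+    #     self.get_domain.invalidate(self, domain_id)
+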
+ @unit.skip_if_cache_disabled('resource')
+ @unit.skip_if_no_multiple_domains_support
+ def test_project_rename_invalidates_get_project_by_name_cache(self):
+ domain = unit.new_domain_ref()
+ project = unit.new_project_ref(domain_id=domain['id'])
+ project_id = project['id']
+ project_name = project['name']
+ self.resource_api.create_domain(domain['id'], domain)
+ # Create a project
+ self.resource_api.create_project(project_id, project)
+ self.resource_api.get_project_by_name(project_name, domain['id'])
+ project['name'] = uuid.uuid4().hex
+ self.resource_api.update_project(project_id, project)
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project_by_name,
+ project_name,
+ domain['id'])
+
+ @unit.skip_if_cache_disabled('resource')
+ @unit.skip_if_no_multiple_domains_support
+ def test_cache_layer_project_crud(self):
+ domain = unit.new_domain_ref()
+ project = unit.new_project_ref(domain_id=domain['id'])
+ project_id = project['id']
+ self.resource_api.create_domain(domain['id'], domain)
+ # Create a project
+ self.resource_api.create_project(project_id, project)
+ self.resource_api.get_project(project_id)
+ updated_project = copy.deepcopy(project)
+ updated_project['name'] = uuid.uuid4().hex
+ # Update project, bypassing resource manager
+ self.resource_api.driver.update_project(project_id,
+ updated_project)
+ # Verify get_project still returns the original project_ref
+ self.assertDictContainsSubset(
+ project, self.resource_api.get_project(project_id))
+ # Invalidate cache
+ self.resource_api.get_project.invalidate(self.resource_api,
+ project_id)
+ # Verify get_project now returns the new project
+ self.assertDictContainsSubset(
+ updated_project,
+ self.resource_api.get_project(project_id))
+        # Update the project back to the original ref, using the resource_api
+        # manager
+ self.resource_api.update_project(project['id'], project)
+ # Verify get_project returns the original project_ref
+ self.assertDictContainsSubset(
+ project, self.resource_api.get_project(project_id))
+ # Delete project bypassing resource
+ self.resource_api.driver.delete_project(project_id)
+ # Verify get_project still returns the project_ref
+ self.assertDictContainsSubset(
+ project, self.resource_api.get_project(project_id))
+ # Invalidate cache
+ self.resource_api.get_project.invalidate(self.resource_api,
+ project_id)
+ # Verify ProjectNotFound now raised
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project_id)
+ # recreate project
+ self.resource_api.create_project(project_id, project)
+ self.resource_api.get_project(project_id)
+ # delete project
+ self.resource_api.delete_project(project_id)
+ # Verify ProjectNotFound is raised
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project_id)
+
+ @unit.skip_if_no_multiple_domains_support
+ def test_get_default_domain_by_name(self):
+ domain_name = 'default'
+
+ domain = unit.new_domain_ref(name=domain_name)
+ self.resource_api.create_domain(domain['id'], domain)
+
+ domain_ref = self.resource_api.get_domain_by_name(domain_name)
+ self.assertEqual(domain, domain_ref)
+
+ def test_get_not_default_domain_by_name(self):
+ domain_name = 'foo'
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain_by_name,
+ domain_name)
+
+ def test_project_update_and_project_get_return_same_response(self):
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+
+ self.resource_api.create_project(project['id'], project)
+
+ updated_project = {'enabled': False}
+ updated_project_ref = self.resource_api.update_project(
+ project['id'], updated_project)
+
+ # SQL backend adds 'extra' field
+ updated_project_ref.pop('extra', None)
+
+ self.assertIs(False, updated_project_ref['enabled'])
+
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertDictEqual(updated_project_ref, project_ref)
+
+
+class ResourceDriverTests(object):
+ """Tests for the resource driver.
+
+ Subclasses must set self.driver to the driver instance.
+
+ """
+
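+ # The tests below exercise the driver contract directly with
+ # hand-built dicts; name, id and domain_id are the minimal
+ # properties a project ref needs at this layer.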
+ def test_create_project(self):
+ project_id = uuid.uuid4().hex
+ project = {
+ 'name': uuid.uuid4().hex,
+ 'id': project_id,
+ 'domain_id': uuid.uuid4().hex,
+ }
+ self.driver.create_project(project_id, project)
+
+ def test_create_project_all_defined_properties(self):
+ project_id = uuid.uuid4().hex
+ project = {
+ 'name': uuid.uuid4().hex,
+ 'id': project_id,
+ 'domain_id': uuid.uuid4().hex,
+ 'description': uuid.uuid4().hex,
+ 'enabled': True,
+ 'parent_id': uuid.uuid4().hex,
+ 'is_domain': True,
+ }
+ self.driver.create_project(project_id, project)
+
+ def test_create_project_null_domain(self):
+ project_id = uuid.uuid4().hex
+ project = {
+ 'name': uuid.uuid4().hex,
+ 'id': project_id,
+ 'domain_id': None,
+ }
+ self.driver.create_project(project_id, project)
+
+ def test_create_project_same_name_same_domain_conflict(self):
+ name = uuid.uuid4().hex
+ domain_id = uuid.uuid4().hex
+
+ project_id = uuid.uuid4().hex
+ project = {
+ 'name': name,
+ 'id': project_id,
+ 'domain_id': domain_id,
+ }
+ self.driver.create_project(project_id, project)
+
+ project_id = uuid.uuid4().hex
+ project = {
+ 'name': name,
+ 'id': project_id,
+ 'domain_id': domain_id,
+ }
+ self.assertRaises(exception.Conflict, self.driver.create_project,
+ project_id, project)
+
+ def test_create_project_same_id_conflict(self):
+ project_id = uuid.uuid4().hex
+
+ project = {
+ 'name': uuid.uuid4().hex,
+ 'id': project_id,
+ 'domain_id': uuid.uuid4().hex,
+ }
+ self.driver.create_project(project_id, project)
+
+ project = {
+ 'name': uuid.uuid4().hex,
+ 'id': project_id,
+ 'domain_id': uuid.uuid4().hex,
+ }
+ self.assertRaises(exception.Conflict, self.driver.create_project,
+ project_id, project)
diff --git a/keystone-moon/keystone/tests/unit/resource/test_controllers.py b/keystone-moon/keystone/tests/unit/resource/test_controllers.py
new file mode 100644
index 00000000..b8f247c8
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/test_controllers.py
@@ -0,0 +1,57 @@
+# Copyright 2016 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+
+from keystone import exception
+from keystone.resource import controllers
+from keystone.tests import unit
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+
+_ADMIN_CONTEXT = {'is_admin': True, 'query_string': {}}
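+# A minimal request context accepted by the v2 controllers: 'is_admin'
+# short-circuits the normal token validation in these unit tests.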
+
+
+class TenantTestCaseNoDefaultDomain(unit.TestCase):
+
+ def setUp(self):
+ super(TenantTestCaseNoDefaultDomain, self).setUp()
+ self.useFixture(database.Database())
+ self.load_backends()
+ self.tenant_controller = controllers.Tenant()
+
+ def test_setup(self):
+ # Other tests in this class assume there's no default domain, so make
+ # sure the setUp worked as expected.
+ self.assertRaises(
+ exception.DomainNotFound,
+ self.resource_api.get_domain, CONF.identity.default_domain_id)
+
+ def test_get_all_projects(self):
+ # When get_all_projects is done and there's no default domain, the
+ # result is an empty list.
+ res = self.tenant_controller.get_all_projects(_ADMIN_CONTEXT)
+ self.assertEqual([], res['tenants'])
+
+ def test_create_project(self):
+ # When a project is created using the v2 controller and there's no
+ # default domain, the call doesn't fail with DomainNotFound; a
+ # default domain is created automatically.
+ tenant = {'name': uuid.uuid4().hex}
+ self.tenant_controller.create_project(_ADMIN_CONTEXT, tenant)
+ # If the call above doesn't raise, the test is successful.
diff --git a/keystone-moon/keystone/tests/unit/resource/test_core.py b/keystone-moon/keystone/tests/unit/resource/test_core.py
new file mode 100644
index 00000000..2eb87e4c
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/resource/test_core.py
@@ -0,0 +1,692 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import mock
+from testtools import matchers
+
+from oslo_config import cfg
+from oslotest import mockpatch
+
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit.ksfixtures import database
+
+
+CONF = cfg.CONF
+
+
+class TestResourceManagerNoFixtures(unit.SQLDriverOverrides, unit.TestCase):
+
+ def setUp(self):
+ super(TestResourceManagerNoFixtures, self).setUp()
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
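+ # (sql_driver_version_overrides comes from the SQLDriverOverrides
+ # mixin, presumably pinning specific SQL driver versions for the
+ # database fixture)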
+ self.load_backends()
+
+ def test_ensure_default_domain_exists(self):
+ # When there's no default domain, ensure_default_domain_exists creates
+ # it.
+
+ # First make sure there's no default domain.
+ self.assertRaises(
+ exception.DomainNotFound,
+ self.resource_api.get_domain, CONF.identity.default_domain_id)
+
+ self.resource_api.ensure_default_domain_exists()
+ default_domain = self.resource_api.get_domain(
+ CONF.identity.default_domain_id)
+
+ expected_domain = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': 'Default',
+ 'enabled': True,
+ 'description': 'Domain created automatically to support V2.0 '
+ 'operations.',
+ }
+ self.assertEqual(expected_domain, default_domain)
+
+ def test_ensure_default_domain_exists_already_exists(self):
+ # When there's already a default domain, ensure_default_domain_exists
+ # doesn't do anything.
+
+ name = uuid.uuid4().hex
+ description = uuid.uuid4().hex
+ domain_attrs = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': name,
+ 'description': description,
+ }
+ self.resource_api.create_domain(CONF.identity.default_domain_id,
+ domain_attrs)
+
+ self.resource_api.ensure_default_domain_exists()
+
+ default_domain = self.resource_api.get_domain(
+ CONF.identity.default_domain_id)
+
+ expected_domain = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': name,
+ 'enabled': True,
+ 'description': description,
+ }
+
+ self.assertEqual(expected_domain, default_domain)
+
+ def test_ensure_default_domain_exists_fails(self):
+ # When there's an unexpected exception while creating the domain,
+ # it's propagated to the caller.
+
+ self.useFixture(mockpatch.PatchObject(
+ self.resource_api, 'create_domain',
+ side_effect=exception.UnexpectedError))
+
+ self.assertRaises(exception.UnexpectedError,
+ self.resource_api.ensure_default_domain_exists)
+
+ def test_update_project_name_conflict(self):
+ name = uuid.uuid4().hex
+ description = uuid.uuid4().hex
+ domain_attrs = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': name,
+ 'description': description,
+ }
+ domain = self.resource_api.create_domain(
+ CONF.identity.default_domain_id, domain_attrs)
+ project1 = unit.new_project_ref(domain_id=domain['id'],
+ name=uuid.uuid4().hex)
+ self.resource_api.create_project(project1['id'], project1)
+ project2 = unit.new_project_ref(domain_id=domain['id'],
+ name=uuid.uuid4().hex)
+ project = self.resource_api.create_project(project2['id'], project2)
+
+ self.assertRaises(exception.Conflict,
+ self.resource_api.update_project,
+ project['id'], {'name': project1['name']})
+
+
+class DomainConfigDriverTests(object):
+
+ def _domain_config_crud(self, sensitive):
+ domain = uuid.uuid4().hex
+ group = uuid.uuid4().hex
+ option = uuid.uuid4().hex
+ value = uuid.uuid4().hex
+ self.driver.create_config_option(
+ domain, group, option, value, sensitive)
+ res = self.driver.get_config_option(
+ domain, group, option, sensitive)
+ config = {'group': group, 'option': option, 'value': value}
+ self.assertEqual(config, res)
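+ # the returned ref carries only group, option and value; the
+ # sensitive flag itself is never echoed back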
+
+ value = uuid.uuid4().hex
+ self.driver.update_config_option(
+ domain, group, option, value, sensitive)
+ res = self.driver.get_config_option(
+ domain, group, option, sensitive)
+ config = {'group': group, 'option': option, 'value': value}
+ self.assertEqual(config, res)
+
+ self.driver.delete_config_options(
+ domain, group, option, sensitive)
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.driver.get_config_option,
+ domain, group, option, sensitive)
+ # ...and silent if we try to delete it again
+ self.driver.delete_config_options(
+ domain, group, option, sensitive)
+
+ def test_whitelisted_domain_config_crud(self):
+ self._domain_config_crud(sensitive=False)
+
+ def test_sensitive_domain_config_crud(self):
+ self._domain_config_crud(sensitive=True)
+
+ def _list_domain_config(self, sensitive):
+ """Test listing by combination of domain, group & option."""
+ config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ # Put config2 in the same group as config1
+ config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ config3 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+ 'value': 100}
+ domain = uuid.uuid4().hex
+
+ for config in [config1, config2, config3]:
+ self.driver.create_config_option(
+ domain, config['group'], config['option'],
+ config['value'], sensitive)
+
+ # Try listing all items from a domain
+ res = self.driver.list_config_options(
+ domain, sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(3))
+ for res_entry in res:
+ self.assertIn(res_entry, [config1, config2, config3])
+
+ # Try listing by domain and group
+ res = self.driver.list_config_options(
+ domain, group=config1['group'], sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(2))
+ for res_entry in res:
+ self.assertIn(res_entry, [config1, config2])
+
+ # Try listing by domain, group and option
+ res = self.driver.list_config_options(
+ domain, group=config2['group'],
+ option=config2['option'], sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(1))
+ self.assertEqual(config2, res[0])
+
+ def test_list_whitelisted_domain_config_crud(self):
+ self._list_domain_config(False)
+
+ def test_list_sensitive_domain_config_crud(self):
+ self._list_domain_config(True)
+
+ def _delete_domain_configs(self, sensitive):
+ """Test deleting by combination of domain, group & option."""
+ config1 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ # Put config2 and config3 in the same group as config1
+ config2 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ config3 = {'group': config1['group'], 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ config4 = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ domain = uuid.uuid4().hex
+
+ for config in [config1, config2, config3, config4]:
+ self.driver.create_config_option(
+ domain, config['group'], config['option'],
+ config['value'], sensitive)
+
+ # Try deleting by domain, group and option
+ res = self.driver.delete_config_options(
+ domain, group=config2['group'],
+ option=config2['option'], sensitive=sensitive)
+ res = self.driver.list_config_options(
+ domain, sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(3))
+ for res_entry in res:
+ self.assertIn(res_entry, [config1, config3, config4])
+
+ # Try deleting by domain and group
+ res = self.driver.delete_config_options(
+ domain, group=config4['group'], sensitive=sensitive)
+ res = self.driver.list_config_options(
+ domain, sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(2))
+ for res_entry in res:
+ self.assertIn(res_entry, [config1, config3])
+
+ # Try deleting all items from a domain
+ res = self.driver.delete_config_options(
+ domain, sensitive=sensitive)
+ res = self.driver.list_config_options(
+ domain, sensitive=sensitive)
+ self.assertThat(res, matchers.HasLength(0))
+
+ def test_delete_whitelisted_domain_configs(self):
+ self._delete_domain_configs(False)
+
+ def test_delete_sensitive_domain_configs(self):
+ self._delete_domain_configs(True)
+
+ def _create_domain_config_twice(self, sensitive):
+ """Test conflict error thrown if create the same option twice."""
+ config = {'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex,
+ 'value': uuid.uuid4().hex}
+ domain = uuid.uuid4().hex
+
+ self.driver.create_config_option(
+ domain, config['group'], config['option'],
+ config['value'], sensitive=sensitive)
+ self.assertRaises(exception.Conflict,
+ self.driver.create_config_option,
+ domain, config['group'], config['option'],
+ config['value'], sensitive=sensitive)
+
+ def test_create_whitelisted_domain_config_twice(self):
+ self._create_domain_config_twice(False)
+
+ def test_create_sensitive_domain_config_twice(self):
+ self._create_domain_config_twice(True)
+
+
+class DomainConfigTests(object):
+
+ def setUp(self):
+ self.domain = unit.new_domain_ref()
+ self.resource_api.create_domain(self.domain['id'], self.domain)
+ self.addCleanup(self.clean_up_domain)
+
+ def clean_up_domain(self):
+ # NOTE(henry-nash): Deleting the domain will also delete any domain
+ # configs for this domain.
+ self.domain['enabled'] = False
+ self.resource_api.update_domain(self.domain['id'], self.domain)
+ self.resource_api.delete_domain(self.domain['id'])
+ del self.domain
+
+ def test_create_domain_config_including_sensitive_option(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ # password is sensitive, so check that the whitelisted portion and
+ # the sensitive piece have been stored in the appropriate locations.
+ res = self.domain_config_api.get_config(self.domain['id'])
+ config_whitelisted = copy.deepcopy(config)
+ config_whitelisted['ldap'].pop('password')
+ self.assertEqual(config_whitelisted, res)
+ res = self.domain_config_api.driver.get_config_option(
+ self.domain['id'], 'ldap', 'password', sensitive=True)
+ self.assertEqual(config['ldap']['password'], res['value'])
+
+ # Finally, use the non-public API to get back the whole config
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(config, res)
+
+ def test_get_partial_domain_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ res = self.domain_config_api.get_config(self.domain['id'],
+ group='identity')
+ config_partial = copy.deepcopy(config)
+ config_partial.pop('ldap')
+ self.assertEqual(config_partial, res)
+ res = self.domain_config_api.get_config(
+ self.domain['id'], group='ldap', option='user_tree_dn')
+ self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res)
+ # ...but we should fail to get a sensitive option
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.get_config, self.domain['id'],
+ group='ldap', option='password')
+
+ def test_delete_partial_domain_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ self.domain_config_api.delete_config(
+ self.domain['id'], group='identity')
+ config_partial = copy.deepcopy(config)
+ config_partial.pop('identity')
+ config_partial['ldap'].pop('password')
+ res = self.domain_config_api.get_config(self.domain['id'])
+ self.assertEqual(config_partial, res)
+
+ self.domain_config_api.delete_config(
+ self.domain['id'], group='ldap', option='url')
+ config_partial = copy.deepcopy(config_partial)
+ config_partial['ldap'].pop('url')
+ res = self.domain_config_api.get_config(self.domain['id'])
+ self.assertEqual(config_partial, res)
+
+ def test_get_options_not_in_domain_config(self):
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.get_config, self.domain['id'])
+ config = {'ldap': {'url': uuid.uuid4().hex}}
+
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.get_config, self.domain['id'],
+ group='identity')
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.get_config, self.domain['id'],
+ group='ldap', option='user_tree_dn')
+
+ def test_get_sensitive_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual({}, res)
+ self.domain_config_api.create_config(self.domain['id'], config)
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(config, res)
+
+ def test_update_partial_domain_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ # Try updating a group
+ new_config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_filter': uuid.uuid4().hex}}
+ res = self.domain_config_api.update_config(
+ self.domain['id'], new_config, group='ldap')
+ expected_config = copy.deepcopy(config)
+ expected_config['ldap']['url'] = new_config['ldap']['url']
+ expected_config['ldap']['user_filter'] = (
+ new_config['ldap']['user_filter'])
+ expected_full_config = copy.deepcopy(expected_config)
+ expected_config['ldap'].pop('password')
+ res = self.domain_config_api.get_config(self.domain['id'])
+ self.assertEqual(expected_config, res)
+ # The sensitive option should still exist
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(expected_full_config, res)
+
+ # Try updating a single whitelisted option
+ self.domain_config_api.delete_config(self.domain['id'])
+ self.domain_config_api.create_config(self.domain['id'], config)
+ new_config = {'url': uuid.uuid4().hex}
+ res = self.domain_config_api.update_config(
+ self.domain['id'], new_config, group='ldap', option='url')
+
+ # Make sure whitelisted and full config is updated
+ expected_whitelisted_config = copy.deepcopy(config)
+ expected_whitelisted_config['ldap']['url'] = new_config['url']
+ expected_full_config = copy.deepcopy(expected_whitelisted_config)
+ expected_whitelisted_config['ldap'].pop('password')
+ self.assertEqual(expected_whitelisted_config, res)
+ res = self.domain_config_api.get_config(self.domain['id'])
+ self.assertEqual(expected_whitelisted_config, res)
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(expected_full_config, res)
+
+ # Try updating a single sensitive option
+ self.domain_config_api.delete_config(self.domain['id'])
+ self.domain_config_api.create_config(self.domain['id'], config)
+ new_config = {'password': uuid.uuid4().hex}
+ res = self.domain_config_api.update_config(
+ self.domain['id'], new_config, group='ldap', option='password')
+ # The whitelisted config should not have changed...
+ expected_whitelisted_config = copy.deepcopy(config)
+ expected_full_config = copy.deepcopy(config)
+ expected_whitelisted_config['ldap'].pop('password')
+ self.assertEqual(expected_whitelisted_config, res)
+ res = self.domain_config_api.get_config(self.domain['id'])
+ self.assertEqual(expected_whitelisted_config, res)
+ expected_full_config['ldap']['password'] = new_config['password']
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ # ...but the sensitive piece should have.
+ self.assertEqual(expected_full_config, res)
+
+ def test_update_invalid_partial_domain_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ # An extra group, when specifying one group should fail
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config, group='ldap')
+ # An extra option, when specifying one option should fail
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config['ldap'],
+ group='ldap', option='url')
+
+ # Now try the right number of groups/options, but ones that don't
+ # match the config provided
+ config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config, group='identity')
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config['ldap'], group='ldap',
+ option='url')
+
+ # Now some valid groups/options, but ones that are not in the
+ # existing config
+ config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ config_wrong_group = {'identity': {'driver': uuid.uuid4().hex}}
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.update_config,
+ self.domain['id'], config_wrong_group,
+ group='identity')
+ config_wrong_option = {'url': uuid.uuid4().hex}
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.update_config,
+ self.domain['id'], config_wrong_option,
+ group='ldap', option='url')
+
+ # And finally just some bad groups/options
+ bad_group = uuid.uuid4().hex
+ config = {bad_group: {'user': uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config, group=bad_group,
+ option='user')
+ bad_option = uuid.uuid4().hex
+ config = {'ldap': {bad_option: uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.update_config,
+ self.domain['id'], config, group='ldap',
+ option=bad_option)
+
+ def test_create_invalid_domain_config(self):
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.create_config,
+ self.domain['id'], {})
+ config = {uuid.uuid4().hex: uuid.uuid4().hex}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.create_config,
+ self.domain['id'], config)
+ config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.create_config,
+ self.domain['id'], config)
+ config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.create_config,
+ self.domain['id'], config)
+ # Try an option that IS in the standard conf, but is neither
+ # whitelisted nor marked as sensitive
+ config = {'identity': {'user_tree_dn': uuid.uuid4().hex}}
+ self.assertRaises(exception.InvalidDomainConfig,
+ self.domain_config_api.create_config,
+ self.domain['id'], config)
+
+ def test_delete_invalid_partial_domain_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ # Try deleting a group not in the config
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.delete_config,
+ self.domain['id'], group='identity')
+ # Try deleting an option not in the config
+ self.assertRaises(exception.DomainConfigNotFound,
+ self.domain_config_api.delete_config,
+ self.domain['id'],
+ group='ldap', option='user_tree_dn')
+
+ def test_sensitive_substitution_in_domain_config(self):
+ # Create a config that contains a whitelisted option that requires
+ # substitution of a sensitive option.
+ config = {'ldap': {'url': 'my_url/%(password)s',
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+
+ # Read back the config with the internal method and ensure that the
+ # substitution has taken place.
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ expected_url = (
+ config['ldap']['url'] % {'password': config['ldap']['password']})
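+ # the expected value mirrors the manager's behaviour: standard
+ # Python %-formatting with the sensitive options as the mapping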
+ self.assertEqual(expected_url, res['ldap']['url'])
+
+ def test_invalid_sensitive_substitution_in_domain_config(self):
+ """Check that invalid substitutions raise warnings."""
+ mock_log = mock.Mock()
+
+ invalid_option_config = {
+ 'ldap': {'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+
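+ # Each template below is malformed in a different way: a misspelled
+ # key, an unterminated '%(', a missing conversion type and an
+ # unsupported conversion type.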
+ for invalid_option in ['my_url/%(passssword)s',
+ 'my_url/%(password',
+ 'my_url/%(password)',
+ 'my_url/%(password)d']:
+ invalid_option_config['ldap']['url'] = invalid_option
+ self.domain_config_api.create_config(
+ self.domain['id'], invalid_option_config)
+
+ with mock.patch('keystone.resource.core.LOG', mock_log):
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ mock_log.warning.assert_any_call(mock.ANY)
+ self.assertEqual(
+ invalid_option_config['ldap']['url'], res['ldap']['url'])
+
+ def test_escaped_sequence_in_domain_config(self):
+ """Check that escaped '%(' doesn't get interpreted."""
+ mock_log = mock.Mock()
+
+ escaped_option_config = {
+ 'ldap': {'url': 'my_url/%%(password)s',
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+
+ self.domain_config_api.create_config(
+ self.domain['id'], escaped_option_config)
+
+ with mock.patch('keystone.resource.core.LOG', mock_log):
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertFalse(mock_log.warning.called)
+ # The escaping '%' should have been removed
+ self.assertEqual('my_url/%(password)s', res['ldap']['url'])
+
+ @unit.skip_if_cache_disabled('domain_config')
+ def test_cache_layer_get_sensitive_config(self):
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ # cache the result
+ res = self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id'])
+ self.assertEqual(config, res)
+
+ # delete, bypassing domain config manager api
+ self.domain_config_api.delete_config_options(self.domain['id'])
+ self.domain_config_api.delete_config_options(self.domain['id'],
+ sensitive=True)
+
+ self.assertDictEqual(
+ res, self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id']))
+ self.domain_config_api.get_config_with_sensitive_info.invalidate(
+ self.domain_config_api, self.domain['id'])
+ self.assertDictEqual(
+ {},
+ self.domain_config_api.get_config_with_sensitive_info(
+ self.domain['id']))
+
+ def test_delete_domain_deletes_configs(self):
+ """Test domain deletion clears the domain configs."""
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_tree_dn': uuid.uuid4().hex,
+ 'password': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(domain['id'], config)
+
+ # Now delete the domain
+ domain['enabled'] = False
+ self.resource_api.update_domain(domain['id'], domain)
+ self.resource_api.delete_domain(domain['id'])
+
+ # Check domain configs have also been deleted
+ self.assertRaises(
+ exception.DomainConfigNotFound,
+ self.domain_config_api.get_config,
+ domain['id'])
+
+ # get_config_with_sensitive_info does not raise an exception when
+ # the config is empty; it just returns an empty dict
+ self.assertDictEqual(
+ {},
+ self.domain_config_api.get_config_with_sensitive_info(
+ domain['id']))
+
+ def test_config_registration(self):
+ type = uuid.uuid4().hex
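+ # obtain_registration behaves like a first-come lock keyed on
+ # 'type': the first domain to claim a type holds it until it is
+ # explicitly released.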
+ self.domain_config_api.obtain_registration(
+ self.domain['id'], type)
+ self.domain_config_api.release_registration(
+ self.domain['id'], type=type)
+
+ # Make sure that once someone has it, nobody else can get it.
+ # This includes the domain that already holds it.
+ self.domain_config_api.obtain_registration(
+ self.domain['id'], type)
+ self.assertFalse(
+ self.domain_config_api.obtain_registration(
+ self.domain['id'], type))
+
+ # Make sure we can read who does have it
+ self.assertEqual(
+ self.domain['id'],
+ self.domain_config_api.read_registration(type))
+
+ # Make sure releasing it is silent if the domain specified doesn't
+ # have the registration
+ domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ self.resource_api.create_domain(domain2['id'], domain2)
+ self.domain_config_api.release_registration(
+ domain2['id'], type=type)
+
+ # If nobody has the type registered, then trying to read it should
+ # raise ConfigRegistrationNotFound
+ self.domain_config_api.release_registration(
+ self.domain['id'], type=type)
+ self.assertRaises(exception.ConfigRegistrationNotFound,
+ self.domain_config_api.read_registration,
+ type)
+
+ # Finally check multiple registrations are cleared if you free the
+ # registration without specifying the type
+ type2 = uuid.uuid4().hex
+ self.domain_config_api.obtain_registration(
+ self.domain['id'], type)
+ self.domain_config_api.obtain_registration(
+ self.domain['id'], type2)
+ self.domain_config_api.release_registration(self.domain['id'])
+ self.assertRaises(exception.ConfigRegistrationNotFound,
+ self.domain_config_api.read_registration,
+ type)
+ self.assertRaises(exception.ConfigRegistrationNotFound,
+ self.domain_config_api.read_registration,
+ type2)
diff --git a/keystone-moon/keystone/tests/unit/rest.py b/keystone-moon/keystone/tests/unit/rest.py
index 35b47e2b..512c301d 100644
--- a/keystone-moon/keystone/tests/unit/rest.py
+++ b/keystone-moon/keystone/tests/unit/rest.py
@@ -61,7 +61,7 @@ class RestfulTestCase(unit.TestCase):
# Will need to reset the plug-ins
self.addCleanup(setattr, auth_controllers, 'AUTH_METHODS', {})
- self.useFixture(database.Database())
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
self.load_backends()
self.load_fixtures(default_fixtures)
@@ -114,11 +114,10 @@ class RestfulTestCase(unit.TestCase):
example::
- self.assertResponseStatus(response, 204)
+ self.assertResponseStatus(response, http_client.NO_CONTENT)
"""
self.assertEqual(
- response.status_code,
- expected_status,
+ expected_status, response.status_code,
'Status code %s is not %s, as expected\n\n%s' %
(response.status_code, expected_status, response.body))
@@ -133,9 +132,9 @@ class RestfulTestCase(unit.TestCase):
Subclasses can override this function based on the expected response.
"""
- self.assertEqual(response.status_code, expected_status)
+ self.assertEqual(expected_status, response.status_code)
error = response.result['error']
- self.assertEqual(error['code'], response.status_code)
+ self.assertEqual(response.status_code, error['code'])
self.assertIsNotNone(error.get('title'))
def _to_content_type(self, body, headers, content_type=None):
@@ -146,7 +145,11 @@ class RestfulTestCase(unit.TestCase):
headers['Accept'] = 'application/json'
if body:
headers['Content-Type'] = 'application/json'
- return jsonutils.dumps(body)
+ # NOTE(davechen): dump the body to bytes since WSGI requires
+ # the response body to be bytestrings.
+ # See PEP 3333:
+ # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
+ return jsonutils.dump_as_bytes(body)
def _from_content_type(self, response, content_type=None):
"""Attempt to decode JSON and XML automatically, if detected."""
@@ -213,6 +216,17 @@ class RestfulTestCase(unit.TestCase):
r = self.public_request(method='POST', path='/v2.0/tokens', body=body)
return self._get_token_id(r)
+ def get_admin_token(self):
+ return self._get_token({
+ 'auth': {
+ 'passwordCredentials': {
+ 'username': self.user_reqadmin['name'],
+ 'password': self.user_reqadmin['password']
+ },
+ 'tenantId': default_fixtures.SERVICE_TENANT_ID
+ }
+ })
+
def get_unscoped_token(self):
"""Convenience method so that we can test authenticated requests."""
return self._get_token({
diff --git a/keystone-moon/keystone/tests/unit/schema/__init__.py b/keystone-moon/keystone/tests/unit/schema/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/schema/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/schema/v2.py b/keystone-moon/keystone/tests/unit/schema/v2.py
new file mode 100644
index 00000000..ed260a00
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/schema/v2.py
@@ -0,0 +1,161 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import copy
+
+from keystone.common import validation
+from keystone.common.validation import parameter_types
+from keystone.common.validation import validators
+
+
+_project_properties = {
+ 'id': parameter_types.id_string,
+ 'name': parameter_types.name,
+ 'enabled': parameter_types.boolean,
+ 'description': validation.nullable(parameter_types.description),
+}
+
+_token_properties = {
+ 'audit_ids': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ 'minItems': 1,
+ 'maxItems': 2,
+ },
+ 'id': {'type': 'string'},
+ 'expires': {'type': 'string'},
+ 'issued_at': {'type': 'string'},
+ 'tenant': {
+ 'type': 'object',
+ 'properties': _project_properties,
+ 'required': ['id', 'name', 'enabled'],
+ 'additionalProperties': False,
+ },
+}
+
+_role_properties = {
+ 'name': parameter_types.name,
+}
+
+_user_properties = {
+ 'id': parameter_types.id_string,
+ 'name': parameter_types.name,
+ 'username': parameter_types.name,
+ 'roles': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': _role_properties,
+ 'required': ['name'],
+ 'additionalProperties': False,
+ },
+ },
+ 'roles_links': {
+ 'type': 'array',
+ 'maxItems': 0,
+ },
+}
+
+_metadata_properties = {
+ 'is_admin': {'type': 'integer'},
+ 'roles': {
+ 'type': 'array',
+ 'items': {'type': 'string'},
+ },
+}
+
+_endpoint_properties = {
+ 'id': {'type': 'string'},
+ 'adminURL': parameter_types.url,
+ 'internalURL': parameter_types.url,
+ 'publicURL': parameter_types.url,
+ 'region': {'type': 'string'},
+}
+
+_service_properties = {
+ 'type': {'type': 'string'},
+ 'name': parameter_types.name,
+ 'endpoints_links': {
+ 'type': 'array',
+ 'maxItems': 0,
+ },
+ 'endpoints': {
+ 'type': 'array',
+ 'minItems': 1,
+ 'items': {
+ 'type': 'object',
+ 'properties': _endpoint_properties,
+ 'required': ['id', 'publicURL'],
+ 'additionalProperties': False,
+ },
+ },
+}
+
+_base_access_properties = {
+ 'metadata': {
+ 'type': 'object',
+ 'properties': _metadata_properties,
+ 'required': ['is_admin', 'roles'],
+ 'additionalProperties': False,
+ },
+ 'serviceCatalog': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': _service_properties,
+ 'required': ['name', 'type', 'endpoints_links', 'endpoints'],
+ 'additionalProperties': False,
+ },
+ },
+ 'token': {
+ 'type': 'object',
+ 'properties': _token_properties,
+ 'required': ['audit_ids', 'id', 'expires', 'issued_at'],
+ 'additionalProperties': False,
+ },
+ 'user': {
+ 'type': 'object',
+ 'properties': _user_properties,
+ 'required': ['id', 'name', 'username', 'roles', 'roles_links'],
+ 'additionalProperties': False,
+ },
+}
+
+_unscoped_access_properties = copy.deepcopy(_base_access_properties)
+unscoped_metadata = _unscoped_access_properties['metadata']
+unscoped_metadata['properties']['roles']['maxItems'] = 0
+_unscoped_access_properties['user']['properties']['roles']['maxItems'] = 0
+_unscoped_access_properties['serviceCatalog']['maxItems'] = 0
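+# An unscoped token must carry no roles and an empty service catalog,
+# hence the maxItems=0 constraints above.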
+
+_scoped_access_properties = copy.deepcopy(_base_access_properties)
+_scoped_access_properties['metadata']['properties']['roles']['minItems'] = 1
+_scoped_access_properties['serviceCatalog']['minItems'] = 1
+_scoped_access_properties['user']['properties']['roles']['minItems'] = 1
+
+base_token_schema = {
+ 'type': 'object',
+ 'required': ['metadata', 'user', 'serviceCatalog', 'token'],
+ 'additionalProperties': False,
+}
+
+unscoped_token_schema = copy.deepcopy(base_token_schema)
+unscoped_token_schema['properties'] = _unscoped_access_properties
+
+scoped_token_schema = copy.deepcopy(base_token_schema)
+scoped_token_schema['properties'] = _scoped_access_properties
+
+# Validator objects
+unscoped_validator = validators.SchemaValidator(unscoped_token_schema)
+scoped_validator = validators.SchemaValidator(scoped_token_schema)
diff --git a/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py b/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
index 24fc82dd..79065863 100644
--- a/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
+++ b/keystone-moon/keystone/tests/unit/test_associate_project_endpoint_extension.py
@@ -15,24 +15,25 @@
import copy
import uuid
+import mock
+from oslo_log import versionutils
from six.moves import http_client
from testtools import matchers
+from keystone.contrib.endpoint_filter import routers
+from keystone.tests import unit
from keystone.tests.unit import test_v3
-class TestExtensionCase(test_v3.RestfulTestCase):
-
- EXTENSION_NAME = 'endpoint_filter'
- EXTENSION_TO_ADD = 'endpoint_filter_extension'
+class EndpointFilterTestCase(test_v3.RestfulTestCase):
def config_overrides(self):
- super(TestExtensionCase, self).config_overrides()
+ super(EndpointFilterTestCase, self).config_overrides()
self.config_fixture.config(
group='catalog', driver='endpoint_filter.sql')
def setUp(self):
- super(TestExtensionCase, self).setUp()
+ super(EndpointFilterTestCase, self).setUp()
self.default_request_url = (
'/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
@@ -40,7 +41,17 @@ class TestExtensionCase(test_v3.RestfulTestCase):
'endpoint_id': self.endpoint_id})
-class EndpointFilterCRUDTestCase(TestExtensionCase):
+class EndpointFilterDeprecateTestCase(test_v3.RestfulTestCase):
+
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_exception_happens(self, mock_deprecator):
+ routers.EndpointFilterExtension(mock.ANY)
+ mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("Remove endpoint_filter_extension from", args[1])
+
+
+class EndpointFilterCRUDTestCase(EndpointFilterTestCase):
def test_create_endpoint_project_association(self):
"""PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
@@ -48,8 +59,7 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
Valid endpoint and project id test case.
"""
- self.put(self.default_request_url,
- expected_status=204)
+ self.put(self.default_request_url)
def test_create_endpoint_project_association_with_invalid_project(self):
"""PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
@@ -82,8 +92,7 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
"""
self.put(self.default_request_url,
- body={'project_id': self.default_domain_project_id},
- expected_status=204)
+ body={'project_id': self.default_domain_project_id})
def test_check_endpoint_project_association(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
@@ -91,13 +100,11 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
Valid project and endpoint id test case.
"""
- self.put(self.default_request_url,
- expected_status=204)
+ self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
def test_check_endpoint_project_association_with_invalid_project(self):
"""HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
@@ -169,8 +176,7 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
"""
r = self.get('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
- {'endpoint_id': self.endpoint_id},
- expected_status=200)
+ {'endpoint_id': self.endpoint_id})
self.assertValidProjectListResponse(r, expected_length=0)
def test_list_projects_associated_with_invalid_endpoint(self):
@@ -193,8 +199,7 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.default_domain_project_id,
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
def test_remove_endpoint_project_association_with_invalid_project(self):
"""DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
@@ -226,35 +231,167 @@ class EndpointFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
association_url = ('/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' %
{'endpoint_id': self.endpoint_id})
- r = self.get(association_url, expected_status=200)
+ r = self.get(association_url)
self.assertValidProjectListResponse(r, expected_length=1)
self.delete('/projects/%(project_id)s' % {
'project_id': self.default_domain_project_id})
- r = self.get(association_url, expected_status=200)
+ r = self.get(association_url)
self.assertValidProjectListResponse(r, expected_length=0)
def test_endpoint_project_association_cleanup_when_endpoint_deleted(self):
self.put(self.default_request_url)
association_url = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' % {
'project_id': self.default_domain_project_id}
- r = self.get(association_url, expected_status=200)
+ r = self.get(association_url)
self.assertValidEndpointListResponse(r, expected_length=1)
self.delete('/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id})
- r = self.get(association_url, expected_status=200)
+ r = self.get(association_url)
self.assertValidEndpointListResponse(r, expected_length=0)
+ @unit.skip_if_cache_disabled('catalog')
+ def test_create_endpoint_project_association_invalidates_cache(self):
+ # NOTE(davechen): create another endpoint which will be added to
+ # default project, this should be done at first since
+ # `create_endpoint` will also invalidate cache.
+ endpoint_id2 = uuid.uuid4().hex
+ endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
+ region_id=self.region_id,
+ interface='public',
+ id=endpoint_id2)
+ self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
+
+ # create endpoint project association.
+ self.put(self.default_request_url)
-class EndpointFilterTokenRequestTestCase(TestExtensionCase):
+ # only the endpoint associated above should come back in the catalog.
+ user_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ # there is only one endpoint associated with the default project.
+ self.assertEqual(1, len(catalog[0]['endpoints']))
+ self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
+
+ # add the second endpoint to the default project, bypassing the
+ # catalog_api manager.
+ self.catalog_api.driver.add_endpoint_to_project(
+ endpoint_id2,
+ self.default_domain_project_id)
+
+ # but we still get back only one endpoint, since the catalog is
+ # served from the cache and the cache hasn't been invalidated.
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertEqual(1, len(catalog[0]['endpoints']))
+
+ # remove endpoint2 from the default project again, still bypassing
+ # the manager, so it can be re-added through the API below.
+ self.catalog_api.driver.remove_endpoint_from_project(
+ endpoint_id2,
+ self.default_domain_project_id)
+
+ # add the second endpoint to the default project; this could be done
+ # by calling the catalog_api manager directly, but call the REST API
+ # instead for consistency.
+ self.put('/OS-EP-FILTER/projects/%(project_id)s'
+ '/endpoints/%(endpoint_id)s' % {
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': endpoint_id2})
+
+ # should get back two endpoints since the cache was invalidated when
+ # the second endpoint was added to the default project.
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertEqual(2, len(catalog[0]['endpoints']))
+
+ ep_id_list = [catalog[0]['endpoints'][0]['id'],
+ catalog[0]['endpoints'][1]['id']]
+ self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_remove_endpoint_from_project_invalidates_cache(self):
+ endpoint_id2 = uuid.uuid4().hex
+ endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
+ region_id=self.region_id,
+ interface='public',
+ id=endpoint_id2)
+ self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
+ # create endpoint project association.
+ self.put(self.default_request_url)
+
+ # add the second endpoint to the default project.
+ self.put('/OS-EP-FILTER/projects/%(project_id)s'
+ '/endpoints/%(endpoint_id)s' % {
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': endpoint_id2})
+
+ # fetch the catalog to prime the cache; both endpoints should be
+ # associated with the default project.
+ user_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ # there are two endpoints associated with the default project.
+ ep_id_list = [catalog[0]['endpoints'][0]['id'],
+ catalog[0]['endpoints'][1]['id']]
+ self.assertEqual(2, len(catalog[0]['endpoints']))
+ self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+
+ # remove endpoint2 from the default project, bypassing the
+ # catalog_api manager.
+ self.catalog_api.driver.remove_endpoint_from_project(
+ endpoint_id2,
+ self.default_domain_project_id)
+
+ # but we still get back two endpoints from the cache, since the
+ # catalog is served from the cache and the cache hasn't been
+ # invalidated.
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertEqual(2, len(catalog[0]['endpoints']))
+
+ # add endpoint2 back to the default project, still bypassing the
+ # manager, so it can be removed through the API below.
+ self.catalog_api.driver.add_endpoint_to_project(
+ endpoint_id2,
+ self.default_domain_project_id)
+
+ # remove endpoint2 from the default project; this could be done by
+ # calling the catalog_api manager directly, but call the REST API
+ # instead for consistency.
+ self.delete('/OS-EP-FILTER/projects/%(project_id)s'
+ '/endpoints/%(endpoint_id)s' % {
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': endpoint_id2})
+
+ # should only get back one endpoint since the cache has been
+ # invalidated after the endpoint project association was removed.
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertEqual(1, len(catalog[0]['endpoints']))
+ self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
+
+
+class EndpointFilterTokenRequestTestCase(EndpointFilterTestCase):
def test_project_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from project scoped token filtered."""
# create a project to work with
- ref = self.new_project_ref(domain_id=self.domain_id)
+ ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
@@ -276,8 +413,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
# attempt to authenticate without requesting a project
auth_data = self.build_authentication_request(
@@ -289,7 +425,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
- self.assertEqual(r.result['token']['project']['id'], project['id'])
+ self.assertEqual(project['id'], r.result['token']['project']['id'])
def test_default_scoped_token_using_endpoint_filter(self):
"""Verify endpoints from default scoped token filtered."""
@@ -297,8 +433,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
@@ -310,16 +445,24 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
+
+ # Ensure name of the service exists
+ self.assertIn('name', r.result['token']['catalog'][0])
+
+ # region and region_id should be the same in endpoints
+ endpoint = r.result['token']['catalog'][0]['endpoints'][0]
+ self.assertIn('region', endpoint)
+ self.assertIn('region_id', endpoint)
+ self.assertEqual(endpoint['region'], endpoint['region_id'])
def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
"""Verify endpoint filter does not affect no catalog."""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
@@ -329,8 +472,8 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.assertValidProjectScopedTokenResponse(
r,
require_catalog=False)
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
def test_invalid_endpoint_project_association(self):
"""Verify an invalid endpoint-project association is handled."""
@@ -338,28 +481,26 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
# create a second temporary endpoint
- self.endpoint_id2 = uuid.uuid4().hex
- self.endpoint2 = self.new_endpoint_ref(service_id=self.service_id)
- self.endpoint2['id'] = self.endpoint_id2
- self.catalog_api.create_endpoint(
- self.endpoint_id2,
- self.endpoint2.copy())
+ endpoint_id2 = uuid.uuid4().hex
+ endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
+ region_id=self.region_id,
+ interface='public',
+ id=endpoint_id2)
+ self.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id2},
- expected_status=204)
+ 'endpoint_id': endpoint_id2})
# remove the temporary reference
# this will create inconsistency in the endpoint filter table
# which is fixed during the catalog creation for token request
- self.catalog_api.delete_endpoint(self.endpoint_id2)
+ self.catalog_api.delete_endpoint(endpoint_id2)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
@@ -371,8 +512,8 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
require_catalog=True,
endpoint_filter=True,
ep_filter_assoc=1)
- self.assertEqual(r.result['token']['project']['id'],
- self.project['id'])
+ self.assertEqual(self.project['id'],
+ r.result['token']['project']['id'])
def test_disabled_endpoint(self):
"""Test that a disabled endpoint is handled."""
@@ -380,8 +521,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
# Add a disabled endpoint to the default project.
@@ -399,8 +539,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': disabled_endpoint_id},
- expected_status=204)
+ 'endpoint_id': disabled_endpoint_id})
# Authenticate to get token with catalog
auth_data = self.build_authentication_request(
@@ -416,7 +555,9 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
def test_multiple_endpoint_project_associations(self):
def _create_an_endpoint():
- endpoint_ref = self.new_endpoint_ref(service_id=self.service_id)
+ endpoint_ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
r = self.post('/endpoints', body={'endpoint': endpoint_ref})
return r.result['endpoint']['id']
@@ -429,13 +570,11 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': endpoint_id1},
- expected_status=204)
+ 'endpoint_id': endpoint_id1})
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': endpoint_id2},
- expected_status=204)
+ 'endpoint_id': endpoint_id2})
# there should be only two endpoints in token catalog
auth_data = self.build_authentication_request(
@@ -454,8 +593,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
- expected_status=204)
+ 'endpoint_id': self.endpoint_id})
auth_data = self.build_authentication_request(
user_id=self.user['id'],
@@ -474,7 +612,7 @@ class EndpointFilterTokenRequestTestCase(TestExtensionCase):
auth_catalog.result['catalog'])
-class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
+class JsonHomeTests(EndpointFilterTestCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
'1.0/rel/endpoint_projects': {
@@ -545,7 +683,7 @@ class JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):
}
-class EndpointGroupCRUDTestCase(TestExtensionCase):
+class EndpointGroupCRUDTestCase(EndpointFilterTestCase):
DEFAULT_ENDPOINT_GROUP_BODY = {
'endpoint_group': {
@@ -638,7 +776,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
url = '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {
'endpoint_group_id': endpoint_group_id}
- self.head(url, expected_status=200)
+ self.head(url, expected_status=http_client.OK)
def test_check_invalid_endpoint_group(self):
"""HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
@@ -832,7 +970,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.project_id)
url = self._get_project_endpoint_group_url(
endpoint_group_id, self.project_id)
- self.head(url, expected_status=200)
+ self.head(url, expected_status=http_client.OK)
def test_check_endpoint_group_to_project_with_invalid_project_id(self):
"""Test HEAD with an invalid endpoint group and project association."""
@@ -891,7 +1029,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
"""
# create a service
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
response = self.post(
'/services',
body={'service': service_ref})
@@ -899,10 +1037,10 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
service_id = response.result['service']['id']
# create an endpoint
- endpoint_ref = self.new_endpoint_ref(service_id=service_id)
- response = self.post(
- '/endpoints',
- body={'endpoint': endpoint_ref})
+ endpoint_ref = unit.new_endpoint_ref(service_id=service_id,
+ interface='public',
+ region_id=self.region_id)
+ response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint_id = response.result['endpoint']['id']
# create an endpoint group
@@ -929,7 +1067,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
"""
# create a temporary service
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
response = self.post('/services', body={'service': service_ref})
service_id2 = response.result['service']['id']
@@ -957,7 +1095,16 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
'project_id': self.default_domain_project_id}
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
- self.assertEqual(len(endpoints), 2)
+ self.assertEqual(2, len(endpoints))
+
+        # Ensure the catalog includes the endpoints from the endpoint_group
+        # project association; this is needed when a project-scoped token is
+        # issued and the "endpoint_filter.sql" backend driver is in place.
+ user_id = uuid.uuid4().hex
+ catalog_list = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+ self.assertEqual(2, len(catalog_list))
# Now remove project endpoint group association
url = self._get_project_endpoint_group_url(
@@ -971,7 +1118,12 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
r = self.get(endpoints_url)
endpoints = self.assertValidEndpointListResponse(r)
- self.assertEqual(len(endpoints), 1)
+ self.assertEqual(1, len(endpoints))
+
+ catalog_list = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+ self.assertEqual(1, len(catalog_list))
def test_endpoint_group_project_cleanup_with_project(self):
# create endpoint group
@@ -979,7 +1131,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create new project and associate with endpoint_group
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': project_ref})
project = self.assertValidProjectResponse(r, project_ref)
url = self._get_project_endpoint_group_url(endpoint_group_id,
@@ -1001,7 +1153,7 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
# create new project and associate with endpoint_group
- project_ref = self.new_project_ref(domain_id=self.domain_id)
+ project_ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': project_ref})
project = self.assertValidProjectResponse(r, project_ref)
url = self._get_project_endpoint_group_url(endpoint_group_id,
@@ -1049,6 +1201,153 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
self.get(project_endpoint_group_url,
expected_status=http_client.NOT_FOUND)
+ @unit.skip_if_cache_disabled('catalog')
+ def test_add_endpoint_group_to_project_invalidates_catalog_cache(self):
+        # create another endpoint with the 'admin' interface, which matches
+        # the 'filters' definition in the endpoint group; two endpoints
+        # should then be returned when retrieving the v3 catalog, if the
+        # cache works as expected.
+        # this must be done first, since `create_endpoint` will also
+        # invalidate the cache.
+ endpoint_id2 = uuid.uuid4().hex
+ endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
+ region_id=self.region_id,
+ interface='admin',
+ id=endpoint_id2)
+ self.catalog_api.create_endpoint(endpoint_id2, endpoint2)
+
+ # create a project and endpoint association.
+ self.put(self.default_request_url)
+
+ # there is only one endpoint associated with the default project.
+ user_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
+
+ # create an endpoint group.
+ endpoint_group_id = self._create_valid_endpoint_group(
+ self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # add the endpoint group to the default project, bypassing the
+        # catalog_api manager.
+ self.catalog_api.driver.add_endpoint_group_to_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+        # we still get back only one endpoint, since the catalog is pulled
+        # from the cache.
+ invalid_catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertThat(invalid_catalog[0]['endpoints'],
+ matchers.HasLength(1))
+ self.assertEqual(catalog, invalid_catalog)
+
+        # remove the endpoint group from the default project, and add it
+        # again via the catalog_api manager.
+ self.catalog_api.driver.remove_endpoint_group_from_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+        # add the endpoint group to the default project.
+ self.catalog_api.add_endpoint_group_to_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ # now, it will return 2 endpoints since the cache has been
+ # invalidated.
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
+
+ ep_id_list = [catalog[0]['endpoints'][0]['id'],
+ catalog[0]['endpoints'][1]['id']]
+ self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+
+ @unit.skip_if_cache_disabled('catalog')
+ def test_remove_endpoint_group_from_project_invalidates_cache(self):
+        # create another endpoint with the 'admin' interface, which matches
+        # the 'filters' definition in the endpoint group; two endpoints
+        # should then be returned when retrieving the v3 catalog, but only
+        # one should remain after the endpoint group's deletion, if the
+        # cache works as expected.
+        # this must be done first, since `create_endpoint` will also
+        # invalidate the cache.
+ endpoint_id2 = uuid.uuid4().hex
+ endpoint2 = unit.new_endpoint_ref(service_id=self.service_id,
+ region_id=self.region_id,
+ interface='admin',
+ id=endpoint_id2)
+ self.catalog_api.create_endpoint(endpoint_id2, endpoint2)
+
+        # create a project and endpoint association.
+ self.put(self.default_request_url)
+
+ # create an endpoint group.
+ endpoint_group_id = self._create_valid_endpoint_group(
+ self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY)
+
+        # add the endpoint group to the default project.
+ self.catalog_api.add_endpoint_group_to_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+        # should get back two endpoints: one from the endpoint-project
+        # association, the other from the endpoint_group-project
+        # association.
+ user_id = uuid.uuid4().hex
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
+
+ ep_id_list = [catalog[0]['endpoints'][0]['id'],
+ catalog[0]['endpoints'][1]['id']]
+ self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+
+        # remove the endpoint_group project association, bypassing the
+        # catalog_api manager.
+ self.catalog_api.driver.remove_endpoint_group_from_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+        # still get back two endpoints, since the catalog is pulled from
+        # the cache and the cache hasn't been invalidated.
+ invalid_catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertThat(invalid_catalog[0]['endpoints'],
+ matchers.HasLength(2))
+ self.assertEqual(catalog, invalid_catalog)
+
+        # add back the endpoint_group project association, then remove it
+        # via the catalog_api manager.
+ self.catalog_api.driver.add_endpoint_group_to_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+ self.catalog_api.remove_endpoint_group_from_project(
+ endpoint_group_id,
+ self.default_domain_project_id)
+
+ # should only get back one endpoint since the cache has been
+ # invalidated after the endpoint_group project association was
+ # removed.
+ catalog = self.catalog_api.get_v3_catalog(
+ user_id,
+ self.default_domain_project_id)
+
+ self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
+ self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])
+
def _create_valid_endpoint_group(self, url, body):
r = self.post(url, body=body)
return r.result['endpoint_group']['id']
@@ -1072,13 +1371,15 @@ class EndpointGroupCRUDTestCase(TestExtensionCase):
"""Creates an endpoint associated with service and project."""
if not service_id:
# create a new service
- service_ref = self.new_service_ref()
+ service_ref = unit.new_service_ref()
response = self.post(
'/services', body={'service': service_ref})
service_id = response.result['service']['id']
# create endpoint
- endpoint_ref = self.new_endpoint_ref(service_id=service_id)
+ endpoint_ref = unit.new_endpoint_ref(service_id=service_id,
+ interface='public',
+ region_id=self.region_id)
response = self.post('/endpoints', body={'endpoint': endpoint_ref})
endpoint = response.result['endpoint']
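
A note on the cache tests added above: they rely on a deliberate asymmetry between the catalog_api manager and its driver. Writes routed through the manager invalidate the cached catalog, while writes that go straight to catalog_api.driver leave stale entries behind, which is exactly what the tests assert. Below is a minimal sketch of that read-through/invalidate-on-write pattern; the Manager and Driver classes are illustrative assumptions, not keystone's real implementation.

    class Driver(object):
        """Hypothetical store; stands in for the real catalog driver."""
        def __init__(self):
            self.endpoints = {}

        def list_endpoints(self, project_id):
            return list(self.endpoints.get(project_id, []))

        def add_endpoint(self, project_id, endpoint_id):
            self.endpoints.setdefault(project_id, []).append(endpoint_id)

    class Manager(object):
        """Hypothetical manager with a read-through cache."""
        def __init__(self, driver):
            self.driver = driver
            self._cache = {}

        def get_catalog(self, project_id):
            # read-through: populate the cache on a miss
            if project_id not in self._cache:
                self._cache[project_id] = self.driver.list_endpoints(project_id)
            return self._cache[project_id]

        def add_endpoint(self, project_id, endpoint_id):
            # manager-level writes invalidate the cached entry
            self.driver.add_endpoint(project_id, endpoint_id)
            self._cache.pop(project_id, None)

    mgr = Manager(Driver())
    mgr.driver.add_endpoint('p1', 'ep1')     # bypasses invalidation
    assert mgr.get_catalog('p1') == ['ep1']  # cached now
    mgr.driver.add_endpoint('p1', 'ep2')     # cache is now stale
    assert mgr.get_catalog('p1') == ['ep1']  # stale read, as in the tests
    mgr.add_endpoint('p1', 'ep3')            # manager write invalidates
    assert mgr.get_catalog('p1') == ['ep1', 'ep2', 'ep3']
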
diff --git a/keystone-moon/keystone/tests/unit/test_auth.py b/keystone-moon/keystone/tests/unit/test_auth.py
index 6dd52c8a..6f44b316 100644
--- a/keystone-moon/keystone/tests/unit/test_auth.py
+++ b/keystone-moon/keystone/tests/unit/test_auth.py
@@ -14,6 +14,8 @@
import copy
import datetime
+import random
+import string
import uuid
import mock
@@ -26,11 +28,12 @@ from testtools import matchers
from keystone import assignment
from keystone import auth
from keystone.common import authorization
-from keystone import config
+from keystone.common import config
from keystone import exception
from keystone.models import token_model
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
+from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
from keystone import token
from keystone.token import provider
@@ -39,9 +42,10 @@ from keystone import trust
CONF = cfg.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
-DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
-HOST_URL = 'http://keystone:5001'
+HOST = ''.join(random.choice(string.ascii_lowercase) for x in range(
+ random.randint(5, 15)))
+HOST_URL = 'http://%s' % HOST
def _build_user_auth(token=None, user_id=None, username=None,
@@ -127,9 +131,7 @@ class AuthBadRequests(AuthTest):
context={}, auth={})
def test_empty_remote_user(self):
- """Verify that _authenticate_external() raises exception if
- REMOTE_USER is set as the empty string.
- """
+ """Verify exception is raised when REMOTE_USER is an empty string."""
context = {'environment': {'REMOTE_USER': ''}}
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
@@ -223,6 +225,36 @@ class AuthBadRequests(AuthTest):
self.controller.authenticate,
{}, body_dict)
+ def test_authenticate_fails_if_project_unsafe(self):
+ """Verify authenticate to a project with unsafe name fails."""
+        # Start with url name restrictions off, so we can create the
+        # unsafe-named project
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='off')
+ unsafe_name = 'i am not / safe'
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id, name=unsafe_name)
+ self.resource_api.create_project(project['id'], project)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_foo['id'], project['id'], self.role_member['id'])
+ no_context = {}
+
+ body_dict = _build_user_auth(
+ username=self.user_foo['name'],
+ password=self.user_foo['password'],
+ tenant_name=project['name'])
+
+        # Since name url restriction is off, we should be able to
+        # authenticate
+ self.controller.authenticate(no_context, body_dict)
+
+ # Set the name url restriction to strict and we should fail to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='strict')
+ self.assertRaises(exception.Unauthorized,
+ self.controller.authenticate,
+ no_context, body_dict)
+
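
For readers unfamiliar with the project_name_url_safe option exercised above: "URL-safe" roughly means the name survives percent-encoding unchanged. The check below is a hypothetical illustration of that idea, not keystone's actual validator.

    from six.moves.urllib import parse

    def is_url_safe(name):
        # a name is URL-safe if quoting it, with nothing exempted, leaves
        # it unchanged -- i.e. it contains no '/', whitespace, etc.
        return parse.quote(name, safe='') == name

    assert not is_url_safe('i am not / safe')
    assert is_url_safe('demo')
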
class AuthWithToken(AuthTest):
def test_unscoped_token(self):
@@ -286,7 +318,7 @@ class AuthWithToken(AuthTest):
def test_auth_scoped_token_bad_project_with_debug(self):
"""Authenticating with an invalid project fails."""
- # Bug 1379952 reports poor user feedback, even in debug mode,
+ # Bug 1379952 reports poor user feedback, even in insecure_debug mode,
# when the user accidentally passes a project name as an ID.
# This test intentionally does exactly that.
body_dict = _build_user_auth(
@@ -294,8 +326,8 @@ class AuthWithToken(AuthTest):
password=self.user_foo['password'],
tenant_id=self.tenant_bar['name'])
- # with debug enabled, this produces a friendly exception.
- self.config_fixture.config(debug=True)
+ # with insecure_debug enabled, this produces a friendly exception.
+ self.config_fixture.config(debug=True, insecure_debug=True)
e = self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
@@ -308,7 +340,7 @@ class AuthWithToken(AuthTest):
def test_auth_scoped_token_bad_project_without_debug(self):
"""Authenticating with an invalid project fails."""
- # Bug 1379952 reports poor user feedback, even in debug mode,
+ # Bug 1379952 reports poor user feedback, even in insecure_debug mode,
# when the user accidentally passes a project name as an ID.
# This test intentionally does exactly that.
body_dict = _build_user_auth(
@@ -316,8 +348,8 @@ class AuthWithToken(AuthTest):
password=self.user_foo['password'],
tenant_id=self.tenant_bar['name'])
- # with debug disabled, authentication failure details are suppressed.
- self.config_fixture.config(debug=False)
+ # with insecure_debug disabled (the default), authentication failure
+ # details are suppressed.
e = self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
@@ -336,9 +368,9 @@ class AuthWithToken(AuthTest):
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
- domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain1 = unit.new_domain_ref()
self.resource_api.create_domain(domain1['id'], domain1)
- new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=domain1['id'])
new_group = self.identity_api.create_group(new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
@@ -428,10 +460,10 @@ class AuthWithToken(AuthTest):
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
- project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID}
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project1['id'], project1)
- role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
+ role_one = unit.new_role_ref(id='role_one')
self.role_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
@@ -464,12 +496,10 @@ class AuthWithToken(AuthTest):
no_context = {}
admin_context = dict(is_admin=True, query_string={})
- project = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID}
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project['id'], project)
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project['id'], role['id'])
@@ -642,6 +672,27 @@ class AuthWithToken(AuthTest):
token_id=token_2_id)
+class FernetAuthWithToken(AuthWithToken):
+ def config_overrides(self):
+ super(FernetAuthWithToken, self).config_overrides()
+ self.config_fixture.config(group='token', provider='fernet')
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+ def test_token_auth_with_binding(self):
+ self.config_fixture.config(group='token', bind=['kerberos'])
+ body_dict = _build_user_auth()
+ self.assertRaises(exception.NotImplemented,
+ self.controller.authenticate,
+ self.context_with_remote_user,
+ body_dict)
+
+ def test_revoke_with_no_audit_info(self):
+ self.skipTest('Fernet with v2.0 and revocation is broken')
+
+ def test_deleting_role_revokes_token(self):
+ self.skipTest('Fernet with v2.0 and revocation is broken')
+
+
class AuthWithPasswordCredentials(AuthTest):
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
@@ -682,7 +733,7 @@ class AuthWithPasswordCredentials(AuthTest):
{}, body_dict)
def test_authenticate_blank_password_credentials(self):
- """Sending empty dict as passwordCredentials raises a 400 error."""
+        """Sending empty dict as passwordCredentials raises 400 Bad Request."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
@@ -708,27 +759,16 @@ class AuthWithPasswordCredentials(AuthTest):
# user in auth data is from the new default domain.
# 1) Create a new domain.
- new_domain_id = uuid.uuid4().hex
- new_domain = {
- 'description': uuid.uuid4().hex,
- 'enabled': True,
- 'id': new_domain_id,
- 'name': uuid.uuid4().hex,
- }
+ new_domain = unit.new_domain_ref()
+ new_domain_id = new_domain['id']
self.resource_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
- new_user_password = uuid.uuid4().hex
- new_user = {
- 'name': self.user_foo['name'],
- 'domain_id': new_domain_id,
- 'password': new_user_password,
- 'email': 'foo@bar2.com',
- }
-
- new_user = self.identity_api.create_user(new_user)
+ new_user = unit.create_user(self.identity_api,
+ name=self.user_foo['name'],
+ domain_id=new_domain_id)
# 3) Update the default_domain_id config option to the new domain
@@ -739,7 +779,7 @@ class AuthWithPasswordCredentials(AuthTest):
body_dict = _build_user_auth(
username=self.user_foo['name'],
- password=new_user_password)
+ password=new_user['password'])
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate({}, body_dict)
@@ -856,7 +896,16 @@ class AuthWithTrust(AuthTest):
token_id=token_id,
token_data=self.token_provider_api.validate_token(token_id))
auth_context = authorization.token_to_auth_context(token_ref)
- return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
+        # NOTE(gyee): if public_endpoint and admin_endpoint are not set, which
+        # is the default, the base url will be constructed from the WSGI
+        # environ values wsgi.url_scheme, SERVER_NAME, SERVER_PORT, and
+        # SCRIPT_NAME. We have to set them in the context so the base url can
+        # be constructed accordingly.
+ return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context,
+ 'wsgi.url_scheme': 'http',
+ 'SCRIPT_NAME': '/v3',
+ 'SERVER_PORT': '80',
+ 'SERVER_NAME': HOST},
'token_id': token_id,
'host_url': HOST_URL}
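
The NOTE above refers to standard PEP 3333 URL reconstruction. Here is a rough sketch of how a base url can be derived from those WSGI environ values; it is illustrative of the mechanism, not keystone's exact code.

    def base_url_from_environ(environ):
        scheme = environ['wsgi.url_scheme']
        host = environ.get('HTTP_HOST')
        if not host:
            host = environ['SERVER_NAME']
            port = environ['SERVER_PORT']
            # omit the port when it is the default for the scheme
            if (scheme, port) not in (('http', '80'), ('https', '443')):
                host = '%s:%s' % (host, port)
        return '%s://%s%s' % (scheme, host, environ.get('SCRIPT_NAME', ''))

    environ = {'wsgi.url_scheme': 'http', 'SERVER_NAME': 'keystone',
               'SERVER_PORT': '80', 'SCRIPT_NAME': '/v3'}
    assert base_url_from_environ(environ) == 'http://keystone/v3'
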
@@ -945,8 +994,9 @@ class AuthWithTrust(AuthTest):
expires_at="2010-06-04T08:44:31.999999Z")
def test_create_trust_without_project_id(self):
- """Verify that trust can be created without project id and
- token can be generated with that trust.
+ """Verify that trust can be created without project id.
+
+ Also, token can be generated with that trust.
"""
unscoped_token = self.get_unscoped_token(self.trustor['name'])
context = self._create_auth_context(
@@ -977,9 +1027,7 @@ class AuthWithTrust(AuthTest):
self.assertIn(role['id'], role_ids)
def test_get_trust_without_auth_context(self):
- """Verify that a trust cannot be retrieved when the auth context is
- missing.
- """
+ """Verify a trust cannot be retrieved if auth context is missing."""
unscoped_token = self.get_unscoped_token(self.trustor['name'])
context = self._create_auth_context(
unscoped_token['access']['token']['id'])
@@ -1001,8 +1049,6 @@ class AuthWithTrust(AuthTest):
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], new_trust['trustee_user_id'])
- # TODO(ayoung): Endpoints
-
def test_create_trust_impersonation(self):
new_trust = self.create_trust(self.sample_data, self.trustor['name'])
self.assertEqual(self.trustor['id'], new_trust['trustor_user_id'])
@@ -1131,7 +1177,7 @@ class AuthWithTrust(AuthTest):
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
- exception.Forbidden,
+ exception.Unauthorized,
self.controller.authenticate, {}, request_body)
def test_delete_trust_revokes_token(self):
@@ -1211,35 +1257,6 @@ class AuthWithTrust(AuthTest):
new_trust['id'])['trust']
self.assertEqual(3, trust['remaining_uses'])
- def test_v2_trust_token_contains_trustor_user_id_and_impersonation(self):
- new_trust = self.create_trust(self.sample_data, self.trustor['name'])
- auth_response = self.fetch_v2_token_from_trust(new_trust)
-
- self.assertEqual(new_trust['trustee_user_id'],
- auth_response['access']['trust']['trustee_user_id'])
- self.assertEqual(new_trust['trustor_user_id'],
- auth_response['access']['trust']['trustor_user_id'])
- self.assertEqual(new_trust['impersonation'],
- auth_response['access']['trust']['impersonation'])
- self.assertEqual(new_trust['id'],
- auth_response['access']['trust']['id'])
-
- validate_response = self.controller.validate_token(
- context=dict(is_admin=True, query_string={}),
- token_id=auth_response['access']['token']['id'])
- self.assertEqual(
- new_trust['trustee_user_id'],
- validate_response['access']['trust']['trustee_user_id'])
- self.assertEqual(
- new_trust['trustor_user_id'],
- validate_response['access']['trust']['trustor_user_id'])
- self.assertEqual(
- new_trust['impersonation'],
- validate_response['access']['trust']['impersonation'])
- self.assertEqual(
- new_trust['id'],
- validate_response['access']['trust']['id'])
-
def disable_user(self, user):
user['enabled'] = False
self.identity_api.update_user(user['id'], user)
@@ -1328,34 +1345,21 @@ class AuthCatalog(unit.SQLDriverOverrides, AuthTest):
def _create_endpoints(self):
def create_region(**kwargs):
- ref = {'id': uuid.uuid4().hex}
- ref.update(kwargs)
+ ref = unit.new_region_ref(**kwargs)
self.catalog_api.create_region(ref)
return ref
def create_endpoint(service_id, region, **kwargs):
- id_ = uuid.uuid4().hex
- ref = {
- 'id': id_,
- 'interface': 'public',
- 'region_id': region,
- 'service_id': service_id,
- 'url': 'http://localhost/%s' % uuid.uuid4().hex,
- }
- ref.update(kwargs)
- self.catalog_api.create_endpoint(id_, ref)
- return ref
+ endpoint = unit.new_endpoint_ref(region_id=region,
+ service_id=service_id, **kwargs)
+
+ self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ return endpoint
# Create a service for use with the endpoints.
def create_service(**kwargs):
- id_ = uuid.uuid4().hex
- ref = {
- 'id': id_,
- 'name': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- }
- ref.update(kwargs)
- self.catalog_api.create_service(id_, ref)
+ ref = unit.new_service_ref(**kwargs)
+ self.catalog_api.create_service(ref['id'], ref)
return ref
enabled_service_ref = create_service(enabled=True)
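
Much of this file's churn is mechanical: hand-rolled dicts are replaced by the unit.new_*_ref() factories. The removed code above shows the essence of the pattern; below is a standalone sketch of such a factory, under the assumption that each helper fills required fields with unique defaults and lets callers override via kwargs (field names mirror the hand-rolled dicts replaced above).

    import uuid

    def new_service_ref(**kwargs):
        # unique defaults for every required field; callers override only
        # what a given test cares about
        ref = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'type': uuid.uuid4().hex,
            'enabled': True,
        }
        ref.update(kwargs)
        return ref

    enabled_service = new_service_ref(enabled=True)
    disabled_service = new_service_ref(enabled=False)
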
diff --git a/keystone-moon/keystone/tests/unit/test_auth_plugin.py b/keystone-moon/keystone/tests/unit/test_auth_plugin.py
index 8dd22aa8..f0862ed6 100644
--- a/keystone-moon/keystone/tests/unit/test_auth_plugin.py
+++ b/keystone-moon/keystone/tests/unit/test_auth_plugin.py
@@ -183,7 +183,7 @@ class TestMapped(unit.TestCase):
# make sure Mapped plugin got invoked with the correct payload
((context, auth_payload, auth_context),
kwargs) = authenticate.call_args
- self.assertEqual(auth_payload['protocol'], method_name)
+ self.assertEqual(method_name, auth_payload['protocol'])
def test_supporting_multiple_methods(self):
for method_name in ['saml2', 'openid', 'x509']:
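
Two conventions are at work in this one-line change: mock's call_args unpacks into an (args, kwargs) pair, and assertEqual arguments are ordered (expected, observed) so failure messages read the right way around. A self-contained sketch:

    import mock

    stub = mock.Mock()
    stub('ctx', {'protocol': 'saml2'}, {'auth': 'context'})

    # call_args is a (positional_args, keyword_args) pair
    ((context, auth_payload, auth_context), kwargs) = stub.call_args
    assert auth_payload['protocol'] == 'saml2'
    assert kwargs == {}

    # convention: assertEqual(expected, observed), so a mismatch reports
    # the expected value first, e.g.
    #     self.assertEqual('saml2', auth_payload['protocol'])
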
diff --git a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
index 6c2181aa..f72cad63 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_endpoint_policy.py
@@ -18,6 +18,7 @@ from six.moves import range
from testtools import matchers
from keystone import exception
+from keystone.tests import unit
class PolicyAssociationTests(object):
@@ -51,11 +52,11 @@ class PolicyAssociationTests(object):
5 - region 2, Service 0
"""
-
def new_endpoint(region_id, service_id):
- endpoint = {'id': uuid.uuid4().hex, 'interface': 'test',
- 'region_id': region_id, 'service_id': service_id,
- 'url': '/url'}
+ endpoint = unit.new_endpoint_ref(interface='test',
+ region_id=region_id,
+ service_id=service_id,
+ url='/url')
self.endpoint.append(self.catalog_api.create_endpoint(
endpoint['id'], endpoint))
@@ -63,18 +64,18 @@ class PolicyAssociationTests(object):
self.endpoint = []
self.service = []
self.region = []
+
+ parent_region_id = None
for i in range(3):
- policy = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
- 'blob': {'data': uuid.uuid4().hex}}
+ policy = unit.new_policy_ref()
self.policy.append(self.policy_api.create_policy(policy['id'],
policy))
- service = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
+ service = unit.new_service_ref()
self.service.append(self.catalog_api.create_service(service['id'],
service))
- region = {'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex}
- # Link the 3 regions together as a hierarchy, [0] at the top
- if i != 0:
- region['parent_region_id'] = self.region[i - 1]['id']
+ region = unit.new_region_ref(parent_region_id=parent_region_id)
+ # Link the regions together as a hierarchy, [0] at the top
+ parent_region_id = region['id']
self.region.append(self.catalog_api.create_region(region))
new_endpoint(self.region[0]['id'], self.service[0]['id'])
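
The rewritten loop threads the regions into a parent chain by carrying forward the previous iteration's ID. The same linking pattern in isolation; new_region_ref below is an illustrative stand-in for the unit helper:

    import uuid

    def new_region_ref(parent_region_id=None):
        return {'id': uuid.uuid4().hex,
                'parent_region_id': parent_region_id}

    regions = []
    parent_region_id = None
    for _ in range(3):
        region = new_region_ref(parent_region_id=parent_region_id)
        parent_region_id = region['id']  # the next region hangs off this one
        regions.append(region)

    assert regions[0]['parent_region_id'] is None            # [0] at the top
    assert regions[2]['parent_region_id'] == regions[1]['id']
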
diff --git a/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py b/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py
index 6b691e5a..e6635e18 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_id_mapping_sql.py
@@ -19,6 +19,7 @@ from testtools import matchers
from keystone.common import sql
from keystone.identity.mapping_backends import mapping
+from keystone.tests import unit
from keystone.tests.unit import identity_mapping as mapping_sql
from keystone.tests.unit import test_backend_sql
@@ -42,9 +43,9 @@ class SqlIDMapping(test_backend_sql.SqlTests):
def load_sample_data(self):
self.addCleanup(self.clean_sample_data)
- domainA = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domainA = unit.new_domain_ref()
self.domainA = self.resource_api.create_domain(domainA['id'], domainA)
- domainB = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domainB = unit.new_domain_ref()
self.domainB = self.resource_api.create_domain(domainB['id'], domainB)
def clean_sample_data(self):
diff --git a/keystone-moon/keystone/tests/unit/test_backend_kvs.py b/keystone-moon/keystone/tests/unit/test_backend_kvs.py
index 7406192a..36af1c36 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_kvs.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_kvs.py
@@ -14,20 +14,17 @@
import datetime
import uuid
-from oslo_config import cfg
from oslo_utils import timeutils
import six
from keystone.common import utils
from keystone import exception
from keystone.tests import unit
-from keystone.tests.unit import test_backend
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit.token import test_backends as token_tests
-CONF = cfg.CONF
-
-
-class KvsToken(unit.TestCase, test_backend.TokenTests):
+class KvsToken(unit.TestCase, token_tests.TokenTests):
def setUp(self):
super(KvsToken, self).setUp()
self.load_backends()
@@ -103,64 +100,11 @@ class KvsToken(unit.TestCase, test_backend.TokenTests):
self.assertEqual(expected_user_token_list, user_token_list)
-class KvsCatalog(unit.TestCase, test_backend.CatalogTests):
- def setUp(self):
- super(KvsCatalog, self).setUp()
- self.load_backends()
- self._load_fake_catalog()
-
- def config_overrides(self):
- super(KvsCatalog, self).config_overrides()
- self.config_fixture.config(group='catalog', driver='kvs')
-
- def _load_fake_catalog(self):
- self.catalog_foobar = self.catalog_api.driver._create_catalog(
- 'foo', 'bar',
- {'RegionFoo': {'service_bar': {'foo': 'bar'}}})
-
- def test_get_catalog_404(self):
- # FIXME(dolph): this test should be moved up to test_backend
- # FIXME(dolph): exceptions should be UserNotFound and ProjectNotFound
- self.assertRaises(exception.NotFound,
- self.catalog_api.get_catalog,
- uuid.uuid4().hex,
- 'bar')
-
- self.assertRaises(exception.NotFound,
- self.catalog_api.get_catalog,
- 'foo',
- uuid.uuid4().hex)
-
- def test_get_catalog(self):
- catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
- self.assertDictEqual(catalog_ref, self.catalog_foobar)
-
- def test_get_catalog_endpoint_disabled(self):
- # This test doesn't apply to KVS because with the KVS backend the
- # application creates the catalog (including the endpoints) for each
- # user and project. Whether endpoints are enabled or disabled isn't
- # a consideration.
- f = super(KvsCatalog, self).test_get_catalog_endpoint_disabled
- self.assertRaises(exception.NotFound, f)
-
- def test_get_v3_catalog_endpoint_disabled(self):
- # There's no need to have disabled endpoints in the kvs catalog. Those
- # endpoints should just be removed from the store. This just tests
- # what happens currently when the super impl is called.
- f = super(KvsCatalog, self).test_get_v3_catalog_endpoint_disabled
- self.assertRaises(exception.NotFound, f)
-
- def test_list_regions_filtered_by_parent_region_id(self):
- self.skipTest('KVS backend does not support hints')
-
- def test_service_filtering(self):
- self.skipTest("kvs backend doesn't support filtering")
-
-
class KvsTokenCacheInvalidation(unit.TestCase,
- test_backend.TokenCacheInvalidation):
+ token_tests.TokenCacheInvalidation):
def setUp(self):
super(KvsTokenCacheInvalidation, self).setUp()
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
self.load_backends()
self._create_test_data()
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap.py b/keystone-moon/keystone/tests/unit/test_backend_ldap.py
index d96ec376..cf618633 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_ldap.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_ldap.py
@@ -20,11 +20,15 @@ import uuid
import ldap
import mock
from oslo_config import cfg
+from oslo_log import versionutils
+from oslotest import mockpatch
import pkg_resources
+from six.moves import http_client
from six.moves import range
from testtools import matchers
from keystone.common import cache
+from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
from keystone import exception
@@ -32,11 +36,14 @@ from keystone import identity
from keystone.identity.mapping_backends import mapping as map
from keystone import resource
from keystone.tests import unit
+from keystone.tests.unit.assignment import test_backends as assignment_tests
from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.identity import test_backends as identity_tests
from keystone.tests.unit import identity_mapping as mapping_sql
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit.ksfixtures import ldapdb
-from keystone.tests.unit import test_backend
+from keystone.tests.unit.resource import test_backends as resource_tests
+from keystone.tests.unit.utils import wip
CONF = cfg.CONF
@@ -115,7 +122,9 @@ def create_group_container(identity_api):
('ou', ['Groups'])])
-class BaseLDAPIdentity(test_backend.IdentityTests):
+class BaseLDAPIdentity(identity_tests.IdentityTests,
+ assignment_tests.AssignmentTests,
+ resource_tests.ResourceTests):
def setUp(self):
super(BaseLDAPIdentity, self).setUp()
@@ -123,6 +132,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.load_backends()
self.load_fixtures(default_fixtures)
+ self.config_fixture.config(group='os_inherit', enabled=False)
def _get_domain_fixture(self):
"""Domains in LDAP are read-only, so just return the static one."""
@@ -141,6 +151,13 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
return config_files
+ def new_user_ref(self, domain_id, project_id=None, **kwargs):
+ ref = unit.new_user_ref(domain_id=domain_id, project_id=project_id,
+ **kwargs)
+ if 'id' not in kwargs:
+ del ref['id']
+ return ref
+
def get_user_enabled_vals(self, user):
user_dn = (
self.identity_api.driver.user._id_to_dn_string(user['id']))
@@ -156,17 +173,13 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
return None
def test_build_tree(self):
- """Regression test for building the tree names
- """
+ """Regression test for building the tree names."""
user_api = identity.backends.ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn)
def test_configurable_allowed_user_actions(self):
- user = {'name': u'fäké1',
- 'password': u'fäképass1',
- 'domain_id': CONF.identity.default_domain_id,
- 'tenants': ['bar']}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
self.identity_api.get_user(user['id'])
@@ -185,10 +198,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
driver.user.allow_update = False
driver.user.allow_delete = False
- user = {'name': u'fäké1',
- 'password': u'fäképass1',
- 'domain_id': CONF.identity.default_domain_id,
- 'tenants': ['bar']}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
user)
@@ -215,7 +225,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_user_filter(self):
user_ref = self.identity_api.get_user(self.user_foo['id'])
self.user_foo.pop('password')
- self.assertDictEqual(user_ref, self.user_foo)
+ self.assertDictEqual(self.user_foo, user_ref)
driver = self.identity_api._select_identity_driver(
user_ref['domain_id'])
@@ -227,6 +237,20 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.identity_api.get_user,
self.user_foo['id'])
+ def test_list_users_by_name_and_with_filter(self):
+        # confirm that the user is not exposed when it does not match the
+        # filter setting in the conf, even if it is requested by name via
+        # the user list
+ hints = driver_hints.Hints()
+ hints.add_filter('name', self.user_foo['name'])
+ domain_id = self.user_foo['domain_id']
+ driver = self.identity_api._select_identity_driver(domain_id)
+ driver.user.ldap_filter = ('(|(cn=%s)(cn=%s))' %
+ (self.user_sna['id'], self.user_two['id']))
+ users = self.identity_api.list_users(
+ domain_scope=self._set_domain_scope(domain_id),
+ hints=hints)
+ self.assertEqual(0, len(users))
+
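
The new test is also the first in this class to use the driver_hints API: a Hints object accumulates filters that capable backends apply server-side. A minimal usage sketch based only on the calls shown here (the helper name is illustrative):

    from keystone.common import driver_hints

    def list_users_named(identity_api, name, domain_scope=None):
        # collect an exact-match filter on 'name'; list_users() passes the
        # hints down so the backend (LDAP here) can apply them itself
        hints = driver_hints.Hints()
        hints.add_filter('name', name)
        return identity_api.list_users(domain_scope=domain_scope, hints=hints)
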
def test_remove_role_grant_from_user_and_project(self):
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
@@ -234,7 +258,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
roles_ref = self.assignment_api.list_grants(
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'])
- self.assertDictEqual(roles_ref[0], self.role_member)
+ self.assertDictEqual(self.role_member, roles_ref[0])
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
@@ -251,11 +275,9 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_get_and_remove_role_grant_by_group_and_project(self):
new_domain = self._get_domain_fixture()
- new_group = {'domain_id': new_domain['id'],
- 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
new_group = self.identity_api.create_group(new_group)
- new_user = {'name': 'new_user', 'enabled': True,
- 'domain_id': new_domain['id']}
+ new_user = self.new_user_ref(domain_id=new_domain['id'])
new_user = self.identity_api.create_user(new_user)
self.identity_api.add_user_to_group(new_user['id'],
new_group['id'])
@@ -273,7 +295,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertNotEmpty(roles_ref)
- self.assertDictEqual(roles_ref[0], self.role_member)
+ self.assertDictEqual(self.role_member, roles_ref[0])
self.assignment_api.delete_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
@@ -289,7 +311,44 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
role_id='member')
def test_get_and_remove_role_grant_by_group_and_domain(self):
- self.skipTest('N/A: LDAP does not support multiple domains')
+ # TODO(henry-nash): We should really rewrite the tests in
+ # unit.resource.test_backends to be more flexible as to where the
+ # domains are sourced from, so that we would not need to override such
+ # tests here. This is raised as bug 1373865.
+ new_domain = self._get_domain_fixture()
+        new_group = unit.new_group_ref(domain_id=new_domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ new_user = self.new_user_ref(domain_id=new_domain['id'])
+ new_user = self.identity_api.create_user(new_user)
+ self.identity_api.add_user_to_group(new_user['id'],
+ new_group['id'])
+
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+
+ self.assignment_api.create_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertDictEqual(self.role_member, roles_ref[0])
+
+ self.assignment_api.delete_grant(group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
+ roles_ref = self.assignment_api.list_grants(
+ group_id=new_group['id'],
+ domain_id=new_domain['id'])
+ self.assertEqual(0, len(roles_ref))
+ self.assertRaises(exception.NotFound,
+ self.assignment_api.delete_grant,
+ group_id=new_group['id'],
+ domain_id=new_domain['id'],
+ role_id='member')
def test_get_role_assignment_by_domain_not_found(self):
self.skipTest('N/A: LDAP does not support multiple domains')
@@ -327,10 +386,12 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_delete_group_with_user_project_domain_links(self):
self.skipTest('N/A: LDAP does not support multiple domains')
+ def test_list_role_assignment_containing_names(self):
+ self.skipTest('N/A: LDAP does not support multiple domains')
+
def test_list_projects_for_user(self):
domain = self._get_domain_fixture()
- user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain['id'], 'enabled': True}
+ user1 = self.new_user_ref(domain_id=domain['id'])
user1 = self.identity_api.create_user(user1)
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertThat(user_projects, matchers.HasLength(0))
@@ -347,11 +408,10 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertThat(user_projects, matchers.HasLength(2))
# Now, check number of projects through groups
- user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain['id'], 'enabled': True}
+ user2 = self.new_user_ref(domain_id=domain['id'])
user2 = self.identity_api.create_user(user2)
- group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
self.identity_api.add_user_to_group(user2['id'], group1['id'])
@@ -377,12 +437,11 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_projects_for_user_and_groups(self):
domain = self._get_domain_fixture()
# Create user1
- user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain['id'], 'enabled': True}
+ user1 = self.new_user_ref(domain_id=domain['id'])
user1 = self.identity_api.create_user(user1)
# Create new group for user1
- group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
# Add user1 to group1
@@ -412,20 +471,17 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_projects_for_user_with_grants(self):
domain = self._get_domain_fixture()
- new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
- 'enabled': True, 'domain_id': domain['id']}
+ new_user = self.new_user_ref(domain_id=domain['id'])
new_user = self.identity_api.create_user(new_user)
- group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
- group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group2 = unit.new_group_ref(domain_id=domain['id'])
group2 = self.identity_api.create_group(group2)
- project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain['id']}
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain['id']}
+ project2 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project2['id'], project2)
self.identity_api.add_user_to_group(new_user['id'],
@@ -496,14 +552,11 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_role_assignments_unfiltered(self):
new_domain = self._get_domain_fixture()
- new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'enabled': True, 'domain_id': new_domain['id']}
+ new_user = self.new_user_ref(domain_id=new_domain['id'])
new_user = self.identity_api.create_user(new_user)
- new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
new_group = self.identity_api.create_group(new_group)
- new_project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': new_domain['id']}
+ new_project = unit.new_project_ref(domain_id=new_domain['id'])
self.resource_api.create_project(new_project['id'], new_project)
        # First check how many role grants already exist
@@ -520,13 +573,6 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
after_assignments = len(self.assignment_api.list_role_assignments())
self.assertEqual(existing_assignments + 2, after_assignments)
- def test_list_role_assignments_filtered_by_role(self):
- # Domain roles are not supported by the LDAP Assignment backend
- self.assertRaises(
- exception.NotImplemented,
- super(BaseLDAPIdentity, self).
- test_list_role_assignments_filtered_by_role)
-
def test_list_role_assignments_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.ldapdb.clear()
@@ -534,12 +580,9 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.load_fixtures(default_fixtures)
new_domain = self._get_domain_fixture()
- new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'enabled': True, 'domain_id': new_domain['id']}
+ new_user = self.new_user_ref(domain_id=new_domain['id'])
new_user = self.identity_api.create_user(new_user)
- new_project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': new_domain['id']}
+ new_project = unit.new_project_ref(domain_id=new_domain['id'])
self.resource_api.create_project(new_project['id'], new_project)
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=new_project['id'],
@@ -558,8 +601,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.load_backends()
self.load_fixtures(default_fixtures)
- user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'enabled': True, 'domain_id': test_backend.DEFAULT_DOMAIN_ID}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
@@ -582,10 +624,8 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
are returned.
"""
-
# Create a group
- group = dict(name=uuid.uuid4().hex,
- domain_id=CONF.identity.default_domain_id)
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group)['id']
# Create a couple of users and add them to the group.
@@ -617,10 +657,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_list_group_members_when_no_members(self):
# List group members when there is no member in the group.
# No exception should be raised.
- group = {
- 'domain_id': CONF.identity.default_domain_id,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex}
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group = self.identity_api.create_group(group)
# If this doesn't raise, then the test is successful.
@@ -633,8 +670,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.load_fixtures(default_fixtures)
# Create a group
- group = dict(name=uuid.uuid4().hex,
- domain_id=CONF.identity.default_domain_id)
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group)['id']
# Create a user
@@ -651,30 +687,23 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertNotIn(dumb_id, user_ids)
def test_list_domains(self):
+        # We have more domains here than the parent class; check for the
+        # correct number of domains for the multildap backend configs.
+ domain1 = unit.new_domain_ref()
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ self.resource_api.create_domain(domain2['id'], domain2)
domains = self.resource_api.list_domains()
- self.assertEqual(
- [resource.calc_default_domain()],
- domains)
-
- def test_list_domains_non_default_domain_id(self):
- # If change the default_domain_id, the ID of the default domain
- # returned by list_domains changes is the new default_domain_id.
-
- new_domain_id = uuid.uuid4().hex
- self.config_fixture.config(group='identity',
- default_domain_id=new_domain_id)
-
- domains = self.resource_api.list_domains()
-
- self.assertEqual(new_domain_id, domains[0]['id'])
+        self.assertEqual(7, len(domains))
+        domain_ids = [domain.get('id') for domain in domains]
+ self.assertIn(CONF.identity.default_domain_id, domain_ids)
+ self.assertIn(domain1['id'], domain_ids)
+ self.assertIn(domain2['id'], domain_ids)
def test_authenticate_requires_simple_bind(self):
- user = {
- 'name': 'NO_META',
- 'domain_id': test_backend.DEFAULT_DOMAIN_ID,
- 'password': 'no_meta2',
- 'enabled': True,
- }
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
user['id'])
@@ -689,34 +718,54 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
user_id=user['id'],
password=None)
- # (spzala)The group and domain crud tests below override the standard ones
- # in test_backend.py so that we can exclude the update name test, since we
- # do not yet support the update of either group or domain names with LDAP.
- # In the tests below, the update is demonstrated by updating description.
- # Refer to bug 1136403 for more detail.
- def test_group_crud(self):
- group = {
- 'domain_id': CONF.identity.default_domain_id,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex}
+ # The group and domain CRUD tests below override the standard ones in
+ # unit.identity.test_backends.py so that we can exclude the update name
+ # test, since we do not (and will not) support the update of either group
+ # or domain names with LDAP. In the tests below, the update is tested by
+ # updating description.
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_group_crud(self, mock_deprecator):
+        # NOTE(stevemar): As of the Mitaka release, we now check that calls
+        # to the LDAP write functionality emit a deprecation warning.
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group = self.identity_api.create_group(group)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("create_group for the LDAP identity backend", args[1])
+
group_ref = self.identity_api.get_group(group['id'])
- self.assertDictEqual(group_ref, group)
+ self.assertDictEqual(group, group_ref)
group['description'] = uuid.uuid4().hex
self.identity_api.update_group(group['id'], group)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("update_group for the LDAP identity backend", args[1])
+
group_ref = self.identity_api.get_group(group['id'])
- self.assertDictEqual(group_ref, group)
+ self.assertDictEqual(group, group_ref)
self.identity_api.delete_group(group['id'])
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("delete_group for the LDAP identity backend", args[1])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
group['id'])
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_add_remove_user_group_deprecated(self, mock_deprecator):
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group = self.identity_api.create_group(group)
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user = self.identity_api.create_user(user)
+ self.identity_api.add_user_to_group(user['id'], group['id'])
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("add_user_to_group for the LDAP identity", args[1])
+
+ self.identity_api.remove_user_from_group(user['id'], group['id'])
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("remove_user_from_group for the LDAP identity", args[1])
+
@unit.skip_if_cache_disabled('identity')
def test_cache_layer_group_crud(self):
- group = {
- 'domain_id': CONF.identity.default_domain_id,
- 'name': uuid.uuid4().hex}
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group = self.identity_api.create_group(group)
# cache the result
group_ref = self.identity_api.get_group(group['id'])
@@ -731,9 +780,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group, group['id'])
- group = {
- 'domain_id': CONF.identity.default_domain_id,
- 'name': uuid.uuid4().hex}
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group = self.identity_api.create_group(group)
# cache the result
self.identity_api.get_group(group['id'])
@@ -749,11 +796,8 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
CONF.identity.default_domain_id)
driver.user.attribute_ignore = ['enabled', 'email',
'tenants', 'tenantId']
- user = {'name': u'fäké1',
- 'password': u'fäképass1',
- 'domain_id': CONF.identity.default_domain_id,
- 'default_project_id': 'maps_to_none',
- }
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id,
+ project_id='maps_to_none')
# If this doesn't raise, then the test is successful.
user = self.identity_api.create_user(user)
@@ -765,9 +809,8 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
                           'TrUe', 'FaLse']
for name in boolean_strings:
- user = {
- 'name': name,
- 'domain_id': CONF.identity.default_domain_id}
+ user = self.new_user_ref(name=name,
+ domain_id=CONF.identity.default_domain_id)
user_ref = self.identity_api.create_user(user)
user_info = self.identity_api.get_user(user_ref['id'])
self.assertEqual(name, user_info['name'])
@@ -786,10 +829,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
driver.user.attribute_ignore = ['enabled', 'email',
'tenants', 'tenantId']
- user = {'name': u'fäké1',
- 'password': u'fäképass1',
- 'domain_id': CONF.identity.default_domain_id,
- }
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_ref = self.identity_api.create_user(user)
@@ -818,19 +858,14 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
def test_user_id_comma(self):
"""Even if the user has a , in their ID, groups can be listed."""
-
# Create a user with a , in their ID
# NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
# Since we want to fake up this special ID, we'll squirt this
# direct into the driver and bypass the manager layer.
user_id = u'Doe, John'
- user = {
- 'id': user_id,
- 'name': self.getUniqueString(),
- 'password': self.getUniqueString(),
- 'domain_id': CONF.identity.default_domain_id,
- }
+ user = self.new_user_ref(id=user_id,
+ domain_id=CONF.identity.default_domain_id)
user = self.identity_api.driver.create_user(user_id, user)
# Now we'll use the manager to discover it, which will create a
@@ -843,13 +878,8 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
break
# Create a group
- group_id = uuid.uuid4().hex
- group = {
- 'id': group_id,
- 'name': self.getUniqueString(prefix='tuidc'),
- 'description': self.getUniqueString(),
- 'domain_id': CONF.identity.default_domain_id,
- }
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
+ group_id = group['id']
group = self.identity_api.driver.create_group(group_id, group)
# Now we'll use the manager to discover it, which will create a
# Public ID for it.
@@ -870,21 +900,15 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertThat(ref_list, matchers.Equals([group]))
def test_user_id_comma_grants(self):
- """Even if the user has a , in their ID, can get user and group grants.
- """
-
+ """List user and group grants, even with a comma in the user's ID."""
# Create a user with a , in their ID
# NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
# Since we want to fake up this special ID, we'll squirt this
# direct into the driver and bypass the manager layer
user_id = u'Doe, John'
- user = {
- 'id': user_id,
- 'name': self.getUniqueString(),
- 'password': self.getUniqueString(),
- 'domain_id': CONF.identity.default_domain_id,
- }
+ user = self.new_user_ref(id=user_id,
+ domain_id=CONF.identity.default_domain_id)
self.identity_api.driver.create_user(user_id, user)
# Now we'll use the manager to discover it, which will create a
@@ -943,8 +967,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
# There's no group fixture so create a group.
new_domain = self._get_domain_fixture()
- new_group = {'domain_id': new_domain['id'],
- 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=new_domain['id'])
new_group = self.identity_api.create_group(new_group)
# Attempt to disable the group.
@@ -959,39 +982,55 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertNotIn('enabled', group_info)
def test_project_enabled_ignored_disable_error(self):
- # When the server is configured so that the enabled attribute is
- # ignored for projects, projects cannot be disabled.
-
- self.config_fixture.config(group='ldap',
- project_attribute_ignore=['enabled'])
-
- # Need to re-load backends for the config change to take effect.
- self.load_backends()
-
- # Attempt to disable the project.
- self.assertRaises(exception.ForbiddenAction,
- self.resource_api.update_project,
- self.tenant_baz['id'], {'enabled': False})
-
- project_info = self.resource_api.get_project(self.tenant_baz['id'])
-
- # Unlike other entities, if 'enabled' is ignored then 'enabled' is
- # returned as part of the ref.
- self.assertIs(True, project_info['enabled'])
+ self.skipTest('Resource LDAP has been removed')
def test_list_role_assignment_by_domain(self):
"""Multiple domain assignments are not supported."""
self.assertRaises(
- (exception.Forbidden, exception.DomainNotFound),
+ (exception.Forbidden, exception.DomainNotFound,
+ exception.ValidationError),
super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain)
def test_list_role_assignment_by_user_with_domain_group_roles(self):
"""Multiple domain assignments are not supported."""
self.assertRaises(
- (exception.Forbidden, exception.DomainNotFound),
+ (exception.Forbidden, exception.DomainNotFound,
+ exception.ValidationError),
super(BaseLDAPIdentity, self).
test_list_role_assignment_by_user_with_domain_group_roles)
+ def test_domain_crud(self):
+ self.skipTest('Resource LDAP has been removed')
+
+ def test_list_role_assignment_using_sourced_groups_with_domains(self):
+ """Multiple domain assignments are not supported."""
+ self.assertRaises(
+ (exception.Forbidden, exception.ValidationError,
+ exception.DomainNotFound),
+ super(BaseLDAPIdentity, self).
+ test_list_role_assignment_using_sourced_groups_with_domains)
+
+ def test_create_project_with_domain_id_and_without_parent_id(self):
+ """Multiple domains are not supported."""
+ self.assertRaises(
+ exception.ValidationError,
+ super(BaseLDAPIdentity, self).
+ test_create_project_with_domain_id_and_without_parent_id)
+
+ def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
+ """Multiple domains are not supported."""
+ self.assertRaises(
+ exception.ValidationError,
+ super(BaseLDAPIdentity, self).
+ test_create_project_with_domain_id_mismatch_to_parent_domain)
+
+ def test_remove_foreign_assignments_when_deleting_a_domain(self):
+ """Multiple domains are not supported."""
+ self.assertRaises(
+ (exception.ValidationError, exception.DomainNotFound),
+ super(BaseLDAPIdentity,
+ self).test_remove_foreign_assignments_when_deleting_a_domain)
+
class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
@@ -1002,46 +1041,46 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.useFixture(database.Database())
super(LDAPIdentity, self).setUp()
_assert_backends(self,
- assignment='ldap',
+ assignment='sql',
identity='ldap',
- resource='ldap')
+ resource='sql')
def load_fixtures(self, fixtures):
        # Override super impl since we need to create the group container.
create_group_container(self.identity_api)
super(LDAPIdentity, self).load_fixtures(fixtures)
+ def test_list_domains(self):
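+        # With the LDAP resource backend removed, only the default domain
+        # (computed from the config) should be listed.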
+ domains = self.resource_api.list_domains()
+ self.assertEqual([resource.calc_default_domain()], domains)
+
def test_configurable_allowed_project_actions(self):
domain = self._get_domain_fixture()
- tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True,
- 'domain_id': domain['id']}
- self.resource_api.create_project(u'fäké1', tenant)
- tenant_ref = self.resource_api.get_project(u'fäké1')
- self.assertEqual(u'fäké1', tenant_ref['id'])
+ project = unit.new_project_ref(domain_id=domain['id'])
+ project = self.resource_api.create_project(project['id'], project)
+ project_ref = self.resource_api.get_project(project['id'])
+ self.assertEqual(project['id'], project_ref['id'])
- tenant['enabled'] = False
- self.resource_api.update_project(u'fäké1', tenant)
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
- self.resource_api.delete_project(u'fäké1')
+ self.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
- u'fäké1')
+ project['id'])
def test_configurable_subtree_delete(self):
self.config_fixture.config(group='ldap', allow_subtree_delete=True)
self.load_backends()
- project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id}
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project1['id'], project1)
- role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role1 = unit.new_role_ref()
self.role_api.create_role(role1['id'], role1)
- user1 = {'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'password': uuid.uuid4().hex,
- 'enabled': True}
+ user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user1 = self.identity_api.create_user(user1)
self.assignment_api.add_role_to_user_and_project(
@@ -1062,48 +1101,10 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.assertEqual(0, len(list))
def test_configurable_forbidden_project_actions(self):
- self.config_fixture.config(
- group='ldap', project_allow_create=False,
- project_allow_update=False, project_allow_delete=False)
- self.load_backends()
-
- domain = self._get_domain_fixture()
- tenant = {'id': u'fäké1', 'name': u'fäké1', 'domain_id': domain['id']}
- self.assertRaises(exception.ForbiddenAction,
- self.resource_api.create_project,
- u'fäké1',
- tenant)
-
- self.tenant_bar['enabled'] = False
- self.assertRaises(exception.ForbiddenAction,
- self.resource_api.update_project,
- self.tenant_bar['id'],
- self.tenant_bar)
- self.assertRaises(exception.ForbiddenAction,
- self.resource_api.delete_project,
- self.tenant_bar['id'])
+ self.skipTest('Resource LDAP has been removed')
def test_project_filter(self):
- tenant_ref = self.resource_api.get_project(self.tenant_bar['id'])
- self.assertDictEqual(tenant_ref, self.tenant_bar)
-
- self.config_fixture.config(group='ldap',
- project_filter='(CN=DOES_NOT_MATCH)')
- self.load_backends()
- # NOTE(morganfainberg): CONF.ldap.project_filter will not be
- # dynamically changed at runtime. This invalidate is a work-around for
- # the expectation that it is safe to change config values in tests that
- # could affect what the drivers would return up to the manager. This
- # solves this assumption when working with aggressive (on-create)
- # cache population.
- self.role_api.get_role.invalidate(self.role_api,
- self.role_member['id'])
- self.role_api.get_role(self.role_member['id'])
- self.resource_api.get_project.invalidate(self.resource_api,
- self.tenant_bar['id'])
- self.assertRaises(exception.ProjectNotFound,
- self.resource_api.get_project,
- self.tenant_bar['id'])
+ self.skipTest('Resource LDAP has been removed')
def test_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
@@ -1116,71 +1117,10 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
dumb_id)
def test_project_attribute_mapping(self):
- self.config_fixture.config(
- group='ldap', project_name_attribute='ou',
- project_desc_attribute='description',
- project_enabled_attribute='enabled')
- self.ldapdb.clear()
- self.load_backends()
- self.load_fixtures(default_fixtures)
- # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
- # CONF.ldap.project_desc_attribute, and
- # CONF.ldap.project_enabled_attribute will not be
- # dynamically changed at runtime. This invalidate is a work-around for
- # the expectation that it is safe to change config values in tests that
- # could affect what the drivers would return up to the manager. This
- # solves this assumption when working with aggressive (on-create)
- # cache population.
- self.resource_api.get_project.invalidate(self.resource_api,
- self.tenant_baz['id'])
- tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
- self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
- self.assertEqual(self.tenant_baz['name'], tenant_ref['name'])
- self.assertEqual(
- self.tenant_baz['description'],
- tenant_ref['description'])
- self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
-
- self.config_fixture.config(group='ldap',
- project_name_attribute='description',
- project_desc_attribute='ou')
- self.load_backends()
- # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
- # CONF.ldap.project_desc_attribute, and
- # CONF.ldap.project_enabled_attribute will not be
- # dynamically changed at runtime. This invalidate is a work-around for
- # the expectation that it is safe to change config values in tests that
- # could affect what the drivers would return up to the manager. This
- # solves this assumption when working with aggressive (on-create)
- # cache population.
- self.resource_api.get_project.invalidate(self.resource_api,
- self.tenant_baz['id'])
- tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
- self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
- self.assertEqual(self.tenant_baz['description'], tenant_ref['name'])
- self.assertEqual(self.tenant_baz['name'], tenant_ref['description'])
- self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
+ self.skipTest('Resource LDAP has been removed')
def test_project_attribute_ignore(self):
- self.config_fixture.config(
- group='ldap',
- project_attribute_ignore=['name', 'description', 'enabled'])
- self.ldapdb.clear()
- self.load_backends()
- self.load_fixtures(default_fixtures)
- # NOTE(morganfainberg): CONF.ldap.project_attribute_ignore will not be
- # dynamically changed at runtime. This invalidate is a work-around for
- # the expectation that it is safe to change configs values in tests
- # that could affect what the drivers would return up to the manager.
- # This solves this assumption when working with aggressive (on-create)
- # cache population.
- self.resource_api.get_project.invalidate(self.resource_api,
- self.tenant_baz['id'])
- tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
- self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
- self.assertNotIn('name', tenant_ref)
- self.assertNotIn('description', tenant_ref)
- self.assertNotIn('enabled', tenant_ref)
+ self.skipTest('Resource LDAP has been removed')
def test_user_enable_attribute_mask(self):
self.config_fixture.config(group='ldap', user_enabled_mask=2,
@@ -1189,8 +1129,7 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.load_backends()
self.load_fixtures(default_fixtures)
- user = {'name': u'fäké1', 'enabled': True,
- 'domain_id': CONF.identity.default_domain_id}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user_ref = self.identity_api.create_user(user)
@@ -1237,14 +1176,12 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.load_backends()
self.load_fixtures(default_fixtures)
- user1 = {'name': u'fäké1', 'enabled': True,
- 'domain_id': CONF.identity.default_domain_id}
+ user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
- user2 = {'name': u'fäké2', 'enabled': False,
- 'domain_id': CONF.identity.default_domain_id}
+ user2 = self.new_user_ref(enabled=False,
+ domain_id=CONF.identity.default_domain_id)
- user3 = {'name': u'fäké3',
- 'domain_id': CONF.identity.default_domain_id}
+ user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
# Ensure that the LDAP attribute is False for a newly created
# enabled user.
@@ -1473,15 +1410,28 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
group='ldap',
user_additional_attribute_mapping=['description:name'])
self.load_backends()
- user = {
- 'name': 'EXTRA_ATTRIBUTES',
- 'password': 'extra',
- 'domain_id': CONF.identity.default_domain_id
- }
+ user = self.new_user_ref(name='EXTRA_ATTRIBUTES',
+ password='extra',
+ domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
self.assertThat([user['name']], matchers.Equals(attrs['description']))
+ def test_user_description_attribute_mapping(self):
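+        # Use the LDAP displayName attribute to store the user's description.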
+ self.config_fixture.config(
+ group='ldap',
+ user_description_attribute='displayName')
+ self.load_backends()
+
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id,
+ displayName=uuid.uuid4().hex)
+ description = user['displayName']
+ user = self.identity_api.create_user(user)
+ res = self.identity_api.driver.user.get_all()
+
+ new_user = [u for u in res if u['id'] == user['id']][0]
+ self.assertThat(new_user['description'], matchers.Equals(description))
+
def test_user_extra_attribute_mapping_description_is_returned(self):
# Given a mapping like description:description, the description is
# returned.
@@ -1491,13 +1441,9 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
user_additional_attribute_mapping=['description:description'])
self.load_backends()
- description = uuid.uuid4().hex
- user = {
- 'name': uuid.uuid4().hex,
- 'description': description,
- 'password': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id
- }
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id,
+ description=uuid.uuid4().hex)
+ description = user['description']
user = self.identity_api.create_user(user)
res = self.identity_api.driver.user.get_all()
@@ -1551,52 +1497,17 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
'fake': 'invalid', 'invalid2': ''}
self.assertDictEqual(expected_dict, mapping)
-# TODO(henry-nash): These need to be removed when the full LDAP implementation
-# is submitted - see Bugs 1092187, 1101287, 1101276, 1101289
-
- def test_domain_crud(self):
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'enabled': True, 'description': uuid.uuid4().hex}
- self.assertRaises(exception.Forbidden,
+ def test_create_domain(self):
+ domain = unit.new_domain_ref()
+ self.assertRaises(exception.ValidationError,
self.resource_api.create_domain,
domain['id'],
domain)
- self.assertRaises(exception.Conflict,
- self.resource_api.create_domain,
- CONF.identity.default_domain_id,
- domain)
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.get_domain,
- domain['id'])
-
- domain['description'] = uuid.uuid4().hex
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.update_domain,
- domain['id'],
- domain)
- self.assertRaises(exception.Forbidden,
- self.resource_api.update_domain,
- CONF.identity.default_domain_id,
- domain)
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.get_domain,
- domain['id'])
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.delete_domain,
- domain['id'])
- self.assertRaises(exception.Forbidden,
- self.resource_api.delete_domain,
- CONF.identity.default_domain_id)
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.get_domain,
- domain['id'])
@unit.skip_if_no_multiple_domains_support
def test_create_domain_case_sensitivity(self):
# domains are read-only, so case sensitivity isn't an issue
- ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
+ ref = unit.new_domain_ref()
self.assertRaises(exception.Forbidden,
self.resource_api.create_domain,
ref['id'],
@@ -1624,22 +1535,18 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
# NOTE(topol): LDAP implementation does not currently support the
# updating of a project name so this method override
# provides a different update test
- project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'description': uuid.uuid4().hex,
- 'enabled': True,
- 'parent_id': None,
- 'is_domain': False}
- self.resource_api.create_project(project['id'], project)
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+
+ project = self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
- self.assertDictEqual(project_ref, project)
+ self.assertDictEqual(project, project_ref)
project['description'] = uuid.uuid4().hex
self.resource_api.update_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
- self.assertDictEqual(project_ref, project)
+ self.assertDictEqual(project, project_ref)
self.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
@@ -1651,12 +1558,11 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
# NOTE(morganfainberg): LDAP implementation does not currently support
# updating project names. This method override provides a different
# update test.
- project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'description': uuid.uuid4().hex}
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
project_id = project['id']
# Create a project
- self.resource_api.create_project(project_id, project)
+ project = self.resource_api.create_project(project_id, project)
self.resource_api.get_project(project_id)
updated_project = copy.deepcopy(project)
updated_project['description'] = uuid.uuid4().hex
@@ -1700,70 +1606,10 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.resource_api.get_project,
project_id)
- def _assert_create_hierarchy_not_allowed(self):
- domain = self._get_domain_fixture()
-
- project1 = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': '',
- 'domain_id': domain['id'],
- 'enabled': True,
- 'parent_id': None,
- 'is_domain': False}
- self.resource_api.create_project(project1['id'], project1)
-
- # Creating project2 under project1. LDAP will not allow
- # the creation of a project with parent_id being set
- project2 = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': '',
- 'domain_id': domain['id'],
- 'enabled': True,
- 'parent_id': project1['id'],
- 'is_domain': False}
-
- self.assertRaises(exception.InvalidParentProject,
- self.resource_api.create_project,
- project2['id'],
- project2)
-
- # Now, we'll create project 2 with no parent
- project2['parent_id'] = None
- self.resource_api.create_project(project2['id'], project2)
-
- # Returning projects to be used across the tests
- return [project1, project2]
-
- def _assert_create_is_domain_project_not_allowed(self):
- """Tests that we can't create more than one project acting as domain.
-
- This method will be used at any test that require the creation of a
- project that act as a domain. LDAP does not support multiple domains
- and the only domain it has (default) is immutable.
- """
- domain = self._get_domain_fixture()
- project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': '',
- 'domain_id': domain['id'],
- 'enabled': True,
- 'parent_id': None,
- 'is_domain': True}
-
- self.assertRaises(exception.ValidationError,
- self.resource_api.create_project,
- project['id'], project)
-
def test_update_is_domain_field(self):
domain = self._get_domain_fixture()
- project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': '',
- 'domain_id': domain['id'],
- 'enabled': True,
- 'parent_id': None,
- 'is_domain': False}
- self.resource_api.create_project(project['id'], project)
+ project = unit.new_project_ref(domain_id=domain['id'])
+ project = self.resource_api.create_project(project['id'], project)
# Try to update the is_domain field to True
project['is_domain'] = True
@@ -1772,97 +1618,87 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
project['id'], project)
def test_delete_is_domain_project(self):
- self._assert_create_is_domain_project_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_create_domain_under_regular_project_hierarchy_fails(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_create_not_is_domain_project_under_is_domain_hierarchy(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
- def test_create_is_domain_project(self):
- self._assert_create_is_domain_project_not_allowed()
+ def test_create_project_passing_is_domain_flag_true(self):
+ self.skipTest('Resource LDAP has been removed')
def test_create_project_with_parent_id_and_without_domain_id(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_check_leaf_projects(self):
- projects = self._assert_create_hierarchy_not_allowed()
- for project in projects:
- self.assertTrue(self.resource_api.is_leaf_project(project))
+ self.skipTest('Resource LDAP has been removed')
def test_list_projects_in_subtree(self):
- projects = self._assert_create_hierarchy_not_allowed()
- for project in projects:
- subtree_list = self.resource_api.list_projects_in_subtree(
- project['id'])
- self.assertEqual(0, len(subtree_list))
+ self.skipTest('Resource LDAP has been removed')
def test_list_projects_in_subtree_with_circular_reference(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_list_project_parents(self):
- projects = self._assert_create_hierarchy_not_allowed()
- for project in projects:
- parents_list = self.resource_api.list_project_parents(
- project['id'])
- self.assertEqual(0, len(parents_list))
+ self.skipTest('Resource LDAP has been removed')
+
+ def test_update_project_enabled_cascade(self):
+ self.skipTest('Resource LDAP has been removed')
+
+ def test_cannot_enable_cascade_with_parent_disabled(self):
+ self.skipTest('Resource LDAP has been removed')
def test_hierarchical_projects_crud(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_create_project_under_disabled_one(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_create_project_with_invalid_parent(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_create_leaf_project_with_invalid_domain(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_update_project_parent(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_enable_project_with_disabled_parent(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_disable_hierarchical_leaf_project(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_disable_hierarchical_not_leaf_project(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_delete_hierarchical_leaf_project(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_delete_hierarchical_not_leaf_project(self):
- self._assert_create_hierarchy_not_allowed()
+ self.skipTest('Resource LDAP has been removed')
def test_check_hierarchy_depth(self):
- projects = self._assert_create_hierarchy_not_allowed()
- for project in projects:
- depth = self._get_hierarchy_depth(project['id'])
- self.assertEqual(1, depth)
+ self.skipTest('Resource LDAP has been removed')
def test_multi_role_grant_by_user_group_on_project_domain(self):
# This is a partial implementation of the standard test that
- # is defined in test_backend.py. It omits both domain and
- # group grants. since neither of these are yet supported by
- # the ldap backend.
+ # is defined in unit.assignment.test_backends.py. It omits
+        # both domain and group grants, since neither of these are
+ # yet supported by the ldap backend.
role_list = []
for _ in range(2):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
- user1 = {'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'password': uuid.uuid4().hex,
- 'enabled': True}
+ user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user1 = self.identity_api.create_user(user1)
- project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id}
+ project1 = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(project1['id'], project1)
self.assignment_api.add_role_to_user_and_project(
@@ -1947,7 +1783,7 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
- group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group = unit.new_group_ref(domain_id=domain['id'])
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
# Fetch the test groups and ensure that they don't contain a dn.
@@ -1960,16 +1796,14 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
def test_list_groups_for_user_no_dn(self):
# Create a test user.
- user = {'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'password': uuid.uuid4().hex, 'enabled': True}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
# Create some test groups and add the test user as a member.
domain = self._get_domain_fixture()
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
- group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group = unit.new_group_ref(domain_id=domain['id'])
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
self.identity_api.add_user_to_group(user['id'], group['id'])
@@ -1987,9 +1821,7 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
CONF.identity.default_domain_id)
driver.user.id_attr = 'mail'
- user = {'name': u'fäké1',
- 'password': u'fäképass1',
- 'domain_id': CONF.identity.default_domain_id}
+ user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
user_ref = self.identity_api.get_user(user['id'])
        # 'email' attribute should've been created because it is also being used
@@ -2083,6 +1915,35 @@ class LDAPIdentity(BaseLDAPIdentity, unit.TestCase):
self.assertEqual('Foo Bar', user_ref['name'])
+class LDAPLimitTests(unit.TestCase, identity_tests.LimitTests):
+ def setUp(self):
+ super(LDAPLimitTests, self).setUp()
+
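+        # Both LDAP (identity) and SQL (assignment/resource) fixtures are
+        # needed, since only identity remains LDAP-backed.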
+ self.useFixture(ldapdb.LDAPDatabase())
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
+ self.load_backends()
+ self.load_fixtures(default_fixtures)
+ identity_tests.LimitTests.setUp(self)
+ _assert_backends(self,
+ assignment='sql',
+ identity='ldap',
+ resource='sql')
+
+ def config_overrides(self):
+ super(LDAPLimitTests, self).config_overrides()
+ self.config_fixture.config(group='identity', driver='ldap')
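+        # Set list_limit below the fixture user count so that limiting
+        # is actually exercised.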
+ self.config_fixture.config(group='identity',
+ list_limit=len(default_fixtures.USERS) - 1)
+
+ def config_files(self):
+ config_files = super(LDAPLimitTests, self).config_files()
+ config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
+ return config_files
+
+ def test_list_projects_filtered_and_limited(self):
+ self.skipTest("ldap for storing projects is deprecated")
+
+
class LDAPIdentityEnabledEmulation(LDAPIdentity):
def setUp(self):
super(LDAPIdentityEnabledEmulation, self).setUp()
@@ -2092,10 +1953,7 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
self.user_two, self.user_badguy]:
obj.setdefault('enabled', True)
- _assert_backends(self,
- assignment='ldap',
- identity='ldap',
- resource='ldap')
+ _assert_backends(self, identity='ldap')
def load_fixtures(self, fixtures):
        # Override super impl since we need to create the group container.
@@ -2110,60 +1968,62 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
def config_overrides(self):
super(LDAPIdentityEnabledEmulation, self).config_overrides()
self.config_fixture.config(group='ldap',
- user_enabled_emulation=True,
- project_enabled_emulation=True)
+ user_enabled_emulation=True)
def test_project_crud(self):
# NOTE(topol): LDAPIdentityEnabledEmulation will create an
# enabled key in the project dictionary so this
# method override handles this side-effect
- project = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'description': uuid.uuid4().hex,
- 'parent_id': None,
- 'is_domain': False}
-
- self.resource_api.create_project(project['id'], project)
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+
+ project = self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
# self.resource_api.create_project adds an enabled
# key with a value of True when LDAPIdentityEnabledEmulation
# is used so we now add this expected key to the project dictionary
project['enabled'] = True
- self.assertDictEqual(project_ref, project)
+ self.assertDictEqual(project, project_ref)
project['description'] = uuid.uuid4().hex
self.resource_api.update_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
- self.assertDictEqual(project_ref, project)
+ self.assertDictEqual(project, project_ref)
self.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project['id'])
- def test_user_crud(self):
- user_dict = {
- 'domain_id': CONF.identity.default_domain_id,
- 'name': uuid.uuid4().hex,
- 'password': uuid.uuid4().hex}
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_user_crud(self, mock_deprecator):
+        # NOTE(stevemar): As of the Mitaka release, we now check that calls to
+        # the LDAP write functionality emit a deprecation warning.
+ user_dict = self.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user_dict)
- user_dict['enabled'] = True
- user_ref = self.identity_api.get_user(user['id'])
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("create_user for the LDAP identity backend", args[1])
+
del user_dict['password']
+ user_ref = self.identity_api.get_user(user['id'])
user_ref_dict = {x: user_ref[x] for x in user_ref}
self.assertDictContainsSubset(user_dict, user_ref_dict)
user_dict['password'] = uuid.uuid4().hex
- self.identity_api.update_user(user['id'], user)
- user_ref = self.identity_api.get_user(user['id'])
+ self.identity_api.update_user(user['id'], user_dict)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("update_user for the LDAP identity backend", args[1])
+
del user_dict['password']
+ user_ref = self.identity_api.get_user(user['id'])
user_ref_dict = {x: user_ref[x] for x in user_ref}
self.assertDictContainsSubset(user_dict, user_ref_dict)
self.identity_api.delete_user(user['id'])
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("delete_user for the LDAP identity backend", args[1])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
@@ -2192,8 +2052,8 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
self.load_fixtures(default_fixtures)
# Create a user and ensure they are enabled.
- user1 = {'name': u'fäké1', 'enabled': True,
- 'domain_id': CONF.identity.default_domain_id}
+ user1 = unit.new_user_ref(enabled=True,
+ domain_id=CONF.identity.default_domain_id)
user_ref = self.identity_api.create_user(user1)
self.assertIs(True, user_ref['enabled'])
@@ -2208,14 +2068,12 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
self.load_backends()
self.load_fixtures(default_fixtures)
- user1 = {'name': u'fäké1', 'enabled': True,
- 'domain_id': CONF.identity.default_domain_id}
+ user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
- user2 = {'name': u'fäké2', 'enabled': False,
- 'domain_id': CONF.identity.default_domain_id}
+ user2 = self.new_user_ref(enabled=False,
+ domain_id=CONF.identity.default_domain_id)
- user3 = {'name': u'fäké3',
- 'domain_id': CONF.identity.default_domain_id}
+ user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
# Ensure that the enabled LDAP attribute is not set for a
# newly created enabled user.
@@ -2282,121 +2140,103 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
user_ref = user_api.get('123456789')
self.assertIs(False, user_ref['enabled'])
+ def test_escape_member_dn(self):
+        # The enabled member DN is properly escaped when querying for an
+        # enabled user.
-class LdapIdentitySqlAssignment(BaseLDAPIdentity, unit.SQLDriverOverrides,
- unit.TestCase):
+ object_id = uuid.uuid4().hex
+ driver = self.identity_api._select_identity_driver(
+ CONF.identity.default_domain_id)
- def config_files(self):
- config_files = super(LdapIdentitySqlAssignment, self).config_files()
- config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
- return config_files
+ # driver.user is the EnabledEmuMixIn implementation used for this test.
+ mixin_impl = driver.user
- def setUp(self):
- sqldb = self.useFixture(database.Database())
- super(LdapIdentitySqlAssignment, self).setUp()
- self.ldapdb.clear()
- self.load_backends()
- cache.configure_cache_region(cache.REGION)
+ # ) is a special char in a filter and must be escaped.
+ sample_dn = 'cn=foo)bar'
+        # LDAP requires that ) be escaped by replacing it with "\29".
+ sample_dn_filter_esc = r'cn=foo\29bar'
- sqldb.recreate()
- self.load_fixtures(default_fixtures)
- # defaulted by the data load
- self.user_foo['enabled'] = True
- _assert_backends(self,
- assignment='sql',
- identity='ldap',
- resource='sql')
+ # Override the tree_dn, it's used to build the enabled member filter
+ mixin_impl.tree_dn = sample_dn
- def config_overrides(self):
- super(LdapIdentitySqlAssignment, self).config_overrides()
- self.config_fixture.config(group='identity', driver='ldap')
- self.config_fixture.config(group='resource', driver='sql')
- self.config_fixture.config(group='assignment', driver='sql')
+ # The filter that _get_enabled is going to build contains the
+        # tree_dn, which must be escaped in this case.
+ exp_filter = '(%s=%s=%s,%s)' % (
+ mixin_impl.member_attribute, mixin_impl.id_attr, object_id,
+ sample_dn_filter_esc)
- def test_domain_crud(self):
- pass
+ with mixin_impl.get_connection() as conn:
+ m = self.useFixture(mockpatch.PatchObject(conn, 'search_s')).mock
+ mixin_impl._get_enabled(object_id, conn)
+            # The 3rd positional argument is the search filter.
+ self.assertEqual(exp_filter, m.call_args[0][2])
- def test_list_domains(self):
- domains = self.resource_api.list_domains()
- self.assertEqual([resource.calc_default_domain()], domains)
- def test_list_domains_non_default_domain_id(self):
- # If change the default_domain_id, the ID of the default domain
- # returned by list_domains doesn't change because the SQL identity
- # backend reads it from the database, which doesn't get updated by
- # config change.
+class LDAPPosixGroupsTest(unit.TestCase):
- orig_default_domain_id = CONF.identity.default_domain_id
+ def setUp(self):
- new_domain_id = uuid.uuid4().hex
- self.config_fixture.config(group='identity',
- default_domain_id=new_domain_id)
+ super(LDAPPosixGroupsTest, self).setUp()
- domains = self.resource_api.list_domains()
+ self.useFixture(ldapdb.LDAPDatabase())
+ self.useFixture(database.Database())
- self.assertEqual(orig_default_domain_id, domains[0]['id'])
+ self.load_backends()
+ self.load_fixtures(default_fixtures)
- def test_create_domain(self):
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'enabled': True}
- self.assertRaises(exception.Forbidden,
- self.resource_api.create_domain,
- domain['id'],
- domain)
+ _assert_backends(self, identity='ldap')
- def test_get_and_remove_role_grant_by_group_and_domain(self):
- # TODO(henry-nash): We should really rewrite the tests in test_backend
- # to be more flexible as to where the domains are sourced from, so
- # that we would not need to override such tests here. This is raised
- # as bug 1373865.
- new_domain = self._get_domain_fixture()
- new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
- new_group = self.identity_api.create_group(new_group)
- new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
- 'enabled': True, 'domain_id': new_domain['id']}
- new_user = self.identity_api.create_user(new_user)
- self.identity_api.add_user_to_group(new_user['id'],
- new_group['id'])
+ def load_fixtures(self, fixtures):
+        # Override super impl since we need to create the group container.
+ create_group_container(self.identity_api)
+ super(LDAPPosixGroupsTest, self).load_fixtures(fixtures)
- roles_ref = self.assignment_api.list_grants(
- group_id=new_group['id'],
- domain_id=new_domain['id'])
- self.assertEqual(0, len(roles_ref))
+ def config_overrides(self):
+ super(LDAPPosixGroupsTest, self).config_overrides()
+ self.config_fixture.config(group='identity', driver='ldap')
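+        # POSIX-style groups store bare member IDs (memberUID) rather than
+        # full member DNs.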
+ self.config_fixture.config(group='ldap', group_members_are_ids=True,
+ group_member_attribute='memberUID')
- self.assignment_api.create_grant(group_id=new_group['id'],
- domain_id=new_domain['id'],
- role_id='member')
+ def config_files(self):
+ config_files = super(LDAPPosixGroupsTest, self).config_files()
+ config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
+ return config_files
- roles_ref = self.assignment_api.list_grants(
- group_id=new_group['id'],
- domain_id=new_domain['id'])
- self.assertDictEqual(roles_ref[0], self.role_member)
+ def _get_domain_fixture(self):
+ """Domains in LDAP are read-only, so just return the static one."""
+ return self.resource_api.get_domain(CONF.identity.default_domain_id)
- self.assignment_api.delete_grant(group_id=new_group['id'],
- domain_id=new_domain['id'],
- role_id='member')
- roles_ref = self.assignment_api.list_grants(
- group_id=new_group['id'],
- domain_id=new_domain['id'])
- self.assertEqual(0, len(roles_ref))
- self.assertRaises(exception.NotFound,
- self.assignment_api.delete_grant,
- group_id=new_group['id'],
- domain_id=new_domain['id'],
- role_id='member')
+ def test_posix_member_id(self):
+ domain = self._get_domain_fixture()
+ new_group = unit.new_group_ref(domain_id=domain['id'])
+ new_group = self.identity_api.create_group(new_group)
+ # Make sure we get an empty list back on a new group, not an error.
+ user_refs = self.identity_api.list_users_in_group(new_group['id'])
+ self.assertEqual([], user_refs)
+ # Make sure we get the correct users back once they have been added
+ # to the group.
+ new_user = unit.new_user_ref(domain_id=domain['id'])
+ new_user = self.identity_api.create_user(new_user)
- def test_project_enabled_ignored_disable_error(self):
- # Override
- self.skipTest("Doesn't apply since LDAP configuration is ignored for "
- "SQL assignment backend.")
+        # NOTE(amakarov): Add the user to the group directly via LDAP
+        # operations rather than going through the manager.
+ group_api = self.identity_api.driver.group
+ group_ref = group_api.get(new_group['id'])
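+        # The member value is the bare user ID (memberUID), not a full DN.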
+ mod = (ldap.MOD_ADD, group_api.member_attribute, new_user['id'])
+ conn = group_api.get_connection()
+ conn.modify_s(group_ref['dn'], [mod])
- def test_list_role_assignments_filtered_by_role(self):
- # Domain roles are supported by the SQL Assignment backend
- base = super(BaseLDAPIdentity, self)
- base.test_list_role_assignments_filtered_by_role()
+ # Testing the case "the group contains a user"
+ user_refs = self.identity_api.list_users_in_group(new_group['id'])
+ self.assertIn(new_user['id'], (x['id'] for x in user_refs))
+ # Testing the case "the user is a member of a group"
+ group_refs = self.identity_api.list_groups_for_user(new_user['id'])
+ self.assertIn(new_group['id'], (x['id'] for x in group_refs))
-class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
+
+class LdapIdentityWithMapping(
+ BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase):
"""Class to test mapping of default LDAP backend.
The default configuration is not to enable mapping when using a single
@@ -2405,8 +2245,28 @@ class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
Setting backward_compatible_ids to False will enable this mapping.
"""
+
+ def config_files(self):
+ config_files = super(LdapIdentityWithMapping, self).config_files()
+ config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
+ return config_files
+
+ def setUp(self):
+ sqldb = self.useFixture(database.Database())
+ super(LdapIdentityWithMapping, self).setUp()
+ self.ldapdb.clear()
+ self.load_backends()
+ cache.configure_cache()
+
+ sqldb.recreate()
+ self.load_fixtures(default_fixtures)
+ # defaulted by the data load
+ self.user_foo['enabled'] = True
+ _assert_backends(self, identity='ldap')
+
def config_overrides(self):
- super(LdapIdentitySqlAssignmentWithMapping, self).config_overrides()
+ super(LdapIdentityWithMapping, self).config_overrides()
+ self.config_fixture.config(group='identity', driver='ldap')
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
@@ -2420,13 +2280,9 @@ class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
"""
initial_mappings = len(mapping_sql.list_id_mappings())
- user1 = {'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'password': uuid.uuid4().hex, 'enabled': True}
+ user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user1 = self.identity_api.create_user(user1)
- user2 = {'name': uuid.uuid4().hex,
- 'domain_id': CONF.identity.default_domain_id,
- 'password': uuid.uuid4().hex, 'enabled': True}
+ user2 = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
user2 = self.identity_api.create_user(user2)
mappings = mapping_sql.list_id_mappings()
self.assertEqual(initial_mappings + 2, len(mappings))
@@ -2453,35 +2309,29 @@ class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
self.skipTest('N/A: We never generate the same ID for a user and '
'group in our mapping table')
+ def test_list_domains(self):
+ domains = self.resource_api.list_domains()
+ self.assertEqual([resource.calc_default_domain()], domains)
+
class BaseMultiLDAPandSQLIdentity(object):
"""Mixin class with support methods for domain-specific config testing."""
- def create_user(self, domain_id):
- user = {'name': uuid.uuid4().hex,
- 'domain_id': domain_id,
- 'password': uuid.uuid4().hex,
- 'enabled': True}
- user_ref = self.identity_api.create_user(user)
- # Put the password back in, since this is used later by tests to
- # authenticate.
- user_ref['password'] = user['password']
- return user_ref
-
def create_users_across_domains(self):
"""Create a set of users, each with a role on their own domain."""
-
# We also will check that the right number of id mappings get created
initial_mappings = len(mapping_sql.list_id_mappings())
- self.users['user0'] = self.create_user(
+ self.users['user0'] = unit.create_user(
+ self.identity_api,
self.domains['domain_default']['id'])
self.assignment_api.create_grant(
user_id=self.users['user0']['id'],
domain_id=self.domains['domain_default']['id'],
role_id=self.role_member['id'])
for x in range(1, self.domain_count):
- self.users['user%s' % x] = self.create_user(
+ self.users['user%s' % x] = unit.create_user(
+ self.identity_api,
self.domains['domain%s' % x]['id'])
self.assignment_api.create_grant(
user_id=self.users['user%s' % x]['id'],
@@ -2506,13 +2356,13 @@ class BaseMultiLDAPandSQLIdentity(object):
self.identity_api._get_domain_driver_and_entity_id(
user['id']))
- if expected_status == 200:
+ if expected_status == http_client.OK:
ref = driver.get_user(entity_id)
ref = self.identity_api._set_domain_id_and_mapping(
ref, domain_id, driver, map.EntityType.USER)
user = user.copy()
del user['password']
- self.assertDictEqual(ref, user)
+ self.assertDictEqual(user, ref)
else:
            # TODO(henry-nash): Use assertRaises here, although
# there appears to be an issue with using driver.get_user
@@ -2570,6 +2420,7 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
domain.
"""
+
def setUp(self):
sqldb = self.useFixture(database.Database())
super(MultiLDAPandSQLIdentity, self).setUp()
@@ -2614,11 +2465,14 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
# Create some identity entities BEFORE we switch to multi-backend, so
# we can test that these are still accessible
self.users = {}
- self.users['userA'] = self.create_user(
+ self.users['userA'] = unit.create_user(
+ self.identity_api,
self.domains['domain_default']['id'])
- self.users['userB'] = self.create_user(
+ self.users['userB'] = unit.create_user(
+ self.identity_api,
self.domains['domain1']['id'])
- self.users['userC'] = self.create_user(
+ self.users['userC'] = unit.create_user(
+ self.identity_api,
self.domains['domain3']['id'])
def enable_multi_domain(self):
@@ -2631,7 +2485,8 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
"""
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
- domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap')
+ domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap',
+ list_limit=1000)
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
@@ -2640,14 +2495,6 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
# if no specific config defined for this domain
return self.identity_api.domain_configs.get_domain_conf(domain_id)
- def test_list_domains(self):
- self.skipTest(
- 'N/A: Not relevant for multi ldap testing')
-
- def test_list_domains_non_default_domain_id(self):
- self.skipTest(
- 'N/A: Not relevant for multi ldap testing')
-
def test_list_users(self):
# Override the standard list users, since we have added an extra user
# to the default domain, so the number of expected users is one more
@@ -2664,6 +2511,36 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
self.assertNotIn('password', user_ref)
self.assertEqual(expected_user_ids, user_ids)
+ @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get_all')
+ def test_list_limit_domain_specific_inheritance(self, ldap_get_all):
+        # Passing hints is important because, if hints are not passed,
+        # limiting is considered to be disabled.
+ hints = driver_hints.Hints()
+ self.identity_api.list_users(
+ domain_scope=self.domains['domain2']['id'],
+ hints=hints)
+ # since list_limit is not specified in keystone.domain2.conf, it should
+ # take the default, which is 1000
+ self.assertTrue(ldap_get_all.called)
+ args, kwargs = ldap_get_all.call_args
+ hints = args[0]
+ self.assertEqual(1000, hints.limit['limit'])
+
+ @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get_all')
+ def test_list_limit_domain_specific_override(self, ldap_get_all):
+        # Passing hints is important because, if hints are not passed,
+        # limiting is considered to be disabled.
+ hints = driver_hints.Hints()
+ self.identity_api.list_users(
+ domain_scope=self.domains['domain1']['id'],
+ hints=hints)
+        # this should have the list_limit set in keystone.domain1.conf, which
+ # is 101
+ self.assertTrue(ldap_get_all.called)
+ args, kwargs = ldap_get_all.call_args
+ hints = args[0]
+ self.assertEqual(101, hints.limit['limit'])
+
def test_domain_segregation(self):
"""Test that separate configs have segregated the domain.
@@ -2680,21 +2557,23 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
check_user = self.check_user
check_user(self.users['user0'],
- self.domains['domain_default']['id'], 200)
+ self.domains['domain_default']['id'], http_client.OK)
for domain in [self.domains['domain1']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user0'], domain, exception.UserNotFound)
- check_user(self.users['user1'], self.domains['domain1']['id'], 200)
+ check_user(self.users['user1'], self.domains['domain1']['id'],
+ http_client.OK)
for domain in [self.domains['domain_default']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user1'], domain, exception.UserNotFound)
- check_user(self.users['user2'], self.domains['domain2']['id'], 200)
+ check_user(self.users['user2'], self.domains['domain2']['id'],
+ http_client.OK)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
self.domains['domain3']['id'],
@@ -2704,10 +2583,14 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
# domain3 and domain4 share the same backend, so you should be
# able to see user3 and user4 from either.
- check_user(self.users['user3'], self.domains['domain3']['id'], 200)
- check_user(self.users['user3'], self.domains['domain4']['id'], 200)
- check_user(self.users['user4'], self.domains['domain3']['id'], 200)
- check_user(self.users['user4'], self.domains['domain4']['id'], 200)
+ check_user(self.users['user3'], self.domains['domain3']['id'],
+ http_client.OK)
+ check_user(self.users['user3'], self.domains['domain4']['id'],
+ http_client.OK)
+ check_user(self.users['user4'], self.domains['domain3']['id'],
+ http_client.OK)
+ check_user(self.users['user4'], self.domains['domain4']['id'],
+ http_client.OK)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
@@ -2789,19 +2672,12 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
self.assertEqual('fake://memory1', conf.ldap.url)
def test_delete_domain_with_user_added(self):
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'enabled': True}
- project = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': domain['id'],
- 'description': uuid.uuid4().hex,
- 'parent_id': None,
- 'enabled': True,
- 'is_domain': False}
+ domain = unit.new_domain_ref()
+ project = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_domain(domain['id'], domain)
- self.resource_api.create_project(project['id'], project)
+ project = self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
- self.assertDictEqual(project_ref, project)
+ self.assertDictEqual(project, project_ref)
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=project['id'],
@@ -2839,13 +2715,37 @@ class MultiLDAPandSQLIdentity(BaseLDAPIdentity, unit.SQLDriverOverrides,
def test_list_role_assignment_by_domain(self):
# With multi LDAP this method should work, so override the override
# from BaseLDAPIdentity
- super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain
+ super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain()
def test_list_role_assignment_by_user_with_domain_group_roles(self):
# With multi LDAP this method should work, so override the override
# from BaseLDAPIdentity
super(BaseLDAPIdentity, self).\
- test_list_role_assignment_by_user_with_domain_group_roles
+ test_list_role_assignment_by_user_with_domain_group_roles()
+
+ def test_list_role_assignment_using_sourced_groups_with_domains(self):
+ # With SQL Assignment this method should work, so override the override
+ # from BaseLDAPIdentity
+ base = super(BaseLDAPIdentity, self)
+ base.test_list_role_assignment_using_sourced_groups_with_domains()
+
+ def test_create_project_with_domain_id_and_without_parent_id(self):
+ # With multi LDAP this method should work, so override the override
+ # from BaseLDAPIdentity
+ super(BaseLDAPIdentity, self).\
+ test_create_project_with_domain_id_and_without_parent_id()
+
+ def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
+ # With multi LDAP this method should work, so override the override
+ # from BaseLDAPIdentity
+ super(BaseLDAPIdentity, self).\
+ test_create_project_with_domain_id_mismatch_to_parent_domain()
+
+ def test_remove_foreign_assignments_when_deleting_a_domain(self):
+ # With multi LDAP this method should work, so override the override
+ # from BaseLDAPIdentity
+ base = super(BaseLDAPIdentity, self)
+ base.test_remove_foreign_assignments_when_deleting_a_domain()
class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
@@ -2870,7 +2770,7 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def enable_multi_domain(self):
# The values below are the same as in the domain_configs_multi_ldap
- # cdirectory of test config_files.
+ # directory of test config_files.
default_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
@@ -2883,7 +2783,8 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'ldap'}
+ 'identity': {'driver': 'ldap',
+ 'list_limit': '101'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
@@ -2904,7 +2805,8 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
- domain_configurations_from_database=True)
+ domain_configurations_from_database=True,
+ list_limit=1000)
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
@@ -2933,7 +2835,6 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def test_reloading_domain_config(self):
"""Ensure domain drivers are reloaded on a config modification."""
-
domain_cfgs = self.identity_api.domain_configs
# Create a new config for the default domain, hence overwriting the
@@ -2965,7 +2866,6 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def test_setting_multiple_sql_driver_raises_exception(self):
"""Ensure setting multiple domain specific sql drivers is prevented."""
-
new_config = {'identity': {'driver': 'sql'}}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, new_config)
@@ -2979,7 +2879,6 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def test_same_domain_gets_sql_driver(self):
"""Ensure we can set an SQL driver if we have had it before."""
-
new_config = {'identity': {'driver': 'sql'}}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, new_config)
@@ -2997,8 +2896,7 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def test_delete_domain_clears_sql_registration(self):
"""Ensure registration is deleted when a domain is deleted."""
-
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain = unit.new_domain_ref()
domain = self.resource_api.create_domain(domain['id'], domain)
new_config = {'identity': {'driver': 'sql'}}
self.domain_config_api.create_config(domain['id'], new_config)
@@ -3025,8 +2923,7 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
def test_orphaned_registration_does_not_prevent_getting_sql_driver(self):
"""Ensure we self heal an orphaned sql registration."""
-
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain = unit.new_domain_ref()
domain = self.resource_api.create_domain(domain['id'], domain)
new_config = {'identity': {'driver': 'sql'}}
self.domain_config_api.create_config(domain['id'], new_config)
@@ -3047,7 +2944,7 @@ class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
# should still be able to set another domain to SQL, since we should
# self heal this issue.
- self.resource_api.driver.delete_domain(domain['id'])
+ self.resource_api.driver.delete_project(domain['id'])
# Invalidate cache (so we will see the domain has gone)
self.resource_api.get_domain.invalidate(
self.resource_api, domain['id'])
@@ -3072,6 +2969,7 @@ class DomainSpecificLDAPandSQLIdentity(
Although the default driver still exists, we don't use it.
"""
+
def setUp(self):
sqldb = self.useFixture(database.Database())
super(DomainSpecificLDAPandSQLIdentity, self).setUp()
@@ -3133,6 +3031,17 @@ class DomainSpecificLDAPandSQLIdentity(
self.skipTest(
'N/A: Not relevant for multi ldap testing')
+ def test_not_delete_domain_with_enabled_subdomains(self):
+ self.skipTest(
+ 'N/A: Not relevant for multi ldap testing')
+
+ def test_delete_domain(self):
+ # With this restricted multi LDAP class, tests that use multiple
+        # domains and identity are still not supported.
+ self.assertRaises(
+ exception.DomainNotFound,
+ super(BaseLDAPIdentity, self).test_delete_domain_with_project_api)
+
def test_list_users(self):
# Override the standard list users, since we have added an extra user
# to the default domain, so the number of expected users is one more
@@ -3164,12 +3073,12 @@ class DomainSpecificLDAPandSQLIdentity(
# driver, but won't find it via any other domain driver
self.check_user(self.users['user0'],
- self.domains['domain_default']['id'], 200)
+ self.domains['domain_default']['id'], http_client.OK)
self.check_user(self.users['user0'],
self.domains['domain1']['id'], exception.UserNotFound)
self.check_user(self.users['user1'],
- self.domains['domain1']['id'], 200)
+ self.domains['domain1']['id'], http_client.OK)
self.check_user(self.users['user1'],
self.domains['domain_default']['id'],
exception.UserNotFound)
@@ -3182,10 +3091,10 @@ class DomainSpecificLDAPandSQLIdentity(
domain_scope=self.domains['domain1']['id']),
matchers.HasLength(1))
- def test_add_role_grant_to_user_and_project_404(self):
+ def test_add_role_grant_to_user_and_project_returns_not_found(self):
self.skipTest('Blocked by bug 1101287')
- def test_get_role_grants_for_user_and_project_404(self):
+ def test_get_role_grants_for_user_and_project_returns_not_found(self):
self.skipTest('Blocked by bug 1101287')
def test_list_projects_for_user_with_grants(self):
@@ -3223,6 +3132,25 @@ class DomainSpecificLDAPandSQLIdentity(
base = super(BaseLDAPIdentity, self)
base.test_list_role_assignments_filtered_by_role()
+ def test_delete_domain_with_project_api(self):
+ # With this restricted multi LDAP class, tests that use multiple
+        # domains and identity are still not supported.
+ self.assertRaises(
+ exception.DomainNotFound,
+ super(BaseLDAPIdentity, self).test_delete_domain_with_project_api)
+
+ def test_create_project_with_domain_id_and_without_parent_id(self):
+        # With restricted multi LDAP, tests that don't use identity but do
+        # require additional domains will work.
+ base = super(BaseLDAPIdentity, self)
+ base.test_create_project_with_domain_id_and_without_parent_id()
+
+ def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
+        # With restricted multi LDAP, tests that don't use identity but do
+        # require additional domains will work.
+ base = super(BaseLDAPIdentity, self)
+ base.test_create_project_with_domain_id_mismatch_to_parent_domain()
+
class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
"""Class to test simplest use of domain-specific SQL driver.
@@ -3236,6 +3164,7 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
- A separate SQL backend for domain1
"""
+
def initial_setup(self, sqldb):
# We aren't setting up any initial data ahead of switching to
# domain-specific operation, so make the switch straight away.
@@ -3323,7 +3252,7 @@ class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
'domain2')
-class LdapFilterTests(test_backend.FilterTests, unit.TestCase):
+class LdapFilterTests(identity_tests.FilterTests, unit.TestCase):
def setUp(self):
super(LdapFilterTests, self).setUp()
@@ -3333,7 +3262,7 @@ class LdapFilterTests(test_backend.FilterTests, unit.TestCase):
self.load_backends()
self.load_fixtures(default_fixtures)
sqldb.recreate()
- _assert_backends(self, assignment='ldap', identity='ldap')
+ _assert_backends(self, identity='ldap')
def config_overrides(self):
super(LdapFilterTests, self).config_overrides()
@@ -3344,13 +3273,15 @@ class LdapFilterTests(test_backend.FilterTests, unit.TestCase):
config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
return config_files
- def test_list_users_in_group_filtered(self):
+ @wip('Not supported by LDAP identity driver')
+ def test_list_users_in_group_inexact_filtered(self):
+        # The LDAP identity driver currently does not support filtering when
+        # listing the users in a given group, so this test will fail.
+ super(LdapFilterTests,
+ self).test_list_users_in_group_inexact_filtered()
+
+ @wip('Not supported by LDAP identity driver')
+ def test_list_users_in_group_exact_filtered(self):
# The LDAP identity driver currently does not support filtering when
# listing users for a given group, so it will fail this test.
- try:
- super(LdapFilterTests, self).test_list_users_in_group_filtered()
- except matchers.MismatchError:
- return
- # We shouldn't get here...if we do, it means someone has implemented
- # filtering, so we can remove this test override.
- self.assertTrue(False)
+ super(LdapFilterTests, self).test_list_users_in_group_exact_filtered()
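
The @wip marker replaces the older try/except-and-fail idiom visible in the removed lines: the test is expected to fail until the feature lands, and an unexpected pass should be flagged so the override can be removed. A hypothetical minimal implementation of such a decorator (the real helper in keystone's test utilities may differ in detail):

    import functools

    def wip(message):
        # Hypothetical sketch: swallow the expected failure, but fail
        # loudly once the test starts passing.
        def _decorator(func):
            @functools.wraps(func)
            def _wrapper(*args, **kwargs):
                try:
                    func(*args, **kwargs)
                except Exception:
                    return  # expected failure: work in progress
                raise AssertionError(
                    '%s unexpectedly passed (%s); remove the @wip marker'
                    % (func.__name__, message))
            return _wrapper
        return _decorator
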
diff --git a/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py b/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
index 2b714b57..ec789d04 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_ldap_pool.py
@@ -38,7 +38,7 @@ class LdapPoolCommonTestMixin(object):
# by default use_pool and use_auth_pool are enabled in the test pool config
user_ref = self.identity_api.get_user(self.user_foo['id'])
self.user_foo.pop('password')
- self.assertDictEqual(user_ref, self.user_foo)
+ self.assertDictEqual(self.user_foo, user_ref)
handler = ldap_core._get_connection(CONF.ldap.url, use_pool=True)
self.assertIsInstance(handler, ldap_core.PooledLDAPHandler)
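
This hunk, like dozens below, swaps the assertion arguments so the expected value comes first, matching the convention that failure messages read "expected != actual". A toy illustration of why the order matters, using standard unittest:

    import unittest

    class ArgumentOrderExample(unittest.TestCase):
        def test_expected_first(self):
            expected, observed = 1, 2
            with self.assertRaises(AssertionError) as ctx:
                self.assertEqual(expected, observed)
            # The failure text reads '1 != 2': the expected value is
            # named first only because it was passed first.
            self.assertIn('1 != 2', str(ctx.exception))

    if __name__ == '__main__':
        unittest.main()
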
@@ -151,22 +151,22 @@ class LdapPoolCommonTestMixin(object):
# Open 3 connections first
with _get_conn() as _: # conn1
- self.assertEqual(len(ldappool_cm), 1)
+ self.assertEqual(1, len(ldappool_cm))
with _get_conn() as _: # conn2
- self.assertEqual(len(ldappool_cm), 2)
+ self.assertEqual(2, len(ldappool_cm))
with _get_conn() as _: # conn3
_.unbind_ext_s()
- self.assertEqual(len(ldappool_cm), 3)
+ self.assertEqual(3, len(ldappool_cm))
# Then open 3 connections again and make sure size does not grow
# over 3
with _get_conn() as _: # conn1
- self.assertEqual(len(ldappool_cm), 1)
+ self.assertEqual(1, len(ldappool_cm))
with _get_conn() as _: # conn2
- self.assertEqual(len(ldappool_cm), 2)
+ self.assertEqual(2, len(ldappool_cm))
with _get_conn() as _: # conn3
_.unbind_ext_s()
- self.assertEqual(len(ldappool_cm), 3)
+ self.assertEqual(3, len(ldappool_cm))
def test_password_change_with_pool(self):
old_password = self.user_sna['password']
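
The pool-size assertions above check that releasing a connection returns it to the pool for reuse instead of growing the pool past its high-water mark. A self-contained toy pool illustrating that behaviour (this is not the ldappool API, just the idea):

    import contextlib

    class TinyPool(object):
        """Toy pool: reuses released connections."""

        def __init__(self):
            self._all = []    # every connection ever created
            self._free = []   # connections available for reuse

        def __len__(self):
            return len(self._all)

        @contextlib.contextmanager
        def connection(self):
            conn = self._free.pop() if self._free else object()
            if conn not in self._all:
                self._all.append(conn)
            try:
                yield conn
            finally:
                self._free.append(conn)

    pool = TinyPool()
    with pool.connection():
        with pool.connection():
            with pool.connection():
                assert len(pool) == 3   # three live connections
    with pool.connection():
        assert len(pool) == 3           # reused; no growth past three
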
@@ -181,14 +181,14 @@ class LdapPoolCommonTestMixin(object):
self.user_sna.pop('password')
self.user_sna['enabled'] = True
- self.assertDictEqual(user_ref, self.user_sna)
+ self.assertDictEqual(self.user_sna, user_ref)
new_password = 'new_password'
user_ref['password'] = new_password
self.identity_api.update_user(user_ref['id'], user_ref)
# now authenticate again to make sure new password works with
- # conneciton pool
+ # connection pool
user_ref2 = self.identity_api.authenticate(
context={},
user_id=self.user_sna['id'],
@@ -207,14 +207,15 @@ class LdapPoolCommonTestMixin(object):
password=old_password)
-class LdapIdentitySqlAssignment(LdapPoolCommonTestMixin,
- test_backend_ldap.LdapIdentitySqlAssignment,
- unit.TestCase):
+class LDAPIdentity(LdapPoolCommonTestMixin,
+ test_backend_ldap.LDAPIdentity,
+ unit.TestCase):
"""Executes tests in existing base class with pooled LDAP handler."""
+
def setUp(self):
self.useFixture(mockpatch.PatchObject(
ldap_core.PooledLDAPHandler, 'Connector', fakeldap.FakeLdapPool))
- super(LdapIdentitySqlAssignment, self).setUp()
+ super(LDAPIdentity, self).setUp()
self.addCleanup(self.cleanup_pools)
# storing to local variable to avoid long references
@@ -225,7 +226,7 @@ class LdapIdentitySqlAssignment(LdapPoolCommonTestMixin,
self.identity_api.get_user(self.user_foo['id'])
def config_files(self):
- config_files = super(LdapIdentitySqlAssignment, self).config_files()
+ config_files = super(LDAPIdentity, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_ldap_pool.conf'))
return config_files
diff --git a/keystone-moon/keystone/tests/unit/test_backend_rules.py b/keystone-moon/keystone/tests/unit/test_backend_rules.py
index 9a11fddc..c32c3307 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_rules.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_rules.py
@@ -15,10 +15,10 @@
from keystone import exception
from keystone.tests import unit
-from keystone.tests.unit import test_backend
+from keystone.tests.unit.policy import test_backends as policy_tests
-class RulesPolicy(unit.TestCase, test_backend.PolicyTests):
+class RulesPolicy(unit.TestCase, policy_tests.PolicyTests):
def setUp(self):
super(RulesPolicy, self).setUp()
self.load_backends()
@@ -47,14 +47,17 @@ class RulesPolicy(unit.TestCase, test_backend.PolicyTests):
self.assertRaises(exception.NotImplemented,
super(RulesPolicy, self).test_delete)
- def test_get_policy_404(self):
+ def test_get_policy_returns_not_found(self):
self.assertRaises(exception.NotImplemented,
- super(RulesPolicy, self).test_get_policy_404)
+ super(RulesPolicy,
+ self).test_get_policy_returns_not_found)
- def test_update_policy_404(self):
+ def test_update_policy_returns_not_found(self):
self.assertRaises(exception.NotImplemented,
- super(RulesPolicy, self).test_update_policy_404)
+ super(RulesPolicy,
+ self).test_update_policy_returns_not_found)
- def test_delete_policy_404(self):
+ def test_delete_policy_returns_not_found(self):
self.assertRaises(exception.NotImplemented,
- super(RulesPolicy, self).test_delete_policy_404)
+ super(RulesPolicy,
+ self).test_delete_policy_returns_not_found)
diff --git a/keystone-moon/keystone/tests/unit/test_backend_sql.py b/keystone-moon/keystone/tests/unit/test_backend_sql.py
index 69fac63a..2e703fff 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_sql.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_sql.py
@@ -29,22 +29,28 @@ from keystone.common import driver_hints
from keystone.common import sql
from keystone import exception
from keystone.identity.backends import sql as identity_sql
+from keystone import resource
from keystone.tests import unit
+from keystone.tests.unit.assignment import test_backends as assignment_tests
+from keystone.tests.unit.catalog import test_backends as catalog_tests
from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.identity import test_backends as identity_tests
from keystone.tests.unit.ksfixtures import database
-from keystone.tests.unit import test_backend
+from keystone.tests.unit.policy import test_backends as policy_tests
+from keystone.tests.unit.resource import test_backends as resource_tests
+from keystone.tests.unit.token import test_backends as token_tests
+from keystone.tests.unit.trust import test_backends as trust_tests
from keystone.token.persistence.backends import sql as token_sql
CONF = cfg.CONF
-DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlTests(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
- self.useFixture(database.Database())
+ self.useFixture(database.Database(self.sql_driver_version_overrides))
self.load_backends()
# populate the engine with tables & fixtures
@@ -124,14 +130,33 @@ class SqlModels(SqlTests):
def test_user_model(self):
cols = (('id', sql.String, 64),
- ('name', sql.String, 255),
- ('password', sql.String, 128),
- ('domain_id', sql.String, 64),
('default_project_id', sql.String, 64),
('enabled', sql.Boolean, None),
('extra', sql.JsonBlob, None))
self.assertExpectedSchema('user', cols)
+ def test_local_user_model(self):
+ cols = (('id', sql.Integer, None),
+ ('user_id', sql.String, 64),
+ ('name', sql.String, 255),
+ ('domain_id', sql.String, 64))
+ self.assertExpectedSchema('local_user', cols)
+
+ def test_password_model(self):
+ cols = (('id', sql.Integer, None),
+ ('local_user_id', sql.Integer, None),
+ ('password', sql.String, 128))
+ self.assertExpectedSchema('password', cols)
+
+ def test_federated_user_model(self):
+ cols = (('id', sql.Integer, None),
+ ('user_id', sql.String, 64),
+ ('idp_id', sql.String, 64),
+ ('protocol_id', sql.String, 64),
+ ('unique_id', sql.String, 255),
+ ('display_name', sql.String, 255))
+ self.assertExpectedSchema('federated_user', cols)
+
def test_group_model(self):
cols = (('id', sql.String, 64),
('name', sql.String, 64),
@@ -171,17 +196,58 @@ class SqlModels(SqlTests):
('user_id', sql.String, 64))
self.assertExpectedSchema('user_group_membership', cols)
-
-class SqlIdentity(SqlTests, test_backend.IdentityTests):
+ def test_revocation_event_model(self):
+ cols = (('id', sql.Integer, None),
+ ('domain_id', sql.String, 64),
+ ('project_id', sql.String, 64),
+ ('user_id', sql.String, 64),
+ ('role_id', sql.String, 64),
+ ('trust_id', sql.String, 64),
+ ('consumer_id', sql.String, 64),
+ ('access_token_id', sql.String, 64),
+ ('issued_before', sql.DateTime, None),
+ ('expires_at', sql.DateTime, None),
+ ('revoked_at', sql.DateTime, None),
+ ('audit_id', sql.String, 32),
+ ('audit_chain_id', sql.String, 32))
+ self.assertExpectedSchema('revocation_event', cols)
+
+
+class SqlIdentity(SqlTests, identity_tests.IdentityTests,
+ assignment_tests.AssignmentTests,
+ resource_tests.ResourceTests):
def test_password_hashed(self):
- session = sql.get_session()
- user_ref = self.identity_api._get_user(session, self.user_foo['id'])
- self.assertNotEqual(user_ref['password'], self.user_foo['password'])
+ with sql.session_for_read() as session:
+ user_ref = self.identity_api._get_user(session,
+ self.user_foo['id'])
+ self.assertNotEqual(self.user_foo['password'],
+ user_ref['password'])
+
+ def test_create_user_with_null_password(self):
+ user_dict = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
+ user_dict["password"] = None
+ new_user_dict = self.identity_api.create_user(user_dict)
+ with sql.session_for_read() as session:
+ new_user_ref = self.identity_api._get_user(session,
+ new_user_dict['id'])
+ self.assertFalse(new_user_ref.local_user.passwords)
+
+ def test_update_user_with_null_password(self):
+ user_dict = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.assertTrue(user_dict['password'])
+ new_user_dict = self.identity_api.create_user(user_dict)
+ new_user_dict["password"] = None
+ new_user_dict = self.identity_api.update_user(new_user_dict['id'],
+ new_user_dict)
+ with sql.session_for_read() as session:
+ new_user_ref = self.identity_api._get_user(session,
+ new_user_dict['id'])
+ self.assertFalse(new_user_ref.local_user.passwords)
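
sql.session_for_read() supersedes the bare sql.get_session() call seen in the removed lines: the with-block scopes the session and guarantees it is closed. A rough sketch of how such a context manager can be built on SQLAlchemy (names and engine are illustrative, not keystone's actual implementation):

    import contextlib

    from sqlalchemy import create_engine, text
    from sqlalchemy.orm import sessionmaker

    _engine = create_engine('sqlite://')    # illustrative in-memory engine
    _Session = sessionmaker(bind=_engine)

    @contextlib.contextmanager
    def session_for_read():
        # Read paths need no commit; closing is no longer the caller's job.
        session = _Session()
        try:
            yield session
        finally:
            session.close()

    with session_for_read() as session:
        session.execute(text('SELECT 1'))
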
def test_delete_user_with_project_association(self):
- user = {'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': uuid.uuid4().hex}
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
@@ -191,16 +257,15 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
user['id'])
def test_create_null_user_name(self):
- user = {'name': None,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': uuid.uuid4().hex}
+ user = unit.new_user_ref(name=None,
+ domain_id=CONF.identity.default_domain_id)
self.assertRaises(exception.ValidationError,
self.identity_api.create_user,
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'],
- DEFAULT_DOMAIN_ID)
+ CONF.identity.default_domain_id)
def test_create_user_case_sensitivity(self):
# user name case sensitivity is down to the fact that it is marked as
@@ -208,25 +273,59 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
# LDAP.
# create a ref with a lowercase name
- ref = {
- 'name': uuid.uuid4().hex.lower(),
- 'domain_id': DEFAULT_DOMAIN_ID}
+ ref = unit.new_user_ref(name=uuid.uuid4().hex.lower(),
+ domain_id=CONF.identity.default_domain_id)
ref = self.identity_api.create_user(ref)
# assign a new ID with the same name, but this time in uppercase
ref['name'] = ref['name'].upper()
self.identity_api.create_user(ref)
+ def test_create_federated_user_unique_constraint(self):
+ federated_dict = unit.new_federated_user_ref()
+ user_dict = self.shadow_users_api.create_federated_user(federated_dict)
+ user_dict = self.identity_api.get_user(user_dict["id"])
+ self.assertIsNotNone(user_dict["id"])
+ self.assertRaises(exception.Conflict,
+ self.shadow_users_api.create_federated_user,
+ federated_dict)
+
+ def test_get_federated_user(self):
+ federated_dict = unit.new_federated_user_ref()
+ user_dict_create = self.shadow_users_api.create_federated_user(
+ federated_dict)
+ user_dict_get = self.shadow_users_api.get_federated_user(
+ federated_dict["idp_id"],
+ federated_dict["protocol_id"],
+ federated_dict["unique_id"])
+ self.assertItemsEqual(user_dict_create, user_dict_get)
+ self.assertEqual(user_dict_create["id"], user_dict_get["id"])
+
+ def test_update_federated_user_display_name(self):
+ federated_dict = unit.new_federated_user_ref()
+ user_dict_create = self.shadow_users_api.create_federated_user(
+ federated_dict)
+ new_display_name = uuid.uuid4().hex
+ self.shadow_users_api.update_federated_user_display_name(
+ federated_dict["idp_id"],
+ federated_dict["protocol_id"],
+ federated_dict["unique_id"],
+ new_display_name)
+ user_ref = self.shadow_users_api._get_federated_user(
+ federated_dict["idp_id"],
+ federated_dict["protocol_id"],
+ federated_dict["unique_id"])
+ self.assertEqual(user_ref.federated_users[0].display_name,
+ new_display_name)
+ self.assertEqual(user_dict_create["id"], user_ref.id)
+
def test_create_project_case_sensitivity(self):
# project name case sensitivity is down to the fact that it is marked
# as an SQL UNIQUE column, which may not be valid for other backends,
# like LDAP.
# create a ref with a lowercase name
- ref = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex.lower(),
- 'domain_id': DEFAULT_DOMAIN_ID}
+ ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(ref['id'], ref)
# assign a new ID with the same name, but this time in uppercase
@@ -235,25 +334,22 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
self.resource_api.create_project(ref['id'], ref)
def test_create_null_project_name(self):
- tenant = {'id': uuid.uuid4().hex,
- 'name': None,
- 'domain_id': DEFAULT_DOMAIN_ID}
+ project = unit.new_project_ref(
+ name=None, domain_id=CONF.identity.default_domain_id)
self.assertRaises(exception.ValidationError,
self.resource_api.create_project,
- tenant['id'],
- tenant)
+ project['id'],
+ project)
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
- tenant['id'])
+ project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project_by_name,
- tenant['name'],
- DEFAULT_DOMAIN_ID)
+ project['name'],
+ CONF.identity.default_domain_id)
def test_delete_project_with_user_association(self):
- user = {'name': 'fakeuser',
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': 'passwd'}
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
@@ -261,52 +357,6 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
tenants = self.assignment_api.list_projects_for_user(user['id'])
self.assertEqual([], tenants)
- def test_metadata_removed_on_delete_user(self):
- # A test to check that the internal representation
- # or roles is correctly updated when a user is deleted
- user = {'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': 'passwd'}
- user = self.identity_api.create_user(user)
- role = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
- self.role_api.create_role(role['id'], role)
- self.assignment_api.add_role_to_user_and_project(
- user['id'],
- self.tenant_bar['id'],
- role['id'])
- self.identity_api.delete_user(user['id'])
-
- # Now check whether the internal representation of roles
- # has been deleted
- self.assertRaises(exception.MetadataNotFound,
- self.assignment_api._get_metadata,
- user['id'],
- self.tenant_bar['id'])
-
- def test_metadata_removed_on_delete_project(self):
- # A test to check that the internal representation
- # or roles is correctly updated when a project is deleted
- user = {'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': 'passwd'}
- user = self.identity_api.create_user(user)
- role = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
- self.role_api.create_role(role['id'], role)
- self.assignment_api.add_role_to_user_and_project(
- user['id'],
- self.tenant_bar['id'],
- role['id'])
- self.resource_api.delete_project(self.tenant_bar['id'])
-
- # Now check whether the internal representation of roles
- # has been deleted
- self.assertRaises(exception.MetadataNotFound,
- self.assignment_api._get_metadata,
- user['id'],
- self.tenant_bar['id'])
-
def test_update_project_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
@@ -317,20 +367,17 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
This behavior is specific to the SQL driver.
"""
- tenant_id = uuid.uuid4().hex
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
- tenant = {
- 'id': tenant_id,
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- arbitrary_key: arbitrary_value}
- ref = self.resource_api.create_project(tenant_id, tenant)
+ project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ project[arbitrary_key] = arbitrary_value
+ ref = self.resource_api.create_project(project['id'], project)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('extra'))
- tenant['name'] = uuid.uuid4().hex
- ref = self.resource_api.update_project(tenant_id, tenant)
+ ref['name'] = uuid.uuid4().hex
+ ref = self.resource_api.update_project(ref['id'], ref)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
@@ -346,11 +393,9 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
"""
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
- user = {
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': uuid.uuid4().hex,
- arbitrary_key: arbitrary_value}
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
+ user[arbitrary_key] = arbitrary_value
+ del user["id"]
ref = self.identity_api.create_user(user)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('password'))
@@ -365,30 +410,25 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_sql_user_to_dict_null_default_project_id(self):
- user = {
- 'name': uuid.uuid4().hex,
- 'domain_id': DEFAULT_DOMAIN_ID,
- 'password': uuid.uuid4().hex}
-
+ user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
user = self.identity_api.create_user(user)
- session = sql.get_session()
- query = session.query(identity_sql.User)
- query = query.filter_by(id=user['id'])
- raw_user_ref = query.one()
- self.assertIsNone(raw_user_ref.default_project_id)
- user_ref = raw_user_ref.to_dict()
- self.assertNotIn('default_project_id', user_ref)
- session.close()
+ with sql.session_for_read() as session:
+ query = session.query(identity_sql.User)
+ query = query.filter_by(id=user['id'])
+ raw_user_ref = query.one()
+ self.assertIsNone(raw_user_ref.default_project_id)
+ user_ref = raw_user_ref.to_dict()
+ self.assertNotIn('default_project_id', user_ref)
+ session.close()
def test_list_domains_for_user(self):
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain['id'], 'enabled': True}
+ user = unit.new_user_ref(domain_id=domain['id'])
- test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ test_domain1 = unit.new_domain_ref()
self.resource_api.create_domain(test_domain1['id'], test_domain1)
- test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ test_domain2 = unit.new_domain_ref()
self.resource_api.create_domain(test_domain2['id'], test_domain2)
user = self.identity_api.create_user(user)
@@ -407,21 +447,20 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
# Create two groups each with a role on a different domain, and
# make user1 a member of both groups. Both these new domains
# should now be included, along with any direct user grants.
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain['id'], 'enabled': True}
+ user = unit.new_user_ref(domain_id=domain['id'])
user = self.identity_api.create_user(user)
- group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
- group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
+ group2 = unit.new_group_ref(domain_id=domain['id'])
group2 = self.identity_api.create_group(group2)
- test_domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ test_domain1 = unit.new_domain_ref()
self.resource_api.create_domain(test_domain1['id'], test_domain1)
- test_domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ test_domain2 = unit.new_domain_ref()
self.resource_api.create_domain(test_domain2['id'], test_domain2)
- test_domain3 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ test_domain3 = unit.new_domain_ref()
self.resource_api.create_domain(test_domain3['id'], test_domain3)
self.identity_api.add_user_to_group(user['id'], group1['id'])
@@ -451,17 +490,16 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
- When listing domains for user, neither domain should be returned
"""
- domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain1 = unit.new_domain_ref()
domain1 = self.resource_api.create_domain(domain1['id'], domain1)
- domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain2 = unit.new_domain_ref()
domain2 = self.resource_api.create_domain(domain2['id'], domain2)
- user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
- 'domain_id': domain1['id'], 'enabled': True}
+ user = unit.new_user_ref(domain_id=domain1['id'])
user = self.identity_api.create_user(user)
- group = {'name': uuid.uuid4().hex, 'domain_id': domain1['id']}
+ group = unit.new_group_ref(domain_id=domain1['id'])
group = self.identity_api.create_group(group)
self.identity_api.add_user_to_group(user['id'], group['id'])
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# Create a grant on each domain, one user grant, one group grant,
@@ -480,25 +518,143 @@ class SqlIdentity(SqlTests, test_backend.IdentityTests):
# roles assignments.
self.assertThat(user_domains, matchers.HasLength(0))
+ def test_storing_null_domain_id_in_project_ref(self):
+ """Test the special storage of domain_id=None in sql resource driver.
+
+ The resource driver uses a special value in place of None for domain_id
+ in the project record. This shouldn't escape the driver. Hence we test
+ the interface to ensure that you can store a domain_id of None, and
+ that any special value used inside the driver does not escape through
+ the interface.
+
+ """
+ spoiler_project = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.resource_api.create_project(spoiler_project['id'],
+ spoiler_project)
+
+ # First let's create a project with a None domain_id and make sure we
+ # can read it back.
+ project = unit.new_project_ref(domain_id=None, is_domain=True)
+ project = self.resource_api.create_project(project['id'], project)
+ ref = self.resource_api.get_project(project['id'])
+ self.assertDictEqual(project, ref)
+
+ # Can we get it by name?
+ ref = self.resource_api.get_project_by_name(project['name'], None)
+ self.assertDictEqual(project, ref)
+
+ # Can we filter for them - create a second domain to ensure we are
+ # testing the receipt of more than one.
+ project2 = unit.new_project_ref(domain_id=None, is_domain=True)
+ project2 = self.resource_api.create_project(project2['id'], project2)
+ hints = driver_hints.Hints()
+ hints.add_filter('domain_id', None)
+ refs = self.resource_api.list_projects(hints)
+ self.assertThat(refs, matchers.HasLength(2 + self.domain_count))
+ self.assertIn(project, refs)
+ self.assertIn(project2, refs)
+
+ # Can we update it?
+ project['name'] = uuid.uuid4().hex
+ self.resource_api.update_project(project['id'], project)
+ ref = self.resource_api.get_project(project['id'])
+ self.assertDictEqual(project, ref)
+
+ # Finally, make sure we can delete it
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+ self.resource_api.delete_project(project['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project['id'])
+
+ def test_hidden_project_domain_root_is_really_hidden(self):
+ """Ensure we cannot access the hidden root of all project domains.
+
+ Calling any of the driver methods should give the same result as
+ passing a project that does not exist. We don't
+ test create_project, since we do not allow a caller of our API to
+ specify their own ID for a new entity.
+
+ """
+ def _exercise_project_api(ref_id):
+ driver = self.resource_api.driver
+ self.assertRaises(exception.ProjectNotFound,
+ driver.get_project,
+ ref_id)
+
+ self.assertRaises(exception.ProjectNotFound,
+ driver.get_project_by_name,
+ resource.NULL_DOMAIN_ID,
+ ref_id)
+
+ project_ids = [x['id'] for x in
+ driver.list_projects(driver_hints.Hints())]
+ self.assertNotIn(ref_id, project_ids)
+
+ projects = driver.list_projects_from_ids([ref_id])
+ self.assertThat(projects, matchers.HasLength(0))
-class SqlTrust(SqlTests, test_backend.TrustTests):
+ project_ids = [x for x in
+ driver.list_project_ids_from_domain_ids([ref_id])]
+ self.assertNotIn(ref_id, project_ids)
+
+ self.assertRaises(exception.DomainNotFound,
+ driver.list_projects_in_domain,
+ ref_id)
+
+ project_ids = [
+ x['id'] for x in
+ driver.list_projects_acting_as_domain(driver_hints.Hints())]
+ self.assertNotIn(ref_id, project_ids)
+
+ projects = driver.list_projects_in_subtree(ref_id)
+ self.assertThat(projects, matchers.HasLength(0))
+
+ self.assertRaises(exception.ProjectNotFound,
+ driver.list_project_parents,
+ ref_id)
+
+ # A non-existing project just returns True from the driver
+ self.assertTrue(driver.is_leaf_project(ref_id))
+
+ self.assertRaises(exception.ProjectNotFound,
+ driver.update_project,
+ ref_id,
+ {})
+
+ self.assertRaises(exception.ProjectNotFound,
+ driver.delete_project,
+ ref_id)
+
+ # Deleting a list of projects that includes a non-existing project
+ # should be silent
+ driver.delete_projects_from_ids([ref_id])
+
+ _exercise_project_api(uuid.uuid4().hex)
+ _exercise_project_api(resource.NULL_DOMAIN_ID)
+
+
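
Both tests above depend on the driver swapping domain_id=None for an internal sentinel (resource.NULL_DOMAIN_ID) on the way into SQL and swapping it back on the way out, so the special value never escapes the driver interface. A simplified sketch of that mapping (the sentinel string is illustrative):

    NULL_DOMAIN_ID = '<<keystone.domain.root>>'   # illustrative sentinel

    def _encode_domain_id(domain_id):
        # Store a sentinel instead of SQL NULL inside the driver.
        return NULL_DOMAIN_ID if domain_id is None else domain_id

    def _decode_domain_id(stored):
        # Never let the sentinel escape through the driver interface.
        return None if stored == NULL_DOMAIN_ID else stored

    assert _decode_domain_id(_encode_domain_id(None)) is None
    assert _decode_domain_id(_encode_domain_id('default')) == 'default'
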
+class SqlTrust(SqlTests, trust_tests.TrustTests):
pass
-class SqlToken(SqlTests, test_backend.TokenTests):
+class SqlToken(SqlTests, token_tests.TokenTests):
def test_token_revocation_list_uses_right_columns(self):
# This query used to be heavy with too many columns. We want
# to make sure it is only running with the minimum columns
# necessary.
expected_query_args = (token_sql.TokenModel.id,
- token_sql.TokenModel.expires)
+ token_sql.TokenModel.expires,
+ token_sql.TokenModel.extra,)
with mock.patch.object(token_sql, 'sql') as mock_sql:
tok = token_sql.Token()
tok.list_revoked_tokens()
- mock_query = mock_sql.get_session().query
+ mock_query = mock_sql.session_for_read().__enter__().query
mock_query.assert_called_with(*expected_query_args)
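
The assertion works because a MagicMock returns the same child mock for the same attribute/call chain, so reaching through session_for_read().__enter__().query lands on the exact object the code under test called. A standalone sketch of the pattern, assuming the same mock library used elsewhere in this suite:

    import mock

    sql = mock.MagicMock()

    # Code under test: a with-block bound to session_for_read().
    with sql.session_for_read() as session:
        session.query('id', 'expires')

    # The test reaches the very same mock through the same chain.
    query_mock = sql.session_for_read().__enter__().query
    query_mock.assert_called_with('id', 'expires')
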
def test_flush_expired_tokens_batch(self):
@@ -523,8 +679,12 @@ class SqlToken(SqlTests, test_backend.TokenTests):
# other tests below test the differences between how they use the batch
# strategy
with mock.patch.object(token_sql, 'sql') as mock_sql:
- mock_sql.get_session().query().filter().delete.return_value = 0
- mock_sql.get_session().bind.dialect.name = 'mysql'
+ mock_sql.session_for_write().__enter__(
+ ).query().filter().delete.return_value = 0
+
+ mock_sql.session_for_write().__enter__(
+ ).bind.dialect.name = 'mysql'
+
tok = token_sql.Token()
expiry_mock = mock.Mock()
ITERS = [1, 2, 3]
@@ -535,7 +695,10 @@ class SqlToken(SqlTests, test_backend.TokenTests):
# The expiry strategy is only invoked once, the other calls are via
# the yield return.
self.assertEqual(1, expiry_mock.call_count)
- mock_delete = mock_sql.get_session().query().filter().delete
+
+ mock_delete = mock_sql.session_for_write().__enter__(
+ ).query().filter().delete
+
self.assertThat(mock_delete.call_args_list,
matchers.HasLength(len(ITERS)))
@@ -550,12 +713,12 @@ class SqlToken(SqlTests, test_backend.TokenTests):
if i == 0:
# The first time the batch iterator returns, it should return
# the first result that comes back from the database.
- self.assertEqual(x, 'test')
+ self.assertEqual('test', x)
elif i == 1:
# The second time, the database range function should return
# nothing, so the batch iterator returns the result of the
# upper_bound function
- self.assertEqual(x, "final value")
+ self.assertEqual("final value", x)
else:
self.fail("range batch function returned more than twice")
@@ -568,39 +731,30 @@ class SqlToken(SqlTests, test_backend.TokenTests):
tok = token_sql.Token()
db2_strategy = tok._expiry_range_strategy('ibm_db_sa')
self.assertIsInstance(db2_strategy, functools.partial)
- self.assertEqual(db2_strategy.func, token_sql._expiry_range_batched)
- self.assertEqual(db2_strategy.keywords, {'batch_size': 100})
+ self.assertEqual(token_sql._expiry_range_batched, db2_strategy.func)
+ self.assertEqual({'batch_size': 100}, db2_strategy.keywords)
def test_expiry_range_strategy_mysql(self):
tok = token_sql.Token()
mysql_strategy = tok._expiry_range_strategy('mysql')
self.assertIsInstance(mysql_strategy, functools.partial)
- self.assertEqual(mysql_strategy.func, token_sql._expiry_range_batched)
- self.assertEqual(mysql_strategy.keywords, {'batch_size': 1000})
+ self.assertEqual(token_sql._expiry_range_batched, mysql_strategy.func)
+ self.assertEqual({'batch_size': 1000}, mysql_strategy.keywords)
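
These strategy tests pin down per-dialect batch sizes bound via functools.partial. A stripped-down sketch of the dispatch, with batch sizes taken from the assertions above and illustrative function bodies:

    import functools

    def _expiry_range_batched(session, upper_bound_func, batch_size):
        yield upper_bound_func()   # illustrative body

    def _expiry_range_all(session, upper_bound_func):
        yield upper_bound_func()   # illustrative body

    def expiry_range_strategy(dialect):
        # DB2 and MySQL delete expired tokens in batches; others in one go.
        if dialect == 'ibm_db_sa':
            return functools.partial(_expiry_range_batched, batch_size=100)
        if dialect == 'mysql':
            return functools.partial(_expiry_range_batched, batch_size=1000)
        return _expiry_range_all

    strategy = expiry_range_strategy('mysql')
    assert strategy.func is _expiry_range_batched
    assert strategy.keywords == {'batch_size': 1000}
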
-class SqlCatalog(SqlTests, test_backend.CatalogTests):
+class SqlCatalog(SqlTests, catalog_tests.CatalogTests):
_legacy_endpoint_id_in_endpoint = True
_enabled_default_to_true_when_creating_endpoint = True
def test_catalog_ignored_malformed_urls(self):
- service = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- }
- self.catalog_api.create_service(service['id'], service.copy())
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
malformed_url = "http://192.168.1.104:8774/v2/$(tenant)s"
- endpoint = {
- 'id': uuid.uuid4().hex,
- 'region_id': None,
- 'service_id': service['id'],
- 'interface': 'public',
- 'url': malformed_url,
- }
+ endpoint = unit.new_endpoint_ref(service_id=service['id'],
+ url=malformed_url,
+ region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
# NOTE(dstanek): there are no valid URLs, so nothing is in the catalog
@@ -608,21 +762,11 @@ class SqlCatalog(SqlTests, test_backend.CatalogTests):
self.assertEqual({}, catalog)
def test_get_catalog_with_empty_public_url(self):
- service = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- }
- self.catalog_api.create_service(service['id'], service.copy())
-
- endpoint = {
- 'id': uuid.uuid4().hex,
- 'region_id': None,
- 'interface': 'public',
- 'url': '',
- 'service_id': service['id'],
- }
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ endpoint = unit.new_endpoint_ref(url='', service_id=service['id'],
+ region_id=None)
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
catalog = self.catalog_api.get_catalog('user', 'tenant')
@@ -633,22 +777,12 @@ class SqlCatalog(SqlTests, test_backend.CatalogTests):
self.assertIsNone(catalog_endpoint.get('adminURL'))
self.assertIsNone(catalog_endpoint.get('internalURL'))
- def test_create_endpoint_region_404(self):
- service = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- }
- self.catalog_api.create_service(service['id'], service.copy())
-
- endpoint = {
- 'id': uuid.uuid4().hex,
- 'region_id': uuid.uuid4().hex,
- 'service_id': service['id'],
- 'interface': 'public',
- 'url': uuid.uuid4().hex,
- }
+ def test_create_endpoint_region_returns_not_found(self):
+ service = unit.new_service_ref()
+ self.catalog_api.create_service(service['id'], service)
+
+ endpoint = unit.new_endpoint_ref(region_id=uuid.uuid4().hex,
+ service_id=service['id'])
self.assertRaises(exception.ValidationError,
self.catalog_api.create_endpoint,
@@ -656,21 +790,14 @@ class SqlCatalog(SqlTests, test_backend.CatalogTests):
endpoint.copy())
def test_create_region_invalid_id(self):
- region = {
- 'id': '0' * 256,
- 'description': '',
- 'extra': {},
- }
+ region = unit.new_region_ref(id='0' * 256)
self.assertRaises(exception.StringLengthExceeded,
self.catalog_api.create_region,
- region.copy())
+ region)
def test_create_region_invalid_parent_id(self):
- region = {
- 'id': uuid.uuid4().hex,
- 'parent_region_id': '0' * 256,
- }
+ region = unit.new_region_ref(parent_region_id='0' * 256)
self.assertRaises(exception.RegionNotFound,
self.catalog_api.create_region,
@@ -678,77 +805,57 @@ class SqlCatalog(SqlTests, test_backend.CatalogTests):
def test_delete_region_with_endpoint(self):
# create a region
- region = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- }
+ region = unit.new_region_ref()
self.catalog_api.create_region(region)
# create a child region
- child_region = {
- 'id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'parent_id': region['id']
- }
+ child_region = unit.new_region_ref(parent_region_id=region['id'])
self.catalog_api.create_region(child_region)
# create a service
- service = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- }
+ service = unit.new_service_ref()
self.catalog_api.create_service(service['id'], service)
# create an endpoint attached to the service and child region
- child_endpoint = {
- 'id': uuid.uuid4().hex,
- 'region_id': child_region['id'],
- 'interface': uuid.uuid4().hex[:8],
- 'url': uuid.uuid4().hex,
- 'service_id': service['id'],
- }
+ child_endpoint = unit.new_endpoint_ref(region_id=child_region['id'],
+ service_id=service['id'])
+
self.catalog_api.create_endpoint(child_endpoint['id'], child_endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
child_region['id'])
# create an endpoint attached to the service and parent region
- endpoint = {
- 'id': uuid.uuid4().hex,
- 'region_id': region['id'],
- 'interface': uuid.uuid4().hex[:8],
- 'url': uuid.uuid4().hex,
- 'service_id': service['id'],
- }
+ endpoint = unit.new_endpoint_ref(region_id=region['id'],
+ service_id=service['id'])
+
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
self.assertRaises(exception.RegionDeletionError,
self.catalog_api.delete_region,
region['id'])
-class SqlPolicy(SqlTests, test_backend.PolicyTests):
+class SqlPolicy(SqlTests, policy_tests.PolicyTests):
pass
-class SqlInheritance(SqlTests, test_backend.InheritanceTests):
+class SqlInheritance(SqlTests, assignment_tests.InheritanceTests):
pass
-class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
+class SqlImpliedRoles(SqlTests, assignment_tests.ImpliedRoleTests):
+ pass
+
+
+class SqlTokenCacheInvalidation(SqlTests, token_tests.TokenCacheInvalidation):
def setUp(self):
super(SqlTokenCacheInvalidation, self).setUp()
self._create_test_data()
-class SqlFilterTests(SqlTests, test_backend.FilterTests):
-
- def _get_user_name_field_size(self):
- return identity_sql.User.name.type.length
+class SqlFilterTests(SqlTests, identity_tests.FilterTests):
def clean_up_entities(self):
"""Clean up entity test data from Filter Test Cases."""
-
for entity in ['user', 'group', 'project']:
self._delete_test_data(entity, self.entity_list[entity])
self._delete_test_data(entity, self.domain1_entity_list[entity])
@@ -760,11 +867,12 @@ class SqlFilterTests(SqlTests, test_backend.FilterTests):
del self.domain1
def test_list_entities_filtered_by_domain(self):
- # NOTE(henry-nash): This method is here rather than in test_backend
- # since any domain filtering with LDAP is handled by the manager
- # layer (and is already tested elsewhere) not at the driver level.
+ # NOTE(henry-nash): This method is here rather than in
+ # unit.identity.test_backends since any domain filtering with LDAP is
+ # handled by the manager layer (and is already tested elsewhere) not at
+ # the driver level.
self.addCleanup(self.clean_up_entities)
- self.domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ self.domain1 = unit.new_domain_ref()
self.resource_api.create_domain(self.domain1['id'], self.domain1)
self.entity_list = {}
@@ -804,7 +912,7 @@ class SqlFilterTests(SqlTests, test_backend.FilterTests):
# See if we can add a SQL command...use the group table instead of the
# user table since 'user' is reserved word for SQLAlchemy.
- group = {'name': uuid.uuid4().hex, 'domain_id': DEFAULT_DOMAIN_ID}
+ group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
group = self.identity_api.create_group(group)
hints = driver_hints.Hints()
@@ -816,10 +924,10 @@ class SqlFilterTests(SqlTests, test_backend.FilterTests):
self.assertTrue(len(groups) > 0)
-class SqlLimitTests(SqlTests, test_backend.LimitTests):
+class SqlLimitTests(SqlTests, identity_tests.LimitTests):
def setUp(self):
super(SqlLimitTests, self).setUp()
- test_backend.LimitTests.setUp(self)
+ identity_tests.LimitTests.setUp(self)
class FakeTable(sql.ModelBase):
@@ -850,11 +958,6 @@ class SqlDecorators(unit.TestCase):
tt = FakeTable(col='a')
self.assertEqual('a', tt.col)
- def test_non_ascii_init(self):
- # NOTE(I159): Non ASCII characters must cause UnicodeDecodeError
- # if encoding is not provided explicitly.
- self.assertRaises(UnicodeDecodeError, FakeTable, col='Я')
-
def test_conflict_happend(self):
self.assertRaises(exception.Conflict, FakeTable().insert)
self.assertRaises(exception.UnexpectedError, FakeTable().update)
@@ -876,21 +979,15 @@ class SqlModuleInitialization(unit.TestCase):
class SqlCredential(SqlTests):
def _create_credential_with_user_id(self, user_id=uuid.uuid4().hex):
- credential_id = uuid.uuid4().hex
- new_credential = {
- 'id': credential_id,
- 'user_id': user_id,
- 'project_id': uuid.uuid4().hex,
- 'blob': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'extra': uuid.uuid4().hex
- }
- self.credential_api.create_credential(credential_id, new_credential)
- return new_credential
+ credential = unit.new_credential_ref(user_id=user_id,
+ extra=uuid.uuid4().hex,
+ type=uuid.uuid4().hex)
+ self.credential_api.create_credential(credential['id'], credential)
+ return credential
def _validateCredentialList(self, retrieved_credentials,
expected_credentials):
- self.assertEqual(len(retrieved_credentials), len(expected_credentials))
+ self.assertEqual(len(expected_credentials), len(retrieved_credentials))
retrieved_ids = [c['id'] for c in retrieved_credentials]
for cred in expected_credentials:
self.assertIn(cred['id'], retrieved_ids)
@@ -920,3 +1017,9 @@ class SqlCredential(SqlTests):
credentials = self.credential_api.list_credentials_for_user(
self.user_foo['id'])
self._validateCredentialList(credentials, self.user_credentials)
+
+ def test_list_credentials_for_user_and_type(self):
+ cred = self.user_credentials[0]
+ credentials = self.credential_api.list_credentials_for_user(
+ self.user_foo['id'], type=cred['type'])
+ self._validateCredentialList(credentials, [cred])
diff --git a/keystone-moon/keystone/tests/unit/test_backend_templated.py b/keystone-moon/keystone/tests/unit/test_backend_templated.py
index 4a7bf9e5..ca957e78 100644
--- a/keystone-moon/keystone/tests/unit/test_backend_templated.py
+++ b/keystone-moon/keystone/tests/unit/test_backend_templated.py
@@ -19,16 +19,16 @@ from six.moves import zip
from keystone import catalog
from keystone.tests import unit
+from keystone.tests.unit.catalog import test_backends as catalog_tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
-from keystone.tests.unit import test_backend
BROKEN_WRITE_FUNCTIONALITY_MSG = ("Templated backend doesn't correctly "
"implement write operations")
-class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
+class TestTemplatedCatalog(unit.TestCase, catalog_tests.CatalogTests):
DEFAULT_FIXTURE = {
'RegionOne': {
@@ -64,8 +64,11 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_get_catalog(self):
catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
- self.assertDictEqual(catalog_ref, self.DEFAULT_FIXTURE)
+ self.assertDictEqual(self.DEFAULT_FIXTURE, catalog_ref)
+ # NOTE(lbragstad): This test is skipped because the catalog is being
+ # modified within the test and not through the API.
+ @unit.skip_if_cache_is_enabled('catalog')
def test_catalog_ignored_malformed_urls(self):
# both endpoints are in the catalog
catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
@@ -85,7 +88,9 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
self.skipTest("Templated backend doesn't have disabled endpoints")
def assert_catalogs_equal(self, expected, observed):
- for e, o in zip(sorted(expected), sorted(observed)):
+ sort_key = lambda d: d['id']
+ for e, o in zip(sorted(expected, key=sort_key),
+ sorted(observed, key=sort_key)):
expected_endpoints = e.pop('endpoints')
observed_endpoints = o.pop('endpoints')
self.assertDictEqual(e, o)
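
Sorting both catalogs with an explicit key before zipping is what makes the pairwise comparison meaningful (and dicts are not orderable on Python 3, so sorted() needs the key anyway). A tiny illustration:

    expected = [{'id': 'b'}, {'id': 'a'}]
    observed = [{'id': 'a'}, {'id': 'b'}]

    sort_key = lambda d: d['id']
    for e, o in zip(sorted(expected, key=sort_key),
                    sorted(observed, key=sort_key)):
        assert e == o   # pairs now line up by id
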
@@ -126,9 +131,10 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
user_id = uuid.uuid4().hex
+ tenant_id = None
# If the URL has no 'tenant_id' to substitute, we will skip the
# endpoint which contains this kind of URL.
- catalog_ref = self.catalog_api.get_v3_catalog(user_id, tenant_id=None)
+ catalog_ref = self.catalog_api.get_v3_catalog(user_id, tenant_id)
exp_catalog = [
{'endpoints': [],
'type': 'compute',
@@ -155,8 +161,24 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_service_filtering(self):
self.skipTest("Templated backend doesn't support filtering")
+ def test_list_services_with_hints(self):
+ hints = {}
+ services = self.catalog_api.list_services(hints=hints)
+ exp_services = [
+ {'type': 'compute',
+ 'description': '',
+ 'enabled': True,
+ 'name': "'Compute Service'",
+ 'id': 'compute'},
+ {'type': 'identity',
+ 'description': '',
+ 'enabled': True,
+ 'name': "'Identity Service'",
+ 'id': 'identity'}]
+ self.assertItemsEqual(exp_services, services)
+
# NOTE(dstanek): the following methods have been overridden
- # from test_backend.CatalogTests
+ # from unit.catalog.test_backends.CatalogTests.
def test_region_crud(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
@@ -172,10 +194,10 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_create_region_with_duplicate_id(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
- def test_delete_region_404(self):
+ def test_delete_region_returns_not_found(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
- def test_create_region_invalid_parent_region_404(self):
+ def test_create_region_invalid_parent_region_returns_not_found(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
def test_avoid_creating_circular_references_in_regions_update(self):
@@ -203,7 +225,7 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_cache_layer_delete_service_with_endpoint(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
- def test_delete_service_404(self):
+ def test_delete_service_returns_not_found(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
def test_update_endpoint_nonexistent_service(self):
@@ -215,10 +237,10 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
def test_update_endpoint_nonexistent_region(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
- def test_get_endpoint_404(self):
+ def test_get_endpoint_returns_not_found(self):
self.skipTest("Templated backend doesn't use IDs for endpoints.")
- def test_delete_endpoint_404(self):
+ def test_delete_endpoint_returns_not_found(self):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
def test_create_endpoint(self):
@@ -228,11 +250,11 @@ class TestTemplatedCatalog(unit.TestCase, test_backend.CatalogTests):
self.skipTest(BROKEN_WRITE_FUNCTIONALITY_MSG)
def test_list_endpoints(self):
- # NOTE(dstanek): a future commit will fix this functionality and
- # this test
- expected_ids = set()
+ expected_urls = set(['http://localhost:$(public_port)s/v2.0',
+ 'http://localhost:$(admin_port)s/v2.0',
+ 'http://localhost:8774/v1.1/$(tenant_id)s'])
endpoints = self.catalog_api.list_endpoints()
- self.assertEqual(expected_ids, set(e['id'] for e in endpoints))
+ self.assertEqual(expected_urls, set(e['url'] for e in endpoints))
@unit.skip_if_cache_disabled('catalog')
def test_invalidate_cache_when_updating_endpoint(self):
diff --git a/keystone-moon/keystone/tests/unit/test_catalog.py b/keystone-moon/keystone/tests/unit/test_catalog.py
index ada2de43..76e3055a 100644
--- a/keystone-moon/keystone/tests/unit/test_catalog.py
+++ b/keystone-moon/keystone/tests/unit/test_catalog.py
@@ -31,12 +31,9 @@ class V2CatalogTestCase(rest.RestfulTestCase):
super(V2CatalogTestCase, self).setUp()
self.useFixture(database.Database())
- self.service_id = uuid.uuid4().hex
self.service = unit.new_service_ref()
- self.service['id'] = self.service_id
- self.catalog_api.create_service(
- self.service_id,
- self.service.copy())
+ self.service_id = self.service['id']
+ self.catalog_api.create_service(self.service_id, self.service)
# TODO(termie): add an admin user to the fixtures and use that user
# override the fixtures, for now
@@ -53,13 +50,14 @@ class V2CatalogTestCase(rest.RestfulTestCase):
"""Applicable only to JSON."""
return r.result['access']['token']['id']
- def _endpoint_create(self, expected_status=200, service_id=SERVICE_FIXTURE,
+ def _endpoint_create(self, expected_status=http_client.OK,
+ service_id=SERVICE_FIXTURE,
publicurl='http://localhost:8080',
internalurl='http://localhost:8080',
adminurl='http://localhost:8080'):
if service_id is SERVICE_FIXTURE:
service_id = self.service_id
- # FIXME(dolph): expected status should actually be 201 Created
+
path = '/v2.0/endpoints'
body = {
'endpoint': {
@@ -77,40 +75,33 @@ class V2CatalogTestCase(rest.RestfulTestCase):
return body, r
def _region_create(self):
- region_id = uuid.uuid4().hex
- self.catalog_api.create_region({'id': region_id})
+ region = unit.new_region_ref()
+ region_id = region['id']
+ self.catalog_api.create_region(region)
return region_id
- def _service_create(self):
- service_id = uuid.uuid4().hex
- service = unit.new_service_ref()
- service['id'] = service_id
- self.catalog_api.create_service(service_id, service)
- return service_id
-
def test_endpoint_create(self):
req_body, response = self._endpoint_create()
self.assertIn('endpoint', response.result)
self.assertIn('id', response.result['endpoint'])
for field, value in req_body['endpoint'].items():
- self.assertEqual(response.result['endpoint'][field], value)
+ self.assertEqual(value, response.result['endpoint'][field])
def test_pure_v3_endpoint_with_publicurl_visible_from_v2(self):
- """Test pure v3 endpoint can be fetched via v2 API.
+ """Test pure v3 endpoint can be fetched via v2.0 API.
- For those who are using v2 APIs, endpoints created by v3 API should
+ For those who are using v2.0 APIs, endpoints created by v3 API should
also be visible as there are no differences about the endpoints
- except the format or the internal implementation.
- And because public url is required for v2 API, so only the v3 endpoints
- of the service which has the public interface endpoint will be
- converted into v2 endpoints.
+ except the format or the internal implementation. Since publicURL is
+ required for the v2.0 API, only v3 endpoints of services that have a
+ public interface endpoint will be converted into v2.0 endpoints.
"""
region_id = self._region_create()
- service_id = self._service_create()
- # create a v3 endpoint with three interfaces
+
+ # create v3 endpoints with three interfaces
body = {
- 'endpoint': unit.new_endpoint_ref(service_id,
- default_region_id=region_id)
+ 'endpoint': unit.new_endpoint_ref(self.service_id,
+ region_id=region_id)
}
for interface in catalog.controllers.INTERFACES:
body['endpoint']['interface'] = interface
@@ -122,11 +113,11 @@ class V2CatalogTestCase(rest.RestfulTestCase):
r = self.admin_request(token=self.get_scoped_token(),
path='/v2.0/endpoints')
- # v3 endpoints having public url can be fetched via v2.0 API
+ # Endpoints of the service which have a public interface endpoint
+ # will be returned via v2.0 API
self.assertEqual(1, len(r.result['endpoints']))
v2_endpoint = r.result['endpoints'][0]
- self.assertEqual(service_id, v2_endpoint['service_id'])
- # check urls just in case.
+ self.assertEqual(self.service_id, v2_endpoint['service_id'])
# This is not the focus of this test, so no different urls are used.
self.assertEqual(body['endpoint']['url'], v2_endpoint['publicurl'])
self.assertEqual(body['endpoint']['url'], v2_endpoint['adminurl'])
@@ -134,23 +125,23 @@ class V2CatalogTestCase(rest.RestfulTestCase):
self.assertNotIn('name', v2_endpoint)
v3_endpoint = self.catalog_api.get_endpoint(v2_endpoint['id'])
- # it's the v3 public endpoint's id as the generated v2 endpoint
+ # Check that the generated v2.0 endpoint reuses the v3 public endpoint's id
self.assertEqual('public', v3_endpoint['interface'])
- self.assertEqual(service_id, v3_endpoint['service_id'])
+ self.assertEqual(self.service_id, v3_endpoint['service_id'])
def test_pure_v3_endpoint_without_publicurl_invisible_from_v2(self):
- """Test pure v3 endpoint without public url can't be fetched via v2 API.
+ """Test that the v2.0 API can't fetch v3 endpoints without publicURLs.
- V2 API will return endpoints created by v3 API, but because public url
- is required for v2 API, so v3 endpoints without public url will be
- ignored.
+ The v2.0 API will return endpoints created by the v3 API, but because
+ publicURL is required for the service in the v2.0 API, v3 endpoints of
+ a service that don't have a publicURL will be ignored.
"""
region_id = self._region_create()
- service_id = self._service_create()
+
# create a v3 endpoint without public interface
body = {
- 'endpoint': unit.new_endpoint_ref(service_id,
- default_region_id=region_id)
+ 'endpoint': unit.new_endpoint_ref(self.service_id,
+ region_id=region_id)
}
for interface in catalog.controllers.INTERFACES:
if interface == 'public':
@@ -164,7 +155,8 @@ class V2CatalogTestCase(rest.RestfulTestCase):
r = self.admin_request(token=self.get_scoped_token(),
path='/v2.0/endpoints')
- # v3 endpoints without public url won't be fetched via v2.0 API
+ # v3 endpoints of a service which don't have publicURL can't be
+ # fetched via v2.0 API
self.assertEqual(0, len(r.result['endpoints']))
def test_endpoint_create_with_null_adminurl(self):
@@ -209,7 +201,7 @@ class V2CatalogTestCase(rest.RestfulTestCase):
valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
# baseline tests that all valid URLs works
- self._endpoint_create(expected_status=200,
+ self._endpoint_create(expected_status=http_client.OK,
publicurl=valid_url,
internalurl=valid_url,
adminurl=valid_url)
@@ -297,28 +289,23 @@ class TestV2CatalogAPISQL(unit.TestCase):
self.useFixture(database.Database())
self.catalog_api = catalog.Manager()
- self.service_id = uuid.uuid4().hex
- service = {'id': self.service_id, 'name': uuid.uuid4().hex}
+ service = unit.new_service_ref()
+ self.service_id = service['id']
self.catalog_api.create_service(self.service_id, service)
- endpoint = self.new_endpoint_ref(service_id=self.service_id)
+ self.create_endpoint(service_id=self.service_id)
+
+ def create_endpoint(self, service_id, **kwargs):
+ endpoint = unit.new_endpoint_ref(service_id=service_id,
+ region_id=None,
+ **kwargs)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ return endpoint
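
create_endpoint() funnels every endpoint in this class through one factory, with per-test overrides passed as keyword arguments. The same defaults-plus-overrides pattern in isolation (field names are illustrative, not unit.new_endpoint_ref's exact contract):

    import uuid

    def new_endpoint_ref(service_id, **overrides):
        # Sensible defaults first, then let each test override anything.
        ref = {
            'id': uuid.uuid4().hex,
            'service_id': service_id,
            'interface': 'public',
            'url': 'http://localhost:8080',
            'region_id': None,
        }
        ref.update(overrides)
        return ref

    endpoint = new_endpoint_ref('svc-1', url='http://keystone/%(tenant_id)s')
    assert endpoint['interface'] == 'public'
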
def config_overrides(self):
super(TestV2CatalogAPISQL, self).config_overrides()
self.config_fixture.config(group='catalog', driver='sql')
- def new_endpoint_ref(self, service_id):
- return {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'interface': uuid.uuid4().hex[:8],
- 'service_id': service_id,
- 'url': uuid.uuid4().hex,
- 'region': uuid.uuid4().hex,
- }
-
def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
user_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
@@ -330,14 +317,12 @@ class TestV2CatalogAPISQL(unit.TestCase):
self.assertEqual(1, len(self.catalog_api.list_endpoints()))
# create a new, invalid endpoint - malformed type declaration
- endpoint = self.new_endpoint_ref(self.service_id)
- endpoint['url'] = 'http://keystone/%(tenant_id)'
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(self.service_id,
+ url='http://keystone/%(tenant_id)')
# create a new, invalid endpoint - nonexistent key
- endpoint = self.new_endpoint_ref(self.service_id)
- endpoint['url'] = 'http://keystone/%(you_wont_find_me)s'
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(self.service_id,
+ url='http://keystone/%(you_wont_find_me)s')
# verify that the invalid endpoints don't appear in the catalog
catalog = self.catalog_api.get_catalog(user_id, tenant_id)
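
Both invalid endpoints rely on Python %-substitution failing: a truncated conversion raises ValueError and an unknown key raises KeyError, and endpoints whose URL cannot be substituted are dropped from the catalog. A sketch of that filtering idea (the helper name is illustrative):

    def safe_format_url(url, values):
        # Illustrative: None means 'drop this endpoint from the catalog'.
        try:
            return url % values
        except (KeyError, ValueError):
            return None

    values = {'tenant_id': 'abc123'}
    assert safe_format_url('http://keystone/%(tenant_id)s',
                           values) == 'http://keystone/abc123'
    assert safe_format_url('http://keystone/%(tenant_id)', values) is None
    assert safe_format_url('http://keystone/%(you_wont_find_me)s',
                           values) is None
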
@@ -349,28 +334,22 @@ class TestV2CatalogAPISQL(unit.TestCase):
user_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
- # create a service, with a name
- named_svc = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- }
+ # new_service_ref() returns a ref with a `name`.
+ named_svc = unit.new_service_ref()
self.catalog_api.create_service(named_svc['id'], named_svc)
- endpoint = self.new_endpoint_ref(service_id=named_svc['id'])
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(service_id=named_svc['id'])
- # create a service, with no name
- unnamed_svc = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex
- }
+ # This time manually delete the generated `name`.
+ unnamed_svc = unit.new_service_ref()
+ del unnamed_svc['name']
self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
- endpoint = self.new_endpoint_ref(service_id=unnamed_svc['id'])
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(service_id=unnamed_svc['id'])
region = None
catalog = self.catalog_api.get_catalog(user_id, tenant_id)
self.assertEqual(named_svc['name'],
catalog[region][named_svc['type']]['name'])
+
+ # verify a name is not generated when the service is passed to the API
self.assertEqual('', catalog[region][unnamed_svc['type']]['name'])
diff --git a/keystone-moon/keystone/tests/unit/test_cert_setup.py b/keystone-moon/keystone/tests/unit/test_cert_setup.py
index 769e7c8e..debf87f5 100644
--- a/keystone-moon/keystone/tests/unit/test_cert_setup.py
+++ b/keystone-moon/keystone/tests/unit/test_cert_setup.py
@@ -17,6 +17,7 @@ import os
import shutil
import mock
+from six.moves import http_client
from testtools import matchers
from keystone.common import environment
@@ -29,7 +30,6 @@ from keystone import token
SSLDIR = unit.dirs.tmp('ssl')
CONF = unit.CONF
-DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
CERTDIR = os.path.join(SSLDIR, 'certs')
@@ -74,17 +74,12 @@ class CertSetupTestCase(rest.RestfulTestCase):
controller = token.controllers.Auth()
self.config_fixture.config(group='signing', certfile='invalid')
- password = 'fake1'
- user = {
- 'name': 'fake1',
- 'password': password,
- 'domain_id': DEFAULT_DOMAIN_ID
- }
- user = self.identity_api.create_user(user)
+ user = unit.create_user(self.identity_api,
+ domain_id=CONF.identity.default_domain_id)
body_dict = {
'passwordCredentials': {
'userId': user['id'],
- 'password': password,
+ 'password': user['password'],
},
}
self.assertRaises(exception.UnexpectedError,
@@ -113,11 +108,13 @@ class CertSetupTestCase(rest.RestfulTestCase):
# requests don't have some of the normal information
signing_resp = self.request(self.public_app,
'/v2.0/certificates/signing',
- method='GET', expected_status=200)
+ method='GET',
+ expected_status=http_client.OK)
cacert_resp = self.request(self.public_app,
'/v2.0/certificates/ca',
- method='GET', expected_status=200)
+ method='GET',
+ expected_status=http_client.OK)
with open(CONF.signing.certfile) as f:
self.assertEqual(f.read(), signing_resp.text)
@@ -133,7 +130,7 @@ class CertSetupTestCase(rest.RestfulTestCase):
for accept in [None, 'text/html', 'application/json', 'text/xml']:
headers = {'Accept': accept} if accept else {}
resp = self.request(self.public_app, path, method='GET',
- expected_status=200,
+ expected_status=http_client.OK,
headers=headers)
self.assertEqual('text/html', resp.content_type)
@@ -146,7 +143,7 @@ class CertSetupTestCase(rest.RestfulTestCase):
def test_failure(self):
for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']:
self.request(self.public_app, path, method='GET',
- expected_status=500)
+ expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_pki_certs_rebuild(self):
self.test_create_pki_certs()
@@ -228,15 +225,17 @@ class TestExecCommand(unit.TestCase):
ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group')
ssl.exec_command(['ls'])
- @mock.patch.object(environment.subprocess.Popen, 'communicate')
- @mock.patch.object(environment.subprocess.Popen, 'poll')
- def test_running_an_invalid_command(self, mock_poll, mock_communicate):
+ @mock.patch.object(environment.subprocess, 'check_output')
+ def test_running_an_invalid_command(self, mock_check_output):
+ cmd = ['ls']
+
output = 'this is the output string'
- mock_communicate.return_value = (output, '')
- mock_poll.return_value = 1
+ error = environment.subprocess.CalledProcessError(returncode=1,
+ cmd=cmd,
+ output=output)
+ mock_check_output.side_effect = error
- cmd = ['ls']
ssl = openssl.ConfigureSSL('keystone_user', 'keystone_group')
e = self.assertRaises(environment.subprocess.CalledProcessError,
ssl.exec_command,
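The rewritten mock above replaces two Popen-level patches with a single patch on check_output whose side_effect raises the error directly; the same pattern in isolation (command and output strings are illustrative):

    import subprocess

    import mock  # unittest.mock on Python 3

    with mock.patch.object(subprocess, 'check_output') as mock_check_output:
        # side_effect makes the mock raise instead of returning a value.
        mock_check_output.side_effect = subprocess.CalledProcessError(
            returncode=1, cmd=['ls'], output='this is the output string')
        try:
            subprocess.check_output(['ls'])
        except subprocess.CalledProcessError as e:
            assert e.returncode == 1
            assert e.output == 'this is the output string'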
diff --git a/keystone-moon/keystone/tests/unit/test_cli.py b/keystone-moon/keystone/tests/unit/test_cli.py
index d967eb53..06f2e172 100644
--- a/keystone-moon/keystone/tests/unit/test_cli.py
+++ b/keystone-moon/keystone/tests/unit/test_cli.py
@@ -15,9 +15,11 @@
import os
import uuid
+import fixtures
import mock
from oslo_config import cfg
from six.moves import range
+from testtools import matchers
from keystone.cmd import cli
from keystone.common import dependency
@@ -42,6 +44,199 @@ class CliTestCase(unit.SQLDriverOverrides, unit.TestCase):
cli.TokenFlush.main()
+class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase):
+
+ def setUp(self):
+ self.useFixture(database.Database())
+ super(CliBootStrapTestCase, self).setUp()
+
+ def config_files(self):
+ self.config_fixture.register_cli_opt(cli.command_opt)
+ config_files = super(CliBootStrapTestCase, self).config_files()
+ config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
+ return config_files
+
+ def config(self, config_files):
+ CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex],
+ project='keystone',
+ default_config_files=config_files)
+
+ def test_bootstrap(self):
+ bootstrap = cli.BootStrap()
+ self._do_test_bootstrap(bootstrap)
+
+ def _do_test_bootstrap(self, bootstrap):
+ bootstrap.do_bootstrap()
+ project = bootstrap.resource_manager.get_project_by_name(
+ bootstrap.project_name,
+ 'default')
+ user = bootstrap.identity_manager.get_user_by_name(
+ bootstrap.username,
+ 'default')
+ role = bootstrap.role_manager.get_role(bootstrap.role_id)
+ role_list = (
+ bootstrap.assignment_manager.get_roles_for_user_and_project(
+ user['id'],
+ project['id']))
+ self.assertEqual(1, len(role_list))
+ self.assertEqual(role['id'], role_list[0])
+ # NOTE(morganfainberg): Pass an empty context; it isn't used by the
+ # `authenticate` method.
+ bootstrap.identity_manager.authenticate(
+ {},
+ user['id'],
+ bootstrap.password)
+
+ if bootstrap.region_id:
+ region = bootstrap.catalog_manager.get_region(bootstrap.region_id)
+ self.assertEqual(self.region_id, region['id'])
+
+ if bootstrap.service_id:
+ svc = bootstrap.catalog_manager.get_service(bootstrap.service_id)
+ self.assertEqual(self.service_name, svc['name'])
+
+ self.assertEqual(set(['admin', 'public', 'internal']),
+ set(bootstrap.endpoints))
+
+ urls = {'public': self.public_url,
+ 'internal': self.internal_url,
+ 'admin': self.admin_url}
+
+ for interface, url in urls.items():
+ endpoint_id = bootstrap.endpoints[interface]
+ endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id)
+
+ self.assertEqual(self.region_id, endpoint['region_id'])
+ self.assertEqual(url, endpoint['url'])
+ self.assertEqual(svc['id'], endpoint['service_id'])
+ self.assertEqual(interface, endpoint['interface'])
+
+ def test_bootstrap_is_idempotent(self):
+ # NOTE(morganfainberg): Ensure we can run bootstrap multiple times
+ # without erroring.
+ bootstrap = cli.BootStrap()
+ self._do_test_bootstrap(bootstrap)
+ self._do_test_bootstrap(bootstrap)
+
+
+class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase):
+
+ def config(self, config_files):
+ CONF(args=['bootstrap'], project='keystone',
+ default_config_files=config_files)
+
+ def setUp(self):
+ super(CliBootStrapTestCaseWithEnvironment, self).setUp()
+ self.password = uuid.uuid4().hex
+ self.username = uuid.uuid4().hex
+ self.project_name = uuid.uuid4().hex
+ self.role_name = uuid.uuid4().hex
+ self.service_name = uuid.uuid4().hex
+ self.public_url = uuid.uuid4().hex
+ self.internal_url = uuid.uuid4().hex
+ self.admin_url = uuid.uuid4().hex
+ self.region_id = uuid.uuid4().hex
+ self.default_domain = {
+ 'id': CONF.identity.default_domain_id,
+ 'name': 'Default',
+ }
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_PASSWORD',
+ newvalue=self.password))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_USERNAME',
+ newvalue=self.username))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_PROJECT_NAME',
+ newvalue=self.project_name))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_ROLE_NAME',
+ newvalue=self.role_name))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_SERVICE_NAME',
+ newvalue=self.service_name))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_PUBLIC_URL',
+ newvalue=self.public_url))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_INTERNAL_URL',
+ newvalue=self.internal_url))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_ADMIN_URL',
+ newvalue=self.admin_url))
+ self.useFixture(
+ fixtures.EnvironmentVariable('OS_BOOTSTRAP_REGION_ID',
+ newvalue=self.region_id))
+
+ def test_assignment_created_with_user_exists(self):
+ # test assignment can be created if user already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ user_ref = unit.new_user_ref(self.default_domain['id'],
+ name=self.username,
+ password=self.password)
+ bootstrap.identity_manager.create_user(user_ref)
+ self._do_test_bootstrap(bootstrap)
+
+ def test_assignment_created_with_project_exists(self):
+ # test assignment can be created if project already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ project_ref = unit.new_project_ref(self.default_domain['id'],
+ name=self.project_name)
+ bootstrap.resource_manager.create_project(project_ref['id'],
+ project_ref)
+ self._do_test_bootstrap(bootstrap)
+
+ def test_assignment_created_with_role_exists(self):
+ # test assignment can be created if role already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ role = unit.new_role_ref(name=self.role_name)
+ bootstrap.role_manager.create_role(role['id'], role)
+ self._do_test_bootstrap(bootstrap)
+
+ def test_assignment_created_with_region_exists(self):
+ # test assignment can be created if region already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ region = unit.new_region_ref(id=self.region_id)
+ bootstrap.catalog_manager.create_region(region)
+ self._do_test_bootstrap(bootstrap)
+
+ def test_endpoints_created_with_service_exists(self):
+ # test endpoints can be created if service already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ service = unit.new_service_ref(name=self.service_name)
+ bootstrap.catalog_manager.create_service(service['id'], service)
+ self._do_test_bootstrap(bootstrap)
+
+ def test_endpoints_created_with_endpoint_exists(self):
+ # test endpoints can be created if endpoint already exists.
+ bootstrap = cli.BootStrap()
+ bootstrap.resource_manager.create_domain(self.default_domain['id'],
+ self.default_domain)
+ service = unit.new_service_ref(name=self.service_name)
+ bootstrap.catalog_manager.create_service(service['id'], service)
+
+ region = unit.new_region_ref(id=self.region_id)
+ bootstrap.catalog_manager.create_region(region)
+
+ endpoint = unit.new_endpoint_ref(interface='public',
+ service_id=service['id'],
+ url=self.public_url,
+ region_id=self.region_id)
+ bootstrap.catalog_manager.create_endpoint(endpoint['id'], endpoint)
+
+ self._do_test_bootstrap(bootstrap)
+
+
class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
@@ -112,7 +307,8 @@ class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase):
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
- 'identity': {'driver': 'ldap'}
+ 'identity': {'driver': 'ldap',
+ 'list_limit': '101'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
@@ -182,8 +378,8 @@ class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
# Now try and upload the settings in the configuration file for the
# default domain
dependency.reset()
- with mock.patch('__builtin__.print') as mock_print:
- self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+ with mock.patch('six.moves.builtins.print') as mock_print:
+ self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
file_name = ('keystone.%s.conf' %
resource.calc_default_domain()['name'])
error_msg = _(
@@ -208,8 +404,8 @@ class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase):
def test_config_upload(self):
dependency.reset()
- with mock.patch('__builtin__.print') as mock_print:
- self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+ with mock.patch('six.moves.builtins.print') as mock_print:
+ self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
mock_print.assert_has_calls(
[mock.call(
_('At least one option must be provided, use either '
@@ -225,8 +421,8 @@ class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase):
def test_config_upload(self):
dependency.reset()
- with mock.patch('__builtin__.print') as mock_print:
- self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+ with mock.patch('six.moves.builtins.print') as mock_print:
+ self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
mock_print.assert_has_calls(
[mock.call(_('The --all option cannot be used with '
'the --domain-name option'))])
@@ -242,8 +438,8 @@ class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
def test_config_upload(self):
dependency.reset()
- with mock.patch('__builtin__.print') as mock_print:
- self.assertRaises(SystemExit, cli.DomainConfigUpload.main)
+ with mock.patch('six.moves.builtins.print') as mock_print:
+ self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
file_name = 'keystone.%s.conf' % self.invalid_domain_name
error_msg = (_(
'Invalid domain name: %(domain)s found in config file name: '
@@ -252,3 +448,31 @@ class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
'file': os.path.join(CONF.identity.domain_config_dir,
file_name)})
mock_print.assert_has_calls([mock.call(error_msg)])
+
+
+class TestDomainConfigFinder(unit.BaseTestCase):
+
+ def setUp(self):
+ super(TestDomainConfigFinder, self).setUp()
+ self.logging = self.useFixture(fixtures.LoggerFixture())
+
+ @mock.patch('os.walk')
+ def test_finder_ignores_files(self, mock_walk):
+ mock_walk.return_value = [
+ ['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']],
+ ]
+
+ domain_configs = list(cli._domain_config_finder('.'))
+
+ expected_domain_configs = [('./keystone.domain0.conf', 'domain0')]
+ self.assertThat(domain_configs,
+ matchers.Equals(expected_domain_configs))
+
+ expected_msg_template = ('Ignoring file (%s) while scanning '
+ 'domain config directory')
+ self.assertThat(
+ self.logging.output,
+ matchers.Contains(expected_msg_template % 'file.txt'))
+ self.assertThat(
+ self.logging.output,
+ matchers.Contains(expected_msg_template % 'keystone.conf'))
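CliBootStrapTestCaseWithEnvironment above configures `keystone-manage bootstrap` purely through OS_BOOTSTRAP_* variables supplied by the fixtures library; the EnvironmentVariable pattern on its own looks like this (variable name and value are arbitrary here):

    import os

    import fixtures
    import testtools


    class EnvExampleTest(testtools.TestCase):
        def test_variable_scoped_to_test(self):
            # The fixture sets the variable for the duration of the test
            # and restores (or unsets) the previous value on cleanup.
            self.useFixture(fixtures.EnvironmentVariable(
                'OS_BOOTSTRAP_PASSWORD', newvalue='s3cret'))
            self.assertEqual('s3cret',
                             os.environ['OS_BOOTSTRAP_PASSWORD'])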
diff --git a/keystone-moon/keystone/tests/unit/test_config.py b/keystone-moon/keystone/tests/unit/test_config.py
index 7984646d..d7e7809f 100644
--- a/keystone-moon/keystone/tests/unit/test_config.py
+++ b/keystone-moon/keystone/tests/unit/test_config.py
@@ -16,7 +16,7 @@ import uuid
from oslo_config import cfg
-from keystone import config
+from keystone.common import config
from keystone import exception
from keystone.tests import unit
diff --git a/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py b/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py
index 18c76dad..c9706da7 100644
--- a/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py
+++ b/keystone-moon/keystone/tests/unit/test_contrib_s3_core.py
@@ -27,9 +27,9 @@ class S3ContribCore(unit.TestCase):
self.controller = s3.S3Controller()
- def test_good_signature(self):
+ def test_good_signature_v1(self):
creds_ref = {'secret':
- 'b121dd41cdcc42fe9f70e572e84295aa'}
+ u'b121dd41cdcc42fe9f70e572e84295aa'}
credentials = {'token':
'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
@@ -40,9 +40,9 @@ class S3ContribCore(unit.TestCase):
self.assertIsNone(self.controller.check_signature(creds_ref,
credentials))
- def test_bad_signature(self):
+ def test_bad_signature_v1(self):
creds_ref = {'secret':
- 'b121dd41cdcc42fe9f70e572e84295aa'}
+ u'b121dd41cdcc42fe9f70e572e84295aa'}
credentials = {'token':
'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
@@ -53,3 +53,51 @@ class S3ContribCore(unit.TestCase):
self.assertRaises(exception.Unauthorized,
self.controller.check_signature,
creds_ref, credentials)
+
+ def test_good_signature_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy'
+ 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1'
+ 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==',
+ 'signature':
+ '730ba8f58df6ffeadd78f402e990b2910d60'
+ 'bc5c2aec63619734f096a4dd77be'}
+
+ self.assertIsNone(self.controller.check_signature(creds_ref,
+ credentials))
+
+ def test_bad_signature_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy'
+ 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1'
+ 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==',
+ 'signature': uuid.uuid4().hex}
+
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ creds_ref, credentials)
+
+ def test_bad_token_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ # the token's first part has an invalid format
+ credentials = {'token':
+ 'QVdTNC1BQUEKWApYClg=',
+ 'signature': ''}
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ creds_ref, credentials)
+
+ # the token's scope part has an invalid format
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgpYCi8vczMvYXdzTl9yZXF1ZXN0Clg=',
+ 'signature': ''}
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ creds_ref, credentials)
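The v4 tokens in these tests are base64-encoded copies of the AWS "string to sign"; decoding one shows the structure the controller validates (plain base64, no keystone API involved):

    import base64

    token = ('QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
             'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy'
             'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1'
             'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==')
    # Four newline-separated parts: the algorithm, the request timestamp,
    # the credential scope, and the hash of the canonical request. The
    # "bad token" cases above corrupt the first part and the scope.
    print(base64.b64decode(token).split(b'\n'))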
diff --git a/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py b/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py
index 8664e2c3..111aa5c6 100644
--- a/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py
+++ b/keystone-moon/keystone/tests/unit/test_contrib_simple_cert.py
@@ -12,13 +12,13 @@
import uuid
+from six.moves import http_client
+
from keystone.tests.unit import test_v3
class BaseTestCase(test_v3.RestfulTestCase):
- EXTENSION_TO_ADD = 'simple_cert_extension'
-
CA_PATH = '/v3/OS-SIMPLE-CERT/ca'
CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates'
@@ -31,10 +31,10 @@ class TestSimpleCert(BaseTestCase):
method='GET',
path=path,
headers={'Accept': content_type},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertEqual(content_type, response.content_type.lower())
- self.assertIn('---BEGIN', response.body)
+ self.assertIn(b'---BEGIN', response.body)
return response
@@ -54,4 +54,4 @@ class TestSimpleCert(BaseTestCase):
self.request(app=self.public_app,
method='GET',
path=path,
- expected_status=500)
+ expected_status=http_client.INTERNAL_SERVER_ERROR)
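Note the assertion switching to a bytes literal: a WSGI response body is bytes, and on Python 3 an `in` check that mixes str and bytes raises TypeError rather than returning False. A short illustration (the PEM text is invented and truncated):

    body = b'-----BEGIN CERTIFICATE-----\n...'

    assert b'---BEGIN' in body            # bytes vs bytes: fine
    assert '---BEGIN' in body.decode()    # or decode to compare as text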
diff --git a/keystone-moon/keystone/tests/unit/test_credential.py b/keystone-moon/keystone/tests/unit/test_credential.py
new file mode 100644
index 00000000..e917ef71
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/test_credential.py
@@ -0,0 +1,265 @@
+# Copyright 2015 UnitedStack, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from keystoneclient.contrib.ec2 import utils as ec2_utils
+from six.moves import http_client
+
+from keystone.common import utils
+from keystone.contrib.ec2 import controllers
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import default_fixtures
+from keystone.tests.unit.ksfixtures import database
+from keystone.tests.unit import rest
+
+CRED_TYPE_EC2 = controllers.CRED_TYPE_EC2
+
+
+class V2CredentialEc2TestCase(rest.RestfulTestCase):
+ def setUp(self):
+ super(V2CredentialEc2TestCase, self).setUp()
+ self.user_id = self.user_foo['id']
+ self.project_id = self.tenant_bar['id']
+
+ def _get_token_id(self, r):
+ return r.result['access']['token']['id']
+
+ def _get_ec2_cred(self):
+ uri = self._get_ec2_cred_uri()
+ r = self.public_request(method='POST', token=self.get_scoped_token(),
+ path=uri, body={'tenant_id': self.project_id})
+ return r.result['credential']
+
+ def _get_ec2_cred_uri(self):
+ return '/v2.0/users/%s/credentials/OS-EC2' % self.user_id
+
+ def test_ec2_cannot_get_non_ec2_credential(self):
+ access_key = uuid.uuid4().hex
+ cred_id = utils.hash_access_key(access_key)
+ non_ec2_cred = unit.new_credential_ref(
+ user_id=self.user_id,
+ project_id=self.project_id)
+ non_ec2_cred['id'] = cred_id
+ self.credential_api.create_credential(cred_id, non_ec2_cred)
+
+ # if access_key is not found, ec2 controller raises Unauthorized
+ # exception
+ path = '/'.join([self._get_ec2_cred_uri(), access_key])
+ self.public_request(method='GET', token=self.get_scoped_token(),
+ path=path,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def assertValidErrorResponse(self, r):
+ # FIXME(wwwjfy): this is copied from test_v3.py. The logic of this
+ # method is identical in test_v2.py and test_v3.py (both inherit from
+ # rest.py), so it should be refactored into one place. Also, the
+ # function signatures in both files don't match the one in the parent
+ # class in rest.py.
+ resp = r.result
+ self.assertIsNotNone(resp.get('error'))
+ self.assertIsNotNone(resp['error'].get('code'))
+ self.assertIsNotNone(resp['error'].get('title'))
+ self.assertIsNotNone(resp['error'].get('message'))
+ self.assertEqual(int(resp['error']['code']), r.status_code)
+
+ def test_ec2_list_credentials(self):
+ self._get_ec2_cred()
+ uri = self._get_ec2_cred_uri()
+ r = self.public_request(method='GET', token=self.get_scoped_token(),
+ path=uri)
+ cred_list = r.result['credentials']
+ self.assertEqual(1, len(cred_list))
+
+ # non-EC2 credentials won't be fetched
+ non_ec2_cred = unit.new_credential_ref(
+ user_id=self.user_id,
+ project_id=self.project_id)
+ non_ec2_cred['type'] = uuid.uuid4().hex
+ self.credential_api.create_credential(non_ec2_cred['id'],
+ non_ec2_cred)
+ r = self.public_request(method='GET', token=self.get_scoped_token(),
+ path=uri)
+ cred_list_2 = r.result['credentials']
+ # still one element because non-EC2 credentials are not returned.
+ self.assertEqual(1, len(cred_list_2))
+ self.assertEqual(cred_list[0], cred_list_2[0])
+
+
+class V2CredentialEc2Controller(unit.TestCase):
+ def setUp(self):
+ super(V2CredentialEc2Controller, self).setUp()
+ self.useFixture(database.Database())
+ self.load_backends()
+ self.load_fixtures(default_fixtures)
+ self.user_id = self.user_foo['id']
+ self.project_id = self.tenant_bar['id']
+ self.controller = controllers.Ec2Controller()
+ self.blob, tmp_ref = unit.new_ec2_credential(
+ user_id=self.user_id,
+ project_id=self.project_id)
+
+ self.creds_ref = (controllers.Ec2Controller
+ ._convert_v3_to_ec2_credential(tmp_ref))
+
+ def test_signature_validate_no_host_port(self):
+ """Test signature validation with the access/secret provided."""
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_with_host_port(self):
+ """Test signature validation when host is bound with port.
+
+ Host is bound with a port, generally, the port here is not the
+ standard port for the protocol, like '80' for HTTP and port 443
+ for HTTPS, the port is not omitted by the client library.
+ """
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'foo:8181',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8181',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_with_missed_host_port(self):
+ """Test signature validation when host is bound with well-known port.
+
+ Host is bound with a port, but the port is well-know port like '80'
+ for HTTP and port 443 for HTTPS, sometimes, client library omit
+ the port but then make the request with the port.
+ see (How to create the string to sign): 'http://docs.aws.amazon.com/
+ general/latest/gr/signature-version-2.html'.
+
+ Since "credentials['host']" is not set by client library but is
+ taken from "req.host", so caused the differences.
+ """
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ # Omit the port to generate the signature.
+ cnt_req = {'host': 'foo',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(cnt_req)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ # Check the signature again after omitting the port.
+ self.assertTrue(self.controller.check_signature(self.creds_ref,
+ sig_ref))
+
+ def test_signature_validate_no_signature(self):
+ """Signature is not presented in signature reference data."""
+ access = self.blob['access']
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+
+ sig_ref = {'access': access,
+ 'signature': None,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ self.creds_ref, sig_ref)
+
+ def test_signature_validate_invalid_signature(self):
+ """Signature is not signed on the correct data."""
+ access = self.blob['access']
+ secret = self.blob['secret']
+ signer = ec2_utils.Ec2Signer(secret)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access}
+ request = {'host': 'bar',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+ signature = signer.generate(request)
+
+ sig_ref = {'access': access,
+ 'signature': signature,
+ 'host': 'foo:8080',
+ 'verb': 'GET',
+ 'path': '/bar',
+ 'params': params}
+
+ # Now validate the signature based on the dummy request
+ self.assertRaises(exception.Unauthorized,
+ self.controller.check_signature,
+ self.creds_ref, sig_ref)
+
+ def test_check_non_admin_user(self):
+ """Checking if user is admin causes uncaught error.
+
+ When checking if a user is an admin, keystone.exception.Unauthorized
+ is raised but not caught if the user is not an admin.
+ """
+ # make a non-admin user
+ context = {'is_admin': False, 'token_id': uuid.uuid4().hex}
+
+ # check if user is admin
+ # no exceptions should be raised
+ self.controller._is_admin(context)
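Every signature test in this new file follows one recipe: build a dummy request, sign it with keystoneclient's Ec2Signer, then hand keystone the same fields plus the signature. Stripped to its core (secret and field values are invented):

    from keystoneclient.contrib.ec2 import utils as ec2_utils

    secret = '9b3f0361eafc4f8f9e38a06005852e31'  # invented secret
    params = {'SignatureMethod': 'HmacSHA256',
              'SignatureVersion': '2',
              'AWSAccessKeyId': 'invented-access-key'}
    request = {'host': 'foo:8181', 'verb': 'GET', 'path': '/bar',
               'params': params}

    # The signer HMACs a canonical form of host/verb/path/params with the
    # shared secret. Keystone recomputes this server side, so altering any
    # field (e.g. the host, as the bad-signature test does) makes the
    # check fail with Unauthorized.
    signature = ec2_utils.Ec2Signer(secret).generate(request)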
diff --git a/keystone-moon/keystone/tests/unit/test_driver_hints.py b/keystone-moon/keystone/tests/unit/test_driver_hints.py
index c20d2ae7..75d76194 100644
--- a/keystone-moon/keystone/tests/unit/test_driver_hints.py
+++ b/keystone-moon/keystone/tests/unit/test_driver_hints.py
@@ -27,7 +27,7 @@ class ListHintsTests(test.TestCase):
self.assertEqual('t1', filter['name'])
self.assertEqual('data1', filter['value'])
self.assertEqual('equals', filter['comparator'])
- self.assertEqual(False, filter['case_sensitive'])
+ self.assertFalse(filter['case_sensitive'])
hints.filters.remove(filter)
filter_count = 0
diff --git a/keystone-moon/keystone/tests/unit/test_entry_points.py b/keystone-moon/keystone/tests/unit/test_entry_points.py
new file mode 100644
index 00000000..e973e942
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/test_entry_points.py
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stevedore
+from testtools import matchers
+
+from keystone.tests.unit import core as test
+
+
+class TestPasteDeploymentEntryPoints(test.TestCase):
+ def test_entry_point_middleware(self):
+ """Assert that our list of expected middleware is present."""
+ expected_names = [
+ 'admin_token_auth',
+ 'build_auth_context',
+ 'crud_extension',
+ 'cors',
+ 'debug',
+ 'endpoint_filter_extension',
+ 'ec2_extension',
+ 'ec2_extension_v3',
+ 'federation_extension',
+ 'json_body',
+ 'oauth1_extension',
+ 'request_id',
+ 'revoke_extension',
+ 's3_extension',
+ 'simple_cert_extension',
+ 'sizelimit',
+ 'token_auth',
+ 'url_normalize',
+ 'user_crud_extension',
+ ]
+
+ em = stevedore.ExtensionManager('paste.filter_factory')
+
+ actual_names = [extension.name for extension in em]
+
+ self.assertThat(actual_names, matchers.ContainsAll(expected_names))
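The assertion works because a stevedore ExtensionManager loads every entry point registered under a namespace across all installed packages; enumerating them is a short loop (output depends on what is installed):

    import stevedore

    em = stevedore.ExtensionManager('paste.filter_factory')
    for extension in em:
        # extension.name is the entry point name from each package's
        # setup.cfg; extension.plugin is the loaded factory callable.
        print(extension.name, extension.plugin)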
diff --git a/keystone-moon/keystone/tests/unit/test_exception.py b/keystone-moon/keystone/tests/unit/test_exception.py
index 4d602ccc..25ca2c09 100644
--- a/keystone-moon/keystone/tests/unit/test_exception.py
+++ b/keystone-moon/keystone/tests/unit/test_exception.py
@@ -67,7 +67,7 @@ class ExceptionTestCase(unit.BaseTestCase):
self.assertValidJsonRendering(e)
self.assertIn(target, six.text_type(e))
- def test_403_title(self):
+ def test_forbidden_title(self):
e = exception.Forbidden()
resp = wsgi.render_exception(e)
j = jsonutils.loads(resp.body)
@@ -123,7 +123,7 @@ class UnexpectedExceptionTestCase(ExceptionTestCase):
self.assertNotIn(self.exc_str, six.text_type(e))
def test_unexpected_error_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
e = exception.UnexpectedError(exception=self.exc_str)
self.assertIn(self.exc_str, six.text_type(e))
@@ -131,32 +131,48 @@ class UnexpectedExceptionTestCase(ExceptionTestCase):
self.config_fixture.config(debug=False)
e = UnexpectedExceptionTestCase.SubClassExc(
debug_info=self.exc_str)
- self.assertEqual(exception.UnexpectedError._message_format,
+ self.assertEqual(exception.UnexpectedError.message_format,
six.text_type(e))
def test_unexpected_error_subclass_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
subclass = self.SubClassExc
e = subclass(debug_info=self.exc_str)
expected = subclass.debug_message_format % {'debug_info': self.exc_str}
- translated_amendment = six.text_type(exception.SecurityError.amendment)
self.assertEqual(
- expected + six.text_type(' ') + translated_amendment,
+ '%s %s' % (expected, exception.SecurityError.amendment),
six.text_type(e))
def test_unexpected_error_custom_message_no_debug(self):
self.config_fixture.config(debug=False)
e = exception.UnexpectedError(self.exc_str)
- self.assertEqual(exception.UnexpectedError._message_format,
+ self.assertEqual(exception.UnexpectedError.message_format,
six.text_type(e))
def test_unexpected_error_custom_message_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
e = exception.UnexpectedError(self.exc_str)
- translated_amendment = six.text_type(exception.SecurityError.amendment)
self.assertEqual(
- self.exc_str + six.text_type(' ') + translated_amendment,
+ '%s %s' % (self.exc_str, exception.SecurityError.amendment),
+ six.text_type(e))
+
+ def test_unexpected_error_custom_message_exception_debug(self):
+ self.config_fixture.config(debug=True, insecure_debug=True)
+ orig_e = exception.NotFound(target=uuid.uuid4().hex)
+ e = exception.UnexpectedError(orig_e)
+ self.assertEqual(
+ '%s %s' % (six.text_type(orig_e),
+ exception.SecurityError.amendment),
+ six.text_type(e))
+
+ def test_unexpected_error_custom_message_binary_debug(self):
+ self.config_fixture.config(debug=True, insecure_debug=True)
+ binary_msg = b'something'
+ e = exception.UnexpectedError(binary_msg)
+ self.assertEqual(
+ '%s %s' % (six.text_type(binary_msg),
+ exception.SecurityError.amendment),
six.text_type(e))
@@ -176,7 +192,7 @@ class SecurityErrorTestCase(ExceptionTestCase):
self.assertNotIn(risky_info, six.text_type(e))
def test_unauthorized_exposure_in_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
e = exception.Unauthorized(message=risky_info)
@@ -192,7 +208,7 @@ class SecurityErrorTestCase(ExceptionTestCase):
self.assertNotIn(risky_info, six.text_type(e))
def test_forbidden_exposure_in_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
e = exception.Forbidden(message=risky_info)
@@ -208,23 +224,45 @@ class SecurityErrorTestCase(ExceptionTestCase):
self.assertValidJsonRendering(e)
self.assertNotIn(risky_info, six.text_type(e))
self.assertIn(action, six.text_type(e))
+ self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
- e = exception.ForbiddenAction(action=risky_info)
+ e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
- self.assertIn(risky_info, six.text_type(e))
+ self.assertIn(action, six.text_type(e))
+ self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
def test_forbidden_action_exposure_in_debug(self):
- self.config_fixture.config(debug=True)
+ self.config_fixture.config(debug=True, insecure_debug=True)
risky_info = uuid.uuid4().hex
+ action = uuid.uuid4().hex
- e = exception.ForbiddenAction(message=risky_info)
+ e = exception.ForbiddenAction(message=risky_info, action=action)
self.assertValidJsonRendering(e)
self.assertIn(risky_info, six.text_type(e))
+ self.assertIn(exception.SecurityError.amendment, six.text_type(e))
- e = exception.ForbiddenAction(action=risky_info)
+ e = exception.ForbiddenAction(action=action)
self.assertValidJsonRendering(e)
- self.assertIn(risky_info, six.text_type(e))
+ self.assertIn(action, six.text_type(e))
+ self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
+
+ def test_forbidden_action_no_message(self):
+ # When no custom message is given at creation of a ForbiddenAction
+ # (or other SecurityError subclass), the exposed message is the same
+ # whether debug is enabled or not.
+
+ action = uuid.uuid4().hex
+
+ self.config_fixture.config(debug=False)
+ e = exception.ForbiddenAction(action=action)
+ exposed_message = six.text_type(e)
+ self.assertIn(action, exposed_message)
+ self.assertNotIn(exception.SecurityError.amendment, six.text_type(e))
+
+ self.config_fixture.config(debug=True)
+ e = exception.ForbiddenAction(action=action)
+ self.assertEqual(exposed_message, six.text_type(e))
def test_unicode_argument_message(self):
self.config_fixture.config(debug=False)
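The recurring `debug=True, insecure_debug=True` change reflects that leaking real error details now requires both options, and the assertions standardize on building the expected text with '%s %s'. A sketch of why that form is preferred (the amendment text here is invented):

    import six

    message = 'the real failure detail'
    amendment = '(Disable insecure_debug mode to suppress these details.)'

    # '%s %s' coerces both operands to text, so it also works when either
    # side is a lazily translated Message object rather than a plain str.
    exposed = '%s %s' % (message, amendment)
    assert exposed == six.text_type(message) + ' ' + six.text_type(amendment)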
diff --git a/keystone-moon/keystone/tests/unit/test_hacking_checks.py b/keystone-moon/keystone/tests/unit/test_hacking_checks.py
index 962f5f8a..e279cc7f 100644
--- a/keystone-moon/keystone/tests/unit/test_hacking_checks.py
+++ b/keystone-moon/keystone/tests/unit/test_hacking_checks.py
@@ -86,25 +86,44 @@ class TestAssertingNoneEquality(BaseStyleCheck):
self.assert_has_errors(code, expected_errors=errors)
-class TestCheckForDebugLoggingIssues(BaseStyleCheck):
+class BaseLoggingCheck(BaseStyleCheck):
def get_checker(self):
return checks.CheckForLoggingIssues
+ def get_fixture(self):
+ return hacking_fixtures.HackingLogging()
+
+ def assert_has_errors(self, code, expected_errors=None):
+
+ # pull out the parts of the error that we'll match against
+ actual_errors = (e[:3] for e in self.run_check(code))
+ # adjust line numbers to make the fixture data more readable.
+ import_lines = len(self.code_ex.shared_imports.split('\n')) - 1
+ actual_errors = [(e[0] - import_lines, e[1], e[2])
+ for e in actual_errors]
+ self.assertEqual(expected_errors or [], actual_errors)
+
+
+class TestCheckForDebugLoggingIssues(BaseLoggingCheck):
+
def test_for_translations(self):
fixture = self.code_ex.assert_no_translations_for_debug_logging
- code = fixture['code']
+ code = self.code_ex.shared_imports + fixture['code']
errors = fixture['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
-class TestCheckForNonDebugLoggingIssues(BaseStyleCheck):
+class TestLoggingWithWarn(BaseLoggingCheck):
- def get_checker(self):
- return checks.CheckForLoggingIssues
+ def test(self):
+ data = self.code_ex.assert_not_using_deprecated_warn
+ code = self.code_ex.shared_imports + data['code']
+ errors = data['expected_errors']
+ self.assert_has_errors(code, expected_errors=errors)
- def get_fixture(self):
- return hacking_fixtures.HackingLogging()
+
+class TestCheckForNonDebugLoggingIssues(BaseLoggingCheck):
def test_for_translations(self):
for example in self.code_ex.examples:
@@ -112,15 +131,6 @@ class TestCheckForNonDebugLoggingIssues(BaseStyleCheck):
errors = example['expected_errors']
self.assert_has_errors(code, expected_errors=errors)
- def assert_has_errors(self, code, expected_errors=None):
- # pull out the parts of the error that we'll match against
- actual_errors = (e[:3] for e in self.run_check(code))
- # adjust line numbers to make the fixure data more readable.
- import_lines = len(self.code_ex.shared_imports.split('\n')) - 1
- actual_errors = [(e[0] - import_lines, e[1], e[2])
- for e in actual_errors]
- self.assertEqual(expected_errors or [], actual_errors)
-
class TestDictConstructorWithSequenceCopy(BaseStyleCheck):
diff --git a/keystone-moon/keystone/tests/unit/test_kvs.py b/keystone-moon/keystone/tests/unit/test_kvs.py
index 18931f5d..a88ee1ac 100644
--- a/keystone-moon/keystone/tests/unit/test_kvs.py
+++ b/keystone-moon/keystone/tests/unit/test_kvs.py
@@ -17,7 +17,6 @@ import uuid
from dogpile.cache import api
from dogpile.cache import proxy
-from dogpile.cache import util
import mock
import six
from testtools import matchers
@@ -86,9 +85,12 @@ class RegionProxy2Fixture(proxy.ProxyBackend):
class TestMemcacheDriver(api.CacheBackend):
- """A test dogpile.cache backend that conforms to the mixin-mechanism for
+ """A test dogpile.cache backend.
+
+ This test backend conforms to the mixin-mechanism for
overriding set and set_multi methods on dogpile memcached drivers.
"""
+
class test_client(object):
# FIXME(morganfainberg): Convert this test client over to using mock
# and/or mock.MagicMock as appropriate
@@ -203,10 +205,10 @@ class KVSTest(unit.TestCase):
kvs = self._get_kvs_region()
kvs.configure('openstack.kvs.Memory')
- self.assertIs(kvs._region.key_mangler, util.sha1_mangle_key)
+ self.assertIs(kvs._region.key_mangler, core.sha1_mangle_key)
# The backend should also have the keymangler set the same as the
# region now.
- self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key)
def test_kvs_key_mangler_configuration_backend(self):
kvs = self._get_kvs_region()
@@ -217,7 +219,7 @@ class KVSTest(unit.TestCase):
def test_kvs_key_mangler_configuration_forced_backend(self):
kvs = self._get_kvs_region()
kvs.configure('openstack.kvs.KVSBackendForcedKeyMangleFixture',
- key_mangler=util.sha1_mangle_key)
+ key_mangler=core.sha1_mangle_key)
expected = KVSBackendForcedKeyMangleFixture.key_mangler(self.key_foo)
self.assertEqual(expected, kvs._region.key_mangler(self.key_foo))
@@ -236,7 +238,7 @@ class KVSTest(unit.TestCase):
kvs = self._get_kvs_region()
kvs.configure('openstack.kvs.Memory')
- self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key)
kvs._set_key_mangler(test_key_mangler)
self.assertIs(kvs._region.backend.key_mangler, test_key_mangler)
@@ -432,7 +434,7 @@ class KVSTest(unit.TestCase):
no_expiry_keys=no_expiry_keys)
calculated_keys = set([kvs._region.key_mangler(key)
for key in no_expiry_keys])
- self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key)
self.assertSetEqual(calculated_keys,
kvs._region.backend.no_expiry_hashed_keys)
self.assertSetEqual(no_expiry_keys,
@@ -450,7 +452,7 @@ class KVSTest(unit.TestCase):
kvs.configure('openstack.kvs.Memcached',
memcached_backend='TestDriver',
no_expiry_keys=no_expiry_keys)
- self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ self.assertIs(kvs._region.backend.key_mangler, core.sha1_mangle_key)
kvs._region.backend.key_mangler = None
self.assertSetEqual(kvs._region.backend.raw_no_expiry_keys,
kvs._region.backend.no_expiry_hashed_keys)
@@ -492,15 +494,15 @@ class KVSTest(unit.TestCase):
# Ensure the set_arguments are correct
self.assertDictEqual(
- kvs._region.backend._get_set_arguments_driver_attr(),
- expected_set_args)
+ expected_set_args,
+ kvs._region.backend._get_set_arguments_driver_attr())
# Set a key that would have an expiry and verify the correct result
# occurred and that the correct set_arguments were passed.
kvs.set(self.key_foo, self.value_foo)
self.assertDictEqual(
- kvs._region.backend.driver.client.set_arguments_passed,
- expected_set_args)
+ expected_set_args,
+ kvs._region.backend.driver.client.set_arguments_passed)
observed_foo_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
@@ -511,8 +513,8 @@ class KVSTest(unit.TestCase):
# occurred and that the correct set_arguments were passed.
kvs.set(self.key_bar, self.value_bar)
self.assertDictEqual(
- kvs._region.backend.driver.client.set_arguments_passed,
- expected_no_expiry_args)
+ expected_no_expiry_args,
+ kvs._region.backend.driver.client.set_arguments_passed)
observed_bar_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
@@ -523,8 +525,8 @@ class KVSTest(unit.TestCase):
# result occurred and that the correct set_arguments were passed.
kvs.set_multi(mapping_foo)
self.assertDictEqual(
- kvs._region.backend.driver.client.set_arguments_passed,
- expected_set_args)
+ expected_set_args,
+ kvs._region.backend.driver.client.set_arguments_passed)
observed_foo_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
@@ -535,8 +537,8 @@ class KVSTest(unit.TestCase):
# result occurred and that the correct set_arguments were passed.
kvs.set_multi(mapping_bar)
self.assertDictEqual(
- kvs._region.backend.driver.client.set_arguments_passed,
- expected_no_expiry_args)
+ expected_no_expiry_args,
+ kvs._region.backend.driver.client.set_arguments_passed)
observed_bar_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
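These hunks track sha1_mangle_key moving from dogpile.cache.util into keystone's kvs core module; functionally, a sha1 key mangler just hashes each cache key so backends receive short, memcache-safe identifiers. A minimal stand-in (keystone's real version may differ in encoding details):

    import hashlib

    def sha1_mangle_key(key):
        # Hash the key into a fixed-length, ASCII-safe identifier.
        if isinstance(key, str):
            key = key.encode('utf-8')
        return hashlib.sha1(key).hexdigest()

    print(sha1_mangle_key('key_foo'))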
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
index e2abd56d..4bce6a73 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_livetest.py
@@ -69,9 +69,6 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
create_object(CONF.ldap.role_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'Roles'})
- create_object(CONF.ldap.project_tree_dn,
- {'objectclass': 'organizationalUnit',
- 'ou': 'Projects'})
create_object(CONF.ldap.group_tree_dn,
{'objectclass': 'organizationalUnit',
'ou': 'UserGroups'})
@@ -82,8 +79,7 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
return config_files
def test_build_tree(self):
- """Regression test for building the tree names
- """
+ """Regression test for building the tree names."""
# logic is different from the fake backend.
user_api = identity_ldap.UserApi(CONF)
self.assertTrue(user_api)
@@ -134,6 +130,7 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
USER_COUNT = 2
for x in range(0, USER_COUNT):
+ # TODO(shaleh): use unit.new_user_ref()
new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': domain['id']}
new_user = self.identity_api.create_user(new_user)
@@ -147,8 +144,7 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
self.assertEqual(0, len(group_refs))
for x in range(0, GROUP_COUNT):
- new_group = {'domain_id': domain['id'],
- 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=domain['id'])
new_group = self.identity_api.create_group(new_group)
test_groups.append(new_group)
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
index 81e91ce5..a284114a 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_pool_livetest.py
@@ -105,6 +105,7 @@ class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
password=old_password)
def _create_user_and_authenticate(self, password):
+ # TODO(shaleh): port to new_user_ref()
user_dict = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
@@ -183,7 +184,7 @@ class LiveLDAPPoolIdentity(test_backend_ldap_pool.LdapPoolCommonTestMixin,
user_ref = self.identity_api.authenticate(
context={}, user_id=user['id'], password=old_password)
- self.assertDictEqual(user_ref, user)
+ self.assertDictEqual(user, user_ref)
def test_password_change_with_auth_pool_enabled_no_lifetime(self):
self.config_fixture.config(group='ldap',
diff --git a/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py b/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
index 6b47bfd9..98e2882d 100644
--- a/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_ldap_tls_livetest.py
@@ -50,6 +50,7 @@ class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
tls_req_cert='demand')
self.identity_api = identity.backends.ldap.Identity()
+ # TODO(shaleh): use new_user_ref()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
@@ -71,6 +72,7 @@ class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
tls_req_cert='demand')
self.identity_api = identity.backends.ldap.Identity()
+ # TODO(shaleh): use new_user_ref()
user = {'id': 'fake1',
'name': 'fake1',
'password': 'fakepass1',
@@ -95,6 +97,7 @@ class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
tls_cacertdir=None)
self.identity_api = identity.backends.ldap.Identity()
+ # TODO(shaleh): use new_user_ref()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
@@ -109,6 +112,7 @@ class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity):
tls_cacertdir='/etc/keystone/ssl/mythicalcertdir')
self.identity_api = identity.backends.ldap.Identity()
+ # TODO(shaleh): use new_user_ref()
user = {'name': 'fake1',
'password': 'fakepass1',
'tenants': ['bar']}
diff --git a/keystone-moon/keystone/tests/unit/test_middleware.py b/keystone-moon/keystone/tests/unit/test_middleware.py
index 0eedb9c6..d33e8c00 100644
--- a/keystone-moon/keystone/tests/unit/test_middleware.py
+++ b/keystone-moon/keystone/tests/unit/test_middleware.py
@@ -12,17 +12,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import hashlib
import uuid
from oslo_config import cfg
from six.moves import http_client
-import webob
+import webtest
from keystone.common import authorization
from keystone.common import tokenless_auth
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants as federation_constants
from keystone import middleware
from keystone.tests import unit
from keystone.tests.unit import mapping_fixtures
@@ -32,104 +33,158 @@ from keystone.tests.unit import test_backend_sql
CONF = cfg.CONF
-def make_request(**kwargs):
- accept = kwargs.pop('accept', None)
- method = kwargs.pop('method', 'GET')
- body = kwargs.pop('body', None)
- req = webob.Request.blank('/', **kwargs)
- req.method = method
- if body is not None:
- req.body = body
- if accept is not None:
- req.accept = accept
- return req
+class MiddlewareRequestTestBase(unit.TestCase):
+ MIDDLEWARE_CLASS = None # override this in subclasses
-def make_response(**kwargs):
- body = kwargs.pop('body', None)
- return webob.Response(body)
+ def _application(self):
+ """A base wsgi application that returns a simple response."""
+ def app(environ, start_response):
+ # WSGI requires the body of the response to be six.binary_type
+ body = uuid.uuid4().hex.encode('utf-8')
+ resp_headers = [('Content-Type', 'text/html; charset=utf8'),
+ ('Content-Length', str(len(body)))]
+ start_response('200 OK', resp_headers)
+ return [body]
+ return app
+
+ def _generate_app_response(self, app, headers=None, method='get',
+ path='/', **kwargs):
+ """Given a wsgi application wrap it in webtest and call it."""
+ return getattr(webtest.TestApp(app), method)(path,
+ headers=headers or {},
+ **kwargs)
+
+ def _middleware_failure(self, exc, *args, **kwargs):
+ """Assert that an exception is being thrown from process_request."""
+ # NOTE(jamielennox): This is a little ugly. We need to call the webtest
+ # framework so that the correct RequestClass object is created for when
+ # we call process_request. However because we go via webtest we only
+ # see the response object and not the actual exception that is thrown
+ # by process_request. To get around this we subclass the middleware
+ # and override process_request with something that checks for the
+ # right type of exception being thrown, so we can test the middle of
+ # the request process.
+ # TODO(jamielennox): Change these tests to test the value of the
+ # response rather than the error that is raised.
+
+ class _Failing(self.MIDDLEWARE_CLASS):
+
+ _called = False
+
+ def process_request(i_self, *i_args, **i_kwargs):
+ # the i_ prefix avoids clobbering the outer function's variables
+ e = self.assertRaises(exc,
+ super(_Failing, i_self).process_request,
+ *i_args, **i_kwargs)
+ i_self._called = True
+ raise e
+
+ # By default the returned status is 500 for an uncaught exception;
+ # for validation or caught errors it will likely be 400.
+ kwargs.setdefault('status', http_client.INTERNAL_SERVER_ERROR) # 500
+
+ app = _Failing(self._application())
+ resp = self._generate_app_response(app, *args, **kwargs)
+ self.assertTrue(app._called)
+ return resp
+
+ def _do_middleware_response(self, *args, **kwargs):
+ """Wrap a middleware around a sample application and call it."""
+ app = self.MIDDLEWARE_CLASS(self._application())
+ return self._generate_app_response(app, *args, **kwargs)
+
+ def _do_middleware_request(self, *args, **kwargs):
+ """The request object from a successful middleware call."""
+ return self._do_middleware_response(*args, **kwargs).request
+
+
+class TokenAuthMiddlewareTest(MiddlewareRequestTestBase):
+
+ MIDDLEWARE_CLASS = middleware.TokenAuthMiddleware
-class TokenAuthMiddlewareTest(unit.TestCase):
def test_request(self):
- req = make_request()
- req.headers[middleware.AUTH_TOKEN_HEADER] = 'MAGIC'
- middleware.TokenAuthMiddleware(None).process_request(req)
+ headers = {middleware.AUTH_TOKEN_HEADER: 'MAGIC'}
+ req = self._do_middleware_request(headers=headers)
context = req.environ[middleware.CONTEXT_ENV]
self.assertEqual('MAGIC', context['token_id'])
-class AdminTokenAuthMiddlewareTest(unit.TestCase):
+class AdminTokenAuthMiddlewareTest(MiddlewareRequestTestBase):
+
+ MIDDLEWARE_CLASS = middleware.AdminTokenAuthMiddleware
+
+ def config_overrides(self):
+ super(AdminTokenAuthMiddlewareTest, self).config_overrides()
+ self.config_fixture.config(
+ admin_token='ADMIN')
+
def test_request_admin(self):
- req = make_request()
- req.headers[middleware.AUTH_TOKEN_HEADER] = CONF.admin_token
- middleware.AdminTokenAuthMiddleware(None).process_request(req)
- context = req.environ[middleware.CONTEXT_ENV]
- self.assertTrue(context['is_admin'])
+ headers = {middleware.AUTH_TOKEN_HEADER: 'ADMIN'}
+ req = self._do_middleware_request(headers=headers)
+ self.assertTrue(req.environ[middleware.CONTEXT_ENV]['is_admin'])
def test_request_non_admin(self):
- req = make_request()
- req.headers[middleware.AUTH_TOKEN_HEADER] = 'NOT-ADMIN'
- middleware.AdminTokenAuthMiddleware(None).process_request(req)
- context = req.environ[middleware.CONTEXT_ENV]
- self.assertFalse(context['is_admin'])
+ headers = {middleware.AUTH_TOKEN_HEADER: 'NOT-ADMIN'}
+ req = self._do_middleware_request(headers=headers)
+ self.assertFalse(req.environ[middleware.CONTEXT_ENV]['is_admin'])
-class PostParamsMiddlewareTest(unit.TestCase):
- def test_request_with_params(self):
- req = make_request(body="arg1=one", method='POST')
- middleware.PostParamsMiddleware(None).process_request(req)
- params = req.environ[middleware.PARAMS_ENV]
- self.assertEqual({"arg1": "one"}, params)
+class JsonBodyMiddlewareTest(MiddlewareRequestTestBase):
+ MIDDLEWARE_CLASS = middleware.JsonBodyMiddleware
-class JsonBodyMiddlewareTest(unit.TestCase):
def test_request_with_params(self):
- req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
- content_type='application/json',
- method='POST')
- middleware.JsonBodyMiddleware(None).process_request(req)
- params = req.environ[middleware.PARAMS_ENV]
- self.assertEqual({"arg1": "one", "arg2": ["a"]}, params)
+ headers = {'Content-Type': 'application/json'}
+ params = '{"arg1": "one", "arg2": ["a"]}'
+ req = self._do_middleware_request(params=params,
+ headers=headers,
+ method='post')
+ self.assertEqual({"arg1": "one", "arg2": ["a"]},
+ req.environ[middleware.PARAMS_ENV])
def test_malformed_json(self):
- req = make_request(body='{"arg1": "on',
- content_type='application/json',
- method='POST')
- resp = middleware.JsonBodyMiddleware(None).process_request(req)
- self.assertEqual(http_client.BAD_REQUEST, resp.status_int)
+ headers = {'Content-Type': 'application/json'}
+ self._do_middleware_response(params='{"arg1": "on',
+ headers=headers,
+ method='post',
+ status=http_client.BAD_REQUEST)
def test_not_dict_body(self):
- req = make_request(body='42',
- content_type='application/json',
- method='POST')
- resp = middleware.JsonBodyMiddleware(None).process_request(req)
- self.assertEqual(http_client.BAD_REQUEST, resp.status_int)
- self.assertTrue('valid JSON object' in resp.json['error']['message'])
+ headers = {'Content-Type': 'application/json'}
+ resp = self._do_middleware_response(params='42',
+ headers=headers,
+ method='post',
+ status=http_client.BAD_REQUEST)
+
+ self.assertIn('valid JSON object', resp.json['error']['message'])
def test_no_content_type(self):
- req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
- method='POST')
- middleware.JsonBodyMiddleware(None).process_request(req)
- params = req.environ[middleware.PARAMS_ENV]
- self.assertEqual({"arg1": "one", "arg2": ["a"]}, params)
+ headers = {'Content-Type': ''}
+ params = '{"arg1": "one", "arg2": ["a"]}'
+ req = self._do_middleware_request(params=params,
+ headers=headers,
+ method='post')
+ self.assertEqual({"arg1": "one", "arg2": ["a"]},
+ req.environ[middleware.PARAMS_ENV])
def test_unrecognized_content_type(self):
- req = make_request(body='{"arg1": "one", "arg2": ["a"]}',
- content_type='text/plain',
- method='POST')
- resp = middleware.JsonBodyMiddleware(None).process_request(req)
- self.assertEqual(http_client.BAD_REQUEST, resp.status_int)
+ headers = {'Content-Type': 'text/plain'}
+ self._do_middleware_response(params='{"arg1": "one", "arg2": ["a"]}',
+ headers=headers,
+ method='post',
+ status=http_client.BAD_REQUEST)
def test_unrecognized_content_type_without_body(self):
- req = make_request(content_type='text/plain',
- method='GET')
- middleware.JsonBodyMiddleware(None).process_request(req)
- params = req.environ.get(middleware.PARAMS_ENV, {})
- self.assertEqual({}, params)
+ headers = {'Content-Type': 'text/plain'}
+ req = self._do_middleware_request(headers=headers)
+ self.assertEqual({}, req.environ.get(middleware.PARAMS_ENV, {}))
+
+class AuthContextMiddlewareTest(test_backend_sql.SqlTests,
+ MiddlewareRequestTestBase):
-class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
+ MIDDLEWARE_CLASS = middleware.AuthContextMiddleware
def setUp(self):
super(AuthContextMiddlewareTest, self).setUp()
@@ -139,55 +194,32 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
self.config_fixture.config(group='tokenless_auth',
trusted_issuer=[self.trusted_issuer])
- # This idp_id is calculated based on
- # sha256(self.client_issuer)
- hashed_idp = hashlib.sha256(self.client_issuer)
+ # client_issuer is encoded because you can't hash
+ # unicode objects with hashlib.
+ # This idp_id is calculated based on sha256(self.client_issuer)
+ hashed_idp = hashlib.sha256(self.client_issuer.encode('utf-8'))
self.idp_id = hashed_idp.hexdigest()
self._load_sample_data()
def _load_sample_data(self):
- self.domain_id = uuid.uuid4().hex
- self.domain_name = uuid.uuid4().hex
- self.project_id = uuid.uuid4().hex
- self.project_name = uuid.uuid4().hex
- self.user_name = uuid.uuid4().hex
- self.user_password = uuid.uuid4().hex
- self.user_email = uuid.uuid4().hex
self.protocol_id = 'x509'
- self.role_id = uuid.uuid4().hex
- self.role_name = uuid.uuid4().hex
- # for ephemeral user
- self.group_name = uuid.uuid4().hex
# 1) Create a domain for the user.
- self.domain = {
- 'description': uuid.uuid4().hex,
- 'enabled': True,
- 'id': self.domain_id,
- 'name': self.domain_name,
- }
-
+ self.domain = unit.new_domain_ref()
+ self.domain_id = self.domain['id']
+ self.domain_name = self.domain['name']
self.resource_api.create_domain(self.domain_id, self.domain)
# 2) Create a project for the user.
- self.project = {
- 'description': uuid.uuid4().hex,
- 'domain_id': self.domain_id,
- 'enabled': True,
- 'id': self.project_id,
- 'name': self.project_name,
- }
+ self.project = unit.new_project_ref(domain_id=self.domain_id)
+ self.project_id = self.project['id']
+ self.project_name = self.project['name']
self.resource_api.create_project(self.project_id, self.project)
# 3) Create a user in new domain.
- self.user = {
- 'name': self.user_name,
- 'domain_id': self.domain_id,
- 'project_id': self.project_id,
- 'password': self.user_password,
- 'email': self.user_email,
- }
+ self.user = unit.new_user_ref(domain_id=self.domain_id,
+ project_id=self.project_id)
self.user = self.identity_api.create_user(self.user)
@@ -197,17 +229,13 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
self.idp)
# Add a role
- self.role = {
- 'id': self.role_id,
- 'name': self.role_name,
- }
+ self.role = unit.new_role_ref()
+ self.role_id = self.role['id']
+ self.role_name = self.role['name']
self.role_api.create_role(self.role_id, self.role)
# Add a group
- self.group = {
- 'name': self.group_name,
- 'domain_id': self.domain_id,
- }
+ self.group = unit.new_group_ref(domain_id=self.domain_id)
self.group = self.identity_api.create_group(self.group)
# Assign a role to the user on a project
@@ -282,7 +310,7 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
:param request: HTTP request
:param mapping_ref: A mapping in JSON structure will be setup in the
- backend DB for mapping an user or a group.
+ backend DB for mapping a user or a group.
        :param exception_expected: Set to True when an exception is expected
                                   to be raised based on the given arguments.
        :returns: context, an auth context containing user and role information
@@ -300,30 +328,27 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
return context
def test_context_already_exists(self):
- req = make_request()
- token_id = uuid.uuid4().hex
- req.environ[authorization.AUTH_CONTEXT_ENV] = {'token_id': token_id}
- context = self._create_context(request=req)
- self.assertEqual(token_id, context['token_id'])
+ stub_value = uuid.uuid4().hex
+ env = {authorization.AUTH_CONTEXT_ENV: stub_value}
+ req = self._do_middleware_request(extra_environ=env)
+ self.assertEqual(stub_value,
+ req.environ.get(authorization.AUTH_CONTEXT_ENV))
def test_not_applicable_to_token_request(self):
- env = {}
- env['PATH_INFO'] = '/auth/tokens'
- env['REQUEST_METHOD'] = 'POST'
- req = make_request(environ=env)
- context = self._create_context(request=req)
+ req = self._do_middleware_request(path='/auth/tokens', method='post')
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self.assertIsNone(context)
def test_no_tokenless_attributes_request(self):
- req = make_request()
- context = self._create_context(request=req)
+ req = self._do_middleware_request()
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self.assertIsNone(context)
def test_no_issuer_attribute_request(self):
env = {}
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
- req = make_request(environ=env)
- context = self._create_context(request=req)
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self.assertIsNone(context)
def test_has_only_issuer_and_project_name_request(self):
@@ -332,61 +357,51 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
# references to issuer of the client certificate.
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex
- req = make_request(environ=env)
- context = self._create_context(request=req,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_has_only_issuer_and_project_domain_name_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex
- req = make_request(environ=env)
- context = self._create_context(request=req,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_has_only_issuer_and_project_domain_id_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_DOMAIN_ID'] = uuid.uuid4().hex
- req = make_request(environ=env)
- context = self._create_context(request=req,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_missing_both_domain_and_project_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
- req = make_request(environ=env)
- context = self._create_context(request=req,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_empty_trusted_issuer_list(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
- req = make_request(environ=env)
+
self.config_fixture.config(group='tokenless_auth',
trusted_issuer=[])
- context = self._create_context(request=req)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self.assertIsNone(context)
def test_client_issuer_not_trusted(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.untrusted_client_issuer
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
- req = make_request(environ=env)
- context = self._create_context(request=req)
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self.assertIsNone(context)
def test_proj_scope_with_proj_id_and_proj_dom_id_success(self):
@@ -397,24 +412,28 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
# SSL_CLIENT_USER_NAME and SSL_CLIENT_DOMAIN_NAME are the types
# defined in the mapping that will map to the user name and
# domain name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
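(The success cases in this block differ only in which scope and user attributes they place in the WSGI environ. A representative environ, with placeholder values:)

    env = {
        'SSL_CLIENT_I_DN': 'CN=trusted-issuer,O=example',  # client cert issuer
        'HTTP_X_PROJECT_NAME': 'demo',            # project scope by name ...
        'HTTP_X_PROJECT_DOMAIN_NAME': 'Default',  # ... plus its owning domain
        'SSL_CLIENT_USER_NAME': 'alice',          # mapped to the user
        'SSL_CLIENT_DOMAIN_NAME': 'Default',      # mapped to the user's domain
    }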
def test_proj_scope_with_proj_id_only_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_and_proj_dom_id_success(self):
@@ -422,12 +441,14 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_and_proj_dom_name_success(self):
@@ -435,28 +456,29 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_only_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_id
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_mapping_with_userid_and_domainid_success(self):
env = {}
@@ -465,10 +487,12 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_ID'] = self.user['id']
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_mapping_with_userid_and_domainname_success(self):
@@ -478,10 +502,12 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_ID'] = self.user['id']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINNAME)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINNAME)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_mapping_with_username_and_domainid_success(self):
@@ -489,12 +515,14 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_only_domain_name_fail(self):
@@ -503,14 +531,13 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_DOMAINNAME_ONLY,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_DOMAINNAME_ONLY)
+
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_only_domain_id_fail(self):
env = {}
@@ -518,29 +545,27 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_DOMAINID_ONLY,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_DOMAINID_ONLY)
+
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_missing_domain_data_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_ONLY,
- exception_expected=True)
- self.assertRaises(exception.ValidationError,
- context.process_request,
- req)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_ONLY)
+
+ self._middleware_failure(exception.ValidationError,
+ extra_environ=env,
+ status=400)
def test_userid_success(self):
env = {}
@@ -548,10 +573,10 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_ID'] = self.user['id']
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_ONLY)
+
+ self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_USERID_ONLY)
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context)
def test_domain_disable_fail(self):
@@ -559,37 +584,35 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
- req = make_request(environ=env)
+
self.domain['enabled'] = False
self.domain = self.resource_api.update_domain(
self.domain['id'], self.domain)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID,
- exception_expected=True)
- self.assertRaises(exception.Unauthorized,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID)
+ self._middleware_failure(exception.Unauthorized,
+ extra_environ=env,
+ status=401)
def test_user_disable_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
- req = make_request(environ=env)
+
self.user['enabled'] = False
self.user = self.identity_api.update_user(self.user['id'], self.user)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID,
- exception_expected=True)
- self.assertRaises(AssertionError,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID)
+
+ self._middleware_failure(AssertionError,
+ extra_environ=env)
def test_invalid_user_fail(self):
env = {}
@@ -598,30 +621,29 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
- req = make_request(environ=env)
- context = self._create_context(
- request=req,
- mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME,
- exception_expected=True)
- self.assertRaises(exception.UserNotFound,
- context.process_request,
- req)
+
+ self._load_mapping_rules(
+ mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
+
+ self._middleware_failure(exception.UserNotFound,
+ extra_environ=env,
+ status=404)
def test_ephemeral_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
- mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
- context = self._create_context(
- request=req,
- mapping_ref=mapping)
+ self._load_mapping_rules(mapping)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
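(The move from dict.copy() to copy.deepcopy() in these ephemeral tests is not cosmetic: a shallow copy shares the nested rule dictionaries, so overwriting the group id would mutate the module-level fixture and leak into later tests. A self-contained demonstration:)

    import copy

    fixture = {'rules': [{'local': [{'group': {'id': 'original'}}]}]}

    shallow = fixture.copy()
    shallow['rules'][0]['local'][0]['group']['id'] = 'changed'
    assert fixture['rules'][0]['local'][0]['group']['id'] == 'changed'  # leaked

    fixture['rules'][0]['local'][0]['group']['id'] = 'original'
    deep = copy.deepcopy(fixture)
    deep['rules'][0]['local'][0]['group']['id'] = 'changed'
    assert fixture['rules'][0]['local'][0]['group']['id'] == 'original'  # safe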
def test_ephemeral_with_default_user_type_success(self):
@@ -629,23 +651,25 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
        # This mapping does not have the user type defined and it
        # should default to 'ephemeral', which is the expected type
        # for the test case.
- mapping = mapping_fixtures.MAPPING_FOR_DEFAULT_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(
+ mapping_fixtures.MAPPING_FOR_DEFAULT_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
- context = self._create_context(
- request=req,
- mapping_ref=mapping)
+ self._load_mapping_rules(mapping)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
def test_ephemeral_any_user_success(self):
- """Ephemeral user does not need a specified user
+ """Verify ephemeral user does not need a specified user.
+
        Keystone does not try to match the user, only a corresponding group.
"""
env = {}
@@ -653,15 +677,15 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex
- req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
- mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
- context = self._create_context(
- request=req,
- mapping_ref=mapping)
+ self._load_mapping_rules(mapping)
+
+ req = self._do_middleware_request(extra_environ=env)
+ context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
def test_ephemeral_invalid_scope_fail(self):
@@ -669,43 +693,37 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex
env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
- mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
- context = self._create_context(
- request=req,
- mapping_ref=mapping,
- exception_expected=True)
- self.assertRaises(exception.Unauthorized,
- context.process_request,
- req)
+ self._load_mapping_rules(mapping)
+
+ self._middleware_failure(exception.Unauthorized,
+ extra_environ=env,
+ status=401)
def test_ephemeral_no_group_found_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
- mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex
- context = self._create_context(
- request=req,
- mapping_ref=mapping,
- exception_expected=True)
- self.assertRaises(exception.MappedGroupNotFound,
- context.process_request,
- req)
+ self._load_mapping_rules(mapping)
+
+ self._middleware_failure(exception.MappedGroupNotFound,
+ extra_environ=env)
def test_ephemeral_incorrect_mapping_fail(self):
- """Ephemeral user picks up the non-ephemeral user mapping.
+ """Test ephemeral user picking up the non-ephemeral user mapping.
+
        Looking up the mapping with protocol ID 'x509' will load the
        non-ephemeral user mapping, resulting in an unauthenticated request.
"""
@@ -713,21 +731,17 @@ class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
- env['SSL_CLIENT_USER_NAME'] = self.user_name
- req = make_request(environ=env)
+ env['SSL_CLIENT_USER_NAME'] = self.user['name']
# This will pick up the incorrect mapping
self.config_fixture.config(group='tokenless_auth',
protocol='x509')
self.protocol_id = 'x509'
- mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
+ mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER)
mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex
- context = self._create_context(
- request=req,
- mapping_ref=mapping,
- exception_expected=True)
- self.assertRaises(exception.MappedGroupNotFound,
- context.process_request,
- req)
+ self._load_mapping_rules(mapping)
+
+ self._middleware_failure(exception.MappedGroupNotFound,
+ extra_environ=env)
def test_create_idp_id_success(self):
env = {}
diff --git a/keystone-moon/keystone/tests/unit/test_policy.py b/keystone-moon/keystone/tests/unit/test_policy.py
index 686e2b70..d6e911e9 100644
--- a/keystone-moon/keystone/tests/unit/test_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_policy.py
@@ -23,22 +23,11 @@ from testtools import matchers
from keystone import exception
from keystone.policy.backends import rules
from keystone.tests import unit
+from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
-class BasePolicyTestCase(unit.TestCase):
- def setUp(self):
- super(BasePolicyTestCase, self).setUp()
- rules.reset()
- self.addCleanup(rules.reset)
- self.addCleanup(self.clear_cache_safely)
-
- def clear_cache_safely(self):
- if rules._ENFORCER:
- rules._ENFORCER.clear()
-
-
-class PolicyFileTestCase(BasePolicyTestCase):
+class PolicyFileTestCase(unit.TestCase):
def setUp(self):
# self.tmpfilename should exist before setUp super is called
# this is to ensure it is available for the config_fixture in
@@ -48,10 +37,8 @@ class PolicyFileTestCase(BasePolicyTestCase):
super(PolicyFileTestCase, self).setUp()
self.target = {}
- def config_overrides(self):
- super(PolicyFileTestCase, self).config_overrides()
- self.config_fixture.config(group='oslo_policy',
- policy_file=self.tmpfilename)
+ def _policy_fixture(self):
+ return ksfixtures.Policy(self.tmpfilename, self.config_fixture)
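(The body of ksfixtures.Policy is not part of this diff. A plausible sketch of what swapping the config_overrides hook for a _policy_fixture hook buys, assuming a fixtures.Fixture that points oslo.policy at the file and resets rule state around each test; everything here is illustrative:)

    import fixtures

    from keystone.policy.backends import rules

    class Policy(fixtures.Fixture):
        """Hypothetical stand-in for keystone.tests.unit.ksfixtures.Policy."""

        def __init__(self, policy_file, config_fixture):
            self._policy_file = policy_file
            self._config_fixture = config_fixture

        def setUp(self):
            super(Policy, self).setUp()
            # point oslo.policy at the test's policy file
            self._config_fixture.config(group='oslo_policy',
                                        policy_file=self._policy_file)
            # reset rule state before and after the test
            rules.reset()
            self.addCleanup(rules.reset)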
def test_modified_policy_reloads(self):
action = "example:test"
@@ -65,21 +52,10 @@ class PolicyFileTestCase(BasePolicyTestCase):
self.assertRaises(exception.ForbiddenAction, rules.enforce,
empty_credentials, action, self.target)
- def test_invalid_policy_raises_error(self):
- action = "example:test"
- empty_credentials = {}
- invalid_json = '{"example:test": [],}'
- with open(self.tmpfilename, "w") as policyfile:
- policyfile.write(invalid_json)
- self.assertRaises(ValueError, rules.enforce,
- empty_credentials, action, self.target)
-
-class PolicyTestCase(BasePolicyTestCase):
+class PolicyTestCase(unit.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
- # NOTE(vish): preload rules to circumvent reloading from file
- rules.init()
self.rules = {
"true": [],
"example:allowed": [],
@@ -137,17 +113,16 @@ class PolicyTestCase(BasePolicyTestCase):
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
- # NOTE(dprince) we mix case in the Admin role here to ensure
+ # NOTE(dprince): We mix case in the Admin role here to ensure
# case is ignored
admin_credentials = {'roles': ['AdMiN']}
rules.enforce(admin_credentials, lowercase_action, self.target)
rules.enforce(admin_credentials, uppercase_action, self.target)
-class DefaultPolicyTestCase(BasePolicyTestCase):
+class DefaultPolicyTestCase(unit.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
- rules.init()
self.rules = {
"default": [],
@@ -160,7 +135,7 @@ class DefaultPolicyTestCase(BasePolicyTestCase):
# its enforce() method even though rules has been initialized via
# set_rules(). To make it easier to do our tests, we're going to
        # monkeypatch load_rules() so it does nothing. This seems like a bug in
- # Oslo policy as we shoudn't have to reload the rules if they have
+ # Oslo policy as we shouldn't have to reload the rules if they have
# already been set using set_rules().
self._old_load_rules = rules._ENFORCER.load_rules
self.addCleanup(setattr, rules._ENFORCER, 'load_rules',
diff --git a/keystone-moon/keystone/tests/unit/test_revoke.py b/keystone-moon/keystone/tests/unit/test_revoke.py
index 9062981f..82c0125a 100644
--- a/keystone-moon/keystone/tests/unit/test_revoke.py
+++ b/keystone-moon/keystone/tests/unit/test_revoke.py
@@ -20,8 +20,8 @@ from six.moves import range
from testtools import matchers
from keystone.common import utils
-from keystone.contrib.revoke import model
from keystone import exception
+from keystone.models import revoke_model
from keystone.tests import unit
from keystone.tests.unit import test_backend_sql
from keystone.token import provider
@@ -46,7 +46,7 @@ def _past_time():
def _sample_blank_token():
issued_delta = datetime.timedelta(minutes=-2)
issued_at = timeutils.utcnow() + issued_delta
- token_data = model.blank_token_data(issued_at)
+ token_data = revoke_model.blank_token_data(issued_at)
return token_data
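(Most of this file's churn is the module move from keystone.contrib.revoke.model to keystone.models.revoke_model. Minimal usage under the new import path, limited to calls that actually appear in these tests:)

    from oslo_utils import timeutils

    from keystone.models import revoke_model

    # a token skeleton for the matcher helpers to inspect
    token_data = revoke_model.blank_token_data(timeutils.utcnow())

    tree = revoke_model.RevokeTree()
    tree.add_event(revoke_model.RevokeEvent(user_id='some-user-id'))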
@@ -61,13 +61,12 @@ def _matches(event, token_values):
value for the attribute, and it does not match the token, no match
is possible, so skip the remaining checks.
- :param event one revocation event to match
- :param token_values dictionary with set of values taken from the
+ :param event: one revocation event to match
+ :param token_values: dictionary with set of values taken from the
token
- :returns if the token matches the revocation event, indicating the
+ :returns: True if the token matches the revocation event, indicating the
token has been revoked
"""
-
# The token has three attributes that can match the user_id
if event.user_id is not None:
for attribute_name in ['user_id', 'trustor_id', 'trustee_id']:
@@ -126,15 +125,16 @@ class RevokeTests(object):
self.revoke_api.revoke_by_user(user_id=1)
self.revoke_api.revoke_by_user(user_id=2)
past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
- self.assertEqual(2, len(self.revoke_api.list_events(past)))
+ self.assertEqual(2, len(self.revoke_api.list_events(last_fetch=past)))
future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
- self.assertEqual(0, len(self.revoke_api.list_events(future)))
+ self.assertEqual(0,
+ len(self.revoke_api.list_events(last_fetch=future)))
def test_past_expiry_are_removed(self):
user_id = 1
self.revoke_api.revoke_by_expiration(user_id, _future_time())
self.assertEqual(1, len(self.revoke_api.list_events()))
- event = model.RevokeEvent()
+ event = revoke_model.RevokeEvent()
event.revoked_at = _past_time()
self.revoke_api.revoke(event)
self.assertEqual(1, len(self.revoke_api.list_events()))
@@ -184,32 +184,17 @@ class RevokeTests(object):
class SqlRevokeTests(test_backend_sql.SqlTests, RevokeTests):
def config_overrides(self):
super(SqlRevokeTests, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='sql')
self.config_fixture.config(
group='token',
provider='pki',
revoke_by_id=False)
-class KvsRevokeTests(unit.TestCase, RevokeTests):
- def config_overrides(self):
- super(KvsRevokeTests, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='kvs')
- self.config_fixture.config(
- group='token',
- provider='pki',
- revoke_by_id=False)
-
- def setUp(self):
- super(KvsRevokeTests, self).setUp()
- self.load_backends()
-
-
class RevokeTreeTests(unit.TestCase):
def setUp(self):
super(RevokeTreeTests, self).setUp()
self.events = []
- self.tree = model.RevokeTree()
+ self.tree = revoke_model.RevokeTree()
self._sample_data()
def _sample_data(self):
@@ -263,20 +248,20 @@ class RevokeTreeTests(unit.TestCase):
def _revoke_by_user(self, user_id):
return self.tree.add_event(
- model.RevokeEvent(user_id=user_id))
+ revoke_model.RevokeEvent(user_id=user_id))
def _revoke_by_audit_id(self, audit_id):
event = self.tree.add_event(
- model.RevokeEvent(audit_id=audit_id))
+ revoke_model.RevokeEvent(audit_id=audit_id))
self.events.append(event)
return event
def _revoke_by_audit_chain_id(self, audit_chain_id, project_id=None,
domain_id=None):
event = self.tree.add_event(
- model.RevokeEvent(audit_chain_id=audit_chain_id,
- project_id=project_id,
- domain_id=domain_id)
+ revoke_model.RevokeEvent(audit_chain_id=audit_chain_id,
+ project_id=project_id,
+ domain_id=domain_id)
)
self.events.append(event)
return event
@@ -284,46 +269,47 @@ class RevokeTreeTests(unit.TestCase):
def _revoke_by_expiration(self, user_id, expires_at, project_id=None,
domain_id=None):
event = self.tree.add_event(
- model.RevokeEvent(user_id=user_id,
- expires_at=expires_at,
- project_id=project_id,
- domain_id=domain_id))
+ revoke_model.RevokeEvent(user_id=user_id,
+ expires_at=expires_at,
+ project_id=project_id,
+ domain_id=domain_id))
self.events.append(event)
return event
def _revoke_by_grant(self, role_id, user_id=None,
domain_id=None, project_id=None):
event = self.tree.add_event(
- model.RevokeEvent(user_id=user_id,
- role_id=role_id,
- domain_id=domain_id,
- project_id=project_id))
+ revoke_model.RevokeEvent(user_id=user_id,
+ role_id=role_id,
+ domain_id=domain_id,
+ project_id=project_id))
self.events.append(event)
return event
def _revoke_by_user_and_project(self, user_id, project_id):
event = self.tree.add_event(
- model.RevokeEvent(project_id=project_id,
- user_id=user_id))
+ revoke_model.RevokeEvent(project_id=project_id,
+ user_id=user_id))
self.events.append(event)
return event
def _revoke_by_project_role_assignment(self, project_id, role_id):
event = self.tree.add_event(
- model.RevokeEvent(project_id=project_id,
- role_id=role_id))
+ revoke_model.RevokeEvent(project_id=project_id,
+ role_id=role_id))
self.events.append(event)
return event
def _revoke_by_domain_role_assignment(self, domain_id, role_id):
event = self.tree.add_event(
- model.RevokeEvent(domain_id=domain_id,
- role_id=role_id))
+ revoke_model.RevokeEvent(domain_id=domain_id,
+ role_id=role_id))
self.events.append(event)
return event
def _revoke_by_domain(self, domain_id):
- event = self.tree.add_event(model.RevokeEvent(domain_id=domain_id))
+ event = self.tree.add_event(
+ revoke_model.RevokeEvent(domain_id=domain_id))
self.events.append(event)
def _user_field_test(self, field_name):
diff --git a/keystone-moon/keystone/tests/unit/test_sql_livetest.py b/keystone-moon/keystone/tests/unit/test_sql_livetest.py
index e2186907..18b8ea91 100644
--- a/keystone-moon/keystone/tests/unit/test_sql_livetest.py
+++ b/keystone-moon/keystone/tests/unit/test_sql_livetest.py
@@ -13,7 +13,6 @@
# under the License.
from keystone.tests import unit
-from keystone.tests.unit import test_sql_migrate_extensions
from keystone.tests.unit import test_sql_upgrade
@@ -39,29 +38,6 @@ class MysqlMigrateTests(test_sql_upgrade.SqlUpgradeTests):
return files
-class PostgresqlRevokeExtensionsTests(
- test_sql_migrate_extensions.RevokeExtension):
- def setUp(self):
- self.skip_if_env_not_set('ENABLE_LIVE_POSTGRES_TEST')
- super(PostgresqlRevokeExtensionsTests, self).setUp()
-
- def config_files(self):
- files = super(PostgresqlRevokeExtensionsTests, self).config_files()
- files.append(unit.dirs.tests_conf("backend_postgresql.conf"))
- return files
-
-
-class MysqlRevokeExtensionsTests(test_sql_migrate_extensions.RevokeExtension):
- def setUp(self):
- self.skip_if_env_not_set('ENABLE_LIVE_MYSQL_TEST')
- super(MysqlRevokeExtensionsTests, self).setUp()
-
- def config_files(self):
- files = super(MysqlRevokeExtensionsTests, self).config_files()
- files.append(unit.dirs.tests_conf("backend_mysql.conf"))
- return files
-
-
class Db2MigrateTests(test_sql_upgrade.SqlUpgradeTests):
def setUp(self):
self.skip_if_env_not_set('ENABLE_LIVE_DB2_TEST')
diff --git a/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py b/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
index f498fe94..0155f787 100644
--- a/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
+++ b/keystone-moon/keystone/tests/unit/test_sql_migrate_extensions.py
@@ -29,369 +29,84 @@ WARNING::
all data will be lost.
"""
-import sqlalchemy
-import uuid
-
-from oslo_db import exception as db_exception
-from oslo_db.sqlalchemy import utils
-
from keystone.contrib import endpoint_filter
from keystone.contrib import endpoint_policy
-from keystone.contrib import example
from keystone.contrib import federation
from keystone.contrib import oauth1
from keystone.contrib import revoke
+from keystone import exception
from keystone.tests.unit import test_sql_upgrade
-class SqlUpgradeExampleExtension(test_sql_upgrade.SqlMigrateBase):
- def repo_package(self):
- return example
-
- def test_upgrade(self):
- self.assertTableDoesNotExist('example')
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('example', ['id', 'type', 'extra'])
-class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
+class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
+
+    OAUTH1_MIGRATIONS = 5
def repo_package(self):
return oauth1
- def upgrade(self, version):
- super(SqlUpgradeOAuth1Extension, self).upgrade(
- version, repository=self.repo_path)
-
- def _assert_v1_3_tables(self):
- self.assertTableColumns('consumer',
- ['id',
- 'description',
- 'secret',
- 'extra'])
- self.assertTableColumns('request_token',
- ['id',
- 'request_secret',
- 'verifier',
- 'authorizing_user_id',
- 'requested_project_id',
- 'requested_roles',
- 'consumer_id',
- 'expires_at'])
- self.assertTableColumns('access_token',
- ['id',
- 'access_secret',
- 'authorizing_user_id',
- 'project_id',
- 'requested_roles',
- 'consumer_id',
- 'expires_at'])
-
- def _assert_v4_later_tables(self):
- self.assertTableColumns('consumer',
- ['id',
- 'description',
- 'secret',
- 'extra'])
- self.assertTableColumns('request_token',
- ['id',
- 'request_secret',
- 'verifier',
- 'authorizing_user_id',
- 'requested_project_id',
- 'role_ids',
- 'consumer_id',
- 'expires_at'])
- self.assertTableColumns('access_token',
- ['id',
- 'access_secret',
- 'authorizing_user_id',
- 'project_id',
- 'role_ids',
- 'consumer_id',
- 'expires_at'])
-
def test_upgrade(self):
- self.assertTableDoesNotExist('consumer')
- self.assertTableDoesNotExist('request_token')
- self.assertTableDoesNotExist('access_token')
- self.upgrade(1)
- self._assert_v1_3_tables()
-
- # NOTE(blk-u): Migrations 2-3 don't modify the tables in a way that we
- # can easily test for.
+ for version in range(self.OAUTH1_MIGRATIONS):
+ v = version + 1
+ self.assertRaises(exception.MigrationMovedFailure,
+ self.upgrade, version=v,
+ repository=self.repo_path)
- self.upgrade(4)
- self._assert_v4_later_tables()
- self.upgrade(5)
- self._assert_v4_later_tables()
-class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
+class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
+
+    ENDPOINT_FILTER_MIGRATIONS = 2
def repo_package(self):
return endpoint_filter
- def upgrade(self, version):
- super(EndpointFilterExtension, self).upgrade(
- version, repository=self.repo_path)
-
- def _assert_v1_tables(self):
- self.assertTableColumns('project_endpoint',
- ['endpoint_id', 'project_id'])
- self.assertTableDoesNotExist('endpoint_group')
- self.assertTableDoesNotExist('project_endpoint_group')
-
- def _assert_v2_tables(self):
- self.assertTableColumns('project_endpoint',
- ['endpoint_id', 'project_id'])
- self.assertTableColumns('endpoint_group',
- ['id', 'name', 'description', 'filters'])
- self.assertTableColumns('project_endpoint_group',
- ['endpoint_group_id', 'project_id'])
-
def test_upgrade(self):
- self.assertTableDoesNotExist('project_endpoint')
- self.upgrade(1)
- self._assert_v1_tables()
- self.assertTableColumns('project_endpoint',
- ['endpoint_id', 'project_id'])
- self.upgrade(2)
- self._assert_v2_tables()
+ for version in range(self.ENDPOINT_FILTER_MIGRATIONS):
+ v = version + 1
+ self.assertRaises(exception.MigrationMovedFailure,
+ self.upgrade, version=v,
+ repository=self.repo_path)
class EndpointPolicyExtension(test_sql_upgrade.SqlMigrateBase):
+
+ ENDPOINT_POLICY_MIGRATIONS = 1
+
def repo_package(self):
return endpoint_policy
def test_upgrade(self):
- self.assertTableDoesNotExist('policy_association')
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('policy_association',
- ['id', 'policy_id', 'endpoint_id',
- 'service_id', 'region_id'])
+ self.assertRaises(exception.MigrationMovedFailure,
+ self.upgrade,
+ version=self.ENDPOINT_POLICY_MIGRATIONS,
+ repository=self.repo_path)
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
- """Test class for ensuring the Federation SQL."""
- def setUp(self):
- super(FederationExtension, self).setUp()
- self.identity_provider = 'identity_provider'
- self.federation_protocol = 'federation_protocol'
- self.service_provider = 'service_provider'
- self.mapping = 'mapping'
- self.remote_id_table = 'idp_remote_ids'
+ FEDERATION_MIGRATIONS = 8
def repo_package(self):
return federation
- def insert_dict(self, session, table_name, d):
- """Naively inserts key-value pairs into a table, given a dictionary."""
- table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
- insert = table.insert().values(**d)
- session.execute(insert)
- session.commit()
-
def test_upgrade(self):
- self.assertTableDoesNotExist(self.identity_provider)
- self.assertTableDoesNotExist(self.federation_protocol)
- self.assertTableDoesNotExist(self.mapping)
-
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns(self.identity_provider,
- ['id',
- 'enabled',
- 'description'])
-
- self.assertTableColumns(self.federation_protocol,
- ['id',
- 'idp_id',
- 'mapping_id'])
-
- self.upgrade(2, repository=self.repo_path)
- self.assertTableColumns(self.mapping,
- ['id', 'rules'])
-
- federation_protocol = utils.get_table(
- self.engine,
- 'federation_protocol')
- with self.engine.begin() as conn:
- conn.execute(federation_protocol.insert(), id=0, idp_id=1)
- self.upgrade(3, repository=self.repo_path)
- federation_protocol = utils.get_table(
- self.engine,
- 'federation_protocol')
- self.assertFalse(federation_protocol.c.mapping_id.nullable)
-
- def test_service_provider_attributes_cannot_be_null(self):
- self.upgrade(6, repository=self.repo_path)
- self.assertTableColumns(self.service_provider,
- ['id', 'description', 'enabled', 'auth_url',
- 'sp_url'])
-
- session = self.Session()
- sp1 = {'id': uuid.uuid4().hex,
- 'auth_url': None,
- 'sp_url': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- sp2 = {'id': uuid.uuid4().hex,
- 'auth_url': uuid.uuid4().hex,
- 'sp_url': None,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- sp3 = {'id': uuid.uuid4().hex,
- 'auth_url': None,
- 'sp_url': None,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
-
- # Insert with 'auth_url' or 'sp_url' set to null must fail
- self.assertRaises(db_exception.DBError,
- self.insert_dict,
- session,
- self.service_provider,
- sp1)
- self.assertRaises(db_exception.DBError,
- self.insert_dict,
- session,
- self.service_provider,
- sp2)
- self.assertRaises(db_exception.DBError,
- self.insert_dict,
- session,
- self.service_provider,
- sp3)
-
- session.close()
-
- def test_fixup_service_provider_attributes(self):
- session = self.Session()
- sp1 = {'id': uuid.uuid4().hex,
- 'auth_url': None,
- 'sp_url': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- sp2 = {'id': uuid.uuid4().hex,
- 'auth_url': uuid.uuid4().hex,
- 'sp_url': None,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- sp3 = {'id': uuid.uuid4().hex,
- 'auth_url': None,
- 'sp_url': None,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- self.upgrade(5, repository=self.repo_path)
- self.assertTableColumns(self.service_provider,
- ['id', 'description', 'enabled', 'auth_url',
- 'sp_url'])
-
- # Before the migration, the table should accept null values
- self.insert_dict(session, self.service_provider, sp1)
- self.insert_dict(session, self.service_provider, sp2)
- self.insert_dict(session, self.service_provider, sp3)
-
- # Check if null values are updated to empty string when migrating
- session.close()
- self.upgrade(6, repository=self.repo_path)
- sp_table = sqlalchemy.Table(self.service_provider,
- self.metadata,
- autoload=True)
- session = self.Session()
- self.metadata.clear()
-
- sp = session.query(sp_table).filter(sp_table.c.id == sp1['id'])[0]
- self.assertEqual('', sp.auth_url)
-
- sp = session.query(sp_table).filter(sp_table.c.id == sp2['id'])[0]
- self.assertEqual('', sp.sp_url)
-
- sp = session.query(sp_table).filter(sp_table.c.id == sp3['id'])[0]
- self.assertEqual('', sp.auth_url)
- self.assertEqual('', sp.sp_url)
-
- def test_propagate_remote_id_to_separate_column(self):
- """Make sure empty remote_id is not propagated.
- Test scenario:
- - Upgrade database to version 6 where identity_provider table has a
- remote_id column
- - Add 3 identity provider objects, where idp1 and idp2 have valid
- remote_id parameter set, and idp3 has it empty (None).
- - Upgrade database to version 7 and expect migration scripts to
-      properly move data from identity_provider.remote_id column into
- separate table idp_remote_ids.
- - In the idp_remote_ids table expect to find entries for idp1 and idp2
-    and not find anything for idp3 (identified by idp's id)
-
- """
- session = self.Session()
- idp1 = {'id': uuid.uuid4().hex,
- 'remote_id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- idp2 = {'id': uuid.uuid4().hex,
- 'remote_id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- idp3 = {'id': uuid.uuid4().hex,
- 'remote_id': None,
- 'description': uuid.uuid4().hex,
- 'enabled': True}
- self.upgrade(6, repository=self.repo_path)
- self.assertTableColumns(self.identity_provider,
- ['id', 'description', 'enabled', 'remote_id'])
-
- self.insert_dict(session, self.identity_provider, idp1)
- self.insert_dict(session, self.identity_provider, idp2)
- self.insert_dict(session, self.identity_provider, idp3)
-
- session.close()
- self.upgrade(7, repository=self.repo_path)
-
- self.assertTableColumns(self.identity_provider,
- ['id', 'description', 'enabled'])
- remote_id_table = sqlalchemy.Table(self.remote_id_table,
- self.metadata,
- autoload=True)
-
- session = self.Session()
- self.metadata.clear()
-
- idp = session.query(remote_id_table).filter(
- remote_id_table.c.idp_id == idp1['id'])[0]
- self.assertEqual(idp1['remote_id'], idp.remote_id)
-
- idp = session.query(remote_id_table).filter(
- remote_id_table.c.idp_id == idp2['id'])[0]
- self.assertEqual(idp2['remote_id'], idp.remote_id)
-
- idp = session.query(remote_id_table).filter(
- remote_id_table.c.idp_id == idp3['id'])
- # NOTE(marek-denis): As idp3 had empty 'remote_id' attribute we expect
- # not to find it in the 'remote_id_table' table, hence count should be
-        # 0.
- self.assertEqual(0, idp.count())
-
- def test_add_relay_state_column(self):
- self.upgrade(8, repository=self.repo_path)
- self.assertTableColumns(self.service_provider,
- ['id', 'description', 'enabled', 'auth_url',
- 'relay_state_prefix', 'sp_url'])
+ for version in range(self.FEDERATION_MIGRATIONS):
+ v = version + 1
+ self.assertRaises(exception.MigrationMovedFailure,
+ self.upgrade, version=v,
+ repository=self.repo_path)
class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
- _REVOKE_COLUMN_NAMES = ['id', 'domain_id', 'project_id', 'user_id',
- 'role_id', 'trust_id', 'consumer_id',
- 'access_token_id', 'issued_before', 'expires_at',
- 'revoked_at']
+ REVOKE_MIGRATIONS = 2
def repo_package(self):
return revoke
def test_upgrade(self):
- self.assertTableDoesNotExist('revocation_event')
- self.upgrade(1, repository=self.repo_path)
- self.assertTableColumns('revocation_event',
- self._REVOKE_COLUMN_NAMES)
+ for version in range(self.REVOKE_MIGRATIONS):
+ v = version + 1
+ self.assertRaises(exception.MigrationMovedFailure,
+ self.upgrade, version=v,
+ repository=self.repo_path)
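(All four extension classes above now share one shape: the per-extension migration repositories were folded into keystone's main repository, so running any legacy version must raise. The shared pattern, factored into a hypothetical helper for clarity; the helper itself is not part of the diff:)

    from keystone import exception

    def assert_migrations_moved(test, migration_count):
        # every legacy extension migration should refuse to run now that
        # the repos are collapsed into the main migrate_repo
        for version in range(1, migration_count + 1):
            test.assertRaises(exception.MigrationMovedFailure,
                              test.upgrade, version=version,
                              repository=test.repo_path)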
diff --git a/keystone-moon/keystone/tests/unit/test_sql_upgrade.py b/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
index d617d445..5ca12f66 100644
--- a/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
+++ b/keystone-moon/keystone/tests/unit/test_sql_upgrade.py
@@ -29,11 +29,13 @@ WARNING::
all data will be lost.
"""
-import copy
import json
import uuid
+import migrate
from migrate.versioning import api as versioning_api
+from migrate.versioning import repository
+import mock
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import migration
@@ -41,12 +43,10 @@ from oslo_db.sqlalchemy import session as db_session
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy import schema
+from testtools import matchers
from keystone.common import sql
-from keystone.common.sql import migrate_repo
from keystone.common.sql import migration_helpers
-from keystone.contrib import federation
-from keystone.contrib import revoke
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
@@ -54,7 +54,6 @@ from keystone.tests.unit.ksfixtures import database
CONF = cfg.CONF
-DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
# NOTE(morganfainberg): This should be updated when each DB migration collapse
# is done to mirror the expected structure of the DB in the format of
@@ -67,8 +66,8 @@ INITIAL_TABLE_STRUCTURE = {
'id', 'name', 'enabled', 'extra',
],
'endpoint': [
- 'id', 'legacy_endpoint_id', 'interface', 'region', 'service_id', 'url',
- 'enabled', 'extra',
+ 'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id',
+ 'url', 'enabled', 'extra',
],
'group': [
'id', 'domain_id', 'name', 'description', 'extra',
@@ -78,6 +77,7 @@ INITIAL_TABLE_STRUCTURE = {
],
'project': [
'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
+ 'parent_id',
],
'role': [
'id', 'name', 'extra',
@@ -108,23 +108,82 @@ INITIAL_TABLE_STRUCTURE = {
'assignment': [
'type', 'actor_id', 'target_id', 'role_id', 'inherited',
],
-}
-
-
-INITIAL_EXTENSION_TABLE_STRUCTURE = {
- 'revocation_event': [
- 'id', 'domain_id', 'project_id', 'user_id', 'role_id',
- 'trust_id', 'consumer_id', 'access_token_id',
- 'issued_before', 'expires_at', 'revoked_at', 'audit_id',
- 'audit_chain_id',
+ 'id_mapping': [
+ 'public_id', 'domain_id', 'local_id', 'entity_type',
+ ],
+ 'whitelisted_config': [
+ 'domain_id', 'group', 'option', 'value',
+ ],
+ 'sensitive_config': [
+ 'domain_id', 'group', 'option', 'value',
],
}
-EXTENSIONS = {'federation': federation,
- 'revoke': revoke}
+
+# Test migration_helpers.get_init_version separately to ensure it works before
+# using in the SqlUpgrade tests.
+class MigrationHelpersGetInitVersionTests(unit.TestCase):
+ @mock.patch.object(repository, 'Repository')
+ def test_get_init_version_no_path(self, repo):
+ migrate_versions = mock.MagicMock()
+ # make a version list starting with zero. `get_init_version` will
+ # return None for this value.
+ migrate_versions.versions.versions = list(range(0, 5))
+ repo.return_value = migrate_versions
+
+ # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
+ # an exception.
+ with mock.patch('os.path.isdir', return_value=True):
+ # since 0 is the smallest version expect None
+ version = migration_helpers.get_init_version()
+ self.assertIsNone(version)
+
+ # check that the default path was used as the first argument to the
+ # first invocation of repo. Cannot match the full path because it is
+ # based on where the test is run.
+ param = repo.call_args_list[0][0][0]
+ self.assertTrue(param.endswith('/sql/migrate_repo'))
+
+ @mock.patch.object(repository, 'Repository')
+ def test_get_init_version_with_path_initial_version_0(self, repo):
+ migrate_versions = mock.MagicMock()
+ # make a version list starting with zero. `get_init_version` will
+ # return None for this value.
+ migrate_versions.versions.versions = list(range(0, 5))
+ repo.return_value = migrate_versions
+
+ # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
+ # an exception.
+ with mock.patch('os.path.isdir', return_value=True):
+ path = '/keystone/migrate_repo/'
+
+ # since 0 is the smallest version expect None
+ version = migration_helpers.get_init_version(abs_path=path)
+ self.assertIsNone(version)
+
+ @mock.patch.object(repository, 'Repository')
+ def test_get_init_version_with_path(self, repo):
+ initial_version = 10
+
+ migrate_versions = mock.MagicMock()
+ migrate_versions.versions.versions = list(range(initial_version + 1,
+ initial_version + 5))
+ repo.return_value = migrate_versions
+
+ # os.path.isdir() is called by `find_migrate_repo()`. Mock it to avoid
+ # an exception.
+ with mock.patch('os.path.isdir', return_value=True):
+ path = '/keystone/migrate_repo/'
+
+ version = migration_helpers.get_init_version(abs_path=path)
+ self.assertEqual(initial_version, version)
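(The three tests above pin down the helper's contract. A hedged sketch of the semantics they imply, not the helper's actual body: the initial version is one less than the smallest migration script version, with zero reported as None:)

    def get_init_version_sketch(versions):
        # versions: non-empty migration script versions in the repository
        init_version = min(versions) - 1
        return init_version if init_version > 0 else None

    assert get_init_version_sketch(range(0, 5)) is None   # repo starts at 0
    assert get_init_version_sketch(range(11, 15)) == 10   # repo starts at 11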
class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
+    # Override this in subclasses. The default of zero covers tests such
+    # as the extension upgrades.
+ _initial_db_version = 0
+
def initialize_sql(self):
self.metadata = sqlalchemy.MetaData()
self.metadata.bind = self.engine
@@ -139,6 +198,7 @@ class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
super(SqlMigrateBase, self).setUp()
+ self.load_backends()
database.initialize_sql_session()
conn_str = CONF.database.connection
if (conn_str != unit.IN_MEM_DB_CONN_STRING and
@@ -155,7 +215,9 @@ class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
connection='sqlite:///%s' % db_file)
# create and share a single sqlalchemy engine for testing
- self.engine = sql.get_engine()
+ with sql.session_for_write() as session:
+ self.engine = session.get_bind()
+ self.addCleanup(self.cleanup_instance('engine'))
self.Session = db_session.get_maker(self.engine, autocommit=False)
self.addCleanup(sqlalchemy.orm.session.Session.close_all)
@@ -164,7 +226,8 @@ class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
self.repo_package())
self.schema = versioning_api.ControlledSchema.create(
self.engine,
- self.repo_path, self.initial_db_version)
+ self.repo_path,
+ self._initial_db_version)
# auto-detect the highest available schema version in the migrate_repo
self.max_version = self.schema.repository.version().version
@@ -229,6 +292,23 @@ class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
else:
raise AssertionError('Table "%s" already exists' % table_name)
+ def assertTableCountsMatch(self, table1_name, table2_name):
+ try:
+ table1 = self.select_table(table1_name)
+ except sqlalchemy.exc.NoSuchTableError:
+ raise AssertionError('Table "%s" does not exist' % table1_name)
+ try:
+ table2 = self.select_table(table2_name)
+ except sqlalchemy.exc.NoSuchTableError:
+ raise AssertionError('Table "%s" does not exist' % table2_name)
+ session = self.Session()
+ table1_count = session.execute(table1.count()).scalar()
+ table2_count = session.execute(table2.count()).scalar()
+ if table1_count != table2_count:
+ raise AssertionError('Table counts do not match: {0} ({1}), {2} '
+ '({3})'.format(table1_name, table1_count,
+ table2_name, table2_count))
+
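(A hypothetical call site for the new assertion, e.g. after a data-copying migration; the version number and table names below are placeholders, not taken from this diff:)

    self.upgrade(101)  # placeholder migration that copies rows to a new table
    self.assertTableCountsMatch('user', 'user_temp')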
def upgrade(self, *args, **kwargs):
self._migrate(*args, **kwargs)
@@ -257,50 +337,30 @@ class SqlMigrateBase(unit.SQLDriverOverrides, unit.TestCase):
self.assertItemsEqual(expected_cols, actual_cols,
'%s table' % table_name)
- @property
- def initial_db_version(self):
- return getattr(self, '_initial_db_version', 0)
-
class SqlUpgradeTests(SqlMigrateBase):
-
- _initial_db_version = migrate_repo.DB_INIT_VERSION
+ _initial_db_version = migration_helpers.get_init_version()
def test_blank_db_to_start(self):
self.assertTableDoesNotExist('user')
def test_start_version_db_init_version(self):
- version = migration.db_version(sql.get_engine(), self.repo_path,
- migrate_repo.DB_INIT_VERSION)
+ with sql.session_for_write() as session:
+ version = migration.db_version(session.get_bind(), self.repo_path,
+ self._initial_db_version)
self.assertEqual(
- migrate_repo.DB_INIT_VERSION,
+ self._initial_db_version,
version,
- 'DB is not at version %s' % migrate_repo.DB_INIT_VERSION)
+ 'DB is not at version %s' % self._initial_db_version)
def test_upgrade_add_initial_tables(self):
- self.upgrade(migrate_repo.DB_INIT_VERSION + 1)
+ self.upgrade(self._initial_db_version + 1)
self.check_initial_table_structure()
def check_initial_table_structure(self):
for table in INITIAL_TABLE_STRUCTURE:
self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
- # Ensure the default domain was properly created.
- default_domain = migration_helpers.get_default_domain()
-
- meta = sqlalchemy.MetaData()
- meta.bind = self.engine
-
- domain_table = sqlalchemy.Table('domain', meta, autoload=True)
-
- session = self.Session()
- q = session.query(domain_table)
- refs = q.all()
-
- self.assertEqual(1, len(refs))
- for k in default_domain.keys():
- self.assertEqual(default_domain[k], getattr(refs[0], k))
-
def insert_dict(self, session, table_name, d, table=None):
"""Naively inserts key-value pairs into a table, given a dictionary."""
if table is None:
@@ -312,127 +372,43 @@ class SqlUpgradeTests(SqlMigrateBase):
session.execute(insert)
session.commit()
- def test_id_mapping(self):
- self.upgrade(50)
- self.assertTableDoesNotExist('id_mapping')
- self.upgrade(51)
- self.assertTableExists('id_mapping')
-
- def test_region_url_upgrade(self):
- self.upgrade(52)
- self.assertTableColumns('region',
- ['id', 'description', 'parent_region_id',
- 'extra', 'url'])
-
- def test_endpoint_region_upgrade_columns(self):
- self.upgrade(53)
- self.assertTableColumns('endpoint',
- ['id', 'legacy_endpoint_id', 'interface',
- 'service_id', 'url', 'extra', 'enabled',
- 'region_id'])
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(255, region_table.c.id.type.length)
- self.assertEqual(255, region_table.c.parent_region_id.type.length)
- endpoint_table = sqlalchemy.Table('endpoint',
- self.metadata,
- autoload=True)
- self.assertEqual(255, endpoint_table.c.region_id.type.length)
-
- def test_endpoint_region_migration(self):
- self.upgrade(52)
- session = self.Session()
- _small_region_name = '0' * 30
- _long_region_name = '0' * 255
- _clashing_region_name = '0' * 70
-
- def add_service():
- service_id = uuid.uuid4().hex
-
- service = {
- 'id': service_id,
- 'type': uuid.uuid4().hex
- }
-
- self.insert_dict(session, 'service', service)
-
- return service_id
-
- def add_endpoint(service_id, region):
- endpoint_id = uuid.uuid4().hex
-
- endpoint = {
- 'id': endpoint_id,
- 'interface': uuid.uuid4().hex[:8],
- 'service_id': service_id,
- 'url': uuid.uuid4().hex,
- 'region': region
- }
- self.insert_dict(session, 'endpoint', endpoint)
-
- return endpoint_id
-
- _service_id_ = add_service()
- add_endpoint(_service_id_, region=_long_region_name)
- add_endpoint(_service_id_, region=_long_region_name)
- add_endpoint(_service_id_, region=_clashing_region_name)
- add_endpoint(_service_id_, region=_small_region_name)
- add_endpoint(_service_id_, region=None)
-
- # upgrade to 53
- session.close()
- self.upgrade(53)
- session = self.Session()
- self.metadata.clear()
+ def test_kilo_squash(self):
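+ # NOTE: the Kilo squash collapses the individual migrations up to 067
+ # into a single step, so one upgrade(67) call should yield the
+ # cumulative effects asserted below.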
+ self.upgrade(67)
- region_table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(1, session.query(region_table).
- filter_by(id=_long_region_name).count())
- self.assertEqual(1, session.query(region_table).
- filter_by(id=_clashing_region_name).count())
- self.assertEqual(1, session.query(region_table).
- filter_by(id=_small_region_name).count())
+ # In 053 the sizes of the ID and parent region ID columns were changed
+ table = sqlalchemy.Table('region', self.metadata, autoload=True)
+ self.assertEqual(255, table.c.id.type.length)
+ self.assertEqual(255, table.c.parent_region_id.type.length)
+ table = sqlalchemy.Table('endpoint', self.metadata, autoload=True)
+ self.assertEqual(255, table.c.region_id.type.length)
- endpoint_table = sqlalchemy.Table('endpoint',
- self.metadata,
- autoload=True)
- self.assertEqual(5, session.query(endpoint_table).count())
- self.assertEqual(2, session.query(endpoint_table).
- filter_by(region_id=_long_region_name).count())
- self.assertEqual(1, session.query(endpoint_table).
- filter_by(region_id=_clashing_region_name).count())
- self.assertEqual(1, session.query(endpoint_table).
- filter_by(region_id=_small_region_name).count())
-
- def test_add_actor_id_index(self):
- self.upgrade(53)
- self.upgrade(54)
+ # In 054 an index was created for the actor_id of the assignment table
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_actor_id', ['actor_id']), index_data)
- def test_token_user_id_and_trust_id_index_upgrade(self):
- self.upgrade(54)
- self.upgrade(55)
+ # In 055 indexes were created for user and trust IDs in the token table
table = sqlalchemy.Table('token', self.metadata, autoload=True)
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
- def test_project_parent_id_upgrade(self):
- self.upgrade(61)
- self.assertTableColumns('project',
- ['id', 'name', 'extra', 'description',
- 'enabled', 'domain_id', 'parent_id'])
+ # In 062 the role ID foreign key was removed from the assignment table
+ if self.engine.name == "mysql":
+ self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
- def test_drop_assignment_role_fk(self):
- self.upgrade(61)
- self.assertTrue(self.does_fk_exist('assignment', 'role_id'))
- self.upgrade(62)
+ # In 064 the domain ID FK was removed from the group and user tables
if self.engine.name != 'sqlite':
# sqlite does not support FK deletions (or enforcement)
- self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
+ self.assertFalse(self.does_fk_exist('group', 'domain_id'))
+ self.assertFalse(self.does_fk_exist('user', 'domain_id'))
+
+ # In 067 the role ID index was removed from the assignment table
+ if self.engine.name == "mysql":
+ self.assertFalse(self.does_index_exist('assignment',
+ 'assignment_role_id_fkey'))
def test_insert_assignment_inherited_pk(self):
ASSIGNMENT_TABLE_NAME = 'assignment'
@@ -502,7 +478,6 @@ class SqlUpgradeTests(SqlMigrateBase):
def does_pk_exist(self, table, pk_column):
"""Checks whether a column is primary key on a table."""
-
inspector = reflection.Inspector.from_engine(self.engine)
pk_columns = inspector.get_pk_constraint(table)['constrained_columns']
@@ -515,119 +490,164 @@ class SqlUpgradeTests(SqlMigrateBase):
return True
return False
- def test_drop_region_url_upgrade(self):
- self.upgrade(63)
- self.assertTableColumns('region',
- ['id', 'description', 'parent_region_id',
- 'extra'])
-
- def test_domain_fk(self):
- self.upgrade(63)
- self.assertTrue(self.does_fk_exist('group', 'domain_id'))
- self.assertTrue(self.does_fk_exist('user', 'domain_id'))
- self.upgrade(64)
- if self.engine.name != 'sqlite':
- # sqlite does not support FK deletions (or enforcement)
- self.assertFalse(self.does_fk_exist('group', 'domain_id'))
- self.assertFalse(self.does_fk_exist('user', 'domain_id'))
-
- def test_add_domain_config(self):
- whitelisted_table = 'whitelisted_config'
- sensitive_table = 'sensitive_config'
- self.upgrade(64)
- self.assertTableDoesNotExist(whitelisted_table)
- self.assertTableDoesNotExist(sensitive_table)
- self.upgrade(65)
- self.assertTableColumns(whitelisted_table,
- ['domain_id', 'group', 'option', 'value'])
- self.assertTableColumns(sensitive_table,
- ['domain_id', 'group', 'option', 'value'])
-
- def test_fixup_service_name_value_upgrade(self):
- """Update service name data from `extra` to empty string."""
- def add_service(**extra_data):
- service_id = uuid.uuid4().hex
-
- service = {
- 'id': service_id,
- 'type': uuid.uuid4().hex,
- 'extra': json.dumps(extra_data),
- }
-
- self.insert_dict(session, 'service', service)
-
- return service_id
-
- self.upgrade(65)
- session = self.Session()
-
- # Services with extra values having a random attribute and
- # different combinations of name
- random_attr_name = uuid.uuid4().hex
- random_attr_value = uuid.uuid4().hex
- random_attr_str = "%s='%s'" % (random_attr_name, random_attr_value)
- random_attr_no_name = {random_attr_name: random_attr_value}
- random_attr_no_name_str = "%s='%s'" % (random_attr_name,
- random_attr_value)
- random_attr_name_value = {random_attr_name: random_attr_value,
- 'name': 'myname'}
- random_attr_name_value_str = 'name=myname,%s' % random_attr_str
- random_attr_name_empty = {random_attr_name: random_attr_value,
- 'name': ''}
- random_attr_name_empty_str = 'name=,%s' % random_attr_str
- random_attr_name_none = {random_attr_name: random_attr_value,
- 'name': None}
- random_attr_name_none_str = 'name=None,%s' % random_attr_str
-
- services = [
- (add_service(**random_attr_no_name),
- random_attr_name_empty, random_attr_no_name_str),
- (add_service(**random_attr_name_value),
- random_attr_name_value, random_attr_name_value_str),
- (add_service(**random_attr_name_empty),
- random_attr_name_empty, random_attr_name_empty_str),
- (add_service(**random_attr_name_none),
- random_attr_name_empty, random_attr_name_none_str),
- ]
-
- # NOTE(viktors): Add a service with empty extra field
- self.insert_dict(session, 'service',
- {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex})
-
- session.close()
- self.upgrade(66)
- session = self.Session()
-
- # Verify that the services have the expected values.
- self.metadata.clear()
- service_table = sqlalchemy.Table('service', self.metadata,
- autoload=True)
-
- def fetch_service_extra(service_id):
- cols = [service_table.c.extra]
- f = service_table.c.id == service_id
- s = sqlalchemy.select(cols).where(f)
- service = session.execute(s).fetchone()
- return json.loads(service.extra)
-
- for service_id, exp_extra, msg in services:
- extra = fetch_service_extra(service_id)
- self.assertDictEqual(exp_extra, extra, msg)
-
- def _does_index_exist(self, table_name, index_name):
+ def does_index_exist(self, table_name, index_name):
meta = sqlalchemy.MetaData(bind=self.engine)
- table = sqlalchemy.Table('assignment', meta, autoload=True)
+ table = sqlalchemy.Table(table_name, meta, autoload=True)
return index_name in [idx.name for idx in table.indexes]
- def test_drop_assignment_role_id_index_mysql(self):
- self.upgrade(66)
- if self.engine.name == "mysql":
- self.assertTrue(self._does_index_exist('assignment',
- 'assignment_role_id_fkey'))
- self.upgrade(67)
- if self.engine.name == "mysql":
- self.assertFalse(self._does_index_exist('assignment',
- 'assignment_role_id_fkey'))
+ def does_constraint_exist(self, table_name, constraint_name):
+ meta = sqlalchemy.MetaData(bind=self.engine)
+ table = sqlalchemy.Table(table_name, meta, autoload=True)
+ return constraint_name in [con.name for con in table.constraints]
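+ # NOTE: the split between does_index_exist and does_constraint_exist
+ # matters because MySQL exposes unique constraints as unique indexes;
+ # that is why the tests below branch on self.engine.name.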
+
+ def test_endpoint_policy_upgrade(self):
+ self.assertTableDoesNotExist('policy_association')
+ self.upgrade(81)
+ self.assertTableColumns('policy_association',
+ ['id', 'policy_id', 'endpoint_id',
+ 'service_id', 'region_id'])
+
+ @mock.patch.object(migration_helpers, 'get_db_version', return_value=1)
+ def test_endpoint_policy_already_migrated(self, mock_ep):
+
+ # Setting the return value to 1 simulates that the migration has
+ # already been run, so there's no need to create the table again.
+
+ self.upgrade(81)
+
+ mock_ep.assert_called_once_with(extension='endpoint_policy',
+ engine=mock.ANY)
+
+ # It won't exist because we are mocking it, but we can verify
+ # that 081 did not create the table
+ self.assertTableDoesNotExist('policy_association')
+
+ def test_create_federation_tables(self):
+ self.identity_provider = 'identity_provider'
+ self.federation_protocol = 'federation_protocol'
+ self.service_provider = 'service_provider'
+ self.mapping = 'mapping'
+ self.remote_ids = 'idp_remote_ids'
+
+ self.assertTableDoesNotExist(self.identity_provider)
+ self.assertTableDoesNotExist(self.federation_protocol)
+ self.assertTableDoesNotExist(self.service_provider)
+ self.assertTableDoesNotExist(self.mapping)
+ self.assertTableDoesNotExist(self.remote_ids)
+
+ self.upgrade(82)
+ self.assertTableColumns(self.identity_provider,
+ ['id', 'description', 'enabled'])
+
+ self.assertTableColumns(self.federation_protocol,
+ ['id', 'idp_id', 'mapping_id'])
+
+ self.assertTableColumns(self.mapping,
+ ['id', 'rules'])
+
+ self.assertTableColumns(self.service_provider,
+ ['id', 'description', 'enabled', 'auth_url',
+ 'relay_state_prefix', 'sp_url'])
+
+ self.assertTableColumns(self.remote_ids, ['idp_id', 'remote_id'])
+
+ federation_protocol = sqlalchemy.Table(self.federation_protocol,
+ self.metadata,
+ autoload=True)
+ self.assertFalse(federation_protocol.c.mapping_id.nullable)
+
+ sp_table = sqlalchemy.Table(self.service_provider,
+ self.metadata,
+ autoload=True)
+ self.assertFalse(sp_table.c.auth_url.nullable)
+ self.assertFalse(sp_table.c.sp_url.nullable)
+
+ @mock.patch.object(migration_helpers, 'get_db_version', return_value=8)
+ def test_federation_already_migrated(self, mock_federation):
+
+ # Setting the return value to 8 simulates that the migration has
+ # already been run, so there's no need to create the table again.
+ self.upgrade(82)
+
+ mock_federation.assert_any_call(extension='federation',
+ engine=mock.ANY)
+
+ # It won't exist because we are mocking it, but we can verify
+ # that 082 did not create the table.
+ self.assertTableDoesNotExist('identity_provider')
+ self.assertTableDoesNotExist('federation_protocol')
+ self.assertTableDoesNotExist('mapping')
+ self.assertTableDoesNotExist('service_provider')
+ self.assertTableDoesNotExist('idp_remote_ids')
+
+ def test_create_oauth_tables(self):
+ consumer = 'consumer'
+ request_token = 'request_token'
+ access_token = 'access_token'
+ self.assertTableDoesNotExist(consumer)
+ self.assertTableDoesNotExist(request_token)
+ self.assertTableDoesNotExist(access_token)
+ self.upgrade(83)
+ self.assertTableColumns(consumer,
+ ['id',
+ 'description',
+ 'secret',
+ 'extra'])
+ self.assertTableColumns(request_token,
+ ['id',
+ 'request_secret',
+ 'verifier',
+ 'authorizing_user_id',
+ 'requested_project_id',
+ 'role_ids',
+ 'consumer_id',
+ 'expires_at'])
+ self.assertTableColumns(access_token,
+ ['id',
+ 'access_secret',
+ 'authorizing_user_id',
+ 'project_id',
+ 'role_ids',
+ 'consumer_id',
+ 'expires_at'])
+
+ @mock.patch.object(migration_helpers, 'get_db_version', return_value=5)
+ def test_oauth1_already_migrated(self, mock_oauth1):
+
+ # Setting the return value to 5 simulates that the migration has
+ # already been run, so there's no need to create the table again.
+ self.upgrade(83)
+
+ mock_oauth1.assert_any_call(extension='oauth1', engine=mock.ANY)
+
+ # It won't exist because we are mocking it, but we can verify
+ # that 083 did not create the table.
+ self.assertTableDoesNotExist('consumer')
+ self.assertTableDoesNotExist('request_token')
+ self.assertTableDoesNotExist('access_token')
+
+ def test_create_revoke_table(self):
+ self.assertTableDoesNotExist('revocation_event')
+ self.upgrade(84)
+ self.assertTableColumns('revocation_event',
+ ['id', 'domain_id', 'project_id', 'user_id',
+ 'role_id', 'trust_id', 'consumer_id',
+ 'access_token_id', 'issued_before',
+ 'expires_at', 'revoked_at',
+ 'audit_chain_id', 'audit_id'])
+
+ @mock.patch.object(migration_helpers, 'get_db_version', return_value=2)
+ def test_revoke_already_migrated(self, mock_revoke):
+
+ # Setting the return value to 2 simulates that the migration has
+ # already been run, so there's no need to create the table again.
+ self.upgrade(84)
+
+ mock_revoke.assert_any_call(extension='revoke', engine=mock.ANY)
+
+ # It won't exist because we are mocking it, but we can verify
+ # that 084 did not create the table.
+ self.assertTableDoesNotExist('revocation_event')
def test_project_is_domain_upgrade(self):
self.upgrade(74)
@@ -636,6 +656,13 @@ class SqlUpgradeTests(SqlMigrateBase):
'enabled', 'domain_id', 'parent_id',
'is_domain'])
+ def test_implied_roles_upgrade(self):
+ self.upgrade(87)
+ self.assertTableColumns('implied_role',
+ ['prior_role_id', 'implied_role_id'])
+ self.assertTrue(self.does_fk_exist('implied_role', 'prior_role_id'))
+ self.assertTrue(self.does_fk_exist('implied_role', 'implied_role_id'))
+
def test_add_config_registration(self):
config_registration = 'config_register'
self.upgrade(74)
@@ -643,136 +670,497 @@ class SqlUpgradeTests(SqlMigrateBase):
self.upgrade(75)
self.assertTableColumns(config_registration, ['type', 'domain_id'])
- def populate_user_table(self, with_pass_enab=False,
- with_pass_enab_domain=False):
- # Populate the appropriate fields in the user
- # table, depending on the parameters:
- #
- # Default: id, name, extra
- # pass_enab: Add password, enabled as well
- # pass_enab_domain: Add password, enabled and domain as well
- #
- this_table = sqlalchemy.Table("user",
- self.metadata,
- autoload=True)
- for user in default_fixtures.USERS:
- extra = copy.deepcopy(user)
- extra.pop('id')
- extra.pop('name')
-
- if with_pass_enab:
- password = extra.pop('password', None)
- enabled = extra.pop('enabled', True)
- ins = this_table.insert().values(
+ def test_endpoint_filter_upgrade(self):
+ def assert_tables_columns_exist():
+ self.assertTableColumns('project_endpoint',
+ ['endpoint_id', 'project_id'])
+ self.assertTableColumns('endpoint_group',
+ ['id', 'name', 'description', 'filters'])
+ self.assertTableColumns('project_endpoint_group',
+ ['endpoint_group_id', 'project_id'])
+
+ self.assertTableDoesNotExist('project_endpoint')
+ self.upgrade(85)
+ assert_tables_columns_exist()
+
+ @mock.patch.object(migration_helpers, 'get_db_version', return_value=2)
+ def test_endpoint_filter_already_migrated(self, mock_endpoint_filter):
+
+ # Setting the return value to 2 simulates that the migration has
+ # already been run, so there's no need to create the table again.
+ self.upgrade(85)
+
+ mock_endpoint_filter.assert_any_call(extension='endpoint_filter',
+ engine=mock.ANY)
+
+ # It won't exist because we are mocking it, but we can verify
+ # that 085 did not create the table.
+ self.assertTableDoesNotExist('project_endpoint')
+ self.assertTableDoesNotExist('endpoint_group')
+ self.assertTableDoesNotExist('project_endpoint_group')
+
+ def test_add_trust_unique_constraint_upgrade(self):
+ self.upgrade(86)
+ inspector = reflection.Inspector.from_engine(self.engine)
+ constraints = inspector.get_unique_constraints('trust')
+ constraint_names = [constraint['name'] for constraint in constraints]
+ self.assertIn('duplicate_trust_constraint', constraint_names)
+
+ def test_add_domain_specific_roles(self):
+ """Check database upgraded successfully for domain specific roles.
+
+ The following items need to be checked:
+
+ - The domain_id column has been added
+ - That it has been added to the uniqueness constraints
+ - Existing roles have their domain_id columns set to the specific
+ string of '<<null>>'
+
+ """
+ NULL_DOMAIN_ID = '<<null>>'
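+ # NOTE: a sentinel string stands in for SQL NULL here, presumably
+ # because NULL values never compare equal and so could not be policed
+ # by the (name, domain_id) uniqueness constraint checked below.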
+
+ self.upgrade(87)
+ session = self.Session()
+ role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
+ # Add a role before we upgrade, so we can check that its new domain_id
+ # attribute is handled correctly
+ role_id = uuid.uuid4().hex
+ self.insert_dict(session, 'role',
+ {'id': role_id, 'name': uuid.uuid4().hex})
+ session.close()
+
+ self.upgrade(88)
+
+ session = self.Session()
+ self.metadata.clear()
+ self.assertTableColumns('role', ['id', 'name', 'domain_id', 'extra'])
+ # Check the domain_id has been added to the uniqueness constraint
+ inspector = reflection.Inspector.from_engine(self.engine)
+ constraints = inspector.get_unique_constraints('role')
+ constraint_columns = [
+ constraint['column_names'] for constraint in constraints
+ if constraint['name'] == 'ixu_role_name_domain_id']
+ self.assertIn('domain_id', constraint_columns[0])
+
+ # Now check our role has its domain_id attribute set correctly
+ role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
+ cols = [role_table.c.domain_id]
+ filter = role_table.c.id == role_id
+ statement = sqlalchemy.select(cols).where(filter)
+ role_entry = session.execute(statement).fetchone()
+ self.assertEqual(NULL_DOMAIN_ID, role_entry[0])
+
+ def test_add_root_of_all_domains(self):
+ NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+ self.upgrade(89)
+ session = self.Session()
+
+ domain_table = sqlalchemy.Table(
+ 'domain', self.metadata, autoload=True)
+ query = session.query(domain_table).filter_by(id=NULL_DOMAIN_ID)
+ domain_from_db = query.one()
+ self.assertIn(NULL_DOMAIN_ID, domain_from_db)
+
+ project_table = sqlalchemy.Table(
+ 'project', self.metadata, autoload=True)
+ query = session.query(project_table).filter_by(id=NULL_DOMAIN_ID)
+ project_from_db = query.one()
+ self.assertIn(NULL_DOMAIN_ID, project_from_db)
+
+ session.close()
+
+ def test_add_local_user_and_password_tables(self):
+ local_user_table = 'local_user'
+ password_table = 'password'
+ self.upgrade(89)
+ self.assertTableDoesNotExist(local_user_table)
+ self.assertTableDoesNotExist(password_table)
+ self.upgrade(90)
+ self.assertTableColumns(local_user_table,
+ ['id',
+ 'user_id',
+ 'domain_id',
+ 'name'])
+ self.assertTableColumns(password_table,
+ ['id',
+ 'local_user_id',
+ 'password'])
+
+ def test_migrate_data_to_local_user_and_password_tables(self):
+ def get_expected_users():
+ expected_users = []
+ for test_user in default_fixtures.USERS:
+ user = {}
+ user['id'] = uuid.uuid4().hex
+ user['name'] = test_user['name']
+ user['domain_id'] = test_user['domain_id']
+ user['password'] = test_user['password']
+ user['enabled'] = True
+ user['extra'] = json.dumps(uuid.uuid4().hex)
+ user['default_project_id'] = uuid.uuid4().hex
+ expected_users.append(user)
+ return expected_users
+
+ def add_users_to_db(expected_users, user_table):
+ for user in expected_users:
+ ins = user_table.insert().values(
{'id': user['id'],
'name': user['name'],
- 'password': password,
- 'enabled': bool(enabled),
- 'extra': json.dumps(extra)})
- else:
- if with_pass_enab_domain:
- password = extra.pop('password', None)
- enabled = extra.pop('enabled', True)
- extra.pop('domain_id')
- ins = this_table.insert().values(
- {'id': user['id'],
- 'name': user['name'],
- 'domain_id': user['domain_id'],
- 'password': password,
- 'enabled': bool(enabled),
- 'extra': json.dumps(extra)})
- else:
- ins = this_table.insert().values(
- {'id': user['id'],
- 'name': user['name'],
- 'extra': json.dumps(extra)})
- self.engine.execute(ins)
-
- def populate_tenant_table(self, with_desc_enab=False,
- with_desc_enab_domain=False):
- # Populate the appropriate fields in the tenant or
- # project table, depending on the parameters
- #
- # Default: id, name, extra
- # desc_enab: Add description, enabled as well
- # desc_enab_domain: Add description, enabled and domain as well,
- # plus use project instead of tenant
- #
- if with_desc_enab_domain:
- # By this time tenants are now projects
- this_table = sqlalchemy.Table("project",
- self.metadata,
+ 'domain_id': user['domain_id'],
+ 'password': user['password'],
+ 'enabled': user['enabled'],
+ 'extra': user['extra'],
+ 'default_project_id': user['default_project_id']})
+ ins.execute()
+
+ def get_users_from_db(user_table, local_user_table, password_table):
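+ # Join user -> local_user -> password so each user comes back as a
+ # single flat row matching the pre-migration shape.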
+ sel = (
+ sqlalchemy.select([user_table.c.id,
+ user_table.c.enabled,
+ user_table.c.extra,
+ user_table.c.default_project_id,
+ local_user_table.c.name,
+ local_user_table.c.domain_id,
+ password_table.c.password])
+ .select_from(user_table.join(local_user_table,
+ user_table.c.id ==
+ local_user_table.c.user_id)
+ .join(password_table,
+ local_user_table.c.id ==
+ password_table.c.local_user_id))
+ )
+ user_rows = sel.execute()
+ users = []
+ for row in user_rows:
+ users.append(
+ {'id': row['id'],
+ 'name': row['name'],
+ 'domain_id': row['domain_id'],
+ 'password': row['password'],
+ 'enabled': row['enabled'],
+ 'extra': row['extra'],
+ 'default_project_id': row['default_project_id']})
+ return users
+
+ meta = sqlalchemy.MetaData()
+ meta.bind = self.engine
+
+ user_table_name = 'user'
+ local_user_table_name = 'local_user'
+ password_table_name = 'password'
+
+ # populate current user table
+ self.upgrade(90)
+ user_table = sqlalchemy.Table(user_table_name, meta, autoload=True)
+ expected_users = get_expected_users()
+ add_users_to_db(expected_users, user_table)
+
+ # upgrade to migration and test
+ self.upgrade(91)
+ self.assertTableCountsMatch(user_table_name, local_user_table_name)
+ self.assertTableCountsMatch(local_user_table_name, password_table_name)
+ meta.clear()
+ user_table = sqlalchemy.Table(user_table_name, meta, autoload=True)
+ local_user_table = sqlalchemy.Table(local_user_table_name, meta,
+ autoload=True)
+ password_table = sqlalchemy.Table(password_table_name, meta,
autoload=True)
+ actual_users = get_users_from_db(user_table, local_user_table,
+ password_table)
+ self.assertListEqual(expected_users, actual_users)
+
+ def test_migrate_user_with_null_password_to_password_tables(self):
+ USER_TABLE_NAME = 'user'
+ LOCAL_USER_TABLE_NAME = 'local_user'
+ PASSWORD_TABLE_NAME = 'password'
+ self.upgrade(90)
+ user_ref = unit.new_user_ref(uuid.uuid4().hex)
+ user_ref.pop('password')
+ # pop the extra attribute, which isn't recognized by the SQL
+ # expression layer.
+ user_ref.pop('email')
+ session = self.Session()
+ self.insert_dict(session, USER_TABLE_NAME, user_ref)
+ self.metadata.clear()
+ self.upgrade(91)
+ # migration should be successful.
+ self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
+ # no new entry was added to the password table because the
+ # user doesn't have a password.
+ password_table = self.select_table(PASSWORD_TABLE_NAME)
+ rows = session.execute(password_table.count()).scalar()
+ self.assertEqual(0, rows)
+
+ def test_migrate_user_skip_user_already_exist_in_local_user(self):
+ USER_TABLE_NAME = 'user'
+ LOCAL_USER_TABLE_NAME = 'local_user'
+ self.upgrade(90)
+ user1_ref = unit.new_user_ref(uuid.uuid4().hex)
+ # pop the extra attribute, which isn't recognized by the SQL
+ # expression layer.
+ user1_ref.pop('email')
+ user2_ref = unit.new_user_ref(uuid.uuid4().hex)
+ user2_ref.pop('email')
+ session = self.Session()
+ self.insert_dict(session, USER_TABLE_NAME, user1_ref)
+ self.insert_dict(session, USER_TABLE_NAME, user2_ref)
+ user_id = user1_ref.pop('id')
+ user_name = user1_ref.pop('name')
+ domain_id = user1_ref.pop('domain_id')
+ local_user_ref = {'user_id': user_id, 'name': user_name,
+ 'domain_id': domain_id}
+ self.insert_dict(session, LOCAL_USER_TABLE_NAME, local_user_ref)
+ self.metadata.clear()
+ self.upgrade(91)
+ # the migration should succeed, with user2_ref migrated to the
+ # `local_user` table.
+ self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
+
+ def test_implied_roles_fk_on_delete_cascade(self):
+ if self.engine.name == 'sqlite':
+ self.skipTest('sqlite backend does not support foreign keys')
+
+ self.upgrade(92)
+
+ def _create_three_roles():
+ id_list = []
+ for _ in range(3):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ id_list.append(role['id'])
+ return id_list
+
+ role_id_list = _create_three_roles()
+ self.role_api.create_implied_role(role_id_list[0], role_id_list[1])
+ self.role_api.create_implied_role(role_id_list[0], role_id_list[2])
+
+ # assert that there are two roles implied by role 0.
+ implied_roles = self.role_api.list_implied_roles(role_id_list[0])
+ self.assertThat(implied_roles, matchers.HasLength(2))
+
+ self.role_api.delete_role(role_id_list[0])
+ # assert the cascade deletion is effective.
+ implied_roles = self.role_api.list_implied_roles(role_id_list[0])
+ self.assertThat(implied_roles, matchers.HasLength(0))
+
+ def test_domain_as_project_upgrade(self):
+
+ def _populate_domain_and_project_tables(session):
+ # Three domains, with various different attributes
+ self.domains = [{'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': True,
+ 'extra': {'description': uuid.uuid4().hex,
+ 'another_attribute': True}},
+ {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': True,
+ 'extra': {'description': uuid.uuid4().hex}},
+ {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': False}]
+ # Four projects, two top level, two children
+ self.projects = []
+ self.projects.append(unit.new_project_ref(
+ domain_id=self.domains[0]['id'],
+ parent_id=None))
+ self.projects.append(unit.new_project_ref(
+ domain_id=self.domains[0]['id'],
+ parent_id=self.projects[0]['id']))
+ self.projects.append(unit.new_project_ref(
+ domain_id=self.domains[1]['id'],
+ parent_id=None))
+ self.projects.append(unit.new_project_ref(
+ domain_id=self.domains[1]['id'],
+ parent_id=self.projects[2]['id']))
+
+ for domain in self.domains:
+ this_domain = domain.copy()
+ if 'extra' in this_domain:
+ this_domain['extra'] = json.dumps(this_domain['extra'])
+ self.insert_dict(session, 'domain', this_domain)
+ for project in self.projects:
+ self.insert_dict(session, 'project', project)
+
+ def _check_projects(projects):
+
+ def _assert_domain_matches_project(project):
+ for domain in self.domains:
+ if project.id == domain['id']:
+ self.assertEqual(domain['name'], project.name)
+ self.assertEqual(domain['enabled'], project.enabled)
+ if domain['id'] == self.domains[0]['id']:
+ self.assertEqual(domain['extra']['description'],
+ project.description)
+ self.assertEqual({'another_attribute': True},
+ json.loads(project.extra))
+ elif domain['id'] == self.domains[1]['id']:
+ self.assertEqual(domain['extra']['description'],
+ project.description)
+ self.assertEqual({}, json.loads(project.extra))
+
+ # We created 3 domains, which should now be projects acting as
+ # domains. To these we add the 4 original projects, plus the root
+ # of all domains row.
+ self.assertEqual(8, projects.count())
+
+ project_ids = []
+ for project in projects:
+ if project.is_domain:
+ self.assertEqual(NULL_DOMAIN_ID, project.domain_id)
+ self.assertIsNone(project.parent_id)
+ else:
+ self.assertIsNotNone(project.domain_id)
+ self.assertIsNotNone(project.parent_id)
+ project_ids.append(project.id)
+
+ for domain in self.domains:
+ self.assertIn(domain['id'], project_ids)
+ for project in self.projects:
+ self.assertIn(project['id'], project_ids)
+
+ # Now check the attributes of the domains came across OK
+ for project in projects:
+ _assert_domain_matches_project(project)
+
+ NULL_DOMAIN_ID = '<<keystone.domain.root>>'
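+ # NOTE: '<<keystone.domain.root>>' is the root row added by migration
+ # 089 (see test_add_root_of_all_domains); projects acting as domains
+ # are expected to carry it as their domain_id.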
+ self.upgrade(92)
+
+ session = self.Session()
+
+ _populate_domain_and_project_tables(session)
+
+ self.upgrade(93)
+ proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
+
+ projects = session.query(proj_table)
+ _check_projects(projects)
+
+ def test_add_federated_user_table(self):
+ federated_user_table = 'federated_user'
+ self.upgrade(93)
+ self.assertTableDoesNotExist(federated_user_table)
+ self.upgrade(94)
+ self.assertTableColumns(federated_user_table,
+ ['id',
+ 'user_id',
+ 'idp_id',
+ 'protocol_id',
+ 'unique_id',
+ 'display_name'])
+
+ def test_add_int_pkey_to_revocation_event_table(self):
+ meta = sqlalchemy.MetaData()
+ meta.bind = self.engine
+ REVOCATION_EVENT_TABLE_NAME = 'revocation_event'
+ self.upgrade(94)
+ revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
+ meta, autoload=True)
+ # assert id column is a string (before)
+ self.assertEqual('VARCHAR(64)', str(revocation_event_table.c.id.type))
+ self.upgrade(95)
+ meta.clear()
+ revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
+ meta, autoload=True)
+ # assert id column is an integer (after)
+ self.assertEqual('INTEGER', str(revocation_event_table.c.id.type))
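+ # NOTE: the rationale is assumed to be that an auto-incrementing
+ # integer key makes revocation-event inserts and ordering cheaper
+ # than the old 64-character string ids.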
+
+ def _add_unique_constraint_to_role_name(self,
+ constraint_name='ixu_role_name'):
+ meta = sqlalchemy.MetaData()
+ meta.bind = self.engine
+ role_table = sqlalchemy.Table('role', meta, autoload=True)
+ migrate.UniqueConstraint(role_table.c.name,
+ name=constraint_name).create()
+
+ def _drop_unique_constraint_to_role_name(self,
+ constraint_name='ixu_role_name'):
+ role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
+ migrate.UniqueConstraint(role_table.c.name,
+ name=constraint_name).drop()
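+ # NOTE: both helpers rely on sqlalchemy-migrate's changeset support,
+ # where UniqueConstraint(...).create() and .drop() emit the DDL
+ # immediately against the bound engine.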
+
+ def test_migration_88_drops_unique_constraint(self):
+ self.upgrade(87)
+ if self.engine.name == 'mysql':
+ self.assertTrue(self.does_index_exist('role', 'ixu_role_name'))
else:
- this_table = sqlalchemy.Table("tenant",
- self.metadata,
- autoload=True)
+ self.assertTrue(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+ self.upgrade(88)
+ if self.engine.name == 'mysql':
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
- for tenant in default_fixtures.TENANTS:
- extra = copy.deepcopy(tenant)
- extra.pop('id')
- extra.pop('name')
-
- if with_desc_enab:
- desc = extra.pop('description', None)
- enabled = extra.pop('enabled', True)
- ins = this_table.insert().values(
- {'id': tenant['id'],
- 'name': tenant['name'],
- 'description': desc,
- 'enabled': bool(enabled),
- 'extra': json.dumps(extra)})
- else:
- if with_desc_enab_domain:
- desc = extra.pop('description', None)
- enabled = extra.pop('enabled', True)
- extra.pop('domain_id')
- ins = this_table.insert().values(
- {'id': tenant['id'],
- 'name': tenant['name'],
- 'domain_id': tenant['domain_id'],
- 'description': desc,
- 'enabled': bool(enabled),
- 'extra': json.dumps(extra)})
- else:
- ins = this_table.insert().values(
- {'id': tenant['id'],
- 'name': tenant['name'],
- 'extra': json.dumps(extra)})
- self.engine.execute(ins)
-
- def _mysql_check_all_tables_innodb(self):
- database = self.engine.url.database
-
- connection = self.engine.connect()
- # sanity check
- total = connection.execute("SELECT count(*) "
- "from information_schema.TABLES "
- "where TABLE_SCHEMA='%(database)s'" %
- dict(database=database))
- self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
-
- noninnodb = connection.execute("SELECT table_name "
- "from information_schema.TABLES "
- "where TABLE_SCHEMA='%(database)s' "
- "and ENGINE!='InnoDB' "
- "and TABLE_NAME!='migrate_version'" %
- dict(database=database))
- names = [x[0] for x in noninnodb]
- self.assertEqual([], names,
- "Non-InnoDB tables exist")
-
- connection.close()
+ def test_migration_88_inconsistent_constraint_name(self):
+ self.upgrade(87)
+ self._drop_unique_constraint_to_role_name()
+
+ constraint_name = uuid.uuid4().hex
+ self._add_unique_constraint_to_role_name(
+ constraint_name=constraint_name)
+
+ if self.engine.name == 'mysql':
+ self.assertTrue(self.does_index_exist('role', constraint_name))
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertTrue(self.does_constraint_exist('role',
+ constraint_name))
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+
+ self.upgrade(88)
+ if self.engine.name == 'mysql':
+ self.assertFalse(self.does_index_exist('role', constraint_name))
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertFalse(self.does_constraint_exist('role',
+ constraint_name))
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+
+ def test_migration_96(self):
+ self.upgrade(95)
+ if self.engine.name == 'mysql':
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+
+ self.upgrade(96)
+ if self.engine.name == 'mysql':
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+
+ def test_migration_96_constraint_exists(self):
+ self.upgrade(95)
+ self._add_unique_constraint_to_role_name()
+
+ if self.engine.name == 'mysql':
+ self.assertTrue(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertTrue(self.does_constraint_exist('role',
+ 'ixu_role_name'))
+
+ self.upgrade(96)
+ if self.engine.name == 'mysql':
+ self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
+ else:
+ self.assertFalse(self.does_constraint_exist('role',
+ 'ixu_role_name'))
class VersionTests(SqlMigrateBase):
- _initial_db_version = migrate_repo.DB_INIT_VERSION
+ _initial_db_version = migration_helpers.get_init_version()
def test_core_initial(self):
"""Get the version before migrated, it's the initial DB version."""
version = migration_helpers.get_db_version()
- self.assertEqual(migrate_repo.DB_INIT_VERSION, version)
+ self.assertEqual(self._initial_db_version, version)
def test_core_max(self):
"""When get the version after upgrading, it's the new version."""
@@ -793,97 +1181,15 @@ class VersionTests(SqlMigrateBase):
migration_helpers.get_db_version,
extension='federation')
- def test_extension_initial(self):
- """When get the initial version of an extension, it's 0."""
- for name, extension in EXTENSIONS.items():
- abs_path = migration_helpers.find_migrate_repo(extension)
- migration.db_version_control(sql.get_engine(), abs_path)
- version = migration_helpers.get_db_version(extension=name)
- self.assertEqual(0, version,
- 'Migrate version for %s is not 0' % name)
-
- def test_extension_migrated(self):
- """When get the version after migrating an extension, it's not 0."""
- for name, extension in EXTENSIONS.items():
- abs_path = migration_helpers.find_migrate_repo(extension)
- migration.db_version_control(sql.get_engine(), abs_path)
- migration.db_sync(sql.get_engine(), abs_path)
- version = migration_helpers.get_db_version(extension=name)
- self.assertTrue(
- version > 0,
- "Version for %s didn't change after migrated?" % name)
- # Verify downgrades cannot occur
- self.assertRaises(
- db_exception.DbMigrationError,
- migration_helpers._sync_extension_repo,
- extension=name,
- version=0)
-
- def test_extension_federation_upgraded_values(self):
- abs_path = migration_helpers.find_migrate_repo(federation)
- migration.db_version_control(sql.get_engine(), abs_path)
- migration.db_sync(sql.get_engine(), abs_path, version=6)
- idp_table = sqlalchemy.Table("identity_provider",
- self.metadata,
- autoload=True)
- idps = [{'id': uuid.uuid4().hex,
- 'enabled': True,
- 'description': uuid.uuid4().hex,
- 'remote_id': uuid.uuid4().hex},
- {'id': uuid.uuid4().hex,
- 'enabled': True,
- 'description': uuid.uuid4().hex,
- 'remote_id': uuid.uuid4().hex}]
- for idp in idps:
- ins = idp_table.insert().values({'id': idp['id'],
- 'enabled': idp['enabled'],
- 'description': idp['description'],
- 'remote_id': idp['remote_id']})
- self.engine.execute(ins)
- migration.db_sync(sql.get_engine(), abs_path)
- idp_remote_ids_table = sqlalchemy.Table("idp_remote_ids",
- self.metadata,
- autoload=True)
- for idp in idps:
- s = idp_remote_ids_table.select().where(
- idp_remote_ids_table.c.idp_id == idp['id'])
- remote = self.engine.execute(s).fetchone()
- self.assertEqual(idp['remote_id'],
- remote['remote_id'],
- 'remote_ids must be preserved during the '
- 'migration from identity_provider table to '
- 'idp_remote_ids table')
-
def test_unexpected_extension(self):
- """The version for an extension that doesn't exist raises ImportError.
-
- """
-
+ """The version for a non-existent extension raises ImportError."""
extension_name = uuid.uuid4().hex
self.assertRaises(ImportError,
migration_helpers.get_db_version,
extension=extension_name)
def test_unversioned_extension(self):
- """The version for extensions without migrations raise an exception.
-
- """
-
+ """The version for extensions without migrations raise an exception."""
self.assertRaises(exception.MigrationNotProvided,
migration_helpers.get_db_version,
extension='admin_crud')
-
- def test_initial_with_extension_version_None(self):
- """When performing a default migration, also migrate extensions."""
- migration_helpers.sync_database_to_version(extension=None,
- version=None)
- for table in INITIAL_EXTENSION_TABLE_STRUCTURE:
- self.assertTableColumns(table,
- INITIAL_EXTENSION_TABLE_STRUCTURE[table])
-
- def test_initial_with_extension_version_max(self):
- """When migrating to max version, do not migrate extensions."""
- migration_helpers.sync_database_to_version(extension=None,
- version=self.max_version)
- for table in INITIAL_EXTENSION_TABLE_STRUCTURE:
- self.assertTableDoesNotExist(table)
diff --git a/keystone-moon/keystone/tests/unit/test_token_provider.py b/keystone-moon/keystone/tests/unit/test_token_provider.py
index f60f7d53..5c71363b 100644
--- a/keystone-moon/keystone/tests/unit/test_token_provider.py
+++ b/keystone-moon/keystone/tests/unit/test_token_provider.py
@@ -16,6 +16,7 @@ import datetime
from oslo_config import cfg
from oslo_utils import timeutils
+from six.moves import reload_module
from keystone.common import dependency
from keystone.common import utils
@@ -781,6 +782,12 @@ class TestTokenProvider(unit.TestCase):
self.assertIsNone(
self.token_provider_api._is_valid_token(create_v3_token()))
+ def test_no_token_raises_token_not_found(self):
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ None)
+
# NOTE(ayoung): renamed to avoid automatic test detection
class PKIProviderTests(object):
@@ -803,7 +810,8 @@ class PKIProviderTests(object):
self.cms.subprocess = self.target_subprocess
self.environment.subprocess = self.target_subprocess
- reload(pki) # force module reload so the imports get re-evaluated
+ # force module reload so the imports get re-evaluated
+ reload_module(pki)
def test_get_token_id_error_handling(self):
# cause command-line failure
diff --git a/keystone-moon/keystone/tests/unit/test_url_middleware.py b/keystone-moon/keystone/tests/unit/test_url_middleware.py
index 217b302d..3b160b93 100644
--- a/keystone-moon/keystone/tests/unit/test_url_middleware.py
+++ b/keystone-moon/keystone/tests/unit/test_url_middleware.py
@@ -20,6 +20,7 @@ from keystone.tests import unit
class FakeApp(object):
"""Fakes a WSGI app URL normalized."""
+
def __call__(self, env, start_response):
resp = webob.Response()
resp.body = 'SUCCESS'
diff --git a/keystone-moon/keystone/tests/unit/test_v2.py b/keystone-moon/keystone/tests/unit/test_v2.py
index acdfca5f..e81c6040 100644
--- a/keystone-moon/keystone/tests/unit/test_v2.py
+++ b/keystone-moon/keystone/tests/unit/test_v2.py
@@ -23,9 +23,11 @@ from six.moves import http_client
from testtools import matchers
from keystone.common import extension as keystone_extension
+from keystone.tests import unit
+from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import rest
-
+from keystone.tests.unit.schema import v2
CONF = cfg.CONF
@@ -106,11 +108,11 @@ class CoreApiTests(object):
self.assertValidExtensionListResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
- def test_admin_extensions_404(self):
+ def test_admin_extensions_returns_not_found(self):
self.admin_request(path='/v2.0/extensions/invalid-extension',
expected_status=http_client.NOT_FOUND)
- def test_public_osksadm_extension_404(self):
+ def test_public_osksadm_extension_returns_not_found(self):
self.public_request(path='/v2.0/extensions/OS-KSADM',
expected_status=http_client.NOT_FOUND)
@@ -132,7 +134,7 @@ class CoreApiTests(object):
'tenantId': self.tenant_bar['id'],
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_authenticate_unscoped(self):
@@ -147,7 +149,7 @@ class CoreApiTests(object):
},
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidAuthenticationResponse(r)
def test_get_tenants_for_token(self):
@@ -164,7 +166,7 @@ class CoreApiTests(object):
token=token)
self.assertValidAuthenticationResponse(r)
- def test_invalid_token_404(self):
+ def test_invalid_token_returns_not_found(self):
token = self.get_scoped_token()
self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
@@ -179,7 +181,8 @@ class CoreApiTests(object):
self.tenant_service['id'],
self.role_service['id'])
- token = self.get_scoped_token(tenant_id='service')
+ token = self.get_scoped_token(
+ tenant_id=default_fixtures.SERVICE_TENANT_ID)
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
@@ -191,7 +194,8 @@ class CoreApiTests(object):
self.tenant_service['id'],
self.role_service['id'])
- token = self.get_scoped_token(tenant_id='service')
+ token = self.get_scoped_token(
+ tenant_id=default_fixtures.SERVICE_TENANT_ID)
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
@@ -234,7 +238,7 @@ class CoreApiTests(object):
'token_id': token,
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
def test_endpoints(self):
token = self.get_scoped_token()
@@ -273,6 +277,14 @@ class CoreApiTests(object):
token=token)
self.assertValidRoleListResponse(r)
+ def test_get_user_roles_without_tenant(self):
+ token = self.get_scoped_token()
+ self.admin_request(
+ path='/v2.0/users/%(user_id)s/roles' % {
+ 'user_id': self.user_foo['id'],
+ },
+ token=token, expected_status=http_client.NOT_IMPLEMENTED)
+
def test_get_user(self):
token = self.get_scoped_token()
r = self.admin_request(
@@ -370,7 +382,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
def test_error_response(self):
"""This triggers assertValidErrorResponse by convention."""
@@ -459,7 +471,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
@@ -470,7 +482,7 @@ class CoreApiTests(object):
'user_id': user_id
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Create a new tenant
@@ -485,7 +497,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
project_id = self._get_project_id(r.result)
@@ -501,7 +513,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
# 'member_role' should be in new_tenant
r = self.admin_request(
@@ -510,7 +522,7 @@ class CoreApiTests(object):
'user_id': user_id
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertEqual('_member_', self._get_role_name(r.result))
# 'member_role' should not be in tenant_bar any more
@@ -520,7 +532,7 @@ class CoreApiTests(object):
'user_id': user_id
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertNoRoles(r.result)
def test_update_user_with_invalid_tenant(self):
@@ -539,7 +551,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
@@ -571,7 +583,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
@@ -604,7 +616,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
@@ -615,7 +627,7 @@ class CoreApiTests(object):
'user_id': user_id
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Update user's tenant with old tenant id
@@ -630,7 +642,7 @@ class CoreApiTests(object):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
# 'member_role' should still be in tenant_bar
r = self.admin_request(
@@ -639,7 +651,7 @@ class CoreApiTests(object):
'user_id': user_id
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertEqual('_member_', self._get_role_name(r.result))
def test_authenticating_a_user_with_no_password(self):
@@ -721,7 +733,7 @@ class LegacyV2UsernameTests(object):
path='/v2.0/users',
token=token,
body=body,
- expected_status=200)
+ expected_status=http_client.OK)
def test_create_with_extra_username(self):
"""The response for creating a user will contain the extra fields."""
@@ -772,7 +784,7 @@ class LegacyV2UsernameTests(object):
'enabled': enabled,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -802,7 +814,7 @@ class LegacyV2UsernameTests(object):
'enabled': enabled,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -881,7 +893,7 @@ class LegacyV2UsernameTests(object):
'enabled': enabled,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -911,7 +923,7 @@ class LegacyV2UsernameTests(object):
'enabled': enabled,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -931,7 +943,7 @@ class LegacyV2UsernameTests(object):
'enabled': True,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -956,7 +968,7 @@ class LegacyV2UsernameTests(object):
'enabled': enabled,
},
},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidUserResponse(r)
@@ -979,6 +991,14 @@ class RestfulTestCase(rest.RestfulTestCase):
class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
+
+ def config_overrides(self):
+ super(V2TestCase, self).config_overrides()
+ self.config_fixture.config(
+ group='catalog',
+ driver='templated',
+ template_file=unit.dirs.tests('default_catalog.templates'))
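+ # NOTE: the templated catalog driver is assumed to serve the catalog
+ # from the bundled default_catalog.templates file, so these v2 tests
+ # need no SQL catalog fixtures.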
+
def _get_user_id(self, r):
return r['user']['id']
@@ -1200,7 +1220,7 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
method='GET',
path='/v2.0/tokens/revoked',
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidRevocationListResponse(r)
def assertValidRevocationListResponse(self, response):
@@ -1231,7 +1251,7 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
method='GET',
path='/v2.0/tokens/revoked',
token=token1,
- expected_status=200)
+ expected_status=http_client.OK)
signed_text = r.result['signed']
data_json = cms.cms_verify(signed_text, CONF.signing.certfile,
@@ -1242,10 +1262,11 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
return (data, token2)
def test_fetch_revocation_list_md5(self):
- """If the server is configured for md5, then the revocation list has
- tokens hashed with MD5.
- """
+ """Hash for tokens in revocation list and server config should match.
+ If the server is configured for md5, then the revocation list has
+ tokens hashed with MD5.
+ """
# The default hash algorithm is md5.
hash_algorithm = 'md5'
@@ -1254,10 +1275,11 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))
def test_fetch_revocation_list_sha256(self):
- """If the server is configured for sha256, then the revocation list has
- tokens hashed with SHA256
- """
+ """Hash for tokens in revocation list and server config should match.
+ If the server is configured for sha256, then the revocation list has
+ tokens hashed with SHA256.
+ """
hash_algorithm = 'sha256'
self.config_fixture.config(group='token',
hash_algorithm=hash_algorithm)
@@ -1333,7 +1355,7 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
},
},
},
- expected_status=200)
+ expected_status=http_client.OK)
# ensure password doesn't leak
user_id = r.result['user']['id']
@@ -1341,7 +1363,7 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
method='GET',
path='/v2.0/users/%s' % user_id,
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
self.assertNotIn('OS-KSADM:password', r.result['user'])
def test_updating_a_user_with_an_OSKSADM_password(self):
@@ -1360,7 +1382,7 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
},
},
token=token,
- expected_status=200)
+ expected_status=http_client.OK)
# successfully authenticate
self.public_request(
@@ -1374,13 +1396,12 @@ class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
},
},
},
- expected_status=200)
+ expected_status=http_client.OK)
class RevokeApiTestCase(V2TestCase):
def config_overrides(self):
super(RevokeApiTestCase, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
provider='pki',
@@ -1402,6 +1423,27 @@ class TestFernetTokenProviderV2(RestfulTestCase):
super(TestFernetTokenProviderV2, self).setUp()
self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+ # Add catalog data
+ self.region = unit.new_region_ref()
+ self.region_id = self.region['id']
+ self.catalog_api.create_region(self.region)
+
+ self.service = unit.new_service_ref()
+ self.service_id = self.service['id']
+ self.catalog_api.create_service(self.service_id, self.service)
+
+ self.endpoint = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
+ self.endpoint_id = self.endpoint['id']
+ self.catalog_api.create_endpoint(self.endpoint_id, self.endpoint)
+
+ def assertValidUnscopedTokenResponse(self, r):
+ v2.unscoped_validator.validate(r.json['access'])
+
+ def assertValidScopedTokenResponse(self, r):
+ v2.scoped_validator.validate(r.json['access'])
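+ # NOTE: v2 here is keystone.tests.unit.schema.v2 (imported above);
+ # its validators are assumed to be JSON-schema checks over the
+ # 'access' portion of the token response.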
+
# Used by RestfulTestCase
def _get_token_id(self, r):
return r.result['access']['token']['id']
@@ -1432,11 +1474,12 @@ class TestFernetTokenProviderV2(RestfulTestCase):
admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
unscoped_token = self.get_unscoped_token()
path = ('/v2.0/tokens/%s' % unscoped_token)
- self.admin_request(
+ resp = self.admin_request(
method='GET',
path=path,
token=admin_token,
- expected_status=200)
+ expected_status=http_client.OK)
+ self.assertValidUnscopedTokenResponse(resp)
def test_authenticate_scoped_token(self):
project_ref = self.new_project_ref()
@@ -1462,11 +1505,12 @@ class TestFernetTokenProviderV2(RestfulTestCase):
path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
project2_ref['id']))
# Validate token belongs to project
- self.admin_request(
+ resp = self.admin_request(
method='GET',
path=path,
token=admin_token,
- expected_status=200)
+ expected_status=http_client.OK)
+ self.assertValidScopedTokenResponse(resp)
def test_token_authentication_and_validation(self):
"""Test token authentication for Fernet token provider.
@@ -1491,16 +1535,17 @@ class TestFernetTokenProviderV2(RestfulTestCase):
}
}
},
- expected_status=200)
+ expected_status=http_client.OK)
token_id = self._get_token_id(r)
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
# Validate token belongs to project
- self.admin_request(
+ resp = self.admin_request(
method='GET',
path=path,
- token=CONF.admin_token,
- expected_status=200)
+ token=self.get_admin_token(),
+ expected_status=http_client.OK)
+ self.assertValidScopedTokenResponse(resp)
def test_rescoped_tokens_maintain_original_expiration(self):
project_ref = self.new_project_ref()
@@ -1522,7 +1567,7 @@ class TestFernetTokenProviderV2(RestfulTestCase):
},
# NOTE(lbragstad): This test may need to be refactored if Keystone
# decides to disallow rescoping using a scoped token.
- expected_status=200)
+ expected_status=http_client.OK)
original_token = resp.result['access']['token']['id']
original_expiration = resp.result['access']['token']['expires']
@@ -1537,8 +1582,9 @@ class TestFernetTokenProviderV2(RestfulTestCase):
}
}
},
- expected_status=200)
+ expected_status=http_client.OK)
rescoped_token = resp.result['access']['token']['id']
rescoped_expiration = resp.result['access']['token']['expires']
self.assertNotEqual(original_token, rescoped_token)
self.assertEqual(original_expiration, rescoped_expiration)
+ self.assertValidScopedTokenResponse(resp)
diff --git a/keystone-moon/keystone/tests/unit/test_v2_controller.py b/keystone-moon/keystone/tests/unit/test_v2_controller.py
index 581e6b9c..6cf8bc53 100644
--- a/keystone-moon/keystone/tests/unit/test_v2_controller.py
+++ b/keystone-moon/keystone/tests/unit/test_v2_controller.py
@@ -13,8 +13,11 @@
# under the License.
+import copy
import uuid
+from testtools import matchers
+
from keystone.assignment import controllers as assignment_controllers
from keystone import exception
from keystone.resource import controllers as resource_controllers
@@ -32,6 +35,7 @@ class TenantTestCase(unit.TestCase):
These tests exercise :class:`keystone.assignment.controllers.Tenant`.
"""
+
def setUp(self):
super(TenantTestCase, self).setUp()
self.useFixture(database.Database())
@@ -73,17 +77,18 @@ class TenantTestCase(unit.TestCase):
def test_list_projects_default_domain(self):
"""Test that list projects only returns those in the default domain."""
-
- domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'enabled': True}
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain['id']}
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- # Check the real total number of projects, we should have the above
- # plus those in the default features
+ # Check the real total number of projects; we should have:
+ # - the tenants in the default fixtures
+ # - the project representing the default domain
+ # - the project representing the domain we created above
+ # - the project we created above
refs = self.resource_api.list_projects()
- self.assertEqual(len(default_fixtures.TENANTS) + 1, len(refs))
+ self.assertThat(
+ refs, matchers.HasLength(len(default_fixtures.TENANTS) + 3))
# Now list all projects using the v2 API - we should only get
# back those in the default features, since only those are in the
@@ -98,11 +103,52 @@ class TenantTestCase(unit.TestCase):
self.assertIn(tenant_copy, refs['tenants'])
def _create_is_domain_project(self):
- project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': 'default', 'is_domain': True}
+ project = unit.new_project_ref(is_domain=True)
project_ref = self.resource_api.create_project(project['id'], project)
return self.tenant_controller.v3_to_v2_project(project_ref)
+ def test_get_is_domain_project_not_found(self):
+ """Test that get project does not return is_domain projects."""
+ project = self._create_is_domain_project()
+
+ context = copy.deepcopy(_ADMIN_CONTEXT)
+ context['query_string']['name'] = project['name']
+
+ self.assertRaises(
+ exception.ProjectNotFound,
+ self.tenant_controller.get_all_projects,
+ context)
+
+ context = copy.deepcopy(_ADMIN_CONTEXT)
+ context['query_string']['name'] = project['id']
+
+ self.assertRaises(
+ exception.ProjectNotFound,
+ self.tenant_controller.get_all_projects,
+ context)
+
+ def test_create_is_domain_project_fails(self):
+ """Test that the creation of a project acting as a domain fails."""
+ project = {'name': uuid.uuid4().hex, 'domain_id': 'default',
+ 'is_domain': True}
+
+ self.assertRaises(
+ exception.ValidationError,
+ self.tenant_controller.create_project,
+ _ADMIN_CONTEXT,
+ project)
+
+ def test_create_project_passing_is_domain_false_fails(self):
+ """Test that passing is_domain=False is not allowed."""
+ project = {'name': uuid.uuid4().hex, 'domain_id': 'default',
+ 'is_domain': False}
+
+ self.assertRaises(
+ exception.ValidationError,
+ self.tenant_controller.create_project,
+ _ADMIN_CONTEXT,
+ project)
+
def test_update_is_domain_project_not_found(self):
"""Test that update is_domain project is not allowed in v2."""
project = self._create_is_domain_project()
@@ -113,8 +159,7 @@ class TenantTestCase(unit.TestCase):
self.tenant_controller.update_project,
_ADMIN_CONTEXT,
project['id'],
- project
- )
+ project)
def test_delete_is_domain_project_not_found(self):
"""Test that delete is_domain project is not allowed in v2."""
@@ -124,14 +169,12 @@ class TenantTestCase(unit.TestCase):
exception.ProjectNotFound,
self.tenant_controller.delete_project,
_ADMIN_CONTEXT,
- project['id']
- )
+ project['id'])
def test_list_is_domain_project_not_found(self):
"""Test v2 get_all_projects having projects that act as a domain.
- In v2 no project with the is_domain flag enabled should be
- returned.
+ In v2 no project with the is_domain flag enabled should be returned.
"""
project1 = self._create_is_domain_project()
project2 = self._create_is_domain_project()
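
The tests above swap hand-rolled dicts for the unit.new_*_ref() helpers. A
minimal sketch of what such a helper returns (the exact field set is an
assumption; this is a hypothetical stand-in, not the real helper):

    import uuid

    def new_project_ref(domain_id=None, parent_id=None, is_domain=False,
                        **kwargs):
        # Hypothetical stand-in for keystone.tests.unit.new_project_ref:
        # a project dict with random id/name and sane defaults.
        ref = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'enabled': True,
            'domain_id': domain_id,
            'parent_id': parent_id,
            'is_domain': is_domain,
        }
        ref.update(kwargs)
        return ref

Centralizing the refs this way keeps defaults such as enabled=True in one
place instead of scattered across tests.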
diff --git a/keystone-moon/keystone/tests/unit/test_v3.py b/keystone-moon/keystone/tests/unit/test_v3.py
index 32c5e295..216d8c79 100644
--- a/keystone-moon/keystone/tests/unit/test_v3.py
+++ b/keystone-moon/keystone/tests/unit/test_v3.py
@@ -12,20 +12,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
import uuid
+import mock
from oslo_config import cfg
+import oslo_context.context
from oslo_serialization import jsonutils
from oslo_utils import timeutils
+from six.moves import http_client
from testtools import matchers
+import webtest
from keystone import auth
from keystone.common import authorization
from keystone.common import cache
+from keystone.common.validation import validators
from keystone import exception
from keystone import middleware
-from keystone.policy.backends import rules
+from keystone.middleware import auth as middleware_auth
+from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import rest
@@ -38,6 +43,7 @@ TIME_FORMAT = unit.TIME_FORMAT
class AuthTestMixin(object):
"""To hold auth building helper functions."""
+
def build_auth_scope(self, project_id=None, project_name=None,
project_domain_id=None, project_domain_name=None,
domain_id=None, domain_name=None, trust_id=None,
@@ -116,7 +122,127 @@ class AuthTestMixin(object):
class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
- AuthTestMixin):
+ common_auth.AuthTestMixin):
+
+ def generate_token_schema(self, domain_scoped=False, project_scoped=False):
+ """Return a dictionary of token properties to validate against."""
+ properties = {
+ 'audit_ids': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ 'minItems': 1,
+ 'maxItems': 2,
+ },
+ 'bind': {
+ 'type': 'object',
+ 'properties': {
+ 'kerberos': {
+ 'type': 'string',
+ },
+ },
+ 'required': ['kerberos'],
+ 'additionalProperties': False,
+ },
+ 'expires_at': {'type': 'string'},
+ 'issued_at': {'type': 'string'},
+ 'methods': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ },
+ 'user': {
+ 'type': 'object',
+ 'required': ['id', 'name', 'domain'],
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'domain': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ },
+ 'required': ['id', 'name'],
+                        'additionalProperties': False,
+ }
+ },
+ 'additionalProperties': False,
+ }
+ }
+
+ if domain_scoped:
+ properties['catalog'] = {'type': 'array'}
+ properties['roles'] = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string', },
+ 'name': {'type': 'string', },
+ },
+ 'required': ['id', 'name', ],
+ 'additionalProperties': False,
+ },
+ 'minItems': 1,
+ }
+            properties['domain'] = {
+                'type': 'object',
+                'required': ['id', 'name'],
+                'properties': {
+                    'id': {'type': 'string'},
+                    'name': {'type': 'string'}
+                },
+                'additionalProperties': False
+            }
+ elif project_scoped:
+ properties['is_admin_project'] = {'type': 'boolean'}
+ properties['catalog'] = {'type': 'array'}
+ properties['roles'] = {'type': 'array'}
+ properties['project'] = {
+ 'type': ['object'],
+ 'required': ['id', 'name', 'domain'],
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'},
+ 'domain': {
+ 'type': ['object'],
+ 'required': ['id', 'name'],
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ },
+ 'additionalProperties': False
+ }
+ },
+ 'additionalProperties': False
+ }
+
+ schema = {
+ 'type': 'object',
+ 'properties': properties,
+ 'required': ['audit_ids', 'expires_at', 'issued_at', 'methods',
+ 'user'],
+ 'optional': ['bind'],
+ 'additionalProperties': False
+ }
+
+ if domain_scoped:
+ schema['required'].extend(['domain', 'roles'])
+ schema['optional'].append('catalog')
+ elif project_scoped:
+ schema['required'].append('project')
+ schema['optional'].append('catalog')
+ schema['optional'].append('OS-TRUST:trust')
+ schema['optional'].append('is_admin_project')
+
+ return schema
+
def config_files(self):
config_files = super(RestfulTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
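
The schema produced above is consumed by validators.SchemaValidator, which
this sketch assumes is a thin wrapper over the jsonschema library; the same
check can be reproduced directly with a cut-down schema:

    import jsonschema

    # Reduced version of the token schema, enough to show the mechanism
    # (the full schema also covers audit_ids, expiry fields, bind, etc.).
    schema = {
        'type': 'object',
        'properties': {
            'methods': {'type': 'array', 'items': {'type': 'string'}},
            'user': {
                'type': 'object',
                'required': ['id', 'name'],
                'properties': {'id': {'type': 'string'},
                               'name': {'type': 'string'}},
            },
        },
        'required': ['methods', 'user'],
        'additionalProperties': False,
    }

    token = {'methods': ['password'], 'user': {'id': 'u1', 'name': 'demo'}}
    jsonschema.validate(token, schema)  # raises ValidationError on mismatch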
@@ -146,9 +272,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
pass
def setUp(self, app_conf='keystone'):
- """Setup for v3 Restful Test Cases.
-
- """
+ """Setup for v3 Restful Test Cases."""
new_paste_file = self.generate_paste_config()
self.addCleanup(self.remove_generated_paste_config)
if new_paste_file:
@@ -158,16 +282,9 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
self.empty_context = {'environment': {}}
- # Initialize the policy engine and allow us to write to a temp
- # file in each test to create the policies
- rules.reset()
-
- # drop the policy rules
- self.addCleanup(rules.reset)
-
def load_backends(self):
# ensure the cache region instance is setup
- cache.configure_cache_region(cache.REGION)
+ cache.configure_cache()
super(RestfulTestCase, self).load_backends()
@@ -183,53 +300,42 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
try:
self.resource_api.get_domain(DEFAULT_DOMAIN_ID)
except exception.DomainNotFound:
- domain = {'description': (u'Owns users and tenants (i.e. '
- u'projects) available on Identity '
- u'API v2.'),
- 'enabled': True,
- 'id': DEFAULT_DOMAIN_ID,
- 'name': u'Default'}
+ domain = unit.new_domain_ref(
+                description=u'The default domain',
+ id=DEFAULT_DOMAIN_ID,
+ name=u'Default')
self.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain)
def load_sample_data(self):
self._populate_default_domain()
- self.domain_id = uuid.uuid4().hex
- self.domain = self.new_domain_ref()
- self.domain['id'] = self.domain_id
+ self.domain = unit.new_domain_ref()
+ self.domain_id = self.domain['id']
self.resource_api.create_domain(self.domain_id, self.domain)
- self.project_id = uuid.uuid4().hex
- self.project = self.new_project_ref(
- domain_id=self.domain_id)
- self.project['id'] = self.project_id
- self.resource_api.create_project(self.project_id, self.project)
+ self.project = unit.new_project_ref(domain_id=self.domain_id)
+ self.project_id = self.project['id']
+ self.project = self.resource_api.create_project(self.project_id,
+ self.project)
- self.user = self.new_user_ref(domain_id=self.domain_id)
- password = self.user['password']
- self.user = self.identity_api.create_user(self.user)
- self.user['password'] = password
+ self.user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
self.user_id = self.user['id']
self.default_domain_project_id = uuid.uuid4().hex
- self.default_domain_project = self.new_project_ref(
+ self.default_domain_project = unit.new_project_ref(
domain_id=DEFAULT_DOMAIN_ID)
self.default_domain_project['id'] = self.default_domain_project_id
self.resource_api.create_project(self.default_domain_project_id,
self.default_domain_project)
- self.default_domain_user = self.new_user_ref(
+ self.default_domain_user = unit.create_user(
+ self.identity_api,
domain_id=DEFAULT_DOMAIN_ID)
- password = self.default_domain_user['password']
- self.default_domain_user = (
- self.identity_api.create_user(self.default_domain_user))
- self.default_domain_user['password'] = password
self.default_domain_user_id = self.default_domain_user['id']
# create & grant policy.json's default role for admin_required
- self.role_id = uuid.uuid4().hex
- self.role = self.new_role_ref()
- self.role['id'] = self.role_id
- self.role['name'] = 'admin'
+ self.role = unit.new_role_ref(name='admin')
+ self.role_id = self.role['id']
self.role_api.create_role(self.role_id, self.role)
self.assignment_api.add_role_to_user_and_project(
self.user_id, self.project_id, self.role_id)
@@ -240,81 +346,35 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
self.default_domain_user_id, self.project_id,
self.role_id)
- self.region_id = uuid.uuid4().hex
- self.region = self.new_region_ref()
- self.region['id'] = self.region_id
- self.catalog_api.create_region(
- self.region.copy())
-
- self.service_id = uuid.uuid4().hex
- self.service = self.new_service_ref()
- self.service['id'] = self.service_id
- self.catalog_api.create_service(
- self.service_id,
- self.service.copy())
-
- self.endpoint_id = uuid.uuid4().hex
- self.endpoint = self.new_endpoint_ref(service_id=self.service_id)
- self.endpoint['id'] = self.endpoint_id
- self.endpoint['region_id'] = self.region['id']
- self.catalog_api.create_endpoint(
- self.endpoint_id,
- self.endpoint.copy())
- # The server adds 'enabled' and defaults to True.
- self.endpoint['enabled'] = True
-
- def new_ref(self):
- """Populates a ref with attributes common to some API entities."""
- return unit.new_ref()
-
- def new_region_ref(self):
- return unit.new_region_ref()
-
- def new_service_ref(self):
- return unit.new_service_ref()
-
- def new_endpoint_ref(self, service_id, interface='public', **kwargs):
- return unit.new_endpoint_ref(
- service_id, interface=interface, default_region_id=self.region_id,
- **kwargs)
-
- def new_domain_ref(self):
- return unit.new_domain_ref()
-
- def new_project_ref(self, domain_id=None, parent_id=None, is_domain=False):
- return unit.new_project_ref(domain_id=domain_id, parent_id=parent_id,
- is_domain=is_domain)
-
- def new_user_ref(self, domain_id, project_id=None):
- return unit.new_user_ref(domain_id, project_id=project_id)
-
- def new_group_ref(self, domain_id):
- return unit.new_group_ref(domain_id)
-
- def new_credential_ref(self, user_id, project_id=None, cred_type=None):
- return unit.new_credential_ref(user_id, project_id=project_id,
- cred_type=cred_type)
+ # Create "req_admin" user for simulating a real user instead of the
+ # admin_token_auth middleware
+ self.user_reqadmin = unit.create_user(self.identity_api,
+ DEFAULT_DOMAIN_ID)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user_reqadmin['id'],
+ self.default_domain_project_id,
+ self.role_id)
- def new_role_ref(self):
- return unit.new_role_ref()
+ self.region = unit.new_region_ref()
+ self.region_id = self.region['id']
+ self.catalog_api.create_region(self.region)
- def new_policy_ref(self):
- return unit.new_policy_ref()
+ self.service = unit.new_service_ref()
+ self.service_id = self.service['id']
+ self.catalog_api.create_service(self.service_id, self.service.copy())
- def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
- impersonation=None, expires=None, role_ids=None,
- role_names=None, remaining_uses=None,
- allow_redelegation=False):
- return unit.new_trust_ref(
- trustor_user_id, trustee_user_id, project_id=project_id,
- impersonation=impersonation, expires=expires, role_ids=role_ids,
- role_names=role_names, remaining_uses=remaining_uses,
- allow_redelegation=allow_redelegation)
+ self.endpoint = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
+ self.endpoint_id = self.endpoint['id']
+ self.catalog_api.create_endpoint(self.endpoint_id,
+ self.endpoint.copy())
+ # The server adds 'enabled' and defaults to True.
+ self.endpoint['enabled'] = True
def create_new_default_project_for_user(self, user_id, domain_id,
enable_project=True):
- ref = self.new_project_ref(domain_id=domain_id)
- ref['enabled'] = enable_project
+ ref = unit.new_project_ref(domain_id=domain_id, enabled=enable_project)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# set the user's preferred project
@@ -326,6 +386,34 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
return project
+ def get_admin_token(self):
+ """Convenience method so that we can test authenticated requests."""
+ r = self.admin_request(
+ method='POST',
+ path='/v3/auth/tokens',
+ body={
+ 'auth': {
+ 'identity': {
+ 'methods': ['password'],
+ 'password': {
+ 'user': {
+ 'name': self.user_reqadmin['name'],
+ 'password': self.user_reqadmin['password'],
+ 'domain': {
+ 'id': self.user_reqadmin['domain_id']
+ }
+ }
+ }
+ },
+ 'scope': {
+ 'project': {
+ 'id': self.default_domain_project_id,
+ }
+ }
+ }
+ })
+ return r.headers.get('X-Subject-Token')
+
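
Outside the test harness, the same token issuance looks like this with the
requests library (URL and credentials are placeholders, not values from this
change):

    import requests

    body = {'auth': {
        'identity': {'methods': ['password'],
                     'password': {'user': {'name': 'req_admin',
                                           'password': 'secret',
                                           'domain': {'id': 'default'}}}},
        'scope': {'project': {'id': 'some-project-id'}},
    }}
    resp = requests.post('http://localhost:5000/v3/auth/tokens', json=body)
    token = resp.headers['X-Subject-Token']  # the issued token id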
def get_unscoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
@@ -407,11 +495,10 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
def get_requested_token(self, auth):
"""Request the specific token we want."""
-
- r = self.v3_authenticate_token(auth)
+ r = self.v3_create_token(auth)
return r.headers.get('X-Subject-Token')
- def v3_authenticate_token(self, auth, expected_status=201):
+ def v3_create_token(self, auth, expected_status=http_client.CREATED):
return self.admin_request(method='POST',
path='/v3/auth/tokens',
body=auth,
@@ -440,42 +527,31 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
return self.admin_request(path=path, token=token, **kwargs)
- def get(self, path, **kwargs):
- r = self.v3_request(method='GET', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 200)
- return r
+ def get(self, path, expected_status=http_client.OK, **kwargs):
+ return self.v3_request(path, method='GET',
+ expected_status=expected_status, **kwargs)
- def head(self, path, **kwargs):
- r = self.v3_request(method='HEAD', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 204)
- self.assertEqual('', r.body)
+ def head(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
+ r = self.v3_request(path, method='HEAD',
+ expected_status=expected_status, **kwargs)
+ self.assertEqual(b'', r.body)
return r
- def post(self, path, **kwargs):
- r = self.v3_request(method='POST', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 201)
- return r
+ def post(self, path, expected_status=http_client.CREATED, **kwargs):
+ return self.v3_request(path, method='POST',
+ expected_status=expected_status, **kwargs)
- def put(self, path, **kwargs):
- r = self.v3_request(method='PUT', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 204)
- return r
+ def put(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
+ return self.v3_request(path, method='PUT',
+ expected_status=expected_status, **kwargs)
- def patch(self, path, **kwargs):
- r = self.v3_request(method='PATCH', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 200)
- return r
+ def patch(self, path, expected_status=http_client.OK, **kwargs):
+ return self.v3_request(path, method='PATCH',
+ expected_status=expected_status, **kwargs)
- def delete(self, path, **kwargs):
- r = self.v3_request(method='DELETE', path=path, **kwargs)
- if 'expected_status' not in kwargs:
- self.assertResponseStatus(r, 204)
- return r
+ def delete(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
+ return self.v3_request(path, method='DELETE',
+ expected_status=expected_status, **kwargs)
def assertValidErrorResponse(self, r):
resp = r.result
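
The named constants come from six.moves.http_client and are plain integers,
so the defaults above are equivalent to the literals they replace:

    from six.moves import http_client

    assert http_client.OK == 200
    assert http_client.CREATED == 201
    assert http_client.NO_CONTENT == 204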
@@ -582,7 +658,6 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
- self.assertIsInstance(dt, datetime.datetime)
def assertValidTokenResponse(self, r, user=None):
self.assertTrue(r.headers.get('X-Subject-Token'))
@@ -611,11 +686,10 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
-
- self.assertNotIn('roles', token)
- self.assertNotIn('catalog', token)
- self.assertNotIn('project', token)
- self.assertNotIn('domain', token)
+ validator_object = validators.SchemaValidator(
+ self.generate_token_schema()
+ )
+ validator_object.validate(token)
return token
@@ -623,6 +697,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
require_catalog = kwargs.pop('require_catalog', True)
endpoint_filter = kwargs.pop('endpoint_filter', False)
ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0)
+ is_admin_project = kwargs.pop('is_admin_project', False)
token = self.assertValidTokenResponse(r, *args, **kwargs)
if require_catalog:
@@ -650,40 +725,66 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
self.assertIn('id', role)
self.assertIn('name', role)
+ if is_admin_project:
+ # NOTE(samueldmq): We want to explicitly test for boolean
+ self.assertIs(True, token['is_admin_project'])
+ else:
+ self.assertNotIn('is_admin_project', token)
+
return token
def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
- self.assertIn('project', token)
- self.assertIn('id', token['project'])
- self.assertIn('name', token['project'])
- self.assertIn('domain', token['project'])
- self.assertIn('id', token['project']['domain'])
- self.assertIn('name', token['project']['domain'])
+ project_scoped_token_schema = self.generate_token_schema(
+ project_scoped=True)
+
+ if token.get('OS-TRUST:trust'):
+ trust_properties = {
+ 'OS-TRUST:trust': {
+ 'type': ['object'],
+ 'required': ['id', 'impersonation', 'trustor_user',
+ 'trustee_user'],
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'impersonation': {'type': 'boolean'},
+ 'trustor_user': {
+ 'type': 'object',
+ 'required': ['id'],
+ 'properties': {
+ 'id': {'type': 'string'}
+ },
+ 'additionalProperties': False
+ },
+ 'trustee_user': {
+ 'type': 'object',
+ 'required': ['id'],
+ 'properties': {
+ 'id': {'type': 'string'}
+ },
+ 'additionalProperties': False
+ }
+ },
+ 'additionalProperties': False
+ }
+ }
+ project_scoped_token_schema['properties'].update(trust_properties)
+
+ validator_object = validators.SchemaValidator(
+ project_scoped_token_schema)
+ validator_object.validate(token)
self.assertEqual(self.role_id, token['roles'][0]['id'])
return token
- def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
- token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
-
- trust = token.get('OS-TRUST:trust')
- self.assertIsNotNone(trust)
- self.assertIsNotNone(trust.get('id'))
- self.assertIsInstance(trust.get('impersonation'), bool)
- self.assertIsNotNone(trust.get('trustor_user'))
- self.assertIsNotNone(trust.get('trustee_user'))
- self.assertIsNotNone(trust['trustor_user'].get('id'))
- self.assertIsNotNone(trust['trustee_user'].get('id'))
-
def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
- self.assertIn('domain', token)
- self.assertIn('id', token['domain'])
- self.assertIn('name', token['domain'])
+ validator_object = validators.SchemaValidator(
+ self.generate_token_schema(domain_scoped=True)
+ )
+ validator_object.validate(token)
return token
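
For reference, a trust-scoped token carries a blob like the following
(a hand-written illustration that satisfies the OS-TRUST:trust sub-schema
above; all ids are invented):

    token_fragment = {
        'OS-TRUST:trust': {
            'id': '9a1b2c',
            'impersonation': False,
            'trustor_user': {'id': 'user-a'},
            'trustee_user': {'id': 'user-b'},
        },
    }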
@@ -876,7 +977,6 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
**kwargs)
def assertValidProject(self, entity, ref=None):
- self.assertIsNotNone(entity.get('domain_id'))
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
return entity
@@ -888,6 +988,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
resp,
'users',
self.assertValidUser,
+ keys_to_check=['name', 'enabled'],
*args,
**kwargs)
@@ -896,6 +997,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
resp,
'user',
self.assertValidUser,
+ keys_to_check=['name', 'enabled'],
*args,
**kwargs)
@@ -920,6 +1022,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
resp,
'groups',
self.assertValidGroup,
+ keys_to_check=['name', 'description', 'domain_id'],
*args,
**kwargs)
@@ -928,6 +1031,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
resp,
'group',
self.assertValidGroup,
+ keys_to_check=['name', 'description', 'domain_id'],
*args,
**kwargs)
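
keys_to_check tells the generic response validators which attributes to
compare against the reference entity. A rough sketch of the comparison this
change assumes (the real assertValidListResponse has more plumbing):

    def check_keys(entity, ref, keys_to_check):
        # Hypothetical core of the keys_to_check handling: every listed
        # key must be present and, when the ref supplies it, must match.
        for key in keys_to_check:
            assert key in entity
            if ref and key in ref:
                assert entity[key] == ref[key]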
@@ -979,6 +1083,21 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
*args,
**kwargs)
+ def assertRoleInListResponse(self, resp, ref, expected=1):
+ found_count = 0
+ for entity in resp.result.get('roles'):
+ try:
+ self.assertValidRole(entity, ref=ref)
+ except Exception:
+                # It doesn't match, so move on to the next one.
+ pass
+ else:
+ found_count += 1
+ self.assertEqual(expected, found_count)
+
+ def assertRoleNotInListResponse(self, resp, ref):
+ self.assertRoleInListResponse(resp, ref=ref, expected=0)
+
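
Illustrative usage (role and other_role are assumed fixtures, not part of
this change): counting matches lets a test pin down exactly how many
entities validate against the reference.

    r = self.get('/roles?name=%s' % role['name'])
    self.assertRoleInListResponse(r, ref=role)            # exactly one match
    self.assertRoleNotInListResponse(r, ref=other_role)   # zero matches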
def assertValidRoleResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
@@ -992,6 +1111,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
+ self.assertEqual(ref['domain_id'], entity['domain_id'])
return entity
# role assignment validation
@@ -1161,6 +1281,27 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
return entity
+ # Service providers (federation)
+
+ def assertValidServiceProvider(self, entity, ref=None, *args, **kwargs):
+
+ attributes = frozenset(['auth_url', 'id', 'enabled', 'description',
+ 'links', 'relay_state_prefix', 'sp_url'])
+ for attribute in attributes:
+ self.assertIsNotNone(entity.get(attribute))
+
+ def assertValidServiceProviderListResponse(self, resp, *args, **kwargs):
+ if kwargs.get('keys_to_check') is None:
+ kwargs['keys_to_check'] = ['auth_url', 'id', 'enabled',
+ 'description', 'relay_state_prefix',
+ 'sp_url']
+ return self.assertValidListResponse(
+ resp,
+ 'service_providers',
+ self.assertValidServiceProvider,
+ *args,
+ **kwargs)
+
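
A service provider entity that passes these checks looks roughly like this
(hand-written illustration; URLs and ids are invented):

    service_provider = {
        'id': 'BETA',
        'enabled': True,
        'description': 'Example service provider',
        'auth_url': 'https://sp.example.com:5000/v3/OS-FEDERATION/'
                    'identity_providers/idp/protocols/saml2/auth',
        'sp_url': 'https://sp.example.com:5000/Shibboleth.sso/SAML2/ECP',
        'relay_state_prefix': 'ss:mem:',
        'links': {'self': 'https://idp.example.com:5000/v3/OS-FEDERATION/'
                          'service_providers/BETA'},
    }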
def build_external_auth_request(self, remote_user,
remote_domain=None, auth_data=None,
kerberos=False):
@@ -1182,24 +1323,81 @@ class VersionTestCase(RestfulTestCase):
pass
+# NOTE(morganfainberg): To be removed when admin_token_auth is removed. This
+# has been split out to allow testing admin_token auth without enabling it
+# for other tests.
+class AuthContextMiddlewareAdminTokenTestCase(RestfulTestCase):
+ EXTENSION_TO_ADD = 'admin_token_auth'
+
+ def config_overrides(self):
+ super(AuthContextMiddlewareAdminTokenTestCase, self).config_overrides()
+ self.config_fixture.config(
+ admin_token='ADMIN')
+
+ # NOTE(morganfainberg): This is knowingly copied from below for simplicity
+ # during the deprecation cycle.
+ def _middleware_request(self, token, extra_environ=None):
+
+ def application(environ, start_response):
+ body = b'body'
+ headers = [('Content-Type', 'text/html; charset=utf8'),
+ ('Content-Length', str(len(body)))]
+ start_response('200 OK', headers)
+ return [body]
+
+ app = webtest.TestApp(middleware.AuthContextMiddleware(application),
+ extra_environ=extra_environ)
+ resp = app.get('/', headers={middleware.AUTH_TOKEN_HEADER: token})
+ self.assertEqual('body', resp.text) # just to make sure it worked
+ return resp.request
+
+ def test_admin_auth_context(self):
+ # test to make sure AuthContextMiddleware does not attempt to build the
+ # auth context if the admin_token middleware indicates it's admin
+ # already.
+ token_id = uuid.uuid4().hex # token doesn't matter.
+ # the admin_token middleware sets is_admin in the context.
+ extra_environ = {middleware.CONTEXT_ENV: {'is_admin': True}}
+ req = self._middleware_request(token_id, extra_environ)
+ auth_context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
+ self.assertDictEqual({}, auth_context)
+
+ @mock.patch.object(middleware_auth.versionutils,
+ 'report_deprecated_feature')
+ def test_admin_token_auth_context_deprecated(self, mock_report_deprecated):
+ # For backwards compatibility AuthContextMiddleware will check that the
+ # admin token (as configured in the CONF file) is present and not
+ # attempt to build the auth context. This is deprecated.
+ req = self._middleware_request('ADMIN')
+ auth_context = req.environ.get(authorization.AUTH_CONTEXT_ENV)
+ self.assertDictEqual({}, auth_context)
+ self.assertEqual(1, mock_report_deprecated.call_count)
+
+
# NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
# because we need the token
class AuthContextMiddlewareTestCase(RestfulTestCase):
- def _mock_request_object(self, token_id):
- class fake_req(object):
- headers = {middleware.AUTH_TOKEN_HEADER: token_id}
- environ = {}
+ def _middleware_request(self, token, extra_environ=None):
+
+ def application(environ, start_response):
+ body = b'body'
+ headers = [('Content-Type', 'text/html; charset=utf8'),
+ ('Content-Length', str(len(body)))]
+ start_response('200 OK', headers)
+ return [body]
- return fake_req()
+ app = webtest.TestApp(middleware.AuthContextMiddleware(application),
+ extra_environ=extra_environ)
+ resp = app.get('/', headers={middleware.AUTH_TOKEN_HEADER: token})
+ self.assertEqual(b'body', resp.body) # just to make sure it worked
+ return resp.request
def test_auth_context_build_by_middleware(self):
        # test to make sure AuthContextMiddleware successfully builds the
        # auth context from the incoming auth token
admin_token = self.get_scoped_token()
- req = self._mock_request_object(admin_token)
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
+ req = self._middleware_request(admin_token)
self.assertEqual(
self.user['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'])
@@ -1208,28 +1406,16 @@ class AuthContextMiddlewareTestCase(RestfulTestCase):
overridden_context = 'OVERRIDDEN_CONTEXT'
# this token should not be used
token = uuid.uuid4().hex
- req = self._mock_request_object(token)
- req.environ[authorization.AUTH_CONTEXT_ENV] = overridden_context
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
+
+ extra_environ = {authorization.AUTH_CONTEXT_ENV: overridden_context}
+ req = self._middleware_request(token, extra_environ=extra_environ)
        # make sure the overridden context takes precedence
self.assertEqual(overridden_context,
req.environ.get(authorization.AUTH_CONTEXT_ENV))
- def test_admin_token_auth_context(self):
- # test to make sure AuthContextMiddleware does not attempt to build
- # auth context if the incoming auth token is the special admin token
- req = self._mock_request_object(CONF.admin_token)
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
- self.assertDictEqual(req.environ.get(authorization.AUTH_CONTEXT_ENV),
- {})
-
def test_unscoped_token_auth_context(self):
unscoped_token = self.get_unscoped_token()
- req = self._mock_request_object(unscoped_token)
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
+ req = self._middleware_request(unscoped_token)
for key in ['project_id', 'domain_id', 'domain_name']:
self.assertNotIn(
key,
@@ -1237,9 +1423,7 @@ class AuthContextMiddlewareTestCase(RestfulTestCase):
def test_project_scoped_token_auth_context(self):
project_scoped_token = self.get_scoped_token()
- req = self._mock_request_object(project_scoped_token)
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
+ req = self._middleware_request(project_scoped_token)
self.assertEqual(
self.project['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id'])
@@ -1251,9 +1435,7 @@ class AuthContextMiddlewareTestCase(RestfulTestCase):
self.put(path=path)
domain_scoped_token = self.get_domain_scoped_token()
- req = self._mock_request_object(domain_scoped_token)
- application = None
- middleware.AuthContextMiddleware(application).process_request(req)
+ req = self._middleware_request(domain_scoped_token)
self.assertEqual(
self.domain['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id'])
@@ -1261,6 +1443,30 @@ class AuthContextMiddlewareTestCase(RestfulTestCase):
self.domain['name'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name'])
+ def test_oslo_context(self):
+ # After AuthContextMiddleware runs, an
+        # oslo_context.context.RequestContext is created so that its fields
+        # can be logged. This test validates that the RequestContext is
+        # created and that its fields are set as expected.
+
+ # Use a scoped token so more fields can be set.
+ token = self.get_scoped_token()
+
+ # oslo_middleware RequestId middleware sets openstack.request_id.
+ request_id = uuid.uuid4().hex
+ environ = {'openstack.request_id': request_id}
+ self._middleware_request(token, extra_environ=environ)
+
+ req_context = oslo_context.context.get_current()
+ self.assertEqual(request_id, req_context.request_id)
+ self.assertEqual(token, req_context.auth_token)
+ self.assertEqual(self.user['id'], req_context.user)
+ self.assertEqual(self.project['id'], req_context.tenant)
+ self.assertIsNone(req_context.domain)
+ self.assertEqual(self.user['domain_id'], req_context.user_domain)
+ self.assertEqual(self.project['domain_id'], req_context.project_domain)
+ self.assertFalse(req_context.is_admin)
+
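
test_oslo_context relies on oslo.context storing the most recently created
RequestContext in thread-local storage, where get_current() finds it. A
minimal sketch of that mechanism (constructor arguments reflect the
oslo.context API of this era):

    import oslo_context.context

    # Creating a RequestContext registers it as the thread-local "current"
    # context (overwrite=True is the default), which is what the middleware
    # relies on for logging.
    ctx = oslo_context.context.RequestContext(auth_token='a-token',
                                              user='u1', tenant='p1')
    assert oslo_context.context.get_current() is ctx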
class JsonHomeTestMixin(object):
"""JSON Home test
@@ -1273,6 +1479,7 @@ class JsonHomeTestMixin(object):
data must be in the response.
"""
+
def test_get_json_home(self):
resp = self.get('/', convert=False,
headers={'Accept': 'application/json-home'})
@@ -1295,7 +1502,6 @@ class AssignmentTestMixin(object):
Available filters are: domain_id, project_id, user_id, group_id,
role_id and inherited_to_projects.
"""
-
query_params = '?effective' if effective else ''
for k, v in filters.items():
@@ -1320,7 +1526,6 @@ class AssignmentTestMixin(object):
Provided attributes are expected to contain: domain_id or project_id,
user_id or group_id, role_id and, optionally, inherited_to_projects.
"""
-
if attribs.get('domain_id'):
link = '/domains/' + attribs['domain_id']
else:
@@ -1338,13 +1543,13 @@ class AssignmentTestMixin(object):
return link
- def build_role_assignment_entity(self, link=None, **attribs):
+ def build_role_assignment_entity(
+ self, link=None, prior_role_link=None, **attribs):
"""Build and return a role assignment entity with provided attributes.
Provided attributes are expected to contain: domain_id or project_id,
user_id or group_id, role_id and, optionally, inherited_to_projects.
"""
-
entity = {'links': {'assignment': (
link or self.build_role_assignment_link(**attribs))}}
@@ -1368,4 +1573,68 @@ class AssignmentTestMixin(object):
if attribs.get('inherited_to_projects'):
entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
+ if prior_role_link:
+ entity['links']['prior_role'] = prior_role_link
+
+ return entity
+
+ def build_role_assignment_entity_include_names(self,
+ domain_ref=None,
+ role_ref=None,
+ group_ref=None,
+ user_ref=None,
+ project_ref=None,
+ inherited_assignment=None):
+ """Build and return a role assignment entity with provided attributes.
+
+ The expected attributes are: domain_ref or project_ref,
+        user_ref or group_ref, role_ref and, optionally, inherited_assignment.
+ """
+ entity = {'links': {}}
+ attributes_for_links = {}
+ if project_ref:
+ dmn_name = self.resource_api.get_domain(
+ project_ref['domain_id'])['name']
+
+ entity['scope'] = {'project': {
+ 'id': project_ref['id'],
+ 'name': project_ref['name'],
+ 'domain': {
+ 'id': project_ref['domain_id'],
+ 'name': dmn_name}}}
+ attributes_for_links['project_id'] = project_ref['id']
+ else:
+ entity['scope'] = {'domain': {'id': domain_ref['id'],
+ 'name': domain_ref['name']}}
+ attributes_for_links['domain_id'] = domain_ref['id']
+ if user_ref:
+ dmn_name = self.resource_api.get_domain(
+ user_ref['domain_id'])['name']
+ entity['user'] = {'id': user_ref['id'],
+ 'name': user_ref['name'],
+ 'domain': {'id': user_ref['domain_id'],
+ 'name': dmn_name}}
+ attributes_for_links['user_id'] = user_ref['id']
+ else:
+ dmn_name = self.resource_api.get_domain(
+ group_ref['domain_id'])['name']
+ entity['group'] = {'id': group_ref['id'],
+ 'name': group_ref['name'],
+ 'domain': {
+ 'id': group_ref['domain_id'],
+ 'name': dmn_name}}
+ attributes_for_links['group_id'] = group_ref['id']
+
+ if role_ref:
+ entity['role'] = {'id': role_ref['id'],
+ 'name': role_ref['name']}
+ attributes_for_links['role_id'] = role_ref['id']
+
+ if inherited_assignment:
+ entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
+ attributes_for_links['inherited_to_projects'] = True
+
+ entity['links']['assignment'] = self.build_role_assignment_link(
+ **attributes_for_links)
+
return entity
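
Putting the builders together: for a user's role assignment on a project,
the helpers produce a link and an entity shaped roughly like this (ids
abbreviated by hand):

    link = '/projects/p1/users/u1/roles/r1'
    entity = {
        'links': {'assignment': link},
        'scope': {'project': {'id': 'p1'}},
        'user': {'id': 'u1'},
        'role': {'id': 'r1'},
    }

With include_names, the user/role/scope blocks additionally carry 'name'
(and, for users and groups, the owning domain's id and name), as built above.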
diff --git a/keystone-moon/keystone/tests/unit/test_v3_assignment.py b/keystone-moon/keystone/tests/unit/test_v3_assignment.py
index 6b15b1c3..86fb9f74 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_assignment.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_assignment.py
@@ -16,12 +16,10 @@ import uuid
from oslo_config import cfg
from six.moves import http_client
from six.moves import range
+from testtools import matchers
-from keystone.common import controller
-from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
-from keystone.tests.unit import utils
CONF = cfg.CONF
@@ -29,1042 +27,20 @@ CONF = cfg.CONF
class AssignmentTestCase(test_v3.RestfulTestCase,
test_v3.AssignmentTestMixin):
- """Test domains, projects, roles and role assignments."""
+ """Test roles and role assignments."""
def setUp(self):
super(AssignmentTestCase, self).setUp()
- self.group = self.new_group_ref(
- domain_id=self.domain_id)
+ self.group = unit.new_group_ref(domain_id=self.domain_id)
self.group = self.identity_api.create_group(self.group)
self.group_id = self.group['id']
- self.credential_id = uuid.uuid4().hex
- self.credential = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project_id)
- self.credential['id'] = self.credential_id
- self.credential_api.create_credential(
- self.credential_id,
- self.credential)
-
- # Domain CRUD tests
-
- def test_create_domain(self):
- """Call ``POST /domains``."""
- ref = self.new_domain_ref()
- r = self.post(
- '/domains',
- body={'domain': ref})
- return self.assertValidDomainResponse(r, ref)
-
- def test_create_domain_case_sensitivity(self):
- """Call `POST /domains`` twice with upper() and lower() cased name."""
- ref = self.new_domain_ref()
-
- # ensure the name is lowercase
- ref['name'] = ref['name'].lower()
- r = self.post(
- '/domains',
- body={'domain': ref})
- self.assertValidDomainResponse(r, ref)
-
- # ensure the name is uppercase
- ref['name'] = ref['name'].upper()
- r = self.post(
- '/domains',
- body={'domain': ref})
- self.assertValidDomainResponse(r, ref)
-
- def test_create_domain_bad_request(self):
- """Call ``POST /domains``."""
- self.post('/domains', body={'domain': {}},
- expected_status=http_client.BAD_REQUEST)
-
- def test_list_domains(self):
- """Call ``GET /domains``."""
- resource_url = '/domains'
- r = self.get(resource_url)
- self.assertValidDomainListResponse(r, ref=self.domain,
- resource_url=resource_url)
-
- def test_get_domain(self):
- """Call ``GET /domains/{domain_id}``."""
- r = self.get('/domains/%(domain_id)s' % {
- 'domain_id': self.domain_id})
- self.assertValidDomainResponse(r, self.domain)
-
- def test_update_domain(self):
- """Call ``PATCH /domains/{domain_id}``."""
- ref = self.new_domain_ref()
- del ref['id']
- r = self.patch('/domains/%(domain_id)s' % {
- 'domain_id': self.domain_id},
- body={'domain': ref})
- self.assertValidDomainResponse(r, ref)
-
- def test_disable_domain(self):
- """Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
- # Create a 2nd set of entities in a 2nd domain
- self.domain2 = self.new_domain_ref()
- self.resource_api.create_domain(self.domain2['id'], self.domain2)
-
- self.project2 = self.new_project_ref(
- domain_id=self.domain2['id'])
- self.resource_api.create_project(self.project2['id'], self.project2)
-
- self.user2 = self.new_user_ref(
- domain_id=self.domain2['id'],
- project_id=self.project2['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
-
- self.assignment_api.add_user_to_project(self.project2['id'],
- self.user2['id'])
-
- # First check a user in that domain can authenticate. The v2 user
- # cannot authenticate because they exist outside the default domain.
- body = {
- 'auth': {
- 'passwordCredentials': {
- 'userId': self.user2['id'],
- 'password': self.user2['password']
- },
- 'tenantId': self.project2['id']
- }
- }
- self.admin_request(
- path='/v2.0/tokens', method='POST', body=body,
- expected_status=http_client.UNAUTHORIZED)
-
- auth_data = self.build_authentication_request(
- user_id=self.user2['id'],
- password=self.user2['password'],
- project_id=self.project2['id'])
- self.v3_authenticate_token(auth_data)
-
- # Now disable the domain
- self.domain2['enabled'] = False
- r = self.patch('/domains/%(domain_id)s' % {
- 'domain_id': self.domain2['id']},
- body={'domain': {'enabled': False}})
- self.assertValidDomainResponse(r, self.domain2)
-
- # Make sure the user can no longer authenticate, via
- # either API
- body = {
- 'auth': {
- 'passwordCredentials': {
- 'userId': self.user2['id'],
- 'password': self.user2['password']
- },
- 'tenantId': self.project2['id']
- }
- }
- self.admin_request(
- path='/v2.0/tokens', method='POST', body=body,
- expected_status=http_client.UNAUTHORIZED)
-
- # Try looking up in v3 by name and id
- auth_data = self.build_authentication_request(
- user_id=self.user2['id'],
- password=self.user2['password'],
- project_id=self.project2['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
-
- auth_data = self.build_authentication_request(
- username=self.user2['name'],
- user_domain_id=self.domain2['id'],
- password=self.user2['password'],
- project_id=self.project2['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
-
- def test_delete_enabled_domain_fails(self):
- """Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
-
- # Try deleting an enabled domain, which should fail
- self.delete('/domains/%(domain_id)s' % {
- 'domain_id': self.domain['id']},
- expected_status=exception.ForbiddenAction.code)
-
- def test_delete_domain(self):
- """Call ``DELETE /domains/{domain_id}``.
-
- The sample data set up already has a user, group, project
- and credential that is part of self.domain. Since the user
- we will authenticate with is in this domain, we create a
- another set of entities in a second domain. Deleting this
- second domain should delete all these new entities. In addition,
- all the entities in the regular self.domain should be unaffected
- by the delete.
-
- Test Plan:
-
- - Create domain2 and a 2nd set of entities
- - Disable domain2
- - Delete domain2
- - Check entities in domain2 have been deleted
- - Check entities in self.domain are unaffected
-
- """
-
- # Create a 2nd set of entities in a 2nd domain
- self.domain2 = self.new_domain_ref()
- self.resource_api.create_domain(self.domain2['id'], self.domain2)
-
- self.project2 = self.new_project_ref(
- domain_id=self.domain2['id'])
- self.resource_api.create_project(self.project2['id'], self.project2)
-
- self.user2 = self.new_user_ref(
- domain_id=self.domain2['id'],
- project_id=self.project2['id'])
- self.user2 = self.identity_api.create_user(self.user2)
-
- self.group2 = self.new_group_ref(
- domain_id=self.domain2['id'])
- self.group2 = self.identity_api.create_group(self.group2)
-
- self.credential2 = self.new_credential_ref(
- user_id=self.user2['id'],
- project_id=self.project2['id'])
- self.credential_api.create_credential(
- self.credential2['id'],
- self.credential2)
-
- # Now disable the new domain and delete it
- self.domain2['enabled'] = False
- r = self.patch('/domains/%(domain_id)s' % {
- 'domain_id': self.domain2['id']},
- body={'domain': {'enabled': False}})
- self.assertValidDomainResponse(r, self.domain2)
- self.delete('/domains/%(domain_id)s' % {
- 'domain_id': self.domain2['id']})
-
- # Check all the domain2 relevant entities are gone
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.get_domain,
- self.domain2['id'])
- self.assertRaises(exception.ProjectNotFound,
- self.resource_api.get_project,
- self.project2['id'])
- self.assertRaises(exception.GroupNotFound,
- self.identity_api.get_group,
- self.group2['id'])
- self.assertRaises(exception.UserNotFound,
- self.identity_api.get_user,
- self.user2['id'])
- self.assertRaises(exception.CredentialNotFound,
- self.credential_api.get_credential,
- self.credential2['id'])
-
- # ...and that all self.domain entities are still here
- r = self.resource_api.get_domain(self.domain['id'])
- self.assertDictEqual(r, self.domain)
- r = self.resource_api.get_project(self.project['id'])
- self.assertDictEqual(r, self.project)
- r = self.identity_api.get_group(self.group['id'])
- self.assertDictEqual(r, self.group)
- r = self.identity_api.get_user(self.user['id'])
- self.user.pop('password')
- self.assertDictEqual(r, self.user)
- r = self.credential_api.get_credential(self.credential['id'])
- self.assertDictEqual(r, self.credential)
-
- def test_delete_default_domain_fails(self):
- # Attempting to delete the default domain results in 403 Forbidden.
-
- # Need to disable it first.
- self.patch('/domains/%(domain_id)s' % {
- 'domain_id': CONF.identity.default_domain_id},
- body={'domain': {'enabled': False}})
-
- self.delete('/domains/%(domain_id)s' % {
- 'domain_id': CONF.identity.default_domain_id},
- expected_status=exception.ForbiddenAction.code)
-
- def test_delete_new_default_domain_fails(self):
- # If change the default domain ID, deleting the new default domain
- # results in a 403 Forbidden.
-
- # Create a new domain that's not the default
- new_domain = self.new_domain_ref()
- new_domain_id = new_domain['id']
- self.resource_api.create_domain(new_domain_id, new_domain)
-
- # Disable the new domain so can delete it later.
- self.patch('/domains/%(domain_id)s' % {
- 'domain_id': new_domain_id},
- body={'domain': {'enabled': False}})
-
- # Change the default domain
- self.config_fixture.config(group='identity',
- default_domain_id=new_domain_id)
-
- # Attempt to delete the new domain
-
- self.delete('/domains/%(domain_id)s' % {'domain_id': new_domain_id},
- expected_status=exception.ForbiddenAction.code)
-
- def test_delete_old_default_domain(self):
- # If change the default domain ID, deleting the old default domain
- # works.
-
- # Create a new domain that's not the default
- new_domain = self.new_domain_ref()
- new_domain_id = new_domain['id']
- self.resource_api.create_domain(new_domain_id, new_domain)
-
- old_default_domain_id = CONF.identity.default_domain_id
-
- # Disable the default domain so we can delete it later.
- self.patch('/domains/%(domain_id)s' % {
- 'domain_id': old_default_domain_id},
- body={'domain': {'enabled': False}})
-
- # Change the default domain
- self.config_fixture.config(group='identity',
- default_domain_id=new_domain_id)
-
- # Delete the old default domain
-
- self.delete(
- '/domains/%(domain_id)s' % {'domain_id': old_default_domain_id})
-
- def test_token_revoked_once_domain_disabled(self):
- """Test token from a disabled domain has been invalidated.
-
- Test that a token that was valid for an enabled domain
- becomes invalid once that domain is disabled.
-
- """
-
- self.domain = self.new_domain_ref()
- self.resource_api.create_domain(self.domain['id'], self.domain)
-
- self.user2 = self.new_user_ref(domain_id=self.domain['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
-
- # build a request body
- auth_body = self.build_authentication_request(
- user_id=self.user2['id'],
- password=self.user2['password'])
-
- # sends a request for the user's token
- token_resp = self.post('/auth/tokens', body=auth_body)
-
- subject_token = token_resp.headers.get('x-subject-token')
-
- # validates the returned token and it should be valid.
- self.head('/auth/tokens',
- headers={'x-subject-token': subject_token},
- expected_status=200)
-
- # now disable the domain
- self.domain['enabled'] = False
- url = "/domains/%(domain_id)s" % {'domain_id': self.domain['id']}
- self.patch(url,
- body={'domain': {'enabled': False}},
- expected_status=200)
-
- # validates the same token again and it should be 'not found'
- # as the domain has already been disabled.
- self.head('/auth/tokens',
- headers={'x-subject-token': subject_token},
- expected_status=http_client.NOT_FOUND)
-
- def test_delete_domain_hierarchy(self):
- """Call ``DELETE /domains/{domain_id}``."""
- domain = self.new_domain_ref()
- self.resource_api.create_domain(domain['id'], domain)
-
- root_project = self.new_project_ref(
- domain_id=domain['id'])
- self.resource_api.create_project(root_project['id'], root_project)
-
- leaf_project = self.new_project_ref(
- domain_id=domain['id'],
- parent_id=root_project['id'])
- self.resource_api.create_project(leaf_project['id'], leaf_project)
-
- # Need to disable it first.
- self.patch('/domains/%(domain_id)s' % {
- 'domain_id': domain['id']},
- body={'domain': {'enabled': False}})
-
- self.delete(
- '/domains/%(domain_id)s' % {
- 'domain_id': domain['id']})
-
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.get_domain,
- domain['id'])
-
- self.assertRaises(exception.ProjectNotFound,
- self.resource_api.get_project,
- root_project['id'])
-
- self.assertRaises(exception.ProjectNotFound,
- self.resource_api.get_project,
- leaf_project['id'])
-
- def test_forbid_operations_on_federated_domain(self):
- """Make sure one cannot operate on federated domain.
-
- This includes operations like create, update, delete
- on domain identified by id and name where difference variations of
- id 'Federated' are used.
-
- """
- def create_domains():
- for variation in ('Federated', 'FEDERATED',
- 'federated', 'fEderated'):
- domain = self.new_domain_ref()
- domain['id'] = variation
- yield domain
-
- for domain in create_domains():
- self.assertRaises(
- AssertionError, self.resource_api.create_domain,
- domain['id'], domain)
- self.assertRaises(
- AssertionError, self.resource_api.update_domain,
- domain['id'], domain)
- self.assertRaises(
- exception.DomainNotFound, self.resource_api.delete_domain,
- domain['id'])
-
- # swap 'name' with 'id' and try again, expecting the request to
- # gracefully fail
- domain['id'], domain['name'] = domain['name'], domain['id']
- self.assertRaises(
- AssertionError, self.resource_api.create_domain,
- domain['id'], domain)
- self.assertRaises(
- AssertionError, self.resource_api.update_domain,
- domain['id'], domain)
- self.assertRaises(
- exception.DomainNotFound, self.resource_api.delete_domain,
- domain['id'])
-
- def test_forbid_operations_on_defined_federated_domain(self):
- """Make sure one cannot operate on a user-defined federated domain.
-
- This includes operations like create, update, delete.
-
- """
-
- non_default_name = 'beta_federated_domain'
- self.config_fixture.config(group='federation',
- federated_domain_name=non_default_name)
- domain = self.new_domain_ref()
- domain['name'] = non_default_name
- self.assertRaises(AssertionError,
- self.resource_api.create_domain,
- domain['id'], domain)
- self.assertRaises(exception.DomainNotFound,
- self.resource_api.delete_domain,
- domain['id'])
- self.assertRaises(AssertionError,
- self.resource_api.update_domain,
- domain['id'], domain)
-
- # Project CRUD tests
-
- def test_list_projects(self):
- """Call ``GET /projects``."""
- resource_url = '/projects'
- r = self.get(resource_url)
- self.assertValidProjectListResponse(r, ref=self.project,
- resource_url=resource_url)
-
- def test_create_project(self):
- """Call ``POST /projects``."""
- ref = self.new_project_ref(domain_id=self.domain_id)
- r = self.post(
- '/projects',
- body={'project': ref})
- self.assertValidProjectResponse(r, ref)
-
- def test_create_project_bad_request(self):
- """Call ``POST /projects``."""
- self.post('/projects', body={'project': {}},
- expected_status=http_client.BAD_REQUEST)
-
- def test_create_project_invalid_domain_id(self):
- """Call ``POST /projects``."""
- ref = self.new_project_ref(domain_id=uuid.uuid4().hex)
- self.post('/projects', body={'project': ref},
- expected_status=http_client.BAD_REQUEST)
-
- def test_create_project_is_domain_not_allowed(self):
- """Call ``POST /projects``.
-
- Setting is_domain=True is not supported yet and should raise
- NotImplemented.
-
- """
- ref = self.new_project_ref(domain_id=self.domain_id, is_domain=True)
- self.post('/projects',
- body={'project': ref},
- expected_status=501)
-
- @utils.wip('waiting for projects acting as domains implementation')
- def test_create_project_without_parent_id_and_without_domain_id(self):
- """Call ``POST /projects``."""
-
- # Grant a domain role for the user
- collection_url = (
- '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
- 'domain_id': self.domain_id,
- 'user_id': self.user['id']})
- member_url = '%(collection_url)s/%(role_id)s' % {
- 'collection_url': collection_url,
- 'role_id': self.role_id}
- self.put(member_url)
-
- # Create an authentication request for a domain scoped token
- auth = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'],
- domain_id=self.domain_id)
-
- # Without domain_id and parent_id, the domain_id should be
- # normalized to the domain on the token, when using a domain
- # scoped token.
- ref = self.new_project_ref()
- r = self.post(
- '/projects',
- auth=auth,
- body={'project': ref})
- ref['domain_id'] = self.domain['id']
- self.assertValidProjectResponse(r, ref)
-
- @utils.wip('waiting for projects acting as domains implementation')
- def test_create_project_with_parent_id_and_no_domain_id(self):
- """Call ``POST /projects``."""
- # With only the parent_id, the domain_id should be
- # normalized to the parent's domain_id
- ref_child = self.new_project_ref(parent_id=self.project['id'])
-
- r = self.post(
- '/projects',
- body={'project': ref_child})
- self.assertEqual(r.result['project']['domain_id'],
- self.project['domain_id'])
- ref_child['domain_id'] = self.domain['id']
- self.assertValidProjectResponse(r, ref_child)
-
- def _create_projects_hierarchy(self, hierarchy_size=1):
- """Creates a single-branched project hierarchy with the specified size.
-
- :param hierarchy_size: the desired hierarchy size, default is 1 -
- a project with one child.
-
- :returns projects: a list of the projects in the created hierarchy.
-
- """
- new_ref = self.new_project_ref(domain_id=self.domain_id)
- resp = self.post('/projects', body={'project': new_ref})
-
- projects = [resp.result]
-
- for i in range(hierarchy_size):
- new_ref = self.new_project_ref(
- domain_id=self.domain_id,
- parent_id=projects[i]['project']['id'])
- resp = self.post('/projects',
- body={'project': new_ref})
- self.assertValidProjectResponse(resp, new_ref)
-
- projects.append(resp.result)
-
- return projects
-
- def test_list_projects_filtering_by_parent_id(self):
- """Call ``GET /projects?parent_id={project_id}``."""
- projects = self._create_projects_hierarchy(hierarchy_size=2)
-
- # Add another child to projects[1] - it will be projects[3]
- new_ref = self.new_project_ref(
- domain_id=self.domain_id,
- parent_id=projects[1]['project']['id'])
- resp = self.post('/projects',
- body={'project': new_ref})
- self.assertValidProjectResponse(resp, new_ref)
-
- projects.append(resp.result)
-
- # Query for projects[0] immediate children - it will
- # be only projects[1]
- r = self.get(
- '/projects?parent_id=%(project_id)s' % {
- 'project_id': projects[0]['project']['id']})
- self.assertValidProjectListResponse(r)
-
- projects_result = r.result['projects']
- expected_list = [projects[1]['project']]
-
- # projects[0] has projects[1] as child
- self.assertEqual(expected_list, projects_result)
-
- # Query for projects[1] immediate children - it will
- # be projects[2] and projects[3]
- r = self.get(
- '/projects?parent_id=%(project_id)s' % {
- 'project_id': projects[1]['project']['id']})
- self.assertValidProjectListResponse(r)
-
- projects_result = r.result['projects']
- expected_list = [projects[2]['project'], projects[3]['project']]
-
- # projects[1] has projects[2] and projects[3] as children
- self.assertEqual(expected_list, projects_result)
-
- # Query for projects[2] immediate children - it will be an empty list
- r = self.get(
- '/projects?parent_id=%(project_id)s' % {
- 'project_id': projects[2]['project']['id']})
- self.assertValidProjectListResponse(r)
-
- projects_result = r.result['projects']
- expected_list = []
-
- # projects[2] has no child, projects_result must be an empty list
- self.assertEqual(expected_list, projects_result)
-
- def test_create_hierarchical_project(self):
- """Call ``POST /projects``."""
- self._create_projects_hierarchy()
-
- def test_get_project(self):
- """Call ``GET /projects/{project_id}``."""
- r = self.get(
- '/projects/%(project_id)s' % {
- 'project_id': self.project_id})
- self.assertValidProjectResponse(r, self.project)
-
- def test_get_project_with_parents_as_list_with_invalid_id(self):
- """Call ``GET /projects/{project_id}?parents_as_list``."""
- self.get('/projects/%(project_id)s?parents_as_list' % {
- 'project_id': None}, expected_status=http_client.NOT_FOUND)
-
- self.get('/projects/%(project_id)s?parents_as_list' % {
- 'project_id': uuid.uuid4().hex},
- expected_status=http_client.NOT_FOUND)
-
- def test_get_project_with_subtree_as_list_with_invalid_id(self):
- """Call ``GET /projects/{project_id}?subtree_as_list``."""
- self.get('/projects/%(project_id)s?subtree_as_list' % {
- 'project_id': None}, expected_status=http_client.NOT_FOUND)
-
- self.get('/projects/%(project_id)s?subtree_as_list' % {
- 'project_id': uuid.uuid4().hex},
- expected_status=http_client.NOT_FOUND)
-
- def test_get_project_with_parents_as_ids(self):
- """Call ``GET /projects/{project_id}?parents_as_ids``."""
- projects = self._create_projects_hierarchy(hierarchy_size=2)
-
- # Query for projects[2] parents_as_ids
- r = self.get(
- '/projects/%(project_id)s?parents_as_ids' % {
- 'project_id': projects[2]['project']['id']})
-
- self.assertValidProjectResponse(r, projects[2]['project'])
- parents_as_ids = r.result['project']['parents']
-
- # Assert parents_as_ids is a structured dictionary correctly
- # representing the hierarchy. The request was made using projects[2]
- # id, hence its parents should be projects[1] and projects[0]. It
- # should have the following structure:
- # {
- # projects[1]: {
- # projects[0]: None
- # }
- # }
- expected_dict = {
- projects[1]['project']['id']: {
- projects[0]['project']['id']: None
- }
- }
- self.assertDictEqual(expected_dict, parents_as_ids)
-
- # Query for projects[0] parents_as_ids
- r = self.get(
- '/projects/%(project_id)s?parents_as_ids' % {
- 'project_id': projects[0]['project']['id']})
-
- self.assertValidProjectResponse(r, projects[0]['project'])
- parents_as_ids = r.result['project']['parents']
-
- # projects[0] has no parents, parents_as_ids must be None
- self.assertIsNone(parents_as_ids)
-
- def test_get_project_with_parents_as_list_with_full_access(self):
- """``GET /projects/{project_id}?parents_as_list`` with full access.
-
- Test plan:
-
- - Create 'parent', 'project' and 'subproject' projects;
- - Assign a user a role on each one of those projects;
- - Check that calling parents_as_list on 'subproject' returns both
- 'project' and 'parent'.
-
- """
-
- # Create the project hierarchy
- parent, project, subproject = self._create_projects_hierarchy(2)
-
- # Assign a role for the user on all the created projects
- for proj in (parent, project, subproject):
- self.put(self.build_role_assignment_link(
- role_id=self.role_id, user_id=self.user_id,
- project_id=proj['project']['id']))
-
- # Make the API call
- r = self.get('/projects/%(project_id)s?parents_as_list' %
- {'project_id': subproject['project']['id']})
- self.assertValidProjectResponse(r, subproject['project'])
-
- # Assert only 'project' and 'parent' are in the parents list
- self.assertIn(project, r.result['project']['parents'])
- self.assertIn(parent, r.result['project']['parents'])
- self.assertEqual(2, len(r.result['project']['parents']))
-
- def test_get_project_with_parents_as_list_with_partial_access(self):
- """``GET /projects/{project_id}?parents_as_list`` with partial access.
-
- Test plan:
-
- - Create 'parent', 'project' and 'subproject' projects;
- - Assign a user a role on 'parent' and 'subproject';
- - Check that calling parents_as_list on 'subproject' only returns
- 'parent'.
-
- """
-
- # Create the project hierarchy
- parent, project, subproject = self._create_projects_hierarchy(2)
-
- # Assign a role for the user on parent and subproject
- for proj in (parent, subproject):
- self.put(self.build_role_assignment_link(
- role_id=self.role_id, user_id=self.user_id,
- project_id=proj['project']['id']))
-
- # Make the API call
- r = self.get('/projects/%(project_id)s?parents_as_list' %
- {'project_id': subproject['project']['id']})
- self.assertValidProjectResponse(r, subproject['project'])
-
- # Assert only 'parent' is in the parents list
- self.assertIn(parent, r.result['project']['parents'])
- self.assertEqual(1, len(r.result['project']['parents']))
-
- def test_get_project_with_parents_as_list_and_parents_as_ids(self):
- """Call ``GET /projects/{project_id}?parents_as_list&parents_as_ids``.
-
- """
- projects = self._create_projects_hierarchy(hierarchy_size=2)
-
- self.get(
- '/projects/%(project_id)s?parents_as_list&parents_as_ids' % {
- 'project_id': projects[1]['project']['id']},
- expected_status=http_client.BAD_REQUEST)
-
- def test_get_project_with_subtree_as_ids(self):
- """Call ``GET /projects/{project_id}?subtree_as_ids``.
-
- This test creates a more complex hierarchy to test if the structured
- dictionary returned by using the ``subtree_as_ids`` query param
- correctly represents the hierarchy.
-
- The hierarchy contains 5 projects with the following structure::
-
- +--A--+
- | |
- +--B--+ C
- | |
- D E
-
-
- """
- projects = self._create_projects_hierarchy(hierarchy_size=2)
-
- # Add another child to projects[0] - it will be projects[3]
- new_ref = self.new_project_ref(
- domain_id=self.domain_id,
- parent_id=projects[0]['project']['id'])
- resp = self.post('/projects',
- body={'project': new_ref})
- self.assertValidProjectResponse(resp, new_ref)
- projects.append(resp.result)
-
- # Add another child to projects[1] - it will be projects[4]
- new_ref = self.new_project_ref(
- domain_id=self.domain_id,
- parent_id=projects[1]['project']['id'])
- resp = self.post('/projects',
- body={'project': new_ref})
- self.assertValidProjectResponse(resp, new_ref)
- projects.append(resp.result)
-
- # Query for projects[0] subtree_as_ids
- r = self.get(
- '/projects/%(project_id)s?subtree_as_ids' % {
- 'project_id': projects[0]['project']['id']})
- self.assertValidProjectResponse(r, projects[0]['project'])
- subtree_as_ids = r.result['project']['subtree']
-
- # The subtree hierarchy from projects[0] should have the following
- # structure:
- # {
- # projects[1]: {
- # projects[2]: None,
- # projects[4]: None
- # },
- # projects[3]: None
- # }
- expected_dict = {
- projects[1]['project']['id']: {
- projects[2]['project']['id']: None,
- projects[4]['project']['id']: None
- },
- projects[3]['project']['id']: None
- }
- self.assertDictEqual(expected_dict, subtree_as_ids)
-
- # Now query for projects[1] subtree_as_ids
- r = self.get(
- '/projects/%(project_id)s?subtree_as_ids' % {
- 'project_id': projects[1]['project']['id']})
- self.assertValidProjectResponse(r, projects[1]['project'])
- subtree_as_ids = r.result['project']['subtree']
-
- # The subtree hierarchy from projects[1] should have the following
- # structure:
- # {
- # projects[2]: None,
- # projects[4]: None
- # }
- expected_dict = {
- projects[2]['project']['id']: None,
- projects[4]['project']['id']: None
- }
- self.assertDictEqual(expected_dict, subtree_as_ids)
-
- # Now query for projects[3] subtree_as_ids
- r = self.get(
- '/projects/%(project_id)s?subtree_as_ids' % {
- 'project_id': projects[3]['project']['id']})
- self.assertValidProjectResponse(r, projects[3]['project'])
- subtree_as_ids = r.result['project']['subtree']
-
-        # projects[3] has no subtree, so subtree_as_ids must be None
- self.assertIsNone(subtree_as_ids)
-
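A companion sketch for the subtree_as_ids shape verified above: computing the
depth of the returned ID tree. tree_depth is a hypothetical helper; None (no
subtree) counts as depth zero:

    def tree_depth(tree):
        """Depth of a {project_id: subtree-or-None} dict; None counts as 0."""
        if not tree:
            return 0
        return 1 + max(tree_depth(children) for children in tree.values())

    # For projects[0] above, {p1: {p2: None, p4: None}, p3: None} has depth 2.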
- def test_get_project_with_subtree_as_list_with_full_access(self):
- """``GET /projects/{project_id}?subtree_as_list`` with full access.
-
- Test plan:
-
- - Create 'parent', 'project' and 'subproject' projects;
- - Assign a user a role on each one of those projects;
-        - Check that calling subtree_as_list on 'parent' returns both
-          'project' and 'subproject'.
-
- """
-
- # Create the project hierarchy
- parent, project, subproject = self._create_projects_hierarchy(2)
-
- # Assign a role for the user on all the created projects
- for proj in (parent, project, subproject):
- self.put(self.build_role_assignment_link(
- role_id=self.role_id, user_id=self.user_id,
- project_id=proj['project']['id']))
-
- # Make the API call
- r = self.get('/projects/%(project_id)s?subtree_as_list' %
- {'project_id': parent['project']['id']})
- self.assertValidProjectResponse(r, parent['project'])
-
- # Assert only 'project' and 'subproject' are in the subtree
- self.assertIn(project, r.result['project']['subtree'])
- self.assertIn(subproject, r.result['project']['subtree'])
- self.assertEqual(2, len(r.result['project']['subtree']))
-
- def test_get_project_with_subtree_as_list_with_partial_access(self):
- """``GET /projects/{project_id}?subtree_as_list`` with partial access.
-
- Test plan:
-
- - Create 'parent', 'project' and 'subproject' projects;
- - Assign a user a role on 'parent' and 'subproject';
- - Check that calling subtree_as_list on 'parent' returns 'subproject'.
-
- """
-
- # Create the project hierarchy
- parent, project, subproject = self._create_projects_hierarchy(2)
-
- # Assign a role for the user on parent and subproject
- for proj in (parent, subproject):
- self.put(self.build_role_assignment_link(
- role_id=self.role_id, user_id=self.user_id,
- project_id=proj['project']['id']))
-
- # Make the API call
- r = self.get('/projects/%(project_id)s?subtree_as_list' %
- {'project_id': parent['project']['id']})
- self.assertValidProjectResponse(r, parent['project'])
-
- # Assert only 'subproject' is in the subtree
- self.assertIn(subproject, r.result['project']['subtree'])
- self.assertEqual(1, len(r.result['project']['subtree']))
-
- def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
- """Call ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``.
-
- """
- projects = self._create_projects_hierarchy(hierarchy_size=2)
-
- self.get(
- '/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {
- 'project_id': projects[1]['project']['id']},
- expected_status=http_client.BAD_REQUEST)
-
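Both pairs of *_as_list/*_as_ids tests above expect 400 Bad Request when the
two query flags are combined. A sketch of that mutual-exclusion check; the
helper and the exception type are assumptions, only the parameter names come
from the tests:

    def check_exclusive_subtree_params(params):
        """Reject combining subtree_as_list with subtree_as_ids."""
        if 'subtree_as_list' in params and 'subtree_as_ids' in params:
            # The API answers this combination with 400 Bad Request.
            raise ValueError('subtree_as_list and subtree_as_ids '
                             'are mutually exclusive')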
- def test_update_project(self):
- """Call ``PATCH /projects/{project_id}``."""
- ref = self.new_project_ref(domain_id=self.domain_id)
- del ref['id']
- r = self.patch(
- '/projects/%(project_id)s' % {
- 'project_id': self.project_id},
- body={'project': ref})
- self.assertValidProjectResponse(r, ref)
-
- def test_update_project_domain_id(self):
- """Call ``PATCH /projects/{project_id}`` with domain_id."""
- project = self.new_project_ref(domain_id=self.domain['id'])
- self.resource_api.create_project(project['id'], project)
- project['domain_id'] = CONF.identity.default_domain_id
- r = self.patch('/projects/%(project_id)s' % {
- 'project_id': project['id']},
- body={'project': project},
- expected_status=exception.ValidationError.code)
- self.config_fixture.config(domain_id_immutable=False)
- project['domain_id'] = self.domain['id']
- r = self.patch('/projects/%(project_id)s' % {
- 'project_id': project['id']},
- body={'project': project})
- self.assertValidProjectResponse(r, project)
-
- def test_update_project_parent_id(self):
- """Call ``PATCH /projects/{project_id}``."""
- projects = self._create_projects_hierarchy()
- leaf_project = projects[1]['project']
- leaf_project['parent_id'] = None
- self.patch(
- '/projects/%(project_id)s' % {
- 'project_id': leaf_project['id']},
- body={'project': leaf_project},
- expected_status=http_client.FORBIDDEN)
-
- def test_update_project_is_domain_not_allowed(self):
- """Call ``PATCH /projects/{project_id}`` with is_domain.
-
- The is_domain flag is immutable.
- """
- project = self.new_project_ref(domain_id=self.domain['id'])
- resp = self.post('/projects',
- body={'project': project})
- self.assertFalse(resp.result['project']['is_domain'])
-
- project['is_domain'] = True
- self.patch('/projects/%(project_id)s' % {
- 'project_id': resp.result['project']['id']},
- body={'project': project},
- expected_status=http_client.BAD_REQUEST)
-
- def test_disable_leaf_project(self):
- """Call ``PATCH /projects/{project_id}``."""
- projects = self._create_projects_hierarchy()
- leaf_project = projects[1]['project']
- leaf_project['enabled'] = False
- r = self.patch(
- '/projects/%(project_id)s' % {
- 'project_id': leaf_project['id']},
- body={'project': leaf_project})
- self.assertEqual(
- leaf_project['enabled'], r.result['project']['enabled'])
-
- def test_disable_not_leaf_project(self):
- """Call ``PATCH /projects/{project_id}``."""
- projects = self._create_projects_hierarchy()
- root_project = projects[0]['project']
- root_project['enabled'] = False
- self.patch(
- '/projects/%(project_id)s' % {
- 'project_id': root_project['id']},
- body={'project': root_project},
- expected_status=http_client.FORBIDDEN)
-
- def test_delete_project(self):
- """Call ``DELETE /projects/{project_id}``
-
- As well as making sure the delete succeeds, we ensure
- that any credentials that reference this projects are
- also deleted, while other credentials are unaffected.
-
- """
- # First check the credential for this project is present
- r = self.credential_api.get_credential(self.credential['id'])
- self.assertDictEqual(r, self.credential)
- # Create a second credential with a different project
- self.project2 = self.new_project_ref(
- domain_id=self.domain['id'])
- self.resource_api.create_project(self.project2['id'], self.project2)
- self.credential2 = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project2['id'])
- self.credential_api.create_credential(
- self.credential2['id'],
- self.credential2)
-
- # Now delete the project
- self.delete(
- '/projects/%(project_id)s' % {
- 'project_id': self.project_id})
-
- # Deleting the project should have deleted any credentials
- # that reference this project
- self.assertRaises(exception.CredentialNotFound,
- self.credential_api.get_credential,
- credential_id=self.credential['id'])
- # But the credential for project2 is unaffected
- r = self.credential_api.get_credential(self.credential2['id'])
- self.assertDictEqual(r, self.credential2)
-
- def test_delete_not_leaf_project(self):
- """Call ``DELETE /projects/{project_id}``."""
- projects = self._create_projects_hierarchy()
- self.delete(
- '/projects/%(project_id)s' % {
- 'project_id': projects[0]['project']['id']},
- expected_status=http_client.FORBIDDEN)
-
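test_delete_not_leaf_project and test_disable_not_leaf_project encode the
same server rule: only leaf projects may be deleted or disabled. A hedged
client-side sketch of checking leaf-ness first; get_json is a hypothetical
callable returning a parsed response body:

    def is_leaf_project(get_json, project_id):
        """True if the project has no children (its subtree is None)."""
        body = get_json('/projects/%s?subtree_as_ids' % project_id)
        return body['project']['subtree'] is None

    # A client would delete only when is_leaf_project(...) is True;
    # otherwise the server responds 403 Forbidden, as the tests assert.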
# Role CRUD tests
def test_create_role(self):
"""Call ``POST /roles``."""
- ref = self.new_role_ref()
+ ref = unit.new_role_ref()
r = self.post(
'/roles',
body={'role': ref})
@@ -1090,7 +66,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
def test_update_role(self):
"""Call ``PATCH /roles/{role_id}``."""
- ref = self.new_role_ref()
+ ref = unit.new_role_ref()
del ref['id']
r = self.patch('/roles/%(role_id)s' % {
'role_id': self.role_id},
@@ -1105,8 +81,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
def test_create_member_role(self):
"""Call ``POST /roles``."""
# specify only the name on creation
- ref = self.new_role_ref()
- ref['name'] = CONF.member_role_name
+ ref = unit.new_role_ref(name=CONF.member_role_name)
r = self.post(
'/roles',
body={'role': ref})
@@ -1118,35 +93,41 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
# Role Grants tests
def test_crud_user_project_role_grants(self):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+
collection_url = (
'/projects/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.project['id'],
'user_id': self.user['id']})
member_url = '%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
- 'role_id': self.role_id}
+ 'role_id': role['id']}
+
+ # There is a role assignment for self.user on self.project
+ r = self.get(collection_url)
+ self.assertValidRoleListResponse(r, ref=self.role,
+ expected_length=1)
self.put(member_url)
self.head(member_url)
r = self.get(collection_url)
- self.assertValidRoleListResponse(r, ref=self.role,
- resource_url=collection_url)
+ self.assertValidRoleListResponse(r, ref=role,
+ resource_url=collection_url,
+ expected_length=2)
- # FIXME(gyee): this test is no longer valid as user
- # have no role in the project. Can't get a scoped token
- # self.delete(member_url)
- # r = self.get(collection_url)
- # self.assertValidRoleListResponse(r, expected_length=0)
- # self.assertIn(collection_url, r.result['links']['self'])
+ self.delete(member_url)
+ r = self.get(collection_url)
+ self.assertValidRoleListResponse(r, ref=self.role, expected_length=1)
+ self.assertIn(collection_url, r.result['links']['self'])
def test_crud_user_project_role_grants_no_user(self):
- """Grant role on a project to a user that doesn't exist, 404 result.
+ """Grant role on a project to a user that doesn't exist.
    When granting a role on a project to a user that doesn't exist, the
    server returns 404 Not Found for the user.
"""
-
user_id = uuid.uuid4().hex
collection_url = (
@@ -1179,13 +160,12 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
resource_url=collection_url)
def test_crud_user_domain_role_grants_no_user(self):
- """Grant role on a domain to a user that doesn't exist, 404 result.
+ """Grant role on a domain to a user that doesn't exist.
    When granting a role on a domain to a user that doesn't exist, the server
returns 404 Not Found for the user.
"""
-
user_id = uuid.uuid4().hex
collection_url = (
@@ -1218,13 +198,12 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
resource_url=collection_url)
def test_crud_group_project_role_grants_no_group(self):
- """Grant role on a project to a group that doesn't exist, 404 result.
+ """Grant role on a project to a group that doesn't exist.
    When granting a role on a project to a group that doesn't exist, the
server returns 404 Not Found for the group.
"""
-
group_id = uuid.uuid4().hex
collection_url = (
@@ -1258,13 +237,12 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
resource_url=collection_url)
def test_crud_group_domain_role_grants_no_group(self):
- """Grant role on a domain to a group that doesn't exist, 404 result.
+ """Grant role on a domain to a group that doesn't exist.
    When granting a role on a domain to a group that doesn't exist, the server
returns 404 Not Found for the group.
"""
-
group_id = uuid.uuid4().hex
collection_url = (
@@ -1280,7 +258,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
def _create_new_user_and_assign_role_on_project(self):
"""Create a new user and assign user a role on a project."""
# Create a new user
- new_user = self.new_user_ref(domain_id=self.domain_id)
+ new_user = unit.new_user_ref(domain_id=self.domain_id)
user_ref = self.identity_api.create_user(new_user)
# Assign the user a role on the project
collection_url = (
@@ -1290,9 +268,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
member_url = ('%(collection_url)s/%(role_id)s' % {
'collection_url': collection_url,
'role_id': self.role_id})
- self.put(member_url, expected_status=204)
+ self.put(member_url)
# Check the user has the role assigned
- self.head(member_url, expected_status=204)
+ self.head(member_url)
return member_url, user_ref
def test_delete_user_before_removing_role_assignment_succeeds(self):
@@ -1301,7 +279,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
# Delete the user from identity backend
self.identity_api.driver.delete_user(user['id'])
# Clean up the role assignment
- self.delete(member_url, expected_status=204)
+ self.delete(member_url)
# Make sure the role is gone
self.head(member_url, expected_status=http_client.NOT_FOUND)
@@ -1310,8 +288,9 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
member_url, user = self._create_new_user_and_assign_role_on_project()
# Delete the user from identity backend
self.identity_api.delete_user(user['id'])
- # We should get a 404 when looking for the user in the identity
- # backend because we're not performing a delete operation on the role.
+ # We should get a 404 Not Found when looking for the user in the
+ # identity backend because we're not performing a delete operation on
+ # the role.
self.head(member_url, expected_status=http_client.NOT_FOUND)
def test_token_revoked_once_group_role_grant_revoked(self):
@@ -1344,7 +323,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
# validates the returned token; it should be valid.
self.head('/auth/tokens',
headers={'x-subject-token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# revokes the grant from group on project.
self.assignment_api.delete_grant(role_id=self.role['id'],
@@ -1356,6 +335,126 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
headers={'x-subject-token': token},
expected_status=http_client.NOT_FOUND)
+ @unit.skip_if_cache_disabled('assignment')
+ def test_delete_grant_from_user_and_project_invalidate_cache(self):
+ # create a new project
+ new_project = unit.new_project_ref(domain_id=self.domain_id)
+ self.resource_api.create_project(new_project['id'], new_project)
+
+ collection_url = (
+ '/projects/%(project_id)s/users/%(user_id)s/roles' % {
+ 'project_id': new_project['id'],
+ 'user_id': self.user['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+        # grant the user a role on the new project
+ self.put(member_url)
+
+ # check the grant that was just created
+ self.head(member_url)
+ resp = self.get(collection_url)
+ self.assertValidRoleListResponse(resp, ref=self.role,
+ resource_url=collection_url)
+
+ # delete the grant
+ self.delete(member_url)
+
+ # get the collection and ensure there are no roles on the project
+ resp = self.get(collection_url)
+ self.assertListEqual(resp.json_body['roles'], [])
+
+ @unit.skip_if_cache_disabled('assignment')
+ def test_delete_grant_from_user_and_domain_invalidates_cache(self):
+ # create a new domain
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+
+ collection_url = (
+ '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+ 'domain_id': new_domain['id'],
+ 'user_id': self.user['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+        # grant the user a role on the new domain
+ self.put(member_url)
+
+ # check the grant that was just created
+ self.head(member_url)
+ resp = self.get(collection_url)
+ self.assertValidRoleListResponse(resp, ref=self.role,
+ resource_url=collection_url)
+
+ # delete the grant
+ self.delete(member_url)
+
+ # get the collection and ensure there are no roles on the domain
+ resp = self.get(collection_url)
+ self.assertListEqual(resp.json_body['roles'], [])
+
+ @unit.skip_if_cache_disabled('assignment')
+ def test_delete_grant_from_group_and_project_invalidates_cache(self):
+ # create a new project
+ new_project = unit.new_project_ref(domain_id=self.domain_id)
+ self.resource_api.create_project(new_project['id'], new_project)
+
+ collection_url = (
+ '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
+ 'project_id': new_project['id'],
+ 'group_id': self.group['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+        # grant the group a role on the new project
+ self.put(member_url)
+
+ # check the grant that was just created
+ self.head(member_url)
+ resp = self.get(collection_url)
+ self.assertValidRoleListResponse(resp, ref=self.role,
+ resource_url=collection_url)
+
+ # delete the grant
+ self.delete(member_url)
+
+ # get the collection and ensure there are no roles on the project
+ resp = self.get(collection_url)
+ self.assertListEqual(resp.json_body['roles'], [])
+
+ @unit.skip_if_cache_disabled('assignment')
+ def test_delete_grant_from_group_and_domain_invalidates_cache(self):
+ # create a new domain
+ new_domain = unit.new_domain_ref()
+ self.resource_api.create_domain(new_domain['id'], new_domain)
+
+ collection_url = (
+ '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+ 'domain_id': new_domain['id'],
+ 'group_id': self.group['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+        # grant the group a role on the new domain
+ self.put(member_url)
+
+ # check the grant that was just created
+ self.head(member_url)
+ resp = self.get(collection_url)
+ self.assertValidRoleListResponse(resp, ref=self.role,
+ resource_url=collection_url)
+
+ # delete the grant
+ self.delete(member_url)
+
+ # get the collection and ensure there are no roles on the domain
+ resp = self.get(collection_url)
+ self.assertListEqual(resp.json_body['roles'], [])
+
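The four cache-invalidation tests above build grant URLs of one shared shape.
A small sketch of that pattern; the function is illustrative, while the URL
layout is taken directly from the tests:

    def grant_url(target, target_id, actor, actor_id, role_id):
        """Build e.g. /projects/{id}/users/{id}/roles/{id}."""
        return '/%s/%s/%s/%s/roles/%s' % (
            target, target_id, actor, actor_id, role_id)

    # grant_url('projects', project_id, 'users', user_id, role_id)
    # grant_url('domains', domain_id, 'groups', group_id, role_id)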
# Role Assignments tests
def test_get_role_assignments(self):
@@ -1384,13 +483,11 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
been removed
"""
-
# Since the default fixtures already assign some roles to the
# user it creates, we also need a new user that will not have any
# existing assignments
- self.user1 = self.new_user_ref(
- domain_id=self.domain['id'])
- self.user1 = self.identity_api.create_user(self.user1)
+ user1 = unit.new_user_ref(domain_id=self.domain['id'])
+ user1 = self.identity_api.create_user(user1)
collection_url = '/role_assignments'
r = self.get(collection_url)
@@ -1412,7 +509,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, gd_entity)
ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
- user_id=self.user1['id'],
+ user_id=user1['id'],
role_id=self.role_id)
self.put(ud_entity['links']['assignment'])
r = self.get(collection_url)
@@ -1434,7 +531,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, gp_entity)
up_entity = self.build_role_assignment_entity(
- project_id=self.project_id, user_id=self.user1['id'],
+ project_id=self.project_id, user_id=user1['id'],
role_id=self.role_id)
self.put(up_entity['links']['assignment'])
r = self.get(collection_url)
@@ -1475,18 +572,13 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
for each of the group members.
"""
- self.user1 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user1['password']
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
- self.user2 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
- self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
- self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])
+ user1 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+ user2 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+
+ self.identity_api.add_user_to_group(user1['id'], self.group['id'])
+ self.identity_api.add_user_to_group(user2['id'], self.group['id'])
collection_url = '/role_assignments'
r = self.get(collection_url)
@@ -1516,11 +608,11 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
resource_url=collection_url)
ud_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], domain_id=self.domain_id,
- user_id=self.user1['id'], role_id=self.role_id)
+ user_id=user1['id'], role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity)
ud_entity = self.build_role_assignment_entity(
link=gd_entity['links']['assignment'], domain_id=self.domain_id,
- user_id=self.user2['id'], role_id=self.role_id)
+ user_id=user2['id'], role_id=self.role_id)
self.assertRoleAssignmentInListResponse(r, ud_entity)
def test_check_effective_values_for_role_assignments(self):
@@ -1549,18 +641,13 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
know if we are getting effective roles or not
"""
- self.user1 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user1['password']
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
- self.user2 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
- self.identity_api.add_user_to_group(self.user1['id'], self.group['id'])
- self.identity_api.add_user_to_group(self.user2['id'], self.group['id'])
+ user1 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+ user2 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+
+ self.identity_api.add_user_to_group(user1['id'], self.group['id'])
+ self.identity_api.add_user_to_group(user2['id'], self.group['id'])
collection_url = '/role_assignments'
r = self.get(collection_url)
@@ -1633,61 +720,53 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
token (all effective roles for a user on a project)
"""
-
# Since the default fixtures already assign some roles to the
# user it creates, we also need a new user that will not have any
# existing assignments
- self.user1 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user1['password']
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
- self.user2 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
- self.group1 = self.new_group_ref(
- domain_id=self.domain['id'])
- self.group1 = self.identity_api.create_group(self.group1)
- self.identity_api.add_user_to_group(self.user1['id'],
- self.group1['id'])
- self.identity_api.add_user_to_group(self.user2['id'],
- self.group1['id'])
- self.project1 = self.new_project_ref(
- domain_id=self.domain['id'])
- self.resource_api.create_project(self.project1['id'], self.project1)
- self.role1 = self.new_role_ref()
+ user1 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+ user2 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+
+ group1 = unit.new_group_ref(domain_id=self.domain['id'])
+ group1 = self.identity_api.create_group(group1)
+ self.identity_api.add_user_to_group(user1['id'], group1['id'])
+ self.identity_api.add_user_to_group(user2['id'], group1['id'])
+ project1 = unit.new_project_ref(domain_id=self.domain['id'])
+ self.resource_api.create_project(project1['id'], project1)
+ self.role1 = unit.new_role_ref()
self.role_api.create_role(self.role1['id'], self.role1)
- self.role2 = self.new_role_ref()
+ self.role2 = unit.new_role_ref()
self.role_api.create_role(self.role2['id'], self.role2)
# Now add one of each of the four types of assignment
gd_entity = self.build_role_assignment_entity(
- domain_id=self.domain_id, group_id=self.group1['id'],
+ domain_id=self.domain_id, group_id=group1['id'],
role_id=self.role1['id'])
self.put(gd_entity['links']['assignment'])
ud_entity = self.build_role_assignment_entity(domain_id=self.domain_id,
- user_id=self.user1['id'],
+ user_id=user1['id'],
role_id=self.role2['id'])
self.put(ud_entity['links']['assignment'])
gp_entity = self.build_role_assignment_entity(
- project_id=self.project1['id'], group_id=self.group1['id'],
+ project_id=project1['id'],
+ group_id=group1['id'],
role_id=self.role1['id'])
self.put(gp_entity['links']['assignment'])
up_entity = self.build_role_assignment_entity(
- project_id=self.project1['id'], user_id=self.user1['id'],
+ project_id=project1['id'],
+ user_id=user1['id'],
role_id=self.role2['id'])
self.put(up_entity['links']['assignment'])
# Now list by various filters to make sure we get back the right ones
collection_url = ('/role_assignments?scope.project.id=%s' %
- self.project1['id'])
+ project1['id'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
@@ -1704,7 +783,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, ud_entity)
self.assertRoleAssignmentInListResponse(r, gd_entity)
- collection_url = '/role_assignments?user.id=%s' % self.user1['id']
+ collection_url = '/role_assignments?user.id=%s' % user1['id']
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
@@ -1712,7 +791,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, up_entity)
self.assertRoleAssignmentInListResponse(r, ud_entity)
- collection_url = '/role_assignments?group.id=%s' % self.group1['id']
+ collection_url = '/role_assignments?group.id=%s' % group1['id']
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
@@ -1733,8 +812,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
collection_url = (
'/role_assignments?user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': self.user1['id'],
- 'project_id': self.project1['id']})
+ 'user_id': user1['id'],
+ 'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=1,
@@ -1746,7 +825,7 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
# assigned as well as by virtue of group membership
collection_url = ('/role_assignments?effective&user.id=%s' %
- self.user1['id'])
+ user1['id'])
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=4,
@@ -1756,17 +835,18 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, ud_entity)
# ...and the two via group membership...
gp1_link = self.build_role_assignment_link(
- project_id=self.project1['id'], group_id=self.group1['id'],
+ project_id=project1['id'],
+ group_id=group1['id'],
role_id=self.role1['id'])
gd1_link = self.build_role_assignment_link(domain_id=self.domain_id,
- group_id=self.group1['id'],
+ group_id=group1['id'],
role_id=self.role1['id'])
up1_entity = self.build_role_assignment_entity(
- link=gp1_link, project_id=self.project1['id'],
- user_id=self.user1['id'], role_id=self.role1['id'])
+ link=gp1_link, project_id=project1['id'],
+ user_id=user1['id'], role_id=self.role1['id'])
ud1_entity = self.build_role_assignment_entity(
- link=gd1_link, domain_id=self.domain_id, user_id=self.user1['id'],
+ link=gd1_link, domain_id=self.domain_id, user_id=user1['id'],
role_id=self.role1['id'])
self.assertRoleAssignmentInListResponse(r, up1_entity)
self.assertRoleAssignmentInListResponse(r, ud1_entity)
@@ -1778,8 +858,8 @@ class AssignmentTestCase(test_v3.RestfulTestCase,
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': self.user1['id'],
- 'project_id': self.project1['id']})
+ 'user_id': user1['id'],
+ 'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r,
expected_length=2,
@@ -1804,7 +884,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
"""
def create_project_hierarchy(parent_id, depth):
- "Creates a random project hierarchy."
+ """Creates a random project hierarchy."""
if depth == 0:
return
@@ -1812,7 +892,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
subprojects = []
for i in range(breadth):
- subprojects.append(self.new_project_ref(
+ subprojects.append(unit.new_project_ref(
domain_id=self.domain_id, parent_id=parent_id))
self.resource_api.create_project(subprojects[-1]['id'],
subprojects[-1])
@@ -1823,12 +903,12 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
super(RoleAssignmentBaseTestCase, self).load_sample_data()
# Create a domain
- self.domain = self.new_domain_ref()
+ self.domain = unit.new_domain_ref()
self.domain_id = self.domain['id']
self.resource_api.create_domain(self.domain_id, self.domain)
# Create a project hierarchy
- self.project = self.new_project_ref(domain_id=self.domain_id)
+ self.project = unit.new_project_ref(domain_id=self.domain_id)
self.project_id = self.project['id']
self.resource_api.create_project(self.project_id, self.project)
@@ -1839,14 +919,14 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
# Create 3 users
self.user_ids = []
for i in range(3):
- user = self.new_user_ref(domain_id=self.domain_id)
+ user = unit.new_user_ref(domain_id=self.domain_id)
user = self.identity_api.create_user(user)
self.user_ids.append(user['id'])
# Create 3 groups
self.group_ids = []
for i in range(3):
- group = self.new_group_ref(domain_id=self.domain_id)
+ group = unit.new_group_ref(domain_id=self.domain_id)
group = self.identity_api.create_group(group)
self.group_ids.append(group['id'])
@@ -1861,7 +941,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
role_id=self.role_id)
# Create a role
- self.role = self.new_role_ref()
+ self.role = unit.new_role_ref()
self.role_id = self.role['id']
self.role_api.create_role(self.role_id, self.role)
@@ -1869,7 +949,7 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
self.default_user_id = self.user_ids[0]
self.default_group_id = self.group_ids[0]
- def get_role_assignments(self, expected_status=200, **filters):
+ def get_role_assignments(self, expected_status=http_client.OK, **filters):
"""Returns the result from querying role assignment API + queried URL.
Calls GET /v3/role_assignments?<params> and returns its result, where
@@ -1880,7 +960,6 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
queried URL.
"""
-
query_url = self._get_role_assignments_query_url(**filters)
response = self.get(query_url, expected_status=expected_status)
@@ -1903,11 +982,11 @@ class RoleAssignmentBaseTestCase(test_v3.RestfulTestCase,
class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase):
"""Class for testing invalid query params on /v3/role_assignments API.
- Querying domain and project, or user and group results in a HTTP 400, since
- a role assignment must contain only a single pair of (actor, target). In
- addition, since filtering on role assignments applies only to the final
- result, effective mode cannot be combined with i) group or ii) domain and
- inherited, because it would always result in an empty list.
+ Querying domain and project, or user and group results in a HTTP 400 Bad
+ Request, since a role assignment must contain only a single pair of (actor,
+ target). In addition, since filtering on role assignments applies only to
+ the final result, effective mode cannot be combined with i) group or ii)
+ domain and inherited, because it would always result in an empty list.
"""
@@ -1959,7 +1038,6 @@ class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase):
group_id, user_id and inherited_to_projects.
"""
-
# Fills default assignment with provided filters
test_assignment = self._set_default_assignment_attributes(**filters)
@@ -2188,10 +1266,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
def test_get_token_from_inherited_user_domain_role_grants(self):
# Create a new user to ensure that no grant is loaded from sample data
- user = self.new_user_ref(domain_id=self.domain_id)
- password = user['password']
- user = self.identity_api.create_user(user)
- user['password'] = password
+ user = unit.create_user(self.identity_api, domain_id=self.domain_id)
# Define domain and project authentication data
domain_auth_data = self.build_authentication_request(
@@ -2204,10 +1279,10 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
project_id=self.project_id)
# Check the user cannot get a domain nor a project token
- self.v3_authenticate_token(domain_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Grant non-inherited role for user on domain
non_inher_ud_link = self.build_role_assignment_link(
@@ -2215,12 +1290,12 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(non_inher_ud_link)
# Check the user can get only a domain token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Create inherited role
- inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+ inherited_role = unit.new_role_ref(name='inherited')
self.role_api.create_role(inherited_role['id'], inherited_role)
# Grant inherited role for user on domain
@@ -2230,33 +1305,30 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(inher_ud_link)
# Check the user can get both a domain and a project token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data)
# Delete inherited grant
self.delete(inher_ud_link)
# Check the user can only get a domain token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Delete non-inherited grant
self.delete(non_inher_ud_link)
# Check the user cannot get a domain token anymore
- self.v3_authenticate_token(domain_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_get_token_from_inherited_group_domain_role_grants(self):
# Create a new group and put a new user in it to
# ensure that no grant is loaded from sample data
- user = self.new_user_ref(domain_id=self.domain_id)
- password = user['password']
- user = self.identity_api.create_user(user)
- user['password'] = password
+ user = unit.create_user(self.identity_api, domain_id=self.domain_id)
- group = self.new_group_ref(domain_id=self.domain['id'])
+ group = unit.new_group_ref(domain_id=self.domain['id'])
group = self.identity_api.create_group(group)
self.identity_api.add_user_to_group(user['id'], group['id'])
@@ -2271,10 +1343,10 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
project_id=self.project_id)
# Check the user cannot get a domain nor a project token
- self.v3_authenticate_token(domain_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Grant non-inherited role for user on domain
non_inher_gd_link = self.build_role_assignment_link(
@@ -2282,12 +1354,12 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(non_inher_gd_link)
# Check the user can get only a domain token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Create inherited role
- inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+ inherited_role = unit.new_role_ref(name='inherited')
self.role_api.create_role(inherited_role['id'], inherited_role)
# Grant inherited role for user on domain
@@ -2297,27 +1369,27 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(inher_gd_link)
# Check the user can get both a domain and a project token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data)
# Delete inherited grant
self.delete(inher_gd_link)
# Check the user can only get a domain token
- self.v3_authenticate_token(domain_auth_data)
- self.v3_authenticate_token(project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data)
+ self.v3_create_token(project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Delete non-inherited grant
self.delete(non_inher_gd_link)
# Check the user cannot get a domain token anymore
- self.v3_authenticate_token(domain_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(domain_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def _test_crud_inherited_and_direct_assignment_on_target(self, target_url):
# Create a new role to avoid assignments loaded from sample data
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# Define URLs
@@ -2360,7 +1432,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
def test_crud_user_inherited_domain_role_grants(self):
role_list = []
for _ in range(2):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
@@ -2409,22 +1481,16 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
"""
role_list = []
for _ in range(4):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
- domain = self.new_domain_ref()
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user1 = self.new_user_ref(
- domain_id=domain['id'])
- password = user1['password']
- user1 = self.identity_api.create_user(user1)
- user1['password'] = password
- project1 = self.new_project_ref(
- domain_id=domain['id'])
+ user1 = unit.create_user(self.identity_api, domain_id=domain['id'])
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- project2 = self.new_project_ref(
- domain_id=domain['id'])
+ project2 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project2['id'], project2)
# Add some roles to the project
self.assignment_api.add_role_to_user_and_project(
@@ -2490,6 +1556,98 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
inherited_to_projects=True)
self.assertRoleAssignmentInListResponse(r, up_entity)
+ def test_list_role_assignments_include_names(self):
+ """Call ``GET /role_assignments with include names``.
+
+ Test Plan:
+
+ - Create a domain with a group and a user
+ - Create a project with a group and a user
+
+ """
+ role1 = unit.new_role_ref()
+ self.role_api.create_role(role1['id'], role1)
+ user1 = unit.create_user(self.identity_api, domain_id=self.domain_id)
+ group = unit.new_group_ref(domain_id=self.domain_id)
+ group = self.identity_api.create_group(group)
+ project1 = unit.new_project_ref(domain_id=self.domain_id)
+ self.resource_api.create_project(project1['id'], project1)
+
+ expected_entity1 = self.build_role_assignment_entity_include_names(
+ role_ref=role1,
+ project_ref=project1,
+ user_ref=user1)
+ self.put(expected_entity1['links']['assignment'])
+ expected_entity2 = self.build_role_assignment_entity_include_names(
+ role_ref=role1,
+ domain_ref=self.domain,
+ group_ref=group)
+ self.put(expected_entity2['links']['assignment'])
+ expected_entity3 = self.build_role_assignment_entity_include_names(
+ role_ref=role1,
+ domain_ref=self.domain,
+ user_ref=user1)
+ self.put(expected_entity3['links']['assignment'])
+ expected_entity4 = self.build_role_assignment_entity_include_names(
+ role_ref=role1,
+ project_ref=project1,
+ group_ref=group)
+ self.put(expected_entity4['links']['assignment'])
+
+ collection_url_domain = (
+ '/role_assignments?include_names&scope.domain.id=%(domain_id)s' % {
+ 'domain_id': self.domain_id})
+ rs_domain = self.get(collection_url_domain)
+ collection_url_project = (
+ '/role_assignments?include_names&'
+ 'scope.project.id=%(project_id)s' % {
+ 'project_id': project1['id']})
+ rs_project = self.get(collection_url_project)
+ collection_url_group = (
+ '/role_assignments?include_names&group.id=%(group_id)s' % {
+ 'group_id': group['id']})
+ rs_group = self.get(collection_url_group)
+ collection_url_user = (
+ '/role_assignments?include_names&user.id=%(user_id)s' % {
+ 'user_id': user1['id']})
+ rs_user = self.get(collection_url_user)
+ collection_url_role = (
+ '/role_assignments?include_names&role.id=%(role_id)s' % {
+ 'role_id': role1['id']})
+ rs_role = self.get(collection_url_role)
+ # Make sure all entities were created successfully
+ self.assertEqual(rs_domain.status_int, http_client.OK)
+ self.assertEqual(rs_project.status_int, http_client.OK)
+ self.assertEqual(rs_group.status_int, http_client.OK)
+ self.assertEqual(rs_user.status_int, http_client.OK)
+ # Make sure we can get back the correct number of entities
+ self.assertValidRoleAssignmentListResponse(
+ rs_domain,
+ expected_length=2,
+ resource_url=collection_url_domain)
+ self.assertValidRoleAssignmentListResponse(
+ rs_project,
+ expected_length=2,
+ resource_url=collection_url_project)
+ self.assertValidRoleAssignmentListResponse(
+ rs_group,
+ expected_length=2,
+ resource_url=collection_url_group)
+ self.assertValidRoleAssignmentListResponse(
+ rs_user,
+ expected_length=2,
+ resource_url=collection_url_user)
+ self.assertValidRoleAssignmentListResponse(
+ rs_role,
+ expected_length=4,
+ resource_url=collection_url_role)
+ # Verify all types of entities have the correct format
+ self.assertRoleAssignmentInListResponse(rs_domain, expected_entity2)
+ self.assertRoleAssignmentInListResponse(rs_project, expected_entity1)
+ self.assertRoleAssignmentInListResponse(rs_group, expected_entity4)
+ self.assertRoleAssignmentInListResponse(rs_user, expected_entity3)
+ self.assertRoleAssignmentInListResponse(rs_role, expected_entity1)
+
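An illustration of what one returned assignment might look like when
include_names is passed: each referenced entity carries a human-readable name
next to its ID. The exact field layout is an assumption here; the test
verifies it through build_role_assignment_entity_include_names:

    example_assignment = {
        'role': {'id': '<role1 id>', 'name': '<role1 name>'},
        'user': {'id': '<user1 id>', 'name': '<user1 name>'},
        'scope': {'project': {'id': '<project1 id>',
                              'name': '<project1 name>'}},
        'links': {'assignment': '<assignment URL>'},
    }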
def test_list_role_assignments_for_disabled_inheritance_extension(self):
"""Call ``GET /role_assignments with inherited domain grants``.
@@ -2503,25 +1661,18 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
shows up.
"""
-
role_list = []
for _ in range(4):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
- domain = self.new_domain_ref()
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user1 = self.new_user_ref(
- domain_id=domain['id'])
- password = user1['password']
- user1 = self.identity_api.create_user(user1)
- user1['password'] = password
- project1 = self.new_project_ref(
- domain_id=domain['id'])
+ user1 = unit.create_user(self.identity_api, domain_id=domain['id'])
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- project2 = self.new_project_ref(
- domain_id=domain['id'])
+ project2 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project2['id'], project2)
# Add some roles to the project
self.assignment_api.add_role_to_user_and_project(
@@ -2598,34 +1749,23 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
"""
role_list = []
for _ in range(4):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
- domain = self.new_domain_ref()
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user1 = self.new_user_ref(
- domain_id=domain['id'])
- password = user1['password']
- user1 = self.identity_api.create_user(user1)
- user1['password'] = password
- user2 = self.new_user_ref(
- domain_id=domain['id'])
- password = user2['password']
- user2 = self.identity_api.create_user(user2)
- user2['password'] = password
- group1 = self.new_group_ref(
- domain_id=domain['id'])
+ user1 = unit.create_user(self.identity_api, domain_id=domain['id'])
+ user2 = unit.create_user(self.identity_api, domain_id=domain['id'])
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
self.identity_api.add_user_to_group(user1['id'],
group1['id'])
self.identity_api.add_user_to_group(user2['id'],
group1['id'])
- project1 = self.new_project_ref(
- domain_id=domain['id'])
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- project2 = self.new_project_ref(
- domain_id=domain['id'])
+ project2 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project2['id'], project2)
# Add some roles to the project
self.assignment_api.add_role_to_user_and_project(
@@ -2704,25 +1844,18 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
"""
role_list = []
for _ in range(5):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
- domain = self.new_domain_ref()
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user1 = self.new_user_ref(
- domain_id=domain['id'])
- password = user1['password']
- user1 = self.identity_api.create_user(user1)
- user1['password'] = password
- group1 = self.new_group_ref(
- domain_id=domain['id'])
+ user1 = unit.create_user(self.identity_api, domain_id=domain['id'])
+ group1 = unit.new_group_ref(domain_id=domain['id'])
group1 = self.identity_api.create_group(group1)
- project1 = self.new_project_ref(
- domain_id=domain['id'])
+ project1 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project1['id'], project1)
- project2 = self.new_project_ref(
- domain_id=domain['id'])
+ project2 = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project2['id'], project2)
# Add some spoiler roles to the projects
self.assignment_api.add_role_to_user_and_project(
@@ -2790,17 +1923,17 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
"""
# Create project hierarchy
- root = self.new_project_ref(domain_id=self.domain['id'])
- leaf = self.new_project_ref(domain_id=self.domain['id'],
+ root = unit.new_project_ref(domain_id=self.domain['id'])
+ leaf = unit.new_project_ref(domain_id=self.domain['id'],
parent_id=root['id'])
self.resource_api.create_project(root['id'], root)
self.resource_api.create_project(leaf['id'], leaf)
# Create 'non-inherited' and 'inherited' roles
- non_inherited_role = {'id': uuid.uuid4().hex, 'name': 'non-inherited'}
+ non_inherited_role = unit.new_role_ref(name='non-inherited')
self.role_api.create_role(non_inherited_role['id'], non_inherited_role)
- inherited_role = {'id': uuid.uuid4().hex, 'name': 'inherited'}
+ inherited_role = unit.new_role_ref(name='inherited')
self.role_api.create_role(inherited_role['id'], inherited_role)
return (root['id'], leaf['id'],
@@ -2822,10 +1955,10 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
project_id=leaf_id)
# Check the user cannot get a token on root nor leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Grant non-inherited role for user on leaf project
non_inher_up_link = self.build_role_assignment_link(
@@ -2834,9 +1967,9 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(non_inher_up_link)
# Check the user can only get a token on leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data)
# Grant inherited role for user on root project
inher_up_link = self.build_role_assignment_link(
@@ -2845,24 +1978,24 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(inher_up_link)
# Check the user still can get a token only on leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data)
# Delete non-inherited grant
self.delete(non_inher_up_link)
# Check the inherited role still applies for leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data)
# Delete inherited grant
self.delete(inher_up_link)
# Check the user cannot get a token on leaf project anymore
- self.v3_authenticate_token(leaf_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_get_token_from_inherited_group_project_role_grants(self):
# Create default scenario
@@ -2870,7 +2003,7 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self._setup_hierarchical_projects_scenario())
# Create group and add user to it
- group = self.new_group_ref(domain_id=self.domain['id'])
+ group = unit.new_group_ref(domain_id=self.domain['id'])
group = self.identity_api.create_group(group)
self.identity_api.add_user_to_group(self.user['id'], group['id'])
@@ -2885,10 +2018,10 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
project_id=leaf_id)
# Check the user cannot get a token on root nor leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# Grant non-inherited role for group on leaf project
non_inher_gp_link = self.build_role_assignment_link(
@@ -2897,9 +2030,9 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(non_inher_gp_link)
# Check the user can only get a token on leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data)
# Grant inherited role for group on root project
inher_gp_link = self.build_role_assignment_link(
@@ -2908,22 +2041,22 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
self.put(inher_gp_link)
# Check the user still can get a token only on leaf project
- self.v3_authenticate_token(root_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(root_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data)
        # Delete non-inherited grant
self.delete(non_inher_gp_link)
# Check the inherited role still applies for leaf project
- self.v3_authenticate_token(leaf_project_auth_data)
+ self.v3_create_token(leaf_project_auth_data)
# Delete inherited grant
self.delete(inher_gp_link)
# Check the user cannot get a token on leaf project anymore
- self.v3_authenticate_token(leaf_project_auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(leaf_project_auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_get_role_assignments_for_project_hierarchy(self):
"""Call ``GET /role_assignments``.
@@ -3028,6 +2161,154 @@ class AssignmentInheritanceTestCase(test_v3.RestfulTestCase,
inher_up_entity['scope']['project']['id'] = leaf_id
self.assertRoleAssignmentInListResponse(r, inher_up_entity)
+ def test_project_id_specified_if_include_subtree_specified(self):
+ """When using include_subtree, you must specify a project ID."""
+ self.get('/role_assignments?include_subtree=True',
+ expected_status=http_client.BAD_REQUEST)
+ self.get('/role_assignments?scope.project.id&'
+ 'include_subtree=True',
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_get_role_assignments_for_project_tree(self):
+        """Get role_assignments?scope.project.id=X&include_subtree.
+
+ Test Plan:
+
+ - Create 2 roles and a hierarchy of projects with one root and one leaf
+ - Issue the URL to add a non-inherited user role to the root project
+ and the leaf project
+ - Issue the URL to get role assignments for the root project but
+ not the subtree - this should return just the root assignment
+ - Issue the URL to get role assignments for the root project and
+ it's subtree - this should return both assignments
+        - Check that explicitly setting include_subtree to False is
+          equivalent to not including it at all in the query.
+
+ """
+ # Create default scenario
+ root_id, leaf_id, non_inherited_role_id, unused_role_id = (
+ self._setup_hierarchical_projects_scenario())
+
+ # Grant non-inherited role to root and leaf projects
+ non_inher_entity_root = self.build_role_assignment_entity(
+ project_id=root_id, user_id=self.user['id'],
+ role_id=non_inherited_role_id)
+ self.put(non_inher_entity_root['links']['assignment'])
+ non_inher_entity_leaf = self.build_role_assignment_entity(
+ project_id=leaf_id, user_id=self.user['id'],
+ role_id=non_inherited_role_id)
+ self.put(non_inher_entity_leaf['links']['assignment'])
+
+ # Without the subtree, we should get the one assignment on the
+ # root project
+ collection_url = (
+ '/role_assignments?scope.project.id=%(project)s' % {
+ 'project': root_id})
+ r = self.get(collection_url)
+ self.assertValidRoleAssignmentListResponse(
+ r, resource_url=collection_url)
+
+ self.assertThat(r.result['role_assignments'], matchers.HasLength(1))
+ self.assertRoleAssignmentInListResponse(r, non_inher_entity_root)
+
+ # With the subtree, we should get both assignments
+ collection_url = (
+ '/role_assignments?scope.project.id=%(project)s'
+ '&include_subtree=True' % {
+ 'project': root_id})
+ r = self.get(collection_url)
+ self.assertValidRoleAssignmentListResponse(
+ r, resource_url=collection_url)
+
+ self.assertThat(r.result['role_assignments'], matchers.HasLength(2))
+ self.assertRoleAssignmentInListResponse(r, non_inher_entity_root)
+ self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf)
+
+ # With subtree=0, we should also only get the one assignment on the
+ # root project
+ collection_url = (
+ '/role_assignments?scope.project.id=%(project)s'
+ '&include_subtree=0' % {
+ 'project': root_id})
+ r = self.get(collection_url)
+ self.assertValidRoleAssignmentListResponse(
+ r, resource_url=collection_url)
+
+ self.assertThat(r.result['role_assignments'], matchers.HasLength(1))
+ self.assertRoleAssignmentInListResponse(r, non_inher_entity_root)
+
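+
The include_subtree filter exercised above is plain query-string usage on the role assignments collection. As a minimal client-side sketch (not part of this patch; the endpoint URL and token are assumed placeholders):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'      # assumed endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # assumed pre-fetched token

    def list_assignments(project_id, include_subtree=False):
        # GET /role_assignments?scope.project.id=X[&include_subtree=True]
        url = '%s/role_assignments?scope.project.id=%s' % (KEYSTONE,
                                                           project_id)
        if include_subtree:
            url += '&include_subtree=True'
        resp = requests.get(url, headers=HEADERS)
        resp.raise_for_status()
        return resp.json()['role_assignments']

Omitting include_subtree (or passing 0/False) returns only the assignments on the project itself, exactly as the test asserts.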
+ def test_get_effective_role_assignments_for_project_tree(self):
+        """Get role_assignments?scope.project.id=X&include_subtree&effective.
+
+ Test Plan:
+
+ - Create 2 roles and a hierarchy of projects with one root and 4 levels
+          of child projects
+ - Issue the URL to add a non-inherited user role to the root project
+ and a level 1 project
+ - Issue the URL to add an inherited user role on the level 2 project
+ - Issue the URL to get effective role assignments for the level 1
+          project and its subtree - this should return a role (non-inherited)
+ on the level 1 project and roles (inherited) on each of the level
+ 2, 3 and 4 projects
+
+ """
+ # Create default scenario
+ root_id, leaf_id, non_inherited_role_id, inherited_role_id = (
+ self._setup_hierarchical_projects_scenario())
+
+ # Add some extra projects to the project hierarchy
+ level2 = unit.new_project_ref(domain_id=self.domain['id'],
+ parent_id=leaf_id)
+ level3 = unit.new_project_ref(domain_id=self.domain['id'],
+ parent_id=level2['id'])
+ level4 = unit.new_project_ref(domain_id=self.domain['id'],
+ parent_id=level3['id'])
+ self.resource_api.create_project(level2['id'], level2)
+ self.resource_api.create_project(level3['id'], level3)
+ self.resource_api.create_project(level4['id'], level4)
+
+ # Grant non-inherited role to root (as a spoiler) and to
+ # the level 1 (leaf) project
+ non_inher_entity_root = self.build_role_assignment_entity(
+ project_id=root_id, user_id=self.user['id'],
+ role_id=non_inherited_role_id)
+ self.put(non_inher_entity_root['links']['assignment'])
+ non_inher_entity_leaf = self.build_role_assignment_entity(
+ project_id=leaf_id, user_id=self.user['id'],
+ role_id=non_inherited_role_id)
+ self.put(non_inher_entity_leaf['links']['assignment'])
+
+ # Grant inherited role to level 2
+ inher_entity = self.build_role_assignment_entity(
+ project_id=level2['id'], user_id=self.user['id'],
+ role_id=inherited_role_id, inherited_to_projects=True)
+ self.put(inher_entity['links']['assignment'])
+
+ # Get effective role assignments
+ collection_url = (
+ '/role_assignments?scope.project.id=%(project)s'
+ '&include_subtree=True&effective' % {
+ 'project': leaf_id})
+ r = self.get(collection_url)
+ self.assertValidRoleAssignmentListResponse(
+ r, resource_url=collection_url)
+
+ # There should be three assignments returned in total
+ self.assertThat(r.result['role_assignments'], matchers.HasLength(3))
+
+        # Assert that the user does not have the non-inherited role on root
+        self.assertRoleAssignmentNotInListResponse(r, non_inher_entity_root)
+
+        # Assert that the user has the non-inherited role on the leaf project
+        self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf)
+
+        # Assert that the user has the inherited role on levels 3 and 4
+ inher_entity['scope']['project']['id'] = level3['id']
+ self.assertRoleAssignmentInListResponse(r, inher_entity)
+ inher_entity['scope']['project']['id'] = level4['id']
+ self.assertRoleAssignmentInListResponse(r, inher_entity)
+
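+
The effective view combines subtree expansion with inherited-grant expansion: a single inherited grant is reported once per descendant project. A self-contained sketch under the same assumptions as the earlier one (placeholder endpoint, token, and project ID):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'      # assumed endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # assumed token
    project_id = 'LEAF_PROJECT_ID'             # assumed project ID

    url = ('%s/role_assignments?scope.project.id=%s'
           '&include_subtree=True&effective' % (KEYSTONE, project_id))
    effective = requests.get(url, headers=HEADERS).json()['role_assignments']
    # For the scenario above this yields the non-inherited role on the
    # queried project plus the inherited role on each project below it.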
def test_get_inherited_role_assignments_for_project_hierarchy(self):
"""Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``.
@@ -3089,7 +2370,7 @@ class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase):
self.config_fixture.config(group='os_inherit', enabled=False)
def test_crud_inherited_role_grants_failed_if_disabled(self):
- role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
base_collection_url = (
@@ -3107,118 +2388,484 @@ class AssignmentInheritanceDisabledTestCase(test_v3.RestfulTestCase):
self.delete(member_url, expected_status=http_client.NOT_FOUND)
-class AssignmentV3toV2MethodsTestCase(unit.TestCase):
- """Test domain V3 to V2 conversion methods."""
- def _setup_initial_projects(self):
- self.project_id = uuid.uuid4().hex
- self.domain_id = CONF.identity.default_domain_id
- self.parent_id = uuid.uuid4().hex
- # Project with only domain_id in ref
- self.project1 = {'id': self.project_id,
- 'name': self.project_id,
- 'domain_id': self.domain_id}
- # Project with both domain_id and parent_id in ref
- self.project2 = {'id': self.project_id,
- 'name': self.project_id,
- 'domain_id': self.domain_id,
- 'parent_id': self.parent_id}
- # Project with no domain_id and parent_id in ref
- self.project3 = {'id': self.project_id,
- 'name': self.project_id,
- 'domain_id': self.domain_id,
- 'parent_id': self.parent_id}
- # Expected result with no domain_id and parent_id
- self.expected_project = {'id': self.project_id,
- 'name': self.project_id}
-
- def test_v2controller_filter_domain_id(self):
- # V2.0 is not domain aware, ensure domain_id is popped off the ref.
- other_data = uuid.uuid4().hex
- domain_id = CONF.identity.default_domain_id
- ref = {'domain_id': domain_id,
- 'other_data': other_data}
-
- ref_no_domain = {'other_data': other_data}
- expected_ref = ref_no_domain.copy()
-
- updated_ref = controller.V2Controller.filter_domain_id(ref)
- self.assertIs(ref, updated_ref)
- self.assertDictEqual(ref, expected_ref)
- # Make sure we don't error/muck up data if domain_id isn't present
- updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain)
- self.assertIs(ref_no_domain, updated_ref)
- self.assertDictEqual(ref_no_domain, expected_ref)
-
- def test_v3controller_filter_domain_id(self):
- # No data should be filtered out in this case.
- other_data = uuid.uuid4().hex
- domain_id = uuid.uuid4().hex
- ref = {'domain_id': domain_id,
- 'other_data': other_data}
-
- expected_ref = ref.copy()
- updated_ref = controller.V3Controller.filter_domain_id(ref)
- self.assertIs(ref, updated_ref)
- self.assertDictEqual(ref, expected_ref)
-
- def test_v2controller_filter_domain(self):
- other_data = uuid.uuid4().hex
- domain_id = uuid.uuid4().hex
- non_default_domain_ref = {'domain': {'id': domain_id},
- 'other_data': other_data}
- default_domain_ref = {'domain': {'id': 'default'},
- 'other_data': other_data}
- updated_ref = controller.V2Controller.filter_domain(default_domain_ref)
- self.assertNotIn('domain', updated_ref)
- self.assertRaises(exception.Unauthorized,
- controller.V2Controller.filter_domain,
- non_default_domain_ref)
-
- def test_v2controller_filter_project_parent_id(self):
- # V2.0 is not project hierarchy aware, ensure parent_id is popped off.
- other_data = uuid.uuid4().hex
- parent_id = uuid.uuid4().hex
- ref = {'parent_id': parent_id,
- 'other_data': other_data}
-
- ref_no_parent = {'other_data': other_data}
- expected_ref = ref_no_parent.copy()
-
- updated_ref = controller.V2Controller.filter_project_parent_id(ref)
- self.assertIs(ref, updated_ref)
- self.assertDictEqual(ref, expected_ref)
- # Make sure we don't error/muck up data if parent_id isn't present
- updated_ref = controller.V2Controller.filter_project_parent_id(
- ref_no_parent)
- self.assertIs(ref_no_parent, updated_ref)
- self.assertDictEqual(ref_no_parent, expected_ref)
-
- def test_v3_to_v2_project_method(self):
- self._setup_initial_projects()
- updated_project1 = controller.V2Controller.v3_to_v2_project(
- self.project1)
- self.assertIs(self.project1, updated_project1)
- self.assertDictEqual(self.project1, self.expected_project)
- updated_project2 = controller.V2Controller.v3_to_v2_project(
- self.project2)
- self.assertIs(self.project2, updated_project2)
- self.assertDictEqual(self.project2, self.expected_project)
- updated_project3 = controller.V2Controller.v3_to_v2_project(
- self.project3)
- self.assertIs(self.project3, updated_project3)
- self.assertDictEqual(self.project3, self.expected_project)
-
- def test_v3_to_v2_project_method_list(self):
- self._setup_initial_projects()
- project_list = [self.project1, self.project2, self.project3]
- updated_list = controller.V2Controller.v3_to_v2_project(project_list)
-
- self.assertEqual(len(updated_list), len(project_list))
-
- for i, ref in enumerate(updated_list):
- # Order should not change.
- self.assertIs(ref, project_list[i])
-
- self.assertDictEqual(self.project1, self.expected_project)
- self.assertDictEqual(self.project2, self.expected_project)
- self.assertDictEqual(self.project3, self.expected_project)
+class ImpliedRolesTests(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin,
+ unit.TestCase):
+ def _create_role(self):
+ """Call ``POST /roles``."""
+ ref = unit.new_role_ref()
+ r = self.post('/roles', body={'role': ref})
+ return self.assertValidRoleResponse(r, ref)
+
+ def test_list_implied_roles_none(self):
+ self.prior = self._create_role()
+ url = '/roles/%s/implies' % (self.prior['id'])
+ response = self.get(url).json["role_inference"]
+ self.assertEqual(self.prior['id'], response['prior_role']['id'])
+ self.assertEqual(0, len(response['implies']))
+
+ def _create_implied_role(self, prior, implied):
+ self.put('/roles/%s/implies/%s' % (prior['id'], implied['id']),
+ expected_status=http_client.CREATED)
+
+ def _delete_implied_role(self, prior, implied):
+ self.delete('/roles/%s/implies/%s' % (prior['id'], implied['id']))
+
+ def _setup_prior_two_implied(self):
+ self.prior = self._create_role()
+ self.implied1 = self._create_role()
+ self._create_implied_role(self.prior, self.implied1)
+ self.implied2 = self._create_role()
+ self._create_implied_role(self.prior, self.implied2)
+
+ def _assert_expected_implied_role_response(
+ self, expected_prior_id, expected_implied_ids):
+ r = self.get('/roles/%s/implies' % expected_prior_id)
+ response = r.json["role_inference"]
+ self.assertEqual(expected_prior_id, response['prior_role']['id'])
+
+ actual_implied_ids = [implied['id'] for implied in response['implies']]
+
+ for expected_id in expected_implied_ids:
+ self.assertIn(expected_id, actual_implied_ids)
+ self.assertEqual(len(expected_implied_ids), len(response['implies']))
+
+ self.assertIsNotNone(response['prior_role']['links']['self'])
+ for implied in response['implies']:
+ self.assertIsNotNone(implied['links']['self'])
+
+ def _assert_two_roles_implied(self):
+ self._assert_expected_implied_role_response(
+ self.prior['id'], [self.implied1['id'], self.implied2['id']])
+
+ def _assert_one_role_implied(self):
+ self._assert_expected_implied_role_response(
+ self.prior['id'], [self.implied1['id']])
+
+ self.get('/roles/%s/implies/%s' %
+ (self.prior['id'], self.implied2['id']),
+ expected_status=http_client.NOT_FOUND)
+
+ def _assert_two_rules_defined(self):
+ r = self.get('/role_inferences/')
+
+ rules = r.result['role_inferences']
+
+ self.assertEqual(self.prior['id'], rules[0]['prior_role']['id'])
+ self.assertEqual(2, len(rules[0]['implies']))
+ implied_ids = [implied['id'] for implied in rules[0]['implies']]
+ implied_names = [implied['name'] for implied in rules[0]['implies']]
+
+ self.assertIn(self.implied1['id'], implied_ids)
+ self.assertIn(self.implied2['id'], implied_ids)
+ self.assertIn(self.implied1['name'], implied_names)
+ self.assertIn(self.implied2['name'], implied_names)
+
+ def _assert_one_rule_defined(self):
+ r = self.get('/role_inferences/')
+ rules = r.result['role_inferences']
+ self.assertEqual(self.prior['id'], rules[0]['prior_role']['id'])
+ self.assertEqual(self.implied1['id'], rules[0]['implies'][0]['id'])
+ self.assertEqual(self.implied1['name'], rules[0]['implies'][0]['name'])
+ self.assertEqual(1, len(rules[0]['implies']))
+
+ def test_list_all_rules(self):
+ self._setup_prior_two_implied()
+ self._assert_two_rules_defined()
+
+ self._delete_implied_role(self.prior, self.implied2)
+ self._assert_one_rule_defined()
+
+ def test_CRD_implied_roles(self):
+ self._setup_prior_two_implied()
+ self._assert_two_roles_implied()
+
+ self._delete_implied_role(self.prior, self.implied2)
+ self._assert_one_role_implied()
+
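+
The helpers above wrap a plain sub-resource of /roles. A minimal sketch of the same create/check/delete cycle (placeholder endpoint, token, and role IDs assumed):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'      # assumed endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # assumed token
    prior, implied = 'PRIOR_ROLE_ID', 'IMPLIED_ROLE_ID'  # assumed role IDs

    url = '%s/roles/%s/implies/%s' % (KEYSTONE, prior, implied)
    requests.put(url, headers=HEADERS)      # create rule  -> 201 Created
    requests.get(url, headers=HEADERS)      # check rule   -> 200 OK
    requests.delete(url, headers=HEADERS)   # delete rule  -> 204 No Content
    # GET /roles/{prior}/implies lists every rule for the prior role, and
    # GET /role_inferences/ lists every rule in the deployment.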
+ def _create_three_roles(self):
+ self.role_list = []
+ for _ in range(3):
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+ self.role_list.append(role)
+
+ def _create_test_domain_user_project(self):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+ user = unit.create_user(self.identity_api, domain_id=domain['id'])
+ project = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project['id'], project)
+ return domain, user, project
+
+ def _assign_top_role_to_user_on_project(self, user, project):
+ self.assignment_api.add_role_to_user_and_project(
+ user['id'], project['id'], self.role_list[0]['id'])
+
+ def _build_effective_role_assignments_url(self, user):
+ return '/role_assignments?effective&user.id=%(user_id)s' % {
+ 'user_id': user['id']}
+
+ def _assert_all_roles_in_assignment(self, response, user):
+ # Now use the list role assignments api to check that all three roles
+ # appear in the collection
+ self.assertValidRoleAssignmentListResponse(
+ response,
+ expected_length=len(self.role_list),
+ resource_url=self._build_effective_role_assignments_url(user))
+
+ def _assert_initial_assignment_in_effective(self, response, user, project):
+ # The initial assignment should be there (the link url will be
+ # generated and checked automatically since it matches the assignment)
+ entity = self.build_role_assignment_entity(
+ project_id=project['id'],
+ user_id=user['id'], role_id=self.role_list[0]['id'])
+ self.assertRoleAssignmentInListResponse(response, entity)
+
+ def _assert_effective_role_for_implied_has_prior_in_links(
+ self, response, user, project, prior_index, implied_index):
+ # An effective role for an implied role will have the prior role
+ # assignment in the links
+ prior_link = '/prior_roles/%(prior)s/implies/%(implied)s' % {
+ 'prior': self.role_list[prior_index]['id'],
+ 'implied': self.role_list[implied_index]['id']}
+ link = self.build_role_assignment_link(
+ project_id=project['id'], user_id=user['id'],
+ role_id=self.role_list[prior_index]['id'])
+ entity = self.build_role_assignment_entity(
+ link=link, project_id=project['id'],
+ user_id=user['id'], role_id=self.role_list[implied_index]['id'],
+ prior_link=prior_link)
+ self.assertRoleAssignmentInListResponse(response, entity)
+
+ def test_list_role_assignments_with_implied_roles(self):
+ """Call ``GET /role_assignments`` with implied role grant.
+
+ Test Plan:
+
+ - Create a domain with a user and a project
+ - Create 3 roles
+ - Role 0 implies role 1 and role 1 implies role 2
+ - Assign the top role to the project
+ - Issue the URL to check effective roles on project - this
+ should return all 3 roles.
+ - Check the links of the 3 roles indicate the prior role where
+ appropriate
+
+ """
+ (domain, user, project) = self._create_test_domain_user_project()
+ self._create_three_roles()
+ self._create_implied_role(self.role_list[0], self.role_list[1])
+ self._create_implied_role(self.role_list[1], self.role_list[2])
+ self._assign_top_role_to_user_on_project(user, project)
+
+        response = self.get(self._build_effective_role_assignments_url(user))
+
+        self._assert_all_roles_in_assignment(response, user)
+ self._assert_initial_assignment_in_effective(response, user, project)
+ self._assert_effective_role_for_implied_has_prior_in_links(
+ response, user, project, 0, 1)
+ self._assert_effective_role_for_implied_has_prior_in_links(
+ response, user, project, 1, 2)
+
+ def _create_named_role(self, name):
+ role = unit.new_role_ref()
+ role['name'] = name
+ self.role_api.create_role(role['id'], role)
+ return role
+
+ def test_root_role_as_implied_role_forbidden(self):
+ """Test root role is forbidden to be set as an implied role.
+
+ Create 2 roles that are prohibited from being an implied role.
+ Create 1 additional role which should be accepted as an implied
+        role. Ensure the prohibited role names cannot be set as an implied
+        role. Ensure the accepted role name, which is not on the
+        prohibited list, can be successfully set as an implied
+ role.
+ """
+ prohibited_name1 = 'root1'
+ prohibited_name2 = 'root2'
+ accepted_name1 = 'implied1'
+
+ prohibited_names = [prohibited_name1, prohibited_name2]
+ self.config_fixture.config(group='assignment',
+ prohibited_implied_role=prohibited_names)
+
+ prior_role = self._create_role()
+
+ prohibited_role1 = self._create_named_role(prohibited_name1)
+ url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format(
+ prior_role_id=prior_role['id'],
+ implied_role_id=prohibited_role1['id'])
+ self.put(url, expected_status=http_client.FORBIDDEN)
+
+ prohibited_role2 = self._create_named_role(prohibited_name2)
+ url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format(
+ prior_role_id=prior_role['id'],
+ implied_role_id=prohibited_role2['id'])
+ self.put(url, expected_status=http_client.FORBIDDEN)
+
+ accepted_role1 = self._create_named_role(accepted_name1)
+ url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format(
+ prior_role_id=prior_role['id'],
+ implied_role_id=accepted_role1['id'])
+ self.put(url, expected_status=http_client.CREATED)
+
+ def test_trusts_from_implied_role(self):
+ self._create_three_roles()
+ self._create_implied_role(self.role_list[0], self.role_list[1])
+ self._create_implied_role(self.role_list[1], self.role_list[2])
+ self._assign_top_role_to_user_on_project(self.user, self.project)
+
+ # Create a trustee and assign the prior role to her
+ trustee = unit.create_user(self.identity_api, domain_id=self.domain_id)
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user['id'],
+ trustee_user_id=trustee['id'],
+ project_id=self.project['id'],
+ role_ids=[self.role_list[0]['id']])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = r.result['trust']
+
+ # Only the role that was specified is in the trust, NOT implied roles
+ self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id'])
+ self.assertThat(trust['roles'], matchers.HasLength(1))
+
+ # Authenticate as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password'],
+ trust_id=trust['id'])
+ r = self.v3_create_token(auth_data)
+ token = r.result['token']
+ self.assertThat(token['roles'],
+ matchers.HasLength(len(self.role_list)))
+ for role in token['roles']:
+ self.assertIn(role, self.role_list)
+ for role in self.role_list:
+ self.assertIn(role, token['roles'])
+
+ def test_trusts_from_domain_specific_implied_role(self):
+ self._create_three_roles()
+ # Overwrite the first role with a domain specific role
+ role = unit.new_role_ref(domain_id=self.domain_id)
+ self.role_list[0] = self.role_api.create_role(role['id'], role)
+ self._create_implied_role(self.role_list[0], self.role_list[1])
+ self._create_implied_role(self.role_list[1], self.role_list[2])
+ self._assign_top_role_to_user_on_project(self.user, self.project)
+
+ # Create a trustee and assign the prior role to her
+ trustee = unit.create_user(self.identity_api, domain_id=self.domain_id)
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user['id'],
+ trustee_user_id=trustee['id'],
+ project_id=self.project['id'],
+ role_ids=[self.role_list[0]['id']])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = r.result['trust']
+
+ # Only the role that was specified is in the trust, NOT implied roles
+ self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id'])
+ self.assertThat(trust['roles'], matchers.HasLength(1))
+
+ # Authenticate as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password'],
+ trust_id=trust['id'])
+ r = self.v3_create_token(auth_data)
+ token = r.result['token']
+
+        # The token should have the roles implied by the domain specific role,
+ # but not the domain specific role itself.
+ self.assertThat(token['roles'],
+ matchers.HasLength(len(self.role_list) - 1))
+ for role in token['roles']:
+ self.assertIn(role, self.role_list)
+ for role in [self.role_list[1], self.role_list[2]]:
+ self.assertIn(role, token['roles'])
+ self.assertNotIn(self.role_list[0], token['roles'])
+
+
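As the two trust tests assert, a trust stores only the role named at creation time; implied roles are expanded when a trust-scoped token is issued. A hedged sketch of the trust-creation call itself (placeholder endpoint, token, and IDs throughout):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'        # assumed endpoint
    HEADERS = {'X-Auth-Token': 'TRUSTOR_TOKEN'}  # assumed trustor token

    body = {'trust': {
        'trustor_user_id': 'TRUSTOR_ID',     # assumed IDs
        'trustee_user_id': 'TRUSTEE_ID',
        'project_id': 'PROJECT_ID',
        'impersonation': False,
        'roles': [{'id': 'PRIOR_ROLE_ID'}],  # only this role is stored
    }}
    r = requests.post('%s/OS-TRUST/trusts' % KEYSTONE,
                      json=body, headers=HEADERS)
    trust = r.json()['trust']
    # A token later scoped to trust['id'] carries the prior role plus
    # whatever it implies, as the assertions above check.
+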
+class DomainSpecificRoleTests(test_v3.RestfulTestCase, unit.TestCase):
+ def setUp(self):
+ def create_role(domain_id=None):
+ """Call ``POST /roles``."""
+ ref = unit.new_role_ref(domain_id=domain_id)
+ r = self.post(
+ '/roles',
+ body={'role': ref})
+ return self.assertValidRoleResponse(r, ref)
+
+ super(DomainSpecificRoleTests, self).setUp()
+ self.domainA = unit.new_domain_ref()
+ self.resource_api.create_domain(self.domainA['id'], self.domainA)
+ self.domainB = unit.new_domain_ref()
+ self.resource_api.create_domain(self.domainB['id'], self.domainB)
+
+ self.global_role1 = create_role()
+ self.global_role2 = create_role()
+        # Since there may be other global roles already created, count them
+        # so we can check that subsequent list responses are correct.
+ r = self.get('/roles')
+ self.existing_global_roles = len(r.result['roles'])
+
+ # And now create some domain specific roles
+ self.domainA_role1 = create_role(domain_id=self.domainA['id'])
+ self.domainA_role2 = create_role(domain_id=self.domainA['id'])
+ self.domainB_role = create_role(domain_id=self.domainB['id'])
+
+ def test_get_and_list_domain_specific_roles(self):
+ # Check we can get a domain specific role
+ r = self.get('/roles/%s' % self.domainA_role1['id'])
+ self.assertValidRoleResponse(r, self.domainA_role1)
+
+ # If we list without specifying a domain, we should only get global
+ # roles back.
+ r = self.get('/roles')
+ self.assertValidRoleListResponse(
+ r, expected_length=self.existing_global_roles)
+ self.assertRoleInListResponse(r, self.global_role1)
+ self.assertRoleInListResponse(r, self.global_role2)
+ self.assertRoleNotInListResponse(r, self.domainA_role1)
+ self.assertRoleNotInListResponse(r, self.domainA_role2)
+ self.assertRoleNotInListResponse(r, self.domainB_role)
+
+ # Now list those in domainA, making sure that's all we get back
+ r = self.get('/roles?domain_id=%s' % self.domainA['id'])
+ self.assertValidRoleListResponse(r, expected_length=2)
+ self.assertRoleInListResponse(r, self.domainA_role1)
+ self.assertRoleInListResponse(r, self.domainA_role2)
+
+ def test_update_domain_specific_roles(self):
+ self.domainA_role1['name'] = uuid.uuid4().hex
+ self.patch('/roles/%(role_id)s' % {
+ 'role_id': self.domainA_role1['id']},
+ body={'role': self.domainA_role1})
+ r = self.get('/roles/%s' % self.domainA_role1['id'])
+ self.assertValidRoleResponse(r, self.domainA_role1)
+
+ def test_delete_domain_specific_roles(self):
+ # Check delete only removes that one domain role
+ self.delete('/roles/%(role_id)s' % {
+ 'role_id': self.domainA_role1['id']})
+
+ self.get('/roles/%s' % self.domainA_role1['id'],
+ expected_status=http_client.NOT_FOUND)
+ # Now re-list those in domainA, making sure there's only one left
+ r = self.get('/roles?domain_id=%s' % self.domainA['id'])
+ self.assertValidRoleListResponse(r, expected_length=1)
+ self.assertRoleInListResponse(r, self.domainA_role2)
+
+
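As the listing tests above encode, GET /roles without a filter returns only global roles; domain-specific roles appear only under an explicit domain_id filter. A minimal sketch (placeholder endpoint, token, and domain ID):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'      # assumed endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # assumed token

    global_roles = requests.get(
        '%s/roles' % KEYSTONE, headers=HEADERS).json()['roles']
    domain_roles = requests.get(
        '%s/roles?domain_id=%s' % (KEYSTONE, 'DOMAIN_A_ID'),
        headers=HEADERS).json()['roles']
    # The two listings are disjoint: a role created with a domain_id never
    # shows up in the unfiltered collection.
+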
+class ListUserProjectsTestCase(test_v3.RestfulTestCase):
+    """Tests for /users/<user>/projects."""
+
+ def load_sample_data(self):
+        # Do not load the base class's data; keep it focused on these tests.
+
+ self.auths = []
+ self.domains = []
+ self.projects = []
+ self.roles = []
+ self.users = []
+
+ # Create 3 sets of domain, roles, projects, and users to demonstrate
+ # the right user's data is loaded and only projects they can access
+ # are returned.
+
+ for _ in range(3):
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+
+ user = unit.create_user(self.identity_api, domain_id=domain['id'])
+
+ role = unit.new_role_ref()
+ self.role_api.create_role(role['id'], role)
+
+ self.assignment_api.create_grant(role['id'],
+ user_id=user['id'],
+ domain_id=domain['id'])
+
+ project = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project['id'], project)
+
+ self.assignment_api.create_grant(role['id'],
+ user_id=user['id'],
+ project_id=project['id'])
+
+ auth = self.build_authentication_request(
+ user_id=user['id'],
+ password=user['password'],
+ domain_id=domain['id'])
+
+ self.auths.append(auth)
+ self.domains.append(domain)
+ self.projects.append(project)
+ self.roles.append(role)
+ self.users.append(user)
+
+ def test_list_all(self):
+ for i in range(len(self.users)):
+ user = self.users[i]
+ auth = self.auths[i]
+
+ url = '/users/%s/projects' % user['id']
+ result = self.get(url, auth=auth)
+ projects_result = result.json['projects']
+ self.assertEqual(1, len(projects_result))
+ self.assertEqual(self.projects[i]['id'], projects_result[0]['id'])
+
+ def test_list_enabled(self):
+ for i in range(len(self.users)):
+ user = self.users[i]
+ auth = self.auths[i]
+
+ # There are no disabled projects
+ url = '/users/%s/projects?enabled=True' % user['id']
+ result = self.get(url, auth=auth)
+ projects_result = result.json['projects']
+ self.assertEqual(1, len(projects_result))
+ self.assertEqual(self.projects[i]['id'], projects_result[0]['id'])
+
+ def test_list_disabled(self):
+ for i in range(len(self.users)):
+ user = self.users[i]
+ auth = self.auths[i]
+ project = self.projects[i]
+
+            # There are no disabled projects yet
+ url = '/users/%s/projects?enabled=False' % user['id']
+ result = self.get(url, auth=auth)
+ self.assertEqual(0, len(result.json['projects']))
+
+ # disable this one and check again
+ project['enabled'] = False
+ self.resource_api.update_project(project['id'], project)
+ result = self.get(url, auth=auth)
+ projects_result = result.json['projects']
+ self.assertEqual(1, len(projects_result))
+ self.assertEqual(self.projects[i]['id'], projects_result[0]['id'])
+
+ def test_list_by_domain_id(self):
+ for i in range(len(self.users)):
+ user = self.users[i]
+ domain = self.domains[i]
+ auth = self.auths[i]
+
+ # Try looking for projects with a non-existent domain_id
+ url = '/users/%s/projects?domain_id=%s' % (user['id'],
+ uuid.uuid4().hex)
+ result = self.get(url, auth=auth)
+ self.assertEqual(0, len(result.json['projects']))
+
+ # Now try a valid one
+ url = '/users/%s/projects?domain_id=%s' % (user['id'],
+ domain['id'])
+ result = self.get(url, auth=auth)
+ projects_result = result.json['projects']
+ self.assertEqual(1, len(projects_result))
+ self.assertEqual(self.projects[i]['id'], projects_result[0]['id'])
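GET /users/{user_id}/projects, exercised throughout this class, accepts the same enabled and domain_id filters the tests use. A small sketch (placeholder endpoint, token, and IDs):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'     # assumed endpoint
    HEADERS = {'X-Auth-Token': 'USER_TOKEN'}  # assumed token
    user_id = 'USER_ID'                       # assumed user ID

    url = '%s/users/%s/projects' % (KEYSTONE, user_id)
    everything = requests.get(url, headers=HEADERS).json()['projects']
    enabled_only = requests.get(
        url + '?enabled=True', headers=HEADERS).json()['projects']
    in_one_domain = requests.get(
        url + '?domain_id=%s' % 'DOMAIN_ID',
        headers=HEADERS).json()['projects']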
diff --git a/keystone-moon/keystone/tests/unit/test_v3_auth.py b/keystone-moon/keystone/tests/unit/test_v3_auth.py
index d53a85df..698feeb8 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_auth.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_auth.py
@@ -14,6 +14,7 @@
import copy
import datetime
+import itertools
import json
import operator
import uuid
@@ -21,6 +22,8 @@ import uuid
from keystoneclient.common import cms
import mock
from oslo_config import cfg
+from oslo_log import versionutils
+from oslo_utils import fixture
from oslo_utils import timeutils
from six.moves import http_client
from six.moves import range
@@ -28,9 +31,12 @@ from testtools import matchers
from testtools import testcase
from keystone import auth
+from keystone.auth.plugins import totp
from keystone.common import utils
+from keystone.contrib.revoke import routers
from keystone import exception
from keystone.policy.backends import rules
+from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import test_v3
@@ -38,7 +44,7 @@ from keystone.tests.unit import test_v3
CONF = cfg.CONF
-class TestAuthInfo(test_v3.AuthTestMixin, testcase.TestCase):
+class TestAuthInfo(common_auth.AuthTestMixin, testcase.TestCase):
def setUp(self):
super(TestAuthInfo, self).setUp()
auth.controllers.load_auth_methods()
@@ -121,7 +127,7 @@ class TokenAPITests(object):
# resolved in Python for multiple inheritance means that a setUp in this
# would get skipped by the testrunner.
def doSetUp(self):
- r = self.v3_authenticate_token(self.build_authentication_request(
+ r = self.v3_create_token(self.build_authentication_request(
username=self.user['name'],
user_domain_id=self.domain_id,
password=self.user['password']))
@@ -129,43 +135,372 @@ class TokenAPITests(object):
self.v3_token = r.headers.get('X-Subject-Token')
self.headers = {'X-Subject-Token': r.headers.get('X-Subject-Token')}
- def test_default_fixture_scope_token(self):
- self.assertIsNotNone(self.get_scoped_token())
+ def _make_auth_request(self, auth_data):
+ resp = self.post('/auth/tokens', body=auth_data)
+ token = resp.headers.get('X-Subject-Token')
+ return token
- def test_v3_v2_intermix_non_default_domain_failed(self):
- v3_token = self.get_requested_token(self.build_authentication_request(
+ def _get_unscoped_token(self):
+ auth_data = self.build_authentication_request(
user_id=self.user['id'],
- password=self.user['password']))
+ password=self.user['password'])
+ return self._make_auth_request(auth_data)
- # now validate the v3 token with v2 API
- self.admin_request(
- path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
- method='GET',
- expected_status=http_client.UNAUTHORIZED)
+ def _get_domain_scoped_token(self):
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ domain_id=self.domain_id)
+ return self._make_auth_request(auth_data)
+
+ def _get_project_scoped_token(self):
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project_id)
+ return self._make_auth_request(auth_data)
+
+ def _get_trust_scoped_token(self, trustee_user, trust):
+ auth_data = self.build_authentication_request(
+ user_id=trustee_user['id'],
+ password=trustee_user['password'],
+ trust_id=trust['id'])
+ return self._make_auth_request(auth_data)
+
+ def _create_trust(self, impersonation=False):
+ # Create a trustee user
+ trustee_user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=trustee_user['id'],
+ project_id=self.project_id,
+ impersonation=impersonation,
+ role_ids=[self.role_id])
+
+ # Create a trust
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(r)
+ return (trustee_user, trust)
+
+ def _validate_token(self, token, expected_status=http_client.OK):
+ return self.get(
+ '/auth/tokens',
+ headers={'X-Subject-Token': token},
+ expected_status=expected_status)
+
+ def _revoke_token(self, token, expected_status=http_client.NO_CONTENT):
+ return self.delete(
+ '/auth/tokens',
+ headers={'x-subject-token': token},
+ expected_status=expected_status)
+
+ def _set_user_enabled(self, user, enabled=True):
+ user['enabled'] = enabled
+ self.identity_api.update_user(user['id'], user)
+
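+
The _validate_token and _revoke_token helpers above are thin wrappers over the token API: validation is a GET (or HEAD) on /auth/tokens and revocation a DELETE, both driven by headers. The same calls outside the test harness, with placeholder endpoint and tokens:

    import requests

    KEYSTONE = 'http://localhost:5000/v3'  # assumed endpoint
    headers = {
        'X-Auth-Token': 'CALLER_TOKEN',         # assumed valid token
        'X-Subject-Token': 'TOKEN_UNDER_TEST',  # assumed token to inspect
    }

    requests.get('%s/auth/tokens' % KEYSTONE, headers=headers)     # 200 if ok
    requests.head('%s/auth/tokens' % KEYSTONE, headers=headers)    # cheap check
    requests.delete('%s/auth/tokens' % KEYSTONE, headers=headers)  # revoke, 204
    # After the DELETE, the GET returns 404 Not Found - the sequence the
    # revoke tests below assert.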
+ def test_validate_unscoped_token(self):
+ unscoped_token = self._get_unscoped_token()
+ self._validate_token(unscoped_token)
+
+ def test_revoke_unscoped_token(self):
+ unscoped_token = self._get_unscoped_token()
+ self._validate_token(unscoped_token)
+ self._revoke_token(unscoped_token)
+ self._validate_token(unscoped_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_unscoped_token_is_invalid_after_disabling_user(self):
+ unscoped_token = self._get_unscoped_token()
+ # Make sure the token is valid
+ self._validate_token(unscoped_token)
+ # Disable the user
+ self._set_user_enabled(self.user, enabled=False)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ unscoped_token)
+
+ def test_unscoped_token_is_invalid_after_enabling_disabled_user(self):
+ unscoped_token = self._get_unscoped_token()
+ # Make sure the token is valid
+ self._validate_token(unscoped_token)
+ # Disable the user
+ self._set_user_enabled(self.user, enabled=False)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ unscoped_token)
+ # Enable the user
+ self._set_user_enabled(self.user)
+ # Ensure validating a token for a re-enabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ unscoped_token)
+
+ def test_unscoped_token_is_invalid_after_disabling_user_domain(self):
+ unscoped_token = self._get_unscoped_token()
+ # Make sure the token is valid
+ self._validate_token(unscoped_token)
+ # Disable the user's domain
+ self.domain['enabled'] = False
+ self.resource_api.update_domain(self.domain['id'], self.domain)
+        # Ensure validating a token for a user in a disabled domain fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ unscoped_token)
+
+ def test_unscoped_token_is_invalid_after_changing_user_password(self):
+ unscoped_token = self._get_unscoped_token()
+ # Make sure the token is valid
+ self._validate_token(unscoped_token)
+ # Change user's password
+ self.user['password'] = 'Password1'
+ self.identity_api.update_user(self.user['id'], self.user)
+ # Ensure updating user's password revokes existing user's tokens
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ unscoped_token)
+
+ def test_validate_domain_scoped_token(self):
+ # Grant user access to domain
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+ domain_scoped_token = self._get_domain_scoped_token()
+ resp = self._validate_token(domain_scoped_token)
+ resp_json = json.loads(resp.body)
+ self.assertIsNotNone(resp_json['token']['catalog'])
+ self.assertIsNotNone(resp_json['token']['roles'])
+ self.assertIsNotNone(resp_json['token']['domain'])
+
+ def test_domain_scoped_token_is_invalid_after_disabling_user(self):
+ # Grant user access to domain
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+ domain_scoped_token = self._get_domain_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(domain_scoped_token)
+ # Disable user
+ self._set_user_enabled(self.user, enabled=False)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ domain_scoped_token)
+
+ def test_domain_scoped_token_is_invalid_after_deleting_grant(self):
+ # Grant user access to domain
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+ domain_scoped_token = self._get_domain_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(domain_scoped_token)
+ # Delete access to domain
+ self.assignment_api.delete_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+        # Ensure validating a token after the grant is deleted fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ domain_scoped_token)
+
+ def test_domain_scoped_token_invalid_after_disabling_domain(self):
+ # Grant user access to domain
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+ domain_scoped_token = self._get_domain_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(domain_scoped_token)
+ # Disable domain
+ self.domain['enabled'] = False
+ self.resource_api.update_domain(self.domain['id'], self.domain)
+ # Ensure validating a token for a disabled domain fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ domain_scoped_token)
+
+ def test_v2_validate_domain_scoped_token_returns_unauthorized(self):
+ # Test that validating a domain scoped token in v2.0 returns
+ # unauthorized.
+ # Grant user access to domain
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+
+ scoped_token = self._get_domain_scoped_token()
+ self.assertRaises(exception.Unauthorized,
+ self.token_provider_api.validate_v2_token,
+ scoped_token)
+
+ def test_validate_project_scoped_token(self):
+ project_scoped_token = self._get_project_scoped_token()
+ self._validate_token(project_scoped_token)
+
+ def test_revoke_project_scoped_token(self):
+ project_scoped_token = self._get_project_scoped_token()
+ self._validate_token(project_scoped_token)
+ self._revoke_token(project_scoped_token)
+ self._validate_token(project_scoped_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_project_scoped_token_is_invalid_after_disabling_user(self):
+ project_scoped_token = self._get_project_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(project_scoped_token)
+ # Disable the user
+ self._set_user_enabled(self.user, enabled=False)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ project_scoped_token)
+
+ def test_project_scoped_token_invalid_after_changing_user_password(self):
+ project_scoped_token = self._get_project_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(project_scoped_token)
+ # Update user's password
+ self.user['password'] = 'Password1'
+ self.identity_api.update_user(self.user['id'], self.user)
+ # Ensure updating user's password revokes existing tokens
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ project_scoped_token)
+
+ def test_project_scoped_token_invalid_after_disabling_project(self):
+ project_scoped_token = self._get_project_scoped_token()
+ # Make sure the token is valid
+ self._validate_token(project_scoped_token)
+ # Disable project
+ self.project['enabled'] = False
+ self.resource_api.update_project(self.project['id'], self.project)
+ # Ensure validating a token for a disabled project fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ project_scoped_token)
+
+ def test_rescope_unscoped_token_with_trust(self):
+ trustee_user, trust = self._create_trust()
+ self._get_trust_scoped_token(trustee_user, trust)
+
+ def test_validate_a_trust_scoped_token(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ def test_validate_a_trust_scoped_token_impersonated(self):
+ trustee_user, trust = self._create_trust(impersonation=True)
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ def test_revoke_trust_scoped_token(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+ self._revoke_token(trust_scoped_token)
+ self._validate_token(trust_scoped_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_trust_scoped_token_is_invalid_after_disabling_trustee(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ # Disable trustee
+ trustee_update_ref = dict(enabled=False)
+ self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ trust_scoped_token)
+
+ def test_trust_scoped_token_invalid_after_changing_trustee_password(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+ # Change trustee's password
+ trustee_update_ref = dict(password='Password1')
+ self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
+ # Ensure updating trustee's password revokes existing tokens
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ trust_scoped_token)
+
+ def test_trust_scoped_token_is_invalid_after_disabling_trustor(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ # Disable the trustor
+ trustor_update_ref = dict(enabled=False)
+ self.identity_api.update_user(self.user['id'], trustor_update_ref)
+ # Ensure validating a token for a disabled user fails
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ trust_scoped_token)
+
+ def test_trust_scoped_token_invalid_after_changing_trustor_password(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ # Change trustor's password
+ trustor_update_ref = dict(password='Password1')
+ self.identity_api.update_user(self.user['id'], trustor_update_ref)
+ # Ensure updating trustor's password revokes existing user's tokens
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ trust_scoped_token)
+
+ def test_trust_scoped_token_invalid_after_disabled_trustor_domain(self):
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ # Validate a trust scoped token
+ self._validate_token(trust_scoped_token)
+
+ # Disable trustor's domain
+ self.domain['enabled'] = False
+ self.resource_api.update_domain(self.domain['id'], self.domain)
+
+ trustor_update_ref = dict(password='Password1')
+ self.identity_api.update_user(self.user['id'], trustor_update_ref)
+ # Ensure updating trustor's password revokes existing user's tokens
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ trust_scoped_token)
+
+ def test_v2_validate_trust_scoped_token(self):
+        # Test that validating a trust scoped token in v2.0 returns
+ # unauthorized.
+ trustee_user, trust = self._create_trust()
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ self.assertRaises(exception.Unauthorized,
+ self.token_provider_api.validate_v2_token,
+ trust_scoped_token)
+
+ def test_default_fixture_scope_token(self):
+ self.assertIsNotNone(self.get_scoped_token())
def test_v3_v2_intermix_new_default_domain(self):
# If the default_domain_id config option is changed, then should be
# able to validate a v3 token with user in the new domain.
# 1) Create a new domain for the user.
- new_domain = {
- 'description': uuid.uuid4().hex,
- 'enabled': True,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- }
+ new_domain = unit.new_domain_ref()
self.resource_api.create_domain(new_domain['id'], new_domain)
# 2) Create user in new domain.
- new_user_password = uuid.uuid4().hex
- new_user = {
- 'name': uuid.uuid4().hex,
- 'domain_id': new_domain['id'],
- 'password': new_user_password,
- 'email': uuid.uuid4().hex,
- }
- new_user = self.identity_api.create_user(new_user)
+ new_user = unit.create_user(self.identity_api,
+ domain_id=new_domain['id'])
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(
@@ -175,12 +510,12 @@ class TokenAPITests(object):
# 4) Get a token using v3 API.
v3_token = self.get_requested_token(self.build_authentication_request(
user_id=new_user['id'],
- password=new_user_password))
+ password=new_user['password']))
# 5) Validate token using v2 API.
self.admin_request(
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
+ token=self.get_admin_token(),
method='GET')
def test_v3_v2_intermix_domain_scoped_token_failed(self):
@@ -199,10 +534,10 @@ class TokenAPITests(object):
self.admin_request(
method='GET',
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
+ token=self.get_admin_token(),
expected_status=http_client.UNAUTHORIZED)
- def test_v3_v2_intermix_non_default_project_failed(self):
+ def test_v3_v2_intermix_non_default_project_succeed(self):
# self.project is in a non-default domain
v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
@@ -213,10 +548,9 @@ class TokenAPITests(object):
self.admin_request(
method='GET',
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
- expected_status=http_client.UNAUTHORIZED)
+ token=self.get_admin_token())
- def test_v3_v2_intermix_non_default_user_failed(self):
+ def test_v3_v2_intermix_non_default_user_succeed(self):
self.assignment_api.create_grant(
self.role['id'],
user_id=self.user['id'],
@@ -232,8 +566,7 @@ class TokenAPITests(object):
self.admin_request(
method='GET',
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
- expected_status=http_client.UNAUTHORIZED)
+ token=self.get_admin_token())
def test_v3_v2_intermix_domain_scope_failed(self):
self.assignment_api.create_grant(
@@ -249,12 +582,12 @@ class TokenAPITests(object):
# v2 cannot reference projects outside the default domain
self.admin_request(
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
+ token=self.get_admin_token(),
method='GET',
expected_status=http_client.UNAUTHORIZED)
def test_v3_v2_unscoped_token_intermix(self):
- r = self.v3_authenticate_token(self.build_authentication_request(
+ r = self.v3_create_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password']))
self.assertValidUnscopedTokenResponse(r)
@@ -264,7 +597,7 @@ class TokenAPITests(object):
# now validate the v3 token with v2 API
r = self.admin_request(
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token,
+ token=self.get_admin_token(),
method='GET')
v2_token_data = r.result
@@ -278,7 +611,7 @@ class TokenAPITests(object):
def test_v3_v2_token_intermix(self):
# FIXME(gyee): PKI tokens are not interchangeable because token
# data is baked into the token itself.
- r = self.v3_authenticate_token(self.build_authentication_request(
+ r = self.v3_create_token(self.build_authentication_request(
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
project_id=self.default_domain_project['id']))
@@ -290,7 +623,7 @@ class TokenAPITests(object):
r = self.admin_request(
method='GET',
path='/v2.0/tokens/%s' % v3_token,
- token=CONF.admin_token)
+ token=self.get_admin_token())
v2_token_data = r.result
self.assertEqual(v2_token_data['access']['user']['id'],
@@ -318,9 +651,7 @@ class TokenAPITests(object):
v2_token = v2_token_data['access']['token']['id']
r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token})
- # FIXME(dolph): Due to bug 1476329, v2 tokens validated on v3 are
- # missing timezones, so they will not pass this assertion.
- # self.assertValidUnscopedTokenResponse(r)
+ self.assertValidUnscopedTokenResponse(r)
v3_token_data = r.result
self.assertEqual(v2_token_data['access']['user']['id'],
@@ -347,9 +678,7 @@ class TokenAPITests(object):
v2_token = v2_token_data['access']['token']['id']
r = self.get('/auth/tokens', headers={'X-Subject-Token': v2_token})
- # FIXME(dolph): Due to bug 1476329, v2 tokens validated on v3 are
- # missing timezones, so they will not pass this assertion.
- # self.assertValidProjectScopedTokenResponse(r)
+ self.assertValidProjectScopedTokenResponse(r)
v3_token_data = r.result
self.assertEqual(v2_token_data['access']['user']['id'],
@@ -384,9 +713,8 @@ class TokenAPITests(object):
v2_token = r.result['access']['token']['id']
# Delete the v2 token using v3.
- resp = self.delete(
+ self.delete(
'/auth/tokens', headers={'X-Subject-Token': v2_token})
- self.assertEqual(resp.status_code, 204)
# Attempting to use the deleted token on v2 should fail.
self.admin_request(
@@ -397,7 +725,7 @@ class TokenAPITests(object):
expires = self.v3_token_data['token']['expires_at']
# rescope the token
- r = self.v3_authenticate_token(self.build_authentication_request(
+ r = self.v3_create_token(self.build_authentication_request(
token=self.v3_token,
project_id=self.project_id))
self.assertValidProjectScopedTokenResponse(r)
@@ -406,12 +734,24 @@ class TokenAPITests(object):
self.assertEqual(expires, r.result['token']['expires_at'])
def test_check_token(self):
- self.head('/auth/tokens', headers=self.headers, expected_status=200)
+ self.head('/auth/tokens', headers=self.headers,
+ expected_status=http_client.OK)
def test_validate_token(self):
r = self.get('/auth/tokens', headers=self.headers)
self.assertValidUnscopedTokenResponse(r)
+ def test_validate_missing_subject_token(self):
+ self.get('/auth/tokens',
+ expected_status=http_client.NOT_FOUND)
+
+ def test_validate_missing_auth_token(self):
+ self.admin_request(
+ method='GET',
+ path='/v3/projects',
+ token=None,
+ expected_status=http_client.UNAUTHORIZED)
+
def test_validate_token_nocatalog(self):
v3_token = self.get_requested_token(self.build_authentication_request(
user_id=self.user['id'],
@@ -422,6 +762,399 @@ class TokenAPITests(object):
headers={'X-Subject-Token': v3_token})
self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
+ def test_is_admin_token_by_ids(self):
+ self.config_fixture.config(
+ group='resource',
+ admin_project_domain_name=self.domain['name'],
+ admin_project_name=self.project['name'])
+ r = self.v3_create_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
+ v3_token = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
+
+ def test_is_admin_token_by_names(self):
+ self.config_fixture.config(
+ group='resource',
+ admin_project_domain_name=self.domain['name'],
+ admin_project_name=self.project['name'])
+ r = self.v3_create_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_domain_name=self.domain['name'],
+ project_name=self.project['name']))
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
+ v3_token = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
+
+ def test_token_for_non_admin_project_is_not_admin(self):
+ self.config_fixture.config(
+ group='resource',
+ admin_project_domain_name=self.domain['name'],
+ admin_project_name=uuid.uuid4().hex)
+ r = self.v3_create_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+ v3_token = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+
+ def test_token_for_non_admin_domain_same_project_name_is_not_admin(self):
+ self.config_fixture.config(
+ group='resource',
+ admin_project_domain_name=uuid.uuid4().hex,
+ admin_project_name=self.project['name'])
+ r = self.v3_create_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+ v3_token = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+
+ def test_only_admin_project_set_acts_as_non_admin(self):
+ self.config_fixture.config(
+ group='resource',
+ admin_project_name=self.project['name'])
+ r = self.v3_create_token(self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+ v3_token = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
+
+ def _create_role(self, domain_id=None):
+ """Call ``POST /roles``."""
+ ref = unit.new_role_ref(domain_id=domain_id)
+ r = self.post('/roles', body={'role': ref})
+ return self.assertValidRoleResponse(r, ref)
+
+ def _create_implied_role(self, prior_id):
+ implied = self._create_role()
+ url = '/roles/%s/implies/%s' % (prior_id, implied['id'])
+ self.put(url, expected_status=http_client.CREATED)
+ return implied
+
+ def _delete_implied_role(self, prior_role_id, implied_role_id):
+ url = '/roles/%s/implies/%s' % (prior_role_id, implied_role_id)
+ self.delete(url)
+
+ def _get_scoped_token_roles(self, is_domain=False):
+ if is_domain:
+ v3_token = self.get_domain_scoped_token()
+ else:
+ v3_token = self.get_scoped_token()
+
+ r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
+ v3_token_data = r.result
+ token_roles = v3_token_data['token']['roles']
+ return token_roles
+
+ def _create_implied_role_shows_in_v3_token(self, is_domain):
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(1, len(token_roles))
+
+ prior = token_roles[0]['id']
+ implied1 = self._create_implied_role(prior)
+
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(2, len(token_roles))
+
+ implied2 = self._create_implied_role(prior)
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(3, len(token_roles))
+
+ token_role_ids = [role['id'] for role in token_roles]
+ self.assertIn(prior, token_role_ids)
+ self.assertIn(implied1['id'], token_role_ids)
+ self.assertIn(implied2['id'], token_role_ids)
+
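+
What these token tests observe is a transitive closure over the implication rules whenever infer_roles is enabled. A standalone illustration of that expansion (not keystone's implementation, just the idea):

    def expand_implied(assigned_ids, implies):
        # implies: dict mapping a prior role ID to the role IDs it implies.
        # Returns every role reachable from the directly assigned ones.
        seen = set()
        stack = list(assigned_ids)
        while stack:
            role_id = stack.pop()
            if role_id in seen:
                continue
            seen.add(role_id)
            stack.extend(implies.get(role_id, []))
        return seen

    # Chained rules, as in test_chained_implied_role_shows_in_v3_token:
    rules = {'prior': ['implied1'], 'implied1': ['implied2'],
             'implied2': ['implied3']}
    assert expand_implied({'prior'}, rules) == {
        'prior', 'implied1', 'implied2', 'implied3'}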
+ def test_create_implied_role_shows_in_v3_project_token(self):
+ # regardless of the default chosen, this should always
+ # test with the option set.
+ self.config_fixture.config(group='token', infer_roles=True)
+ self._create_implied_role_shows_in_v3_token(False)
+
+ def test_create_implied_role_shows_in_v3_domain_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.user['id'],
+ domain_id=self.domain['id'])
+
+ self._create_implied_role_shows_in_v3_token(True)
+
+ def test_group_assigned_implied_role_shows_in_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ is_domain = False
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(1, len(token_roles))
+
+ new_role = self._create_role()
+ prior = new_role['id']
+
+ new_group_ref = unit.new_group_ref(domain_id=self.domain['id'])
+ new_group = self.identity_api.create_group(new_group_ref)
+ self.assignment_api.create_grant(prior,
+ group_id=new_group['id'],
+ project_id=self.project['id'])
+
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(1, len(token_roles))
+
+ self.identity_api.add_user_to_group(self.user['id'],
+ new_group['id'])
+
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(2, len(token_roles))
+
+ implied1 = self._create_implied_role(prior)
+
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(3, len(token_roles))
+
+ implied2 = self._create_implied_role(prior)
+ token_roles = self._get_scoped_token_roles(is_domain)
+ self.assertEqual(4, len(token_roles))
+
+ token_role_ids = [role['id'] for role in token_roles]
+ self.assertIn(prior, token_role_ids)
+ self.assertIn(implied1['id'], token_role_ids)
+ self.assertIn(implied2['id'], token_role_ids)
+
+ def test_multiple_implied_roles_show_in_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(1, len(token_roles))
+
+ prior = token_roles[0]['id']
+ implied1 = self._create_implied_role(prior)
+ implied2 = self._create_implied_role(prior)
+ implied3 = self._create_implied_role(prior)
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(4, len(token_roles))
+
+ token_role_ids = [role['id'] for role in token_roles]
+ self.assertIn(prior, token_role_ids)
+ self.assertIn(implied1['id'], token_role_ids)
+ self.assertIn(implied2['id'], token_role_ids)
+ self.assertIn(implied3['id'], token_role_ids)
+
+ def test_chained_implied_role_shows_in_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(1, len(token_roles))
+
+ prior = token_roles[0]['id']
+ implied1 = self._create_implied_role(prior)
+ implied2 = self._create_implied_role(implied1['id'])
+ implied3 = self._create_implied_role(implied2['id'])
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(4, len(token_roles))
+
+ token_role_ids = [role['id'] for role in token_roles]
+
+ self.assertIn(prior, token_role_ids)
+ self.assertIn(implied1['id'], token_role_ids)
+ self.assertIn(implied2['id'], token_role_ids)
+ self.assertIn(implied3['id'], token_role_ids)
+
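+ # The chained case above amounts to a transitive closure over the
+ # prior -> implied mapping.  A minimal standalone sketch (the function
+ # name and dict shape are illustrative, not keystone APIs):
+ #
+ #     def expand_implied(direct_role_ids, implication_map):
+ #         expanded = set(direct_role_ids)
+ #         stack = list(direct_role_ids)
+ #         while stack:
+ #             for implied in implication_map.get(stack.pop(), ()):
+ #                 if implied not in expanded:
+ #                     expanded.add(implied)
+ #                     stack.append(implied)
+ #         return expanded
+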
+ def test_implied_role_disabled_by_config(self):
+ self.config_fixture.config(group='token', infer_roles=False)
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(1, len(token_roles))
+
+ prior = token_roles[0]['id']
+ implied1 = self._create_implied_role(prior)
+ implied2 = self._create_implied_role(implied1['id'])
+ self._create_implied_role(implied2['id'])
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(1, len(token_roles))
+ token_role_ids = [role['id'] for role in token_roles]
+ self.assertIn(prior, token_role_ids)
+
+ def test_delete_implied_role_do_not_show_in_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ token_roles = self._get_scoped_token_roles()
+ prior = token_roles[0]['id']
+ implied = self._create_implied_role(prior)
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(2, len(token_roles))
+ self._delete_implied_role(prior, implied['id'])
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(1, len(token_roles))
+
+ def test_unrelated_implied_roles_do_not_change_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ token_roles = self._get_scoped_token_roles()
+ prior = token_roles[0]['id']
+ implied = self._create_implied_role(prior)
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(2, len(token_roles))
+
+ unrelated = self._create_role()
+ url = '/roles/%s/implies/%s' % (unrelated['id'], implied['id'])
+ self.put(url, expected_status=http_client.CREATED)
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(2, len(token_roles))
+
+ self._delete_implied_role(unrelated['id'], implied['id'])
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(2, len(token_roles))
+
+ def test_domain_specific_roles_do_not_show_v3_token(self):
+ self.config_fixture.config(group='token', infer_roles=True)
+ initial_token_roles = self._get_scoped_token_roles()
+
+ new_role = self._create_role(domain_id=self.domain_id)
+ self.assignment_api.create_grant(new_role['id'],
+ user_id=self.user['id'],
+ project_id=self.project['id'])
+ implied = self._create_implied_role(new_role['id'])
+
+ token_roles = self._get_scoped_token_roles()
+ self.assertEqual(len(initial_token_roles) + 1, len(token_roles))
+
+ # The implied role from the domain specific role should be in the
+ # token, but not the domain specific role itself.
+ token_role_ids = [role['id'] for role in token_roles]
+ self.assertIn(implied['id'], token_role_ids)
+ self.assertNotIn(new_role['id'], token_role_ids)
+
+ def test_remove_all_roles_from_scope_result_in_404(self):
+ # create a new user
+ new_user = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+
+ # give the new user a role on a project
+ path = '/projects/%s/users/%s/roles/%s' % (
+ self.project['id'], new_user['id'], self.role['id'])
+ self.put(path=path)
+
+ # authenticate as the new user and get a project-scoped token
+ auth_data = self.build_authentication_request(
+ user_id=new_user['id'],
+ password=new_user['password'],
+ project_id=self.project['id'])
+ subject_token_id = self.v3_create_token(auth_data).headers.get(
+ 'X-Subject-Token')
+
+ # make sure the project-scoped token is valid
+ headers = {'X-Subject-Token': subject_token_id}
+ r = self.get('/auth/tokens', headers=headers)
+ self.assertValidProjectScopedTokenResponse(r)
+
+ # remove the roles from the user for the given scope
+ path = '/projects/%s/users/%s/roles/%s' % (
+ self.project['id'], new_user['id'], self.role['id'])
+ self.delete(path=path)
+
+ # token validation should now result in 404
+ self.get('/auth/tokens', headers=headers,
+ expected_status=http_client.NOT_FOUND)
+
+
+class TokenDataTests(object):
+ """Test the data in specific token types."""
+
+ def test_unscoped_token_format(self):
+ # ensure the unscoped token response contains the appropriate data
+ r = self.get('/auth/tokens', headers=self.headers)
+ self.assertValidUnscopedTokenResponse(r)
+
+ def test_domain_scoped_token_format(self):
+ # ensure the domain scoped token response contains the appropriate data
+ self.assignment_api.create_grant(
+ self.role['id'],
+ user_id=self.default_domain_user['id'],
+ domain_id=self.domain['id'])
+
+ domain_scoped_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ domain_id=self.domain['id'])
+ )
+ self.headers['X-Subject-Token'] = domain_scoped_token
+ r = self.get('/auth/tokens', headers=self.headers)
+ self.assertValidDomainScopedTokenResponse(r)
+
+ def test_project_scoped_token_format(self):
+ # ensure the project scoped token response contains the appropriate data
+ project_scoped_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ project_id=self.default_domain_project['id'])
+ )
+ self.headers['X-Subject-Token'] = project_scoped_token
+ r = self.get('/auth/tokens', headers=self.headers)
+ self.assertValidProjectScopedTokenResponse(r)
+
+ def test_extra_data_in_unscoped_token_fails_validation(self):
+ # ensure unscoped token response contains the appropriate data
+ r = self.get('/auth/tokens', headers=self.headers)
+
+ # populate the response result with some extra data
+ r.result['token'][u'extra'] = unicode(uuid.uuid4().hex)
+ self.assertRaises(exception.SchemaValidationError,
+ self.assertValidUnscopedTokenResponse,
+ r)
+
+ def test_extra_data_in_domain_scoped_token_fails_validation(self):
+ # ensure domain scoped token response contains the appropriate data
+ self.assignment_api.create_grant(
+ self.role['id'],
+ user_id=self.default_domain_user['id'],
+ domain_id=self.domain['id'])
+
+ domain_scoped_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ domain_id=self.domain['id'])
+ )
+ self.headers['X-Subject-Token'] = domain_scoped_token
+ r = self.get('/auth/tokens', headers=self.headers)
+
+ # populate the response result with some extra data
+ r.result['token'][u'extra'] = unicode(uuid.uuid4().hex)
+ self.assertRaises(exception.SchemaValidationError,
+ self.assertValidDomainScopedTokenResponse,
+ r)
+
+ def test_extra_data_in_project_scoped_token_fails_validation(self):
+ # ensure the project scoped token response contains the appropriate data
+ project_scoped_token = self.get_requested_token(
+ self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ project_id=self.default_domain_project['id'])
+ )
+ self.headers['X-Subject-Token'] = project_scoped_token
+ resp = self.get('/auth/tokens', headers=self.headers)
+
+ # populate the response result with some extra data
+ resp.result['token'][u'extra'] = unicode(uuid.uuid4().hex)
+ self.assertRaises(exception.SchemaValidationError,
+ self.assertValidProjectScopedTokenResponse,
+ resp)
+
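+ # The assertions above fail because the token schemas are strict about
+ # unknown keys (presumably via jsonschema's additionalProperties).  A
+ # minimal sketch of the idea, with an illustrative schema shape:
+ #
+ #     import jsonschema
+ #
+ #     schema = {'type': 'object',
+ #               'properties': {'expires_at': {'type': 'string'}},
+ #               'additionalProperties': False}
+ #     jsonschema.validate({'expires_at': 'x', 'extra': 'y'}, schema)
+ #     # raises jsonschema.exceptions.ValidationError
+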
class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
def config_overrides(self):
@@ -431,7 +1164,7 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
allow_rescope_scoped_token=False)
def test_rescoping_v3_to_v3_disabled(self):
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
token=self.get_scoped_token(),
project_id=self.project_id),
@@ -465,7 +1198,7 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
def test_rescoping_v2_to_v3_disabled(self):
token = self._v2_token()
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
token=token['access']['token']['id'],
project_id=self.project_id),
@@ -481,7 +1214,7 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
def test_rescoped_domain_token_disabled(self):
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
self.assignment_api.create_grant(self.role['id'],
user_id=self.user['id'],
@@ -495,14 +1228,14 @@ class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase):
self.build_authentication_request(
token=unscoped_token,
domain_id=self.domainA['id']))
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
token=domain_scoped_token,
project_id=self.project_id),
expected_status=http_client.FORBIDDEN)
-class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests, TokenDataTests):
def config_overrides(self):
super(TestPKITokenAPIs, self).config_overrides()
self.config_fixture.config(group='token', provider='pki')
@@ -518,7 +1251,7 @@ class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- resp = self.v3_authenticate_token(auth_data)
+ resp = self.v3_create_token(auth_data)
token_data = resp.result
token_id = resp.headers.get('X-Subject-Token')
self.assertIn('expires_at', token_data['token'])
@@ -542,7 +1275,7 @@ class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
project_id=self.default_domain_project['id'])
- resp = self.v3_authenticate_token(auth_data)
+ resp = self.v3_create_token(auth_data)
token_data = resp.result
token = resp.headers.get('X-Subject-Token')
@@ -550,7 +1283,7 @@ class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
token = cms.cms_hash_token(token)
path = '/v2.0/tokens/%s' % (token)
resp = self.admin_request(path=path,
- token=CONF.admin_token,
+ token=self.get_admin_token(),
method='GET')
v2_token = resp.result
self.assertEqual(v2_token['access']['user']['id'],
@@ -559,8 +1292,8 @@ class TestPKITokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
# just need to make sure the non-fraction part agrees
self.assertIn(v2_token['access']['token']['expires'][:-1],
token_data['token']['expires_at'])
- self.assertEqual(v2_token['access']['user']['roles'][0]['id'],
- token_data['token']['roles'][0]['id'])
+ self.assertEqual(v2_token['access']['user']['roles'][0]['name'],
+ token_data['token']['roles'][0]['name'])
class TestPKIZTokenAPIs(TestPKITokenAPIs):
@@ -572,7 +1305,8 @@ class TestPKIZTokenAPIs(TestPKITokenAPIs):
return cms.pkiz_verify(*args, **kwargs)
-class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests,
+ TokenDataTests):
def config_overrides(self):
super(TestUUIDTokenAPIs, self).config_overrides()
self.config_fixture.config(group='token', provider='uuid')
@@ -585,14 +1319,15 @@ class TestUUIDTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- resp = self.v3_authenticate_token(auth_data)
+ resp = self.v3_create_token(auth_data)
token_data = resp.result
token_id = resp.headers.get('X-Subject-Token')
self.assertIn('expires_at', token_data['token'])
self.assertFalse(cms.is_asn1_token(token_id))
-class TestFernetTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
+class TestFernetTokenAPIs(test_v3.RestfulTestCase, TokenAPITests,
+ TokenDataTests):
def config_overrides(self):
super(TestFernetTokenAPIs, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
@@ -602,6 +1337,34 @@ class TestFernetTokenAPIs(test_v3.RestfulTestCase, TokenAPITests):
super(TestFernetTokenAPIs, self).setUp()
self.doSetUp()
+ def _make_auth_request(self, auth_data):
+ token = super(TestFernetTokenAPIs, self)._make_auth_request(auth_data)
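+ # keep Fernet tokens small enough for typical HTTP header and database
+ # column limits (the 255 bound below presumably reflects that)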
+ self.assertLess(len(token), 255)
+ return token
+
+ def test_validate_tampered_unscoped_token_fails(self):
+ unscoped_token = self._get_unscoped_token()
+ tampered_token = (unscoped_token[:50] + uuid.uuid4().hex +
+ unscoped_token[50 + 32:])
+ self._validate_token(tampered_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_validate_tampered_project_scoped_token_fails(self):
+ project_scoped_token = self._get_project_scoped_token()
+ tampered_token = (project_scoped_token[:50] + uuid.uuid4().hex +
+ project_scoped_token[50 + 32:])
+ self._validate_token(tampered_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_validate_tampered_trust_scoped_token_fails(self):
+ trustee_user, trust = self._create_trust()
+ # Get a trust scoped token
+ trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
+ tampered_token = (trust_scoped_token[:50] + uuid.uuid4().hex +
+ trust_scoped_token[50 + 32:])
+ self._validate_token(tampered_token,
+ expected_status=http_client.NOT_FOUND)
+
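+ # Illustrative only: the tamper tests above pass because Fernet tokens
+ # are HMAC-authenticated, so any byte change invalidates the signature.
+ # A standalone sketch with the cryptography library:
+ #
+ #     from cryptography.fernet import Fernet, InvalidToken
+ #
+ #     f = Fernet(Fernet.generate_key())
+ #     token = f.encrypt(b'payload')
+ #     tampered = token[:10] + b'x' * 5 + token[15:]
+ #     try:
+ #         f.decrypt(tampered)
+ #     except InvalidToken:
+ #         pass  # tampering is always detected
+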
class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
"""Test token revoke using v3 Identity API by token owner and admin."""
@@ -616,29 +1379,22 @@ class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
"""
super(TestTokenRevokeSelfAndAdmin, self).load_sample_data()
# DomainA setup
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
- self.userAdminA = self.new_user_ref(domain_id=self.domainA['id'])
- password = self.userAdminA['password']
- self.userAdminA = self.identity_api.create_user(self.userAdminA)
- self.userAdminA['password'] = password
+ self.userAdminA = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
- self.userNormalA = self.new_user_ref(
- domain_id=self.domainA['id'])
- password = self.userNormalA['password']
- self.userNormalA = self.identity_api.create_user(self.userNormalA)
- self.userNormalA['password'] = password
+ self.userNormalA = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.userAdminA['id'],
domain_id=self.domainA['id'])
- def config_overrides(self):
- super(TestTokenRevokeSelfAndAdmin, self).config_overrides()
- self.config_fixture.config(
- group='oslo_policy',
- policy_file=unit.dirs.etc('policy.v3cloudsample.json'))
+ def _policy_fixture(self):
+ return ksfixtures.Policy(unit.dirs.etc('policy.v3cloudsample.json'),
+ self.config_fixture)
def test_user_revokes_own_token(self):
user_token = self.get_requested_token(
@@ -655,11 +1411,13 @@ class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
password=self.userAdminA['password'],
domain_name=self.domainA['name']))
- self.head('/auth/tokens', headers=headers, expected_status=200,
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.OK,
token=adminA_token)
- self.head('/auth/tokens', headers=headers, expected_status=200,
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.OK,
token=user_token)
- self.delete('/auth/tokens', headers=headers, expected_status=204,
+ self.delete('/auth/tokens', headers=headers,
token=user_token)
# invalid X-Auth-Token and invalid X-Subject-Token
self.head('/auth/tokens', headers=headers,
@@ -693,11 +1451,13 @@ class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
password=self.userAdminA['password'],
domain_name=self.domainA['name']))
- self.head('/auth/tokens', headers=headers, expected_status=200,
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.OK,
token=adminA_token)
- self.head('/auth/tokens', headers=headers, expected_status=200,
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.OK,
token=user_token)
- self.delete('/auth/tokens', headers=headers, expected_status=204,
+ self.delete('/auth/tokens', headers=headers,
token=adminA_token)
# invalid X-Auth-Token and invalid X-Subject-Token
self.head('/auth/tokens', headers=headers,
@@ -714,14 +1474,12 @@ class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
def test_adminB_fails_revoking_userA_token(self):
# DomainB setup
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'], self.domainB)
- self.userAdminB = self.new_user_ref(domain_id=self.domainB['id'])
- password = self.userAdminB['password']
- self.userAdminB = self.identity_api.create_user(self.userAdminB)
- self.userAdminB['password'] = password
+ userAdminB = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role['id'],
- user_id=self.userAdminB['id'],
+ user_id=userAdminB['id'],
domain_id=self.domainB['id'])
user_token = self.get_requested_token(
@@ -733,8 +1491,8 @@ class TestTokenRevokeSelfAndAdmin(test_v3.RestfulTestCase):
adminB_token = self.get_requested_token(
self.build_authentication_request(
- user_id=self.userAdminB['id'],
- password=self.userAdminB['password'],
+ user_id=userAdminB['id'],
+ password=userAdminB['password'],
domain_name=self.domainB['name']))
self.head('/auth/tokens', headers=headers,
@@ -750,7 +1508,6 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
def config_overrides(self):
super(TestTokenRevokeById, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
provider='pki',
@@ -782,44 +1539,32 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
super(TestTokenRevokeById, self).setUp()
# Start by creating a couple of domains and projects
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'], self.domainB)
- self.projectA = self.new_project_ref(domain_id=self.domainA['id'])
+ self.projectA = unit.new_project_ref(domain_id=self.domainA['id'])
self.resource_api.create_project(self.projectA['id'], self.projectA)
- self.projectB = self.new_project_ref(domain_id=self.domainA['id'])
+ self.projectB = unit.new_project_ref(domain_id=self.domainA['id'])
self.resource_api.create_project(self.projectB['id'], self.projectB)
# Now create some users
- self.user1 = self.new_user_ref(
- domain_id=self.domainA['id'])
- password = self.user1['password']
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
-
- self.user2 = self.new_user_ref(
- domain_id=self.domainB['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
-
- self.user3 = self.new_user_ref(
- domain_id=self.domainB['id'])
- password = self.user3['password']
- self.user3 = self.identity_api.create_user(self.user3)
- self.user3['password'] = password
-
- self.group1 = self.new_group_ref(
- domain_id=self.domainA['id'])
+ self.user1 = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
+
+ self.user2 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+
+ self.user3 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+
+ self.group1 = unit.new_group_ref(domain_id=self.domainA['id'])
self.group1 = self.identity_api.create_group(self.group1)
- self.group2 = self.new_group_ref(
- domain_id=self.domainA['id'])
+ self.group2 = unit.new_group_ref(domain_id=self.domainA['id'])
self.group2 = self.identity_api.create_group(self.group2)
- self.group3 = self.new_group_ref(
- domain_id=self.domainB['id'])
+ self.group3 = unit.new_group_ref(domain_id=self.domainB['id'])
self.group3 = self.identity_api.create_group(self.group3)
self.identity_api.add_user_to_group(self.user1['id'],
@@ -829,9 +1574,9 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.identity_api.add_user_to_group(self.user3['id'],
self.group2['id'])
- self.role1 = self.new_role_ref()
+ self.role1 = unit.new_role_ref()
self.role_api.create_role(self.role1['id'], self.role1)
- self.role2 = self.new_role_ref()
+ self.role2 = unit.new_role_ref()
self.role_api.create_role(self.role2['id'], self.role2)
self.assignment_api.create_grant(self.role2['id'],
@@ -864,13 +1609,13 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# confirm both tokens are valid
self.head('/auth/tokens',
headers={'X-Subject-Token': unscoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': scoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
# create a new role
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign a new role
@@ -883,10 +1628,10 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# both tokens should remain valid
self.head('/auth/tokens',
headers={'X-Subject-Token': unscoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': scoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
def test_deleting_user_grant_revokes_token(self):
"""Test deleting a user grant revokes token.
@@ -906,7 +1651,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm token is valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# Delete the grant, which should invalidate the token
grant_url = (
'/projects/%(project_id)s/users/%(user_id)s/'
@@ -920,22 +1665,14 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
expected_status=http_client.NOT_FOUND)
def role_data_fixtures(self):
- self.projectC = self.new_project_ref(domain_id=self.domainA['id'])
+ self.projectC = unit.new_project_ref(domain_id=self.domainA['id'])
self.resource_api.create_project(self.projectC['id'], self.projectC)
- self.user4 = self.new_user_ref(domain_id=self.domainB['id'])
- password = self.user4['password']
- self.user4 = self.identity_api.create_user(self.user4)
- self.user4['password'] = password
- self.user5 = self.new_user_ref(
- domain_id=self.domainA['id'])
- password = self.user5['password']
- self.user5 = self.identity_api.create_user(self.user5)
- self.user5['password'] = password
- self.user6 = self.new_user_ref(
- domain_id=self.domainA['id'])
- password = self.user6['password']
- self.user6 = self.identity_api.create_user(self.user6)
- self.user6['password'] = password
+ self.user4 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+ self.user5 = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
+ self.user6 = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
self.identity_api.add_user_to_group(self.user5['id'],
self.group1['id'])
self.assignment_api.create_grant(self.role1['id'],
@@ -954,29 +1691,29 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
def test_deleting_role_revokes_token(self):
"""Test deleting a role revokes token.
- Add some additional test data, namely:
- - A third project (project C)
- - Three additional users - user4 owned by domainB and user5 and 6
- owned by domainA (different domain ownership should not affect
- the test results, just provided to broaden test coverage)
- - User5 is a member of group1
- - Group1 gets an additional assignment - role1 on projectB as
- well as its existing role1 on projectA
- - User4 has role2 on Project C
- - User6 has role1 on projectA and domainA
- - This allows us to create 5 tokens by virtue of different types
- of role assignment:
- - user1, scoped to ProjectA by virtue of user role1 assignment
- - user5, scoped to ProjectB by virtue of group role1 assignment
- - user4, scoped to ProjectC by virtue of user role2 assignment
- - user6, scoped to ProjectA by virtue of user role1 assignment
- - user6, scoped to DomainA by virtue of user role1 assignment
- - role1 is then deleted
- - Check the tokens on Project A and B, and DomainA are revoked,
- but not the one for Project C
+ Add some additional test data, namely:
+
+ - A third project (project C)
+ - Three additional users - user4 owned by domainB and user5 and 6 owned
+ by domainA (different domain ownership should not affect the test
+ results, just provided to broaden test coverage)
+ - User5 is a member of group1
+ - Group1 gets an additional assignment - role1 on projectB as well as
+ its existing role1 on projectA
+ - User4 has role2 on Project C
+ - User6 has role1 on projectA and domainA
+ - This allows us to create 5 tokens by virtue of different types of
+ role assignment:
+ - user1, scoped to ProjectA by virtue of user role1 assignment
+ - user5, scoped to ProjectB by virtue of group role1 assignment
+ - user4, scoped to ProjectC by virtue of user role2 assignment
+ - user6, scoped to ProjectA by virtue of user role1 assignment
+ - user6, scoped to DomainA by virtue of user role1 assignment
+ - role1 is then deleted
+ - Check the tokens on Project A and B, and DomainA are revoked, but not
+ the one for Project C
"""
-
self.role_data_fixtures()
# Now we are ready to start issuing requests
@@ -1008,19 +1745,19 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm tokens are valid
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenA},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenB},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenC},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenD},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenE},
- expected_status=200)
+ expected_status=http_client.OK)
# Delete the role, which should invalidate the tokens
role_url = '/roles/%s' % self.role1['id']
@@ -1043,7 +1780,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# ...but the one using role2 is still valid
self.head('/auth/tokens',
headers={'X-Subject-Token': tokenC},
- expected_status=200)
+ expected_status=http_client.OK)
def test_domain_user_role_assignment_maintains_token(self):
"""Test user-domain role assignment maintains existing token.
@@ -1063,7 +1800,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm token is valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# Assign a role, which should not affect the token
grant_url = (
'/domains/%(domain_id)s/users/%(user_id)s/'
@@ -1074,7 +1811,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.put(grant_url)
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
def test_disabling_project_revokes_token(self):
token = self.get_requested_token(
@@ -1086,7 +1823,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# confirm token is valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# disable the project, which should invalidate the token
self.patch(
@@ -1097,7 +1834,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
expected_status=http_client.NOT_FOUND)
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
user_id=self.user3['id'],
password=self.user3['password'],
@@ -1114,7 +1851,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# confirm token is valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# delete the project, which should invalidate the token
self.delete(
@@ -1124,7 +1861,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
expected_status=http_client.NOT_FOUND)
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
user_id=self.user3['id'],
password=self.user3['password'],
@@ -1163,13 +1900,13 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm tokens are valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token1},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': token2},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': token3},
- expected_status=200)
+ expected_status=http_client.OK)
# Delete the group grant, which should invalidate the
# tokens for user1 and user2
grant_url = (
@@ -1209,7 +1946,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm token is valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# Delete the grant, which should invalidate the token
grant_url = (
'/domains/%(domain_id)s/groups/%(group_id)s/'
@@ -1220,7 +1957,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.put(grant_url)
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
def test_group_membership_changes_revokes_token(self):
"""Test add/removal to/from group revokes token.
@@ -1250,10 +1987,10 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# Confirm tokens are valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token1},
- expected_status=200)
+ expected_status=http_client.OK)
self.head('/auth/tokens',
headers={'X-Subject-Token': token2},
- expected_status=200)
+ expected_status=http_client.OK)
# Remove user1 from group1, which should invalidate
# the token
self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
@@ -1265,18 +2002,17 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# But user2's token should still be valid
self.head('/auth/tokens',
headers={'X-Subject-Token': token2},
- expected_status=200)
+ expected_status=http_client.OK)
# Adding user2 to a group should not invalidate token
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group2['id'],
'user_id': self.user2['id']})
self.head('/auth/tokens',
headers={'X-Subject-Token': token2},
- expected_status=200)
+ expected_status=http_client.OK)
def test_removing_role_assignment_does_not_affect_other_users(self):
"""Revoking a role from one user should not affect other users."""
-
# This group grant is not needed for the test
self.delete(
'/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' %
@@ -1306,7 +2042,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.head('/auth/tokens',
headers={'X-Subject-Token': user1_token},
expected_status=http_client.NOT_FOUND)
- self.v3_authenticate_token(
+ self.v3_create_token(
self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'],
@@ -1316,8 +2052,8 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# authorization for the second user should still succeed
self.head('/auth/tokens',
headers={'X-Subject-Token': user3_token},
- expected_status=200)
- self.v3_authenticate_token(
+ expected_status=http_client.OK)
+ self.v3_create_token(
self.build_authentication_request(
user_id=self.user3['id'],
password=self.user3['password'],
@@ -1338,7 +2074,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
self.delete(
'/projects/%(project_id)s' % {'project_id': self.projectA['id']})
- # Make sure that we get a NotFound(404) when heading that role.
+ # Make sure that we get a 404 Not Found when heading that role.
self.head(role_path, expected_status=http_client.NOT_FOUND)
def get_v2_token(self, token=None, project_id=None):
@@ -1366,8 +2102,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
token = self.get_v2_token()
self.delete('/auth/tokens',
- headers={'X-Subject-Token': token},
- expected_status=204)
+ headers={'X-Subject-Token': token})
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
@@ -1397,8 +2132,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# revoke the project-scoped token.
self.delete('/auth/tokens',
- headers={'X-Subject-Token': project_scoped_token},
- expected_status=204)
+ headers={'X-Subject-Token': project_scoped_token})
# The project-scoped token is invalidated.
self.head('/auth/tokens',
@@ -1408,17 +2142,16 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# The unscoped token should still be valid.
self.head('/auth/tokens',
headers={'X-Subject-Token': unscoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
# The domain-scoped token should still be valid.
self.head('/auth/tokens',
headers={'X-Subject-Token': domain_scoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
# revoke the domain-scoped token.
self.delete('/auth/tokens',
- headers={'X-Subject-Token': domain_scoped_token},
- expected_status=204)
+ headers={'X-Subject-Token': domain_scoped_token})
# The domain-scoped token is invalid.
self.head('/auth/tokens',
@@ -1428,16 +2161,13 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# The unscoped token should still be valid.
self.head('/auth/tokens',
headers={'X-Subject-Token': unscoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
def test_revoke_token_from_token_v2(self):
# Test that a scoped token can be requested from an unscoped token,
# the scoped token can be revoked, and the unscoped token remains
# valid.
- # FIXME(blk-u): This isn't working correctly. The scoped token should
- # be revoked. See bug 1347318.
-
unscoped_token = self.get_v2_token()
# Get a project-scoped token from the unscoped token
@@ -1446,8 +2176,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# revoke the project-scoped token.
self.delete('/auth/tokens',
- headers={'X-Subject-Token': project_scoped_token},
- expected_status=204)
+ headers={'X-Subject-Token': project_scoped_token})
# The project-scoped token is invalidated.
self.head('/auth/tokens',
@@ -1457,7 +2186,7 @@ class TestTokenRevokeById(test_v3.RestfulTestCase):
# The unscoped token should still be valid.
self.head('/auth/tokens',
headers={'X-Subject-Token': unscoped_token},
- expected_status=200)
+ expected_status=http_client.OK)
class TestTokenRevokeByAssignment(TestTokenRevokeById):
@@ -1465,9 +2194,6 @@ class TestTokenRevokeByAssignment(TestTokenRevokeById):
def config_overrides(self):
super(TestTokenRevokeById, self).config_overrides()
self.config_fixture.config(
- group='revoke',
- driver='kvs')
- self.config_fixture.config(
group='token',
provider='uuid',
revoke_by_id=True)
@@ -1501,7 +2227,7 @@ class TestTokenRevokeByAssignment(TestTokenRevokeById):
# authorization for the projectA should still succeed
self.head('/auth/tokens',
headers={'X-Subject-Token': other_project_token},
- expected_status=200)
+ expected_status=http_client.OK)
# while token for the projectB should not
self.head('/auth/tokens',
headers={'X-Subject-Token': project_token},
@@ -1512,14 +2238,21 @@ class TestTokenRevokeByAssignment(TestTokenRevokeById):
self.assertIn(project_token, revoked_tokens)
-class TestTokenRevokeApi(TestTokenRevokeById):
- EXTENSION_NAME = 'revoke'
- EXTENSION_TO_ADD = 'revoke_extension'
+class RevokeContribTests(test_v3.RestfulTestCase):
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_exception_happens(self, mock_deprecator):
+ routers.RevokeExtension(mock.ANY)
+ mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("Remove revoke_extension from", args[1])
+
+
+class TestTokenRevokeApi(TestTokenRevokeById):
"""Test token revocation on the v3 Identity API."""
+
def config_overrides(self):
super(TestTokenRevokeApi, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='token',
provider='pki',
@@ -1536,15 +2269,19 @@ class TestTokenRevokeApi(TestTokenRevokeById):
expected_response = {'events': [{'project_id': project_id}]}
self.assertEqual(expected_response, events_response)
- def assertDomainInList(self, events_response, domain_id):
+ def assertDomainAndProjectInList(self, events_response, domain_id):
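+ # As of Mitaka a domain is backed by a project acting as a domain, so
+ # disabling one is expected to emit both a project and a domain event.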
events = events_response['events']
- self.assertEqual(1, len(events))
- self.assertEqual(domain_id, events[0]['domain_id'])
+ self.assertEqual(2, len(events))
+ self.assertEqual(domain_id, events[0]['project_id'])
+ self.assertEqual(domain_id, events[1]['domain_id'])
self.assertIsNotNone(events[0]['issued_before'])
+ self.assertIsNotNone(events[1]['issued_before'])
self.assertIsNotNone(events_response['links'])
del (events_response['events'][0]['issued_before'])
+ del (events_response['events'][1]['issued_before'])
del (events_response['links'])
- expected_response = {'events': [{'domain_id': domain_id}]}
+ expected_response = {'events': [{'project_id': domain_id},
+ {'domain_id': domain_id}]}
self.assertEqual(expected_response, events_response)
def assertValidRevokedTokenResponse(self, events_response, **kwargs):
@@ -1563,62 +2300,55 @@ class TestTokenRevokeApi(TestTokenRevokeById):
def test_revoke_token(self):
scoped_token = self.get_scoped_token()
headers = {'X-Subject-Token': scoped_token}
- response = self.get('/auth/tokens', headers=headers,
- expected_status=200).json_body['token']
+ response = self.get('/auth/tokens', headers=headers).json_body['token']
- self.delete('/auth/tokens', headers=headers, expected_status=204)
+ self.delete('/auth/tokens', headers=headers)
self.head('/auth/tokens', headers=headers,
expected_status=http_client.NOT_FOUND)
- events_response = self.get('/OS-REVOKE/events',
- expected_status=200).json_body
+ events_response = self.get('/OS-REVOKE/events').json_body
self.assertValidRevokedTokenResponse(events_response,
audit_id=response['audit_ids'][0])
def test_revoke_v2_token(self):
token = self.get_v2_token()
headers = {'X-Subject-Token': token}
- response = self.get('/auth/tokens', headers=headers,
- expected_status=200).json_body['token']
- self.delete('/auth/tokens', headers=headers, expected_status=204)
+ response = self.get('/auth/tokens',
+ headers=headers).json_body['token']
+ self.delete('/auth/tokens', headers=headers)
self.head('/auth/tokens', headers=headers,
expected_status=http_client.NOT_FOUND)
- events_response = self.get('/OS-REVOKE/events',
- expected_status=200).json_body
+ events_response = self.get('/OS-REVOKE/events').json_body
self.assertValidRevokedTokenResponse(
events_response,
audit_id=response['audit_ids'][0])
- def test_revoke_by_id_false_410(self):
+ def test_revoke_by_id_false_returns_gone(self):
self.get('/auth/tokens/OS-PKI/revoked',
expected_status=http_client.GONE)
def test_list_delete_project_shows_in_event_list(self):
self.role_data_fixtures()
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events').json_body['events']
self.assertEqual([], events)
self.delete(
'/projects/%(project_id)s' % {'project_id': self.projectA['id']})
- events_response = self.get('/OS-REVOKE/events',
- expected_status=200).json_body
+ events_response = self.get('/OS-REVOKE/events').json_body
self.assertValidDeletedProjectResponse(events_response,
self.projectA['id'])
def test_disable_domain_shows_in_event_list(self):
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events').json_body['events']
self.assertEqual([], events)
disable_body = {'domain': {'enabled': False}}
self.patch(
'/domains/%(project_id)s' % {'project_id': self.domainA['id']},
body=disable_body)
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body
+ events = self.get('/OS-REVOKE/events').json_body
- self.assertDomainInList(events, self.domainA['id'])
+ self.assertDomainAndProjectInList(events, self.domainA['id'])
def assertEventDataInList(self, events, **kwargs):
found = False
@@ -1646,30 +2376,31 @@ class TestTokenRevokeApi(TestTokenRevokeById):
def test_list_delete_token_shows_in_event_list(self):
self.role_data_fixtures()
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events').json_body['events']
self.assertEqual([], events)
scoped_token = self.get_scoped_token()
headers = {'X-Subject-Token': scoped_token}
auth_req = self.build_authentication_request(token=scoped_token)
- response = self.v3_authenticate_token(auth_req)
+ response = self.v3_create_token(auth_req)
token2 = response.json_body['token']
headers2 = {'X-Subject-Token': response.headers['X-Subject-Token']}
- response = self.v3_authenticate_token(auth_req)
+ response = self.v3_create_token(auth_req)
response.json_body['token']
headers3 = {'X-Subject-Token': response.headers['X-Subject-Token']}
- self.head('/auth/tokens', headers=headers, expected_status=200)
- self.head('/auth/tokens', headers=headers2, expected_status=200)
- self.head('/auth/tokens', headers=headers3, expected_status=200)
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.OK)
+ self.head('/auth/tokens', headers=headers2,
+ expected_status=http_client.OK)
+ self.head('/auth/tokens', headers=headers3,
+ expected_status=http_client.OK)
- self.delete('/auth/tokens', headers=headers, expected_status=204)
+ self.delete('/auth/tokens', headers=headers)
# NOTE(ayoung): not deleting token3, as it should be deleted
# by the previous delete
- events_response = self.get('/OS-REVOKE/events',
- expected_status=200).json_body
+ events_response = self.get('/OS-REVOKE/events').json_body
events = events_response['events']
self.assertEqual(1, len(events))
self.assertEventDataInList(
@@ -1677,32 +2408,32 @@ class TestTokenRevokeApi(TestTokenRevokeById):
audit_id=token2['audit_ids'][1])
self.head('/auth/tokens', headers=headers,
expected_status=http_client.NOT_FOUND)
- self.head('/auth/tokens', headers=headers2, expected_status=200)
- self.head('/auth/tokens', headers=headers3, expected_status=200)
+ self.head('/auth/tokens', headers=headers2,
+ expected_status=http_client.OK)
+ self.head('/auth/tokens', headers=headers3,
+ expected_status=http_client.OK)
def test_list_with_filter(self):
self.role_data_fixtures()
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events').json_body['events']
self.assertEqual(0, len(events))
scoped_token = self.get_scoped_token()
headers = {'X-Subject-Token': scoped_token}
auth = self.build_authentication_request(token=scoped_token)
headers2 = {'X-Subject-Token': self.get_requested_token(auth)}
- self.delete('/auth/tokens', headers=headers, expected_status=204)
- self.delete('/auth/tokens', headers=headers2, expected_status=204)
+ self.delete('/auth/tokens', headers=headers)
+ self.delete('/auth/tokens', headers=headers2)
- events = self.get('/OS-REVOKE/events',
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events').json_body['events']
self.assertEqual(2, len(events))
future = utils.isotime(timeutils.utcnow() +
datetime.timedelta(seconds=1000))
- events = self.get('/OS-REVOKE/events?since=%s' % (future),
- expected_status=200).json_body['events']
+ events = self.get('/OS-REVOKE/events?since=%s' % (future)
+ ).json_body['events']
self.assertEqual(0, len(events))
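+ # Illustrative only: against a live endpoint the same filtered listing
+ # is a plain GET; python-requests and the URL are assumptions:
+ #
+ #     import requests
+ #
+ #     r = requests.get('http://localhost:5000/v3/OS-REVOKE/events',
+ #                      params={'since': since_isotime},
+ #                      headers={'X-Auth-Token': admin_token})
+ #     events = r.json()['events']
+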
@@ -1764,7 +2495,7 @@ class TestAuthExternalDomain(test_v3.RestfulTestCase):
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidProjectScopedTokenResponse(r)
self.assertEqual(self.user['name'], token['bind']['kerberos'])
@@ -1776,7 +2507,7 @@ class TestAuthExternalDomain(test_v3.RestfulTestCase):
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'REMOTE_DOMAIN': remote_domain,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidUnscopedTokenResponse(r)
self.assertEqual(self.user['name'], token['bind']['kerberos'])
@@ -1820,7 +2551,7 @@ class TestAuthExternalDefaultDomain(test_v3.RestfulTestCase):
remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidProjectScopedTokenResponse(r)
self.assertEqual(self.default_domain_user['name'],
token['bind']['kerberos'])
@@ -1831,7 +2562,7 @@ class TestAuthExternalDefaultDomain(test_v3.RestfulTestCase):
remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidUnscopedTokenResponse(r)
self.assertEqual(self.default_domain_user['name'],
token['bind']['kerberos'])
@@ -1852,7 +2583,7 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_unscoped_token_with_user_domain_id(self):
@@ -1860,7 +2591,7 @@ class TestAuth(test_v3.RestfulTestCase):
username=self.user['name'],
user_domain_id=self.domain['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_unscoped_token_with_user_domain_name(self):
@@ -1868,7 +2599,7 @@ class TestAuth(test_v3.RestfulTestCase):
username=self.user['name'],
user_domain_name=self.domain['name'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_project_id_scoped_token_with_user_id(self):
@@ -1876,11 +2607,11 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(r)
def _second_project_as_default(self):
- ref = self.new_project_ref(domain_id=self.domain_id)
+ ref = unit.new_project_ref(domain_id=self.domain_id)
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
@@ -1907,7 +2638,7 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(r)
self.assertEqual(project['id'], r.result['token']['project']['id'])
@@ -1952,7 +2683,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
catalog = r.result['token']['catalog']
self.assertEqual(1, len(catalog))
@@ -1989,13 +2720,12 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertEqual([], r.result['token']['catalog'])
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
-
# Create a disabled endpoint that's like the enabled one.
disabled_endpoint_ref = copy.copy(self.endpoint)
disabled_endpoint_id = uuid.uuid4().hex
@@ -2011,21 +2741,21 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self._check_disabled_endpoint_result(r.result['token']['catalog'],
disabled_endpoint_id)
def test_project_id_scoped_token_with_user_id_unauthorized(self):
- project = self.new_project_ref(domain_id=self.domain_id)
+ project = unit.new_project_ref(domain_id=self.domain_id)
self.resource_api.create_project(project['id'], project)
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=project['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_user_and_group_roles_scoped_token(self):
"""Test correct roles are returned in scoped token.
@@ -2049,30 +2779,19 @@ class TestAuth(test_v3.RestfulTestCase):
tokens
"""
-
- domainA = self.new_domain_ref()
+ domainA = unit.new_domain_ref()
self.resource_api.create_domain(domainA['id'], domainA)
- projectA = self.new_project_ref(domain_id=domainA['id'])
+ projectA = unit.new_project_ref(domain_id=domainA['id'])
self.resource_api.create_project(projectA['id'], projectA)
- user1 = self.new_user_ref(
- domain_id=domainA['id'])
- password = user1['password']
- user1 = self.identity_api.create_user(user1)
- user1['password'] = password
+ user1 = unit.create_user(self.identity_api, domain_id=domainA['id'])
- user2 = self.new_user_ref(
- domain_id=domainA['id'])
- password = user2['password']
- user2 = self.identity_api.create_user(user2)
- user2['password'] = password
+ user2 = unit.create_user(self.identity_api, domain_id=domainA['id'])
- group1 = self.new_group_ref(
- domain_id=domainA['id'])
+ group1 = unit.new_group_ref(domain_id=domainA['id'])
group1 = self.identity_api.create_group(group1)
- group2 = self.new_group_ref(
- domain_id=domainA['id'])
+ group2 = unit.new_group_ref(domain_id=domainA['id'])
group2 = self.identity_api.create_group(group2)
self.identity_api.add_user_to_group(user1['id'],
@@ -2083,7 +2802,7 @@ class TestAuth(test_v3.RestfulTestCase):
# Now create all the roles and assign them
role_list = []
for _ in range(8):
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
role_list.append(role)
@@ -2119,7 +2838,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=user1['id'],
password=user1['password'],
project_id=projectA['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidScopedTokenResponse(r)
roles_ids = []
for ref in token['roles']:
@@ -2133,7 +2852,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=user1['id'],
password=user1['password'],
domain_id=domainA['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidScopedTokenResponse(r)
roles_ids = []
for ref in token['roles']:
@@ -2151,7 +2870,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=user1['id'],
password=user1['password'],
project_id=projectA['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidScopedTokenResponse(r)
roles_ids = []
for ref in token['roles']:
@@ -2164,30 +2883,23 @@ class TestAuth(test_v3.RestfulTestCase):
def test_auth_token_cross_domain_group_and_project(self):
"""Verify getting a token in cross domain group/project roles."""
# create domain, project and group and grant roles to user
- domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ domain1 = unit.new_domain_ref()
self.resource_api.create_domain(domain1['id'], domain1)
- project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
- 'domain_id': domain1['id']}
+ project1 = unit.new_project_ref(domain_id=domain1['id'])
self.resource_api.create_project(project1['id'], project1)
- user_foo = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
- password = user_foo['password']
- user_foo = self.identity_api.create_user(user_foo)
- user_foo['password'] = password
- role_member = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
+ user_foo = unit.create_user(self.identity_api,
+ domain_id=test_v3.DEFAULT_DOMAIN_ID)
+ role_member = unit.new_role_ref()
self.role_api.create_role(role_member['id'], role_member)
- role_admin = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
+ role_admin = unit.new_role_ref()
self.role_api.create_role(role_admin['id'], role_admin)
- role_foo_domain1 = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
+ role_foo_domain1 = unit.new_role_ref()
self.role_api.create_role(role_foo_domain1['id'], role_foo_domain1)
- role_group_domain1 = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
+ role_group_domain1 = unit.new_role_ref()
self.role_api.create_role(role_group_domain1['id'], role_group_domain1)
self.assignment_api.add_user_to_project(project1['id'],
user_foo['id'])
- new_group = {'domain_id': domain1['id'], 'name': uuid.uuid4().hex}
+ new_group = unit.new_group_ref(domain_id=domain1['id'])
new_group = self.identity_api.create_group(new_group)
self.identity_api.add_user_to_group(user_foo['id'],
new_group['id'])
@@ -2216,7 +2928,7 @@ class TestAuth(test_v3.RestfulTestCase):
project_name=project1['name'],
project_domain_id=domain1['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
scoped_token = self.assertValidScopedTokenResponse(r)
project = scoped_token["project"]
roles_ids = []
@@ -2234,7 +2946,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_id=self.domain['id'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(r)
def test_project_id_scoped_token_with_user_domain_name(self):
@@ -2243,7 +2955,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_name=self.domain['name'],
password=self.user['password'],
project_id=self.project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidProjectScopedTokenResponse(r)
def test_domain_id_scoped_token_with_user_id(self):
@@ -2255,7 +2967,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_id_scoped_token_with_user_domain_id(self):
@@ -2268,7 +2980,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_id=self.domain['id'],
password=self.user['password'],
domain_id=self.domain['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_id_scoped_token_with_user_domain_name(self):
@@ -2281,7 +2993,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_name=self.domain['name'],
password=self.user['password'],
domain_id=self.domain['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_name_scoped_token_with_user_id(self):
@@ -2293,7 +3005,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
domain_name=self.domain['name'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_name_scoped_token_with_user_domain_id(self):
@@ -2306,7 +3018,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_id=self.domain['id'],
password=self.user['password'],
domain_name=self.domain['name'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_name_scoped_token_with_user_domain_name(self):
@@ -2319,12 +3031,11 @@ class TestAuth(test_v3.RestfulTestCase):
user_domain_name=self.domain['name'],
password=self.user['password'],
domain_name=self.domain['name'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
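
The v3_create_token helper used throughout is the renamed v3_authenticate_token; per the v3 Identity API it POSTs an auth payload to /auth/tokens. A minimal sketch, assuming build_authentication_request assembles the standard body, of a domain-scoped request like the ones above:

    user_id, password, domain_name = 'uid', 'secret', 'dom'  # sample values
    auth_request = {
        'auth': {
            'identity': {
                'methods': ['password'],
                'password': {'user': {'id': user_id, 'password': password}},
            },
            # Domain scope; the project-scoped tests use
            # {'project': {'id': project_id}} here instead.
            'scope': {'domain': {'name': domain_name}},
        }
    }
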
def test_domain_scope_token_with_group_role(self):
- group = self.new_group_ref(
- domain_id=self.domain_id)
+ group = unit.new_group_ref(domain_id=self.domain_id)
group = self.identity_api.create_group(group)
# add user to group
@@ -2340,7 +3051,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_scope_token_with_name(self):
@@ -2353,7 +3064,7 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
domain_name=self.domain['name'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidDomainScopedTokenResponse(r)
def test_domain_scope_failed(self):
@@ -2361,21 +3072,21 @@ class TestAuth(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
domain_id=self.domain['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_auth_with_id(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
token = r.headers.get('X-Subject-Token')
# test token auth
auth_data = self.build_authentication_request(token=token)
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
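
test_auth_with_id above re-authenticates with the freshly issued token. Assuming the standard v3 'token' method, the second request body would look roughly like:

    token = 'gAAAA...'  # X-Subject-Token value from the first response
    auth_request = {
        'auth': {
            'identity': {
                'methods': ['token'],
                'token': {'id': token},
            }
            # no 'scope' key: the result is another unscoped token
        }
    }
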
def get_v2_token(self, tenant_id=None):
@@ -2393,7 +3104,7 @@ class TestAuth(test_v3.RestfulTestCase):
def test_validate_v2_unscoped_token_with_v3_api(self):
v2_token = self.get_v2_token().result['access']['token']['id']
auth_data = self.build_authentication_request(token=v2_token)
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_validate_v2_scoped_token_with_v3_api(self):
@@ -2404,46 +3115,46 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
token=v2_token,
project_id=self.default_domain_project['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidScopedTokenResponse(r)
def test_invalid_user_id(self):
auth_data = self.build_authentication_request(
user_id=uuid.uuid4().hex,
password=self.user['password'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_invalid_user_name(self):
auth_data = self.build_authentication_request(
username=uuid.uuid4().hex,
user_domain_id=self.domain['id'],
password=self.user['password'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_invalid_domain_id(self):
auth_data = self.build_authentication_request(
username=self.user['name'],
user_domain_id=uuid.uuid4().hex,
password=self.user['password'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_invalid_domain_name(self):
auth_data = self.build_authentication_request(
username=self.user['name'],
user_domain_name=uuid.uuid4().hex,
password=self.user['password'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_invalid_password(self):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=uuid.uuid4().hex)
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_remote_user_no_realm(self):
api = auth.controllers.Auth()
@@ -2524,7 +3235,7 @@ class TestAuth(test_v3.RestfulTestCase):
remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidUnscopedTokenResponse(r)
self.assertNotIn('bind', token)
@@ -2551,7 +3262,7 @@ class TestAuth(test_v3.RestfulTestCase):
remote_user = self.default_domain_user['name']
self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'})
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
# the unscoped token should have bind information in it
token = self.assertValidUnscopedTokenResponse(r)
@@ -2562,7 +3273,7 @@ class TestAuth(test_v3.RestfulTestCase):
# using unscoped token with remote user succeeds
auth_params = {'token': token, 'project_id': self.project_id}
auth_data = self.build_authentication_request(**auth_params)
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = self.assertValidProjectScopedTokenResponse(r)
# the bind information should be carried over from the original token
@@ -2601,16 +3312,16 @@ class TestAuth(test_v3.RestfulTestCase):
token_data['token']['bind'])
def test_authenticating_a_user_with_no_password(self):
- user = self.new_user_ref(domain_id=self.domain['id'])
- user.pop('password', None) # can't have a password for this test
+ user = unit.new_user_ref(domain_id=self.domain['id'])
+ del user['password'] # can't have a password for this test
user = self.identity_api.create_user(user)
auth_data = self.build_authentication_request(
user_id=user['id'],
password='password')
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_disabled_default_project_result_in_unscoped_token(self):
# create a disabled project to work with
@@ -2626,11 +3337,11 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_disabled_default_project_domain_result_in_unscoped_token(self):
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
r = self.post('/domains', body={'domain': domain_ref})
domain = self.assertValidDomainResponse(r, domain_ref)
@@ -2652,7 +3363,7 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_no_access_to_default_project_result_in_unscoped_token(self):
@@ -2664,32 +3375,35 @@ class TestAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
self.assertValidUnscopedTokenResponse(r)
def test_disabled_scope_project_domain_result_in_401(self):
# create a disabled domain
- domain = self.new_domain_ref()
- domain['enabled'] = False
- self.resource_api.create_domain(domain['id'], domain)
+ domain = unit.new_domain_ref()
+ domain = self.resource_api.create_domain(domain['id'], domain)
- # create a project in the disabled domain
- project = self.new_project_ref(domain_id=domain['id'])
+ # create a project in the domain
+ project = unit.new_project_ref(domain_id=domain['id'])
self.resource_api.create_project(project['id'], project)
- # assign some role to self.user for the project in the disabled domain
+ # assign some role to self.user for the project in the domain
self.assignment_api.add_role_to_user_and_project(
self.user['id'],
project['id'],
self.role_id)
+ # Disable the domain
+ domain['enabled'] = False
+ self.resource_api.update_domain(domain['id'], domain)
+
# user should not be able to auth with project_id
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=project['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
# user should not be able to auth with project_name & domain
auth_data = self.build_authentication_request(
@@ -2697,8 +3411,8 @@ class TestAuth(test_v3.RestfulTestCase):
password=self.user['password'],
project_name=project['name'],
project_domain_id=domain['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_auth_methods_with_different_identities_fails(self):
# get the token for a user. This is self.user which is different from
@@ -2710,8 +3424,124 @@ class TestAuth(test_v3.RestfulTestCase):
token=token,
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
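+
test_auth_methods_with_different_identities_fails above combines two auth methods that name different users. In the v3 API both methods ride in a single identity block, which is presumably why mismatched identities are rejected outright; a hedged sketch of such a payload (identifiers are sample values):

    token, other_user_id, password = 'gAAAA...', 'uid2', 'secret'
    auth_request = {
        'auth': {
            'identity': {
                'methods': ['token', 'password'],
                'token': {'id': token},             # identity of user one
                'password': {'user': {'id': other_user_id,
                                      'password': password}},  # user two
            }
        }
    }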
+ def test_authenticate_fails_if_project_unsafe(self):
+ """Verify authenticate to a project with unsafe name fails."""
+ # Start with url name restrictions off, so we can create the unsafe
+ # named project
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='off')
+ unsafe_name = 'i am not / safe'
+ project = unit.new_project_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID,
+ name=unsafe_name)
+ self.resource_api.create_project(project['id'], project)
+ role_member = unit.new_role_ref()
+ self.role_api.create_role(role_member['id'], role_member)
+ self.assignment_api.add_role_to_user_and_project(
+ self.user['id'], project['id'], role_member['id'])
+
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_name=project['name'],
+ project_domain_id=test_v3.DEFAULT_DOMAIN_ID)
+
+ # Since name url restriction is off, we should be able to authenticate
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to new, which should still allow us to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='new')
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to strict and we should fail to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='strict')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_authenticate_fails_if_domain_unsafe(self):
+ """Verify authenticate to a domain with unsafe name fails."""
+ # Start with url name restrictions off, so we can create the unsafe
+ # named domain
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='off')
+ unsafe_name = 'i am not / safe'
+ domain = unit.new_domain_ref(name=unsafe_name)
+ self.resource_api.create_domain(domain['id'], domain)
+ role_member = unit.new_role_ref()
+ self.role_api.create_role(role_member['id'], role_member)
+ self.assignment_api.create_grant(
+ role_member['id'],
+ user_id=self.user['id'],
+ domain_id=domain['id'])
+
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ domain_name=domain['name'])
+
+ # Since name url restriction is off, we should be able to authenticate
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to new, which should still allow us to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='new')
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to strict and we should fail to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='strict')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_authenticate_fails_to_project_if_domain_unsafe(self):
+ """Verify authenticate to a project using unsafe domain name fails."""
+ # Start with url name restrictions off, so we can create the unsafe
+ # named domain
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='off')
+ unsafe_name = 'i am not / safe'
+ domain = unit.new_domain_ref(name=unsafe_name)
+ self.resource_api.create_domain(domain['id'], domain)
+ # Add a (safely named) project to that domain
+ project = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project['id'], project)
+ role_member = unit.new_role_ref()
+ self.role_api.create_role(role_member['id'], role_member)
+ self.assignment_api.create_grant(
+ role_member['id'],
+ user_id=self.user['id'],
+ project_id=project['id'])
+
+ # An auth request by project name, specifying the domain by name
+ auth_data = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_name=project['name'],
+ project_domain_name=domain['name'])
+
+ # Since name url restriction is off, we should be able to authenticate
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to new, which should still allow us to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='new')
+ self.v3_create_token(auth_data)
+
+ # Set the name url restriction to strict and we should fail to
+ # authenticate
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='strict')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
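
The three tests above exercise the [resource] URL-safe naming options: 'off' never checks names, 'new' rejects only creating new unsafe names (existing ones still authenticate), and 'strict' additionally refuses authentication by an unsafe name. A sketch of setting both options at once through the same fixture, with the equivalent keystone.conf entries shown as comments (option names assumed to match the fixture calls):

    # [resource]
    # project_name_url_safe = strict   # one of: off, new, strict
    # domain_name_url_safe = strict
    self.config_fixture.config(group='resource',
                               project_name_url_safe='strict',
                               domain_name_url_safe='strict')
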
class TestAuthJSONExternal(test_v3.RestfulTestCase):
@@ -2736,7 +3566,7 @@ class TestTrustOptional(test_v3.RestfulTestCase):
super(TestTrustOptional, self).config_overrides()
self.config_fixture.config(group='trust', enabled=False)
- def test_trusts_404(self):
+ def test_trusts_returns_not_found(self):
self.get('/OS-TRUST/trusts', body={'trust': {}},
expected_status=http_client.NOT_FOUND)
self.post('/OS-TRUST/trusts', body={'trust': {}},
@@ -2747,11 +3577,11 @@ class TestTrustOptional(test_v3.RestfulTestCase):
user_id=self.user['id'],
password=self.user['password'],
trust_id=uuid.uuid4().hex)
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.FORBIDDEN)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
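
For the trust tests that follow, the trust-scoped request that build_authentication_request(..., trust_id=...) assembles presumably uses the OS-TRUST scope from the v3 trust extension, roughly:

    trustee_user_id, password, trust_id = 'uid', 'secret', 'tid'  # samples
    auth_request = {
        'auth': {
            'identity': {
                'methods': ['password'],
                'password': {'user': {'id': trustee_user_id,
                                      'password': password}},
            },
            # Scope to a trust instead of a project or domain.
            'scope': {'OS-TRUST:trust': {'id': trust_id}},
        }
    }
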
-class TestTrustRedelegation(test_v3.RestfulTestCase):
+class TrustAPIBehavior(test_v3.RestfulTestCase):
"""Redelegation valid and secure
Redelegation is a hierarchical structure of trusts between initial trustor
@@ -2778,7 +3608,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
"""
def config_overrides(self):
- super(TestTrustRedelegation, self).config_overrides()
+ super(TrustAPIBehavior, self).config_overrides()
self.config_fixture.config(
group='trust',
enabled=True,
@@ -2787,14 +3617,13 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
)
def setUp(self):
- super(TestTrustRedelegation, self).setUp()
+ super(TrustAPIBehavior, self).setUp()
# Create a trustee to delegate stuff to
- trustee_user_ref = self.new_user_ref(domain_id=self.domain_id)
- self.trustee_user = self.identity_api.create_user(trustee_user_ref)
- self.trustee_user['password'] = trustee_user_ref['password']
+ self.trustee_user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
# trustor->trustee
- self.redelegated_trust_ref = self.new_trust_ref(
+ self.redelegated_trust_ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
@@ -2804,7 +3633,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
allow_redelegation=True)
# trustor->trustee (no redelegation)
- self.chained_trust_ref = self.new_trust_ref(
+ self.chained_trust_ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
@@ -2865,7 +3694,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
# Attempt to create a redelegated trust that is supposed to last longer
# than the parent trust: give it 10 minutes (> 1 minute).
- too_long_live_chained_trust_ref = self.new_trust_ref(
+ too_long_live_chained_trust_ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
@@ -2894,7 +3723,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
def test_roles_subset(self):
# Build second role
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign a new role to the user
self.assignment_api.create_grant(role_id=role['id'],
@@ -2903,6 +3732,8 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
# Create first trust with extended set of roles
ref = self.redelegated_trust_ref
+ ref['expires_at'] = datetime.datetime.utcnow().replace(
+ year=2032).strftime(unit.TIME_FORMAT)
ref['roles'].append({'id': role['id']})
r = self.post('/OS-TRUST/trusts',
body={'trust': ref})
@@ -2915,6 +3746,9 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
trust_token = self._get_trust_token(trust)
# Chain second trust with roles subset
+ self.chained_trust_ref['expires_at'] = (
+ datetime.datetime.utcnow().replace(year=2028).strftime(
+ unit.TIME_FORMAT))
r = self.post('/OS-TRUST/trusts',
body={'trust': self.chained_trust_ref},
token=trust_token)
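
The expires_at values injected above are built with unit.TIME_FORMAT. Assuming that constant is the usual ISO 8601 UTC layout Keystone emits (the real value lives in keystone.tests.unit), the construction is roughly:

    import datetime

    TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'  # assumed value of unit.TIME_FORMAT

    # A far-future expiry so the parent trust outlives any chained trust.
    expires_at = datetime.datetime.utcnow().replace(
        year=2032).strftime(TIME_FORMAT)
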
@@ -2927,7 +3761,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
def test_redelegate_with_role_by_name(self):
# For role by name testing
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
@@ -2935,19 +3769,23 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
expires=dict(minutes=1),
role_names=[self.role['name']],
allow_redelegation=True)
+ ref['expires_at'] = datetime.datetime.utcnow().replace(
+ year=2032).strftime(unit.TIME_FORMAT)
r = self.post('/OS-TRUST/trusts',
body={'trust': ref})
trust = self.assertValidTrustResponse(r)
# Ensure we can get a token with this trust
trust_token = self._get_trust_token(trust)
# Chain second trust with roles subset
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=True,
role_names=[self.role['name']],
allow_redelegation=True)
+ ref['expires_at'] = datetime.datetime.utcnow().replace(
+ year=2028).strftime(unit.TIME_FORMAT)
r = self.post('/OS-TRUST/trusts',
body={'trust': ref},
token=trust_token)
@@ -2962,7 +3800,7 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
trust_token = self._get_trust_token(trust)
# Build second trust with a role not in parent's roles
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign a new role to the user
self.assignment_api.create_grant(role_id=role['id'],
@@ -2980,12 +3818,18 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
expected_status=http_client.FORBIDDEN)
def test_redelegation_terminator(self):
+ self.redelegated_trust_ref['expires_at'] = (
+ datetime.datetime.utcnow().replace(year=2032).strftime(
+ unit.TIME_FORMAT))
r = self.post('/OS-TRUST/trusts',
body={'trust': self.redelegated_trust_ref})
trust = self.assertValidTrustResponse(r)
trust_token = self._get_trust_token(trust)
# Build second trust - the terminator
+ self.chained_trust_ref['expires_at'] = (
+ datetime.datetime.utcnow().replace(year=2028).strftime(
+ unit.TIME_FORMAT))
ref = dict(self.chained_trust_ref,
redelegation_count=1,
allow_redelegation=False)
@@ -3007,215 +3851,64 @@ class TestTrustRedelegation(test_v3.RestfulTestCase):
token=trust_token,
expected_status=http_client.FORBIDDEN)
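
test_redelegation_terminator above caps a chain by creating a final trust with redelegation_count=1 and allow_redelegation=False. A hedged sketch of building such a terminator ref with the same helper the tests use (identifiers are sample values):

    ref = dict(
        unit.new_trust_ref(
            trustor_user_id='trustor-id',
            trustee_user_id='trustee-id',
            project_id='project-id',
            impersonation=True,
            role_ids=['role-id']),
        # May still be consumed for tokens, but cannot be redelegated
        # further down the chain.
        redelegation_count=1,
        allow_redelegation=False)
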
+ def test_redelegation_without_impersonation(self):
+ # Update trust to not allow impersonation
+ self.redelegated_trust_ref['impersonation'] = False
-class TestTrustChain(test_v3.RestfulTestCase):
-
- def config_overrides(self):
- super(TestTrustChain, self).config_overrides()
- self.config_fixture.config(
- group='trust',
- enabled=True,
- allow_redelegation=True,
- max_redelegation_count=10
- )
-
- def setUp(self):
- super(TestTrustChain, self).setUp()
- # Create trust chain
- self.user_chain = list()
- self.trust_chain = list()
- for _ in range(3):
- user_ref = self.new_user_ref(domain_id=self.domain_id)
- user = self.identity_api.create_user(user_ref)
- user['password'] = user_ref['password']
- self.user_chain.append(user)
-
- # trustor->trustee
- trustee = self.user_chain[0]
- trust_ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=trustee['id'],
- project_id=self.project_id,
- impersonation=True,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
- trust_ref.update(
- allow_redelegation=True,
- redelegation_count=3)
-
- r = self.post('/OS-TRUST/trusts',
- body={'trust': trust_ref})
+ # Create trust
+ resp = self.post('/OS-TRUST/trusts',
+ body={'trust': self.redelegated_trust_ref},
+ expected_status=http_client.CREATED)
+ trust = self.assertValidTrustResponse(resp)
- trust = self.assertValidTrustResponse(r)
+ # Get trusted token without impersonation
auth_data = self.build_authentication_request(
- user_id=trustee['id'],
- password=trustee['password'],
+ user_id=self.trustee_user['id'],
+ password=self.trustee_user['password'],
trust_id=trust['id'])
trust_token = self.get_requested_token(auth_data)
- self.trust_chain.append(trust)
-
- for trustee in self.user_chain[1:]:
- trust_ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=trustee['id'],
- project_id=self.project_id,
- impersonation=True,
- role_ids=[self.role_id])
- trust_ref.update(
- allow_redelegation=True)
- r = self.post('/OS-TRUST/trusts',
- body={'trust': trust_ref},
- token=trust_token)
- trust = self.assertValidTrustResponse(r)
- auth_data = self.build_authentication_request(
- user_id=trustee['id'],
- password=trustee['password'],
- trust_id=trust['id'])
- trust_token = self.get_requested_token(auth_data)
- self.trust_chain.append(trust)
-
- trustee = self.user_chain[-1]
- trust = self.trust_chain[-1]
- auth_data = self.build_authentication_request(
- user_id=trustee['id'],
- password=trustee['password'],
- trust_id=trust['id'])
-
- self.last_token = self.get_requested_token(auth_data)
-
- def assert_user_authenticate(self, user):
- auth_data = self.build_authentication_request(
- user_id=user['id'],
- password=user['password']
- )
- r = self.v3_authenticate_token(auth_data)
- self.assertValidTokenResponse(r)
-
- def assert_trust_tokens_revoked(self, trust_id):
- trustee = self.user_chain[0]
- auth_data = self.build_authentication_request(
- user_id=trustee['id'],
- password=trustee['password']
- )
- r = self.v3_authenticate_token(auth_data)
- self.assertValidTokenResponse(r)
-
- revocation_response = self.get('/OS-REVOKE/events')
- revocation_events = revocation_response.json_body['events']
- found = False
- for event in revocation_events:
- if event.get('OS-TRUST:trust_id') == trust_id:
- found = True
- self.assertTrue(found, 'event with trust_id %s not found in list' %
- trust_id)
-
- def test_delete_trust_cascade(self):
- self.assert_user_authenticate(self.user_chain[0])
- self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': self.trust_chain[0]['id']},
- expected_status=204)
- headers = {'X-Subject-Token': self.last_token}
- self.head('/auth/tokens', headers=headers,
- expected_status=http_client.NOT_FOUND)
- self.assert_trust_tokens_revoked(self.trust_chain[0]['id'])
-
- def test_delete_broken_chain(self):
- self.assert_user_authenticate(self.user_chain[0])
- self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': self.trust_chain[1]['id']},
- expected_status=204)
-
- self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': self.trust_chain[0]['id']},
- expected_status=204)
-
- def test_trustor_roles_revoked(self):
- self.assert_user_authenticate(self.user_chain[0])
-
- self.assignment_api.remove_role_from_user_and_project(
- self.user_id, self.project_id, self.role_id
- )
-
- auth_data = self.build_authentication_request(
- token=self.last_token,
- trust_id=self.trust_chain[-1]['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.NOT_FOUND)
-
- def test_intermediate_user_disabled(self):
- self.assert_user_authenticate(self.user_chain[0])
-
- disabled = self.user_chain[0]
- disabled['enabled'] = False
- self.identity_api.update_user(disabled['id'], disabled)
-
- # Bypass policy enforcement
- with mock.patch.object(rules, 'enforce', return_value=True):
- headers = {'X-Subject-Token': self.last_token}
- self.head('/auth/tokens', headers=headers,
- expected_status=http_client.FORBIDDEN)
-
- def test_intermediate_user_deleted(self):
- self.assert_user_authenticate(self.user_chain[0])
-
- self.identity_api.delete_user(self.user_chain[0]['id'])
-
- # Bypass policy enforcement
- with mock.patch.object(rules, 'enforce', return_value=True):
- headers = {'X-Subject-Token': self.last_token}
- self.head('/auth/tokens', headers=headers,
- expected_status=http_client.FORBIDDEN)
-
-
-class TestTrustAuth(test_v3.RestfulTestCase):
- EXTENSION_NAME = 'revoke'
- EXTENSION_TO_ADD = 'revoke_extension'
-
- def config_overrides(self):
- super(TestTrustAuth, self).config_overrides()
- self.config_fixture.config(group='revoke', driver='kvs')
- self.config_fixture.config(
- group='token',
- provider='pki',
- revoke_by_id=False)
- self.config_fixture.config(group='trust', enabled=True)
+ # Create second user for redelegation
+ trustee_user_2 = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
- def setUp(self):
- super(TestTrustAuth, self).setUp()
-
- # create a trustee to delegate stuff to
- self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
- password = self.trustee_user['password']
- self.trustee_user = self.identity_api.create_user(self.trustee_user)
- self.trustee_user['password'] = password
- self.trustee_user_id = self.trustee_user['id']
+ # Trust for redelegation
+ trust_ref_2 = unit.new_trust_ref(
+ trustor_user_id=self.trustee_user['id'],
+ trustee_user_id=trustee_user_2['id'],
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id],
+ allow_redelegation=False)
- def test_create_trust_bad_request(self):
- # The server returns a 403 Forbidden rather than a 400, see bug 1133435
- self.post('/OS-TRUST/trusts', body={'trust': {}},
- expected_status=http_client.FORBIDDEN)
+ # Creating a second trust should not be allowed, since the trustor does
+ # not have the role to delegate; the request returns 404 Not Found.
+ resp = self.post('/OS-TRUST/trusts',
+ body={'trust': trust_ref_2},
+ token=trust_token,
+ expected_status=http_client.NOT_FOUND)
def test_create_unscoped_trust(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id)
+ trustee_user_id=self.trustee_user['id'])
r = self.post('/OS-TRUST/trusts', body={'trust': ref})
self.assertValidTrustResponse(r, ref)
def test_create_trust_no_roles(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id)
self.post('/OS-TRUST/trusts', body={'trust': ref},
expected_status=http_client.FORBIDDEN)
def _initialize_test_consume_trust(self, count):
# Make sure remaining_uses is decremented as we consume the trust
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
remaining_uses=count,
role_ids=[self.role_id])
@@ -3223,30 +3916,29 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# make sure the trust exists
trust = self.assertValidTrustResponse(r, ref)
r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=200)
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
# get a token for the trustee
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = r.headers.get('X-Subject-Token')
# get a trust token, consume one use
auth_data = self.build_authentication_request(
token=token,
trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
return trust
def test_consume_trust_once(self):
trust = self._initialize_test_consume_trust(2)
# check decremented value
r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=200)
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
trust = r.result.get('trust')
self.assertIsNotNone(trust)
self.assertEqual(1, trust['remaining_uses'])
+ # FIXME(lbragstad): Assert the role that is returned is the right role.
def test_create_one_time_use_trust(self):
trust = self._initialize_test_consume_trust(1)
@@ -3259,61 +3951,15 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
-
- def test_create_trust_with_bad_values_for_remaining_uses(self):
- # negative values for the remaining_uses parameter are forbidden
- self._create_trust_with_bad_remaining_use(bad_value=-1)
- # 0 is a forbidden value as well
- self._create_trust_with_bad_remaining_use(bad_value=0)
- # as are non integer values
- self._create_trust_with_bad_remaining_use(bad_value="a bad value")
- self._create_trust_with_bad_remaining_use(bad_value=7.2)
-
- def _create_trust_with_bad_remaining_use(self, bad_value):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- remaining_uses=bad_value,
- role_ids=[self.role_id])
- self.post('/OS-TRUST/trusts',
- body={'trust': ref},
- expected_status=http_client.BAD_REQUEST)
-
- def test_invalid_trust_request_without_impersonation(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- role_ids=[self.role_id])
-
- del ref['impersonation']
-
- self.post('/OS-TRUST/trusts',
- body={'trust': ref},
- expected_status=http_client.BAD_REQUEST)
-
- def test_invalid_trust_request_without_trustee(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- role_ids=[self.role_id])
-
- del ref['trustee_user_id']
-
- self.post('/OS-TRUST/trusts',
- body={'trust': ref},
- expected_status=http_client.BAD_REQUEST)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_create_unlimited_use_trust(self):
# by default trusts are unlimited in terms of tokens that can be
# generated from them, this test creates such a trust explicitly
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
remaining_uses=None,
role_ids=[self.role_id])
@@ -3321,322 +3967,25 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust = self.assertValidTrustResponse(r, ref)
r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=200)
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
token = r.headers.get('X-Subject-Token')
auth_data = self.build_authentication_request(
token=token,
trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=200)
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
trust = r.result.get('trust')
self.assertIsNone(trust['remaining_uses'])
- def test_trust_crud(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- role_ids=[self.role_id])
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r, ref)
-
- r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=200)
- self.assertValidTrustResponse(r, ref)
-
- # validate roles on the trust
- r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s/roles' % {
- 'trust_id': trust['id']},
- expected_status=200)
- roles = self.assertValidRoleListResponse(r, self.role)
- self.assertIn(self.role['id'], [x['id'] for x in roles])
- self.head(
- '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
- 'trust_id': trust['id'],
- 'role_id': self.role['id']},
- expected_status=200)
- r = self.get(
- '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
- 'trust_id': trust['id'],
- 'role_id': self.role['id']},
- expected_status=200)
- self.assertValidRoleResponse(r, self.role)
-
- r = self.get('/OS-TRUST/trusts', expected_status=200)
- self.assertValidTrustListResponse(r, trust)
-
- # trusts are immutable
- self.patch(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- body={'trust': ref},
- expected_status=http_client.NOT_FOUND)
-
- self.delete(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=204)
-
- self.get(
- '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
- expected_status=http_client.NOT_FOUND)
-
- def test_create_trust_trustee_404(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=uuid.uuid4().hex,
- project_id=self.project_id,
- role_ids=[self.role_id])
- self.post('/OS-TRUST/trusts', body={'trust': ref},
- expected_status=http_client.NOT_FOUND)
-
- def test_create_trust_trustor_trustee_backwards(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.trustee_user_id,
- trustee_user_id=self.user_id,
- project_id=self.project_id,
- role_ids=[self.role_id])
- self.post('/OS-TRUST/trusts', body={'trust': ref},
- expected_status=http_client.FORBIDDEN)
-
- def test_create_trust_project_404(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=uuid.uuid4().hex,
- role_ids=[self.role_id])
- self.post('/OS-TRUST/trusts', body={'trust': ref},
- expected_status=http_client.NOT_FOUND)
-
- def test_create_trust_role_id_404(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- role_ids=[uuid.uuid4().hex])
- self.post('/OS-TRUST/trusts', body={'trust': ref},
- expected_status=http_client.NOT_FOUND)
-
- def test_create_trust_role_name_404(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- role_names=[uuid.uuid4().hex])
- self.post('/OS-TRUST/trusts', body={'trust': ref},
- expected_status=http_client.NOT_FOUND)
-
- def test_v3_v2_intermix_trustor_not_in_default_domain_failed(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.default_domain_user_id,
- project_id=self.project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(
- r, self.default_domain_user)
-
- token = r.headers.get('X-Subject-Token')
-
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(
- path=path, token=CONF.admin_token,
- method='GET', expected_status=http_client.UNAUTHORIZED)
-
- def test_v3_v2_intermix_trustor_not_in_default_domaini_failed(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.default_domain_user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.default_domain_project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- auth_data = self.build_authentication_request(
- user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'],
- project_id=self.default_domain_project_id)
- token = self.get_requested_token(auth_data)
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=self.trustee_user['id'],
- password=self.trustee_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(
- r, self.trustee_user)
- token = r.headers.get('X-Subject-Token')
-
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(
- path=path, token=CONF.admin_token,
- method='GET', expected_status=http_client.UNAUTHORIZED)
-
- def test_v3_v2_intermix_project_not_in_default_domaini_failed(self):
- # create a trustee in default domain to delegate stuff to
- trustee_user = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
- password = trustee_user['password']
- trustee_user = self.identity_api.create_user(trustee_user)
- trustee_user['password'] = password
- trustee_user_id = trustee_user['id']
-
- ref = self.new_trust_ref(
- trustor_user_id=self.default_domain_user_id,
- trustee_user_id=trustee_user_id,
- project_id=self.project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- auth_data = self.build_authentication_request(
- user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'],
- project_id=self.default_domain_project_id)
- token = self.get_requested_token(auth_data)
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=trustee_user['id'],
- password=trustee_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(
- r, trustee_user)
- token = r.headers.get('X-Subject-Token')
-
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(
- path=path, token=CONF.admin_token,
- method='GET', expected_status=http_client.UNAUTHORIZED)
-
- def test_v3_v2_intermix(self):
- # create a trustee in default domain to delegate stuff to
- trustee_user = self.new_user_ref(domain_id=test_v3.DEFAULT_DOMAIN_ID)
- password = trustee_user['password']
- trustee_user = self.identity_api.create_user(trustee_user)
- trustee_user['password'] = password
- trustee_user_id = trustee_user['id']
-
- ref = self.new_trust_ref(
- trustor_user_id=self.default_domain_user_id,
- trustee_user_id=trustee_user_id,
- project_id=self.default_domain_project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
- auth_data = self.build_authentication_request(
- user_id=self.default_domain_user['id'],
- password=self.default_domain_user['password'],
- project_id=self.default_domain_project_id)
- token = self.get_requested_token(auth_data)
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=trustee_user['id'],
- password=trustee_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(
- r, trustee_user)
- token = r.headers.get('X-Subject-Token')
-
- # now validate the v3 token with v2 API
- path = '/v2.0/tokens/%s' % (token)
- self.admin_request(
- path=path, token=CONF.admin_token,
- method='GET', expected_status=200)
-
- def test_exercise_trust_scoped_token_without_impersonation(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=self.trustee_user['id'],
- password=self.trustee_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(r, self.trustee_user)
- self.assertEqual(self.trustee_user['id'],
- r.result['token']['user']['id'])
- self.assertEqual(self.trustee_user['name'],
- r.result['token']['user']['name'])
- self.assertEqual(self.domain['id'],
- r.result['token']['user']['domain']['id'])
- self.assertEqual(self.domain['name'],
- r.result['token']['user']['domain']['name'])
- self.assertEqual(self.project['id'],
- r.result['token']['project']['id'])
- self.assertEqual(self.project['name'],
- r.result['token']['project']['name'])
-
- def test_exercise_trust_scoped_token_with_impersonation(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- impersonation=True,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=self.trustee_user['id'],
- password=self.trustee_user['password'],
- trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(r, self.user)
- self.assertEqual(self.user['id'], r.result['token']['user']['id'])
- self.assertEqual(self.user['name'], r.result['token']['user']['name'])
- self.assertEqual(self.domain['id'],
- r.result['token']['user']['domain']['id'])
- self.assertEqual(self.domain['name'],
- r.result['token']['user']['domain']['name'])
- self.assertEqual(self.project['id'],
- r.result['token']['project']['id'])
- self.assertEqual(self.project['name'],
- r.result['token']['project']['name'])
-
def test_impersonation_token_cannot_create_new_trust(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=True,
expires=dict(minutes=1),
@@ -3653,9 +4002,9 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust_token = self.get_requested_token(auth_data)
# Build second trust
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=True,
expires=dict(minutes=1),
@@ -3668,7 +4017,7 @@ class TestTrustAuth(test_v3.RestfulTestCase):
def test_trust_deleted_grant(self):
# create a new role
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
grant_url = (
@@ -3682,9 +4031,9 @@ class TestTrustAuth(test_v3.RestfulTestCase):
self.put(grant_url)
# create a trust that delegates the new role
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -3702,8 +4051,8 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data,
- expected_status=http_client.FORBIDDEN)
+ r = self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
def test_trust_chained(self):
"""Test that a trust token can't be used to execute another trust.
@@ -3713,28 +4062,26 @@ class TestTrustAuth(test_v3.RestfulTestCase):
"""
# create a sub-trustee user
- sub_trustee_user = self.new_user_ref(
+ sub_trustee_user = unit.create_user(
+ self.identity_api,
domain_id=test_v3.DEFAULT_DOMAIN_ID)
- password = sub_trustee_user['password']
- sub_trustee_user = self.identity_api.create_user(sub_trustee_user)
- sub_trustee_user['password'] = password
sub_trustee_user_id = sub_trustee_user['id']
# create a new role
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign the new role to trustee
self.put(
'/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % {
'project_id': self.project_id,
- 'user_id': self.trustee_user_id,
+ 'user_id': self.trustee_user['id'],
'role_id': role['id']})
# create a trust from trustor -> trustee
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=True,
expires=dict(minutes=1),
@@ -3744,14 +4091,14 @@ class TestTrustAuth(test_v3.RestfulTestCase):
# authenticate as trustee so we can create a second trust
auth_data = self.build_authentication_request(
- user_id=self.trustee_user_id,
+ user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
project_id=self.project_id)
token = self.get_requested_token(auth_data)
# create a trust from trustee -> sub-trustee
- ref = self.new_trust_ref(
- trustor_user_id=self.trustee_user_id,
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.trustee_user['id'],
trustee_user_id=sub_trustee_user_id,
project_id=self.project_id,
impersonation=True,
@@ -3771,12 +4118,11 @@ class TestTrustAuth(test_v3.RestfulTestCase):
auth_data = self.build_authentication_request(
token=trust_token,
trust_id=trust1['id'])
- r = self.v3_authenticate_token(auth_data,
- expected_status=http_client.FORBIDDEN)
+ r = self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
def assertTrustTokensRevoked(self, trust_id):
- revocation_response = self.get('/OS-REVOKE/events',
- expected_status=200)
+ revocation_response = self.get('/OS-REVOKE/events')
revocation_events = revocation_response.json_body['events']
found = False
for event in revocation_events:
@@ -3786,9 +4132,9 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust_id)
def test_delete_trust_revokes_tokens(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -3800,13 +4146,12 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust_id)
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(
+ r = self.v3_create_token(auth_data)
+ self.assertValidProjectScopedTokenResponse(
r, self.trustee_user)
trust_token = r.headers['X-Subject-Token']
self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': trust_id},
- expected_status=204)
+ 'trust_id': trust_id})
headers = {'X-Subject-Token': trust_token}
self.head('/auth/tokens', headers=headers,
expected_status=http_client.NOT_FOUND)
@@ -3817,9 +4162,9 @@ class TestTrustAuth(test_v3.RestfulTestCase):
self.identity_api.update_user(user['id'], user)
def test_trust_get_token_fails_if_trustor_disabled(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -3833,7 +4178,7 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data, expected_status=201)
+ self.v3_create_token(auth_data)
self.disable_user(self.user)
@@ -3841,13 +4186,13 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.FORBIDDEN)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
def test_trust_get_token_fails_if_trustee_disabled(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -3861,7 +4206,7 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data, expected_status=201)
+ self.v3_create_token(auth_data)
self.disable_user(self.trustee_user)
@@ -3869,13 +4214,13 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_delete_trust(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -3886,57 +4231,19 @@ class TestTrustAuth(test_v3.RestfulTestCase):
trust = self.assertValidTrustResponse(r, ref)
self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': trust['id']},
- expected_status=204)
-
- self.get('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': trust['id']},
- expected_status=http_client.NOT_FOUND)
-
- self.get('/OS-TRUST/trusts/%(trust_id)s' % {
- 'trust_id': trust['id']},
- expected_status=http_client.NOT_FOUND)
+ 'trust_id': trust['id']})
auth_data = self.build_authentication_request(
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.UNAUTHORIZED)
-
- def test_list_trusts(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- impersonation=False,
- expires=dict(minutes=1),
- role_ids=[self.role_id])
-
- for i in range(3):
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- self.assertValidTrustResponse(r, ref)
-
- r = self.get('/OS-TRUST/trusts', expected_status=200)
- trusts = r.result['trusts']
- self.assertEqual(3, len(trusts))
- self.assertValidTrustListResponse(r)
-
- r = self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
- self.user_id, expected_status=200)
- trusts = r.result['trusts']
- self.assertEqual(3, len(trusts))
- self.assertValidTrustListResponse(r)
-
- r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
- self.user_id, expected_status=200)
- trusts = r.result['trusts']
- self.assertEqual(0, len(trusts))
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
def test_change_password_invalidates_trust_tokens(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=True,
expires=dict(minutes=1),
@@ -3949,64 +4256,52 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
+ r = self.v3_create_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(r, self.user)
+ self.assertValidProjectScopedTokenResponse(r, self.user)
trust_token = r.headers.get('X-Subject-Token')
self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
- self.user_id, expected_status=200,
- token=trust_token)
+ self.user_id, token=trust_token)
self.assertValidUserResponse(
self.patch('/users/%s' % self.trustee_user['id'],
- body={'user': {'password': uuid.uuid4().hex}},
- expected_status=200))
+ body={'user': {'password': uuid.uuid4().hex}}))
self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
self.user_id, expected_status=http_client.UNAUTHORIZED,
token=trust_token)
def test_trustee_can_do_role_ops(self):
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
- project_id=self.project_id,
- impersonation=True,
- role_ids=[self.role_id])
-
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r)
-
- auth_data = self.build_authentication_request(
- user_id=self.trustee_user['id'],
- password=self.trustee_user['password'])
+ resp = self.post('/OS-TRUST/trusts',
+ body={'trust': self.redelegated_trust_ref})
+ trust = self.assertValidTrustResponse(resp)
+ trust_token = self._get_trust_token(trust)
- r = self.get(
+ resp = self.get(
'/OS-TRUST/trusts/%(trust_id)s/roles' % {
'trust_id': trust['id']},
- auth=auth_data)
- self.assertValidRoleListResponse(r, self.role)
+ token=trust_token)
+ self.assertValidRoleListResponse(resp, self.role)
self.head(
'/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
'trust_id': trust['id'],
'role_id': self.role['id']},
- auth=auth_data,
- expected_status=200)
+ token=trust_token,
+ expected_status=http_client.OK)
- r = self.get(
+ resp = self.get(
'/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
'trust_id': trust['id'],
'role_id': self.role['id']},
- auth=auth_data,
- expected_status=200)
- self.assertValidRoleResponse(r, self.role)
+ token=trust_token)
+ self.assertValidRoleResponse(resp, self.role)
def test_do_not_consume_remaining_uses_when_get_token_fails(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
- trustee_user_id=self.trustee_user_id,
+ trustee_user_id=self.trustee_user['id'],
project_id=self.project_id,
impersonation=False,
expires=dict(minutes=1),
@@ -4023,13 +4318,209 @@ class TestTrustAuth(test_v3.RestfulTestCase):
user_id=self.default_domain_user['id'],
password=self.default_domain_user['password'],
trust_id=trust_id)
- self.v3_authenticate_token(auth_data,
- expected_status=http_client.FORBIDDEN)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
r = self.get('/OS-TRUST/trusts/%s' % trust_id)
self.assertEqual(3, r.result.get('trust').get('remaining_uses'))
+class TestTrustChain(test_v3.RestfulTestCase):
+
+ def config_overrides(self):
+ super(TestTrustChain, self).config_overrides()
+ self.config_fixture.config(
+ group='trust',
+ enabled=True,
+ allow_redelegation=True,
+ max_redelegation_count=10
+ )
+
+ def setUp(self):
+ super(TestTrustChain, self).setUp()
+ """Create a trust chain using redelegation.
+
+ A trust chain is a series of trusts that are redelegated. For example,
+ self.user_list consists of userA, userB, and userC. The first trust in
+ the chain, trustA, is established between self.user and userA. userA
+ then obtains a trust scoped token using trustA, and with that token
+ creates trustB between userA and userB. The pattern continues with
+ userB creating trustC with userC.
+ So the trust chain should look something like:
+ trustA -> trustB -> trustC
+ Where:
+ self.user is trusting userA with trustA
+ userA is trusting userB with trustB
+ userB is trusting userC with trustC
+
+ """
+ self.user_list = list()
+ self.trust_chain = list()
+ for _ in range(3):
+ user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
+ self.user_list.append(user)
+
+ # trustor->trustee redelegation with impersonation
+ trustee = self.user_list[0]
+ trust_ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=trustee['id'],
+ project_id=self.project_id,
+ impersonation=True,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id],
+ allow_redelegation=True,
+ redelegation_count=3)
+
+ # Create a trust between self.user and the first user in the list
+ r = self.post('/OS-TRUST/trusts',
+ body={'trust': trust_ref})
+
+ trust = self.assertValidTrustResponse(r)
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password'],
+ trust_id=trust['id'])
+
+ # Generate a trusted token for the first user
+ trust_token = self.get_requested_token(auth_data)
+ self.trust_chain.append(trust)
+
+ # Loop through the remaining users to create a chain of redelegated trusts.
+ for next_trustee in self.user_list[1:]:
+ trust_ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=next_trustee['id'],
+ project_id=self.project_id,
+ impersonation=True,
+ role_ids=[self.role_id],
+ allow_redelegation=True)
+ r = self.post('/OS-TRUST/trusts',
+ body={'trust': trust_ref},
+ token=trust_token)
+ trust = self.assertValidTrustResponse(r)
+ auth_data = self.build_authentication_request(
+ user_id=next_trustee['id'],
+ password=next_trustee['password'],
+ trust_id=trust['id'])
+ trust_token = self.get_requested_token(auth_data)
+ self.trust_chain.append(trust)
+
+ trustee = self.user_list[-1]
+ trust = self.trust_chain[-1]
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password'],
+ trust_id=trust['id'])
+
+ self.last_token = self.get_requested_token(auth_data)
+
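For readers unfamiliar with the OS-TRUST flow this fixture exercises, here is a minimal standalone sketch of building such a chain against the v3 API. It assumes a python-requests client and a Keystone endpoint at KEYSTONE; the URL and helper names are illustrative, not part of the patch:

    import requests

    KEYSTONE = 'http://localhost:5000/v3'  # assumed endpoint

    def trust_scoped_token(user_id, password, trust_id):
        # Same password method as an ordinary token, plus a trust scope.
        auth = {'identity': {'methods': ['password'],
                             'password': {'user': {'id': user_id,
                                                   'password': password}}},
                'scope': {'OS-TRUST:trust': {'id': trust_id}}}
        r = requests.post(KEYSTONE + '/auth/tokens', json={'auth': auth})
        r.raise_for_status()
        return r.headers['X-Subject-Token']

    def redelegate(token, trustor_id, trustee_id, project_id, role_ids):
        # allow_redelegation lets the trustee create the next link in the chain.
        body = {'trust': {'trustor_user_id': trustor_id,
                          'trustee_user_id': trustee_id,
                          'project_id': project_id,
                          'impersonation': True,
                          'allow_redelegation': True,
                          'roles': [{'id': rid} for rid in role_ids]}}
        r = requests.post(KEYSTONE + '/OS-TRUST/trusts',
                          headers={'X-Auth-Token': token}, json=body)
        r.raise_for_status()
        return r.json()['trust']

Each link is created with the previous link's trust-scoped token, which is exactly what the loop above does with self.post(..., token=trust_token).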
+ def assert_user_authenticate(self, user):
+ auth_data = self.build_authentication_request(
+ user_id=user['id'],
+ password=user['password']
+ )
+ r = self.v3_create_token(auth_data)
+ self.assertValidTokenResponse(r)
+
+ def assert_trust_tokens_revoked(self, trust_id):
+ trustee = self.user_list[0]
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password']
+ )
+ r = self.v3_create_token(auth_data)
+ self.assertValidTokenResponse(r)
+
+ revocation_response = self.get('/OS-REVOKE/events')
+ revocation_events = revocation_response.json_body['events']
+ found = False
+ for event in revocation_events:
+ if event.get('OS-TRUST:trust_id') == trust_id:
+ found = True
+ self.assertTrue(found, 'event with trust_id %s not found in list' %
+ trust_id)
+
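The helper above scans the revocation event list by hand; sketched as a standalone client call (python-requests assumed, names illustrative), the same check looks like this:

    import requests

    def trust_revocation_seen(keystone_url, token, trust_id):
        # Deleting a trust emits an event keyed by 'OS-TRUST:trust_id'.
        resp = requests.get(keystone_url + '/OS-REVOKE/events',
                            headers={'X-Auth-Token': token})
        return any(e.get('OS-TRUST:trust_id') == trust_id
                   for e in resp.json()['events'])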
+ def test_delete_trust_cascade(self):
+ self.assert_user_authenticate(self.user_list[0])
+ self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+ 'trust_id': self.trust_chain[0]['id']})
+
+ headers = {'X-Subject-Token': self.last_token}
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.NOT_FOUND)
+ self.assert_trust_tokens_revoked(self.trust_chain[0]['id'])
+
+ def test_delete_broken_chain(self):
+ self.assert_user_authenticate(self.user_list[0])
+ self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+ 'trust_id': self.trust_chain[0]['id']})
+
+ # Verify the two remaining trusts have been deleted
+ for i in range(len(self.user_list) - 1):
+ auth_data = self.build_authentication_request(
+ user_id=self.user_list[i]['id'],
+ password=self.user_list[i]['password'])
+
+ auth_token = self.get_requested_token(auth_data)
+
+ # Assert the chained trusts have been deleted
+ self.get('/OS-TRUST/trusts/%(trust_id)s' % {
+ 'trust_id': self.trust_chain[i + 1]['id']},
+ token=auth_token,
+ expected_status=http_client.NOT_FOUND)
+
+ def test_trustor_roles_revoked(self):
+ self.assert_user_authenticate(self.user_list[0])
+
+ self.assignment_api.remove_role_from_user_and_project(
+ self.user_id, self.project_id, self.role_id
+ )
+
+ # Verify that users are not allowed to authenticate with trust
+ for i in range(len(self.user_list[1:])):
+ trustee = self.user_list[i]
+ auth_data = self.build_authentication_request(
+ user_id=trustee['id'],
+ password=trustee['password'])
+
+ # Attempt to authenticate with trust
+ token = self.get_requested_token(auth_data)
+ auth_data = self.build_authentication_request(
+ token=token,
+ trust_id=self.trust_chain[i - 1]['id'])
+
+ # Trustee has no delegated roles
+ self.v3_create_token(auth_data,
+ expected_status=http_client.FORBIDDEN)
+
+ def test_intermediate_user_disabled(self):
+ self.assert_user_authenticate(self.user_list[0])
+
+ disabled = self.user_list[0]
+ disabled['enabled'] = False
+ self.identity_api.update_user(disabled['id'], disabled)
+
+ # Bypass policy enforcement
+ with mock.patch.object(rules, 'enforce', return_value=True):
+ headers = {'X-Subject-Token': self.last_token}
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.FORBIDDEN)
+
+ def test_intermediate_user_deleted(self):
+ self.assert_user_authenticate(self.user_list[0])
+
+ self.identity_api.delete_user(self.user_list[0]['id'])
+
+ # Bypass policy enforcement
+ with mock.patch.object(rules, 'enforce', return_value=True):
+ headers = {'X-Subject-Token': self.last_token}
+ self.head('/auth/tokens', headers=headers,
+ expected_status=http_client.FORBIDDEN)
+
+
class TestAPIProtectionWithoutAuthContextMiddleware(test_v3.RestfulTestCase):
def test_api_protection_with_no_auth_context_in_env(self):
auth_data = self.build_authentication_request(
@@ -4045,7 +4536,7 @@ class TestAPIProtectionWithoutAuthContextMiddleware(test_v3.RestfulTestCase):
'query_string': {},
'environment': {}}
r = auth_controller.validate_token(context)
- self.assertEqual(200, r.status_code)
+ self.assertEqual(http_client.OK, r.status_code)
class TestAuthContext(unit.TestCase):
@@ -4105,9 +4596,7 @@ class TestAuthSpecificData(test_v3.RestfulTestCase):
def test_get_catalog_project_scoped_token(self):
"""Call ``GET /auth/catalog`` with a project-scoped token."""
- r = self.get(
- '/auth/catalog',
- expected_status=200)
+ r = self.get('/auth/catalog')
self.assertValidCatalogResponse(r)
def test_get_catalog_domain_scoped_token(self):
@@ -4141,7 +4630,7 @@ class TestAuthSpecificData(test_v3.RestfulTestCase):
expected_status=http_client.UNAUTHORIZED)
def test_get_projects_project_scoped_token(self):
- r = self.get('/auth/projects', expected_status=200)
+ r = self.get('/auth/projects')
self.assertThat(r.json['projects'], matchers.HasLength(1))
self.assertValidProjectListResponse(r)
@@ -4149,452 +4638,318 @@ class TestAuthSpecificData(test_v3.RestfulTestCase):
self.put(path='/domains/%s/users/%s/roles/%s' % (
self.domain['id'], self.user['id'], self.role['id']))
- r = self.get('/auth/domains', expected_status=200)
+ r = self.get('/auth/domains')
self.assertThat(r.json['domains'], matchers.HasLength(1))
self.assertValidDomainListResponse(r)
-class TestFernetTokenProvider(test_v3.RestfulTestCase):
- def setUp(self):
- super(TestFernetTokenProvider, self).setUp()
- self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
-
- def _make_auth_request(self, auth_data):
- resp = self.post('/auth/tokens', body=auth_data, expected_status=201)
- token = resp.headers.get('X-Subject-Token')
- self.assertLess(len(token), 255)
- return token
-
- def _get_unscoped_token(self):
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'])
- return self._make_auth_request(auth_data)
-
- def _get_project_scoped_token(self):
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'],
- project_id=self.project_id)
- return self._make_auth_request(auth_data)
+class TestTrustAuthPKITokenProvider(TrustAPIBehavior, TestTrustChain):
+ def config_overrides(self):
+ super(TestTrustAuthPKITokenProvider, self).config_overrides()
+ self.config_fixture.config(group='token',
+ provider='pki',
+ revoke_by_id=False)
+ self.config_fixture.config(group='trust',
+ enabled=True)
- def _get_domain_scoped_token(self):
- auth_data = self.build_authentication_request(
- user_id=self.user['id'],
- password=self.user['password'],
- domain_id=self.domain_id)
- return self._make_auth_request(auth_data)
- def _get_trust_scoped_token(self, trustee_user, trust):
- auth_data = self.build_authentication_request(
- user_id=trustee_user['id'],
- password=trustee_user['password'],
- trust_id=trust['id'])
- return self._make_auth_request(auth_data)
-
- def _validate_token(self, token, expected_status=200):
- return self.get(
- '/auth/tokens',
- headers={'X-Subject-Token': token},
- expected_status=expected_status)
+class TestTrustAuthPKIZTokenProvider(TrustAPIBehavior, TestTrustChain):
+ def config_overrides(self):
+ super(TestTrustAuthPKIZTokenProvider, self).config_overrides()
+ self.config_fixture.config(group='token',
+ provider='pkiz',
+ revoke_by_id=False)
+ self.config_fixture.config(group='trust',
+ enabled=True)
- def _revoke_token(self, token, expected_status=204):
- return self.delete(
- '/auth/tokens',
- headers={'X-Subject-Token': token},
- expected_status=expected_status)
- def _set_user_enabled(self, user, enabled=True):
- user['enabled'] = enabled
- self.identity_api.update_user(user['id'], user)
+class TestTrustAuthFernetTokenProvider(TrustAPIBehavior, TestTrustChain):
+ def config_overrides(self):
+ super(TestTrustAuthFernetTokenProvider, self).config_overrides()
+ self.config_fixture.config(group='token',
+ provider='fernet',
+ revoke_by_id=False)
+ self.config_fixture.config(group='trust',
+ enabled=True)
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
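For context on the provider configured here: Fernet tokens are non-persistent, symmetric-key tokens, and the KeyRepository fixture supplies the keys the provider encrypts and signs with. A minimal sketch of the underlying primitive from the 'cryptography' library (the primitive only, not Keystone's provider code):

    from cryptography.fernet import Fernet

    key = Fernet.generate_key()          # the key repository stores keys like this
    f = Fernet(key)
    token = f.encrypt(b'token payload')  # urlsafe-base64, authenticated ciphertext
    assert f.decrypt(token) == b'token payload'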
- def _create_trust(self):
- # Create a trustee user
- trustee_user_ref = self.new_user_ref(domain_id=self.domain_id)
- trustee_user = self.identity_api.create_user(trustee_user_ref)
- trustee_user['password'] = trustee_user_ref['password']
- ref = self.new_trust_ref(
- trustor_user_id=self.user_id,
- trustee_user_id=trustee_user['id'],
- project_id=self.project_id,
- impersonation=False,
- role_ids=[self.role_id])
- # Create a trust
- r = self.post('/OS-TRUST/trusts', body={'trust': ref})
- trust = self.assertValidTrustResponse(r)
- return (trustee_user, trust)
+class TestAuthFernetTokenProvider(TestAuth):
+ def setUp(self):
+ super(TestAuthFernetTokenProvider, self).setUp()
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
def config_overrides(self):
- super(TestFernetTokenProvider, self).config_overrides()
+ super(TestAuthFernetTokenProvider, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
- def test_validate_unscoped_token(self):
- unscoped_token = self._get_unscoped_token()
- self._validate_token(unscoped_token)
+ def test_verify_with_bound_token(self):
+ self.config_fixture.config(group='token', bind='kerberos')
+ auth_data = self.build_authentication_request(
+ project_id=self.project['id'])
+ remote_user = self.default_domain_user['name']
+ self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+ 'AUTH_TYPE': 'Negotiate'})
+ # Bind not currently supported by Fernet, see bug 1433311.
+ self.v3_create_token(auth_data,
+ expected_status=http_client.NOT_IMPLEMENTED)
- def test_validate_tampered_unscoped_token_fails(self):
- unscoped_token = self._get_unscoped_token()
- tampered_token = (unscoped_token[:50] + uuid.uuid4().hex +
- unscoped_token[50 + 32:])
- self._validate_token(tampered_token,
- expected_status=http_client.NOT_FOUND)
+ def test_v2_v3_bind_token_intermix(self):
+ self.config_fixture.config(group='token', bind='kerberos')
- def test_revoke_unscoped_token(self):
- unscoped_token = self._get_unscoped_token()
- self._validate_token(unscoped_token)
- self._revoke_token(unscoped_token)
- self._validate_token(unscoped_token,
- expected_status=http_client.NOT_FOUND)
+ # we need our own user registered to the default domain because of
+ # the way external auth works.
+ remote_user = self.default_domain_user['name']
+ self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+ 'AUTH_TYPE': 'Negotiate'})
+ body = {'auth': {}}
+ # Bind not currently supported by Fernet, see bug 1433311.
+ self.admin_request(path='/v2.0/tokens',
+ method='POST',
+ body=body,
+ expected_status=http_client.NOT_IMPLEMENTED)
- def test_unscoped_token_is_invalid_after_disabling_user(self):
- unscoped_token = self._get_unscoped_token()
- # Make sure the token is valid
- self._validate_token(unscoped_token)
- # Disable the user
- self._set_user_enabled(self.user, enabled=False)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- unscoped_token)
+ def test_auth_with_bind_token(self):
+ self.config_fixture.config(group='token', bind=['kerberos'])
- def test_unscoped_token_is_invalid_after_enabling_disabled_user(self):
- unscoped_token = self._get_unscoped_token()
- # Make sure the token is valid
- self._validate_token(unscoped_token)
- # Disable the user
- self._set_user_enabled(self.user, enabled=False)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- unscoped_token)
- # Enable the user
- self._set_user_enabled(self.user)
- # Ensure validating a token for a re-enabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- unscoped_token)
+ auth_data = self.build_authentication_request()
+ remote_user = self.default_domain_user['name']
+ self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
+ 'AUTH_TYPE': 'Negotiate'})
+ # Bind not currently supported by Fernet, see bug 1433311.
+ self.v3_create_token(auth_data,
+ expected_status=http_client.NOT_IMPLEMENTED)
- def test_unscoped_token_is_invalid_after_disabling_user_domain(self):
- unscoped_token = self._get_unscoped_token()
- # Make sure the token is valid
- self._validate_token(unscoped_token)
- # Disable the user's domain
- self.domain['enabled'] = False
- self.resource_api.update_domain(self.domain['id'], self.domain)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- unscoped_token)
- def test_unscoped_token_is_invalid_after_changing_user_password(self):
- unscoped_token = self._get_unscoped_token()
- # Make sure the token is valid
- self._validate_token(unscoped_token)
- # Change user's password
- self.user['password'] = 'Password1'
- self.identity_api.update_user(self.user['id'], self.user)
- # Ensure updating user's password revokes existing user's tokens
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- unscoped_token)
+class TestAuthTOTP(test_v3.RestfulTestCase):
- def test_validate_project_scoped_token(self):
- project_scoped_token = self._get_project_scoped_token()
- self._validate_token(project_scoped_token)
+ def setUp(self):
+ super(TestAuthTOTP, self).setUp()
- def test_validate_domain_scoped_token(self):
- # Grant user access to domain
- self.assignment_api.create_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
- domain_scoped_token = self._get_domain_scoped_token()
- resp = self._validate_token(domain_scoped_token)
- resp_json = json.loads(resp.body)
- self.assertIsNotNone(resp_json['token']['catalog'])
- self.assertIsNotNone(resp_json['token']['roles'])
- self.assertIsNotNone(resp_json['token']['domain'])
+ ref = unit.new_totp_credential(
+ user_id=self.default_domain_user['id'],
+ project_id=self.default_domain_project['id'])
- def test_validate_tampered_project_scoped_token_fails(self):
- project_scoped_token = self._get_project_scoped_token()
- tampered_token = (project_scoped_token[:50] + uuid.uuid4().hex +
- project_scoped_token[50 + 32:])
- self._validate_token(tampered_token,
- expected_status=http_client.NOT_FOUND)
+ self.secret = ref['blob']
- def test_revoke_project_scoped_token(self):
- project_scoped_token = self._get_project_scoped_token()
- self._validate_token(project_scoped_token)
- self._revoke_token(project_scoped_token)
- self._validate_token(project_scoped_token,
- expected_status=http_client.NOT_FOUND)
+ r = self.post('/credentials', body={'credential': ref})
+ self.assertValidCredentialResponse(r, ref)
- def test_project_scoped_token_is_invalid_after_disabling_user(self):
- project_scoped_token = self._get_project_scoped_token()
- # Make sure the token is valid
- self._validate_token(project_scoped_token)
- # Disable the user
- self._set_user_enabled(self.user, enabled=False)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- project_scoped_token)
+ self.addCleanup(self.cleanup)
- def test_domain_scoped_token_is_invalid_after_disabling_user(self):
- # Grant user access to domain
- self.assignment_api.create_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
- domain_scoped_token = self._get_domain_scoped_token()
- # Make sure the token is valid
- self._validate_token(domain_scoped_token)
- # Disable user
- self._set_user_enabled(self.user, enabled=False)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- domain_scoped_token)
+ def auth_plugin_config_override(self):
+ methods = ['totp', 'token', 'password']
+ super(TestAuthTOTP, self).auth_plugin_config_override(methods)
- def test_domain_scoped_token_is_invalid_after_deleting_grant(self):
- # Grant user access to domain
- self.assignment_api.create_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
- domain_scoped_token = self._get_domain_scoped_token()
- # Make sure the token is valid
- self._validate_token(domain_scoped_token)
- # Delete access to domain
- self.assignment_api.delete_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- domain_scoped_token)
+ def _make_credentials(self, cred_type, count=1, user_id=None,
+ project_id=None, blob=None):
+ user_id = user_id or self.default_domain_user['id']
+ project_id = project_id or self.default_domain_project['id']
- def test_project_scoped_token_invalid_after_changing_user_password(self):
- project_scoped_token = self._get_project_scoped_token()
- # Make sure the token is valid
- self._validate_token(project_scoped_token)
- # Update user's password
- self.user['password'] = 'Password1'
- self.identity_api.update_user(self.user['id'], self.user)
- # Ensure updating user's password revokes existing tokens
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- project_scoped_token)
+ creds = []
+ for __ in range(count):
+ if cred_type == 'totp':
+ ref = unit.new_totp_credential(
+ user_id=user_id, project_id=project_id, blob=blob)
+ else:
+ ref = unit.new_credential_ref(
+ user_id=user_id, project_id=project_id)
+ resp = self.post('/credentials', body={'credential': ref})
+ creds.append(resp.json['credential'])
+ return creds
+
+ def _make_auth_data_by_id(self, passcode, user_id=None):
+ return self.build_authentication_request(
+ user_id=user_id or self.default_domain_user['id'],
+ passcode=passcode,
+ project_id=self.project['id'])
- def test_project_scoped_token_invalid_after_disabling_project(self):
- project_scoped_token = self._get_project_scoped_token()
- # Make sure the token is valid
- self._validate_token(project_scoped_token)
- # Disable project
- self.project['enabled'] = False
- self.resource_api.update_project(self.project['id'], self.project)
- # Ensure validating a token for a disabled project fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- project_scoped_token)
+ def _make_auth_data_by_name(self, passcode, username, user_domain_id):
+ return self.build_authentication_request(
+ username=username,
+ user_domain_id=user_domain_id,
+ passcode=passcode,
+ project_id=self.project['id'])
- def test_domain_scoped_token_invalid_after_disabling_domain(self):
- # Grant user access to domain
+ def cleanup(self):
+ totp_creds = self.credential_api.list_credentials_for_user(
+ self.default_domain_user['id'], type='totp')
+
+ other_creds = self.credential_api.list_credentials_for_user(
+ self.default_domain_user['id'], type='other')
+
+ for cred in itertools.chain(other_creds, totp_creds):
+ self.delete('/credentials/%s' % cred['id'],
+ expected_status=http_client.NO_CONTENT)
+
+ def test_with_a_valid_passcode(self):
+ creds = self._make_credentials('totp')
+ secret = creds[-1]['blob']
+ auth_data = self._make_auth_data_by_id(
+ totp._generate_totp_passcode(secret))
+
+ self.v3_create_token(auth_data, expected_status=http_client.CREATED)
+
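totp._generate_totp_passcode here is Keystone's test helper; the passcode itself is standard RFC 6238 TOTP. A self-contained Python 3 sketch, assuming the common defaults (SHA-1, 30-second period, 6 digits) rather than quoting Keystone's implementation:

    import base64
    import hmac
    import struct
    import time
    from hashlib import sha1

    def totp_passcode(secret_b32, period=30, digits=6):
        # RFC 6238: the HOTP counter is the number of periods since the epoch.
        counter = int(time.time()) // period
        key = base64.b32decode(secret_b32.upper())
        digest = hmac.new(key, struct.pack('>Q', counter), sha1).digest()
        offset = digest[-1] & 0x0f  # dynamic truncation (RFC 4226)
        code = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7fffffff
        return '{code:0{digits}d}'.format(code=code % 10 ** digits,
                                          digits=digits)

Both sides must agree on the current time, which is why a later test in this class freezes the clock with a TimeFixture.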
+ def test_with_an_invalid_passcode_and_user_credentials(self):
+ self._make_credentials('totp')
+ auth_data = self._make_auth_data_by_id('000000')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_with_an_invalid_passcode_with_no_user_credentials(self):
+ auth_data = self._make_auth_data_by_id('000000')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_with_a_corrupt_totp_credential(self):
+ self._make_credentials('totp', count=1, blob='0')
+ auth_data = self._make_auth_data_by_id('000000')
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_with_multiple_credentials(self):
+ self._make_credentials('other', 3)
+ creds = self._make_credentials('totp', count=3)
+ secret = creds[-1]['blob']
+
+ auth_data = self._make_auth_data_by_id(
+ totp._generate_totp_passcode(secret))
+ self.v3_create_token(auth_data, expected_status=http_client.CREATED)
+
+ def test_with_multiple_users(self):
+ # make some credentials for the existing user
+ self._make_credentials('totp', count=3)
+
+ # create a new user and their credentials
+ user = unit.create_user(self.identity_api, domain_id=self.domain_id)
self.assignment_api.create_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
- domain_scoped_token = self._get_domain_scoped_token()
- # Make sure the token is valid
- self._validate_token(domain_scoped_token)
- # Disable domain
- self.domain['enabled'] = False
- self.resource_api.update_domain(self.domain['id'], self.domain)
- # Ensure validating a token for a disabled domain fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- domain_scoped_token)
-
- def test_rescope_unscoped_token_with_trust(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- self.assertLess(len(trust_scoped_token), 255)
-
- def test_validate_a_trust_scoped_token(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
-
- def test_validate_tampered_trust_scoped_token_fails(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Get a trust scoped token
- tampered_token = (trust_scoped_token[:50] + uuid.uuid4().hex +
- trust_scoped_token[50 + 32:])
- self._validate_token(tampered_token,
- expected_status=http_client.NOT_FOUND)
+ user_id=user['id'],
+ project_id=self.project['id'])
+ creds = self._make_credentials('totp', count=1, user_id=user['id'])
+ secret = creds[-1]['blob']
- def test_revoke_trust_scoped_token(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
- self._revoke_token(trust_scoped_token)
- self._validate_token(trust_scoped_token,
- expected_status=http_client.NOT_FOUND)
-
- def test_trust_scoped_token_is_invalid_after_disabling_trustee(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
-
- # Disable trustee
- trustee_update_ref = dict(enabled=False)
- self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- trust_scoped_token)
-
- def test_trust_scoped_token_invalid_after_changing_trustee_password(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
- # Change trustee's password
- trustee_update_ref = dict(password='Password1')
- self.identity_api.update_user(trustee_user['id'], trustee_update_ref)
- # Ensure updating trustee's password revokes existing tokens
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- trust_scoped_token)
-
- def test_trust_scoped_token_is_invalid_after_disabling_trustor(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
-
- # Disable the trustor
- trustor_update_ref = dict(enabled=False)
- self.identity_api.update_user(self.user['id'], trustor_update_ref)
- # Ensure validating a token for a disabled user fails
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- trust_scoped_token)
+ # Stop the clock, otherwise there is a chance of auth failure due to
+ # getting a different TOTP between the call here and the call in the
+ # auth plugin.
+ self.useFixture(fixture.TimeFixture())
- def test_trust_scoped_token_invalid_after_changing_trustor_password(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
+ auth_data = self._make_auth_data_by_id(
+ totp._generate_totp_passcode(secret), user_id=user['id'])
+ self.v3_create_token(auth_data, expected_status=http_client.CREATED)
- # Change trustor's password
- trustor_update_ref = dict(password='Password1')
- self.identity_api.update_user(self.user['id'], trustor_update_ref)
- # Ensure updating trustor's password revokes existing user's tokens
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- trust_scoped_token)
+ def test_with_multiple_users_and_invalid_credentials(self):
+ """Prevent logging in with someone else's credentials.
- def test_trust_scoped_token_invalid_after_disabled_trustor_domain(self):
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- # Validate a trust scoped token
- self._validate_token(trust_scoped_token)
+ It's easy to forget to limit the credentials query by user, so
+ test that here as a sanity check.
+ """
+ # make some credentials for the existing user
+ self._make_credentials('totp', count=3)
- # Disable trustor's domain
- self.domain['enabled'] = False
- self.resource_api.update_domain(self.domain['id'], self.domain)
+ # create a new user and their credentials
+ new_user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=new_user['id'],
+ project_id=self.project['id'])
+ user2_creds = self._make_credentials(
+ 'totp', count=1, user_id=new_user['id'])
+
+ user_id = self.default_domain_user['id'] # user1
+ secret = user2_creds[-1]['blob']
+
+ auth_data = self._make_auth_data_by_id(
+ totp._generate_totp_passcode(secret), user_id=user_id)
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_with_username_and_domain_id(self):
+ creds = self._make_credentials('totp')
+ secret = creds[-1]['blob']
+ auth_data = self._make_auth_data_by_name(
+ totp._generate_totp_passcode(secret),
+ username=self.default_domain_user['name'],
+ user_domain_id=self.default_domain_user['domain_id'])
- trustor_update_ref = dict(password='Password1')
- self.identity_api.update_user(self.user['id'], trustor_update_ref)
- # Ensure updating trustor's password revokes existing user's tokens
- self.assertRaises(exception.TokenNotFound,
- self.token_provider_api.validate_token,
- trust_scoped_token)
+ self.v3_create_token(auth_data, expected_status=http_client.CREATED)
- def test_v2_validate_unscoped_token_returns_unauthorized(self):
- """Test raised exception when validating unscoped token.
- Test that validating an unscoped token in v2.0 of a v3 user of a
- non-default domain returns unauthorized.
- """
- unscoped_token = self._get_unscoped_token()
- self.assertRaises(exception.Unauthorized,
- self.token_provider_api.validate_v2_token,
- unscoped_token)
+class TestFetchRevocationList(test_v3.RestfulTestCase):
+ """Test fetch token revocation list on the v3 Identity API."""
- def test_v2_validate_domain_scoped_token_returns_unauthorized(self):
- """Test raised exception when validating a domain scoped token.
+ def config_overrides(self):
+ super(TestFetchRevocationList, self).config_overrides()
+ self.config_fixture.config(group='token', revoke_by_id=True)
+
+ def test_ids_no_tokens(self):
+ # When there are no revoked tokens, the response is an empty list, and
+ # the response is signed.
+ res = self.get('/auth/tokens/OS-PKI/revoked')
+ signed = res.json['signed']
+ clear = cms.cms_verify(signed, CONF.signing.certfile,
+ CONF.signing.ca_certs)
+ payload = json.loads(clear)
+ self.assertEqual({'revoked': []}, payload)
+
+ def test_ids_token(self):
+ # When there's a revoked token, it's in the response, and the response
+ # is signed.
+ token_res = self.v3_create_token(
+ self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
- Test that validating an domain scoped token in v2.0
- returns unauthorized.
- """
+ token_id = token_res.headers.get('X-Subject-Token')
+ token_data = token_res.json['token']
- # Grant user access to domain
- self.assignment_api.create_grant(self.role['id'],
- user_id=self.user['id'],
- domain_id=self.domain['id'])
+ self.delete('/auth/tokens', headers={'X-Subject-Token': token_id})
- scoped_token = self._get_domain_scoped_token()
- self.assertRaises(exception.Unauthorized,
- self.token_provider_api.validate_v2_token,
- scoped_token)
+ res = self.get('/auth/tokens/OS-PKI/revoked')
+ signed = res.json['signed']
+ clear = cms.cms_verify(signed, CONF.signing.certfile,
+ CONF.signing.ca_certs)
+ payload = json.loads(clear)
- def test_v2_validate_trust_scoped_token(self):
- """Test raised exception when validating a trust scoped token.
+ def truncate(ts_str):
+ return ts_str[:19] + 'Z' # 2016-01-21T15:53:52 == 19 chars.
- Test that validating an trust scoped token in v2.0 returns
- unauthorized.
- """
+ exp_token_revoke_data = {
+ 'id': token_id,
+ 'audit_id': token_data['audit_ids'][0],
+ 'expires': truncate(token_data['expires_at']),
+ }
- trustee_user, trust = self._create_trust()
- trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
- self.assertRaises(exception.Unauthorized,
- self.token_provider_api.validate_v2_token,
- trust_scoped_token)
+ self.assertEqual({'revoked': [exp_token_revoke_data]}, payload)
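A client consuming this endpoint would do the same verify-then-parse dance the test does. A minimal sketch using the same keystoneclient helper the test imports (the URL, token, and certificate paths are assumptions):

    import json

    import requests
    from keystoneclient.common import cms

    def fetch_revoked(keystone_url, token, certfile, ca_certs):
        # GET the CMS-signed blob, verify its signature, then parse the JSON.
        resp = requests.get(keystone_url + '/auth/tokens/OS-PKI/revoked',
                            headers={'X-Auth-Token': token})
        clear = cms.cms_verify(resp.json()['signed'], certfile, ca_certs)
        return json.loads(clear)['revoked']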
+ def test_audit_id_only_no_tokens(self):
+ # When there are no revoked tokens and ?audit_id_only is used, the
+ # response is an empty list and is not signed.
+ res = self.get('/auth/tokens/OS-PKI/revoked?audit_id_only')
+ self.assertEqual({'revoked': []}, res.json)
-class TestAuthFernetTokenProvider(TestAuth):
- def setUp(self):
- super(TestAuthFernetTokenProvider, self).setUp()
- self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+ def test_audit_id_only_token(self):
+ # When there's a revoked token and ?audit_id_only is used, the
+ # response contains the audit_id of the token and is not signed.
+ token_res = self.v3_create_token(
+ self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ project_id=self.project['id']))
- def config_overrides(self):
- super(TestAuthFernetTokenProvider, self).config_overrides()
- self.config_fixture.config(group='token', provider='fernet')
+ token_id = token_res.headers.get('X-Subject-Token')
+ token_data = token_res.json['token']
- def test_verify_with_bound_token(self):
- self.config_fixture.config(group='token', bind='kerberos')
- auth_data = self.build_authentication_request(
- project_id=self.project['id'])
- remote_user = self.default_domain_user['name']
- self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
- 'AUTH_TYPE': 'Negotiate'})
- # Bind not current supported by Fernet, see bug 1433311.
- self.v3_authenticate_token(auth_data, expected_status=501)
+ self.delete('/auth/tokens', headers={'X-Subject-Token': token_id})
- def test_v2_v3_bind_token_intermix(self):
- self.config_fixture.config(group='token', bind='kerberos')
+ res = self.get('/auth/tokens/OS-PKI/revoked?audit_id_only')
- # we need our own user registered to the default domain because of
- # the way external auth works.
- remote_user = self.default_domain_user['name']
- self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
- 'AUTH_TYPE': 'Negotiate'})
- body = {'auth': {}}
- # Bind not current supported by Fernet, see bug 1433311.
- self.admin_request(path='/v2.0/tokens',
- method='POST',
- body=body,
- expected_status=501)
+ def truncate(ts_str):
+ return ts_str[:19] + 'Z' # 2016-01-21T15:53:52 == 19 chars.
- def test_auth_with_bind_token(self):
- self.config_fixture.config(group='token', bind=['kerberos'])
+ exp_token_revoke_data = {
+ 'audit_id': token_data['audit_ids'][0],
+ 'expires': truncate(token_data['expires_at']),
+ }
- auth_data = self.build_authentication_request()
- remote_user = self.default_domain_user['name']
- self.admin_app.extra_environ.update({'REMOTE_USER': remote_user,
- 'AUTH_TYPE': 'Negotiate'})
- # Bind not current supported by Fernet, see bug 1433311.
- self.v3_authenticate_token(auth_data, expected_status=501)
+ self.assertEqual({'revoked': [exp_token_revoke_data]}, res.json)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_catalog.py b/keystone-moon/keystone/tests/unit/test_v3_catalog.py
index c536169a..2eb9db14 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_catalog.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_catalog.py
@@ -31,12 +31,12 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_region_with_id(self):
"""Call ``PUT /regions/{region_id}`` w/o an ID in the request body."""
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
region_id = ref.pop('id')
r = self.put(
'/regions/%s' % region_id,
body={'region': ref},
- expected_status=201)
+ expected_status=http_client.CREATED)
self.assertValidRegionResponse(r, ref)
# Double-check that the region ID was kept as-is and not
# populated with a UUID, as is the case with POST /v3/regions
@@ -44,12 +44,12 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_region_with_matching_ids(self):
"""Call ``PUT /regions/{region_id}`` with an ID in the request body."""
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
region_id = ref['id']
r = self.put(
'/regions/%s' % region_id,
body={'region': ref},
- expected_status=201)
+ expected_status=http_client.CREATED)
self.assertValidRegionResponse(r, ref)
# Double-check that the region ID was kept as-is and not
# populated with a UUID, as is the case with POST /v3/regions
@@ -60,16 +60,16 @@ class CatalogTestCase(test_v3.RestfulTestCase):
ref = dict(description="my region")
self.put(
'/regions/myregion',
- body={'region': ref}, expected_status=201)
+ body={'region': ref}, expected_status=http_client.CREATED)
# Create region again with duplicate id
self.put(
'/regions/myregion',
- body={'region': ref}, expected_status=409)
+ body={'region': ref}, expected_status=http_client.CONFLICT)
def test_create_region(self):
"""Call ``POST /regions`` with an ID in the request body."""
# the ref will have an ID defined on it
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
r = self.post(
'/regions',
body={'region': ref})
@@ -83,39 +83,30 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_region_with_empty_id(self):
"""Call ``POST /regions`` with an empty ID in the request body."""
- ref = self.new_region_ref()
- ref['id'] = ''
+ ref = unit.new_region_ref(id='')
- r = self.post(
- '/regions',
- body={'region': ref}, expected_status=201)
+ r = self.post('/regions', body={'region': ref})
self.assertValidRegionResponse(r, ref)
self.assertNotEmpty(r.result['region'].get('id'))
def test_create_region_without_id(self):
"""Call ``POST /regions`` without an ID in the request body."""
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
# instead of defining the ID ourselves...
del ref['id']
# let the service define the ID
- r = self.post(
- '/regions',
- body={'region': ref},
- expected_status=201)
+ r = self.post('/regions', body={'region': ref})
self.assertValidRegionResponse(r, ref)
def test_create_region_without_description(self):
"""Call ``POST /regions`` without description in the request body."""
- ref = self.new_region_ref()
+ ref = unit.new_region_ref(description=None)
del ref['description']
- r = self.post(
- '/regions',
- body={'region': ref},
- expected_status=201)
+ r = self.post('/regions', body={'region': ref})
# Create the description in the reference to compare to since the
# response should now have a description, even though we didn't send
# it with the original reference.
@@ -123,51 +114,34 @@ class CatalogTestCase(test_v3.RestfulTestCase):
self.assertValidRegionResponse(r, ref)
def test_create_regions_with_same_description_string(self):
- """Call ``POST /regions`` with same description in the request bodies.
- """
+ """Call ``POST /regions`` with duplicate descriptions."""
# NOTE(lbragstad): Make sure we can create two regions that have the
# same description.
- ref1 = self.new_region_ref()
- ref2 = self.new_region_ref()
-
region_desc = 'Some Region Description'
- ref1['description'] = region_desc
- ref2['description'] = region_desc
+ ref1 = unit.new_region_ref(description=region_desc)
+ ref2 = unit.new_region_ref(description=region_desc)
- resp1 = self.post(
- '/regions',
- body={'region': ref1},
- expected_status=201)
+ resp1 = self.post('/regions', body={'region': ref1})
self.assertValidRegionResponse(resp1, ref1)
- resp2 = self.post(
- '/regions',
- body={'region': ref2},
- expected_status=201)
+ resp2 = self.post('/regions', body={'region': ref2})
self.assertValidRegionResponse(resp2, ref2)
def test_create_regions_without_descriptions(self):
- """Call ``POST /regions`` with no description in the request bodies.
- """
+ """Call ``POST /regions`` with no description."""
# NOTE(lbragstad): Make sure we can create two regions that have
# no description in the request body. The description should be
# populated by Catalog Manager.
- ref1 = self.new_region_ref()
- ref2 = self.new_region_ref()
+ ref1 = unit.new_region_ref()
+ ref2 = unit.new_region_ref()
del ref1['description']
ref2['description'] = None
- resp1 = self.post(
- '/regions',
- body={'region': ref1},
- expected_status=201)
+ resp1 = self.post('/regions', body={'region': ref1})
- resp2 = self.post(
- '/regions',
- body={'region': ref2},
- expected_status=201)
+ resp2 = self.post('/regions', body={'region': ref2})
# Create the descriptions in the references to compare to since the
# responses should now have descriptions, even though we didn't send
# a description with the original references.
@@ -179,7 +153,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_region_with_conflicting_ids(self):
"""Call ``PUT /regions/{region_id}`` with conflicting region IDs."""
# the region ref is created with an ID
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
# but instead of using that ID, make up a new, conflicting one
self.put(
@@ -193,8 +167,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
self.assertValidRegionListResponse(r, ref=self.region)
def _create_region_with_parent_id(self, parent_id=None):
- ref = self.new_region_ref()
- ref['parent_region_id'] = parent_id
+ ref = unit.new_region_ref(parent_region_id=parent_id)
return self.post(
'/regions',
body={'region': ref})
@@ -220,7 +193,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_update_region(self):
"""Call ``PATCH /regions/{region_id}``."""
- region = self.new_region_ref()
+ region = unit.new_region_ref()
del region['id']
r = self.patch('/regions/%(region_id)s' % {
'region_id': self.region_id},
@@ -229,18 +202,16 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_update_region_without_description_keeps_original(self):
"""Call ``PATCH /regions/{region_id}``."""
- region_ref = self.new_region_ref()
+ region_ref = unit.new_region_ref()
- resp = self.post('/regions', body={'region': region_ref},
- expected_status=201)
+ resp = self.post('/regions', body={'region': region_ref})
region_updates = {
# update with something that's not the description
'parent_region_id': self.region_id,
}
resp = self.patch('/regions/%s' % region_ref['id'],
- body={'region': region_updates},
- expected_status=200)
+ body={'region': region_updates})
# NOTE(dstanek): Keystone should keep the original description.
self.assertEqual(region_ref['description'],
@@ -248,9 +219,8 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_update_region_with_null_description(self):
"""Call ``PATCH /regions/{region_id}``."""
- region = self.new_region_ref()
+ region = unit.new_region_ref(description=None)
del region['id']
- region['description'] = None
r = self.patch('/regions/%(region_id)s' % {
'region_id': self.region_id},
body={'region': region})
@@ -262,8 +232,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_delete_region(self):
"""Call ``DELETE /regions/{region_id}``."""
-
- ref = self.new_region_ref()
+ ref = unit.new_region_ref()
r = self.post(
'/regions',
body={'region': ref})
@@ -276,7 +245,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
+ ref = unit.new_service_ref()
r = self.post(
'/services',
body={'service': ref})
@@ -284,7 +253,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service_no_name(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
+ ref = unit.new_service_ref()
del ref['name']
r = self.post(
'/services',
@@ -294,7 +263,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service_no_enabled(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
+ ref = unit.new_service_ref()
del ref['enabled']
r = self.post(
'/services',
@@ -305,8 +274,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service_enabled_false(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
- ref['enabled'] = False
+ ref = unit.new_service_ref(enabled=False)
r = self.post(
'/services',
body={'service': ref})
@@ -315,8 +283,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service_enabled_true(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
- ref['enabled'] = True
+ ref = unit.new_service_ref(enabled=True)
r = self.post(
'/services',
body={'service': ref})
@@ -325,22 +292,19 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_service_enabled_str_true(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
- ref['enabled'] = 'True'
+ ref = unit.new_service_ref(enabled='True')
self.post('/services', body={'service': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_service_enabled_str_false(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
- ref['enabled'] = 'False'
+ ref = unit.new_service_ref(enabled='False')
self.post('/services', body={'service': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_service_enabled_str_random(self):
"""Call ``POST /services``."""
- ref = self.new_service_ref()
- ref['enabled'] = 'puppies'
+ ref = unit.new_service_ref(enabled='puppies')
self.post('/services', body={'service': ref},
expected_status=http_client.BAD_REQUEST)
@@ -350,8 +314,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
self.assertValidServiceListResponse(r, ref=self.service)
def _create_random_service(self):
- ref = self.new_service_ref()
- ref['enabled'] = True
+ ref = unit.new_service_ref()
response = self.post(
'/services',
body={'service': ref})
@@ -399,7 +362,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_update_service(self):
"""Call ``PATCH /services/{service_id}``."""
- service = self.new_service_ref()
+ service = unit.new_service_ref()
del service['id']
r = self.patch('/services/%(service_id)s' % {
'service_id': self.service_id},
@@ -423,7 +386,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
region = self._create_region_with_parent_id(
parent_id=parent_region_id)
service = self._create_random_service()
- ref = self.new_endpoint_ref(
+ ref = unit.new_endpoint_ref(
service_id=service['id'],
interface=interface,
region_id=region.result['region']['id'])
@@ -547,87 +510,84 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_create_endpoint_no_enabled(self):
"""Call ``POST /endpoints``."""
- ref = self.new_endpoint_ref(service_id=self.service_id)
- r = self.post(
- '/endpoints',
- body={'endpoint': ref})
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
+ r = self.post('/endpoints', body={'endpoint': ref})
ref['enabled'] = True
self.assertValidEndpointResponse(r, ref)
def test_create_endpoint_enabled_true(self):
"""Call ``POST /endpoints`` with enabled: true."""
- ref = self.new_endpoint_ref(service_id=self.service_id,
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id,
enabled=True)
- r = self.post(
- '/endpoints',
- body={'endpoint': ref})
+ r = self.post('/endpoints', body={'endpoint': ref})
self.assertValidEndpointResponse(r, ref)
def test_create_endpoint_enabled_false(self):
"""Call ``POST /endpoints`` with enabled: false."""
- ref = self.new_endpoint_ref(service_id=self.service_id,
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id,
enabled=False)
- r = self.post(
- '/endpoints',
- body={'endpoint': ref})
+ r = self.post('/endpoints', body={'endpoint': ref})
self.assertValidEndpointResponse(r, ref)
def test_create_endpoint_enabled_str_true(self):
"""Call ``POST /endpoints`` with enabled: 'True'."""
- ref = self.new_endpoint_ref(service_id=self.service_id,
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id,
enabled='True')
- self.post(
- '/endpoints',
- body={'endpoint': ref},
- expected_status=http_client.BAD_REQUEST)
+ self.post('/endpoints', body={'endpoint': ref},
+ expected_status=http_client.BAD_REQUEST)
def test_create_endpoint_enabled_str_false(self):
"""Call ``POST /endpoints`` with enabled: 'False'."""
- ref = self.new_endpoint_ref(service_id=self.service_id,
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id,
enabled='False')
- self.post(
- '/endpoints',
- body={'endpoint': ref},
- expected_status=http_client.BAD_REQUEST)
+ self.post('/endpoints', body={'endpoint': ref},
+ expected_status=http_client.BAD_REQUEST)
def test_create_endpoint_enabled_str_random(self):
"""Call ``POST /endpoints`` with enabled: 'puppies'."""
- ref = self.new_endpoint_ref(service_id=self.service_id,
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id,
enabled='puppies')
- self.post(
- '/endpoints',
- body={'endpoint': ref},
- expected_status=http_client.BAD_REQUEST)
+ self.post('/endpoints', body={'endpoint': ref},
+ expected_status=http_client.BAD_REQUEST)
def test_create_endpoint_with_invalid_region_id(self):
"""Call ``POST /endpoints``."""
- ref = self.new_endpoint_ref(service_id=self.service_id)
- ref["region_id"] = uuid.uuid4().hex
+ ref = unit.new_endpoint_ref(service_id=self.service_id)
self.post('/endpoints', body={'endpoint': ref},
expected_status=http_client.BAD_REQUEST)
def test_create_endpoint_with_region(self):
- """EndpointV3 creates the region before creating the endpoint, if
- endpoint is provided with 'region' and no 'region_id'
+ """EndpointV3 creates the region before creating the endpoint.
+
+ This occurs when endpoint is provided with 'region' and no 'region_id'.
"""
- ref = self.new_endpoint_ref(service_id=self.service_id)
- ref["region"] = uuid.uuid4().hex
- ref.pop('region_id')
- self.post('/endpoints', body={'endpoint': ref}, expected_status=201)
+ ref = unit.new_endpoint_ref_with_region(service_id=self.service_id,
+ region=uuid.uuid4().hex)
+ self.post('/endpoints', body={'endpoint': ref})
# Make sure the region is created
- self.get('/regions/%(region_id)s' % {
- 'region_id': ref["region"]})
+ self.get('/regions/%(region_id)s' % {'region_id': ref["region"]})
def test_create_endpoint_with_no_region(self):
"""EndpointV3 allows to creates the endpoint without region."""
- ref = self.new_endpoint_ref(service_id=self.service_id)
- ref.pop('region_id')
- self.post('/endpoints', body={'endpoint': ref}, expected_status=201)
+ ref = unit.new_endpoint_ref(service_id=self.service_id, region_id=None)
+ del ref['region_id'] # cannot just be None, it needs to not exist
+ self.post('/endpoints', body={'endpoint': ref})
def test_create_endpoint_with_empty_url(self):
"""Call ``POST /endpoints``."""
- ref = self.new_endpoint_ref(service_id=self.service_id)
- ref["url"] = ''
+ ref = unit.new_endpoint_ref(service_id=self.service_id, url='')
self.post('/endpoints', body={'endpoint': ref},
expected_status=http_client.BAD_REQUEST)
@@ -640,7 +600,9 @@ class CatalogTestCase(test_v3.RestfulTestCase):
def test_update_endpoint(self):
"""Call ``PATCH /endpoints/{endpoint_id}``."""
- ref = self.new_endpoint_ref(service_id=self.service_id)
+ ref = unit.new_endpoint_ref(service_id=self.service_id,
+ interface='public',
+ region_id=self.region_id)
del ref['id']
r = self.patch(
'/endpoints/%(endpoint_id)s' % {
@@ -704,13 +666,12 @@ class CatalogTestCase(test_v3.RestfulTestCase):
'endpoint_id': self.endpoint_id})
# create a v3 endpoint ref, and then tweak it back to a v2-style ref
- ref = self.new_endpoint_ref(service_id=self.service['id'])
+ ref = unit.new_endpoint_ref_with_region(service_id=self.service['id'],
+ region=uuid.uuid4().hex,
+ internalurl=None)
del ref['id']
del ref['interface']
ref['publicurl'] = ref.pop('url')
- ref['internalurl'] = None
- ref['region'] = ref['region_id']
- del ref['region_id']
# don't set adminurl to ensure its absence is handled like internalurl
# create the endpoint on v2 (using a v3 token)
@@ -751,15 +712,16 @@ class CatalogTestCase(test_v3.RestfulTestCase):
self.assertEqual(endpoint_v2['region'], endpoint_v3['region_id'])
def test_deleting_endpoint_with_space_in_url(self):
- # create a v3 endpoint ref
- ref = self.new_endpoint_ref(service_id=self.service['id'])
-
# add a space to all urls (intentional "i d" to test bug)
url_with_space = "http://127.0.0.1:8774 /v1.1/\$(tenant_i d)s"
- ref['publicurl'] = url_with_space
- ref['internalurl'] = url_with_space
- ref['adminurl'] = url_with_space
- ref['url'] = url_with_space
+
+ # create a v3 endpoint ref
+ ref = unit.new_endpoint_ref(service_id=self.service['id'],
+ region_id=None,
+ publicurl=url_with_space,
+ internalurl=url_with_space,
+ adminurl=url_with_space,
+ url=url_with_space)
# add the endpoint to the database
self.catalog_api.create_endpoint(ref['id'], ref)
@@ -767,7 +729,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
# delete the endpoint
self.delete('/endpoints/%s' % ref['id'])
- # make sure it's deleted (GET should return 404)
+ # make sure it's deleted (GET should return Not Found)
self.get('/endpoints/%s' % ref['id'],
expected_status=http_client.NOT_FOUND)
@@ -776,15 +738,24 @@ class CatalogTestCase(test_v3.RestfulTestCase):
# one valid url is enough, no need to list too many
valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
- ref = self.new_endpoint_ref(self.service_id)
- ref['url'] = valid_url
- self.post('/endpoints',
- body={'endpoint': ref},
- expected_status=201)
+ ref = unit.new_endpoint_ref(self.service_id,
+ interface='public',
+ region_id=self.region_id,
+ url=valid_url)
+ self.post('/endpoints', body={'endpoint': ref})
+
+ def test_endpoint_create_with_valid_url_project_id(self):
+ """Create endpoint with valid url should be tested,too."""
+ valid_url = 'http://127.0.0.1:8774/v1.1/$(project_id)s'
+
+ ref = unit.new_endpoint_ref(self.service_id,
+ interface='public',
+ region_id=self.region_id,
+ url=valid_url)
+ self.post('/endpoints', body={'endpoint': ref})
def test_endpoint_create_with_invalid_url(self):
- """Test the invalid cases: substitutions is not exactly right.
- """
+ """Test the invalid cases: substitutions is not exactly right."""
invalid_urls = [
# using a substitution that is not whitelisted - KeyError
'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
@@ -799,7 +770,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
'http://127.0.0.1:8774/v1.1/$(admin_url)d',
]
- ref = self.new_endpoint_ref(self.service_id)
+ ref = unit.new_endpoint_ref(self.service_id)
for invalid_url in invalid_urls:
ref['url'] = invalid_url
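Why these URLs fail: the catalog substitutes only whitelisted keys into the endpoint URL, and any formatting error marks the URL invalid. A rough model of that check (an assumption for illustration, not Keystone's exact code):

    WHITELIST = {'tenant_id': 'x', 'project_id': 'x', 'user_id': 'x'}

    def url_is_valid(url):
        try:
            # Keystone-style $(key)s placeholders map onto %-formatting.
            url.replace('$(', '%(') % WHITELIST
            return True
        except (KeyError, ValueError, TypeError):
            # unknown key, dangling '$(tenant_id)', or bad type spec '$(...)d'
            return False

Under this model, '$(nonexistent)s' raises KeyError, a bare '$(tenant_id)' with no conversion raises ValueError, and '$(admin_url)d' fails on the non-whitelisted key, matching the three groups listed above.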
@@ -809,37 +780,30 @@ class CatalogTestCase(test_v3.RestfulTestCase):
class TestCatalogAPISQL(unit.TestCase):
- """Tests for the catalog Manager against the SQL backend.
-
- """
+ """Tests for the catalog Manager against the SQL backend."""
def setUp(self):
super(TestCatalogAPISQL, self).setUp()
self.useFixture(database.Database())
self.catalog_api = catalog.Manager()
- self.service_id = uuid.uuid4().hex
- service = {'id': self.service_id, 'name': uuid.uuid4().hex}
+ service = unit.new_service_ref()
+ self.service_id = service['id']
self.catalog_api.create_service(self.service_id, service)
- endpoint = self.new_endpoint_ref(service_id=self.service_id)
+ self.create_endpoint(service_id=self.service_id)
+
+ def create_endpoint(self, service_id, **kwargs):
+ endpoint = unit.new_endpoint_ref(service_id=service_id,
+ region_id=None, **kwargs)
+
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ return endpoint
def config_overrides(self):
super(TestCatalogAPISQL, self).config_overrides()
self.config_fixture.config(group='catalog', driver='sql')
- def new_endpoint_ref(self, service_id):
- return {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'interface': uuid.uuid4().hex[:8],
- 'service_id': service_id,
- 'url': uuid.uuid4().hex,
- 'region': uuid.uuid4().hex,
- }
-
def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
user_id = uuid.uuid4().hex
tenant_id = uuid.uuid4().hex
@@ -851,14 +815,12 @@ class TestCatalogAPISQL(unit.TestCase):
self.assertEqual(1, len(self.catalog_api.list_endpoints()))
# create a new, invalid endpoint - malformed type declaration
- ref = self.new_endpoint_ref(self.service_id)
- ref['url'] = 'http://keystone/%(tenant_id)'
- self.catalog_api.create_endpoint(ref['id'], ref)
+ self.create_endpoint(self.service_id,
+ url='http://keystone/%(tenant_id)')
# create a new, invalid endpoint - nonexistent key
- ref = self.new_endpoint_ref(self.service_id)
- ref['url'] = 'http://keystone/%(you_wont_find_me)s'
- self.catalog_api.create_endpoint(ref['id'], ref)
+ self.create_endpoint(self.service_id,
+ url='http://keystone/%(you_wont_find_me)s')
# verify that the invalid endpoints don't appear in the catalog
catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
@@ -867,9 +829,8 @@ class TestCatalogAPISQL(unit.TestCase):
self.assertEqual(3, len(self.catalog_api.list_endpoints()))
# create another valid endpoint - tenant_id will be replaced
- ref = self.new_endpoint_ref(self.service_id)
- ref['url'] = 'http://keystone/%(tenant_id)s'
- self.catalog_api.create_endpoint(ref['id'], ref)
+ self.create_endpoint(self.service_id,
+ url='http://keystone/%(tenant_id)s')
# there are two valid endpoints, positive check
catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
@@ -877,7 +838,8 @@ class TestCatalogAPISQL(unit.TestCase):
# If the URL has no 'tenant_id' to substitute, we will skip the
# endpoint that contains this kind of URL (negative check).
- catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id=None)
+ tenant_id = None
+ catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))
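The catalog behaviour asserted above, in miniature: endpoints whose URL cannot be fully substituted are silently dropped. A sketch under the assumption that the catalog applies plain %-formatting (this is not keystone's actual formatting code):

    def usable(url, tenant_id):
        try:
            url % {'tenant_id': tenant_id}
            return True
        except (KeyError, TypeError, ValueError):
            return False

    assert usable('http://keystone/%(tenant_id)s', 'abc')
    assert not usable('http://keystone/%(you_wont_find_me)s', 'abc')
    assert not usable('http://keystone/%(tenant_id)', 'abc')  # malformed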
def test_get_catalog_always_returns_service_name(self):
@@ -885,23 +847,15 @@ class TestCatalogAPISQL(unit.TestCase):
tenant_id = uuid.uuid4().hex
# create a service, with a name
- named_svc = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- }
+ named_svc = unit.new_service_ref()
self.catalog_api.create_service(named_svc['id'], named_svc)
- endpoint = self.new_endpoint_ref(service_id=named_svc['id'])
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(service_id=named_svc['id'])
# create a service, with no name
- unnamed_svc = {
- 'id': uuid.uuid4().hex,
- 'type': uuid.uuid4().hex
- }
+ unnamed_svc = unit.new_service_ref(name=None)
+ del unnamed_svc['name']
self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
- endpoint = self.new_endpoint_ref(service_id=unnamed_svc['id'])
- self.catalog_api.create_endpoint(endpoint['id'], endpoint)
+ self.create_endpoint(service_id=unnamed_svc['id'])
catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
@@ -917,9 +871,7 @@ class TestCatalogAPISQL(unit.TestCase):
# TODO(dstanek): this needs refactoring with the test above, but we are in a
# crunch so that will happen in a future patch.
class TestCatalogAPISQLRegions(unit.TestCase):
- """Tests for the catalog Manager against the SQL backend.
-
- """
+ """Tests for the catalog Manager against the SQL backend."""
def setUp(self):
super(TestCatalogAPISQLRegions, self).setUp()
@@ -930,23 +882,13 @@ class TestCatalogAPISQLRegions(unit.TestCase):
super(TestCatalogAPISQLRegions, self).config_overrides()
self.config_fixture.config(group='catalog', driver='sql')
- def new_endpoint_ref(self, service_id):
- return {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'interface': uuid.uuid4().hex[:8],
- 'service_id': service_id,
- 'url': uuid.uuid4().hex,
- 'region_id': uuid.uuid4().hex,
- }
-
def test_get_catalog_returns_proper_endpoints_with_no_region(self):
- service_id = uuid.uuid4().hex
- service = {'id': service_id, 'name': uuid.uuid4().hex}
+ service = unit.new_service_ref()
+ service_id = service['id']
self.catalog_api.create_service(service_id, service)
- endpoint = self.new_endpoint_ref(service_id=service_id)
+ endpoint = unit.new_endpoint_ref(service_id=service_id,
+ region_id=None)
del endpoint['region_id']
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
@@ -958,12 +900,13 @@ class TestCatalogAPISQLRegions(unit.TestCase):
catalog[0]['endpoints'][0], ref=endpoint)
def test_get_catalog_returns_proper_endpoints_with_region(self):
- service_id = uuid.uuid4().hex
- service = {'id': service_id, 'name': uuid.uuid4().hex}
+ service = unit.new_service_ref()
+ service_id = service['id']
self.catalog_api.create_service(service_id, service)
- endpoint = self.new_endpoint_ref(service_id=service_id)
- self.catalog_api.create_region({'id': endpoint['region_id']})
+ endpoint = unit.new_endpoint_ref(service_id=service_id)
+ region = unit.new_region_ref(id=endpoint['region_id'])
+ self.catalog_api.create_region(region)
self.catalog_api.create_endpoint(endpoint['id'], endpoint)
endpoint = self.catalog_api.get_endpoint(endpoint['id'])
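The recurring pattern in the hunks above is replacing hand-rolled reference dicts with the shared helpers in keystone.tests.unit. Signatures here are inferred from their call sites in this diff:

    from keystone.tests import unit

    service = unit.new_service_ref()   # dict with generated 'id', 'name'
    endpoint = unit.new_endpoint_ref(service_id=service['id'],
                                     interface='public',
                                     region_id=None)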
diff --git a/keystone-moon/keystone/tests/unit/test_v3_credential.py b/keystone-moon/keystone/tests/unit/test_v3_credential.py
index dd8cf2dd..07995f19 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_credential.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_credential.py
@@ -21,49 +21,46 @@ from oslo_config import cfg
from six.moves import http_client
from testtools import matchers
+from keystone.common import utils
+from keystone.contrib.ec2 import controllers
from keystone import exception
+from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = cfg.CONF
+CRED_TYPE_EC2 = controllers.CRED_TYPE_EC2
class CredentialBaseTestCase(test_v3.RestfulTestCase):
def _create_dict_blob_credential(self):
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- credential_id = hashlib.sha256(blob['access']).hexdigest()
- credential = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project_id)
- credential['id'] = credential_id
+ blob, credential = unit.new_ec2_credential(user_id=self.user['id'],
+ project_id=self.project_id)
# Store the blob as a dict, *not* JSON: see bug #1259584.
# This means we can test that the dict->json workaround, added
# as part of the bugfix for backwards compatibility, works.
credential['blob'] = blob
- credential['type'] = 'ec2'
+ credential_id = credential['id']
+
# Create direct via the DB API to avoid validation failure
- self.credential_api.create_credential(
- credential_id,
- credential)
- expected_blob = json.dumps(blob)
- return expected_blob, credential_id
+ self.credential_api.create_credential(credential_id, credential)
+
+ return json.dumps(blob), credential_id
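The workaround exercised here (bug #1259584) is that blobs stored as raw dicts by older code must still come back from the API as JSON text. A sketch of the compatibility conversion, not the actual keystone code path:

    import json

    def normalize_blob(blob):
        # dicts written directly to the backend are serialized on read;
        # strings are assumed to be JSON already and pass through
        return blob if isinstance(blob, str) else json.dumps(blob)

    assert normalize_blob({'access': 'a'}) == '{"access": "a"}'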
class CredentialTestCase(CredentialBaseTestCase):
"""Test credential CRUD."""
+
def setUp(self):
super(CredentialTestCase, self).setUp()
- self.credential_id = uuid.uuid4().hex
- self.credential = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project_id)
- self.credential['id'] = self.credential_id
+ self.credential = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=self.project_id)
+
self.credential_api.create_credential(
- self.credential_id,
+ self.credential['id'],
self.credential)
def test_credential_api_delete_credentials_for_project(self):
@@ -72,7 +69,7 @@ class CredentialTestCase(CredentialBaseTestCase):
# once we delete all credentials for self.project_id
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
- credential_id=self.credential_id)
+ credential_id=self.credential['id'])
def test_credential_api_delete_credentials_for_user(self):
self.credential_api.delete_credentials_for_user(self.user_id)
@@ -80,7 +77,7 @@ class CredentialTestCase(CredentialBaseTestCase):
# once we delete all credentials for self.user_id
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
- credential_id=self.credential_id)
+ credential_id=self.credential['id'])
def test_list_credentials(self):
"""Call ``GET /credentials``."""
@@ -89,10 +86,8 @@ class CredentialTestCase(CredentialBaseTestCase):
def test_list_credentials_filtered_by_user_id(self):
"""Call ``GET /credentials?user_id={user_id}``."""
- credential = self.new_credential_ref(
- user_id=uuid.uuid4().hex)
- self.credential_api.create_credential(
- credential['id'], credential)
+ credential = unit.new_credential_ref(user_id=uuid.uuid4().hex)
+ self.credential_api.create_credential(credential['id'], credential)
r = self.get('/credentials?user_id=%s' % self.user['id'])
self.assertValidCredentialListResponse(r, ref=self.credential)
@@ -103,9 +98,9 @@ class CredentialTestCase(CredentialBaseTestCase):
"""Call ``GET /credentials?type={type}``."""
# The type ec2 was chosen, instead of a random string,
# because the type must be in the list of supported types
- ec2_credential = self.new_credential_ref(user_id=uuid.uuid4().hex,
+ ec2_credential = unit.new_credential_ref(user_id=uuid.uuid4().hex,
project_id=self.project_id,
- cred_type='ec2')
+ type=CRED_TYPE_EC2)
ec2_resp = self.credential_api.create_credential(
ec2_credential['id'], ec2_credential)
@@ -123,8 +118,8 @@ class CredentialTestCase(CredentialBaseTestCase):
cred_ec2 = r_ec2.result['credentials'][0]
self.assertValidCredentialListResponse(r_ec2, ref=ec2_resp)
- self.assertEqual('ec2', cred_ec2['type'])
- self.assertEqual(cred_ec2['id'], ec2_credential['id'])
+ self.assertEqual(CRED_TYPE_EC2, cred_ec2['type'])
+ self.assertEqual(ec2_credential['id'], cred_ec2['id'])
def test_list_credentials_filtered_by_type_and_user_id(self):
"""Call ``GET /credentials?user_id={user_id}&type={type}``."""
@@ -132,12 +127,10 @@ class CredentialTestCase(CredentialBaseTestCase):
user2_id = uuid.uuid4().hex
# Creating credentials for two different users
- credential_user1_ec2 = self.new_credential_ref(
- user_id=user1_id, cred_type='ec2')
- credential_user1_cert = self.new_credential_ref(
- user_id=user1_id)
- credential_user2_cert = self.new_credential_ref(
- user_id=user2_id)
+ credential_user1_ec2 = unit.new_credential_ref(user_id=user1_id,
+ type=CRED_TYPE_EC2)
+ credential_user1_cert = unit.new_credential_ref(user_id=user1_id)
+ credential_user2_cert = unit.new_credential_ref(user_id=user2_id)
self.credential_api.create_credential(
credential_user1_ec2['id'], credential_user1_ec2)
@@ -150,12 +143,12 @@ class CredentialTestCase(CredentialBaseTestCase):
self.assertValidCredentialListResponse(r, ref=credential_user1_ec2)
self.assertThat(r.result['credentials'], matchers.HasLength(1))
cred = r.result['credentials'][0]
- self.assertEqual('ec2', cred['type'])
+ self.assertEqual(CRED_TYPE_EC2, cred['type'])
self.assertEqual(user1_id, cred['user_id'])
def test_create_credential(self):
"""Call ``POST /credentials``."""
- ref = self.new_credential_ref(user_id=self.user['id'])
+ ref = unit.new_credential_ref(user_id=self.user['id'])
r = self.post(
'/credentials',
body={'credential': ref})
@@ -165,18 +158,17 @@ class CredentialTestCase(CredentialBaseTestCase):
"""Call ``GET /credentials/{credential_id}``."""
r = self.get(
'/credentials/%(credential_id)s' % {
- 'credential_id': self.credential_id})
+ 'credential_id': self.credential['id']})
self.assertValidCredentialResponse(r, self.credential)
def test_update_credential(self):
"""Call ``PATCH /credentials/{credential_id}``."""
- ref = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project_id)
+ ref = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=self.project_id)
del ref['id']
r = self.patch(
'/credentials/%(credential_id)s' % {
- 'credential_id': self.credential_id},
+ 'credential_id': self.credential['id']},
body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
@@ -184,29 +176,24 @@ class CredentialTestCase(CredentialBaseTestCase):
"""Call ``DELETE /credentials/{credential_id}``."""
self.delete(
'/credentials/%(credential_id)s' % {
- 'credential_id': self.credential_id})
+ 'credential_id': self.credential['id']})
def test_create_ec2_credential(self):
"""Call ``POST /credentials`` for creating ec2 credential."""
- ref = self.new_credential_ref(user_id=self.user['id'],
- project_id=self.project_id)
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- ref['blob'] = json.dumps(blob)
- ref['type'] = 'ec2'
- r = self.post(
- '/credentials',
- body={'credential': ref})
+ blob, ref = unit.new_ec2_credential(user_id=self.user['id'],
+ project_id=self.project_id)
+ r = self.post('/credentials', body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
# Assert credential id is the same as hash of access key id for
# ec2 credentials
- self.assertEqual(r.result['credential']['id'],
- hashlib.sha256(blob['access']).hexdigest())
+ access = blob['access'].encode('utf-8')
+ self.assertEqual(hashlib.sha256(access).hexdigest(),
+ r.result['credential']['id'])
# Create second ec2 credential with the same access key id and check
# for conflict.
self.post(
'/credentials',
- body={'credential': ref}, expected_status=409)
+ body={'credential': ref}, expected_status=http_client.CONFLICT)
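The id rule asserted above: an ec2 credential's id is the SHA-256 hex digest of its access key, with the key encoded to bytes first so the same code runs on Python 3. The access key below is a stand-in value:

    import hashlib

    access = u'0123456789abcdef'  # hypothetical access key
    cred_id = hashlib.sha256(access.encode('utf-8')).hexdigest()
    assert len(cred_id) == 64     # 256 bits rendered as hex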
def test_get_ec2_dict_blob(self):
"""Ensure non-JSON blob data is correctly converted."""
@@ -215,7 +202,11 @@ class CredentialTestCase(CredentialBaseTestCase):
r = self.get(
'/credentials/%(credential_id)s' % {
'credential_id': credential_id})
- self.assertEqual(expected_blob, r.result['credential']['blob'])
+
+ # use json.loads to transform the blobs back into Python dictionaries
+ # to avoid problems with the keys being in different orders.
+ self.assertEqual(json.loads(expected_blob),
+ json.loads(r.result['credential']['blob']))
def test_list_ec2_dict_blob(self):
"""Ensure non-JSON blob data is correctly converted."""
@@ -225,47 +216,49 @@ class CredentialTestCase(CredentialBaseTestCase):
list_creds = list_r.result['credentials']
list_ids = [r['id'] for r in list_creds]
self.assertIn(credential_id, list_ids)
+ # use json.loads to transform the blobs back into Python dictionaries
+ # to avoid problems with the keys being in different orders.
for r in list_creds:
if r['id'] == credential_id:
- self.assertEqual(expected_blob, r['blob'])
+ self.assertEqual(json.loads(expected_blob),
+ json.loads(r['blob']))
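Parsing both sides with json.loads is needed because the key order of a serialized blob is not guaranteed; only the parsed dicts compare reliably:

    import json

    blob = {'access': 'abc', 'secret': 'xyz'}
    # round-tripping through JSON preserves the mapping, not the text
    assert json.loads(json.dumps(blob)) == blob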
def test_create_non_ec2_credential(self):
- """Call ``POST /credentials`` for creating non-ec2 credential."""
- ref = self.new_credential_ref(user_id=self.user['id'])
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- ref['blob'] = json.dumps(blob)
- r = self.post(
- '/credentials',
- body={'credential': ref})
+ """Test creating non-ec2 credential.
+
+ Call ``POST /credentials``.
+ """
+ blob, ref = unit.new_cert_credential(user_id=self.user['id'])
+
+ r = self.post('/credentials', body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
# Assert credential id is not the same as hash of access key id for
# non-ec2 credentials
- self.assertNotEqual(r.result['credential']['id'],
- hashlib.sha256(blob['access']).hexdigest())
+ access = blob['access'].encode('utf-8')
+ self.assertNotEqual(hashlib.sha256(access).hexdigest(),
+ r.result['credential']['id'])
def test_create_ec2_credential_with_missing_project_id(self):
- """Call ``POST /credentials`` for creating ec2
- credential with missing project_id.
+ """Test Creating ec2 credential with missing project_id.
+
+ Call ``POST /credentials``.
"""
- ref = self.new_credential_ref(user_id=self.user['id'])
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- ref['blob'] = json.dumps(blob)
- ref['type'] = 'ec2'
+ _, ref = unit.new_ec2_credential(user_id=self.user['id'],
+ project_id=None)
# Assert bad request status when missing project_id
self.post(
'/credentials',
body={'credential': ref}, expected_status=http_client.BAD_REQUEST)
def test_create_ec2_credential_with_invalid_blob(self):
- """Call ``POST /credentials`` for creating ec2
- credential with invalid blob.
+ """Test creating ec2 credential with invalid blob.
+
+ Call ``POST /credentials``.
"""
- ref = self.new_credential_ref(user_id=self.user['id'],
- project_id=self.project_id)
- ref['blob'] = '{"abc":"def"d}'
- ref['type'] = 'ec2'
+ ref = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=self.project_id,
+ blob='{"abc":"def"d}',
+ type=CRED_TYPE_EC2)
# Assert bad request status when request contains invalid blob
response = self.post(
'/credentials',
@@ -274,20 +267,21 @@ class CredentialTestCase(CredentialBaseTestCase):
def test_create_credential_with_admin_token(self):
# Make sure we can create credential with the static admin token
- ref = self.new_credential_ref(user_id=self.user['id'])
+ ref = unit.new_credential_ref(user_id=self.user['id'])
r = self.post(
'/credentials',
body={'credential': ref},
- token=CONF.admin_token)
+ token=self.get_admin_token())
self.assertValidCredentialResponse(r, ref)
class TestCredentialTrustScoped(test_v3.RestfulTestCase):
"""Test credential with trust scoped token."""
+
def setUp(self):
super(TestCredentialTrustScoped, self).setUp()
- self.trustee_user = self.new_user_ref(domain_id=self.domain_id)
+ self.trustee_user = unit.new_user_ref(domain_id=self.domain_id)
password = self.trustee_user['password']
self.trustee_user = self.identity_api.create_user(self.trustee_user)
self.trustee_user['password'] = password
@@ -298,9 +292,12 @@ class TestCredentialTrustScoped(test_v3.RestfulTestCase):
self.config_fixture.config(group='trust', enabled=True)
def test_trust_scoped_ec2_credential(self):
- """Call ``POST /credentials`` for creating ec2 credential."""
+ """Test creating trust scoped ec2 credential.
+
+ Call ``POST /credentials``.
+ """
# Create the trust
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.project_id,
@@ -316,22 +313,15 @@ class TestCredentialTrustScoped(test_v3.RestfulTestCase):
user_id=self.trustee_user['id'],
password=self.trustee_user['password'],
trust_id=trust['id'])
- r = self.v3_authenticate_token(auth_data)
- self.assertValidProjectTrustScopedTokenResponse(r, self.user)
+ r = self.v3_create_token(auth_data)
+ self.assertValidProjectScopedTokenResponse(r, self.user)
trust_id = r.result['token']['OS-TRUST:trust']['id']
token_id = r.headers.get('X-Subject-Token')
# Create the credential with the trust scoped token
- ref = self.new_credential_ref(user_id=self.user['id'],
- project_id=self.project_id)
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- ref['blob'] = json.dumps(blob)
- ref['type'] = 'ec2'
- r = self.post(
- '/credentials',
- body={'credential': ref},
- token=token_id)
+ blob, ref = unit.new_ec2_credential(user_id=self.user['id'],
+ project_id=self.project_id)
+ r = self.post('/credentials', body={'credential': ref}, token=token_id)
# We expect the response blob to contain the trust_id
ret_ref = ref.copy()
@@ -342,8 +332,9 @@ class TestCredentialTrustScoped(test_v3.RestfulTestCase):
# Assert credential id is the same as hash of access key id for
# ec2 credentials
- self.assertEqual(r.result['credential']['id'],
- hashlib.sha256(blob['access']).hexdigest())
+ access = blob['access'].encode('utf-8')
+ self.assertEqual(hashlib.sha256(access).hexdigest(),
+ r.result['credential']['id'])
# Create second ec2 credential with the same access key id and check
# for conflict.
@@ -351,11 +342,12 @@ class TestCredentialTrustScoped(test_v3.RestfulTestCase):
'/credentials',
body={'credential': ref},
token=token_id,
- expected_status=409)
+ expected_status=http_client.CONFLICT)
class TestCredentialEc2(CredentialBaseTestCase):
"""Test v3 credential compatibility with ec2tokens."""
+
def setUp(self):
super(TestCredentialEc2, self).setUp()
@@ -382,25 +374,19 @@ class TestCredentialEc2(CredentialBaseTestCase):
r = self.post(
'/ec2tokens',
body={'ec2Credentials': sig_ref},
- expected_status=200)
+ expected_status=http_client.OK)
self.assertValidTokenResponse(r)
def test_ec2_credential_signature_validate(self):
"""Test signature validation with a v3 ec2 credential."""
- ref = self.new_credential_ref(
- user_id=self.user['id'],
- project_id=self.project_id)
- blob = {"access": uuid.uuid4().hex,
- "secret": uuid.uuid4().hex}
- ref['blob'] = json.dumps(blob)
- ref['type'] = 'ec2'
- r = self.post(
- '/credentials',
- body={'credential': ref})
+ blob, ref = unit.new_ec2_credential(user_id=self.user['id'],
+ project_id=self.project_id)
+ r = self.post('/credentials', body={'credential': ref})
self.assertValidCredentialResponse(r, ref)
# Assert credential id is the same as hash of access key id
- self.assertEqual(r.result['credential']['id'],
- hashlib.sha256(blob['access']).hexdigest())
+ access = blob['access'].encode('utf-8')
+ self.assertEqual(hashlib.sha256(access).hexdigest(),
+ r.result['credential']['id'])
cred_blob = json.loads(r.result['credential']['blob'])
self.assertEqual(blob, cred_blob)
@@ -409,7 +395,7 @@ class TestCredentialEc2(CredentialBaseTestCase):
def test_ec2_credential_signature_validate_legacy(self):
"""Test signature validation with a legacy v3 ec2 credential."""
- cred_json, credential_id = self._create_dict_blob_credential()
+ cred_json, _ = self._create_dict_blob_credential()
cred_blob = json.loads(cred_json)
self._validate_signature(access=cred_blob['access'],
secret=cred_blob['secret'])
@@ -442,6 +428,19 @@ class TestCredentialEc2(CredentialBaseTestCase):
self.assertThat(ec2_cred['links']['self'],
matchers.EndsWith(uri))
+ def test_ec2_cannot_get_non_ec2_credential(self):
+ access_key = uuid.uuid4().hex
+ cred_id = utils.hash_access_key(access_key)
+ non_ec2_cred = unit.new_credential_ref(
+ user_id=self.user_id,
+ project_id=self.project_id)
+ non_ec2_cred['id'] = cred_id
+ self.credential_api.create_credential(cred_id, non_ec2_cred)
+ uri = '/'.join([self._get_ec2_cred_uri(), access_key])
+ # if access_key is not found, ec2 controller raises Unauthorized
+ # exception
+ self.get(uri, expected_status=http_client.UNAUTHORIZED)
+
def test_ec2_list_credentials(self):
"""Test ec2 credential listing."""
self._get_ec2_cred()
@@ -452,13 +451,26 @@ class TestCredentialEc2(CredentialBaseTestCase):
self.assertThat(r.result['links']['self'],
matchers.EndsWith(uri))
+ # non-EC2 credentials won't be fetched
+ non_ec2_cred = unit.new_credential_ref(
+ user_id=self.user_id,
+ project_id=self.project_id)
+ non_ec2_cred['type'] = uuid.uuid4().hex
+ self.credential_api.create_credential(non_ec2_cred['id'],
+ non_ec2_cred)
+ r = self.get(uri)
+ cred_list_2 = r.result['credentials']
+ # still one element because non-EC2 credentials are not returned.
+ self.assertEqual(1, len(cred_list_2))
+ self.assertEqual(cred_list[0], cred_list_2[0])
+
def test_ec2_delete_credential(self):
"""Test ec2 credential deletion."""
ec2_cred = self._get_ec2_cred()
uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']])
cred_from_credential_api = (
self.credential_api
- .list_credentials_for_user(self.user_id))
+ .list_credentials_for_user(self.user_id, type=CRED_TYPE_EC2))
self.assertEqual(1, len(cred_from_credential_api))
self.delete(uri)
self.assertRaises(exception.CredentialNotFound,
diff --git a/keystone-moon/keystone/tests/unit/test_v3_domain_config.py b/keystone-moon/keystone/tests/unit/test_v3_domain_config.py
index 701cd3cf..ee716081 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_domain_config.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_domain_config.py
@@ -17,6 +17,7 @@ from oslo_config import cfg
from six.moves import http_client
from keystone import exception
+from keystone.tests import unit
from keystone.tests.unit import test_v3
@@ -29,7 +30,7 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
def setUp(self):
super(DomainConfigTestCase, self).setUp()
- self.domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ self.domain = unit.new_domain_ref()
self.resource_api.create_domain(self.domain['id'], self.domain)
self.config = {'ldap': {'url': uuid.uuid4().hex,
'user_tree_dn': uuid.uuid4().hex},
@@ -40,21 +41,34 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
url = '/domains/%(domain_id)s/config' % {
'domain_id': self.domain['id']}
r = self.put(url, body={'config': self.config},
- expected_status=201)
+ expected_status=http_client.CREATED)
res = self.domain_config_api.get_config(self.domain['id'])
self.assertEqual(self.config, r.result['config'])
self.assertEqual(self.config, res)
+ def test_create_config_invalid_domain(self):
+ """Call ``PUT /domains/{domain_id}/config``
+
+ While creating Identity API-based domain config with an invalid domain
+ id provided, the request shall be rejected with a 404 Domain Not
+ Found response.
+ """
+ invalid_domain_id = uuid.uuid4().hex
+ url = '/domains/%(domain_id)s/config' % {
+ 'domain_id': invalid_domain_id}
+ self.put(url, body={'config': self.config},
+ expected_status=exception.DomainNotFound.code)
+
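The expected_status convention used by these new tests assumes each keystone exception class exposes its HTTP status as a ``code`` attribute, so the test stays in sync with the exception definition:

    from six.moves import http_client

    from keystone import exception

    # DomainNotFound derives from NotFound, which carries code 404
    assert exception.DomainNotFound.code == http_client.NOT_FOUND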
def test_create_config_twice(self):
"""Check multiple creates don't throw error"""
self.put('/domains/%(domain_id)s/config' % {
'domain_id': self.domain['id']},
body={'config': self.config},
- expected_status=201)
+ expected_status=http_client.CREATED)
self.put('/domains/%(domain_id)s/config' % {
'domain_id': self.domain['id']},
body={'config': self.config},
- expected_status=200)
+ expected_status=http_client.OK)
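The bare integers are being replaced with named constants from six.moves.http_client, which resolve to the same values on both Python 2 and 3:

    from six.moves import http_client

    assert http_client.OK == 200
    assert http_client.CREATED == 201
    assert http_client.CONFLICT == 409
    assert http_client.NOT_FOUND == 404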
def test_delete_config(self):
"""Call ``DELETE /domains{domain_id}/config``."""
@@ -65,6 +79,19 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']},
expected_status=exception.DomainConfigNotFound.code)
+ def test_delete_config_invalid_domain(self):
+ """Call ``DELETE /domains{domain_id}/config``
+
+ While deleting Identity API-based domain config with an invalid domain
+ id provided, the request shall be rejected with a 404 Domain Not
+ Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.delete('/domains/%(domain_id)s/config' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
+
def test_delete_config_by_group(self):
"""Call ``DELETE /domains{domain_id}/config/{group}``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -73,6 +100,19 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
res = self.domain_config_api.get_config(self.domain['id'])
self.assertNotIn('ldap', res)
+ def test_delete_config_by_group_invalid_domain(self):
+ """Call ``DELETE /domains{domain_id}/config/{group}``
+
+ While deleting Identity API-based domain config by group with an
+ invalid domain id provided, the request shall be rejected with a
+ 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.delete('/domains/%(domain_id)s/config/ldap' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
+
def test_get_head_config(self):
"""Call ``GET & HEAD for /domains{domain_id}/config``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -80,7 +120,7 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']}
r = self.get(url)
self.assertEqual(self.config, r.result['config'])
- self.head(url, expected_status=200)
+ self.head(url, expected_status=http_client.OK)
def test_get_config_by_group(self):
"""Call ``GET & HEAD /domains{domain_id}/config/{group}``."""
@@ -89,7 +129,20 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']}
r = self.get(url)
self.assertEqual({'ldap': self.config['ldap']}, r.result['config'])
- self.head(url, expected_status=200)
+ self.head(url, expected_status=http_client.OK)
+
+ def test_get_config_by_group_invalid_domain(self):
+ """Call ``GET & HEAD /domains{domain_id}/config/{group}``
+
+ While retrieving Identity API-based domain config by group with an
+ invalid domain id provided, the request shall be rejected with a
+ 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.get('/domains/%(domain_id)s/config/ldap' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
def test_get_config_by_option(self):
"""Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}``."""
@@ -99,7 +152,20 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
r = self.get(url)
self.assertEqual({'url': self.config['ldap']['url']},
r.result['config'])
- self.head(url, expected_status=200)
+ self.head(url, expected_status=http_client.OK)
+
+ def test_get_config_by_option_invalid_domain(self):
+ """Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}``
+
+ While retrieving Identity API-based domain config by option with an
+ invalid domain id provided, the request shall be rejected with a
+ 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.get('/domains/%(domain_id)s/config/ldap/url' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
def test_get_non_existant_config(self):
"""Call ``GET /domains{domain_id}/config when no config defined``."""
@@ -107,6 +173,18 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']},
expected_status=http_client.NOT_FOUND)
+ def test_get_non_existant_config_invalid_domain(self):
+ """Call ``GET /domains{domain_id}/config when no config defined``
+
+ While retrieving non-existent Identity API-based domain config with an
+ invalid domain id provided, the request shall be rejected with a
+ 404 Domain Not Found response.
+ """
+ invalid_domain_id = uuid.uuid4().hex
+ self.get('/domains/%(domain_id)s/config' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
+
def test_get_non_existant_config_group(self):
"""Call ``GET /domains{domain_id}/config/{group_not_exist}``."""
config = {'ldap': {'url': uuid.uuid4().hex}}
@@ -115,6 +193,20 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']},
expected_status=http_client.NOT_FOUND)
+ def test_get_non_existant_config_group_invalid_domain(self):
+ """Call ``GET /domains{domain_id}/config/{group_not_exist}``
+
+ While retrieving non-existent Identity API-based domain config group
+ with an invalid domain id provided, the request shall be rejected with
+ a 404 Domain Not Found response.
+ """
+ config = {'ldap': {'url': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.get('/domains/%(domain_id)s/config/identity' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
+
def test_get_non_existant_config_option(self):
"""Call ``GET /domains{domain_id}/config/group/{option_not_exist}``."""
config = {'ldap': {'url': uuid.uuid4().hex}}
@@ -123,6 +215,20 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']},
expected_status=http_client.NOT_FOUND)
+ def test_get_non_existant_config_option_invalid_domain(self):
+ """Call ``GET /domains{domain_id}/config/group/{option_not_exist}``
+
+ While retrieving non-existent Identity API-based domain config option
+ with an invalid domain id provided, the request shall be rejected with
+ a 404 Domain Not Found response.
+ """
+ config = {'ldap': {'url': uuid.uuid4().hex}}
+ self.domain_config_api.create_config(self.domain['id'], config)
+ invalid_domain_id = uuid.uuid4().hex
+ self.get('/domains/%(domain_id)s/config/ldap/user_tree_dn' % {
+ 'domain_id': invalid_domain_id},
+ expected_status=exception.DomainNotFound.code)
+
def test_update_config(self):
"""Call ``PATCH /domains/{domain_id}/config``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -139,6 +245,22 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
self.assertEqual(expected_config, r.result['config'])
self.assertEqual(expected_config, res)
+ def test_update_config_invalid_domain(self):
+ """Call ``PATCH /domains/{domain_id}/config``
+
+ While updating Identity API-based domain config with an invalid domain
+ id provided, the request shall be rejected with a 404 Domain Not
+ Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ new_config = {'ldap': {'url': uuid.uuid4().hex},
+ 'identity': {'driver': uuid.uuid4().hex}}
+ invalid_domain_id = uuid.uuid4().hex
+ self.patch('/domains/%(domain_id)s/config' % {
+ 'domain_id': invalid_domain_id},
+ body={'config': new_config},
+ expected_status=exception.DomainNotFound.code)
+
def test_update_config_group(self):
"""Call ``PATCH /domains/{domain_id}/config/{group}``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -155,6 +277,22 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
self.assertEqual(expected_config, r.result['config'])
self.assertEqual(expected_config, res)
+ def test_update_config_group_invalid_domain(self):
+ """Call ``PATCH /domains/{domain_id}/config/{group}``
+
+ While updating Identity API-based domain config group with an invalid
+ domain id provided, the request shall be rejected with a
+ 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ new_config = {'ldap': {'url': uuid.uuid4().hex,
+ 'user_filter': uuid.uuid4().hex}}
+ invalid_domain_id = uuid.uuid4().hex
+ self.patch('/domains/%(domain_id)s/config/ldap' % {
+ 'domain_id': invalid_domain_id},
+ body={'config': new_config},
+ expected_status=exception.DomainNotFound.code)
+
def test_update_config_invalid_group(self):
"""Call ``PATCH /domains/{domain_id}/config/{invalid_group}``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -178,6 +316,24 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
body={'config': new_config},
expected_status=http_client.NOT_FOUND)
+ def test_update_config_invalid_group_invalid_domain(self):
+ """Call ``PATCH /domains/{domain_id}/config/{invalid_group}``
+
+ While updating Identity API-based domain config with an invalid group
+ and an invalid domain id provided, the request shall be rejected
+ with a 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_group = uuid.uuid4().hex
+ new_config = {invalid_group: {'url': uuid.uuid4().hex,
+ 'user_filter': uuid.uuid4().hex}}
+ invalid_domain_id = uuid.uuid4().hex
+ self.patch('/domains/%(domain_id)s/config/%(invalid_group)s' % {
+ 'domain_id': invalid_domain_id,
+ 'invalid_group': invalid_group},
+ body={'config': new_config},
+ expected_status=exception.DomainNotFound.code)
+
def test_update_config_option(self):
"""Call ``PATCH /domains/{domain_id}/config/{group}/{option}``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -191,6 +347,21 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
self.assertEqual(expected_config, r.result['config'])
self.assertEqual(expected_config, res)
+ def test_update_config_option_invalid_domain(self):
+ """Call ``PATCH /domains/{domain_id}/config/{group}/{option}``
+
+ While updating Identity API-based domain config option with an invalid
+ domain id provided, the request shall be rejected with a 404 Domain
+ Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ new_config = {'url': uuid.uuid4().hex}
+ invalid_domain_id = uuid.uuid4().hex
+ self.patch('/domains/%(domain_id)s/config/ldap/url' % {
+ 'domain_id': invalid_domain_id},
+ body={'config': new_config},
+ expected_status=exception.DomainNotFound.code)
+
def test_update_config_invalid_option(self):
"""Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``."""
self.domain_config_api.create_config(self.domain['id'], self.config)
@@ -212,3 +383,77 @@ class DomainConfigTestCase(test_v3.RestfulTestCase):
'domain_id': self.domain['id']},
body={'config': new_config},
expected_status=http_client.NOT_FOUND)
+
+ def test_update_config_invalid_option_invalid_domain(self):
+ """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``
+
+ While updating Identity API-based domain config with an invalid option
+ and an invalid domain id provided, the request shall be rejected
+ with a 404 Domain Not Found response.
+ """
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ invalid_option = uuid.uuid4().hex
+ new_config = {'ldap': {invalid_option: uuid.uuid4().hex}}
+ invalid_domain_id = uuid.uuid4().hex
+ self.patch(
+ '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % {
+ 'domain_id': invalid_domain_id,
+ 'invalid_option': invalid_option},
+ body={'config': new_config},
+ expected_status=exception.DomainNotFound.code)
+
+ def test_get_config_default(self):
+ """Call ``GET /domains/config/default``."""
+ # Create a config that overrides a few of the options so that we can
+ # check that only the defaults are returned.
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ url = '/domains/config/default'
+ r = self.get(url)
+ default_config = r.result['config']
+ for group in default_config:
+ for option in default_config[group]:
+ self.assertEqual(getattr(getattr(CONF, group), option),
+ default_config[group][option])
+
+ def test_get_config_default_by_group(self):
+ """Call ``GET /domains/config/{group}/default``."""
+ # Create a config that overrides a few of the options so that we can
+ # check that only the defaults are returned.
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ url = '/domains/config/ldap/default'
+ r = self.get(url)
+ default_config = r.result['config']
+ for option in default_config['ldap']:
+ self.assertEqual(getattr(CONF.ldap, option),
+ default_config['ldap'][option])
+
+ def test_get_config_default_by_option(self):
+ """Call ``GET /domains/config/{group}/{option}/default``."""
+ # Create a config that overrides a few of the options so that we can
+ # check that only the defaults are returned.
+ self.domain_config_api.create_config(self.domain['id'], self.config)
+ url = '/domains/config/ldap/url/default'
+ r = self.get(url)
+ default_config = r.result['config']
+ self.assertEqual(CONF.ldap.url, default_config['url'])
+
+ def test_get_config_default_by_invalid_group(self):
+ """Call ``GET for /domains/config/{bad-group}/default``."""
+ # First try a valid group, but one we don't support for domain config
+ self.get('/domains/config/resource/default',
+ expected_status=http_client.FORBIDDEN)
+
+ # Now try a totally invalid group
+ url = '/domains/config/%s/default' % uuid.uuid4().hex
+ self.get(url, expected_status=http_client.FORBIDDEN)
+
+ def test_get_config_default_by_invalid_option(self):
+ """Call ``GET for /domains/config/{group}/{bad-option}/default``."""
+ # First try a valid option, but one we don't support for domain config,
+ # i.e. one that is in the sensitive options list
+ self.get('/domains/config/ldap/password/default',
+ expected_status=http_client.FORBIDDEN)
+
+ # Now try a totally invalid option
+ url = '/domains/config/ldap/%s/default' % uuid.uuid4().hex
+ self.get(url, expected_status=http_client.FORBIDDEN)
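The default-config tests walk the returned groups and options and compare each against oslo.config via nested getattr. A self-contained sketch with a stand-in namespace instead of the real CONF:

    class _NS(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    CONF = _NS(ldap=_NS(url='ldap://localhost'))
    default_config = {'ldap': {'url': 'ldap://localhost'}}

    for group, options in default_config.items():
        for option, value in options.items():
            assert getattr(getattr(CONF, group), option) == value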
diff --git a/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py b/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
index 3423d2d8..9fee8d2b 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_endpoint_policy.py
@@ -15,6 +15,7 @@
from six.moves import http_client
from testtools import matchers
+from keystone.tests import unit
from keystone.tests.unit import test_v3
@@ -31,13 +32,15 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
def setUp(self):
super(EndpointPolicyTestCase, self).setUp()
- self.policy = self.new_policy_ref()
+ self.policy = unit.new_policy_ref()
self.policy_api.create_policy(self.policy['id'], self.policy)
- self.service = self.new_service_ref()
+ self.service = unit.new_service_ref()
self.catalog_api.create_service(self.service['id'], self.service)
- self.endpoint = self.new_endpoint_ref(self.service['id'], enabled=True)
+ self.endpoint = unit.new_endpoint_ref(self.service['id'], enabled=True,
+ interface='public',
+ region_id=self.region_id)
self.catalog_api.create_endpoint(self.endpoint['id'], self.endpoint)
- self.region = self.new_region_ref()
+ self.region = unit.new_region_ref()
self.catalog_api.create_region(self.region)
def assert_head_and_get_return_same_response(self, url, expected_status):
@@ -53,12 +56,14 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
url,
expected_status=http_client.NOT_FOUND)
- self.put(url, expected_status=204)
+ self.put(url)
# test that the new resource is accessible.
- self.assert_head_and_get_return_same_response(url, expected_status=204)
+ self.assert_head_and_get_return_same_response(
+ url,
+ expected_status=http_client.NO_CONTENT)
- self.delete(url, expected_status=204)
+ self.delete(url)
# test that the deleted resource is no longer accessible
self.assert_head_and_get_return_same_response(
@@ -67,7 +72,6 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
def test_crud_for_policy_for_explicit_endpoint(self):
"""PUT, HEAD and DELETE for explicit endpoint policy."""
-
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s') % {
'policy_id': self.policy['id'],
@@ -76,7 +80,6 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
def test_crud_for_policy_for_service(self):
"""PUT, HEAD and DELETE for service endpoint policy."""
-
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s') % {
'policy_id': self.policy['id'],
@@ -85,7 +88,6 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
def test_crud_for_policy_for_region_and_service(self):
"""PUT, HEAD and DELETE for region and service endpoint policy."""
-
url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/services/%(service_id)s/regions/%(region_id)s') % {
'policy_id': self.policy['id'],
@@ -95,37 +97,31 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
def test_get_policy_for_endpoint(self):
"""GET /endpoints/{endpoint_id}/policy."""
-
self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s' % {
'policy_id': self.policy['id'],
- 'endpoint_id': self.endpoint['id']},
- expected_status=204)
+ 'endpoint_id': self.endpoint['id']})
self.head('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
'/policy' % {
'endpoint_id': self.endpoint['id']},
- expected_status=200)
+ expected_status=http_client.OK)
r = self.get('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'
'/policy' % {
- 'endpoint_id': self.endpoint['id']},
- expected_status=200)
+ 'endpoint_id': self.endpoint['id']})
self.assertValidPolicyResponse(r, ref=self.policy)
def test_list_endpoints_for_policy(self):
"""GET /policies/%(policy_id}/endpoints."""
-
self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints/%(endpoint_id)s' % {
'policy_id': self.policy['id'],
- 'endpoint_id': self.endpoint['id']},
- expected_status=204)
+ 'endpoint_id': self.endpoint['id']})
r = self.get('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'
'/endpoints' % {
- 'policy_id': self.policy['id']},
- expected_status=200)
+ 'policy_id': self.policy['id']})
self.assertValidEndpointListResponse(r, ref=self.endpoint)
self.assertThat(r.result.get('endpoints'), matchers.HasLength(1))
@@ -135,8 +131,8 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
'policy_id': self.policy['id'],
'endpoint_id': self.endpoint['id']}
- self.put(url, expected_status=204)
- self.head(url, expected_status=204)
+ self.put(url)
+ self.head(url)
self.delete('/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint['id']})
@@ -150,8 +146,8 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
'service_id': self.service['id'],
'region_id': self.region['id']}
- self.put(url, expected_status=204)
- self.head(url, expected_status=204)
+ self.put(url)
+ self.head(url)
self.delete('/regions/%(region_id)s' % {
'region_id': self.region['id']})
@@ -165,8 +161,8 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
'service_id': self.service['id'],
'region_id': self.region['id']}
- self.put(url, expected_status=204)
- self.head(url, expected_status=204)
+ self.put(url)
+ self.head(url)
self.delete('/services/%(service_id)s' % {
'service_id': self.service['id']})
@@ -179,8 +175,8 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
'policy_id': self.policy['id'],
'service_id': self.service['id']}
- self.put(url, expected_status=204)
- self.get(url, expected_status=204)
+ self.put(url)
+ self.get(url, expected_status=http_client.NO_CONTENT)
self.delete('/policies/%(policy_id)s' % {
'policy_id': self.policy['id']})
@@ -193,8 +189,8 @@ class EndpointPolicyTestCase(test_v3.RestfulTestCase):
'policy_id': self.policy['id'],
'service_id': self.service['id']}
- self.put(url, expected_status=204)
- self.get(url, expected_status=204)
+ self.put(url)
+ self.get(url, expected_status=http_client.NO_CONTENT)
self.delete('/services/%(service_id)s' % {
'service_id': self.service['id']})
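For reference, the three association granularities this suite exercises, written as URL templates (the placeholders are filled from the fixtures created in setUp):

    ASSOCIATION_URLS = [
        '/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id}',
        '/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}',
        '/policies/{policy_id}/OS-ENDPOINT-POLICY'
        '/services/{service_id}/regions/{region_id}',
    ]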
diff --git a/keystone-moon/keystone/tests/unit/test_v3_federation.py b/keystone-moon/keystone/tests/unit/test_v3_federation.py
index 4d7dcaab..f4ec8e51 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_federation.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_federation.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import os
import random
from testtools import matchers
@@ -19,7 +20,8 @@ import fixtures
from lxml import etree
import mock
from oslo_config import cfg
-from oslo_log import log
+from oslo_log import versionutils
+from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslotest import mockpatch
import saml2
@@ -33,22 +35,24 @@ if not xmldsig:
from keystone.auth import controllers as auth_controllers
from keystone.common import environment
-from keystone.contrib.federation import controllers as federation_controllers
-from keystone.contrib.federation import idp as keystone_idp
+from keystone.contrib.federation import routers
from keystone import exception
+from keystone.federation import controllers as federation_controllers
+from keystone.federation import idp as keystone_idp
from keystone import notifications
+from keystone.tests import unit
from keystone.tests.unit import core
from keystone.tests.unit import federation_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3
+from keystone.tests.unit import utils
from keystone.token.providers import common as token_common
subprocess = environment.subprocess
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
ROOTDIR = os.path.dirname(os.path.abspath(__file__))
XMLDIR = os.path.join(ROOTDIR, 'saml2/')
@@ -59,8 +63,12 @@ def dummy_validator(*args, **kwargs):
class FederationTests(test_v3.RestfulTestCase):
- EXTENSION_NAME = 'federation'
- EXTENSION_TO_ADD = 'federation_extension'
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_exception_happens(self, mock_deprecator):
+ routers.FederationExtension(mock.ANY)
+ mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("Remove federation_extension from", args[1])
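The assertion style above relies on mock's call_args being an (args, kwargs) pair; args[1] is therefore the second positional argument, conventionally the deprecation message. A minimal illustration (the message text beyond the asserted fragment is hypothetical):

    import mock

    m = mock.Mock()
    m(object(), 'Remove federation_extension from the paste pipeline.')
    args, _kwargs = m.call_args
    assert 'Remove federation_extension from' in args[1]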
class FederatedSetupMixin(object):
@@ -137,7 +145,6 @@ class FederatedSetupMixin(object):
def assertValidMappedUser(self, token):
"""Check if user object meets all the criteria."""
-
user = token['user']
self.assertIn('id', user)
self.assertIn('name', user)
@@ -209,66 +216,62 @@ class FederatedSetupMixin(object):
def load_federation_sample_data(self):
"""Inject additional data."""
-
# Create and add domains
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'],
self.domainA)
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'],
self.domainB)
- self.domainC = self.new_domain_ref()
+ self.domainC = unit.new_domain_ref()
self.resource_api.create_domain(self.domainC['id'],
self.domainC)
- self.domainD = self.new_domain_ref()
+ self.domainD = unit.new_domain_ref()
self.resource_api.create_domain(self.domainD['id'],
self.domainD)
# Create and add projects
- self.proj_employees = self.new_project_ref(
+ self.proj_employees = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_employees['id'],
self.proj_employees)
- self.proj_customers = self.new_project_ref(
+ self.proj_customers = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.proj_customers['id'],
self.proj_customers)
- self.project_all = self.new_project_ref(
+ self.project_all = unit.new_project_ref(
domain_id=self.domainA['id'])
self.resource_api.create_project(self.project_all['id'],
self.project_all)
- self.project_inherited = self.new_project_ref(
+ self.project_inherited = unit.new_project_ref(
domain_id=self.domainD['id'])
self.resource_api.create_project(self.project_inherited['id'],
self.project_inherited)
# Create and add groups
- self.group_employees = self.new_group_ref(
- domain_id=self.domainA['id'])
+ self.group_employees = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_employees = (
self.identity_api.create_group(self.group_employees))
- self.group_customers = self.new_group_ref(
- domain_id=self.domainA['id'])
+ self.group_customers = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_customers = (
self.identity_api.create_group(self.group_customers))
- self.group_admins = self.new_group_ref(
- domain_id=self.domainA['id'])
+ self.group_admins = unit.new_group_ref(domain_id=self.domainA['id'])
self.group_admins = self.identity_api.create_group(self.group_admins)
# Create and add roles
- self.role_employee = self.new_role_ref()
+ self.role_employee = unit.new_role_ref()
self.role_api.create_role(self.role_employee['id'], self.role_employee)
- self.role_customer = self.new_role_ref()
+ self.role_customer = unit.new_role_ref()
self.role_api.create_role(self.role_customer['id'], self.role_customer)
- self.role_admin = self.new_role_ref()
+ self.role_admin = unit.new_role_ref()
self.role_api.create_role(self.role_admin['id'], self.role_admin)
# Employees can access
@@ -774,7 +777,7 @@ class FederatedSetupMixin(object):
self.domainC['id'])
-class FederatedIdentityProviderTests(FederationTests):
+class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
@@ -815,7 +818,7 @@ class FederatedIdentityProviderTests(FederationTests):
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
- expected_status=201)
+ expected_status=http_client.CREATED)
return resp
def _http_idp_input(self, **kwargs):
@@ -856,7 +859,6 @@ class FederatedIdentityProviderTests(FederationTests):
def test_create_idp(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
-
keys_to_check = list(self.idp_keys)
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
@@ -867,7 +869,6 @@ class FederatedIdentityProviderTests(FederationTests):
def test_create_idp_remote(self):
"""Creates the IdentityProvider entity associated to remote_ids."""
-
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
@@ -886,10 +887,9 @@ class FederatedIdentityProviderTests(FederationTests):
A remote_id is the same for both so the second IdP is not
created because of the uniqueness of the remote_ids
- Expect HTTP 409 code for the latter call.
+ Expect HTTP 409 Conflict code for the latter call.
"""
-
body = self.default_body.copy()
repeated_remote_id = uuid.uuid4().hex
body['remote_ids'] = [uuid.uuid4().hex,
@@ -901,12 +901,15 @@ class FederatedIdentityProviderTests(FederationTests):
url = self.base_url(suffix=uuid.uuid4().hex)
body['remote_ids'] = [uuid.uuid4().hex,
repeated_remote_id]
- self.put(url, body={'identity_provider': body},
- expected_status=http_client.CONFLICT)
+ resp = self.put(url, body={'identity_provider': body},
+ expected_status=http_client.CONFLICT)
+
+ resp_data = jsonutils.loads(resp.body)
+ self.assertIn('Duplicate remote ID',
+ resp_data.get('error', {}).get('message'))
def test_create_idp_remote_empty(self):
"""Creates an IdP with empty remote_ids."""
-
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
@@ -919,7 +922,6 @@ class FederatedIdentityProviderTests(FederationTests):
def test_create_idp_remote_none(self):
"""Creates an IdP with a None remote_ids."""
-
keys_to_check = list(self.idp_keys)
keys_to_check.append('remote_ids')
body = self.default_body.copy()
@@ -986,6 +988,37 @@ class FederatedIdentityProviderTests(FederationTests):
self.assertEqual(sorted(body['remote_ids']),
sorted(returned_idp.get('remote_ids')))
+ def test_update_idp_remote_repeated(self):
+ """Update an IdentityProvider entity reusing a remote_id.
+
+ A remote_id is the same for both so the second IdP is not
+ updated because of the uniqueness of the remote_ids.
+
+ Expect HTTP 409 Conflict code for the latter call.
+
+ """
+ # Create first identity provider
+ body = self.default_body.copy()
+ repeated_remote_id = uuid.uuid4().hex
+ body['remote_ids'] = [uuid.uuid4().hex,
+ repeated_remote_id]
+ self._create_default_idp(body=body)
+
+ # Create second identity provider (without remote_ids)
+ body = self.default_body.copy()
+ default_resp = self._create_default_idp(body=body)
+ default_idp = self._fetch_attribute_from_response(default_resp,
+ 'identity_provider')
+ idp_id = default_idp.get('id')
+ url = self.base_url(suffix=idp_id)
+
+ body['remote_ids'] = [repeated_remote_id]
+ resp = self.patch(url, body={'identity_provider': body},
+ expected_status=http_client.CONFLICT)
+ resp_data = jsonutils.loads(resp.body)
+ self.assertIn('Duplicate remote ID',
+ resp_data['error']['message'])
+
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
@@ -1018,18 +1051,73 @@ class FederatedIdentityProviderTests(FederationTests):
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
+ def test_filter_list_idp_by_id(self):
+ def get_id(resp):
+ r = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ return r.get('id')
+
+ idp1_id = get_id(self._create_default_idp())
+ idp2_id = get_id(self._create_default_idp())
+
+ # list the IdP, should get two IdP.
+ url = self.base_url()
+ resp = self.get(url)
+ entities = self._fetch_attribute_from_response(resp,
+ 'identity_providers')
+ entities_ids = [e['id'] for e in entities]
+ self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
+
+ # filter the IdP by ID.
+ url = self.base_url() + '?id=' + idp1_id
+ resp = self.get(url)
+ filtered_service_list = resp.json['identity_providers']
+ self.assertThat(filtered_service_list, matchers.HasLength(1))
+ self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
+
+ def test_filter_list_idp_by_enabled(self):
+ def get_id(resp):
+ r = self._fetch_attribute_from_response(resp,
+ 'identity_provider')
+ return r.get('id')
+
+ idp1_id = get_id(self._create_default_idp())
+
+ body = self.default_body.copy()
+ body['enabled'] = False
+ idp2_id = get_id(self._create_default_idp(body=body))
+
+ # list the IdP, should get two IdP.
+ url = self.base_url()
+ resp = self.get(url)
+ entities = self._fetch_attribute_from_response(resp,
+ 'identity_providers')
+ entities_ids = [e['id'] for e in entities]
+ self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
+
+ # filter the IdP by 'enabled'.
+ url = self.base_url() + '?enabled=True'
+ resp = self.get(url)
+ filtered_service_list = resp.json['identity_providers']
+ self.assertThat(filtered_service_list, matchers.HasLength(1))
+ self.assertEqual(idp1_id, filtered_service_list[0].get('id'))
+
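Both filter tests build the list URL by appending a query parameter that names an IdP attribute. A hypothetical helper showing the same construction:

    from six.moves.urllib.parse import urlencode

    def filtered_url(base, **filters):
        # e.g. filtered_url('/OS-FEDERATION/identity_providers', enabled=True)
        return base + '?' + urlencode(sorted(filters.items()))

    assert (filtered_url('/OS-FEDERATION/identity_providers', enabled=True)
            == '/OS-FEDERATION/identity_providers?enabled=True')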
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
- Expect HTTP 409 code for the latter call.
+ Expect HTTP 409 Conflict code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
- expected_status=201)
- self.put(url, body={'identity_provider': body},
- expected_status=http_client.CONFLICT)
+ expected_status=http_client.CREATED)
+ resp = self.put(url, body={'identity_provider': body},
+ expected_status=http_client.CONFLICT)
+
+ resp_data = jsonutils.loads(resp.body)
+ self.assertIn('Duplicate entry',
+ resp_data.get('error', {}).get('message'))
def test_get_idp(self):
"""Create and later fetch IdP."""
@@ -1047,7 +1135,7 @@ class FederatedIdentityProviderTests(FederationTests):
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
- Expected HTTP 404 status code.
+ Expected HTTP 404 Not Found status code.
"""
idp_id = uuid.uuid4().hex
@@ -1059,7 +1147,7 @@ class FederatedIdentityProviderTests(FederationTests):
def test_delete_existing_idp(self):
"""Create and later delete IdP.
- Expect HTTP 404 for the GET IdP call.
+ Expect HTTP 404 Not Found for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
@@ -1072,7 +1160,6 @@ class FederatedIdentityProviderTests(FederationTests):
def test_delete_idp_also_deletes_assigned_protocols(self):
"""Deleting an IdP will delete its assigned protocol."""
-
# create default IdP
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
@@ -1084,7 +1171,7 @@ class FederatedIdentityProviderTests(FederationTests):
idp_url = self.base_url(suffix=idp_id)
# assign protocol to IdP
- kwargs = {'expected_status': 201}
+ kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(
url=url,
idp_id=idp_id,
@@ -1100,7 +1187,7 @@ class FederatedIdentityProviderTests(FederationTests):
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
- Expect HTTP 404 for the GET IdP call.
+ Expect HTTP 404 Not Found for the GET IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
@@ -1145,7 +1232,7 @@ class FederatedIdentityProviderTests(FederationTests):
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
- Expect HTTP FORBIDDEN.
+ Expect HTTP BAD REQUEST.
"""
default_resp = self._create_default_idp()
@@ -1160,12 +1247,12 @@ class FederatedIdentityProviderTests(FederationTests):
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body},
- expected_status=http_client.FORBIDDEN)
+ expected_status=http_client.BAD_REQUEST)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
- Expect HTTP 404 code.
+ Expect HTTP 404 Not Found code.
"""
idp_id = uuid.uuid4().hex
@@ -1178,12 +1265,13 @@ class FederatedIdentityProviderTests(FederationTests):
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
-
- self._assign_protocol_to_idp(expected_status=201)
+ self._assign_protocol_to_idp(expected_status=http_client.CREATED)
def test_protocol_composite_pk(self):
- """Test whether Keystone let's add two entities with identical
- names, however attached to different IdPs.
+ """Test that Keystone can add two entities.
+
+ The entities have identical names, however, attached to different
+ IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
@@ -1193,7 +1281,7 @@ class FederatedIdentityProviderTests(FederationTests):
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
- kwargs = {'expected_status': 201}
+ kwargs = {'expected_status': http_client.CREATED}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
@@ -1204,12 +1292,12 @@ class FederatedIdentityProviderTests(FederationTests):
"""Test whether Keystone checks for unique idp/protocol values.
Add same protocol twice, expect Keystone to reject the latter call and
- return HTTP 409 code.
+ return HTTP 409 Conflict code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
- kwargs = {'expected_status': 201}
+ kwargs = {'expected_status': http_client.CREATED}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': http_client.CONFLICT}
@@ -1221,10 +1309,9 @@ class FederatedIdentityProviderTests(FederationTests):
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
- Expect HTTP 404 code.
+ Expect HTTP 404 Not Found code.
"""
-
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': http_client.NOT_FOUND}
self._assign_protocol_to_idp(proto='saml2',
@@ -1234,8 +1321,8 @@ class FederatedIdentityProviderTests(FederationTests):
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
-
- resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+ resp, idp_id, proto = self._assign_protocol_to_idp(
+ expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
@@ -1254,12 +1341,14 @@ class FederatedIdentityProviderTests(FederationTests):
Compare input and output id sets.
"""
- resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+ resp, idp_id, proto = self._assign_protocol_to_idp(
+ expected_status=http_client.CREATED)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
- resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
- expected_status=201)
+ resp, _, proto = self._assign_protocol_to_idp(
+ idp_id=idp_id,
+ expected_status=http_client.CREATED)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
@@ -1277,8 +1366,8 @@ class FederatedIdentityProviderTests(FederationTests):
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
-
- resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+ resp, idp_id, proto = self._assign_protocol_to_idp(
+ expected_status=http_client.CREATED)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
@@ -1294,19 +1383,21 @@ class FederatedIdentityProviderTests(FederationTests):
def test_delete_protocol(self):
"""Delete protocol.
- Expect HTTP 404 code for the GET call after the protocol is deleted.
+ Expect HTTP 404 Not Found code for the GET call after the protocol is
+ deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
- resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
+ resp, idp_id, proto = self._assign_protocol_to_idp(
+ expected_status=http_client.CREATED)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=http_client.NOT_FOUND)
-class MappingCRUDTests(FederationTests):
+class MappingCRUDTests(test_v3.RestfulTestCase):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
@@ -1340,7 +1431,7 @@ class MappingCRUDTests(FederationTests):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
- expected_status=201)
+ expected_status=http_client.CREATED)
return resp
def _get_id_from_response(self, resp):
@@ -1357,7 +1448,7 @@ class MappingCRUDTests(FederationTests):
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
- self.assertResponseStatus(resp, 200)
+ self.assertResponseStatus(resp, http_client.OK)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(1, len(entities))
@@ -1367,7 +1458,7 @@ class MappingCRUDTests(FederationTests):
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
self.get(url, expected_status=http_client.NOT_FOUND)
def test_mapping_get(self):
@@ -1463,8 +1554,8 @@ class MappingCRUDTests(FederationTests):
def test_create_mapping_with_blacklist_and_whitelist(self):
"""Test for adding whitelist and blacklist in the rule
- Server should respond with HTTP 400 error upon discovering both
- ``whitelist`` and ``blacklist`` keywords in the same rule.
+ Server should respond with HTTP 400 Bad Request error upon discovering
+ both ``whitelist`` and ``blacklist`` keywords in the same rule.
"""
url = self.MAPPING_URL + uuid.uuid4().hex
@@ -1472,8 +1563,37 @@ class MappingCRUDTests(FederationTests):
self.put(url, expected_status=http_client.BAD_REQUEST,
body={'mapping': mapping})
+ def test_create_mapping_with_local_user_and_local_domain(self):
+ url = self.MAPPING_URL + uuid.uuid4().hex
+ resp = self.put(
+ url,
+ body={
+ 'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN
+ },
+ expected_status=http_client.CREATED)
+ self.assertValidMappingResponse(
+ resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN)
+
+ def test_create_mapping_with_ephemeral(self):
+ url = self.MAPPING_URL + uuid.uuid4().hex
+ resp = self.put(
+ url,
+ body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER},
+ expected_status=http_client.CREATED)
+ self.assertValidMappingResponse(
+ resp, mapping_fixtures.MAPPING_EPHEMERAL_USER)
+
+ def test_create_mapping_with_bad_user_type(self):
+ url = self.MAPPING_URL + uuid.uuid4().hex
+ # get a copy of a known good map
+ bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER)
+ # now sabotage the user type
+ bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex
+ self.put(url, expected_status=http_client.BAD_REQUEST,
+ body={'mapping': bad_mapping})
+
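The MAPPING_EPHEMERAL_USER fixture exercised above is defined in keystone's mapping_fixtures test module and is not shown in this diff. A minimal sketch of an ephemeral-user mapping in that shape (illustrative only; the attribute names are assumptions based on the rules used elsewhere in these tests):

    # Illustrative sketch, not the real fixture: an ephemeral user whose
    # name is taken from the first remote attribute in the assertion.
    MAPPING_EPHEMERAL_USER_SKETCH = {
        'rules': [
            {
                'local': [
                    {
                        'user': {
                            'name': '{0}',
                            'type': 'ephemeral',
                        }
                    }
                ],
                'remote': [
                    {'type': 'UserName'}
                ],
            }
        ]
    }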
-class FederatedTokenTests(FederationTests, FederatedSetupMixin):
+class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
def auth_plugin_config_override(self):
methods = ['saml2']
@@ -1510,7 +1630,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.assertTrue(note['send_notification_called'])
def load_fixtures(self, fixtures):
- super(FederationTests, self).load_fixtures(fixtures)
+ super(FederatedTokenTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def test_issue_unscoped_token_notify(self):
@@ -1609,7 +1729,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def test_issue_unscoped_token_with_remote_unavailable(self):
self.config_fixture.config(group='federation',
remote_id_attribute=self.REMOTE_ID_ATTR)
- self.assertRaises(exception.ValidationError,
+ self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
idp=self.IDP_WITH_REMOTE,
environment={
@@ -1649,13 +1769,13 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once_notify(self):
- r = self.v3_authenticate_token(
+ r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
user_id = r.json['token']['user']['id']
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id)
def test_scope_to_project_once(self):
- r = self.v3_authenticate_token(
+ r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
@@ -1685,14 +1805,13 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
"""
enabled_false = {'enabled': False}
self.federation_api.update_idp(self.IDP, enabled_false)
- self.v3_authenticate_token(
+ self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.FORBIDDEN)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
-
- self.v3_authenticate_token(
+ self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
@@ -1705,13 +1824,12 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
* Employees' project
"""
-
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
- r = self.v3_authenticate_token(body)
+ r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(token_resp,
project_id_ref)
@@ -1719,7 +1837,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def test_scope_to_project_with_only_inherited_roles(self):
"""Try to scope token whose only roles are inherited."""
self.config_fixture.config(group='os_inherit', enabled=True)
- r = self.v3_authenticate_token(
+ r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_project_scoped_token_attributes(
@@ -1731,7 +1849,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
- self.v3_authenticate_token(
+ self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=http_client.NOT_FOUND)
@@ -1755,7 +1873,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
- r = self.v3_authenticate_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
+ r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
self.domainA['id'])
@@ -1778,14 +1896,14 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
- r = self.v3_authenticate_token(body)
+ r = self.v3_create_token(body)
token_resp = r.result['token']
self._check_domain_scoped_token_attributes(token_resp,
domain_id_ref)
def test_scope_to_domain_with_only_inherited_roles_fails(self):
"""Try to scope to a domain that has no direct roles."""
- self.v3_authenticate_token(
+ self.v3_create_token(
self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER,
expected_status=http_client.UNAUTHORIZED)
@@ -1816,14 +1934,14 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
# TODO(samueldmq): Create another test class for role inheritance tests.
# The advantage would be to reduce the complexity of this test class and
- # have tests specific to this fuctionality grouped, easing readability and
+ # have tests specific to this functionality grouped, easing readability and
# maintainability.
def test_list_projects_for_inherited_project_assignment(self):
# Enable os_inherit extension
self.config_fixture.config(group='os_inherit', enabled=True)
# Create a subproject
- subproject_inherited = self.new_project_ref(
+ subproject_inherited = unit.new_project_ref(
domain_id=self.domainD['id'],
parent_id=self.project_inherited['id'])
self.resource_api.create_project(subproject_inherited['id'],
@@ -1878,6 +1996,9 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self.assertEqual(domains_ref, domains,
'match failed for url %s' % url)
+ @utils.wip('This will fail because of bug #1501032. The returned method '
+ 'list should contain "saml2". This is documented in bug '
+ '1501032.')
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
@@ -1886,9 +2007,10 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
* Scope token to one of available projects
"""
-
r = self._issue_unscoped_token()
token_resp = r.json_body['token']
+ # NOTE(lbragstad): Ensure only 'saml2' is in the method list.
+ self.assertListEqual(['saml2'], token_resp['methods'])
self.assertValidMappedUser(token_resp)
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/auth/projects', token=employee_unscoped_token_id)
@@ -1899,8 +2021,12 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
- r = self.v3_authenticate_token(v3_scope_request)
+ r = self.v3_create_token(v3_scope_request)
token_resp = r.result['token']
+ # FIXME(lbragstad): 'token' should be in the list of methods returned
+ # but it isn't. This is documented in bug 1501032.
+ self.assertIn('token', token_resp['methods'])
+ self.assertIn('saml2', token_resp['methods'])
self._check_project_scoped_token_attributes(token_resp, project['id'])
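utils.wip (used on the decorated tests above) marks a test as a known failure until the referenced bug is fixed: the test is skipped while it keeps failing and raises loudly once it unexpectedly passes. A rough sketch of such a decorator (illustrative, not keystone's exact implementation):

    import functools
    import unittest

    def wip(message):
        """Mark a test as a known failure; fail loudly if it starts passing."""
        def _decorator(func):
            @functools.wraps(func)
            def _wrapper(*args, **kwargs):
                try:
                    func(*args, **kwargs)
                except Exception:
                    # Still broken, as documented in the referenced bug.
                    raise unittest.SkipTest('known failure: %s' % message)
                raise AssertionError('unexpected pass: %s' % message)
            return _wrapper
        return _decorator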
def test_workflow_with_groups_deletion(self):
@@ -1917,10 +2043,9 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
"""
# create group and role
- group = self.new_group_ref(
- domain_id=self.domainA['id'])
+ group = unit.new_group_ref(domain_id=self.domainA['id'])
group = self.identity_api.create_group(group)
- role = self.new_role_ref()
+ role = unit.new_role_ref()
self.role_api.create_role(role['id'], role)
# assign role to group and project_admins
@@ -1971,7 +2096,8 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
token_id, 'project',
self.project_all['id'])
- self.v3_authenticate_token(scoped_token, expected_status=500)
+ self.v3_create_token(
+ scoped_token, expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_lists_with_missing_group_in_backend(self):
"""Test a mapping that points to a group that does not exist
@@ -1990,8 +2116,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
- group = self.new_group_ref(domain_id=domain_id)
- group['name'] = 'EXISTS'
+ group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
rules = {
'rules': [
@@ -2047,18 +2172,16 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
assigned
"""
-
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
- group_exists = self.new_group_ref(domain_id=domain_id)
- group_exists['name'] = 'EXISTS'
+ group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
- group_no_exists = self.new_group_ref(domain_id=domain_id)
- group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = unit.new_group_ref(domain_id=domain_id,
+ name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
@@ -2122,18 +2245,17 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
assigned
"""
-
domain_id = self.domainA['id']
domain_name = self.domainA['name']
# Add a group "EXISTS"
- group_exists = self.new_group_ref(domain_id=domain_id)
- group_exists['name'] = 'EXISTS'
+ group_exists = unit.new_group_ref(domain_id=domain_id,
+ name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
- group_no_exists = self.new_group_ref(domain_id=domain_id)
- group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = unit.new_group_ref(domain_id=domain_id,
+ name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
@@ -2198,8 +2320,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
"""
domain_id = self.domainA['id']
domain_name = self.domainA['name']
- group = self.new_group_ref(domain_id=domain_id)
- group['name'] = 'EXISTS'
+ group = unit.new_group_ref(domain_id=domain_id, name='EXISTS')
group = self.identity_api.create_group(group)
rules = {
'rules': [
@@ -2262,13 +2383,13 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
domain_name = self.domainA['name']
# Add a group "EXISTS"
- group_exists = self.new_group_ref(domain_id=domain_id)
- group_exists['name'] = 'EXISTS'
+ group_exists = unit.new_group_ref(domain_id=domain_id,
+ name='EXISTS')
group_exists = self.identity_api.create_group(group_exists)
# Add a group "NO_EXISTS"
- group_no_exists = self.new_group_ref(domain_id=domain_id)
- group_no_exists['name'] = 'NO_EXISTS'
+ group_no_exists = unit.new_group_ref(domain_id=domain_id,
+ name='NO_EXISTS')
group_no_exists = self.identity_api.create_group(group_no_exists)
group_ids = set([group_exists['id'], group_no_exists['id']])
@@ -2362,7 +2483,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
self._check_domains_are_valid(r.json_body['token'])
def test_scoped_token_has_user_domain(self):
- r = self.v3_authenticate_token(
+ r = self.v3_create_token(
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
self._check_domains_are_valid(r.result['token'])
@@ -2383,7 +2504,7 @@ class FederatedTokenTests(FederationTests, FederatedSetupMixin):
assertion='ANOTHER_LOCAL_USER_ASSERTION')
-class FernetFederatedTokenTests(FederationTests, FederatedSetupMixin):
+class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin):
AUTH_METHOD = 'token'
def load_fixtures(self, fixtures):
@@ -2436,7 +2557,7 @@ class FernetFederatedTokenTests(FederationTests, FederatedSetupMixin):
v3_scope_request = self._scope_request(unscoped_token,
'project', project['id'])
- resp = self.v3_authenticate_token(v3_scope_request)
+ resp = self.v3_create_token(v3_scope_request)
token_resp = resp.result['token']
self._check_project_scoped_token_attributes(token_resp, project['id'])
@@ -2448,6 +2569,7 @@ class FederatedTokenTestsMethodToken(FederatedTokenTests):
way for scoping all the tokens.
"""
+
AUTH_METHOD = 'token'
def auth_plugin_config_override(self):
@@ -2455,8 +2577,67 @@ class FederatedTokenTestsMethodToken(FederatedTokenTests):
super(FederatedTokenTests,
self).auth_plugin_config_override(methods)
+ @utils.wip('This will fail because of bug #1501032. The returned method '
+ 'list should contain "saml2". This is documented in bug '
+ '1501032.')
+ def test_full_workflow(self):
+ """Test 'standard' workflow for granting access tokens.
+
+ * Issue unscoped token
+ * List available projects based on groups
+ * Scope token to one of available projects
+
+ """
+ r = self._issue_unscoped_token()
+ token_resp = r.json_body['token']
+ # NOTE(lbragstad): Ensure only 'saml2' is in the method list.
+ self.assertListEqual(['saml2'], token_resp['methods'])
+ self.assertValidMappedUser(token_resp)
+ employee_unscoped_token_id = r.headers.get('X-Subject-Token')
+ r = self.get('/auth/projects', token=employee_unscoped_token_id)
+ projects = r.result['projects']
+ random_project = random.randint(0, len(projects) - 1)
+ project = projects[random_project]
+
+ v3_scope_request = self._scope_request(employee_unscoped_token_id,
+ 'project', project['id'])
+
+ r = self.v3_create_token(v3_scope_request)
+ token_resp = r.result['token']
+ self.assertIn('token', token_resp['methods'])
+ self.assertIn('saml2', token_resp['methods'])
+ self._check_project_scoped_token_attributes(token_resp, project['id'])
+
+
+class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
+ """Tests for federated users
+
+ Tests new shadow users functionality
+
+ """
+
+ def auth_plugin_config_override(self):
+ methods = ['saml2']
+ super(FederatedUserTests, self).auth_plugin_config_override(methods)
+
+ def setUp(self):
+ super(FederatedUserTests, self).setUp()
+
+ def load_fixtures(self, fixtures):
+ super(FederatedUserTests, self).load_fixtures(fixtures)
+ self.load_federation_sample_data()
+
+ def test_user_id_persistence(self):
+ """Ensure user_id is persisted across multiple federated authn calls."""
+ r = self._issue_unscoped_token()
+ user_id = r.json_body['token']['user']['id']
-class JsonHomeTests(FederationTests, test_v3.JsonHomeTestMixin):
+ r = self._issue_unscoped_token()
+ user_id2 = r.json_body['token']['user']['id']
+ self.assertEqual(user_id, user_id2)
+
+
+class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/'
'1.0/rel/identity_provider': {
@@ -2484,7 +2665,7 @@ def _load_xml(filename):
return xml.read()
-class SAMLGenerationTests(FederationTests):
+class SAMLGenerationTests(test_v3.RestfulTestCase):
SP_AUTH_URL = ('http://beta.com:5000/v3/OS-FEDERATION/identity_providers'
'/BETA/protocols/saml2/auth')
@@ -2523,7 +2704,7 @@ class SAMLGenerationTests(FederationTests):
self.sp = self.sp_ref()
url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID
self.put(url, body={'service_provider': self.sp},
- expected_status=201)
+ expected_status=http_client.CREATED)
def test_samlize_token_values(self):
"""Test the SAML generator produces a SAML object.
@@ -2665,7 +2846,7 @@ class SAMLGenerationTests(FederationTests):
"""
if not _is_xmlsec1_installed():
- self.skip('xmlsec1 is not installed')
+ self.skipTest('xmlsec1 is not installed')
generator = keystone_idp.SAMLGenerator()
response = generator.samlize_token(self.ISSUER, self.RECIPIENT,
@@ -2709,7 +2890,7 @@ class SAMLGenerationTests(FederationTests):
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
- resp = self.v3_authenticate_token(auth_data)
+ resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
@@ -2718,7 +2899,7 @@ class SAMLGenerationTests(FederationTests):
user_id=self.user['id'],
password=self.user['password'],
user_domain_id=self.domain['id'])
- resp = self.v3_authenticate_token(auth_data)
+ resp = self.v3_create_token(auth_data)
token_id = resp.headers.get('X-Subject-Token')
return token_id
@@ -2757,7 +2938,7 @@ class SAMLGenerationTests(FederationTests):
return_value=self.signed_assertion):
http_response = self.post(self.SAML_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
- expected_status=200)
+ expected_status=http_client.OK)
response = etree.fromstring(http_response.result)
issuer = response[0]
@@ -2789,10 +2970,9 @@ class SAMLGenerationTests(FederationTests):
def test_invalid_scope_body(self):
"""Test that missing the scope in request body raises an exception.
- Raises exception.SchemaValidationError() - error code 400
+ Raises exception.SchemaValidationError() - error 400 Bad Request
"""
-
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
@@ -2804,10 +2984,9 @@ class SAMLGenerationTests(FederationTests):
def test_invalid_token_body(self):
"""Test that missing the token in request body raises an exception.
- Raises exception.SchemaValidationError() - error code 400
+ Raises exception.SchemaValidationError() - error 400 Bad Request
"""
-
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
@@ -2819,7 +2998,7 @@ class SAMLGenerationTests(FederationTests):
def test_sp_not_found(self):
"""Test SAML generation with an invalid service provider ID.
- Raises exception.ServiceProviderNotFound() - error code 404
+ Raises exception.ServiceProviderNotFound() - error 404 Not Found
"""
sp_id = uuid.uuid4().hex
@@ -2830,7 +3009,6 @@ class SAMLGenerationTests(FederationTests):
def test_sp_disabled(self):
"""Try generating assertion for disabled Service Provider."""
-
# Disable Service Provider
sp_ref = {'enabled': False}
self.federation_api.update_sp(self.SERVICE_PROVDIER_ID, sp_ref)
@@ -2844,10 +3022,9 @@ class SAMLGenerationTests(FederationTests):
def test_token_not_found(self):
"""Test that an invalid token in the request body raises an exception.
- Raises exception.TokenNotFound() - error code 404
+ Raises exception.TokenNotFound() - error 404 Not Found
"""
-
token_id = uuid.uuid4().hex
body = self._create_generate_saml_request(token_id,
self.SERVICE_PROVDIER_ID)
@@ -2863,7 +3040,6 @@ class SAMLGenerationTests(FederationTests):
The controller should return a SAML assertion that is wrapped in a
SOAP envelope.
"""
-
self.config_fixture.config(group='saml', idp_entity_id=self.ISSUER)
token_id = self._fetch_valid_token()
body = self._create_generate_saml_request(token_id,
@@ -2873,7 +3049,7 @@ class SAMLGenerationTests(FederationTests):
return_value=self.signed_assertion):
http_response = self.post(self.ECP_GENERATION_ROUTE, body=body,
response_content_type='text/xml',
- expected_status=200)
+ expected_status=http_client.OK)
env_response = etree.fromstring(http_response.result)
header = env_response[0]
@@ -2956,7 +3132,7 @@ class SAMLGenerationTests(FederationTests):
self.assertEqual(expected_log, logger_fixture.output)
-class IdPMetadataGenerationTests(FederationTests):
+class IdPMetadataGenerationTests(test_v3.RestfulTestCase):
"""A class for testing Identity Provider Metadata generation."""
METADATA_URL = '/OS-FEDERATION/saml2/metadata'
@@ -3073,20 +3249,20 @@ class IdPMetadataGenerationTests(FederationTests):
self.generator.generate_metadata)
def test_get_metadata_with_no_metadata_file_configured(self):
- self.get(self.METADATA_URL, expected_status=500)
+ self.get(self.METADATA_URL,
+ expected_status=http_client.INTERNAL_SERVER_ERROR)
def test_get_metadata(self):
self.config_fixture.config(
group='saml', idp_metadata_path=XMLDIR + '/idp_saml2_metadata.xml')
- r = self.get(self.METADATA_URL, response_content_type='text/xml',
- expected_status=200)
+ r = self.get(self.METADATA_URL, response_content_type='text/xml')
self.assertEqual('text/xml', r.headers.get('Content-Type'))
reference_file = _load_xml('idp_saml2_metadata.xml')
self.assertEqual(reference_file, r.result)
-class ServiceProviderTests(FederationTests):
+class ServiceProviderTests(test_v3.RestfulTestCase):
"""A test class for Service Providers."""
MEMBER_NAME = 'service_provider'
@@ -3096,13 +3272,13 @@ class ServiceProviderTests(FederationTests):
'relay_state_prefix', 'sp_url']
def setUp(self):
- super(FederationTests, self).setUp()
+ super(ServiceProviderTests, self).setUp()
# Add a Service Provider
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
self.SP_REF = self.sp_ref()
self.SERVICE_PROVIDER = self.put(
url, body={'service_provider': self.SP_REF},
- expected_status=201).result
+ expected_status=http_client.CREATED).result
def sp_ref(self):
ref = {
@@ -3119,9 +3295,18 @@ class ServiceProviderTests(FederationTests):
return '/OS-FEDERATION/service_providers/' + str(suffix)
return '/OS-FEDERATION/service_providers'
+ def _create_default_sp(self, body=None):
+ """Create default Service Provider."""
+ url = self.base_url(suffix=uuid.uuid4().hex)
+ if body is None:
+ body = self.sp_ref()
+ resp = self.put(url, body={'service_provider': body},
+ expected_status=http_client.CREATED)
+ return resp
+
def test_get_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
- resp = self.get(url, expected_status=200)
+ resp = self.get(url)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
@@ -3133,7 +3318,7 @@ class ServiceProviderTests(FederationTests):
url = self.base_url(suffix=uuid.uuid4().hex)
sp = self.sp_ref()
resp = self.put(url, body={'service_provider': sp},
- expected_status=201)
+ expected_status=http_client.CREATED)
self.assertValidEntity(resp.result['service_provider'],
keys_to_check=self.SP_KEYS)
@@ -3143,7 +3328,7 @@ class ServiceProviderTests(FederationTests):
sp = self.sp_ref()
del sp['relay_state_prefix']
resp = self.put(url, body={'service_provider': sp},
- expected_status=201)
+ expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(CONF.saml.relay_state_prefix,
sp_result['relay_state_prefix'])
@@ -3155,7 +3340,7 @@ class ServiceProviderTests(FederationTests):
non_default_prefix = uuid.uuid4().hex
sp['relay_state_prefix'] = non_default_prefix
resp = self.put(url, body={'service_provider': sp},
- expected_status=201)
+ expected_status=http_client.CREATED)
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
@@ -3182,7 +3367,8 @@ class ServiceProviderTests(FederationTests):
}
for id, sp in ref_service_providers.items():
url = self.base_url(suffix=id)
- self.put(url, body={'service_provider': sp}, expected_status=201)
+ self.put(url, body={'service_provider': sp},
+ expected_status=http_client.CREATED)
# Insert ids into service provider object, we will compare it with
# responses from server and those include 'id' attribute.
@@ -3209,15 +3395,14 @@ class ServiceProviderTests(FederationTests):
"""
new_sp_ref = self.sp_ref()
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
- resp = self.patch(url, body={'service_provider': new_sp_ref},
- expected_status=200)
+ resp = self.patch(url, body={'service_provider': new_sp_ref})
patch_result = resp.result
new_sp_ref['id'] = self.SERVICE_PROVIDER_ID
self.assertValidEntity(patch_result['service_provider'],
ref=new_sp_ref,
keys_to_check=self.SP_KEYS)
- resp = self.get(url, expected_status=200)
+ resp = self.get(url)
get_result = resp.result
self.assertDictEqual(patch_result['service_provider'],
@@ -3227,7 +3412,7 @@ class ServiceProviderTests(FederationTests):
"""Update immutable attributes in service provider.
In this particular case the test will try to change ``id`` attribute.
- The server should return an HTTP 403 error code.
+ The server should return an HTTP 403 Forbidden error code.
"""
new_sp_ref = {'id': uuid.uuid4().hex}
@@ -3242,7 +3427,7 @@ class ServiceProviderTests(FederationTests):
self.patch(url, body={'service_provider': new_sp_ref},
expected_status=http_client.BAD_REQUEST)
- def test_update_service_provider_404(self):
+ def test_update_service_provider_returns_not_found(self):
new_sp_ref = self.sp_ref()
new_sp_ref['description'] = uuid.uuid4().hex
url = self.base_url(suffix=uuid.uuid4().hex)
@@ -3250,25 +3435,74 @@ class ServiceProviderTests(FederationTests):
expected_status=http_client.NOT_FOUND)
def test_update_sp_relay_state(self):
- """Update an SP with custome relay state."""
+ """Update an SP with custom relay state."""
new_sp_ref = self.sp_ref()
non_default_prefix = uuid.uuid4().hex
new_sp_ref['relay_state_prefix'] = non_default_prefix
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
- resp = self.patch(url, body={'service_provider': new_sp_ref},
- expected_status=200)
+ resp = self.patch(url, body={'service_provider': new_sp_ref})
sp_result = resp.result['service_provider']
self.assertEqual(non_default_prefix,
sp_result['relay_state_prefix'])
def test_delete_service_provider(self):
url = self.base_url(suffix=self.SERVICE_PROVIDER_ID)
- self.delete(url, expected_status=204)
+ self.delete(url)
- def test_delete_service_provider_404(self):
+ def test_delete_service_provider_returns_not_found(self):
url = self.base_url(suffix=uuid.uuid4().hex)
self.delete(url, expected_status=http_client.NOT_FOUND)
+ def test_filter_list_sp_by_id(self):
+ def get_id(resp):
+ sp = resp.result.get('service_provider')
+ return sp.get('id')
+
+ sp1_id = get_id(self._create_default_sp())
+ sp2_id = get_id(self._create_default_sp())
+
+ # list the SPs; both should be present.
+ url = self.base_url()
+ resp = self.get(url)
+ sps = resp.result.get('service_providers')
+ entities_ids = [e['id'] for e in sps]
+ self.assertIn(sp1_id, entities_ids)
+ self.assertIn(sp2_id, entities_ids)
+
+ # filter the SP by 'id'. Only SP1 should appear.
+ url = self.base_url() + '?id=' + sp1_id
+ resp = self.get(url)
+ sps = resp.result.get('service_providers')
+ entities_ids = [e['id'] for e in sps]
+ self.assertIn(sp1_id, entities_ids)
+ self.assertNotIn(sp2_id, entities_ids)
+
+ def test_filter_list_sp_by_enabled(self):
+ def get_id(resp):
+ sp = resp.result.get('service_provider')
+ return sp.get('id')
+
+ sp1_id = get_id(self._create_default_sp())
+ sp2_ref = self.sp_ref()
+ sp2_ref['enabled'] = False
+ sp2_id = get_id(self._create_default_sp(body=sp2_ref))
+
+ # list the SPs; we should get two back.
+ url = self.base_url()
+ resp = self.get(url)
+ sps = resp.result.get('service_providers')
+ entities_ids = [e['id'] for e in sps]
+ self.assertIn(sp1_id, entities_ids)
+ self.assertIn(sp2_id, entities_ids)
+
+ # filter the SP by 'enabled'. Only SP1 should appear.
+ url = self.base_url() + '?enabled=True'
+ resp = self.get(url)
+ sps = resp.result.get('service_providers')
+ entities_ids = [e['id'] for e in sps]
+ self.assertIn(sp1_id, entities_ids)
+ self.assertNotIn(sp2_id, entities_ids)
+
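The id/enabled filters exercised above are plain query-string parameters on the collection URL, so the same listing can be driven against a live deployment. A sketch using python-requests (the endpoint, port, and token are assumptions):

    import requests

    KEYSTONE = 'http://localhost:5000/v3'  # assumed endpoint
    TOKEN = 'an-admin-token'  # placeholder; obtain a real token first

    resp = requests.get(KEYSTONE + '/OS-FEDERATION/service_providers',
                        params={'enabled': 'True'},
                        headers={'X-Auth-Token': TOKEN})
    enabled_sps = resp.json()['service_providers']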
class WebSSOTests(FederatedTokenTests):
"""A class for testing Web SSO."""
@@ -3306,6 +3540,21 @@ class WebSSOTests(FederatedTokenTests):
resp = self.api.federated_sso_auth(context, self.PROTOCOL)
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
+ def test_get_sso_origin_host_case_insensitive(self):
+ # test lowercase hostname in trusted_dashboard
+ context = {
+ 'query_string': {
+ 'origin': "http://horizon.com",
+ },
+ }
+ host = self.api._get_sso_origin_host(context)
+ self.assertEqual("http://horizon.com", host)
+ # test uppercase hostname in trusted_dashboard
+ self.config_fixture.config(group='federation',
+ trusted_dashboard=['http://Horizon.com'])
+ host = self.api._get_sso_origin_host(context)
+ self.assertEqual("http://horizon.com", host)
+
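The case-insensitivity asserted above comes down to lowercasing the hostname before comparing it with the trusted_dashboard entries. A minimal sketch of that comparison (not the controller's actual code):

    def _is_trusted_dashboard(origin_host, trusted_dashboards):
        # Hostnames are case-insensitive, so compare in lowercase.
        return origin_host.lower() in (d.lower() for d in trusted_dashboards)

    assert _is_trusted_dashboard('http://Horizon.com', ['http://horizon.com'])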
def test_federated_sso_auth_with_protocol_specific_remote_id(self):
self.config_fixture.config(
group=self.PROTOCOL,
@@ -3380,7 +3629,7 @@ class WebSSOTests(FederatedTokenTests):
self.assertIn(self.TRUSTED_DASHBOARD, resp.body)
-class K2KServiceCatalogTests(FederationTests):
+class K2KServiceCatalogTests(test_v3.RestfulTestCase):
SP1 = 'SP1'
SP2 = 'SP2'
SP3 = 'SP3'
@@ -3429,11 +3678,10 @@ class K2KServiceCatalogTests(FederationTests):
for entity in service_providers:
id = entity.get('id')
ref_entity = self.sp_response(id, ref.get(id))
- self.assertDictEqual(ref_entity, entity)
+ self.assertDictEqual(entity, ref_entity)
def test_service_providers_in_token(self):
"""Check if service providers are listed in service catalog."""
-
token = self.token_v3_helper.get_token_data(self.user_id, ['password'])
ref = {}
for r in (self.sp_alpha, self.sp_beta, self.sp_gamma):
diff --git a/keystone-moon/keystone/tests/unit/test_v3_filters.py b/keystone-moon/keystone/tests/unit/test_v3_filters.py
index 668a2308..9dc19af5 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_filters.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_filters.py
@@ -13,13 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import uuid
-
from oslo_config import cfg
from oslo_serialization import jsonutils
from six.moves import range
+from keystone.tests import unit
from keystone.tests.unit import filtering
+from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
from keystone.tests.unit import test_v3
@@ -31,14 +31,14 @@ class IdentityTestFilteredCase(filtering.FilterTests,
test_v3.RestfulTestCase):
"""Test filter enforcement on the v3 Identity API."""
+ def _policy_fixture(self):
+ return ksfixtures.Policy(self.tmpfilename, self.config_fixture)
+
def setUp(self):
"""Setup for Identity Filter Test Cases."""
-
- super(IdentityTestFilteredCase, self).setUp()
self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
self.tmpfilename = self.tempfile.file_name
- self.config_fixture.config(group='oslo_policy',
- policy_file=self.tmpfilename)
+ super(IdentityTestFilteredCase, self).setUp()
def load_sample_data(self):
"""Create sample data for these tests.
@@ -57,32 +57,23 @@ class IdentityTestFilteredCase(filtering.FilterTests,
"""
# Start by creating a few domains
self._populate_default_domain()
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'], self.domainB)
- self.domainC = self.new_domain_ref()
+ self.domainC = unit.new_domain_ref()
self.domainC['enabled'] = False
self.resource_api.create_domain(self.domainC['id'], self.domainC)
# Now create some users, one in domainA and two of them in domainB
- self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
- password = uuid.uuid4().hex
- self.user1['password'] = password
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
-
- self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
- self.user2['password'] = password
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
-
- self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
- self.user3['password'] = password
- self.user3 = self.identity_api.create_user(self.user3)
- self.user3['password'] = password
-
- self.role = self.new_role_ref()
+ self.user1 = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
+ self.user2 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+ self.user3 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+
+ self.role = unit.new_role_ref()
self.role_api.create_role(self.role['id'], self.role)
self.assignment_api.create_grant(self.role['id'],
user_id=self.user1['id'],
@@ -311,7 +302,7 @@ class IdentityTestFilteredCase(filtering.FilterTests,
# See if we can add a SQL command...use the group table instead of the
# user table since 'user' is reserved word for SQLAlchemy.
- group = self.new_group_ref(domain_id=self.domainB['id'])
+ group = unit.new_group_ref(domain_id=self.domainB['id'])
group = self.identity_api.create_group(group)
url_by_name = "/users?name=x'; drop table group"
@@ -325,11 +316,11 @@ class IdentityTestFilteredCase(filtering.FilterTests,
class IdentityTestListLimitCase(IdentityTestFilteredCase):
"""Test list limiting enforcement on the v3 Identity API."""
+
content_type = 'json'
def setUp(self):
"""Setup for Identity Limit Test Cases."""
-
super(IdentityTestListLimitCase, self).setUp()
# Create 10 entries for each of the entities we are going to test
@@ -343,7 +334,7 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
self.service_list = []
self.addCleanup(self.clean_up_service)
for _ in range(10):
- new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
+ new_entity = unit.new_service_ref()
service = self.catalog_api.create_service(new_entity['id'],
new_entity)
self.service_list.append(service)
@@ -351,26 +342,22 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
self.policy_list = []
self.addCleanup(self.clean_up_policy)
for _ in range(10):
- new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
- 'blob': uuid.uuid4().hex}
+ new_entity = unit.new_policy_ref()
policy = self.policy_api.create_policy(new_entity['id'],
new_entity)
self.policy_list.append(policy)
def clean_up_entity(self, entity):
"""Clean up entity test data from Identity Limit Test Cases."""
-
self._delete_test_data(entity, self.entity_lists[entity])
def clean_up_service(self):
"""Clean up service test data from Identity Limit Test Cases."""
-
for service in self.service_list:
self.catalog_api.delete_service(service['id'])
def clean_up_policy(self):
"""Clean up policy test data from Identity Limit Test Cases."""
-
for policy in self.policy_list:
self.policy_api.delete_policy(policy['id'])
@@ -430,7 +417,6 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
def test_no_limit(self):
"""Check truncated attribute not set when list not limited."""
-
self._set_policy({"identity:list_services": []})
r = self.get('/services', auth=self.auth)
self.assertEqual(10, len(r.result.get('services')))
@@ -438,7 +424,6 @@ class IdentityTestListLimitCase(IdentityTestFilteredCase):
def test_at_limit(self):
"""Check truncated attribute not set when list at max size."""
-
# Test this by overriding the general limit with a higher
# driver-specific limit (allowing all entities to be returned
# in the collection), which should result in a non truncated list
diff --git a/keystone-moon/keystone/tests/unit/test_v3_identity.py b/keystone-moon/keystone/tests/unit/test_v3_identity.py
index 5a8e4fd5..7d3f6cad 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_identity.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_identity.py
@@ -30,31 +30,63 @@ from keystone.tests.unit import test_v3
CONF = cfg.CONF
+# NOTE(morganfainberg): To be removed when admin_token_auth middleware is
+# removed. This was moved to it's own testcase so it can setup the
+# admin_token_auth pipeline without impacting other tests.
+class IdentityTestCaseStaticAdminToken(test_v3.RestfulTestCase):
+ EXTENSION_TO_ADD = 'admin_token_auth'
+
+ def config_overrides(self):
+ super(IdentityTestCaseStaticAdminToken, self).config_overrides()
+ self.config_fixture.config(
+ admin_token='ADMIN')
+
+ def test_list_users_with_static_admin_token_and_multiple_backends(self):
+ # domain-specific operations with the bootstrap ADMIN token is
+ # disallowed when domain-specific drivers are enabled
+ self.config_fixture.config(group='identity',
+ domain_specific_drivers_enabled=True)
+ self.get('/users', token=CONF.admin_token,
+ expected_status=exception.Unauthorized.code)
+
+ def test_create_user_with_admin_token_and_no_domain(self):
+ """Call ``POST /users`` with admin token but no domain id.
+
+ It should not be possible to use the admin token to create a user
+ while not explicitly passing the domain in the request body.
+
+ """
+ # Passing a valid domain id to new_user_ref() since domain_id is
+ # not an optional parameter.
+ ref = unit.new_user_ref(domain_id=self.domain_id)
+ # Delete the domain id before sending the request.
+ del ref['domain_id']
+ self.post('/users', body={'user': ref}, token=CONF.admin_token,
+ expected_status=http_client.BAD_REQUEST)
+
+
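The admin_token_auth middleware exercised by this test case short-circuits authentication when the incoming X-Auth-Token matches the configured admin_token. A sketch of that check (illustrative only, operating on plain dicts rather than real request objects):

    def admin_token_auth(headers, context, configured_admin_token):
        # Illustrative: mark the request context as admin when the static
        # bootstrap token is presented; other requests pass through untouched.
        if headers.get('X-Auth-Token') == configured_admin_token:
            context['is_admin'] = True
        return context

    ctx = admin_token_auth({'X-Auth-Token': 'ADMIN'}, {}, 'ADMIN')
    assert ctx['is_admin']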
class IdentityTestCase(test_v3.RestfulTestCase):
"""Test users and groups."""
def setUp(self):
super(IdentityTestCase, self).setUp()
- self.group = self.new_group_ref(
- domain_id=self.domain_id)
+ self.group = unit.new_group_ref(domain_id=self.domain_id)
self.group = self.identity_api.create_group(self.group)
self.group_id = self.group['id']
- self.credential_id = uuid.uuid4().hex
- self.credential = self.new_credential_ref(
+ self.credential = unit.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
- self.credential['id'] = self.credential_id
- self.credential_api.create_credential(
- self.credential_id,
- self.credential)
+
+ self.credential_api.create_credential(self.credential['id'],
+ self.credential)
# user crud tests
def test_create_user(self):
"""Call ``POST /users``."""
- ref = self.new_user_ref(domain_id=self.domain_id)
+ ref = unit.new_user_ref(domain_id=self.domain_id)
r = self.post(
'/users',
body={'user': ref})
@@ -70,17 +102,14 @@ class IdentityTestCase(test_v3.RestfulTestCase):
"""
# Create a user with a role on the domain so we can get a
# domain scoped token
- domain = self.new_domain_ref()
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user = self.new_user_ref(domain_id=domain['id'])
- password = user['password']
- user = self.identity_api.create_user(user)
- user['password'] = password
+ user = unit.create_user(self.identity_api, domain_id=domain['id'])
self.assignment_api.create_grant(
role_id=self.role_id, user_id=user['id'],
domain_id=domain['id'])
- ref = self.new_user_ref(domain_id=domain['id'])
+ ref = unit.new_user_ref(domain_id=domain['id'])
ref_nd = ref.copy()
ref_nd.pop('domain_id')
auth = self.build_authentication_request(
@@ -91,7 +120,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.assertValidUserResponse(r, ref)
# Now try the same thing without a domain token - which should fail
- ref = self.new_user_ref(domain_id=domain['id'])
+ ref = unit.new_user_ref(domain_id=domain['id'])
ref_nd = ref.copy()
ref_nd.pop('domain_id')
auth = self.build_authentication_request(
@@ -112,6 +141,79 @@ class IdentityTestCase(test_v3.RestfulTestCase):
ref['domain_id'] = CONF.identity.default_domain_id
return self.assertValidUserResponse(r, ref)
+ def test_create_user_with_admin_token_and_domain(self):
+ """Call ``POST /users`` with admin token and domain id."""
+ ref = unit.new_user_ref(domain_id=self.domain_id)
+ self.post('/users', body={'user': ref}, token=self.get_admin_token(),
+ expected_status=http_client.CREATED)
+
+ def test_user_management_normalized_keys(self):
+ """Illustrate the inconsistent handling of hyphens in keys.
+
+ To quote Morgan in bug 1526244:
+
+ the reason this is converted from "domain-id" to "domain_id" is
+ because of how we process/normalize data. The way we have to handle
+ specific data types for known columns requires avoiding "-" in the
+ actual python code since "-" is not valid for attributes in python
+ w/o significant use of "getattr" etc.
+
+ In short, historically we handle some things in conversions. The
+ use of "extras" has long been a poor design choice that leads to
+ odd/strange inconsistent behaviors because of other choices made in
+ handling data from within the body. (In many cases we convert from
+ "-" to "_" throughout openstack)
+
+ Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9
+
+ """
+ # Create two domains to work with.
+ domain1 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain1['id'], domain1)
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+
+ # We can successfully create a normal user without any surprises.
+ user = unit.new_user_ref(domain_id=domain1['id'])
+ r = self.post(
+ '/users',
+ body={'user': user})
+ self.assertValidUserResponse(r, user)
+ user['id'] = r.json['user']['id']
+
+ # Query strings are not normalized: so we get all users back (like
+ # self.user), not just the ones in the specified domain.
+ r = self.get(
+ '/users?domain-id=%s' % domain1['id'])
+ self.assertValidUserListResponse(r, ref=self.user)
+ self.assertNotEqual(domain1['id'], self.user['domain_id'])
+
+ # When creating a new user, if we move the 'domain_id' into the
+ # 'domain-id' attribute, the server will normalize the request
+ # attribute, and effectively "move it back" for us.
+ user = unit.new_user_ref(domain_id=domain1['id'])
+ user['domain-id'] = user.pop('domain_id')
+ r = self.post(
+ '/users',
+ body={'user': user})
+ self.assertNotIn('domain-id', r.json['user'])
+ self.assertEqual(domain1['id'], r.json['user']['domain_id'])
+ # (move this attribute back so we can use assertValidUserResponse)
+ user['domain_id'] = user.pop('domain-id')
+ self.assertValidUserResponse(r, user)
+ user['id'] = r.json['user']['id']
+
+ # If we try updating the user's 'domain_id' by specifying a
+ # 'domain-id', then it'll be stored into extras rather than normalized,
+ # and the user's actual 'domain_id' is not affected.
+ r = self.patch(
+ '/users/%s' % user['id'],
+ body={'user': {'domain-id': domain2['id']}})
+ self.assertEqual(domain2['id'], r.json['user']['domain-id'])
+ self.assertEqual(user['domain_id'], r.json['user']['domain_id'])
+ self.assertNotEqual(domain2['id'], user['domain_id'])
+ self.assertValidUserResponse(r, user)
+
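The normalization described in the docstring above amounts to mapping hyphens to underscores for known columns on create, while unrecognized keys fall through into "extras". A rough sketch of that create-path handling (an assumed helper, not keystone's actual code):

    KNOWN_USER_ATTRS = {'name', 'domain_id', 'password', 'enabled'}

    def normalize_user_body(body):
        # Hyphenated keys are normalized for known columns; anything else
        # is stored verbatim as an "extra" attribute.
        normalized, extras = {}, {}
        for key, value in body.items():
            candidate = key.replace('-', '_')
            if candidate in KNOWN_USER_ATTRS:
                normalized[candidate] = value
            else:
                extras[key] = value
        return normalized, extras

    normalize_user_body({'domain-id': 'd1'})  # ({'domain_id': 'd1'}, {})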
def test_create_user_bad_request(self):
"""Call ``POST /users``."""
self.post('/users', body={'user': {}},
@@ -134,29 +236,42 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.config_fixture.config(group='identity',
domain_specific_drivers_enabled=True)
- # Create a user with a role on the domain so we can get a
- # domain scoped token
- domain = self.new_domain_ref()
+ # Create a new domain with a new project and user
+ domain = unit.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
- user = self.new_user_ref(domain_id=domain['id'])
- password = user['password']
- user = self.identity_api.create_user(user)
- user['password'] = password
+
+ project = unit.new_project_ref(domain_id=domain['id'])
+ self.resource_api.create_project(project['id'], project)
+
+ user = unit.create_user(self.identity_api, domain_id=domain['id'])
+
+ # Create both project and domain role grants for the user so we
+ # can get both project and domain scoped tokens
self.assignment_api.create_grant(
role_id=self.role_id, user_id=user['id'],
domain_id=domain['id'])
+ self.assignment_api.create_grant(
+ role_id=self.role_id, user_id=user['id'],
+ project_id=project['id'])
- ref = self.new_user_ref(domain_id=domain['id'])
- ref_nd = ref.copy()
- ref_nd.pop('domain_id')
- auth = self.build_authentication_request(
+ dom_auth = self.build_authentication_request(
user_id=user['id'],
password=user['password'],
domain_id=domain['id'])
+ project_auth = self.build_authentication_request(
+ user_id=user['id'],
+ password=user['password'],
+ project_id=project['id'])
# First try using a domain scoped token
resource_url = '/users'
- r = self.get(resource_url, auth=auth)
+ r = self.get(resource_url, auth=dom_auth)
+ self.assertValidUserListResponse(r, ref=user,
+ resource_url=resource_url)
+
+ # Now try using a project scoped token
+ resource_url = '/users'
+ r = self.get(resource_url, auth=project_auth)
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
@@ -167,21 +282,9 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
- # Now try the same thing without a domain token or filter,
- # which should fail
- r = self.get('/users', expected_status=exception.Unauthorized.code)
-
- def test_list_users_with_static_admin_token_and_multiple_backends(self):
- # domain-specific operations with the bootstrap ADMIN token is
- # disallowed when domain-specific drivers are enabled
- self.config_fixture.config(group='identity',
- domain_specific_drivers_enabled=True)
- self.get('/users', token=CONF.admin_token,
- expected_status=exception.Unauthorized.code)
-
def test_list_users_no_default_project(self):
"""Call ``GET /users`` making sure no default_project_id."""
- user = self.new_user_ref(self.domain_id)
+ user = unit.new_user_ref(self.domain_id)
user = self.identity_api.create_user(user)
resource_url = '/users'
r = self.get(resource_url)
@@ -196,7 +299,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_get_user_with_default_project(self):
"""Call ``GET /users/{user_id}`` making sure of default_project_id."""
- user = self.new_user_ref(domain_id=self.domain_id,
+ user = unit.new_user_ref(domain_id=self.domain_id,
project_id=self.project_id)
user = self.identity_api.create_user(user)
r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
@@ -209,45 +312,39 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_list_groups_for_user(self):
"""Call ``GET /users/{user_id}/groups``."""
+ user1 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+ user2 = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
- self.user1 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user1['password']
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
- self.user2 = self.new_user_ref(
- domain_id=self.domain['id'])
- password = self.user2['password']
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
- 'group_id': self.group_id, 'user_id': self.user1['id']})
+ 'group_id': self.group_id, 'user_id': user1['id']})
# Scenarios below are written to test the default policy configuration
# One should be allowed to list one's own groups
auth = self.build_authentication_request(
- user_id=self.user1['id'],
- password=self.user1['password'])
+ user_id=user1['id'],
+ password=user1['password'])
resource_url = ('/users/%(user_id)s/groups' %
- {'user_id': self.user1['id']})
+ {'user_id': user1['id']})
r = self.get(resource_url, auth=auth)
self.assertValidGroupListResponse(r, ref=self.group,
resource_url=resource_url)
# Administrator is allowed to list others' groups
resource_url = ('/users/%(user_id)s/groups' %
- {'user_id': self.user1['id']})
+ {'user_id': user1['id']})
r = self.get(resource_url)
self.assertValidGroupListResponse(r, ref=self.group,
resource_url=resource_url)
# Ordinary users should not be allowed to list other's groups
auth = self.build_authentication_request(
- user_id=self.user2['id'],
- password=self.user2['password'])
+ user_id=user2['id'],
+ password=user2['password'])
r = self.get('/users/%(user_id)s/groups' % {
- 'user_id': self.user1['id']}, auth=auth,
+ 'user_id': user1['id']}, auth=auth,
expected_status=exception.ForbiddenAction.code)
def test_check_user_in_group(self):
@@ -278,7 +375,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_update_user(self):
"""Call ``PATCH /users/{user_id}``."""
- user = self.new_user_ref(domain_id=self.domain_id)
+ user = unit.new_user_ref(domain_id=self.domain_id)
del user['id']
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
@@ -287,44 +384,42 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_admin_password_reset(self):
# bootstrap a user as admin
- user_ref = self.new_user_ref(domain_id=self.domain['id'])
- password = user_ref['password']
- user_ref = self.identity_api.create_user(user_ref)
+ user_ref = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
# auth as user should work before a password change
old_password_auth = self.build_authentication_request(
user_id=user_ref['id'],
- password=password)
- r = self.v3_authenticate_token(old_password_auth, expected_status=201)
+ password=user_ref['password'])
+ r = self.v3_create_token(old_password_auth)
old_token = r.headers.get('X-Subject-Token')
# auth as user with a token should work before a password change
old_token_auth = self.build_authentication_request(token=old_token)
- self.v3_authenticate_token(old_token_auth, expected_status=201)
+ self.v3_create_token(old_token_auth)
# administrative password reset
new_password = uuid.uuid4().hex
self.patch('/users/%s' % user_ref['id'],
- body={'user': {'password': new_password}},
- expected_status=200)
+ body={'user': {'password': new_password}})
# auth as user with original password should not work after change
- self.v3_authenticate_token(old_password_auth,
- expected_status=http_client.UNAUTHORIZED)
+ self.v3_create_token(old_password_auth,
+ expected_status=http_client.UNAUTHORIZED)
# auth as user with an old token should not work after change
- self.v3_authenticate_token(old_token_auth,
- expected_status=http_client.NOT_FOUND)
+ self.v3_create_token(old_token_auth,
+ expected_status=http_client.NOT_FOUND)
# new password should work
new_password_auth = self.build_authentication_request(
user_id=user_ref['id'],
password=new_password)
- self.v3_authenticate_token(new_password_auth, expected_status=201)
+ self.v3_create_token(new_password_auth)
def test_update_user_domain_id(self):
"""Call ``PATCH /users/{user_id}`` with domain_id."""
- user = self.new_user_ref(domain_id=self.domain['id'])
+ user = unit.new_user_ref(domain_id=self.domain['id'])
user = self.identity_api.create_user(user)
user['domain_id'] = CONF.identity.default_domain_id
r = self.patch('/users/%(user_id)s' % {
@@ -349,18 +444,16 @@ class IdentityTestCase(test_v3.RestfulTestCase):
"""
# First check the credential for this user is present
r = self.credential_api.get_credential(self.credential['id'])
- self.assertDictEqual(r, self.credential)
+ self.assertDictEqual(self.credential, r)
# Create a second credential with a different user
- self.user2 = self.new_user_ref(
- domain_id=self.domain['id'],
- project_id=self.project['id'])
- self.user2 = self.identity_api.create_user(self.user2)
- self.credential2 = self.new_credential_ref(
- user_id=self.user2['id'],
- project_id=self.project['id'])
- self.credential_api.create_credential(
- self.credential2['id'],
- self.credential2)
+
+ user2 = unit.new_user_ref(domain_id=self.domain['id'],
+ project_id=self.project['id'])
+ user2 = self.identity_api.create_user(user2)
+ credential2 = unit.new_credential_ref(user_id=user2['id'],
+ project_id=self.project['id'])
+ self.credential_api.create_credential(credential2['id'], credential2)
+
# Create a token for this user which we can check later
# gets deleted
auth_data = self.build_authentication_request(
@@ -371,7 +464,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
# Confirm token is valid for now
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
- expected_status=200)
+ expected_status=http_client.OK)
# Now delete the user
self.delete('/users/%(user_id)s' % {
@@ -387,14 +480,57 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.user['id'])
self.assertEqual(0, len(tokens))
# But the credential for user2 is unaffected
- r = self.credential_api.get_credential(self.credential2['id'])
- self.assertDictEqual(r, self.credential2)
+ r = self.credential_api.get_credential(credential2['id'])
+ self.assertDictEqual(credential2, r)
+
+ # shadow user tests
+ def test_shadow_federated_user(self):
+ fed_user = unit.new_federated_user_ref()
+ user = (
+ self.identity_api.shadow_federated_user(fed_user["idp_id"],
+ fed_user["protocol_id"],
+ fed_user["unique_id"],
+ fed_user["display_name"])
+ )
+ self.assertIsNotNone(user['id'])
+ self.assertEqual(4, len(user.keys()))
+ self.assertIsNotNone(user['name'])
+ self.assertIsNone(user['domain_id'])
+ self.assertTrue(user['enabled'])
+
+ def test_shadow_existing_federated_user(self):
+ fed_user = unit.new_federated_user_ref()
+
+ # introduce the user to keystone for the first time
+ shadow_user1 = self.identity_api.shadow_federated_user(
+ fed_user["idp_id"],
+ fed_user["protocol_id"],
+ fed_user["unique_id"],
+ fed_user["display_name"])
+ self.assertEqual(fed_user['display_name'], shadow_user1['name'])
+
+ # Shadow the user again, with another name, to invalidate the cache.
+ # Internally this operation causes a request to the driver; it should
+ # not fail.
+ fed_user['display_name'] = uuid.uuid4().hex
+ shadow_user2 = self.identity_api.shadow_federated_user(
+ fed_user["idp_id"],
+ fed_user["protocol_id"],
+ fed_user["unique_id"],
+ fed_user["display_name"])
+ self.assertEqual(fed_user['display_name'], shadow_user2['name'])
+ self.assertNotEqual(shadow_user1['name'], shadow_user2['name'])
+
+ # The shadowed users still share the same unique ID.
+ self.assertEqual(shadow_user1['id'], shadow_user2['id'])
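
The two tests above pin down the get-or-create contract behind shadow_federated_user: the first call mints a local record for the federated identity, and later calls reuse the same ID while refreshing the display name from the incoming assertion. A minimal dict-backed sketch of that contract (illustrative names only, not keystone's actual driver API):

    import uuid

    class ShadowUserStore(object):
        def __init__(self):
            # Shadow records are keyed on the federated identity triple.
            self._users = {}

        def shadow_federated_user(self, idp_id, protocol_id, unique_id,
                                  display_name):
            key = (idp_id, protocol_id, unique_id)
            user = self._users.get(key)
            if user is None:
                # First sighting: mint a stable local ID. The record has
                # no owning domain, matching the assertions above.
                user = {'id': uuid.uuid4().hex,
                        'domain_id': None,
                        'enabled': True}
                self._users[key] = user
            # Every call refreshes the name, so a changed display name
            # shows up on the same user ID.
            user['name'] = display_name
            return user
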
# group crud tests
def test_create_group(self):
"""Call ``POST /groups``."""
- ref = self.new_group_ref(domain_id=self.domain_id)
+ # Create a new group to avoid a duplicate check failure
+ ref = unit.new_group_ref(domain_id=self.domain_id)
r = self.post(
'/groups',
body={'group': ref})
@@ -420,7 +556,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_update_group(self):
"""Call ``PATCH /groups/{group_id}``."""
- group = self.new_group_ref(domain_id=self.domain_id)
+ group = unit.new_group_ref(domain_id=self.domain_id)
del group['id']
r = self.patch('/groups/%(group_id)s' % {
'group_id': self.group_id},
@@ -429,19 +565,17 @@ class IdentityTestCase(test_v3.RestfulTestCase):
def test_update_group_domain_id(self):
"""Call ``PATCH /groups/{group_id}`` with domain_id."""
- group = self.new_group_ref(domain_id=self.domain['id'])
- group = self.identity_api.create_group(group)
- group['domain_id'] = CONF.identity.default_domain_id
+ self.group['domain_id'] = CONF.identity.default_domain_id
r = self.patch('/groups/%(group_id)s' % {
- 'group_id': group['id']},
- body={'group': group},
+ 'group_id': self.group['id']},
+ body={'group': self.group},
expected_status=exception.ValidationError.code)
self.config_fixture.config(domain_id_immutable=False)
- group['domain_id'] = self.domain['id']
+ self.group['domain_id'] = self.domain['id']
r = self.patch('/groups/%(group_id)s' % {
- 'group_id': group['id']},
- body={'group': group})
- self.assertValidGroupResponse(r, group)
+ 'group_id': self.group['id']},
+ body={'group': self.group})
+ self.assertValidGroupResponse(r, self.group)
def test_delete_group(self):
"""Call ``DELETE /groups/{group_id}``."""
@@ -453,7 +587,7 @@ class IdentityTestCase(test_v3.RestfulTestCase):
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
- ref = self.new_user_ref(domain_id=self.domain_id)
+ ref = unit.new_user_ref(domain_id=self.domain_id)
self.post(
'/users',
body={'user': ref})
@@ -467,108 +601,122 @@ class IdentityTestCase(test_v3.RestfulTestCase):
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# bootstrap a user as admin
- user_ref = self.new_user_ref(domain_id=self.domain['id'])
- password = user_ref['password']
- user_ref = self.identity_api.create_user(user_ref)
+ user_ref = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+
+ self.assertNotIn(user_ref['password'], log_fix.output)
# administrative password reset
new_password = uuid.uuid4().hex
self.patch('/users/%s' % user_ref['id'],
- body={'user': {'password': new_password}},
- expected_status=200)
+ body={'user': {'password': new_password}})
- self.assertNotIn(password, log_fix.output)
self.assertNotIn(new_password, log_fix.output)
class IdentityV3toV2MethodsTestCase(unit.TestCase):
"""Test users V3 to V2 conversion methods."""
+ def new_user_ref(self, **kwargs):
+ """Construct a bare bones user ref.
+
+ Omits all optional components.
+ """
+ ref = unit.new_user_ref(**kwargs)
+ # description is already omitted
+ del ref['email']
+ del ref['enabled']
+ del ref['password']
+ return ref
+
def setUp(self):
super(IdentityV3toV2MethodsTestCase, self).setUp()
self.load_backends()
- self.user_id = uuid.uuid4().hex
- self.default_project_id = uuid.uuid4().hex
- self.tenant_id = uuid.uuid4().hex
+ user_id = uuid.uuid4().hex
+ project_id = uuid.uuid4().hex
+
# User with only default_project_id in ref
- self.user1 = {'id': self.user_id,
- 'name': self.user_id,
- 'default_project_id': self.default_project_id,
- 'domain_id': CONF.identity.default_domain_id}
+ self.user1 = self.new_user_ref(
+ id=user_id,
+ name=user_id,
+ project_id=project_id,
+ domain_id=CONF.identity.default_domain_id)
# User without default_project_id or tenantId in ref
- self.user2 = {'id': self.user_id,
- 'name': self.user_id,
- 'domain_id': CONF.identity.default_domain_id}
+ self.user2 = self.new_user_ref(
+ id=user_id,
+ name=user_id,
+ domain_id=CONF.identity.default_domain_id)
# User with both tenantId and default_project_id in ref
- self.user3 = {'id': self.user_id,
- 'name': self.user_id,
- 'default_project_id': self.default_project_id,
- 'tenantId': self.tenant_id,
- 'domain_id': CONF.identity.default_domain_id}
+ self.user3 = self.new_user_ref(
+ id=user_id,
+ name=user_id,
+ project_id=project_id,
+ tenantId=project_id,
+ domain_id=CONF.identity.default_domain_id)
# User with only tenantId in ref
- self.user4 = {'id': self.user_id,
- 'name': self.user_id,
- 'tenantId': self.tenant_id,
- 'domain_id': CONF.identity.default_domain_id}
+ self.user4 = self.new_user_ref(
+ id=user_id,
+ name=user_id,
+ tenantId=project_id,
+ domain_id=CONF.identity.default_domain_id)
# Expected result if the user is meant to have a tenantId element
- self.expected_user = {'id': self.user_id,
- 'name': self.user_id,
- 'username': self.user_id,
- 'tenantId': self.default_project_id}
+ self.expected_user = {'id': user_id,
+ 'name': user_id,
+ 'username': user_id,
+ 'tenantId': project_id}
# Expected result if the user is not meant to have a tenantId element
- self.expected_user_no_tenant_id = {'id': self.user_id,
- 'name': self.user_id,
- 'username': self.user_id}
+ self.expected_user_no_tenant_id = {'id': user_id,
+ 'name': user_id,
+ 'username': user_id}
def test_v3_to_v2_user_method(self):
updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
self.assertIs(self.user1, updated_user1)
- self.assertDictEqual(self.user1, self.expected_user)
+ self.assertDictEqual(self.expected_user, self.user1)
updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
self.assertIs(self.user2, updated_user2)
- self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
+ self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
self.assertIs(self.user3, updated_user3)
- self.assertDictEqual(self.user3, self.expected_user)
+ self.assertDictEqual(self.expected_user, self.user3)
updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
self.assertIs(self.user4, updated_user4)
- self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
+ self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)
def test_v3_to_v2_user_method_list(self):
user_list = [self.user1, self.user2, self.user3, self.user4]
updated_list = controller.V2Controller.v3_to_v2_user(user_list)
- self.assertEqual(len(updated_list), len(user_list))
+ self.assertEqual(len(user_list), len(updated_list))
for i, ref in enumerate(updated_list):
# Order should not change.
self.assertIs(ref, user_list[i])
- self.assertDictEqual(self.user1, self.expected_user)
- self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
- self.assertDictEqual(self.user3, self.expected_user)
- self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
+ self.assertDictEqual(self.expected_user, self.user1)
+ self.assertDictEqual(self.expected_user_no_tenant_id, self.user2)
+ self.assertDictEqual(self.expected_user, self.user3)
+ self.assertDictEqual(self.expected_user_no_tenant_id, self.user4)
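
Taken together, the four fixtures encode the conversion rule these assertions enforce: 'username' mirrors 'name', 'domain_id' is dropped, and v2's 'tenantId' is derived from v3's 'default_project_id' when one is present, while a stray pre-existing 'tenantId' on its own does not survive. A rough sketch of that rule, assuming plain dicts rather than the real V2Controller (which also handles lists of refs, as the second test shows):

    def v3_to_v2_user_sketch(ref):
        # Mutates ref in place, mirroring the assertIs identity checks.
        ref['username'] = ref['name']
        ref.pop('domain_id', None)
        default_project = ref.pop('default_project_id', None)
        ref.pop('tenantId', None)  # a bare tenantId is not preserved
        if default_project is not None:
            ref['tenantId'] = default_project
        return ref
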
class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
def setUp(self):
super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
- self.user_ref = self.new_user_ref(domain_id=self.domain['id'])
- password = self.user_ref['password']
- self.user_ref = self.identity_api.create_user(self.user_ref)
- self.user_ref['password'] = password
- self.token = self.get_request_token(self.user_ref['password'], 201)
+ self.user_ref = unit.create_user(self.identity_api,
+ domain_id=self.domain['id'])
+ self.token = self.get_request_token(self.user_ref['password'],
+ http_client.CREATED)
def get_request_token(self, password, expected_status):
auth_data = self.build_authentication_request(
user_id=self.user_ref['id'],
password=password)
- r = self.v3_authenticate_token(auth_data,
- expected_status=expected_status)
+ r = self.v3_create_token(auth_data,
+ expected_status=expected_status)
return r.headers.get('X-Subject-Token')
def change_password(self, expected_status, **kwargs):
@@ -581,27 +729,28 @@ class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
def test_changing_password(self):
# original password works
token_id = self.get_request_token(self.user_ref['password'],
- expected_status=201)
+ expected_status=http_client.CREATED)
# original token works
old_token_auth = self.build_authentication_request(token=token_id)
- self.v3_authenticate_token(old_token_auth, expected_status=201)
+ self.v3_create_token(old_token_auth)
# change password
new_password = uuid.uuid4().hex
self.change_password(password=new_password,
original_password=self.user_ref['password'],
- expected_status=204)
+ expected_status=http_client.NO_CONTENT)
# old password fails
self.get_request_token(self.user_ref['password'],
expected_status=http_client.UNAUTHORIZED)
# old token fails
- self.v3_authenticate_token(old_token_auth,
- expected_status=http_client.NOT_FOUND)
+ self.v3_create_token(old_token_auth,
+ expected_status=http_client.NOT_FOUND)
# new password works
- self.get_request_token(new_password, expected_status=201)
+ self.get_request_token(new_password,
+ expected_status=http_client.CREATED)
def test_changing_password_with_missing_original_password_fails(self):
r = self.change_password(password=uuid.uuid4().hex,
@@ -640,7 +789,7 @@ class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
new_password = uuid.uuid4().hex
self.change_password(password=new_password,
original_password=self.user_ref['password'],
- expected_status=204)
+ expected_status=http_client.NO_CONTENT)
self.assertNotIn(self.user_ref['password'], log_fix.output)
self.assertNotIn(new_password, log_fix.output)
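
The change_password helper truncated at the hunk boundary above drives keystone's self-service password endpoint. Roughly, assuming the standard v3 API shape (POST /v3/users/{user_id}/password returning 204 NO_CONTENT):

    import requests

    def change_password(base_url, user_id, token, original, new):
        # Self-service change: the caller must prove knowledge of the
        # current secret via original_password.
        body = {'user': {'original_password': original, 'password': new}}
        resp = requests.post(
            '%s/v3/users/%s/password' % (base_url, user_id),
            json=body, headers={'X-Auth-Token': token})
        resp.raise_for_status()  # raises once the old secret stops working
        return resp
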
diff --git a/keystone-moon/keystone/tests/unit/test_v3_oauth1.py b/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
index 8794a426..198dffb8 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_oauth1.py
@@ -15,28 +15,36 @@
import copy
import uuid
-from oslo_config import cfg
+import mock
+from oslo_log import versionutils
from oslo_serialization import jsonutils
from pycadf import cadftaxonomy
from six.moves import http_client
from six.moves import urllib
-from keystone.contrib import oauth1
-from keystone.contrib.oauth1 import controllers
-from keystone.contrib.oauth1 import core
+from keystone.contrib.oauth1 import routers
from keystone import exception
+from keystone import oauth1
+from keystone.oauth1 import controllers
+from keystone.oauth1 import core
+from keystone.tests import unit
from keystone.tests.unit.common import test_notifications
+from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
from keystone.tests.unit import test_v3
-CONF = cfg.CONF
+class OAuth1ContribTests(test_v3.RestfulTestCase):
+ @mock.patch.object(versionutils, 'report_deprecated_feature')
+ def test_exception_happens(self, mock_deprecator):
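+ # Merely constructing the legacy paste router should emit the
+ # deprecation warning telling deployers to drop oauth1_extension
+ # from their paste pipeline.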
+ routers.OAuth1Extension(mock.ANY)
+ mock_deprecator.assert_called_once_with(mock.ANY, mock.ANY)
+ args, _kwargs = mock_deprecator.call_args
+ self.assertIn("Remove oauth1_extension from", args[1])
-class OAuth1Tests(test_v3.RestfulTestCase):
- EXTENSION_NAME = 'oauth1'
- EXTENSION_TO_ADD = 'oauth1_extension'
+class OAuth1Tests(test_v3.RestfulTestCase):
CONSUMER_URL = '/OS-OAUTH1/consumers'
@@ -140,7 +148,7 @@ class ConsumerCRUDTests(OAuth1Tests):
consumer = self._create_single_consumer()
consumer_id = consumer['id']
resp = self.delete(self.CONSUMER_URL + '/%s' % consumer_id)
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
def test_consumer_get(self):
consumer = self._create_single_consumer()
@@ -262,7 +270,7 @@ class OAuthFlowTests(OAuth1Tests):
url = self._authorize_request_token(request_key)
body = {'roles': [{'id': self.role_id}]}
- resp = self.put(url, body=body, expected_status=200)
+ resp = self.put(url, body=body, expected_status=http_client.OK)
self.verifier = resp.result['token']['oauth_verifier']
self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier))
self.assertEqual(8, len(self.verifier))
@@ -357,7 +365,7 @@ class AccessTokenCRUDTests(OAuthFlowTests):
resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
% {'user': self.user_id,
'auth': self.access_token.key})
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
# List access_token should be 0
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
@@ -388,7 +396,7 @@ class AuthTokenTests(OAuthFlowTests):
self.assertEqual(self.role_id, roles_list[0]['id'])
# verify that the token can perform delegated tasks
- ref = self.new_user_ref(domain_id=self.domain_id)
+ ref = unit.new_user_ref(domain_id=self.domain_id)
r = self.admin_request(path='/v3/users', headers=headers,
method='POST', body={'user': ref})
self.assertValidUserResponse(r, ref)
@@ -400,7 +408,7 @@ class AuthTokenTests(OAuthFlowTests):
resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
% {'user': self.user_id,
'auth': self.access_token.key})
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
# Check Keystone Token no longer exists
headers = {'X-Subject-Token': self.keystone_token_id,
@@ -415,7 +423,7 @@ class AuthTokenTests(OAuthFlowTests):
consumer_id = self.consumer['key']
resp = self.delete('/OS-OAUTH1/consumers/%(consumer_id)s'
% {'consumer_id': consumer_id})
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
# List access_token should be 0
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
@@ -491,7 +499,7 @@ class AuthTokenTests(OAuthFlowTests):
self.keystone_token_id)
def _create_trust_get_token(self):
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.user_id,
project_id=self.project_id,
@@ -534,7 +542,7 @@ class AuthTokenTests(OAuthFlowTests):
def test_oauth_token_cannot_create_new_trust(self):
self.test_oauth_flow()
- ref = self.new_trust_ref(
+ ref = unit.new_trust_ref(
trustor_user_id=self.user_id,
trustee_user_id=self.user_id,
project_id=self.project_id,
@@ -588,6 +596,18 @@ class AuthTokenTests(OAuthFlowTests):
expected_status=http_client.FORBIDDEN)
+class FernetAuthTokenTests(AuthTokenTests):
+
+ def config_overrides(self):
+ super(FernetAuthTokenTests, self).config_overrides()
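+ # Fernet tokens are encrypted, so the provider cannot run without a
+ # key repository; the fixture below provisions one.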
+ self.config_fixture.config(group='token', provider='fernet')
+ self.useFixture(ksfixtures.KeyRepository(self.config_fixture))
+
+ def test_delete_keystone_tokens_by_consumer_id(self):
+ # NOTE(lbragstad): Fernet tokens are never persisted in the backend.
+ pass
+
+
class MaliciousOAuth1Tests(OAuth1Tests):
def test_bad_consumer_secret(self):
@@ -645,7 +665,7 @@ class MaliciousOAuth1Tests(OAuth1Tests):
url = self._authorize_request_token(request_key)
body = {'roles': [{'id': self.role_id}]}
- resp = self.put(url, body=body, expected_status=200)
+ resp = self.put(url, body=body, expected_status=http_client.OK)
verifier = resp.result['token']['oauth_verifier']
self.assertIsNotNone(verifier)
@@ -719,7 +739,7 @@ class MaliciousOAuth1Tests(OAuth1Tests):
url = self._authorize_request_token(request_key)
body = {'roles': [{'id': self.role_id}]}
- resp = self.put(url, body=body, expected_status=200)
+ resp = self.put(url, body=body, expected_status=http_client.OK)
self.verifier = resp.result['token']['oauth_verifier']
self.request_token.set_verifier(self.verifier)
@@ -753,7 +773,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
# NOTE(stevemar): To simulate this error, we remove the Authorization
# header from the post request.
del headers['Authorization']
- self.post(endpoint, headers=headers, expected_status=500)
+ self.post(endpoint, headers=headers,
+ expected_status=http_client.INTERNAL_SERVER_ERROR)
class OAuthNotificationTests(OAuth1Tests,
@@ -800,7 +821,6 @@ class OAuthNotificationTests(OAuth1Tests,
notifications for request token creation, and access token
creation/deletion are emitted.
"""
-
consumer = self._create_single_consumer()
consumer_id = consumer['id']
consumer_secret = consumer['secret']
@@ -829,7 +849,7 @@ class OAuthNotificationTests(OAuth1Tests,
url = self._authorize_request_token(request_key)
body = {'roles': [{'id': self.role_id}]}
- resp = self.put(url, body=body, expected_status=200)
+ resp = self.put(url, body=body, expected_status=http_client.OK)
self.verifier = resp.result['token']['oauth_verifier']
self.assertTrue(all(i in core.VERIFIER_CHARS for i in self.verifier))
self.assertEqual(8, len(self.verifier))
@@ -858,7 +878,7 @@ class OAuthNotificationTests(OAuth1Tests,
resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
% {'user': self.user_id,
'auth': self.access_token.key})
- self.assertResponseStatus(resp, 204)
+ self.assertResponseStatus(resp, http_client.NO_CONTENT)
# Test to ensure the delete access token notification is sent
self._assert_notify_sent(access_key,
@@ -873,7 +893,7 @@ class OAuthNotificationTests(OAuth1Tests,
class OAuthCADFNotificationTests(OAuthNotificationTests):
def setUp(self):
- """Repeat the tests for CADF notifications """
+ """Repeat the tests for CADF notifications."""
super(OAuthCADFNotificationTests, self).setUp()
self.config_fixture.config(notification_format='cadf')
diff --git a/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py b/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
index 86ced724..5fb5387a 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_os_revoke.py
@@ -19,7 +19,7 @@ from six.moves import http_client
from testtools import matchers
from keystone.common import utils
-from keystone.contrib.revoke import model
+from keystone.models import revoke_model
from keystone.tests.unit import test_v3
from keystone.token import provider
@@ -31,8 +31,6 @@ def _future_time_string():
class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
- EXTENSION_NAME = 'revoke'
- EXTENSION_TO_ADD = 'revoke_extension'
JSON_HOME_DATA = {
'http://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/'
@@ -92,7 +90,7 @@ class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
sample['project_id'] = six.text_type(project_id)
before_time = timeutils.utcnow()
self.revoke_api.revoke(
- model.RevokeEvent(project_id=project_id))
+ revoke_model.RevokeEvent(project_id=project_id))
resp = self.get('/OS-REVOKE/events')
events = resp.json_body['events']
@@ -105,7 +103,7 @@ class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
sample['domain_id'] = six.text_type(domain_id)
before_time = timeutils.utcnow()
self.revoke_api.revoke(
- model.RevokeEvent(domain_id=domain_id))
+ revoke_model.RevokeEvent(domain_id=domain_id))
resp = self.get('/OS-REVOKE/events')
events = resp.json_body['events']
@@ -127,7 +125,7 @@ class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin):
sample['domain_id'] = six.text_type(domain_id)
self.revoke_api.revoke(
- model.RevokeEvent(domain_id=domain_id))
+ revoke_model.RevokeEvent(domain_id=domain_id))
resp = self.get('/OS-REVOKE/events')
events = resp.json_body['events']
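
These hunks only retarget the import (keystone.contrib.revoke.model becomes keystone.models.revoke_model); the flow under test is unchanged: record a revocation event, then read it back through the OS-REVOKE list API. Condensed from the context lines above (a paraphrase of the test, not a new API):

    event = revoke_model.RevokeEvent(project_id=project_id)
    self.revoke_api.revoke(event)
    resp = self.get('/OS-REVOKE/events')
    # Each returned event echoes the attributes it was keyed on.
    self.assertThat(resp.json_body['events'], matchers.HasLength(1))
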
diff --git a/keystone-moon/keystone/tests/unit/test_v3_policy.py b/keystone-moon/keystone/tests/unit/test_v3_policy.py
index 538fc565..76a52088 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_policy.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_policy.py
@@ -12,8 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
import uuid
+from keystone.tests import unit
from keystone.tests.unit import test_v3
@@ -22,9 +24,8 @@ class PolicyTestCase(test_v3.RestfulTestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
- self.policy_id = uuid.uuid4().hex
- self.policy = self.new_policy_ref()
- self.policy['id'] = self.policy_id
+ self.policy = unit.new_policy_ref()
+ self.policy_id = self.policy['id']
self.policy_api.create_policy(
self.policy_id,
self.policy.copy())
@@ -33,10 +34,8 @@ class PolicyTestCase(test_v3.RestfulTestCase):
def test_create_policy(self):
"""Call ``POST /policies``."""
- ref = self.new_policy_ref()
- r = self.post(
- '/policies',
- body={'policy': ref})
+ ref = unit.new_policy_ref()
+ r = self.post('/policies', body={'policy': ref})
return self.assertValidPolicyResponse(r, ref)
def test_list_policies(self):
@@ -47,22 +46,18 @@ class PolicyTestCase(test_v3.RestfulTestCase):
def test_get_policy(self):
"""Call ``GET /policies/{policy_id}``."""
r = self.get(
- '/policies/%(policy_id)s' % {
- 'policy_id': self.policy_id})
+ '/policies/%(policy_id)s' % {'policy_id': self.policy_id})
self.assertValidPolicyResponse(r, self.policy)
def test_update_policy(self):
"""Call ``PATCH /policies/{policy_id}``."""
- policy = self.new_policy_ref()
- policy['id'] = self.policy_id
+ self.policy['blob'] = json.dumps({'data': uuid.uuid4().hex, })
r = self.patch(
- '/policies/%(policy_id)s' % {
- 'policy_id': self.policy_id},
- body={'policy': policy})
- self.assertValidPolicyResponse(r, policy)
+ '/policies/%(policy_id)s' % {'policy_id': self.policy_id},
+ body={'policy': self.policy})
+ self.assertValidPolicyResponse(r, self.policy)
def test_delete_policy(self):
"""Call ``DELETE /policies/{policy_id}``."""
self.delete(
- '/policies/%(policy_id)s' % {
- 'policy_id': self.policy_id})
+ '/policies/%(policy_id)s' % {'policy_id': self.policy_id})
diff --git a/keystone-moon/keystone/tests/unit/test_v3_protection.py b/keystone-moon/keystone/tests/unit/test_v3_protection.py
index 9922ae5e..f77a1528 100644
--- a/keystone-moon/keystone/tests/unit/test_v3_protection.py
+++ b/keystone-moon/keystone/tests/unit/test_v3_protection.py
@@ -20,19 +20,22 @@ from oslo_serialization import jsonutils
from six.moves import http_client
from keystone import exception
-from keystone.policy.backends import rules
from keystone.tests import unit
+from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
from keystone.tests.unit import test_v3
+from keystone.tests.unit import utils
CONF = cfg.CONF
-DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class IdentityTestProtectedCase(test_v3.RestfulTestCase):
"""Test policy enforcement on the v3 Identity API."""
+ def _policy_fixture(self):
+ return ksfixtures.Policy(self.tmpfilename, self.config_fixture)
+
def setUp(self):
"""Setup for Identity Protection Test Cases.
@@ -49,14 +52,9 @@ class IdentityTestProtectedCase(test_v3.RestfulTestCase):
the default domain.
"""
- # Ensure that test_v3.RestfulTestCase doesn't load its own
- # sample data, which would make checking the results of our
- # tests harder
- super(IdentityTestProtectedCase, self).setUp()
self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
self.tmpfilename = self.tempfile.file_name
- self.config_fixture.config(group='oslo_policy',
- policy_file=self.tmpfilename)
+ super(IdentityTestProtectedCase, self).setUp()
# A default auth request we can use - un-scoped user token
self.auth = self.build_authentication_request(
@@ -66,45 +64,33 @@ class IdentityTestProtectedCase(test_v3.RestfulTestCase):
def load_sample_data(self):
self._populate_default_domain()
# Start by creating a couple of domains
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'], self.domainB)
- self.domainC = self.new_domain_ref()
- self.domainC['enabled'] = False
+ self.domainC = unit.new_domain_ref(enabled=False)
self.resource_api.create_domain(self.domainC['id'], self.domainC)
# Now create some users, one in domainA and two of them in domainB
- self.user1 = self.new_user_ref(domain_id=self.domainA['id'])
- password = uuid.uuid4().hex
- self.user1['password'] = password
- self.user1 = self.identity_api.create_user(self.user1)
- self.user1['password'] = password
-
- self.user2 = self.new_user_ref(domain_id=self.domainB['id'])
- password = uuid.uuid4().hex
- self.user2['password'] = password
- self.user2 = self.identity_api.create_user(self.user2)
- self.user2['password'] = password
-
- self.user3 = self.new_user_ref(domain_id=self.domainB['id'])
- password = uuid.uuid4().hex
- self.user3['password'] = password
- self.user3 = self.identity_api.create_user(self.user3)
- self.user3['password'] = password
-
- self.group1 = self.new_group_ref(domain_id=self.domainA['id'])
+ self.user1 = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
+ self.user2 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+ self.user3 = unit.create_user(self.identity_api,
+ domain_id=self.domainB['id'])
+
+ self.group1 = unit.new_group_ref(domain_id=self.domainA['id'])
self.group1 = self.identity_api.create_group(self.group1)
- self.group2 = self.new_group_ref(domain_id=self.domainA['id'])
+ self.group2 = unit.new_group_ref(domain_id=self.domainA['id'])
self.group2 = self.identity_api.create_group(self.group2)
- self.group3 = self.new_group_ref(domain_id=self.domainB['id'])
+ self.group3 = unit.new_group_ref(domain_id=self.domainB['id'])
self.group3 = self.identity_api.create_group(self.group3)
- self.role = self.new_role_ref()
+ self.role = unit.new_role_ref()
self.role_api.create_role(self.role['id'], self.role)
- self.role1 = self.new_role_ref()
+ self.role1 = unit.new_role_ref()
self.role_api.create_role(self.role1['id'], self.role1)
self.assignment_api.create_grant(self.role['id'],
user_id=self.user1['id'],
@@ -348,34 +334,23 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
def load_sample_data(self):
self._populate_default_domain()
- self.just_a_user = self.new_user_ref(
+ self.just_a_user = unit.create_user(
+ self.identity_api,
domain_id=CONF.identity.default_domain_id)
- password = uuid.uuid4().hex
- self.just_a_user['password'] = password
- self.just_a_user = self.identity_api.create_user(self.just_a_user)
- self.just_a_user['password'] = password
-
- self.another_user = self.new_user_ref(
+ self.another_user = unit.create_user(
+ self.identity_api,
domain_id=CONF.identity.default_domain_id)
- password = uuid.uuid4().hex
- self.another_user['password'] = password
- self.another_user = self.identity_api.create_user(self.another_user)
- self.another_user['password'] = password
-
- self.admin_user = self.new_user_ref(
+ self.admin_user = unit.create_user(
+ self.identity_api,
domain_id=CONF.identity.default_domain_id)
- password = uuid.uuid4().hex
- self.admin_user['password'] = password
- self.admin_user = self.identity_api.create_user(self.admin_user)
- self.admin_user['password'] = password
- self.role = self.new_role_ref()
+ self.role = unit.new_role_ref()
self.role_api.create_role(self.role['id'], self.role)
- self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
+ self.admin_role = unit.new_role_ref(name='admin')
self.role_api.create_role(self.admin_role['id'], self.admin_role)
# Create and assign roles to the project
- self.project = self.new_project_ref(
+ self.project = unit.new_project_ref(
domain_id=CONF.identity.default_domain_id)
self.resource_api.create_project(self.project['id'], self.project)
self.assignment_api.create_grant(self.role['id'],
@@ -461,7 +436,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
token = self.get_requested_token(auth)
self.head('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=200)
+ headers={'X-Subject-Token': token},
+ expected_status=http_client.OK)
def test_user_check_user_token(self):
# A user can check one of their own tokens.
@@ -474,7 +450,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
token2 = self.get_requested_token(auth)
self.head('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=200)
+ headers={'X-Subject-Token': token2},
+ expected_status=http_client.OK)
def test_user_check_other_user_token_rejected(self):
# A user cannot check another user's token.
@@ -510,7 +487,8 @@ class IdentityTestPolicySample(test_v3.RestfulTestCase):
user_token = self.get_requested_token(user_auth)
self.head('/auth/tokens', token=admin_token,
- headers={'X-Subject-Token': user_token}, expected_status=200)
+ headers={'X-Subject-Token': user_token},
+ expected_status=http_client.OK)
def test_user_revoke_same_token(self):
# Given a non-admin user token, the token can be used to revoke
@@ -579,6 +557,10 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
test_v3.AssignmentTestMixin):
"""Test policy enforcement of the sample v3 cloud policy file."""
+ def _policy_fixture(self):
+ return ksfixtures.Policy(unit.dirs.etc('policy.v3cloudsample.json'),
+ self.config_fixture)
+
def setUp(self):
"""Setup for v3 Cloud Policy Sample Test Cases.
@@ -592,8 +574,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
- domain_admin_user has role 'admin' on domainA,
- project_admin_user has role 'admin' on the project,
- just_a_user has a non-admin role on both domainA and the project.
- - admin_domain has user cloud_admin_user, with an 'admin' role
- on admin_domain.
+ - admin_domain has admin_project, and user cloud_admin_user, with an
+ 'admin' role on admin_project.
We test various api protection rules from the cloud sample policy
file to make sure the sample is valid and that we correctly enforce it.
@@ -604,62 +586,61 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
# tests harder
super(IdentityTestv3CloudPolicySample, self).setUp()
- # Finally, switch to the v3 sample policy file
- self.addCleanup(rules.reset)
- rules.reset()
self.config_fixture.config(
- group='oslo_policy',
- policy_file=unit.dirs.etc('policy.v3cloudsample.json'))
+ group='resource',
+ admin_project_name=self.admin_project['name'])
+ self.config_fixture.config(
+ group='resource',
+ admin_project_domain_name=self.admin_domain['name'])
def load_sample_data(self):
# Start by creating a couple of domains
self._populate_default_domain()
- self.domainA = self.new_domain_ref()
+ self.domainA = unit.new_domain_ref()
self.resource_api.create_domain(self.domainA['id'], self.domainA)
- self.domainB = self.new_domain_ref()
+ self.domainB = unit.new_domain_ref()
self.resource_api.create_domain(self.domainB['id'], self.domainB)
- self.admin_domain = {'id': 'admin_domain_id', 'name': 'Admin_domain'}
+ self.admin_domain = unit.new_domain_ref()
self.resource_api.create_domain(self.admin_domain['id'],
self.admin_domain)
+ self.admin_project = unit.new_project_ref(
+ domain_id=self.admin_domain['id'])
+ self.resource_api.create_project(self.admin_project['id'],
+ self.admin_project)
+
# And our users
- self.cloud_admin_user = self.new_user_ref(
+ self.cloud_admin_user = unit.create_user(
+ self.identity_api,
domain_id=self.admin_domain['id'])
- password = uuid.uuid4().hex
- self.cloud_admin_user['password'] = password
- self.cloud_admin_user = (
- self.identity_api.create_user(self.cloud_admin_user))
- self.cloud_admin_user['password'] = password
- self.just_a_user = self.new_user_ref(domain_id=self.domainA['id'])
- password = uuid.uuid4().hex
- self.just_a_user['password'] = password
- self.just_a_user = self.identity_api.create_user(self.just_a_user)
- self.just_a_user['password'] = password
- self.domain_admin_user = self.new_user_ref(
+ self.just_a_user = unit.create_user(
+ self.identity_api,
domain_id=self.domainA['id'])
- password = uuid.uuid4().hex
- self.domain_admin_user['password'] = password
- self.domain_admin_user = (
- self.identity_api.create_user(self.domain_admin_user))
- self.domain_admin_user['password'] = password
- self.project_admin_user = self.new_user_ref(
+ self.domain_admin_user = unit.create_user(
+ self.identity_api,
+ domain_id=self.domainA['id'])
+ self.domainB_admin_user = unit.create_user(
+ self.identity_api,
+ domain_id=self.domainB['id'])
+ self.project_admin_user = unit.create_user(
+ self.identity_api,
domain_id=self.domainA['id'])
- password = uuid.uuid4().hex
- self.project_admin_user['password'] = password
- self.project_admin_user = (
- self.identity_api.create_user(self.project_admin_user))
- self.project_admin_user['password'] = password
-
- # The admin role and another plain role
- self.admin_role = {'id': uuid.uuid4().hex, 'name': 'admin'}
+ self.project_adminB_user = unit.create_user(
+ self.identity_api,
+ domain_id=self.domainB['id'])
+
+ # The admin role, a domain specific role and another plain role
+ self.admin_role = unit.new_role_ref(name='admin')
self.role_api.create_role(self.admin_role['id'], self.admin_role)
- self.role = self.new_role_ref()
+ self.roleA = unit.new_role_ref(domain_id=self.domainA['id'])
+ self.role_api.create_role(self.roleA['id'], self.roleA)
+ self.role = unit.new_role_ref()
self.role_api.create_role(self.role['id'], self.role)
- # The cloud admin just gets the admin role
+ # The cloud admin just gets the admin role on the special admin project
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.cloud_admin_user['id'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
# Assign roles to the domain
self.assignment_api.create_grant(self.admin_role['id'],
@@ -668,13 +649,21 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
domain_id=self.domainA['id'])
+ self.assignment_api.create_grant(self.admin_role['id'],
+ user_id=self.domainB_admin_user['id'],
+ domain_id=self.domainB['id'])
# Create and assign roles to the project
- self.project = self.new_project_ref(domain_id=self.domainA['id'])
+ self.project = unit.new_project_ref(domain_id=self.domainA['id'])
self.resource_api.create_project(self.project['id'], self.project)
+ self.projectB = unit.new_project_ref(domain_id=self.domainB['id'])
+ self.resource_api.create_project(self.projectB['id'], self.projectB)
self.assignment_api.create_grant(self.admin_role['id'],
user_id=self.project_admin_user['id'],
project_id=self.project['id'])
+ self.assignment_api.create_grant(
+ self.admin_role['id'], user_id=self.project_adminB_user['id'],
+ project_id=self.projectB['id'])
self.assignment_api.create_grant(self.role['id'],
user_id=self.just_a_user['id'],
project_id=self.project['id'])
@@ -683,7 +672,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
# Return the expected return codes for APIs with and without data
# with any specified status overriding the normal values
if expected_status is None:
- return (200, 201, 204)
+ return (http_client.OK, http_client.CREATED,
+ http_client.NO_CONTENT)
else:
return (expected_status, expected_status, expected_status)
@@ -702,7 +692,7 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
- user_ref = self.new_user_ref(domain_id=domain_id)
+ user_ref = unit.new_user_ref(domain_id=domain_id)
self.post('/users', auth=self.auth, body={'user': user_ref},
expected_status=status_created)
@@ -721,7 +711,7 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
- proj_ref = self.new_project_ref(domain_id=domain_id)
+ proj_ref = unit.new_project_ref(domain_id=domain_id)
self.post('/projects', auth=self.auth, body={'project': proj_ref},
expected_status=status_created)
@@ -740,13 +730,14 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.delete(entity_url, auth=self.auth,
expected_status=status_no_data)
- domain_ref = self.new_domain_ref()
+ domain_ref = unit.new_domain_ref()
self.post('/domains', auth=self.auth, body={'domain': domain_ref},
expected_status=status_created)
- def _test_grants(self, target, entity_id, expected=None):
+ def _test_grants(self, target, entity_id, role_domain_id=None,
+ list_status_OK=False, expected=None):
status_OK, status_created, status_no_data = self._stati(expected)
- a_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ a_role = unit.new_role_ref(domain_id=role_domain_id)
self.role_api.create_role(a_role['id'], a_role)
collection_url = (
@@ -762,11 +753,67 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
expected_status=status_no_data)
self.head(member_url, auth=self.auth,
expected_status=status_no_data)
- self.get(collection_url, auth=self.auth,
- expected_status=status_OK)
+ if list_status_OK:
+ self.get(collection_url, auth=self.auth)
+ else:
+ self.get(collection_url, auth=self.auth,
+ expected_status=status_OK)
self.delete(member_url, auth=self.auth,
expected_status=status_no_data)
+ def _role_management_cases(self, read_status_OK=False, expected=None):
+ # Set the different status values for the different types of call,
+ # depending on whether we expect the calls to fail or not.
+ status_OK, status_created, status_no_data = self._stati(expected)
+ entity_url = '/roles/%s' % self.role['id']
+ list_url = '/roles'
+
+ if read_status_OK:
+ self.get(entity_url, auth=self.auth)
+ self.get(list_url, auth=self.auth)
+ else:
+ self.get(entity_url, auth=self.auth,
+ expected_status=status_OK)
+ self.get(list_url, auth=self.auth,
+ expected_status=status_OK)
+
+ role = {'name': 'Updated'}
+ self.patch(entity_url, auth=self.auth, body={'role': role},
+ expected_status=status_OK)
+ self.delete(entity_url, auth=self.auth,
+ expected_status=status_no_data)
+
+ role_ref = unit.new_role_ref()
+ self.post('/roles', auth=self.auth, body={'role': role_ref},
+ expected_status=status_created)
+
+ def _domain_role_management_cases(self, domain_id, read_status_OK=False,
+ expected=None):
+ # Set the different status values for the different types of call,
+ # depending on whether we expect the calls to fail or not.
+ status_OK, status_created, status_no_data = self._stati(expected)
+ entity_url = '/roles/%s' % self.roleA['id']
+ list_url = '/roles?domain_id=%s' % domain_id
+
+ if read_status_OK:
+ self.get(entity_url, auth=self.auth)
+ self.get(list_url, auth=self.auth)
+ else:
+ self.get(entity_url, auth=self.auth,
+ expected_status=status_OK)
+ self.get(list_url, auth=self.auth,
+ expected_status=status_OK)
+
+ role = {'name': 'Updated'}
+ self.patch(entity_url, auth=self.auth, body={'role': role},
+ expected_status=status_OK)
+ self.delete(entity_url, auth=self.auth,
+ expected_status=status_no_data)
+
+ role_ref = unit.new_role_ref(domain_id=domain_id)
+ self.post('/roles', auth=self.auth, body={'role': role_ref},
+ expected_status=status_created)
+
def test_user_management(self):
# First, authenticate with a user that does not have the domain
# admin role - shouldn't be able to do much.
@@ -786,13 +833,90 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self._test_user_management(self.domainA['id'])
+ def test_user_management_normalized_keys(self):
+ """Illustrate the inconsistent handling of hyphens in keys.
+
+ To quote Morgan in bug 1526244:
+
+ the reason this is converted from "domain-id" to "domain_id" is
+ because of how we process/normalize data. The way we have to handle
+ specific data types for known columns requires avoiding "-" in the
+ actual python code since "-" is not valid for attributes in python
+ w/o significant use of "getattr" etc.
+
+ In short, historically we handle some things in conversions. The
+ use of "extras" has long been a poor design choice that leads to
+ odd/strange inconsistent behaviors because of other choices made in
+ handling data from within the body. (In many cases we convert from
+ "-" to "_" throughout openstack)
+
+ Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9
+
+ """
+ # Authenticate with a user that has the domain admin role
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ # Show that we can read a normal user without any surprises.
+ r = self.get(
+ '/users/%s' % self.just_a_user['id'],
+ auth=self.auth,
+ expected_status=http_client.OK)
+ self.assertValidUserResponse(r)
+
+ # We don't normalize query string keys, so both of these result in a
+ # 403, because we didn't specify a domain_id query string in either
+ # case, and we explicitly require one (it doesn't matter what
+ # 'domain-id' value you use).
+ self.get(
+ '/users?domain-id=%s' % self.domainA['id'],
+ auth=self.auth,
+ expected_status=exception.ForbiddenAction.code)
+ self.get(
+ '/users?domain-id=%s' % self.domainB['id'],
+ auth=self.auth,
+ expected_status=exception.ForbiddenAction.code)
+
+ # If we try updating the user's 'domain_id' by specifying a
+ # 'domain-id', then it'll be stored into extras rather than normalized,
+ # and the user's actual 'domain_id' is not affected.
+ r = self.patch(
+ '/users/%s' % self.just_a_user['id'],
+ auth=self.auth,
+ body={'user': {'domain-id': self.domainB['id']}},
+ expected_status=http_client.OK)
+ self.assertEqual(self.domainB['id'], r.json['user']['domain-id'])
+ self.assertEqual(self.domainA['id'], r.json['user']['domain_id'])
+ self.assertNotEqual(self.domainB['id'], self.just_a_user['domain_id'])
+ self.assertValidUserResponse(r, self.just_a_user)
+
+ # Finally, show that we can create a new user without any surprises.
+ # But if we specify a 'domain-id' instead of a 'domain_id', we get a
+ # Forbidden response because we fail a policy check before
+ # normalization occurs.
+ user_ref = unit.new_user_ref(domain_id=self.domainA['id'])
+ r = self.post(
+ '/users',
+ auth=self.auth,
+ body={'user': user_ref},
+ expected_status=http_client.CREATED)
+ self.assertValidUserResponse(r, ref=user_ref)
+ user_ref['domain-id'] = user_ref.pop('domain_id')
+ self.post(
+ '/users',
+ auth=self.auth,
+ body={'user': user_ref},
+ expected_status=exception.ForbiddenAction.code)
+
def test_user_management_by_cloud_admin(self):
# Test users management with a cloud admin. This user should
# be able to manage users in any domain.
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
self._test_user_management(self.domainA['id'])
@@ -824,7 +948,7 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
# Check whether cloud admin can operate a domain
# other than its own domain or not
@@ -858,10 +982,56 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
self._test_grants('domains', self.domainA['id'])
+ def test_domain_grants_by_cloud_admin_for_domain_specific_role(self):
+ # Test domain grants with a cloud admin. This user should be
+ # able to manage domain roles on any domain.
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ project_id=self.admin_project['id'])
+
+ self._test_grants('domains', self.domainA['id'],
+ role_domain_id=self.domainB['id'])
+
+ def test_domain_grants_by_non_admin_for_domain_specific_role(self):
+ # A non-admin shouldn't be able to do anything
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._test_grants('domains', self.domainA['id'],
+ role_domain_id=self.domainA['id'],
+ expected=exception.ForbiddenAction.code)
+ self._test_grants('domains', self.domainA['id'],
+ role_domain_id=self.domainB['id'],
+ expected=exception.ForbiddenAction.code)
+
+ def test_domain_grants_by_domain_admin_for_domain_specific_role(self):
+ # Authenticate with a user that does have the domain admin role,
+ # should not be able to assign a domain_specific role from another
+ # domain
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._test_grants('domains', self.domainA['id'],
+ role_domain_id=self.domainB['id'],
+ # List status will always be OK, since we are not
+ # granting/checking/deleting assignments
+ list_status_OK=True,
+ expected=exception.ForbiddenAction.code)
+
+ # They should be able to assign a domain specific role from the same
+ # domain
+ self._test_grants('domains', self.domainA['id'],
+ role_domain_id=self.domainA['id'])
+
def test_project_grants(self):
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -890,11 +1060,67 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self._test_grants('projects', self.project['id'])
+ def test_project_grants_by_non_admin_for_domain_specific_role(self):
+ # A non-admin shouldn't be able to do anything
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ project_id=self.project['id'])
+
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainA['id'],
+ expected=exception.ForbiddenAction.code)
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainB['id'],
+ expected=exception.ForbiddenAction.code)
+
+ def test_project_grants_by_project_admin_for_domain_specific_role(self):
+ # Authenticate with a user that does have the project admin role,
+ # should not be able to assign a domain_specific role from another
+ # domain
+ self.auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainB['id'],
+ # List status will always be OK, since we are not
+ # granting/checking/deleting assignments
+ list_status_OK=True,
+ expected=exception.ForbiddenAction.code)
+
+ # They should be able to assign a domain specific role from the same
+ # domain
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainA['id'])
+
+ def test_project_grants_by_domain_admin_for_domain_specific_role(self):
+ # Authenticate with a user that does have the domain admin role,
+ # should not be able to assign a domain_specific role from another
+ # domain
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainB['id'],
+ # List status will always be OK, since we are not
+ # granting/checking/deleting assignments
+ list_status_OK=True,
+ expected=exception.ForbiddenAction.code)
+
+ # They should be able to assign a domain specific role from the same
+ # domain
+ self._test_grants('projects', self.project['id'],
+ role_domain_id=self.domainA['id'])
+
def test_cloud_admin_list_assignments_of_domain(self):
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
collection_url = self.build_role_assignment_query_url(
domain_id=self.domainA['id'])
@@ -968,7 +1194,7 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
collection_url = self.build_role_assignment_query_url(
project_id=self.project['id'])
@@ -990,7 +1216,33 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, project_admin_entity)
self.assertRoleAssignmentInListResponse(r, project_user_entity)
- @unit.utils.wip('waiting on bug #1437407')
+ def test_admin_project_list_assignments_of_project(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ project_id=self.project['id'])
+ r = self.get(collection_url, auth=self.auth)
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=2, resource_url=collection_url)
+
+ project_admin_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.project_admin_user['id'],
+ role_id=self.admin_role['id'],
+ inherited_to_projects=False)
+ project_user_entity = self.build_role_assignment_entity(
+ project_id=self.project['id'],
+ user_id=self.just_a_user['id'],
+ role_id=self.role['id'],
+ inherited_to_projects=False)
+
+ self.assertRoleAssignmentInListResponse(r, project_admin_entity)
+ self.assertRoleAssignmentInListResponse(r, project_user_entity)
+
+ @utils.wip('waiting on bug #1437407')
def test_domain_admin_list_assignments_of_project(self):
self.auth = self.build_authentication_request(
user_id=self.domain_admin_user['id'],
@@ -1017,6 +1269,53 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.assertRoleAssignmentInListResponse(r, project_admin_entity)
self.assertRoleAssignmentInListResponse(r, project_user_entity)
+ def test_domain_admin_list_assignment_tree(self):
+ # Add a child project to the standard test data
+ sub_project = unit.new_project_ref(domain_id=self.domainA['id'],
+ parent_id=self.project['id'])
+ self.resource_api.create_project(sub_project['id'], sub_project)
+ self.assignment_api.create_grant(self.role['id'],
+ user_id=self.just_a_user['id'],
+ project_id=sub_project['id'])
+
+ collection_url = self.build_role_assignment_query_url(
+ project_id=self.project['id'])
+ collection_url += '&include_subtree=True'
+
+ # The domain admin should be able to list the assignment tree
+ auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ r = self.get(collection_url, auth=auth)
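+ # Three assignments are expected: the two on self.project from the
+ # sample data, plus the grant just added on sub_project.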
+ self.assertValidRoleAssignmentListResponse(
+ r, expected_length=3, resource_url=collection_url)
+
+ # A project admin should not be able to list the tree
+ auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ r = self.get(collection_url, auth=auth,
+ expected_status=http_client.FORBIDDEN)
+
+ # Neither should a domain admin from a different domain
+ domainB_admin_user = unit.create_user(
+ self.identity_api,
+ domain_id=self.domainB['id'])
+ self.assignment_api.create_grant(self.admin_role['id'],
+ user_id=domainB_admin_user['id'],
+ domain_id=self.domainB['id'])
+ auth = self.build_authentication_request(
+ user_id=domainB_admin_user['id'],
+ password=domainB_admin_user['password'],
+ domain_id=self.domainB['id'])
+
+ r = self.get(collection_url, auth=auth,
+ expected_status=http_client.FORBIDDEN)
+
def test_domain_user_list_assignments_of_project_failed(self):
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -1040,7 +1339,23 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
+
+ self._test_domain_management()
+
+ def test_admin_project(self):
+ self.auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ self._test_domain_management(
+ expected=exception.ForbiddenAction.code)
+
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ project_id=self.admin_project['id'])
self._test_domain_management()
@@ -1050,16 +1365,15 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
password=self.domain_admin_user['password'],
domain_id=self.domainA['id'])
entity_url = '/domains/%s' % self.domainA['id']
- self.get(entity_url, auth=self.auth, expected_status=200)
+ self.get(entity_url, auth=self.auth)
def test_list_user_credentials(self):
- self.credential_user = self.new_credential_ref(self.just_a_user['id'])
- self.credential_api.create_credential(self.credential_user['id'],
- self.credential_user)
- self.credential_admin = self.new_credential_ref(
- self.cloud_admin_user['id'])
- self.credential_api.create_credential(self.credential_admin['id'],
- self.credential_admin)
+ credential_user = unit.new_credential_ref(self.just_a_user['id'])
+ self.credential_api.create_credential(credential_user['id'],
+ credential_user)
+ credential_admin = unit.new_credential_ref(self.cloud_admin_user['id'])
+ self.credential_api.create_credential(credential_admin['id'],
+ credential_admin)
self.auth = self.build_authentication_request(
user_id=self.just_a_user['id'],
@@ -1075,9 +1389,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
def test_get_and_delete_ec2_credentials(self):
"""Tests getting and deleting ec2 credentials through the ec2 API."""
- another_user = self.new_user_ref(domain_id=self.domainA['id'])
- password = another_user['password']
- another_user = self.identity_api.create_user(another_user)
+ another_user = unit.create_user(self.identity_api,
+ domain_id=self.domainA['id'])
# create a credential for just_a_user
just_user_auth = self.build_authentication_request(
@@ -1091,7 +1404,7 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
# another normal user can't get the credential
another_user_auth = self.build_authentication_request(
user_id=another_user['id'],
- password=password)
+ password=another_user['password'])
another_user_url = '/users/%s/credentials/OS-EC2/%s' % (
another_user['id'], r.result['credential']['access'])
self.get(another_user_url, auth=another_user_auth,
@@ -1160,7 +1473,26 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
admin_auth = self.build_authentication_request(
user_id=self.cloud_admin_user['id'],
password=self.cloud_admin_user['password'],
- domain_id=self.admin_domain['id'])
+ project_id=self.admin_project['id'])
+ admin_token = self.get_requested_token(admin_auth)
+
+ user_auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'])
+ user_token = self.get_requested_token(user_auth)
+
+ self.get('/auth/tokens', token=admin_token,
+ headers={'X-Subject-Token': user_token})
+
+ def test_admin_project_validate_user_token(self):
+ # An admin can validate a user's token.
+ # This is GET /v3/auth/tokens
+
+ admin_auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
admin_token = self.get_requested_token(admin_auth)
user_auth = self.build_authentication_request(
@@ -1182,7 +1514,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
token = self.get_requested_token(auth)
self.head('/auth/tokens', token=token,
- headers={'X-Subject-Token': token}, expected_status=200)
+ headers={'X-Subject-Token': token},
+ expected_status=http_client.OK)
def test_user_check_user_token(self):
# A user can check one of their own tokens.
@@ -1195,7 +1528,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
token2 = self.get_requested_token(auth)
self.head('/auth/tokens', token=token1,
- headers={'X-Subject-Token': token2}, expected_status=200)
+ headers={'X-Subject-Token': token2},
+ expected_status=http_client.OK)
def test_user_check_other_user_token_rejected(self):
# A user cannot check another user's token.
@@ -1231,7 +1565,8 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
user_token = self.get_requested_token(user_auth)
self.head('/auth/tokens', token=admin_token,
- headers={'X-Subject-Token': user_token}, expected_status=200)
+ headers={'X-Subject-Token': user_token},
+ expected_status=http_client.OK)
def test_user_revoke_same_token(self):
# Given a non-admin user token, the token can be used to revoke
@@ -1294,3 +1629,149 @@ class IdentityTestv3CloudPolicySample(test_v3.RestfulTestCase,
self.delete('/auth/tokens', token=admin_token,
headers={'X-Subject-Token': user_token})
+
+ def test_user_with_a_role_get_project(self):
+ user_auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ project_id=self.project['id'])
+
+ # Test user can get project for one they have a role in
+ self.get('/projects/%s' % self.project['id'], auth=user_auth)
+
+ # Test user can not get project for one they don't have a role in,
+ # even if they have a role on another project
+ project2 = unit.new_project_ref(domain_id=self.domainA['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ self.get('/projects/%s' % project2['id'], auth=user_auth,
+ expected_status=exception.ForbiddenAction.code)
+
+ def test_project_admin_get_project(self):
+ admin_auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ resp = self.get('/projects/%s' % self.project['id'], auth=admin_auth)
+ self.assertEqual(self.project['id'],
+ jsonutils.loads(resp.body)['project']['id'])
+
+ def test_role_management_no_admin_no_rights(self):
+ # A non-admin domain user shouldn't be able to manipulate roles
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._role_management_cases(expected=exception.ForbiddenAction.code)
+
+        # ...nor should a non-admin project user
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ project_id=self.project['id'])
+
+ self._role_management_cases(expected=exception.ForbiddenAction.code)
+
+ def test_role_management_with_project_admin(self):
+ # A project admin user should be able to get and list, but not be able
+ # to create/update/delete global roles
+ self.auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
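+        # read_status_OK=True: the get/list cases should succeed, while
+        # the create/update/delete cases should return the ForbiddenAction
+        # code passed via ``expected``.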
+ self._role_management_cases(read_status_OK=True,
+ expected=exception.ForbiddenAction.code)
+
+ def test_role_management_with_domain_admin(self):
+ # A domain admin user should be able to get and list, but not be able
+ # to create/update/delete global roles
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._role_management_cases(read_status_OK=True,
+ expected=exception.ForbiddenAction.code)
+
+ def test_role_management_with_cloud_admin(self):
+ # A cloud admin user should have rights to manipulate global roles
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ project_id=self.admin_project['id'])
+
+ self._role_management_cases()
+
+ def test_domain_role_management_no_admin_no_rights(self):
+ # A non-admin domain user shouldn't be able to manipulate domain roles
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._domain_role_management_cases(
+ self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+        # ...nor should a non-admin project user
+ self.auth = self.build_authentication_request(
+ user_id=self.just_a_user['id'],
+ password=self.just_a_user['password'],
+ project_id=self.project['id'])
+
+ self._domain_role_management_cases(
+ self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+ def test_domain_role_management_with_cloud_admin(self):
+ # A cloud admin user should have rights to manipulate domain roles
+ self.auth = self.build_authentication_request(
+ user_id=self.cloud_admin_user['id'],
+ password=self.cloud_admin_user['password'],
+ project_id=self.admin_project['id'])
+
+ self._domain_role_management_cases(self.domainA['id'])
+
+ def test_domain_role_management_with_domain_admin(self):
+ # A domain admin user should only be able to manipulate the domain
+ # specific roles in their own domain
+ self.auth = self.build_authentication_request(
+ user_id=self.domainB_admin_user['id'],
+ password=self.domainB_admin_user['password'],
+ domain_id=self.domainB['id'])
+
+ # Try to access the domain specific roles in another domain
+ self._domain_role_management_cases(
+ self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+ # ...but they should be able to work with those in their own domain
+ self.auth = self.build_authentication_request(
+ user_id=self.domain_admin_user['id'],
+ password=self.domain_admin_user['password'],
+ domain_id=self.domainA['id'])
+
+ self._domain_role_management_cases(self.domainA['id'])
+
+ def test_domain_role_management_with_project_admin(self):
+        # A project admin user should have no access to domain specific
+        # roles in another domain. They should be able to get and list
+        # domain specific roles from their own domain, but not be able to
+        # create, update or delete them.
+ self.auth = self.build_authentication_request(
+ user_id=self.project_adminB_user['id'],
+ password=self.project_adminB_user['password'],
+ project_id=self.projectB['id'])
+
+        # Try to access the domain specific roles in another domain
+ self._domain_role_management_cases(
+ self.domainA['id'], expected=exception.ForbiddenAction.code)
+
+        # ...but they should be able to work with those in their own domain
+ self.auth = self.build_authentication_request(
+ user_id=self.project_admin_user['id'],
+ password=self.project_admin_user['password'],
+ project_id=self.project['id'])
+
+ self._domain_role_management_cases(
+ self.domainA['id'], read_status_OK=True,
+ expected=exception.ForbiddenAction.code)
diff --git a/keystone-moon/keystone/tests/unit/test_v3_resource.py b/keystone-moon/keystone/tests/unit/test_v3_resource.py
new file mode 100644
index 00000000..f54fcb57
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/test_v3_resource.py
@@ -0,0 +1,1434 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_config import cfg
+from six.moves import http_client
+from six.moves import range
+from testtools import matchers
+
+from keystone.common import controller
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import test_v3
+from keystone.tests.unit import utils as test_utils
+
+
+CONF = cfg.CONF
+
+
+class ResourceTestCase(test_v3.RestfulTestCase,
+ test_v3.AssignmentTestMixin):
+ """Test domains and projects."""
+
+ # Domain CRUD tests
+
+ def test_create_domain(self):
+ """Call ``POST /domains``."""
+ ref = unit.new_domain_ref()
+ r = self.post(
+ '/domains',
+ body={'domain': ref})
+ return self.assertValidDomainResponse(r, ref)
+
+ def test_create_domain_case_sensitivity(self):
+ """Call `POST /domains`` twice with upper() and lower() cased name."""
+ ref = unit.new_domain_ref()
+
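+        # Domain names are case sensitive, so posting the same name in a
+        # different case should create a second, distinct domain.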
+ # ensure the name is lowercase
+ ref['name'] = ref['name'].lower()
+ r = self.post(
+ '/domains',
+ body={'domain': ref})
+ self.assertValidDomainResponse(r, ref)
+
+ # ensure the name is uppercase
+ ref['name'] = ref['name'].upper()
+ r = self.post(
+ '/domains',
+ body={'domain': ref})
+ self.assertValidDomainResponse(r, ref)
+
+ def test_create_domain_bad_request(self):
+ """Call ``POST /domains``."""
+ self.post('/domains', body={'domain': {}},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_domain_unsafe(self):
+ """Call ``POST /domains with unsafe names``."""
+ unsafe_name = 'i am not / safe'
+
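+        # With url-safety checking 'off' the unsafe name is accepted;
+        # both the 'new' and 'strict' settings should reject it.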
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='off')
+ ref = unit.new_domain_ref(name=unsafe_name)
+ self.post(
+ '/domains',
+ body={'domain': ref})
+
+ for config_setting in ['new', 'strict']:
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe=config_setting)
+ ref = unit.new_domain_ref(name=unsafe_name)
+ self.post(
+ '/domains',
+ body={'domain': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_domain_unsafe_default(self):
+ """Check default for unsafe names for ``POST /domains``."""
+ unsafe_name = 'i am not / safe'
+
+ # By default, we should be able to create unsafe names
+ ref = unit.new_domain_ref(name=unsafe_name)
+ self.post(
+ '/domains',
+ body={'domain': ref})
+
+ def test_create_domain_creates_is_domain_project(self):
+ """Check a project that acts as a domain is created.
+
+ Call ``POST /domains``.
+ """
+ # Create a new domain
+ domain_ref = unit.new_domain_ref()
+ r = self.post('/domains', body={'domain': domain_ref})
+ self.assertValidDomainResponse(r, domain_ref)
+
+        # Retrieve its corresponding project
+ r = self.get('/projects/%(project_id)s' % {
+ 'project_id': r.result['domain']['id']})
+ self.assertValidProjectResponse(r)
+
+ # The created project has is_domain flag as True
+ self.assertTrue(r.result['project']['is_domain'])
+
+        # And, as the root of its hierarchy, its parent_id and domain_id
+        # attributes are both None
+ self.assertIsNone(r.result['project']['parent_id'])
+ self.assertIsNone(r.result['project']['domain_id'])
+
+ def test_create_is_domain_project_creates_domain(self):
+ """Call ``POST /projects`` is_domain and check a domain is created."""
+ # Create a new project that acts as a domain
+ project_ref = unit.new_project_ref(domain_id=None, is_domain=True)
+ r = self.post('/projects', body={'project': project_ref})
+ self.assertValidProjectResponse(r)
+
+        # Retrieve its corresponding domain
+ r = self.get('/domains/%(domain_id)s' % {
+ 'domain_id': r.result['project']['id']})
+ self.assertValidDomainResponse(r)
+ self.assertIsNotNone(r.result['domain'])
+
+ def test_list_domains(self):
+ """Call ``GET /domains``."""
+ resource_url = '/domains'
+ r = self.get(resource_url)
+ self.assertValidDomainListResponse(r, ref=self.domain,
+ resource_url=resource_url)
+
+ def test_get_domain(self):
+ """Call ``GET /domains/{domain_id}``."""
+ r = self.get('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain_id})
+ self.assertValidDomainResponse(r, self.domain)
+
+ def test_update_domain(self):
+ """Call ``PATCH /domains/{domain_id}``."""
+ ref = unit.new_domain_ref()
+ del ref['id']
+ r = self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain_id},
+ body={'domain': ref})
+ self.assertValidDomainResponse(r, ref)
+
+ def test_update_domain_unsafe(self):
+ """Call ``POST /domains/{domain_id} with unsafe names``."""
+ unsafe_name = 'i am not / safe'
+
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe='off')
+ ref = unit.new_domain_ref(name=unsafe_name)
+ del ref['id']
+ self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain_id},
+ body={'domain': ref})
+
+ unsafe_name = 'i am still not / safe'
+ for config_setting in ['new', 'strict']:
+ self.config_fixture.config(group='resource',
+ domain_name_url_safe=config_setting)
+ ref = unit.new_domain_ref(name=unsafe_name)
+ del ref['id']
+ self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain_id},
+ body={'domain': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_update_domain_unsafe_default(self):
+ """Check default for unsafe names for ``POST /domains``."""
+ unsafe_name = 'i am not / safe'
+
+        # By default, we should be able to update to unsafe names
+ ref = unit.new_domain_ref(name=unsafe_name)
+ del ref['id']
+ self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain_id},
+ body={'domain': ref})
+
+ def test_update_domain_updates_is_domain_project(self):
+ """Check the project that acts as a domain is updated.
+
+ Call ``PATCH /domains``.
+ """
+ # Create a new domain
+ domain_ref = unit.new_domain_ref()
+ r = self.post('/domains', body={'domain': domain_ref})
+ self.assertValidDomainResponse(r, domain_ref)
+
+ # Disable it
+ self.patch('/domains/%s' % r.result['domain']['id'],
+ body={'domain': {'enabled': False}})
+
+        # Retrieve its corresponding project
+ r = self.get('/projects/%(project_id)s' % {
+ 'project_id': r.result['domain']['id']})
+ self.assertValidProjectResponse(r)
+
+ # The created project is disabled as well
+ self.assertFalse(r.result['project']['enabled'])
+
+ def test_disable_domain(self):
+ """Call ``PATCH /domains/{domain_id}`` (set enabled=False)."""
+ # Create a 2nd set of entities in a 2nd domain
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+
+ project2 = unit.new_project_ref(domain_id=domain2['id'])
+ self.resource_api.create_project(project2['id'], project2)
+
+ user2 = unit.create_user(self.identity_api,
+ domain_id=domain2['id'],
+ project_id=project2['id'])
+
+ self.assignment_api.add_user_to_project(project2['id'],
+ user2['id'])
+
+ # First check a user in that domain can authenticate..
+ body = {
+ 'auth': {
+ 'passwordCredentials': {
+ 'userId': user2['id'],
+ 'password': user2['password']
+ },
+ 'tenantId': project2['id']
+ }
+ }
+ self.admin_request(
+ path='/v2.0/tokens', method='POST', body=body)
+
+ auth_data = self.build_authentication_request(
+ user_id=user2['id'],
+ password=user2['password'],
+ project_id=project2['id'])
+ self.v3_create_token(auth_data)
+
+ # Now disable the domain
+ domain2['enabled'] = False
+ r = self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': domain2['id']},
+ body={'domain': {'enabled': False}})
+ self.assertValidDomainResponse(r, domain2)
+
+ # Make sure the user can no longer authenticate, via
+ # either API
+ body = {
+ 'auth': {
+ 'passwordCredentials': {
+ 'userId': user2['id'],
+ 'password': user2['password']
+ },
+ 'tenantId': project2['id']
+ }
+ }
+ self.admin_request(
+ path='/v2.0/tokens', method='POST', body=body,
+ expected_status=http_client.UNAUTHORIZED)
+
+ # Try looking up in v3 by name and id
+ auth_data = self.build_authentication_request(
+ user_id=user2['id'],
+ password=user2['password'],
+ project_id=project2['id'])
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ auth_data = self.build_authentication_request(
+ username=user2['name'],
+ user_domain_id=domain2['id'],
+ password=user2['password'],
+ project_id=project2['id'])
+ self.v3_create_token(auth_data,
+ expected_status=http_client.UNAUTHORIZED)
+
+ def test_delete_enabled_domain_fails(self):
+ """Call ``DELETE /domains/{domain_id}`` (when domain enabled)."""
+ # Try deleting an enabled domain, which should fail
+ self.delete('/domains/%(domain_id)s' % {
+ 'domain_id': self.domain['id']},
+ expected_status=exception.ForbiddenAction.code)
+
+ def test_delete_domain(self):
+ """Call ``DELETE /domains/{domain_id}``.
+
+ The sample data set up already has a user and project that is part of
+ self.domain. Additionally we will create a group and a credential
+ within it. Since the user we will authenticate with is in this domain,
+        we create another set of entities in a second domain. Deleting this
+ second domain should delete all these new entities. In addition,
+ all the entities in the regular self.domain should be unaffected
+ by the delete.
+
+ Test Plan:
+
+ - Create domain2 and a 2nd set of entities
+ - Disable domain2
+ - Delete domain2
+ - Check entities in domain2 have been deleted
+ - Check entities in self.domain are unaffected
+
+ """
+ # Create a group and a credential in the main domain
+ group = unit.new_group_ref(domain_id=self.domain_id)
+ group = self.identity_api.create_group(group)
+
+ credential = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=self.project_id)
+ self.credential_api.create_credential(credential['id'], credential)
+
+ # Create a 2nd set of entities in a 2nd domain
+ domain2 = unit.new_domain_ref()
+ self.resource_api.create_domain(domain2['id'], domain2)
+
+ project2 = unit.new_project_ref(domain_id=domain2['id'])
+ project2 = self.resource_api.create_project(project2['id'], project2)
+
+ user2 = unit.new_user_ref(domain_id=domain2['id'],
+ project_id=project2['id'])
+ user2 = self.identity_api.create_user(user2)
+
+ group2 = unit.new_group_ref(domain_id=domain2['id'])
+ group2 = self.identity_api.create_group(group2)
+
+ credential2 = unit.new_credential_ref(user_id=user2['id'],
+ project_id=project2['id'])
+ self.credential_api.create_credential(credential2['id'],
+ credential2)
+
+ # Now disable the new domain and delete it
+ domain2['enabled'] = False
+ r = self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': domain2['id']},
+ body={'domain': {'enabled': False}})
+ self.assertValidDomainResponse(r, domain2)
+ self.delete('/domains/%(domain_id)s' % {'domain_id': domain2['id']})
+
+ # Check all the domain2 relevant entities are gone
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ domain2['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ project2['id'])
+ self.assertRaises(exception.GroupNotFound,
+ self.identity_api.get_group,
+ group2['id'])
+ self.assertRaises(exception.UserNotFound,
+ self.identity_api.get_user,
+ user2['id'])
+ self.assertRaises(exception.CredentialNotFound,
+ self.credential_api.get_credential,
+ credential2['id'])
+
+ # ...and that all self.domain entities are still here
+ r = self.resource_api.get_domain(self.domain['id'])
+ self.assertDictEqual(self.domain, r)
+ r = self.resource_api.get_project(self.project['id'])
+ self.assertDictEqual(self.project, r)
+ r = self.identity_api.get_group(group['id'])
+ self.assertDictEqual(group, r)
+ r = self.identity_api.get_user(self.user['id'])
+ self.user.pop('password')
+ self.assertDictEqual(self.user, r)
+ r = self.credential_api.get_credential(credential['id'])
+ self.assertDictEqual(credential, r)
+
+ def test_delete_domain_deletes_is_domain_project(self):
+ """Check the project that acts as a domain is deleted.
+
+ Call ``DELETE /domains``.
+ """
+ # Create a new domain
+ domain_ref = unit.new_domain_ref()
+ r = self.post('/domains', body={'domain': domain_ref})
+ self.assertValidDomainResponse(r, domain_ref)
+
+        # Retrieve its corresponding project
+ self.get('/projects/%(project_id)s' % {
+ 'project_id': r.result['domain']['id']})
+
+ # Delete the domain
+ self.patch('/domains/%s' % r.result['domain']['id'],
+ body={'domain': {'enabled': False}})
+ self.delete('/domains/%s' % r.result['domain']['id'])
+
+ # The created project is deleted as well
+ self.get('/projects/%(project_id)s' % {
+            'project_id': r.result['domain']['id']},
+            expected_status=http_client.NOT_FOUND)
+
+ def test_delete_default_domain(self):
+ # Need to disable it first.
+ self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': CONF.identity.default_domain_id},
+ body={'domain': {'enabled': False}})
+
+ self.delete(
+ '/domains/%(domain_id)s' % {
+ 'domain_id': CONF.identity.default_domain_id})
+
+ def test_token_revoked_once_domain_disabled(self):
+ """Test token from a disabled domain has been invalidated.
+
+ Test that a token that was valid for an enabled domain
+ becomes invalid once that domain is disabled.
+
+ """
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+
+ user2 = unit.create_user(self.identity_api,
+ domain_id=domain['id'])
+
+ # build a request body
+ auth_body = self.build_authentication_request(
+ user_id=user2['id'],
+ password=user2['password'])
+
+ # sends a request for the user's token
+ token_resp = self.post('/auth/tokens', body=auth_body)
+
+ subject_token = token_resp.headers.get('x-subject-token')
+
+ # validates the returned token and it should be valid.
+ self.head('/auth/tokens',
+ headers={'x-subject-token': subject_token},
+ expected_status=http_client.OK)
+
+ # now disable the domain
+ domain['enabled'] = False
+ url = "/domains/%(domain_id)s" % {'domain_id': domain['id']}
+ self.patch(url,
+ body={'domain': {'enabled': False}})
+
+ # validates the same token again and it should be 'not found'
+ # as the domain has already been disabled.
+ self.head('/auth/tokens',
+ headers={'x-subject-token': subject_token},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_delete_domain_hierarchy(self):
+ """Call ``DELETE /domains/{domain_id}``."""
+ domain = unit.new_domain_ref()
+ self.resource_api.create_domain(domain['id'], domain)
+
+ root_project = unit.new_project_ref(domain_id=domain['id'])
+ root_project = self.resource_api.create_project(root_project['id'],
+ root_project)
+
+ leaf_project = unit.new_project_ref(
+ domain_id=domain['id'],
+ parent_id=root_project['id'])
+ self.resource_api.create_project(leaf_project['id'], leaf_project)
+
+ # Need to disable it first.
+ self.patch('/domains/%(domain_id)s' % {
+ 'domain_id': domain['id']},
+ body={'domain': {'enabled': False}})
+
+ self.delete(
+ '/domains/%(domain_id)s' % {
+ 'domain_id': domain['id']})
+
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.get_domain,
+ domain['id'])
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ root_project['id'])
+
+ self.assertRaises(exception.ProjectNotFound,
+ self.resource_api.get_project,
+ leaf_project['id'])
+
+ def test_forbid_operations_on_federated_domain(self):
+ """Make sure one cannot operate on federated domain.
+
+ This includes operations like create, update, delete
+ on domain identified by id and name where difference variations of
+ id 'Federated' are used.
+
+ """
+ def create_domains():
+ for variation in ('Federated', 'FEDERATED',
+ 'federated', 'fEderated'):
+ domain = unit.new_domain_ref()
+ domain['id'] = variation
+ yield domain
+
+ for domain in create_domains():
+ self.assertRaises(
+ AssertionError, self.resource_api.create_domain,
+ domain['id'], domain)
+ self.assertRaises(
+ AssertionError, self.resource_api.update_domain,
+ domain['id'], domain)
+ self.assertRaises(
+ exception.DomainNotFound, self.resource_api.delete_domain,
+ domain['id'])
+
+ # swap 'name' with 'id' and try again, expecting the request to
+ # gracefully fail
+ domain['id'], domain['name'] = domain['name'], domain['id']
+ self.assertRaises(
+ AssertionError, self.resource_api.create_domain,
+ domain['id'], domain)
+ self.assertRaises(
+ AssertionError, self.resource_api.update_domain,
+ domain['id'], domain)
+ self.assertRaises(
+ exception.DomainNotFound, self.resource_api.delete_domain,
+ domain['id'])
+
+ def test_forbid_operations_on_defined_federated_domain(self):
+ """Make sure one cannot operate on a user-defined federated domain.
+
+ This includes operations like create, update, delete.
+
+ """
+ non_default_name = 'beta_federated_domain'
+ self.config_fixture.config(group='federation',
+ federated_domain_name=non_default_name)
+ domain = unit.new_domain_ref(name=non_default_name)
+ self.assertRaises(AssertionError,
+ self.resource_api.create_domain,
+ domain['id'], domain)
+ self.assertRaises(exception.DomainNotFound,
+ self.resource_api.delete_domain,
+ domain['id'])
+ self.assertRaises(AssertionError,
+ self.resource_api.update_domain,
+ domain['id'], domain)
+
+ # Project CRUD tests
+
+ def test_list_projects(self):
+ """Call ``GET /projects``."""
+ resource_url = '/projects'
+ r = self.get(resource_url)
+ self.assertValidProjectListResponse(r, ref=self.project,
+ resource_url=resource_url)
+
+ def test_create_project(self):
+ """Call ``POST /projects``."""
+ ref = unit.new_project_ref(domain_id=self.domain_id)
+ r = self.post(
+ '/projects',
+ body={'project': ref})
+ self.assertValidProjectResponse(r, ref)
+
+ def test_create_project_bad_request(self):
+ """Call ``POST /projects``."""
+ self.post('/projects', body={'project': {}},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_project_invalid_domain_id(self):
+ """Call ``POST /projects``."""
+ ref = unit.new_project_ref(domain_id=uuid.uuid4().hex)
+ self.post('/projects', body={'project': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_project_unsafe(self):
+ """Call ``POST /projects with unsafe names``."""
+ unsafe_name = 'i am not / safe'
+
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='off')
+ ref = unit.new_project_ref(name=unsafe_name)
+ self.post(
+ '/projects',
+ body={'project': ref})
+
+ for config_setting in ['new', 'strict']:
+ self.config_fixture.config(group='resource',
+ project_name_url_safe=config_setting)
+ ref = unit.new_project_ref(name=unsafe_name)
+ self.post(
+ '/projects',
+ body={'project': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_project_unsafe_default(self):
+ """Check default for unsafe names for ``POST /projects``."""
+ unsafe_name = 'i am not / safe'
+
+ # By default, we should be able to create unsafe names
+ ref = unit.new_project_ref(name=unsafe_name)
+ self.post(
+ '/projects',
+ body={'project': ref})
+
+ def test_create_project_with_parent_id_none_and_domain_id_none(self):
+ """Call ``POST /projects``."""
+ # Grant a domain role for the user
+ collection_url = (
+ '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+ 'domain_id': self.domain_id,
+ 'user_id': self.user['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+ self.put(member_url)
+
+ # Create an authentication request for a domain scoped token
+ auth = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ domain_id=self.domain_id)
+
+ # Without parent_id and domain_id passed as None, the domain_id should
+ # be normalized to the domain on the token, when using a domain
+ # scoped token.
+ ref = unit.new_project_ref()
+ r = self.post(
+ '/projects',
+ auth=auth,
+ body={'project': ref})
+ ref['domain_id'] = self.domain['id']
+ self.assertValidProjectResponse(r, ref)
+
+ def test_create_project_without_parent_id_and_without_domain_id(self):
+ """Call ``POST /projects``."""
+ # Grant a domain role for the user
+ collection_url = (
+ '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+ 'domain_id': self.domain_id,
+ 'user_id': self.user['id']})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+ self.put(member_url)
+
+ # Create an authentication request for a domain scoped token
+ auth = self.build_authentication_request(
+ user_id=self.user['id'],
+ password=self.user['password'],
+ domain_id=self.domain_id)
+
+ # Without domain_id and parent_id, the domain_id should be
+ # normalized to the domain on the token, when using a domain
+ # scoped token.
+ ref = unit.new_project_ref()
+ r = self.post(
+ '/projects',
+ auth=auth,
+ body={'project': ref})
+ ref['domain_id'] = self.domain['id']
+ self.assertValidProjectResponse(r, ref)
+
+ @test_utils.wip('waiting for support for parent_id to imply domain_id')
+ def test_create_project_with_parent_id_and_no_domain_id(self):
+ """Call ``POST /projects``."""
+ # With only the parent_id, the domain_id should be
+ # normalized to the parent's domain_id
+ ref_child = unit.new_project_ref(parent_id=self.project['id'])
+
+ r = self.post(
+ '/projects',
+ body={'project': ref_child})
+ self.assertEqual(r.result['project']['domain_id'],
+ self.project['domain_id'])
+ ref_child['domain_id'] = self.domain['id']
+ self.assertValidProjectResponse(r, ref_child)
+
+ def _create_projects_hierarchy(self, hierarchy_size=1):
+ """Creates a single-branched project hierarchy with the specified size.
+
+ :param hierarchy_size: the desired hierarchy size, default is 1 -
+ a project with one child.
+
+        :returns: a list of the projects in the created hierarchy.
+
+ """
+ new_ref = unit.new_project_ref(domain_id=self.domain_id)
+ resp = self.post('/projects', body={'project': new_ref})
+
+ projects = [resp.result]
+
+ for i in range(hierarchy_size):
+ new_ref = unit.new_project_ref(
+ domain_id=self.domain_id,
+ parent_id=projects[i]['project']['id'])
+ resp = self.post('/projects',
+ body={'project': new_ref})
+ self.assertValidProjectResponse(resp, new_ref)
+
+ projects.append(resp.result)
+
+ return projects
+
+ def test_list_projects_filtering_by_parent_id(self):
+ """Call ``GET /projects?parent_id={project_id}``."""
+ projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+ # Add another child to projects[1] - it will be projects[3]
+ new_ref = unit.new_project_ref(
+ domain_id=self.domain_id,
+ parent_id=projects[1]['project']['id'])
+ resp = self.post('/projects',
+ body={'project': new_ref})
+ self.assertValidProjectResponse(resp, new_ref)
+
+ projects.append(resp.result)
+
+ # Query for projects[0] immediate children - it will
+ # be only projects[1]
+ r = self.get(
+ '/projects?parent_id=%(project_id)s' % {
+ 'project_id': projects[0]['project']['id']})
+ self.assertValidProjectListResponse(r)
+
+ projects_result = r.result['projects']
+ expected_list = [projects[1]['project']]
+
+ # projects[0] has projects[1] as child
+ self.assertEqual(expected_list, projects_result)
+
+ # Query for projects[1] immediate children - it will
+ # be projects[2] and projects[3]
+ r = self.get(
+ '/projects?parent_id=%(project_id)s' % {
+ 'project_id': projects[1]['project']['id']})
+ self.assertValidProjectListResponse(r)
+
+ projects_result = r.result['projects']
+ expected_list = [projects[2]['project'], projects[3]['project']]
+
+ # projects[1] has projects[2] and projects[3] as children
+ self.assertEqual(expected_list, projects_result)
+
+ # Query for projects[2] immediate children - it will be an empty list
+ r = self.get(
+ '/projects?parent_id=%(project_id)s' % {
+ 'project_id': projects[2]['project']['id']})
+ self.assertValidProjectListResponse(r)
+
+ projects_result = r.result['projects']
+ expected_list = []
+
+ # projects[2] has no child, projects_result must be an empty list
+ self.assertEqual(expected_list, projects_result)
+
+ def test_create_hierarchical_project(self):
+ """Call ``POST /projects``."""
+ self._create_projects_hierarchy()
+
+ def test_get_project(self):
+ """Call ``GET /projects/{project_id}``."""
+ r = self.get(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id})
+ self.assertValidProjectResponse(r, self.project)
+
+ def test_get_project_with_parents_as_list_with_invalid_id(self):
+ """Call ``GET /projects/{project_id}?parents_as_list``."""
+ self.get('/projects/%(project_id)s?parents_as_list' % {
+ 'project_id': None}, expected_status=http_client.NOT_FOUND)
+
+ self.get('/projects/%(project_id)s?parents_as_list' % {
+ 'project_id': uuid.uuid4().hex},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_get_project_with_subtree_as_list_with_invalid_id(self):
+ """Call ``GET /projects/{project_id}?subtree_as_list``."""
+ self.get('/projects/%(project_id)s?subtree_as_list' % {
+ 'project_id': None}, expected_status=http_client.NOT_FOUND)
+
+ self.get('/projects/%(project_id)s?subtree_as_list' % {
+ 'project_id': uuid.uuid4().hex},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_get_project_with_parents_as_ids(self):
+ """Call ``GET /projects/{project_id}?parents_as_ids``."""
+ projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+ # Query for projects[2] parents_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?parents_as_ids' % {
+ 'project_id': projects[2]['project']['id']})
+
+ self.assertValidProjectResponse(r, projects[2]['project'])
+ parents_as_ids = r.result['project']['parents']
+
+ # Assert parents_as_ids is a structured dictionary correctly
+ # representing the hierarchy. The request was made using projects[2]
+ # id, hence its parents should be projects[1], projects[0] and the
+ # is_domain_project, which is the root of the hierarchy. It should
+ # have the following structure:
+ # {
+ # projects[1]: {
+ # projects[0]: {
+ # is_domain_project: None
+ # }
+ # }
+ # }
+ is_domain_project_id = projects[0]['project']['domain_id']
+ expected_dict = {
+ projects[1]['project']['id']: {
+ projects[0]['project']['id']: {is_domain_project_id: None}
+ }
+ }
+ self.assertDictEqual(expected_dict, parents_as_ids)
+
+ # Query for projects[0] parents_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?parents_as_ids' % {
+ 'project_id': projects[0]['project']['id']})
+
+ self.assertValidProjectResponse(r, projects[0]['project'])
+ parents_as_ids = r.result['project']['parents']
+
+ # projects[0] has only the project that acts as a domain as parent
+ expected_dict = {
+ is_domain_project_id: None
+ }
+ self.assertDictEqual(expected_dict, parents_as_ids)
+
+ # Query for is_domain_project parents_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?parents_as_ids' % {
+ 'project_id': is_domain_project_id})
+
+ parents_as_ids = r.result['project']['parents']
+
+ # the project that acts as a domain has no parents, parents_as_ids
+ # must be None
+ self.assertIsNone(parents_as_ids)
+
+ def test_get_project_with_parents_as_list_with_full_access(self):
+ """``GET /projects/{project_id}?parents_as_list`` with full access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on each one of those projects;
+ - Check that calling parents_as_list on 'subproject' returns both
+ 'project' and 'parent'.
+
+ """
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on all the created projects
+ for proj in (parent, project, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?parents_as_list' %
+ {'project_id': subproject['project']['id']})
+ self.assertValidProjectResponse(r, subproject['project'])
+
+ # Assert only 'project' and 'parent' are in the parents list
+ self.assertIn(project, r.result['project']['parents'])
+ self.assertIn(parent, r.result['project']['parents'])
+ self.assertEqual(2, len(r.result['project']['parents']))
+
+ def test_get_project_with_parents_as_list_with_partial_access(self):
+ """``GET /projects/{project_id}?parents_as_list`` with partial access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on 'parent' and 'subproject';
+ - Check that calling parents_as_list on 'subproject' only returns
+ 'parent'.
+
+ """
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on parent and subproject
+ for proj in (parent, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?parents_as_list' %
+ {'project_id': subproject['project']['id']})
+ self.assertValidProjectResponse(r, subproject['project'])
+
+ # Assert only 'parent' is in the parents list
+ self.assertIn(parent, r.result['project']['parents'])
+ self.assertEqual(1, len(r.result['project']['parents']))
+
+ def test_get_project_with_parents_as_list_and_parents_as_ids(self):
+ """Attempt to list a project's parents as both a list and as IDs.
+
+ This uses ``GET /projects/{project_id}?parents_as_list&parents_as_ids``
+ which should fail with a Bad Request due to the conflicting query
+ strings.
+
+ """
+ projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+ self.get(
+ '/projects/%(project_id)s?parents_as_list&parents_as_ids' % {
+ 'project_id': projects[1]['project']['id']},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_list_project_is_domain_filter(self):
+ """Call ``GET /projects?is_domain=True/False``."""
+ # Get the initial number of projects, both acting as a domain as well
+ # as regular.
+        r = self.get('/projects?is_domain=True')
+        initial_number_is_domain_true = len(r.result['projects'])
+        r = self.get('/projects?is_domain=False')
+        initial_number_is_domain_false = len(r.result['projects'])
+
+ # Add some more projects acting as domains
+ new_is_domain_project = unit.new_project_ref(is_domain=True)
+ new_is_domain_project = self.resource_api.create_project(
+ new_is_domain_project['id'], new_is_domain_project)
+ new_is_domain_project2 = unit.new_project_ref(is_domain=True)
+ new_is_domain_project2 = self.resource_api.create_project(
+ new_is_domain_project2['id'], new_is_domain_project2)
+ number_is_domain_true = initial_number_is_domain_true + 2
+
+        r = self.get('/projects?is_domain=True')
+ self.assertThat(r.result['projects'],
+ matchers.HasLength(number_is_domain_true))
+ self.assertIn(new_is_domain_project['id'],
+ [p['id'] for p in r.result['projects']])
+ self.assertIn(new_is_domain_project2['id'],
+ [p['id'] for p in r.result['projects']])
+
+ # Now add a regular project
+ new_regular_project = unit.new_project_ref(domain_id=self.domain_id)
+ new_regular_project = self.resource_api.create_project(
+ new_regular_project['id'], new_regular_project)
+ number_is_domain_false = initial_number_is_domain_false + 1
+
+ # Check we still have the same number of projects acting as domains
+        r = self.get('/projects?is_domain=True')
+ self.assertThat(r.result['projects'],
+ matchers.HasLength(number_is_domain_true))
+
+ # Check the number of regular projects is correct
+        r = self.get('/projects?is_domain=False')
+ self.assertThat(r.result['projects'],
+ matchers.HasLength(number_is_domain_false))
+ self.assertIn(new_regular_project['id'],
+ [p['id'] for p in r.result['projects']])
+
+ def test_list_project_is_domain_filter_default(self):
+ """Default project list should not see projects acting as domains"""
+ # Get the initial count of regular projects
+        r = self.get('/projects?is_domain=False')
+ number_is_domain_false = len(r.result['projects'])
+
+ # Make sure we have at least one project acting as a domain
+ new_is_domain_project = unit.new_project_ref(is_domain=True)
+ new_is_domain_project = self.resource_api.create_project(
+ new_is_domain_project['id'], new_is_domain_project)
+
+        r = self.get('/projects')
+ self.assertThat(r.result['projects'],
+ matchers.HasLength(number_is_domain_false))
+ self.assertNotIn(new_is_domain_project, r.result['projects'])
+
+ def test_get_project_with_subtree_as_ids(self):
+ """Call ``GET /projects/{project_id}?subtree_as_ids``.
+
+ This test creates a more complex hierarchy to test if the structured
+ dictionary returned by using the ``subtree_as_ids`` query param
+ correctly represents the hierarchy.
+
+ The hierarchy contains 5 projects with the following structure::
+
+ +--A--+
+ | |
+ +--B--+ C
+ | |
+ D E
+
+ """
+ projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+ # Add another child to projects[0] - it will be projects[3]
+ new_ref = unit.new_project_ref(
+ domain_id=self.domain_id,
+ parent_id=projects[0]['project']['id'])
+ resp = self.post('/projects',
+ body={'project': new_ref})
+ self.assertValidProjectResponse(resp, new_ref)
+ projects.append(resp.result)
+
+ # Add another child to projects[1] - it will be projects[4]
+ new_ref = unit.new_project_ref(
+ domain_id=self.domain_id,
+ parent_id=projects[1]['project']['id'])
+ resp = self.post('/projects',
+ body={'project': new_ref})
+ self.assertValidProjectResponse(resp, new_ref)
+ projects.append(resp.result)
+
+ # Query for projects[0] subtree_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?subtree_as_ids' % {
+ 'project_id': projects[0]['project']['id']})
+ self.assertValidProjectResponse(r, projects[0]['project'])
+ subtree_as_ids = r.result['project']['subtree']
+
+ # The subtree hierarchy from projects[0] should have the following
+ # structure:
+ # {
+ # projects[1]: {
+ # projects[2]: None,
+ # projects[4]: None
+ # },
+ # projects[3]: None
+ # }
+ expected_dict = {
+ projects[1]['project']['id']: {
+ projects[2]['project']['id']: None,
+ projects[4]['project']['id']: None
+ },
+ projects[3]['project']['id']: None
+ }
+ self.assertDictEqual(expected_dict, subtree_as_ids)
+
+ # Now query for projects[1] subtree_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?subtree_as_ids' % {
+ 'project_id': projects[1]['project']['id']})
+ self.assertValidProjectResponse(r, projects[1]['project'])
+ subtree_as_ids = r.result['project']['subtree']
+
+ # The subtree hierarchy from projects[1] should have the following
+ # structure:
+ # {
+ # projects[2]: None,
+ # projects[4]: None
+ # }
+ expected_dict = {
+ projects[2]['project']['id']: None,
+ projects[4]['project']['id']: None
+ }
+ self.assertDictEqual(expected_dict, subtree_as_ids)
+
+ # Now query for projects[3] subtree_as_ids
+ r = self.get(
+ '/projects/%(project_id)s?subtree_as_ids' % {
+ 'project_id': projects[3]['project']['id']})
+ self.assertValidProjectResponse(r, projects[3]['project'])
+ subtree_as_ids = r.result['project']['subtree']
+
+ # projects[3] has no subtree, subtree_as_ids must be None
+ self.assertIsNone(subtree_as_ids)
+
+ def test_get_project_with_subtree_as_list_with_full_access(self):
+ """``GET /projects/{project_id}?subtree_as_list`` with full access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on each one of those projects;
+ - Check that calling subtree_as_list on 'parent' returns both 'parent'
+ and 'subproject'.
+
+ """
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on all the created projects
+ for proj in (parent, project, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?subtree_as_list' %
+ {'project_id': parent['project']['id']})
+ self.assertValidProjectResponse(r, parent['project'])
+
+ # Assert only 'project' and 'subproject' are in the subtree
+ self.assertIn(project, r.result['project']['subtree'])
+ self.assertIn(subproject, r.result['project']['subtree'])
+ self.assertEqual(2, len(r.result['project']['subtree']))
+
+ def test_get_project_with_subtree_as_list_with_partial_access(self):
+ """``GET /projects/{project_id}?subtree_as_list`` with partial access.
+
+ Test plan:
+
+ - Create 'parent', 'project' and 'subproject' projects;
+ - Assign a user a role on 'parent' and 'subproject';
+ - Check that calling subtree_as_list on 'parent' returns 'subproject'.
+
+ """
+ # Create the project hierarchy
+ parent, project, subproject = self._create_projects_hierarchy(2)
+
+ # Assign a role for the user on parent and subproject
+ for proj in (parent, subproject):
+ self.put(self.build_role_assignment_link(
+ role_id=self.role_id, user_id=self.user_id,
+ project_id=proj['project']['id']))
+
+ # Make the API call
+ r = self.get('/projects/%(project_id)s?subtree_as_list' %
+ {'project_id': parent['project']['id']})
+ self.assertValidProjectResponse(r, parent['project'])
+
+ # Assert only 'subproject' is in the subtree
+ self.assertIn(subproject, r.result['project']['subtree'])
+ self.assertEqual(1, len(r.result['project']['subtree']))
+
+ def test_get_project_with_subtree_as_list_and_subtree_as_ids(self):
+ """Attempt to get a project subtree as both a list and as IDs.
+
+ This uses ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids``
+ which should fail with a bad request due to the conflicting query
+ strings.
+
+ """
+ projects = self._create_projects_hierarchy(hierarchy_size=2)
+
+ self.get(
+ '/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {
+ 'project_id': projects[1]['project']['id']},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_update_project(self):
+ """Call ``PATCH /projects/{project_id}``."""
+ ref = unit.new_project_ref(domain_id=self.domain_id,
+ parent_id=self.project['parent_id'])
+ del ref['id']
+ r = self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id},
+ body={'project': ref})
+ self.assertValidProjectResponse(r, ref)
+
+ def test_update_project_unsafe(self):
+ """Call ``POST /projects/{project_id} with unsafe names``."""
+ unsafe_name = 'i am not / safe'
+
+ self.config_fixture.config(group='resource',
+ project_name_url_safe='off')
+ ref = unit.new_project_ref(name=unsafe_name,
+ domain_id=self.domain_id,
+ parent_id=self.project['parent_id'])
+ del ref['id']
+ self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id},
+ body={'project': ref})
+
+ unsafe_name = 'i am still not / safe'
+ for config_setting in ['new', 'strict']:
+ self.config_fixture.config(group='resource',
+ project_name_url_safe=config_setting)
+ ref = unit.new_project_ref(name=unsafe_name,
+ domain_id=self.domain_id,
+ parent_id=self.project['parent_id'])
+ del ref['id']
+ self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id},
+ body={'project': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_update_project_unsafe_default(self):
+ """Check default for unsafe names for ``POST /projects``."""
+ unsafe_name = 'i am not / safe'
+
+        # By default, we should be able to update to unsafe names
+ ref = unit.new_project_ref(name=unsafe_name,
+ domain_id=self.domain_id,
+ parent_id=self.project['parent_id'])
+ del ref['id']
+ self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id},
+ body={'project': ref})
+
+ def test_update_project_domain_id(self):
+ """Call ``PATCH /projects/{project_id}`` with domain_id."""
+ project = unit.new_project_ref(domain_id=self.domain['id'])
+ project = self.resource_api.create_project(project['id'], project)
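+        # domain_id is immutable by default, so attempting to move the
+        # project to the default domain should fail validation.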
+ project['domain_id'] = CONF.identity.default_domain_id
+ r = self.patch('/projects/%(project_id)s' % {
+ 'project_id': project['id']},
+ body={'project': project},
+ expected_status=exception.ValidationError.code)
+ self.config_fixture.config(domain_id_immutable=False)
+ project['domain_id'] = self.domain['id']
+ r = self.patch('/projects/%(project_id)s' % {
+ 'project_id': project['id']},
+ body={'project': project})
+ self.assertValidProjectResponse(r, project)
+
+ def test_update_project_parent_id(self):
+ """Call ``PATCH /projects/{project_id}``."""
+ projects = self._create_projects_hierarchy()
+ leaf_project = projects[1]['project']
+ leaf_project['parent_id'] = None
+ self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': leaf_project['id']},
+ body={'project': leaf_project},
+ expected_status=http_client.FORBIDDEN)
+
+ def test_update_project_is_domain_not_allowed(self):
+ """Call ``PATCH /projects/{project_id}`` with is_domain.
+
+ The is_domain flag is immutable.
+ """
+ project = unit.new_project_ref(domain_id=self.domain['id'])
+ resp = self.post('/projects',
+ body={'project': project})
+ self.assertFalse(resp.result['project']['is_domain'])
+
+ project['parent_id'] = resp.result['project']['parent_id']
+ project['is_domain'] = True
+ self.patch('/projects/%(project_id)s' % {
+ 'project_id': resp.result['project']['id']},
+ body={'project': project},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_disable_leaf_project(self):
+ """Call ``PATCH /projects/{project_id}``."""
+ projects = self._create_projects_hierarchy()
+ leaf_project = projects[1]['project']
+ leaf_project['enabled'] = False
+ r = self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': leaf_project['id']},
+ body={'project': leaf_project})
+ self.assertEqual(
+ leaf_project['enabled'], r.result['project']['enabled'])
+
+ def test_disable_not_leaf_project(self):
+ """Call ``PATCH /projects/{project_id}``."""
+ projects = self._create_projects_hierarchy()
+ root_project = projects[0]['project']
+ root_project['enabled'] = False
+ self.patch(
+ '/projects/%(project_id)s' % {
+ 'project_id': root_project['id']},
+ body={'project': root_project},
+ expected_status=http_client.FORBIDDEN)
+
+ def test_delete_project(self):
+ """Call ``DELETE /projects/{project_id}``
+
+ As well as making sure the delete succeeds, we ensure
+ that any credentials that reference this projects are
+ also deleted, while other credentials are unaffected.
+
+ """
+ credential = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=self.project_id)
+ self.credential_api.create_credential(credential['id'], credential)
+
+ # First check the credential for this project is present
+ r = self.credential_api.get_credential(credential['id'])
+ self.assertDictEqual(credential, r)
+ # Create a second credential with a different project
+ project2 = unit.new_project_ref(domain_id=self.domain['id'])
+ self.resource_api.create_project(project2['id'], project2)
+ credential2 = unit.new_credential_ref(user_id=self.user['id'],
+ project_id=project2['id'])
+ self.credential_api.create_credential(credential2['id'], credential2)
+
+ # Now delete the project
+ self.delete(
+ '/projects/%(project_id)s' % {
+ 'project_id': self.project_id})
+
+ # Deleting the project should have deleted any credentials
+ # that reference this project
+ self.assertRaises(exception.CredentialNotFound,
+ self.credential_api.get_credential,
+ credential_id=credential['id'])
+ # But the credential for project2 is unaffected
+ r = self.credential_api.get_credential(credential2['id'])
+ self.assertDictEqual(credential2, r)
+
+ def test_delete_not_leaf_project(self):
+ """Call ``DELETE /projects/{project_id}``."""
+ projects = self._create_projects_hierarchy()
+ self.delete(
+ '/projects/%(project_id)s' % {
+ 'project_id': projects[0]['project']['id']},
+ expected_status=http_client.FORBIDDEN)
+
+
+class ResourceV3toV2MethodsTestCase(unit.TestCase):
+ """Test domain V3 to V2 conversion methods."""
+
+ def _setup_initial_projects(self):
+ self.project_id = uuid.uuid4().hex
+ self.domain_id = CONF.identity.default_domain_id
+ self.parent_id = uuid.uuid4().hex
+ # Project with only domain_id in ref
+ self.project1 = unit.new_project_ref(id=self.project_id,
+ name=self.project_id,
+ domain_id=self.domain_id)
+ # Project with both domain_id and parent_id in ref
+ self.project2 = unit.new_project_ref(id=self.project_id,
+ name=self.project_id,
+ domain_id=self.domain_id,
+ parent_id=self.parent_id)
+        # Project with neither domain_id nor parent_id in ref
+        self.project3 = unit.new_project_ref(id=self.project_id,
+                                             name=self.project_id)
+ # Expected result with no domain_id and parent_id
+ self.expected_project = {'id': self.project_id,
+ 'name': self.project_id}
+
+ def test_v2controller_filter_domain_id(self):
+ # V2.0 is not domain aware, ensure domain_id is popped off the ref.
+ other_data = uuid.uuid4().hex
+ domain_id = CONF.identity.default_domain_id
+ ref = {'domain_id': domain_id,
+ 'other_data': other_data}
+
+ ref_no_domain = {'other_data': other_data}
+ expected_ref = ref_no_domain.copy()
+
+ updated_ref = controller.V2Controller.filter_domain_id(ref)
+ self.assertIs(ref, updated_ref)
+ self.assertDictEqual(expected_ref, ref)
+ # Make sure we don't error/muck up data if domain_id isn't present
+ updated_ref = controller.V2Controller.filter_domain_id(ref_no_domain)
+ self.assertIs(ref_no_domain, updated_ref)
+ self.assertDictEqual(expected_ref, ref_no_domain)
+
+ def test_v3controller_filter_domain_id(self):
+ # No data should be filtered out in this case.
+ other_data = uuid.uuid4().hex
+ domain_id = uuid.uuid4().hex
+ ref = {'domain_id': domain_id,
+ 'other_data': other_data}
+
+ expected_ref = ref.copy()
+ updated_ref = controller.V3Controller.filter_domain_id(ref)
+ self.assertIs(ref, updated_ref)
+ self.assertDictEqual(expected_ref, ref)
+
+ def test_v2controller_filter_domain(self):
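+        # filter_domain should strip the 'domain' entry from the ref,
+        # whether or not it refers to the default domain.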
+ other_data = uuid.uuid4().hex
+ domain_id = uuid.uuid4().hex
+ non_default_domain_ref = {'domain': {'id': domain_id},
+ 'other_data': other_data}
+ default_domain_ref = {'domain': {'id': 'default'},
+ 'other_data': other_data}
+ updated_ref = controller.V2Controller.filter_domain(default_domain_ref)
+ self.assertNotIn('domain', updated_ref)
+ self.assertNotIn(
+ 'domain',
+ controller.V2Controller.filter_domain(non_default_domain_ref))
+
+ def test_v2controller_filter_project_parent_id(self):
+ # V2.0 is not project hierarchy aware, ensure parent_id is popped off.
+ other_data = uuid.uuid4().hex
+ parent_id = uuid.uuid4().hex
+ ref = {'parent_id': parent_id,
+ 'other_data': other_data}
+
+ ref_no_parent = {'other_data': other_data}
+ expected_ref = ref_no_parent.copy()
+
+ updated_ref = controller.V2Controller.filter_project_parent_id(ref)
+ self.assertIs(ref, updated_ref)
+ self.assertDictEqual(expected_ref, ref)
+ # Make sure we don't error/muck up data if parent_id isn't present
+ updated_ref = controller.V2Controller.filter_project_parent_id(
+ ref_no_parent)
+ self.assertIs(ref_no_parent, updated_ref)
+ self.assertDictEqual(expected_ref, ref_no_parent)
+
+ def test_v3_to_v2_project_method(self):
+ self._setup_initial_projects()
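+        # v3_to_v2_project is expected to strip v3-only attributes such
+        # as domain_id and parent_id, leaving only the id and name kept
+        # in self.expected_project.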
+
+ # TODO(shaleh): these optional fields are not handled well by the
+ # v3_to_v2 code. Manually remove them for now. Eventually update
+ # new_project_ref to not return optional values
+ del self.project1['enabled']
+ del self.project1['description']
+ del self.project2['enabled']
+ del self.project2['description']
+ del self.project3['enabled']
+ del self.project3['description']
+
+ updated_project1 = controller.V2Controller.v3_to_v2_project(
+ self.project1)
+ self.assertIs(self.project1, updated_project1)
+ self.assertDictEqual(self.expected_project, self.project1)
+ updated_project2 = controller.V2Controller.v3_to_v2_project(
+ self.project2)
+ self.assertIs(self.project2, updated_project2)
+ self.assertDictEqual(self.expected_project, self.project2)
+ updated_project3 = controller.V2Controller.v3_to_v2_project(
+ self.project3)
+ self.assertIs(self.project3, updated_project3)
+        self.assertDictEqual(self.expected_project, self.project3)
+
+ def test_v3_to_v2_project_method_list(self):
+ self._setup_initial_projects()
+ project_list = [self.project1, self.project2, self.project3]
+
+ # TODO(shaleh): these optional fields are not handled well by the
+ # v3_to_v2 code. Manually remove them for now. Eventually update
+ # new_project_ref to not return optional values
+ for p in project_list:
+ del p['enabled']
+ del p['description']
+ updated_list = controller.V2Controller.v3_to_v2_project(project_list)
+
+ self.assertEqual(len(updated_list), len(project_list))
+
+ for i, ref in enumerate(updated_list):
+ # Order should not change.
+ self.assertIs(ref, project_list[i])
+
+ self.assertDictEqual(self.expected_project, self.project1)
+ self.assertDictEqual(self.expected_project, self.project2)
+ self.assertDictEqual(self.expected_project, self.project3)
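Both tests above depend on v3_to_v2_project accepting either a single project dict or a list of them, mutating each in place and preserving list order. A hedged sketch of that dispatch, in terms of the filter_* helpers exercised earlier (the real method may strip further v3-only fields):

    @classmethod
    def v3_to_v2_project(cls, ref):
        if isinstance(ref, list):
            # Recurse per element; the comprehension preserves order.
            return [cls.v3_to_v2_project(r) for r in ref]
        cls.filter_domain_id(ref)          # drop the v3-only domain_id
        cls.filter_project_parent_id(ref)  # drop the v3-only parent_id
        return ref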
diff --git a/keystone-moon/keystone/tests/unit/test_v3_trust.py b/keystone-moon/keystone/tests/unit/test_v3_trust.py
new file mode 100644
index 00000000..d3127c89
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/test_v3_trust.py
@@ -0,0 +1,403 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from six.moves import http_client
+
+from keystone.tests import unit
+from keystone.tests.unit import test_v3
+
+
+class TestTrustOperations(test_v3.RestfulTestCase):
+ """Test module for create, read, update and delete operations on trusts.
+
+ This module is specific to tests for trust CRUD operations. All other tests
+ related to trusts that are authentication or authorization specific should
+ live in the keystone/tests/unit/test_v3_auth.py module.
+
+ """
+
+ def setUp(self):
+ super(TestTrustOperations, self).setUp()
+ # create a trustee to delegate stuff to
+ self.trustee_user = unit.create_user(self.identity_api,
+ domain_id=self.domain_id)
+ self.trustee_user_id = self.trustee_user['id']
+
+ def test_create_trust_bad_request(self):
+ # The server returns a 403 Forbidden rather than a 400 Bad Request, see
+ # bug 1133435
+ self.post('/OS-TRUST/trusts', body={'trust': {}},
+ expected_status=http_client.FORBIDDEN)
+
+ def test_trust_crud(self):
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ role_ids=[self.role_id])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(r, ref)
+
+ # get the trust
+ r = self.get(
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
+ self.assertValidTrustResponse(r, ref)
+
+ # validate roles on the trust
+ r = self.get(
+ '/OS-TRUST/trusts/%(trust_id)s/roles' % {
+ 'trust_id': trust['id']})
+ roles = self.assertValidRoleListResponse(r, self.role)
+ self.assertIn(self.role['id'], [x['id'] for x in roles])
+ self.head(
+ '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+ 'trust_id': trust['id'],
+ 'role_id': self.role['id']},
+ expected_status=http_client.OK)
+ r = self.get(
+ '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {
+ 'trust_id': trust['id'],
+ 'role_id': self.role['id']})
+ self.assertValidRoleResponse(r, self.role)
+
+ # list all trusts
+ r = self.get('/OS-TRUST/trusts')
+ self.assertValidTrustListResponse(r, trust)
+
+ # trusts are immutable
+ self.patch(
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+ body={'trust': ref},
+ expected_status=http_client.NOT_FOUND)
+
+ # delete the trust
+ self.delete(
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']})
+
+ # ensure the trust is not found
+ self.get(
+ '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': trust['id']},
+ expected_status=http_client.NOT_FOUND)
+
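For orientation, the body POSTed in these tests boils down to a small JSON document. A representative payload with placeholder values (field names follow new_trust_ref; the exact wire format is defined by the OS-TRUST API):

    trust_request = {
        'trust': {
            'trustor_user_id': '<user delegating the roles>',
            'trustee_user_id': '<user receiving the delegation>',
            'project_id': '<project the delegation is scoped to>',
            'impersonation': False,
            'expires_at': '2032-01-01T00:00:00.000000Z',  # optional
            'roles': [{'id': '<role granted through the trust>'}],
        }
    }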
+ def test_list_trusts(self):
+ # create three trusts with the same trustor and trustee
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ for i in range(3):
+ ref['expires_at'] = datetime.datetime.utcnow().replace(
+ year=2032).strftime(unit.TIME_FORMAT)
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ self.assertValidTrustResponse(r, ref)
+
+ # list all trusts
+ r = self.get('/OS-TRUST/trusts')
+ trusts = r.result['trusts']
+ self.assertEqual(3, len(trusts))
+ self.assertValidTrustListResponse(r)
+
+ # list all trusts for the trustor
+ r = self.get('/OS-TRUST/trusts?trustor_user_id=%s' %
+ self.user_id)
+ trusts = r.result['trusts']
+ self.assertEqual(3, len(trusts))
+ self.assertValidTrustListResponse(r)
+
+ # list all trusts where the trustor is filtered as the trustee; expect none.
+ r = self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
+ self.user_id)
+ trusts = r.result['trusts']
+ self.assertEqual(0, len(trusts))
+
+ # listing all trusts as the trustee is forbidden
+ self.get('/OS-TRUST/trusts?trustee_user_id=%s' %
+ self.trustee_user_id,
+ expected_status=http_client.FORBIDDEN)
+
+ def test_delete_trust(self):
+ # create a trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(r, ref)
+
+ # delete the trust
+ self.delete('/OS-TRUST/trusts/%(trust_id)s' % {
+ 'trust_id': trust['id']})
+
+ # ensure the trust isn't found
+ self.get('/OS-TRUST/trusts/%(trust_id)s' % {
+ 'trust_id': trust['id']},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_create_trust_without_trustee_returns_bad_request(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ role_ids=[self.role_id])
+
+ # trustee_user_id is required to create a trust
+ del ref['trustee_user_id']
+
+ self.post('/OS-TRUST/trusts',
+ body={'trust': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_trust_without_impersonation_returns_bad_request(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ role_ids=[self.role_id])
+
+ # impersonation is required to create a trust
+ del ref['impersonation']
+
+ self.post('/OS-TRUST/trusts',
+ body={'trust': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_trust_with_bad_remaining_uses_returns_bad_request(self):
+ # negative numbers, strings, non-integers, and 0 are not valid values
+ for value in [-1, 0, "a bad value", 7.2]:
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ remaining_uses=value,
+ role_ids=[self.role_id])
+ self.post('/OS-TRUST/trusts',
+ body={'trust': ref},
+ expected_status=http_client.BAD_REQUEST)
+
+ def test_create_trust_with_non_existent_trustee_returns_not_found(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=uuid.uuid4().hex,
+ project_id=self.project_id,
+ role_ids=[self.role_id])
+ self.post('/OS-TRUST/trusts', body={'trust': ref},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_create_trust_with_trustee_as_trustor_returns_forbidden(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.trustee_user_id,
+ trustee_user_id=self.user_id,
+ project_id=self.project_id,
+ role_ids=[self.role_id])
+ # NOTE(lbragstad): This fails because the user making the request isn't
+ # the trustor defined in the request.
+ self.post('/OS-TRUST/trusts', body={'trust': ref},
+ expected_status=http_client.FORBIDDEN)
+
+ def test_create_trust_with_non_existent_project_returns_not_found(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=uuid.uuid4().hex,
+ role_ids=[self.role_id])
+ self.post('/OS-TRUST/trusts', body={'trust': ref},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_create_trust_with_non_existent_role_id_returns_not_found(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ role_ids=[uuid.uuid4().hex])
+ self.post('/OS-TRUST/trusts', body={'trust': ref},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_create_trust_with_non_existent_role_name_returns_not_found(self):
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ role_names=[uuid.uuid4().hex])
+ self.post('/OS-TRUST/trusts', body={'trust': ref},
+ expected_status=http_client.NOT_FOUND)
+
+ def test_validate_trust_scoped_token_against_v2_returns_unauthorized(self):
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.default_domain_user_id,
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(r)
+
+ # get a v3 trust-scoped token as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ trust_id=trust['id'])
+ r = self.v3_create_token(auth_data)
+ self.assertValidProjectScopedTokenResponse(
+ r, self.default_domain_user)
+ token = r.headers.get('X-Subject-Token')
+
+ # now validate the v3 token with v2 API
+ path = '/v2.0/tokens/%s' % (token)
+ self.admin_request(
+ path=path, token=self.get_admin_token(),
+ method='GET', expected_status=http_client.UNAUTHORIZED)
+
+ def test_v3_v2_intermix_trustor_not_in_default_domain_failed(self):
+ # get a project-scoped token
+ auth_data = self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ project_id=self.default_domain_project_id)
+ token = self.get_requested_token(auth_data)
+
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.default_domain_user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.default_domain_project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
+ trust = self.assertValidTrustResponse(r)
+
+ # get a trust-scoped token as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=self.trustee_user['id'],
+ password=self.trustee_user['password'],
+ trust_id=trust['id'])
+ r = self.v3_create_token(auth_data)
+ self.assertValidProjectScopedTokenResponse(
+ r, self.trustee_user)
+ token = r.headers.get('X-Subject-Token')
+
+ # now validate the v3 token with v2 API
+ path = '/v2.0/tokens/%s' % (token)
+ self.admin_request(
+ path=path, token=self.get_admin_token(),
+ method='GET', expected_status=http_client.UNAUTHORIZED)
+
+ def test_v3_v2_intermix_project_not_in_default_domain_failed(self):
+ # create a trustee in default domain to delegate stuff to
+ trustee_user = unit.create_user(self.identity_api,
+ domain_id=test_v3.DEFAULT_DOMAIN_ID)
+ trustee_user_id = trustee_user['id']
+
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.default_domain_user_id,
+ trustee_user_id=trustee_user_id,
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+
+ # get a project-scoped token as the default_domain_user
+ auth_data = self.build_authentication_request(
+ user_id=self.default_domain_user['id'],
+ password=self.default_domain_user['password'],
+ project_id=self.default_domain_project_id)
+ token = self.get_requested_token(auth_data)
+
+ r = self.post('/OS-TRUST/trusts', body={'trust': ref}, token=token)
+ trust = self.assertValidTrustResponse(r)
+
+ # get a trust-scoped token as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=trustee_user['id'],
+ password=trustee_user['password'],
+ trust_id=trust['id'])
+ r = self.v3_create_token(auth_data)
+ self.assertValidProjectScopedTokenResponse(r, trustee_user)
+ token = r.headers.get('X-Subject-Token')
+
+ # ensure the token is invalid against v2
+ path = '/v2.0/tokens/%s' % (token)
+ self.admin_request(
+ path=path, token=self.get_admin_token(),
+ method='GET', expected_status=http_client.UNAUTHORIZED)
+
+ def test_exercise_trust_scoped_token_without_impersonation(self):
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ impersonation=False,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(resp)
+
+ # get a trust-scoped token as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=self.trustee_user['id'],
+ password=self.trustee_user['password'],
+ trust_id=trust['id'])
+ resp = self.v3_create_token(auth_data)
+ resp_body = resp.json_body['token']
+
+ self.assertValidProjectScopedTokenResponse(resp,
+ self.trustee_user)
+ self.assertEqual(self.trustee_user['id'], resp_body['user']['id'])
+ self.assertEqual(self.trustee_user['name'], resp_body['user']['name'])
+ self.assertEqual(self.domain['id'], resp_body['user']['domain']['id'])
+ self.assertEqual(self.domain['name'],
+ resp_body['user']['domain']['name'])
+ self.assertEqual(self.project['id'], resp_body['project']['id'])
+ self.assertEqual(self.project['name'], resp_body['project']['name'])
+
+ def test_exercise_trust_scoped_token_with_impersonation(self):
+ # create a new trust
+ ref = unit.new_trust_ref(
+ trustor_user_id=self.user_id,
+ trustee_user_id=self.trustee_user_id,
+ project_id=self.project_id,
+ impersonation=True,
+ expires=dict(minutes=1),
+ role_ids=[self.role_id])
+ resp = self.post('/OS-TRUST/trusts', body={'trust': ref})
+ trust = self.assertValidTrustResponse(resp)
+
+ # get a trust-scoped token as the trustee
+ auth_data = self.build_authentication_request(
+ user_id=self.trustee_user['id'],
+ password=self.trustee_user['password'],
+ trust_id=trust['id'])
+ resp = self.v3_create_token(auth_data)
+ resp_body = resp.json_body['token']
+
+ self.assertValidProjectScopedTokenResponse(resp, self.user)
+ self.assertEqual(self.user['id'], resp_body['user']['id'])
+ self.assertEqual(self.user['name'], resp_body['user']['name'])
+ self.assertEqual(self.domain['id'], resp_body['user']['domain']['id'])
+ self.assertEqual(self.domain['name'],
+ resp_body['user']['domain']['name'])
+ self.assertEqual(self.project['id'], resp_body['project']['id'])
+ self.assertEqual(self.project['name'], resp_body['project']['name'])
diff --git a/keystone-moon/keystone/tests/unit/test_validation.py b/keystone-moon/keystone/tests/unit/test_validation.py
index f7a224a0..73cb6ef6 100644
--- a/keystone-moon/keystone/tests/unit/test_validation.py
+++ b/keystone-moon/keystone/tests/unit/test_validation.py
@@ -21,11 +21,11 @@ from keystone.catalog import schema as catalog_schema
from keystone.common import validation
from keystone.common.validation import parameter_types
from keystone.common.validation import validators
-from keystone.contrib.endpoint_filter import schema as endpoint_filter_schema
-from keystone.contrib.federation import schema as federation_schema
from keystone.credential import schema as credential_schema
from keystone import exception
+from keystone.federation import schema as federation_schema
from keystone.identity import schema as identity_schema
+from keystone.oauth1 import schema as oauth1_schema
from keystone.policy import schema as policy_schema
from keystone.resource import schema as resource_schema
from keystone.tests import unit
@@ -67,6 +67,12 @@ entity_create = {
'additionalProperties': True,
}
+entity_create_optional_body = {
+ 'type': 'object',
+ 'properties': _entity_properties,
+ 'additionalProperties': True,
+}
+
entity_update = {
'type': 'object',
'properties': _entity_properties,
@@ -78,6 +84,8 @@ _VALID_ENABLED_FORMATS = [True, False]
_INVALID_ENABLED_FORMATS = ['some string', 1, 0, 'True', 'False']
+_INVALID_DESC_FORMATS = [False, 1, 2.0]
+
_VALID_URLS = ['https://example.com', 'http://EXAMPLE.com/v3',
'http://localhost', 'http://127.0.0.1:5000',
'http://1.1.1.1', 'http://255.255.255.255',
@@ -90,7 +98,7 @@ _VALID_URLS = ['https://example.com', 'http://EXAMPLE.com/v3',
_INVALID_URLS = [False, 'this is not a URL', 1234, 'www.example.com',
'localhost', 'http//something.com',
- 'https//something.com']
+ 'https//something.com', ' http://example.com']
_VALID_FILTERS = [{'interface': 'admin'},
{'region': 'US-WEST',
@@ -99,6 +107,17 @@ _VALID_FILTERS = [{'interface': 'admin'},
_INVALID_FILTERS = ['some string', 1, 0, True, False]
+def expected_validation_failure(msg):
+ def wrapper(f):
+ def wrapped(self, *args, **kwargs):
+ args = (self,) + args
+ e = self.assertRaises(exception.ValidationError, f,
+ *args, **kwargs)
+ self.assertIn(msg, six.text_type(e))
+ return wrapped
+ return wrapper
+
+
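One caveat with the decorator above: the inner wrapped function discards the decorated test's name and docstring, which can make failure reports harder to read. An editor's sketch of a variant using functools.wraps (not part of this patch) that preserves them:

    import functools

    def expected_validation_failure(msg):
        def wrapper(f):
            @functools.wraps(f)  # keep the original test name for reporting
            def wrapped(self, *args, **kwargs):
                e = self.assertRaises(exception.ValidationError,
                                      f, self, *args, **kwargs)
                self.assertIn(msg, six.text_type(e))
            return wrapped
        return wrapper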
class ValidatedDecoratorTests(unit.BaseTestCase):
entity_schema = {
@@ -113,42 +132,51 @@ class ValidatedDecoratorTests(unit.BaseTestCase):
'name': uuid.uuid4().hex,
}
- invalid_entity = {}
-
- @validation.validated(entity_schema, 'entity')
- def do_something(self, entity):
- pass
+ invalid_entity = {
+ 'name': 1.0, # NOTE(dstanek): this is the incorrect type for name
+ }
@validation.validated(entity_create, 'entity')
def create_entity(self, entity):
- pass
+ """Used to test cases where validated param is the only param."""
+
+ @validation.validated(entity_create_optional_body, 'entity')
+ def create_entity_optional_body(self, entity):
+ """Used to test cases where there is an optional body."""
@validation.validated(entity_update, 'entity')
def update_entity(self, entity_id, entity):
- pass
+ """Used to test cases where validated param is not the only param."""
- def _assert_call_entity_method_fails(self, method, *args, **kwargs):
- e = self.assertRaises(exception.ValidationError, method,
- *args, **kwargs)
+ def test_calling_create_with_valid_entity_kwarg_succeeds(self):
+ self.create_entity(entity=self.valid_entity)
- self.assertIn('Expecting to find entity in request body',
- six.text_type(e))
+ def test_calling_create_with_empty_entity_kwarg_succeeds(self):
+ """Test the case when client passing in an empty kwarg reference."""
+ self.create_entity_optional_body(entity={})
- def test_calling_with_valid_entity_kwarg_succeeds(self):
- self.do_something(entity=self.valid_entity)
+ @expected_validation_failure('Expecting to find entity in request body')
+ def test_calling_create_with_kwarg_as_None_fails(self):
+ self.create_entity(entity=None)
- def test_calling_with_invalid_entity_kwarg_fails(self):
- self.assertRaises(exception.ValidationError,
- self.do_something,
- entity=self.invalid_entity)
+ def test_calling_create_with_valid_entity_arg_succeeds(self):
+ self.create_entity(self.valid_entity)
- def test_calling_with_valid_entity_arg_succeeds(self):
- self.do_something(self.valid_entity)
+ def test_calling_create_with_empty_entity_arg_succeeds(self):
+ """Test the case when client passing in an empty entity reference."""
+ self.create_entity_optional_body({})
- def test_calling_with_invalid_entity_arg_fails(self):
- self.assertRaises(exception.ValidationError,
- self.do_something,
- self.invalid_entity)
+ @expected_validation_failure("Invalid input for field 'name'")
+ def test_calling_create_with_invalid_entity_fails(self):
+ self.create_entity(self.invalid_entity)
+
+ @expected_validation_failure('Expecting to find entity in request body')
+ def test_calling_create_with_entity_arg_as_None_fails(self):
+ self.create_entity(None)
+
+ @expected_validation_failure('Expecting to find entity in request body')
+ def test_calling_create_without_an_entity_fails(self):
+ self.create_entity()
def test_using_the_wrong_name_with_the_decorator_fails(self):
with testtools.ExpectedException(TypeError):
@@ -156,24 +184,26 @@ class ValidatedDecoratorTests(unit.BaseTestCase):
def function(entity):
pass
- def test_create_entity_no_request_body_with_decorator(self):
- """Test the case when request body is not provided."""
- self._assert_call_entity_method_fails(self.create_entity)
+ # NOTE(dstanek): below are the test cases for making sure the validation
+ # works when the validated param is not the only param. Since all of the
+ # actual validation cases are tested above, these tests serve as a
+ # sanity check.
- def test_create_entity_empty_request_body_with_decorator(self):
- """Test the case when client passing in an empty entity reference."""
- self._assert_call_entity_method_fails(self.create_entity, entity={})
+ def test_calling_update_with_valid_entity_succeeds(self):
+ self.update_entity(uuid.uuid4().hex, self.valid_entity)
- def test_update_entity_no_request_body_with_decorator(self):
- """Test the case when request body is not provided."""
- self._assert_call_entity_method_fails(self.update_entity,
- uuid.uuid4().hex)
+ @expected_validation_failure("Invalid input for field 'name'")
+ def test_calling_update_with_invalid_entity_fails(self):
+ self.update_entity(uuid.uuid4().hex, self.invalid_entity)
- def test_update_entity_empty_request_body_with_decorator(self):
+ def test_calling_update_with_empty_entity_kwarg_succeeds(self):
"""Test the case when client passing in an empty entity reference."""
- self._assert_call_entity_method_fails(self.update_entity,
- uuid.uuid4().hex,
- entity={})
+ global entity_update
+ original_entity_update = entity_update.copy()
+ # pop 'minProperties' from the schema so that an empty body is allowed.
+ entity_update.pop('minProperties')
+ self.update_entity(uuid.uuid4().hex, entity={})
+ entity_update = original_entity_update
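Restoring the module-level schema by hand, as above, is fragile: if the call between the pop and the restore raises, the relaxed schema leaks into every later test. A sketch of a safer variant under the same assumptions (and assuming validation.validated accepts a plain function the way it does a method), which builds a relaxed copy and never touches the global:

    def test_calling_update_with_empty_entity_kwarg_succeeds(self):
        """Test the case where the client passes in an empty entity reference."""
        relaxed_schema = dict(entity_update)
        relaxed_schema.pop('minProperties')  # allow an empty body

        @validation.validated(relaxed_schema, 'entity')
        def update(entity_id, entity):
            pass

        update(uuid.uuid4().hex, entity={})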
class EntityValidationTestCase(unit.BaseTestCase):
@@ -499,11 +529,22 @@ class ProjectValidationTestCase(unit.BaseTestCase):
self.update_project_validator.validate,
request_to_validate)
- def test_validate_project_update_request_with_null_domain_id_fails(self):
- request_to_validate = {'domain_id': None}
- self.assertRaises(exception.SchemaValidationError,
- self.update_project_validator.validate,
- request_to_validate)
+ def test_validate_project_create_request_with_valid_domain_id(self):
+ """Test that we validate `domain_id` in create project requests."""
+ # domain_id is nullable
+ for domain_id in [None, uuid.uuid4().hex]:
+ request_to_validate = {'name': self.project_name,
+ 'domain_id': domain_id}
+ self.create_project_validator.validate(request_to_validate)
+
+ def test_validate_project_request_with_invalid_domain_id_fails(self):
+ """Exception is raised when `domain_id` is a non-id value."""
+ for domain_id in [False, 'fake_project']:
+ request_to_validate = {'name': self.project_name,
+ 'domain_id': domain_id}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_project_validator.validate,
+ request_to_validate)
class DomainValidationTestCase(unit.BaseTestCase):
@@ -897,6 +938,11 @@ class RegionValidationTestCase(unit.BaseTestCase):
request_to_validate = {'other_attr': uuid.uuid4().hex}
self.create_region_validator.validate(request_to_validate)
+ def test_validate_region_create_succeeds_with_no_parameters(self):
+ """Validate create region request with no parameters."""
+ request_to_validate = {}
+ self.create_region_validator.validate(request_to_validate)
+
def test_validate_region_update_succeeds(self):
"""Test that we validate a region update request."""
request_to_validate = {'id': 'us-west',
@@ -1298,8 +1344,8 @@ class EndpointGroupValidationTestCase(unit.BaseTestCase):
def setUp(self):
super(EndpointGroupValidationTestCase, self).setUp()
- create = endpoint_filter_schema.endpoint_group_create
- update = endpoint_filter_schema.endpoint_group_update
+ create = catalog_schema.endpoint_group_create
+ update = catalog_schema.endpoint_group_update
self.create_endpoint_grp_validator = validators.SchemaValidator(create)
self.update_endpoint_grp_validator = validators.SchemaValidator(update)
@@ -1321,8 +1367,7 @@ class EndpointGroupValidationTestCase(unit.BaseTestCase):
self.create_endpoint_grp_validator.validate(request_to_validate)
def test_validate_endpoint_group_create_succeeds_with_valid_filters(self):
- """Validate dict values as `filters` in endpoint group create requests.
- """
+ """Validate `filters` in endpoint group create requests."""
request_to_validate = {'description': 'endpoint group description',
'name': 'endpoint_group_name'}
for valid_filters in _VALID_FILTERS:
@@ -1718,13 +1763,8 @@ class UserValidationTestCase(unit.BaseTestCase):
def test_validate_user_create_with_all_valid_parameters_succeeds(self):
"""Test that validating a user create request succeeds."""
- request_to_validate = {'name': self.user_name,
- 'default_project_id': uuid.uuid4().hex,
- 'domain_id': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'enabled': True,
- 'email': uuid.uuid4().hex,
- 'password': uuid.uuid4().hex}
+ request_to_validate = unit.new_user_ref(domain_id=uuid.uuid4().hex,
+ name=self.user_name)
self.create_user_validator.validate(request_to_validate)
def test_validate_user_create_fails_without_name(self):
@@ -1875,3 +1915,201 @@ class GroupValidationTestCase(unit.BaseTestCase):
"""Validate group update requests with extra parameters."""
request_to_validate = {'other_attr': uuid.uuid4().hex}
self.update_group_validator.validate(request_to_validate)
+
+
+class IdentityProviderValidationTestCase(unit.BaseTestCase):
+ """Test for V3 Identity Provider API validation."""
+
+ def setUp(self):
+ super(IdentityProviderValidationTestCase, self).setUp()
+
+ create = federation_schema.identity_provider_create
+ update = federation_schema.identity_provider_update
+ self.create_idp_validator = validators.SchemaValidator(create)
+ self.update_idp_validator = validators.SchemaValidator(update)
+
+ def test_validate_idp_request_succeeds(self):
+ """Test that we validate an identity provider request."""
+ request_to_validate = {'description': 'identity provider description',
+ 'enabled': True,
+ 'remote_ids': [uuid.uuid4().hex,
+ uuid.uuid4().hex]}
+ self.create_idp_validator.validate(request_to_validate)
+ self.update_idp_validator.validate(request_to_validate)
+
+ def test_validate_idp_request_fails_with_invalid_params(self):
+ """Exception raised when unknown parameter is found."""
+ request_to_validate = {'bogus': uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_idp_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_with_enabled(self):
+ """Validate `enabled` as boolean-like values."""
+ for valid_enabled in _VALID_ENABLED_FORMATS:
+ request_to_validate = {'enabled': valid_enabled}
+ self.create_idp_validator.validate(request_to_validate)
+ self.update_idp_validator.validate(request_to_validate)
+
+ def test_validate_idp_request_with_invalid_enabled_fails(self):
+ """Exception is raised when `enabled` isn't a boolean-like value."""
+ for invalid_enabled in _INVALID_ENABLED_FORMATS:
+ request_to_validate = {'enabled': invalid_enabled}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_idp_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_no_parameters(self):
+ """Test that schema validation with empty request body."""
+ request_to_validate = {}
+ self.create_idp_validator.validate(request_to_validate)
+
+ # Exception is raised when no property is provided on an IdP update.
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_with_invalid_description_fails(self):
+ """Exception is raised when `description` as a non-string value."""
+ request_to_validate = {'description': False}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_idp_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_with_invalid_remote_id_fails(self):
+ """Exception is raised when `remote_ids` is not a array."""
+ request_to_validate = {"remote_ids": uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_idp_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_with_duplicated_remote_id(self):
+ """Exception is raised when the duplicated `remote_ids` is found."""
+ idp_id = uuid.uuid4().hex
+ request_to_validate = {"remote_ids": [idp_id, idp_id]}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_idp_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_idp_validator.validate,
+ request_to_validate)
+
+ def test_validate_idp_request_remote_id_nullable(self):
+ """Test that `remote_ids` could be explicitly set to None"""
+ request_to_validate = {'remote_ids': None}
+ self.create_idp_validator.validate(request_to_validate)
+ self.update_idp_validator.validate(request_to_validate)
+
+
+class FederationProtocolValidationTestCase(unit.BaseTestCase):
+ """Test for V3 Federation Protocol API validation."""
+
+ def setUp(self):
+ super(FederationProtocolValidationTestCase, self).setUp()
+
+ schema = federation_schema.federation_protocol_schema
+ # create protocol and update protocol share the same schema definition,
+ # so they are combined; there is no need to validate them separately.
+ self.protocol_validator = validators.SchemaValidator(schema)
+
+ def test_validate_protocol_request_succeeds(self):
+ """Test that we validate a protocol request successfully."""
+ request_to_validate = {'mapping_id': uuid.uuid4().hex}
+ self.protocol_validator.validate(request_to_validate)
+
+ def test_validate_protocol_request_succeeds_with_nonuuid_mapping_id(self):
+ """Test that we allow underscore in mapping_id value."""
+ request_to_validate = {'mapping_id': 'my_mapping_id'}
+ self.protocol_validator.validate(request_to_validate)
+
+ def test_validate_protocol_request_fails_with_invalid_params(self):
+ """Exception raised when unknown parameter is found."""
+ request_to_validate = {'bogus': uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.protocol_validator.validate,
+ request_to_validate)
+
+ def test_validate_protocol_request_no_parameters(self):
+ """Test that schema validation with empty request body."""
+ request_to_validate = {}
+ # 'mapping_id' is required.
+ self.assertRaises(exception.SchemaValidationError,
+ self.protocol_validator.validate,
+ request_to_validate)
+
+ def test_validate_protocol_request_fails_with_invalid_mapping_id(self):
+ """Exception raised when mapping_id is not string."""
+ request_to_validate = {'mapping_id': 12334}
+ self.assertRaises(exception.SchemaValidationError,
+ self.protocol_validator.validate,
+ request_to_validate)
+
+
+class OAuth1ValidationTestCase(unit.BaseTestCase):
+ """Test for V3 Identity OAuth1 API validation."""
+
+ def setUp(self):
+ super(OAuth1ValidationTestCase, self).setUp()
+
+ create = oauth1_schema.consumer_create
+ update = oauth1_schema.consumer_update
+ self.create_consumer_validator = validators.SchemaValidator(create)
+ self.update_consumer_validator = validators.SchemaValidator(update)
+
+ def test_validate_consumer_request_succeeds(self):
+ """Test that we validate a consumer request successfully."""
+ request_to_validate = {'description': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex}
+ self.create_consumer_validator.validate(request_to_validate)
+ self.update_consumer_validator.validate(request_to_validate)
+
+ def test_validate_consumer_request_with_no_parameters(self):
+ """Test that schema validation with empty request body."""
+ request_to_validate = {}
+ self.create_consumer_validator.validate(request_to_validate)
+ # At least one property should be given.
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_consumer_validator.validate,
+ request_to_validate)
+
+ def test_validate_consumer_request_with_invalid_description_fails(self):
+ """Exception is raised when `description` as a non-string value."""
+ for invalid_desc in _INVALID_DESC_FORMATS:
+ request_to_validate = {'description': invalid_desc}
+ self.assertRaises(exception.SchemaValidationError,
+ self.create_consumer_validator.validate,
+ request_to_validate)
+
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_consumer_validator.validate,
+ request_to_validate)
+
+ def test_validate_update_consumer_request_fails_with_secret(self):
+ """Exception raised when secret is given."""
+ request_to_validate = {'secret': uuid.uuid4().hex}
+ self.assertRaises(exception.SchemaValidationError,
+ self.update_consumer_validator.validate,
+ request_to_validate)
+
+ def test_validate_consumer_request_with_none_desc(self):
+ """Test that schema validation with None desc."""
+ request_to_validate = {'description': None}
+ self.create_consumer_validator.validate(request_to_validate)
+ self.update_consumer_validator.validate(request_to_validate)
diff --git a/keystone-moon/keystone/tests/unit/test_versions.py b/keystone-moon/keystone/tests/unit/test_versions.py
index 40814588..2f5c2b17 100644
--- a/keystone-moon/keystone/tests/unit/test_versions.py
+++ b/keystone-moon/keystone/tests/unit/test_versions.py
@@ -25,9 +25,9 @@ from testtools import matchers as tt_matchers
import webob
from keystone.common import json_home
-from keystone import controllers
from keystone.tests import unit
from keystone.tests.unit import utils
+from keystone.version import controllers
CONF = cfg.CONF
@@ -74,9 +74,9 @@ v3_MEDIA_TYPES = [
]
v3_EXPECTED_RESPONSE = {
- "id": "v3.4",
+ "id": "v3.6",
"status": "stable",
- "updated": "2015-03-30T00:00:00Z",
+ "updated": "2016-04-04T00:00:00Z",
"links": [
{
"rel": "self",
@@ -131,6 +131,10 @@ _build_ep_filter_rel = functools.partial(
json_home.build_v3_extension_resource_relation,
extension_name='OS-EP-FILTER', extension_version='1.0')
+_build_os_inherit_rel = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-INHERIT', extension_version='1.0')
+
TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation(
'OS-TRUST', '1.0', 'trust_id')
@@ -169,13 +173,12 @@ BASE_EP_FILTER = BASE_EP_FILTER_PREFIX + '/endpoint_groups/{endpoint_group_id}'
BASE_ACCESS_TOKEN = (
'/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}')
-# TODO(stevemar): Use BASE_IDP_PROTOCOL when bug 1420125 is resolved.
-FEDERATED_AUTH_URL = ('/OS-FEDERATION/identity_providers/{identity_provider}'
- '/protocols/{protocol}/auth')
+FEDERATED_AUTH_URL = ('/OS-FEDERATION/identity_providers/{idp_id}'
+ '/protocols/{protocol_id}/auth')
FEDERATED_IDP_SPECIFIC_WEBSSO = ('/auth/OS-FEDERATION/identity_providers/'
'{idp_id}/protocols/{protocol_id}/websso')
-V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
+V3_JSON_HOME_RESOURCES = {
json_home.build_v3_resource_relation('auth_tokens'): {
'href': '/auth/tokens'},
json_home.build_v3_resource_relation('auth_catalog'): {
@@ -231,8 +234,8 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
_build_ec2tokens_relation(resource_name='user_credential'): {
'href-template': '/users/{user_id}/credentials/OS-EC2/{credential_id}',
'href-vars': {
- 'credential_id': json_home.build_v3_extension_parameter_relation(
- 'OS-EC2', '1.0', 'credential_id'),
+ 'credential_id':
+ json_home.build_v3_parameter_relation('credential_id'),
'user_id': json_home.Parameters.USER_ID, }},
_build_ec2tokens_relation(resource_name='user_credentials'): {
'href-template': '/users/{user_id}/credentials/OS-EC2',
@@ -324,6 +327,22 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'href-template': '/roles/{role_id}',
'href-vars': {
'role_id': json_home.Parameters.ROLE_ID, }},
+ json_home.build_v3_resource_relation('implied_roles'): {
+ 'href-template': '/roles/{prior_role_id}/implies',
+ 'href-vars': {
+ 'prior_role_id': json_home.Parameters.ROLE_ID},
+ 'hints': {'status': 'experimental'}},
+ json_home.build_v3_resource_relation('implied_role'): {
+ 'href-template':
+ '/roles/{prior_role_id}/implies/{implied_role_id}',
+ 'href-vars': {
+ 'prior_role_id': json_home.Parameters.ROLE_ID,
+ 'implied_role_id': json_home.Parameters.ROLE_ID,
+ },
+ 'hints': {'status': 'experimental'}},
+ json_home.build_v3_resource_relation('role_inferences'): {
+ 'href': '/role_inferences',
+ 'hints': {'status': 'experimental'}},
json_home.build_v3_resource_relation('role_assignments'): {
'href': '/role_assignments'},
json_home.build_v3_resource_relation('roles'): {'href': '/roles'},
@@ -394,12 +413,11 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'href-template': BASE_IDP_PROTOCOL,
'href-vars': {
'idp_id': IDP_ID_PARAMETER_RELATION}},
- # TODO(stevemar): Update href-vars when bug 1420125 is resolved.
_build_federation_rel(resource_name='identity_provider_protocol_auth'): {
'href-template': FEDERATED_AUTH_URL,
'href-vars': {
- 'identity_provider': IDP_ID_PARAMETER_RELATION,
- 'protocol': PROTOCOL_ID_PARAM_RELATION, }},
+ 'idp_id': IDP_ID_PARAMETER_RELATION,
+ 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }},
_build_oauth1_rel(resource_name='access_tokens'): {
'href': '/OS-OAUTH1/access_token'},
_build_oauth1_rel(resource_name='request_tokens'): {
@@ -509,6 +527,58 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'href-template': BASE_EP_FILTER + '/projects',
'href-vars': {'endpoint_group_id':
ENDPOINT_GROUP_ID_PARAMETER_RELATION, }},
+ _build_os_inherit_rel(
+ resource_name='domain_user_role_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
+ '{user_id}/roles/{role_id}/inherited_to_projects',
+ 'href-vars': {
+ 'domain_id': json_home.Parameters.DOMAIN_ID,
+ 'role_id': json_home.Parameters.ROLE_ID,
+ 'user_id': json_home.Parameters.USER_ID, }},
+ _build_os_inherit_rel(
+ resource_name='domain_group_role_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
+ '{group_id}/roles/{role_id}/inherited_to_projects',
+ 'href-vars': {
+ 'domain_id': json_home.Parameters.DOMAIN_ID,
+ 'group_id': json_home.Parameters.GROUP_ID,
+ 'role_id': json_home.Parameters.ROLE_ID, }},
+ _build_os_inherit_rel(
+ resource_name='domain_user_roles_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
+ '{user_id}/roles/inherited_to_projects',
+ 'href-vars': {
+ 'domain_id': json_home.Parameters.DOMAIN_ID,
+ 'user_id': json_home.Parameters.USER_ID, }},
+ _build_os_inherit_rel(
+ resource_name='domain_group_roles_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
+ '{group_id}/roles/inherited_to_projects',
+ 'href-vars': {
+ 'domain_id': json_home.Parameters.DOMAIN_ID,
+ 'group_id': json_home.Parameters.GROUP_ID, }},
+ _build_os_inherit_rel(
+ resource_name='project_user_role_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/projects/{project_id}/users/'
+ '{user_id}/roles/{role_id}/inherited_to_projects',
+ 'href-vars': {
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ 'role_id': json_home.Parameters.ROLE_ID,
+ 'user_id': json_home.Parameters.USER_ID, }},
+ _build_os_inherit_rel(
+ resource_name='project_group_role_inherited_to_projects'):
+ {
+ 'href-template': '/OS-INHERIT/projects/{project_id}/groups/'
+ '{group_id}/roles/{role_id}/inherited_to_projects',
+ 'href-vars': {
+ 'project_id': json_home.Parameters.PROJECT_ID,
+ 'group_id': json_home.Parameters.GROUP_ID,
+ 'role_id': json_home.Parameters.ROLE_ID, }},
json_home.build_v3_resource_relation('domain_config'): {
'href-template':
'/domains/{domain_id}/config',
@@ -530,99 +600,23 @@ V3_JSON_HOME_RESOURCES_INHERIT_DISABLED = {
'group': json_home.build_v3_parameter_relation('config_group'),
'option': json_home.build_v3_parameter_relation('config_option')},
'hints': {'status': 'experimental'}},
+ json_home.build_v3_resource_relation('domain_config_default'): {
+ 'href': '/domains/config/default',
+ 'hints': {'status': 'experimental'}},
+ json_home.build_v3_resource_relation('domain_config_default_group'): {
+ 'href-template': '/domains/config/{group}/default',
+ 'href-vars': {
+ 'group': json_home.build_v3_parameter_relation('config_group')},
+ 'hints': {'status': 'experimental'}},
+ json_home.build_v3_resource_relation('domain_config_default_option'): {
+ 'href-template': '/domains/config/{group}/{option}/default',
+ 'href-vars': {
+ 'group': json_home.build_v3_parameter_relation('config_group'),
+ 'option': json_home.build_v3_parameter_relation('config_option')},
+ 'hints': {'status': 'experimental'}},
}
-# with os-inherit enabled, there's some more resources.
-
-build_os_inherit_relation = functools.partial(
- json_home.build_v3_extension_resource_relation,
- extension_name='OS-INHERIT', extension_version='1.0')
-
-V3_JSON_HOME_RESOURCES_INHERIT_ENABLED = dict(
- V3_JSON_HOME_RESOURCES_INHERIT_DISABLED)
-V3_JSON_HOME_RESOURCES_INHERIT_ENABLED.update(
- (
- (
- build_os_inherit_relation(
- resource_name='domain_user_role_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
- '{user_id}/roles/{role_id}/inherited_to_projects',
- 'href-vars': {
- 'domain_id': json_home.Parameters.DOMAIN_ID,
- 'role_id': json_home.Parameters.ROLE_ID,
- 'user_id': json_home.Parameters.USER_ID,
- },
- }
- ),
- (
- build_os_inherit_relation(
- resource_name='domain_group_role_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
- '{group_id}/roles/{role_id}/inherited_to_projects',
- 'href-vars': {
- 'domain_id': json_home.Parameters.DOMAIN_ID,
- 'group_id': json_home.Parameters.GROUP_ID,
- 'role_id': json_home.Parameters.ROLE_ID,
- },
- }
- ),
- (
- build_os_inherit_relation(
- resource_name='domain_user_roles_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/domains/{domain_id}/users/'
- '{user_id}/roles/inherited_to_projects',
- 'href-vars': {
- 'domain_id': json_home.Parameters.DOMAIN_ID,
- 'user_id': json_home.Parameters.USER_ID,
- },
- }
- ),
- (
- build_os_inherit_relation(
- resource_name='domain_group_roles_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/'
- '{group_id}/roles/inherited_to_projects',
- 'href-vars': {
- 'domain_id': json_home.Parameters.DOMAIN_ID,
- 'group_id': json_home.Parameters.GROUP_ID,
- },
- }
- ),
- (
- build_os_inherit_relation(
- resource_name='project_user_role_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/projects/{project_id}/users/'
- '{user_id}/roles/{role_id}/inherited_to_projects',
- 'href-vars': {
- 'project_id': json_home.Parameters.PROJECT_ID,
- 'role_id': json_home.Parameters.ROLE_ID,
- 'user_id': json_home.Parameters.USER_ID,
- },
- }
- ),
- (
- build_os_inherit_relation(
- resource_name='project_group_role_inherited_to_projects'),
- {
- 'href-template': '/OS-INHERIT/projects/{project_id}/groups/'
- '{group_id}/roles/{role_id}/inherited_to_projects',
- 'href-vars': {
- 'project_id': json_home.Parameters.PROJECT_ID,
- 'group_id': json_home.Parameters.GROUP_ID,
- 'role_id': json_home.Parameters.ROLE_ID,
- },
- }
- ),
- )
-)
-
-
class TestClient(object):
def __init__(self, app=None, token=None):
self.app = app
@@ -751,7 +745,7 @@ class VersionTestCase(unit.TestCase):
def test_public_version_v2(self):
client = TestClient(self.public_app)
resp = client.get('/v2.0/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -762,7 +756,7 @@ class VersionTestCase(unit.TestCase):
def test_admin_version_v2(self):
client = TestClient(self.admin_app)
resp = client.get('/v2.0/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -775,7 +769,7 @@ class VersionTestCase(unit.TestCase):
for app in (self.public_app, self.admin_app):
client = TestClient(app)
resp = client.get('/v2.0/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'], 'http://localhost/v2.0/')
@@ -784,7 +778,7 @@ class VersionTestCase(unit.TestCase):
def test_public_version_v3(self):
client = TestClient(self.public_app)
resp = client.get('/v3/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -796,7 +790,7 @@ class VersionTestCase(unit.TestCase):
def test_admin_version_v3(self):
client = TestClient(self.admin_app)
resp = client.get('/v3/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -809,7 +803,7 @@ class VersionTestCase(unit.TestCase):
for app in (self.public_app, self.admin_app):
client = TestClient(app)
resp = client.get('/v3/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'], 'http://localhost/v3/')
@@ -824,7 +818,7 @@ class VersionTestCase(unit.TestCase):
# request to /v3 should pass
resp = client.get('/v3/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v3_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -857,7 +851,7 @@ class VersionTestCase(unit.TestCase):
# request to /v2.0 should pass
resp = client.get('/v2.0/')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
data = jsonutils.loads(resp.body)
expected = v2_VERSION_RESPONSE
self._paste_in_port(expected['version'],
@@ -897,7 +891,7 @@ class VersionTestCase(unit.TestCase):
# then the server responds with a JSON Home document.
exp_json_home_data = {
- 'resources': V3_JSON_HOME_RESOURCES_INHERIT_DISABLED}
+ 'resources': V3_JSON_HOME_RESOURCES}
self._test_json_home('/v3', exp_json_home_data)
@@ -906,7 +900,7 @@ class VersionTestCase(unit.TestCase):
# then the server responds with a JSON Home document.
exp_json_home_data = copy.deepcopy({
- 'resources': V3_JSON_HOME_RESOURCES_INHERIT_DISABLED})
+ 'resources': V3_JSON_HOME_RESOURCES})
json_home.translate_urls(exp_json_home_data, '/v3')
self._test_json_home('/', exp_json_home_data)
@@ -1022,45 +1016,6 @@ class VersionSingleAppTestCase(unit.TestCase):
self._test_version('admin')
-class VersionInheritEnabledTestCase(unit.TestCase):
- def setUp(self):
- super(VersionInheritEnabledTestCase, self).setUp()
- self.load_backends()
- self.public_app = self.loadapp('keystone', 'main')
- self.admin_app = self.loadapp('keystone', 'admin')
-
- self.config_fixture.config(
- public_endpoint='http://localhost:%(public_port)d',
- admin_endpoint='http://localhost:%(admin_port)d')
-
- def config_overrides(self):
- super(VersionInheritEnabledTestCase, self).config_overrides()
- admin_port = random.randint(10000, 30000)
- public_port = random.randint(40000, 60000)
- self.config_fixture.config(group='eventlet_server',
- public_port=public_port,
- admin_port=admin_port)
-
- self.config_fixture.config(group='os_inherit', enabled=True)
-
- def test_json_home_v3(self):
- # If the request is /v3 and the Accept header is application/json-home
- # then the server responds with a JSON Home document.
-
- client = TestClient(self.public_app)
- resp = client.get('/v3/', headers={'Accept': 'application/json-home'})
-
- self.assertThat(resp.status, tt_matchers.Equals('200 OK'))
- self.assertThat(resp.headers['Content-Type'],
- tt_matchers.Equals('application/json-home'))
-
- exp_json_home_data = {
- 'resources': V3_JSON_HOME_RESOURCES_INHERIT_ENABLED}
-
- self.assertThat(jsonutils.loads(resp.body),
- tt_matchers.Equals(exp_json_home_data))
-
-
class VersionBehindSslTestCase(unit.TestCase):
def setUp(self):
super(VersionBehindSslTestCase, self).setUp()
diff --git a/keystone-moon/keystone/tests/unit/test_wsgi.py b/keystone-moon/keystone/tests/unit/test_wsgi.py
index ed4c67d6..564d7406 100644
--- a/keystone-moon/keystone/tests/unit/test_wsgi.py
+++ b/keystone-moon/keystone/tests/unit/test_wsgi.py
@@ -85,7 +85,7 @@ class ApplicationTest(BaseWSGITest):
def test_response_content_type(self):
req = self._make_request()
resp = req.get_response(self.app)
- self.assertEqual(resp.content_type, 'application/json')
+ self.assertEqual('application/json', resp.content_type)
def test_query_string_available(self):
class FakeApp(wsgi.Application):
@@ -93,7 +93,7 @@ class ApplicationTest(BaseWSGITest):
return context['query_string']
req = self._make_request(url='/?1=2')
resp = req.get_response(FakeApp())
- self.assertEqual(jsonutils.loads(resp.body), {'1': '2'})
+ self.assertEqual({'1': '2'}, jsonutils.loads(resp.body))
def test_headers_available(self):
class FakeApp(wsgi.Application):
@@ -112,15 +112,16 @@ class ApplicationTest(BaseWSGITest):
resp = wsgi.render_response(body=data)
self.assertEqual('200 OK', resp.status)
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
self.assertEqual(body, resp.body)
self.assertEqual('X-Auth-Token', resp.headers.get('Vary'))
self.assertEqual(str(len(body)), resp.headers.get('Content-Length'))
def test_render_response_custom_status(self):
- resp = wsgi.render_response(status=(501, 'Not Implemented'))
+ resp = wsgi.render_response(
+ status=(http_client.NOT_IMPLEMENTED, 'Not Implemented'))
self.assertEqual('501 Not Implemented', resp.status)
- self.assertEqual(501, resp.status_int)
+ self.assertEqual(http_client.NOT_IMPLEMENTED, resp.status_int)
def test_successful_require_attribute(self):
app = FakeAttributeCheckerApp()
@@ -169,19 +170,31 @@ class ApplicationTest(BaseWSGITest):
self.assertEqual('Some-Value', resp.headers.get('Custom-Header'))
self.assertEqual('X-Auth-Token', resp.headers.get('Vary'))
+ def test_render_response_non_str_headers_converted(self):
+ resp = wsgi.render_response(
+ headers=[('Byte-Header', 'Byte-Value'),
+ (u'Unicode-Header', u'Unicode-Value')])
+ # assert that all headers are identified.
+ self.assertThat(resp.headers, matchers.HasLength(4))
+ self.assertEqual('Unicode-Value', resp.headers.get('Unicode-Header'))
+ # assert that the unicode value is converted; the expected type is
+ # str on both python2 and python3.
+ self.assertEqual(str,
+ type(resp.headers.get('Unicode-Header')))
+
def test_render_response_no_body(self):
resp = wsgi.render_response()
self.assertEqual('204 No Content', resp.status)
- self.assertEqual(204, resp.status_int)
+ self.assertEqual(http_client.NO_CONTENT, resp.status_int)
self.assertEqual(b'', resp.body)
self.assertEqual('0', resp.headers.get('Content-Length'))
self.assertIsNone(resp.headers.get('Content-Type'))
def test_render_response_head_with_body(self):
resp = wsgi.render_response({'id': uuid.uuid4().hex}, method='HEAD')
- self.assertEqual(200, resp.status_int)
+ self.assertEqual(http_client.OK, resp.status_int)
self.assertEqual(b'', resp.body)
- self.assertNotEqual(resp.headers.get('Content-Length'), '0')
+ self.assertNotEqual('0', resp.headers.get('Content-Length'))
self.assertEqual('application/json', resp.headers.get('Content-Type'))
def test_application_local_config(self):
@@ -200,7 +213,9 @@ class ApplicationTest(BaseWSGITest):
def test_render_exception_host(self):
e = exception.Unauthorized(message=u'\u7f51\u7edc')
- context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex}
+ req = self._make_request(url='/')
+ context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex,
+ 'environment': req.environ}
resp = wsgi.render_exception(e, context=context)
self.assertEqual(http_client.UNAUTHORIZED, resp.status_int)
@@ -225,6 +240,77 @@ class ApplicationTest(BaseWSGITest):
self.assertEqual({'name': u'nonexit\xe8nt'},
jsonutils.loads(resp.body))
+ def test_base_url(self):
+ class FakeApp(wsgi.Application):
+ def index(self, context):
+ return self.base_url(context, 'public')
+ req = self._make_request(url='/')
+ # NOTE(gyee): according to wsgiref, if HTTP_HOST is present in the
+ # request environment, it will be used to construct the base url.
+ # SERVER_NAME and SERVER_PORT will be ignored. These are standard
+ # WSGI environment variables populated by the webserver.
+ req.environ.update({
+ 'SCRIPT_NAME': '/identity',
+ 'SERVER_NAME': '1.2.3.4',
+ 'wsgi.url_scheme': 'http',
+ 'SERVER_PORT': '80',
+ 'HTTP_HOST': '1.2.3.4',
+ })
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://1.2.3.4/identity", resp.body)
+
+ # if HTTP_HOST is absent, SERVER_NAME and SERVER_PORT will be used
+ req = self._make_request(url='/')
+ del req.environ['HTTP_HOST']
+ req.environ.update({
+ 'SCRIPT_NAME': '/identity',
+ 'SERVER_NAME': '1.1.1.1',
+ 'wsgi.url_scheme': 'http',
+ 'SERVER_PORT': '1234',
+ })
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://1.1.1.1:1234/identity", resp.body)
+
+ # make sure keystone normalizes the standard HTTP port 80 by
+ # stripping it
+ req = self._make_request(url='/')
+ req.environ.update({'HTTP_HOST': 'foo:80',
+ 'SCRIPT_NAME': '/identity'})
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://foo/identity", resp.body)
+
+ # make sure keystone normalizes the standard HTTPS port 443 by
+ # stripping it
+ req = self._make_request(url='/')
+ req.environ.update({'HTTP_HOST': 'foo:443',
+ 'SCRIPT_NAME': '/identity',
+ 'wsgi.url_scheme': 'https'})
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"https://foo/identity", resp.body)
+
+ # make sure non-standard port is preserved
+ req = self._make_request(url='/')
+ req.environ.update({'HTTP_HOST': 'foo:1234',
+ 'SCRIPT_NAME': '/identity'})
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://foo:1234/identity", resp.body)
+
+ # make sure the version portion of the SCRIPT_NAME, '/v2.0', is stripped
+ # from base url
+ req = self._make_request(url='/')
+ req.environ.update({'HTTP_HOST': 'foo:80',
+ 'SCRIPT_NAME': '/bar/identity/v2.0'})
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://foo/bar/identity", resp.body)
+
+ # make sure the version portion of the SCRIPT_NAME, '/v3', is stripped from
+ # base url
+ req = self._make_request(url='/')
+ req.environ.update({'HTTP_HOST': 'foo:80',
+ 'SCRIPT_NAME': '/identity/v3'})
+ resp = req.get_response(FakeApp())
+ self.assertEqual(b"http://foo/identity", resp.body)
+
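Taken together, the cases above specify the normalization: HTTP_HOST wins over SERVER_NAME/SERVER_PORT, default ports are stripped, and a trailing API version segment is dropped from SCRIPT_NAME. A hedged sketch of that logic (illustrative only; the real base_url also honors the configured public/admin endpoint settings):

    def sketch_base_url(environ):
        scheme = environ['wsgi.url_scheme']
        # Per wsgiref, HTTP_HOST takes precedence over SERVER_NAME/SERVER_PORT.
        host = environ.get('HTTP_HOST') or '%s:%s' % (
            environ['SERVER_NAME'], environ['SERVER_PORT'])
        # Strip the default port for the scheme (80 for http, 443 for https).
        if (scheme, host.rsplit(':', 1)[-1]) in (('http', '80'),
                                                 ('https', '443')):
            host = host.rsplit(':', 1)[0]
        path = environ.get('SCRIPT_NAME', '')
        # Drop a trailing API version segment such as /v2.0 or /v3.
        for version in ('/v2.0', '/v3'):
            if path.endswith(version):
                path = path[:-len(version)]
        return '%s://%s%s' % (scheme, host, path)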
class ExtensionRouterTest(BaseWSGITest):
def test_extensionrouter_local_config(self):
@@ -293,24 +379,15 @@ class MiddlewareTest(BaseWSGITest):
self.assertEqual(exception.UnexpectedError.code, resp.status_int)
return resp
- # Exception data should not be in the message when debug is False
- self.config_fixture.config(debug=False)
+ # Exception data should not be in the message when insecure_debug is
+ # False
+ self.config_fixture.config(debug=False, insecure_debug=False)
self.assertNotIn(exception_str, do_request().body)
- # Exception data should be in the message when debug is True
- self.config_fixture.config(debug=True)
+ # Exception data should be in the message when insecure_debug is True
+ self.config_fixture.config(debug=True, insecure_debug=True)
self.assertIn(exception_str, do_request().body)
- def test_middleware_local_config(self):
- class FakeMiddleware(wsgi.Middleware):
- def __init__(self, *args, **kwargs):
- self.kwargs = kwargs
-
- factory = FakeMiddleware.factory({}, testkey="test")
- app = factory(self.app)
- self.assertIn("testkey", app.kwargs)
- self.assertEqual("test", app.kwargs["testkey"])
-
class LocalizedResponseTest(unit.TestCase):
def test_request_match_default(self):
@@ -345,8 +422,8 @@ class LocalizedResponseTest(unit.TestCase):
def test_static_translated_string_is_lazy_translatable(self):
# Statically created message strings are an object that can get
# lazy-translated rather than a regular string.
- self.assertNotEqual(type(exception.Unauthorized.message_format),
- six.text_type)
+ self.assertNotEqual(six.text_type,
+ type(exception.Unauthorized.message_format))
@mock.patch.object(oslo_i18n, 'get_available_languages')
def test_get_localized_response(self, mock_gal):
@@ -457,12 +534,14 @@ class ServerTest(unit.TestCase):
server.start()
self.addCleanup(server.stop)
- self.assertEqual(2, mock_sock_dup.setsockopt.call_count)
-
- # Test the last set of call args i.e. for the keepidle
- mock_sock_dup.setsockopt.assert_called_with(socket.IPPROTO_TCP,
- socket.TCP_KEEPIDLE,
- 1)
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ self.assertEqual(2, mock_sock_dup.setsockopt.call_count)
+ # Test the last set of call args, i.e. for the keepidle
+ mock_sock_dup.setsockopt.assert_called_with(socket.IPPROTO_TCP,
+ socket.TCP_KEEPIDLE,
+ 1)
+ else:
+ self.assertEqual(1, mock_sock_dup.setsockopt.call_count)
self.assertTrue(mock_listen.called)
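
The branch exists because TCP_KEEPIDLE is a Linux-only socket constant. A minimal sketch of the guard the server is expected to apply (illustrative, not the eventlet server implementation verbatim):

    import socket

    def set_keepalive(sock, idle=1):
        # SO_KEEPALIVE is portable, so it is always set (the first
        # setsockopt call counted by the test).
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        # TCP_KEEPIDLE only exists on Linux, hence the hasattr() guard
        # and the platform-dependent call count asserted above.
        if hasattr(socket, 'TCP_KEEPIDLE'):
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle)
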
diff --git a/keystone-moon/keystone/tests/unit/tests/test_core.py b/keystone-moon/keystone/tests/unit/tests/test_core.py
index 50f1309e..56e42bcc 100644
--- a/keystone-moon/keystone/tests/unit/tests/test_core.py
+++ b/keystone-moon/keystone/tests/unit/tests/test_core.py
@@ -39,7 +39,7 @@ class TestTestCase(unit.TestCase):
# If the arguments are invalid for the string in a log it raises an
# exception during testing.
self.assertThat(
- lambda: LOG.warn('String %(p1)s %(p2)s', {'p1': 'something'}),
+ lambda: LOG.warning('String %(p1)s %(p2)s', {'p1': 'something'}),
matchers.raises(KeyError))
def test_sa_warning(self):
diff --git a/keystone-moon/keystone/tests/unit/token/test_backends.py b/keystone-moon/keystone/tests/unit/token/test_backends.py
new file mode 100644
index 00000000..feb7e017
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/token/test_backends.py
@@ -0,0 +1,551 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import hashlib
+import uuid
+
+from keystoneclient.common import cms
+from oslo_config import cfg
+from oslo_utils import timeutils
+import six
+from six.moves import range
+
+from keystone import exception
+from keystone.tests import unit
+from keystone.tests.unit import utils as test_utils
+from keystone.token import provider
+
+
+CONF = cfg.CONF
+NULL_OBJECT = object()
+
+
+class TokenTests(object):
+ def _create_token_id(self):
+ # Use a token signed by the cms module
+ token_id = ""
+ for i in range(1, 20):
+ token_id += uuid.uuid4().hex
+ return cms.cms_sign_token(token_id,
+ CONF.signing.certfile,
+ CONF.signing.keyfile)
+
+ def _assert_revoked_token_list_matches_token_persistence(
+ self, revoked_token_id_list):
+ # Assert that the list passed in matches the list returned by the
+ # token persistence service
+ persistence_list = [
+ x['id']
+ for x in self.token_provider_api.list_revoked_tokens()
+ ]
+ self.assertEqual(persistence_list, revoked_token_id_list)
+
+ def test_token_crud(self):
+ token_id = self._create_token_id()
+ data = {'id': token_id, 'a': 'b',
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'},
+ 'token_data': {'access': {'token': {
+ 'audit_ids': [uuid.uuid4().hex]}}}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ expires = data_ref.pop('expires')
+ data_ref.pop('user_id')
+ self.assertIsInstance(expires, datetime.datetime)
+ data_ref.pop('id')
+ data.pop('id')
+ self.assertDictEqual(data, data_ref)
+
+ new_data_ref = self.token_provider_api._persistence.get_token(token_id)
+ expires = new_data_ref.pop('expires')
+ self.assertIsInstance(expires, datetime.datetime)
+ new_data_ref.pop('user_id')
+ new_data_ref.pop('id')
+
+ self.assertEqual(data, new_data_ref)
+
+ self.token_provider_api._persistence.delete_token(token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token, token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api._persistence.delete_token, token_id)
+
+ def create_token_sample_data(self, token_id=None, tenant_id=None,
+ trust_id=None, user_id=None, expires=None):
+ if token_id is None:
+ token_id = self._create_token_id()
+ if user_id is None:
+ user_id = 'testuserid'
+ # FIXME(morganfainberg): These tokens look nothing like "Real" tokens.
+ # This should be fixed when token issuance is cleaned up.
+ data = {'id': token_id, 'a': 'b',
+ 'user': {'id': user_id},
+ 'access': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
+ if tenant_id is not None:
+ data['tenant'] = {'id': tenant_id, 'name': tenant_id}
+ if tenant_id is NULL_OBJECT:
+ data['tenant'] = None
+ if expires is not None:
+ data['expires'] = expires
+ if trust_id is not None:
+ data['trust_id'] = trust_id
+ data['access'].setdefault('trust', {})
+ # Testuserid2 is used here since the trustee differs from the
+ # trustor in impersonation cases and therefore should not match
+ # the token's user_id.
+ data['access']['trust']['trustee_user_id'] = 'testuserid2'
+ data['token_version'] = provider.V2
+ # Issue token stores a copy of all token data at token['token_data'].
+ # This emulates that assumption as part of the test.
+ data['token_data'] = copy.deepcopy(data)
+ new_token = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ return new_token['id'], data
+
+ def test_delete_tokens(self):
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid')
+ self.assertEqual(0, len(tokens))
+ token_id1, data = self.create_token_sample_data(
+ tenant_id='testtenantid')
+ token_id2, data = self.create_token_sample_data(
+ tenant_id='testtenantid')
+ token_id3, data = self.create_token_sample_data(
+ tenant_id='testtenantid',
+ user_id='testuserid1')
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid')
+ self.assertEqual(2, len(tokens))
+ self.assertIn(token_id2, tokens)
+ self.assertIn(token_id1, tokens)
+ self.token_provider_api._persistence.delete_tokens(
+ user_id='testuserid',
+ tenant_id='testtenantid')
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid')
+ self.assertEqual(0, len(tokens))
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ token_id1)
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ token_id2)
+
+ self.token_provider_api._persistence.get_token(token_id3)
+
+ def test_delete_tokens_trust(self):
+ tokens = self.token_provider_api._persistence._list_tokens(
+ user_id='testuserid')
+ self.assertEqual(0, len(tokens))
+ token_id1, data = self.create_token_sample_data(
+ tenant_id='testtenantid',
+ trust_id='testtrustid')
+ token_id2, data = self.create_token_sample_data(
+ tenant_id='testtenantid',
+ user_id='testuserid1',
+ trust_id='testtrustid1')
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid')
+ self.assertEqual(1, len(tokens))
+ self.assertIn(token_id1, tokens)
+ self.token_provider_api._persistence.delete_tokens(
+ user_id='testuserid',
+ tenant_id='testtenantid',
+ trust_id='testtrustid')
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ token_id1)
+ self.token_provider_api._persistence.get_token(token_id2)
+
+ def _test_token_list(self, token_list_fn):
+ tokens = token_list_fn('testuserid')
+ self.assertEqual(0, len(tokens))
+ token_id1, data = self.create_token_sample_data()
+ tokens = token_list_fn('testuserid')
+ self.assertEqual(1, len(tokens))
+ self.assertIn(token_id1, tokens)
+ token_id2, data = self.create_token_sample_data()
+ tokens = token_list_fn('testuserid')
+ self.assertEqual(2, len(tokens))
+ self.assertIn(token_id2, tokens)
+ self.assertIn(token_id1, tokens)
+ self.token_provider_api._persistence.delete_token(token_id1)
+ tokens = token_list_fn('testuserid')
+ self.assertIn(token_id2, tokens)
+ self.assertNotIn(token_id1, tokens)
+ self.token_provider_api._persistence.delete_token(token_id2)
+ tokens = token_list_fn('testuserid')
+ self.assertNotIn(token_id2, tokens)
+ self.assertNotIn(token_id1, tokens)
+
+ # tenant-specific tokens
+ tenant1 = uuid.uuid4().hex
+ tenant2 = uuid.uuid4().hex
+ token_id3, data = self.create_token_sample_data(tenant_id=tenant1)
+ token_id4, data = self.create_token_sample_data(tenant_id=tenant2)
+ # test for existing but empty tenant (LP:1078497)
+ token_id5, data = self.create_token_sample_data(tenant_id=NULL_OBJECT)
+ tokens = token_list_fn('testuserid')
+ self.assertEqual(3, len(tokens))
+ self.assertNotIn(token_id1, tokens)
+ self.assertNotIn(token_id2, tokens)
+ self.assertIn(token_id3, tokens)
+ self.assertIn(token_id4, tokens)
+ self.assertIn(token_id5, tokens)
+ tokens = token_list_fn('testuserid', tenant2)
+ self.assertEqual(1, len(tokens))
+ self.assertNotIn(token_id1, tokens)
+ self.assertNotIn(token_id2, tokens)
+ self.assertNotIn(token_id3, tokens)
+ self.assertIn(token_id4, tokens)
+
+ def test_token_list(self):
+ self._test_token_list(
+ self.token_provider_api._persistence._list_tokens)
+
+ def test_token_list_trust(self):
+ trust_id = uuid.uuid4().hex
+ token_id5, data = self.create_token_sample_data(trust_id=trust_id)
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid', trust_id=trust_id)
+ self.assertEqual(1, len(tokens))
+ self.assertIn(token_id5, tokens)
+
+ def test_get_token_returns_not_found(self):
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ uuid.uuid4().hex)
+
+ def test_delete_token_returns_not_found(self):
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.delete_token,
+ uuid.uuid4().hex)
+
+ def test_expired_token(self):
+ token_id = uuid.uuid4().hex
+ expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
+ data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+ 'expires': expire_time,
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ data_ref.pop('user_id')
+ self.assertDictEqual(data, data_ref)
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ token_id)
+
+ def test_null_expires_token(self):
+ token_id = uuid.uuid4().hex
+ data = {'id': token_id, 'id_hash': token_id, 'a': 'b', 'expires': None,
+ 'user': {'id': 'testuserid'}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ self.assertIsNotNone(data_ref['expires'])
+ new_data_ref = self.token_provider_api._persistence.get_token(token_id)
+
+ # MySQL doesn't store microseconds, so discard them before testing
+ data_ref['expires'] = data_ref['expires'].replace(microsecond=0)
+ new_data_ref['expires'] = new_data_ref['expires'].replace(
+ microsecond=0)
+
+ self.assertEqual(data_ref, new_data_ref)
+
+ def check_list_revoked_tokens(self, token_infos):
+ revocation_list = self.token_provider_api.list_revoked_tokens()
+ revoked_ids = [x['id'] for x in revocation_list]
+ revoked_audit_ids = [x['audit_id'] for x in revocation_list]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ for token_id, audit_id in token_infos:
+ self.assertIn(token_id, revoked_ids)
+ self.assertIn(audit_id, revoked_audit_ids)
+
+ def delete_token(self):
+ token_id = uuid.uuid4().hex
+ audit_id = uuid.uuid4().hex
+ data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+ 'user': {'id': 'testuserid'},
+ 'token_data': {'token': {'audit_ids': [audit_id]}}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ self.token_provider_api._persistence.delete_token(token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ data_ref['id'])
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api._persistence.delete_token,
+ data_ref['id'])
+ return (token_id, audit_id)
+
+ def test_list_revoked_tokens_returns_empty_list(self):
+ revoked_ids = [x['id']
+ for x in self.token_provider_api.list_revoked_tokens()]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ self.assertEqual([], revoked_ids)
+
+ def test_list_revoked_tokens_for_single_token(self):
+ self.check_list_revoked_tokens([self.delete_token()])
+
+ def test_list_revoked_tokens_for_multiple_tokens(self):
+ self.check_list_revoked_tokens([self.delete_token()
+ for x in range(2)])
+
+ def test_flush_expired_token(self):
+ token_id = uuid.uuid4().hex
+ expire_time = timeutils.utcnow() - datetime.timedelta(minutes=1)
+ data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+ 'expires': expire_time,
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ data_ref.pop('user_id')
+ self.assertDictEqual(data, data_ref)
+
+ token_id = uuid.uuid4().hex
+ expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
+ data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+ 'expires': expire_time,
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'}}
+ data_ref = self.token_provider_api._persistence.create_token(token_id,
+ data)
+ data_ref.pop('user_id')
+ self.assertDictEqual(data, data_ref)
+
+ self.token_provider_api._persistence.flush_expired_tokens()
+ tokens = self.token_provider_api._persistence._list_tokens(
+ 'testuserid')
+ self.assertEqual(1, len(tokens))
+ self.assertIn(token_id, tokens)
+
+ @unit.skip_if_cache_disabled('token')
+ def test_revocation_list_cache(self):
+ expire_time = timeutils.utcnow() + datetime.timedelta(minutes=10)
+ token_id = uuid.uuid4().hex
+ token_data = {'id_hash': token_id, 'id': token_id, 'a': 'b',
+ 'expires': expire_time,
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'},
+ 'token_data': {'token': {
+ 'audit_ids': [uuid.uuid4().hex]}}}
+ token2_id = uuid.uuid4().hex
+ token2_data = {'id_hash': token2_id, 'id': token2_id, 'a': 'b',
+ 'expires': expire_time,
+ 'trust_id': None,
+ 'user': {'id': 'testuserid'},
+ 'token_data': {'token': {
+ 'audit_ids': [uuid.uuid4().hex]}}}
+ # Create two tokens.
+ self.token_provider_api._persistence.create_token(token_id,
+ token_data)
+ self.token_provider_api._persistence.create_token(token2_id,
+ token2_data)
+ # Verify the revocation list is empty.
+ self.assertEqual(
+ [], self.token_provider_api._persistence.list_revoked_tokens())
+ self.assertEqual([], self.token_provider_api.list_revoked_tokens())
+ # Delete a token directly, bypassing the manager.
+ self.token_provider_api._persistence.driver.delete_token(token_id)
+ # Verify the revocation list is still empty.
+ self.assertEqual(
+ [], self.token_provider_api._persistence.list_revoked_tokens())
+ self.assertEqual([], self.token_provider_api.list_revoked_tokens())
+ # Invalidate the revocation list.
+ self.token_provider_api._persistence.invalidate_revocation_list()
+ # Verify the deleted token is in the revocation list.
+ revoked_ids = [x['id']
+ for x in self.token_provider_api.list_revoked_tokens()]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ self.assertIn(token_id, revoked_ids)
+ # Delete the second token, this time through the manager.
+ self.token_provider_api._persistence.delete_token(token2_id)
+ revoked_ids = [x['id']
+ for x in self.token_provider_api.list_revoked_tokens()]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ # Verify both tokens are in the revocation list.
+ self.assertIn(token_id, revoked_ids)
+ self.assertIn(token2_id, revoked_ids)
+
+ def _test_predictable_revoked_pki_token_id(self, hash_fn):
+ token_id = self._create_token_id()
+ token_id_hash = hash_fn(token_id.encode('utf-8')).hexdigest()
+ token = {'user': {'id': uuid.uuid4().hex},
+ 'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
+
+ self.token_provider_api._persistence.create_token(token_id, token)
+ self.token_provider_api._persistence.delete_token(token_id)
+
+ revoked_ids = [x['id']
+ for x in self.token_provider_api.list_revoked_tokens()]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ self.assertIn(token_id_hash, revoked_ids)
+ self.assertNotIn(token_id, revoked_ids)
+ for t in self.token_provider_api._persistence.list_revoked_tokens():
+ self.assertIn('expires', t)
+
+ def test_predictable_revoked_pki_token_id_default(self):
+ self._test_predictable_revoked_pki_token_id(hashlib.md5)
+
+ def test_predictable_revoked_pki_token_id_sha256(self):
+ self.config_fixture.config(group='token', hash_algorithm='sha256')
+ self._test_predictable_revoked_pki_token_id(hashlib.sha256)
+
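Both tests rely on the revocation list recording PKI tokens by digest while uuid tokens are short enough to be recorded verbatim. A sketch of that distinction; the helper and its length cutoff are illustrative, not keystone's exact logic:

    import hashlib

    def revocation_list_id(token_id, hash_algorithm='md5'):
        # uuid token ids are 32 hex characters; anything longer is
        # treated as a PKI blob and recorded by digest. The algorithm
        # follows the [token] hash_algorithm option (md5 by default).
        if len(token_id) <= 32:
            return token_id
        hash_fn = getattr(hashlib, hash_algorithm)
        return hash_fn(token_id.encode('utf-8')).hexdigest()
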
+ def test_predictable_revoked_uuid_token_id(self):
+ token_id = uuid.uuid4().hex
+ token = {'user': {'id': uuid.uuid4().hex},
+ 'token_data': {'token': {'audit_ids': [uuid.uuid4().hex]}}}
+
+ self.token_provider_api._persistence.create_token(token_id, token)
+ self.token_provider_api._persistence.delete_token(token_id)
+
+ revoked_tokens = self.token_provider_api.list_revoked_tokens()
+ revoked_ids = [x['id'] for x in revoked_tokens]
+ self._assert_revoked_token_list_matches_token_persistence(revoked_ids)
+ self.assertIn(token_id, revoked_ids)
+ for t in revoked_tokens:
+ self.assertIn('expires', t)
+
+ def test_create_unicode_token_id(self):
+ token_id = six.text_type(self._create_token_id())
+ self.create_token_sample_data(token_id=token_id)
+ self.token_provider_api._persistence.get_token(token_id)
+
+ def test_create_unicode_user_id(self):
+ user_id = six.text_type(uuid.uuid4().hex)
+ token_id, data = self.create_token_sample_data(user_id=user_id)
+ self.token_provider_api._persistence.get_token(token_id)
+
+ def test_token_expire_timezone(self):
+
+ @test_utils.timezone
+ def _create_token(expire_time):
+ token_id = uuid.uuid4().hex
+ user_id = six.text_type(uuid.uuid4().hex)
+ return self.create_token_sample_data(token_id=token_id,
+ user_id=user_id,
+ expires=expire_time)
+
+ for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
+ test_utils.TZ = 'UTC' + d
+ expire_time = timeutils.utcnow() + datetime.timedelta(minutes=1)
+ token_id, data_in = _create_token(expire_time)
+ data_get = self.token_provider_api._persistence.get_token(token_id)
+
+ self.assertEqual(data_in['id'], data_get['id'],
+ 'TZ=%s' % test_utils.TZ)
+
+ expire_time_expired = (
+ timeutils.utcnow() + datetime.timedelta(minutes=-1))
+ token_id, data_in = _create_token(expire_time_expired)
+ self.assertRaises(exception.TokenNotFound,
+ self.token_provider_api._persistence.get_token,
+ data_in['id'])
+
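The TZ-rotating loop above pins down one invariant: expiry is evaluated in naive UTC, so the process timezone never changes the outcome. A sketch of that comparison using the same oslo_utils helpers (function name is illustrative):

    from oslo_utils import timeutils

    def is_expired(expires_at):
        # normalize_time() converts an aware datetime to naive UTC, so
        # the comparison is TZ-independent, matching the test above.
        return timeutils.normalize_time(expires_at) < timeutils.utcnow()
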
+
+class TokenCacheInvalidation(object):
+ def _create_test_data(self):
+ self.user = unit.new_user_ref(
+ domain_id=CONF.identity.default_domain_id)
+ self.tenant = unit.new_project_ref(
+ domain_id=CONF.identity.default_domain_id)
+
+ # Create an equivalent of a scoped token
+ token_dict = {'user': self.user, 'tenant': self.tenant,
+ 'metadata': {}, 'id': 'placeholder'}
+ token_id, data = self.token_provider_api.issue_v2_token(token_dict)
+ self.scoped_token_id = token_id
+
+ # ...and an unscoped one
+ token_dict = {'user': self.user, 'tenant': None,
+ 'metadata': {}, 'id': 'placeholder'}
+ token_id, data = self.token_provider_api.issue_v2_token(token_dict)
+ self.unscoped_token_id = token_id
+
+ # Validate them, in the various ways possible - this will load the
+ # responses into the token cache.
+ self._check_scoped_tokens_are_valid()
+ self._check_unscoped_tokens_are_valid()
+
+ def _check_unscoped_tokens_are_invalid(self):
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ self.unscoped_token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_v2_token,
+ self.unscoped_token_id)
+
+ def _check_scoped_tokens_are_invalid(self):
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ self.scoped_token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_token,
+ self.scoped_token_id,
+ self.tenant['id'])
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_v2_token,
+ self.scoped_token_id)
+ self.assertRaises(
+ exception.TokenNotFound,
+ self.token_provider_api.validate_v2_token,
+ self.scoped_token_id,
+ self.tenant['id'])
+
+ def _check_scoped_tokens_are_valid(self):
+ self.token_provider_api.validate_token(self.scoped_token_id)
+ self.token_provider_api.validate_token(
+ self.scoped_token_id, belongs_to=self.tenant['id'])
+ self.token_provider_api.validate_v2_token(self.scoped_token_id)
+ self.token_provider_api.validate_v2_token(
+ self.scoped_token_id, belongs_to=self.tenant['id'])
+
+ def _check_unscoped_tokens_are_valid(self):
+ self.token_provider_api.validate_token(self.unscoped_token_id)
+ self.token_provider_api.validate_v2_token(self.unscoped_token_id)
+
+ def test_delete_unscoped_token(self):
+ self.token_provider_api._persistence.delete_token(
+ self.unscoped_token_id)
+ self._check_unscoped_tokens_are_invalid()
+ self._check_scoped_tokens_are_valid()
+
+ def test_delete_scoped_token_by_id(self):
+ self.token_provider_api._persistence.delete_token(self.scoped_token_id)
+ self._check_scoped_tokens_are_invalid()
+ self._check_unscoped_tokens_are_valid()
+
+ def test_delete_scoped_token_by_user(self):
+ self.token_provider_api._persistence.delete_tokens(self.user['id'])
+ # Since we are deleting all tokens for this user, they should all
+ # now be invalid.
+ self._check_scoped_tokens_are_invalid()
+ self._check_unscoped_tokens_are_invalid()
+
+ def test_delete_scoped_token_by_user_and_tenant(self):
+ self.token_provider_api._persistence.delete_tokens(
+ self.user['id'],
+ tenant_id=self.tenant['id'])
+ self._check_scoped_tokens_are_invalid()
+ self._check_unscoped_tokens_are_valid()
diff --git a/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py b/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
index bfb590db..5f51d7b3 100644
--- a/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
+++ b/keystone-moon/keystone/tests/unit/token/test_fernet_provider.py
@@ -22,8 +22,8 @@ from six.moves import urllib
from keystone.common import config
from keystone.common import utils
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants as federation_constants
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
@@ -48,17 +48,25 @@ class TestFernetTokenProvider(unit.TestCase):
def test_needs_persistence_returns_false(self):
self.assertFalse(self.provider.needs_persistence())
- def test_invalid_v3_token_raises_404(self):
- self.assertRaises(
+ def test_invalid_v3_token_raises_token_not_found(self):
+ # NOTE(lbragstad): Here we use the validate_non_persistent_token()
+ # method because the validate_v3_token() method is strictly for
+ # validating UUID formatted tokens. It is written to assume cached
+ # tokens from a backend, whereas validate_non_persistent_token() is not.
+ token_id = uuid.uuid4().hex
+ e = self.assertRaises(
exception.TokenNotFound,
- self.provider.validate_v3_token,
- uuid.uuid4().hex)
+ self.provider.validate_non_persistent_token,
+ token_id)
+ self.assertIn(token_id, u'%s' % e)
- def test_invalid_v2_token_raises_404(self):
- self.assertRaises(
+ def test_invalid_v2_token_raises_token_not_found(self):
+ token_id = uuid.uuid4().hex
+ e = self.assertRaises(
exception.TokenNotFound,
- self.provider.validate_v2_token,
- uuid.uuid4().hex)
+ self.provider.validate_non_persistent_token,
+ token_id)
+ self.assertIn(token_id, u'%s' % e)
class TestValidate(unit.TestCase):
@@ -91,7 +99,6 @@ class TestValidate(unit.TestCase):
token = token_data['token']
self.assertIsInstance(token['audit_ids'], list)
self.assertIsInstance(token['expires_at'], str)
- self.assertEqual({}, token['extras'])
self.assertIsInstance(token['issued_at'], str)
self.assertEqual(method_names, token['methods'])
exp_user_info = {
@@ -200,7 +207,7 @@ class TestValidate(unit.TestCase):
def test_validate_v3_token_validation_error_exc(self):
# When the token format isn't recognized, TokenNotFound is raised.
- # A uuid string isn't a valid fernet token.
+ # A uuid string isn't a valid Fernet token.
token_id = uuid.uuid4().hex
self.assertRaises(exception.TokenNotFound,
self.token_provider_api.validate_v3_token, token_id)
@@ -214,10 +221,14 @@ class TestTokenFormatter(unit.TestCase):
def test_restore_padding(self):
# 'a' will result in '==' padding, 'aa' will result in '=' padding, and
# 'aaa' will result in no padding.
- strings_to_test = ['a', 'aa', 'aaa']
-
- for string in strings_to_test:
- encoded_string = base64.urlsafe_b64encode(string)
+ binary_to_test = [b'a', b'aa', b'aaa']
+
+ for binary in binary_to_test:
+ # base64.urlsafe_b64encode takes six.binary_type and returns
+ # six.binary_type.
+ encoded_string = base64.urlsafe_b64encode(binary)
+ encoded_string = encoded_string.decode('utf-8')
+ # encoded_string is now six.text_type.
encoded_str_without_padding = encoded_string.rstrip('=')
self.assertFalse(encoded_str_without_padding.endswith('='))
encoded_str_with_padding_restored = (
@@ -231,36 +242,57 @@ class TestTokenFormatter(unit.TestCase):
second_value = uuid.uuid4().hex
payload = (first_value, second_value)
msgpack_payload = msgpack.packb(payload)
+ # msgpack_payload is six.binary_type.
+
+ tf = token_formatters.TokenFormatter()
- # NOTE(lbragstad): This method perserves the way that keystone used to
+ # NOTE(lbragstad): This method preserves the way that keystone used to
# percent encode the tokens, prior to bug #1491926.
def legacy_pack(payload):
- tf = token_formatters.TokenFormatter()
+ # payload is six.binary_type.
encrypted_payload = tf.crypto.encrypt(payload)
+ # encrypted_payload is six.binary_type.
# the encrypted_payload is returned with padding appended
- self.assertTrue(encrypted_payload.endswith('='))
+ self.assertTrue(encrypted_payload.endswith(b'='))
# using urllib.parse.quote will percent encode the padding, like
# keystone did in Kilo.
percent_encoded_payload = urllib.parse.quote(encrypted_payload)
+ # percent_encoded_payload is six.text_type.
- # ensure that the padding was actaully percent encoded
+ # ensure that the padding was actually percent encoded
self.assertTrue(percent_encoded_payload.endswith('%3D'))
return percent_encoded_payload
token_with_legacy_padding = legacy_pack(msgpack_payload)
- tf = token_formatters.TokenFormatter()
+ # token_with_legacy_padding is six.text_type.
# demonstrate that we can validate a payload that has been percent
# encoded with the Fernet logic that existed in Kilo
serialized_payload = tf.unpack(token_with_legacy_padding)
+ # serialized_payload is six.binary_type.
returned_payload = msgpack.unpackb(serialized_payload)
- self.assertEqual(first_value, returned_payload[0])
- self.assertEqual(second_value, returned_payload[1])
+ # returned_payload contains six.binary_type.
+ self.assertEqual(first_value, returned_payload[0].decode('utf-8'))
+ self.assertEqual(second_value, returned_payload[1].decode('utf-8'))
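
The legacy path round-trips because base64 padding is fully determined by length, so stripped (or percent-encoded) '=' characters can always be reconstructed. A minimal sketch of that restoration, mirroring what restore_padding() is expected to do:

    def restore_padding(token):
        # a urlsafe-base64 string is a multiple of 4 characters once
        # padded, so the number of missing '=' follows from len() % 4
        mod = len(token) % 4
        if mod == 2:
            token += '=='
        elif mod == 3:
            token += '='
        elif mod != 0:
            raise ValueError('Invalid base64 string: %s' % token)
        return token
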
class TestPayloads(unit.TestCase):
+ def assertTimestampsEqual(self, expected, actual):
+ # The timestamp that we get back when parsing the payload may not
+ # exactly match the timestamp that was put in the payload due to
+ # conversion to and from a float.
+
+ exp_time = timeutils.parse_isotime(expected)
+ actual_time = timeutils.parse_isotime(actual)
+
+ # the granularity of the timestamp string is microseconds, and it's
+ # only the last digit in the representation that differs, so use a
+ # delta just above that granularity.
+ return self.assertCloseEnoughForGovernmentWork(exp_time, actual_time,
+ delta=1e-05)
+
def test_uuid_hex_to_byte_conversions(self):
payload_cls = token_formatters.BasePayload
@@ -274,249 +306,137 @@ class TestPayloads(unit.TestCase):
expected_uuid_in_bytes)
self.assertEqual(expected_hex_uuid, actual_hex_uuid)
- def test_time_string_to_int_conversions(self):
+ def test_time_string_to_float_conversions(self):
payload_cls = token_formatters.BasePayload
- expected_time_str = utils.isotime(subsecond=True)
- time_obj = timeutils.parse_isotime(expected_time_str)
- expected_time_int = (
+ original_time_str = utils.isotime(subsecond=True)
+ time_obj = timeutils.parse_isotime(original_time_str)
+ expected_time_float = (
(timeutils.normalize_time(time_obj) -
datetime.datetime.utcfromtimestamp(0)).total_seconds())
- actual_time_int = payload_cls._convert_time_string_to_int(
- expected_time_str)
- self.assertEqual(expected_time_int, actual_time_int)
-
- actual_time_str = payload_cls._convert_int_to_time_string(
- actual_time_int)
+ # NOTE(lbragstad): The token expiration time for Fernet tokens is
+ # passed in the payload of the token. This is different from the token
+ # creation time, which is handled by Fernet and doesn't support
+ # subsecond precision because it is a timestamp integer.
+ self.assertIsInstance(expected_time_float, float)
+
+ actual_time_float = payload_cls._convert_time_string_to_float(
+ original_time_str)
+ self.assertIsInstance(actual_time_float, float)
+ self.assertEqual(expected_time_float, actual_time_float)
+
+ # Generate expected_time_str using the same time float. Using
+ # original_time_str from utils.isotime will occasionally fail due to
+ # floating point rounding differences.
+ time_object = datetime.datetime.utcfromtimestamp(actual_time_float)
+ expected_time_str = utils.isotime(time_object, subsecond=True)
+
+ actual_time_str = payload_cls._convert_float_to_time_string(
+ actual_time_float)
self.assertEqual(expected_time_str, actual_time_str)
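
The conversions under test map an ISO 8601 string to seconds-since-epoch as a float (preserving subsecond precision) and back. A self-contained sketch under the same definitions; the standalone function names are illustrative:

    import datetime

    from keystone.common import utils
    from oslo_utils import timeutils

    EPOCH = datetime.datetime.utcfromtimestamp(0)

    def time_string_to_float(time_str):
        time_obj = timeutils.parse_isotime(time_str)
        return (timeutils.normalize_time(time_obj) - EPOCH).total_seconds()

    def float_to_time_string(time_float):
        time_obj = datetime.datetime.utcfromtimestamp(time_float)
        return utils.isotime(time_obj, subsecond=True)
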
- def test_unscoped_payload(self):
- exp_user_id = uuid.uuid4().hex
- exp_methods = ['password']
+ def _test_payload(self, payload_class, exp_user_id=None, exp_methods=None,
+ exp_project_id=None, exp_domain_id=None,
+ exp_trust_id=None, exp_federated_info=None,
+ exp_access_token_id=None):
+ exp_user_id = exp_user_id or uuid.uuid4().hex
+ exp_methods = exp_methods or ['password']
exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
exp_audit_ids = [provider.random_urlsafe_str()]
- payload = token_formatters.UnscopedPayload.assemble(
- exp_user_id, exp_methods, exp_expires_at, exp_audit_ids)
+ payload = payload_class.assemble(
+ exp_user_id, exp_methods, exp_project_id, exp_domain_id,
+ exp_expires_at, exp_audit_ids, exp_trust_id, exp_federated_info,
+ exp_access_token_id)
- (user_id, methods, expires_at, audit_ids) = (
- token_formatters.UnscopedPayload.disassemble(payload))
+ (user_id, methods, project_id,
+ domain_id, expires_at, audit_ids,
+ trust_id, federated_info,
+ access_token_id) = payload_class.disassemble(payload)
self.assertEqual(exp_user_id, user_id)
self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_expires_at, expires_at)
+ self.assertTimestampsEqual(exp_expires_at, expires_at)
self.assertEqual(exp_audit_ids, audit_ids)
-
- def test_project_scoped_payload(self):
- exp_user_id = uuid.uuid4().hex
- exp_methods = ['password']
- exp_project_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
-
- payload = token_formatters.ProjectScopedPayload.assemble(
- exp_user_id, exp_methods, exp_project_id, exp_expires_at,
- exp_audit_ids)
-
- (user_id, methods, project_id, expires_at, audit_ids) = (
- token_formatters.ProjectScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
self.assertEqual(exp_project_id, project_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
+ self.assertEqual(exp_domain_id, domain_id)
+ self.assertEqual(exp_trust_id, trust_id)
+ self.assertEqual(exp_access_token_id, access_token_id)
- def test_domain_scoped_payload(self):
- exp_user_id = uuid.uuid4().hex
- exp_methods = ['password']
- exp_domain_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
+ if exp_federated_info:
+ self.assertDictEqual(exp_federated_info, federated_info)
+ else:
+ self.assertIsNone(federated_info)
- payload = token_formatters.DomainScopedPayload.assemble(
- exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
- exp_audit_ids)
+ def test_unscoped_payload(self):
+ self._test_payload(token_formatters.UnscopedPayload)
- (user_id, methods, domain_id, expires_at, audit_ids) = (
- token_formatters.DomainScopedPayload.disassemble(payload))
+ def test_project_scoped_payload(self):
+ self._test_payload(token_formatters.ProjectScopedPayload,
+ exp_project_id=uuid.uuid4().hex)
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_domain_id, domain_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
+ def test_domain_scoped_payload(self):
+ self._test_payload(token_formatters.DomainScopedPayload,
+ exp_domain_id=uuid.uuid4().hex)
def test_domain_scoped_payload_with_default_domain(self):
- exp_user_id = uuid.uuid4().hex
- exp_methods = ['password']
- exp_domain_id = CONF.identity.default_domain_id
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
-
- payload = token_formatters.DomainScopedPayload.assemble(
- exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
- exp_audit_ids)
-
- (user_id, methods, domain_id, expires_at, audit_ids) = (
- token_formatters.DomainScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_domain_id, domain_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
+ self._test_payload(token_formatters.DomainScopedPayload,
+ exp_domain_id=CONF.identity.default_domain_id)
def test_trust_scoped_payload(self):
- exp_user_id = uuid.uuid4().hex
- exp_methods = ['password']
- exp_project_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
- exp_trust_id = uuid.uuid4().hex
-
- payload = token_formatters.TrustScopedPayload.assemble(
- exp_user_id, exp_methods, exp_project_id, exp_expires_at,
- exp_audit_ids, exp_trust_id)
-
- (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
- token_formatters.TrustScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_project_id, project_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
- self.assertEqual(exp_trust_id, trust_id)
-
- def _test_unscoped_payload_with_user_id(self, exp_user_id):
- exp_methods = ['password']
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
-
- payload = token_formatters.UnscopedPayload.assemble(
- exp_user_id, exp_methods, exp_expires_at, exp_audit_ids)
-
- (user_id, methods, expires_at, audit_ids) = (
- token_formatters.UnscopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
+ self._test_payload(token_formatters.TrustScopedPayload,
+ exp_project_id=uuid.uuid4().hex,
+ exp_trust_id=uuid.uuid4().hex)
def test_unscoped_payload_with_non_uuid_user_id(self):
- self._test_unscoped_payload_with_user_id('someNonUuidUserId')
+ self._test_payload(token_formatters.UnscopedPayload,
+ exp_user_id='someNonUuidUserId')
def test_unscoped_payload_with_16_char_non_uuid_user_id(self):
- self._test_unscoped_payload_with_user_id('0123456789abcdef')
-
- def _test_project_scoped_payload_with_ids(self, exp_user_id,
- exp_project_id):
- exp_methods = ['password']
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
+ self._test_payload(token_formatters.UnscopedPayload,
+ exp_user_id='0123456789abcdef')
- payload = token_formatters.ProjectScopedPayload.assemble(
- exp_user_id, exp_methods, exp_project_id, exp_expires_at,
- exp_audit_ids)
+ def test_project_scoped_payload_with_non_uuid_ids(self):
+ self._test_payload(token_formatters.ProjectScopedPayload,
+ exp_user_id='someNonUuidUserId',
+ exp_project_id='someNonUuidProjectId')
- (user_id, methods, project_id, expires_at, audit_ids) = (
- token_formatters.ProjectScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_project_id, project_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
-
- def test_project_scoped_payload_with_non_uuid_user_id(self):
- self._test_project_scoped_payload_with_ids('someNonUuidUserId',
- 'someNonUuidProjectId')
-
- def test_project_scoped_payload_with_16_char_non_uuid_user_id(self):
- self._test_project_scoped_payload_with_ids('0123456789abcdef',
- '0123456789abcdef')
-
- def _test_domain_scoped_payload_with_user_id(self, exp_user_id):
- exp_methods = ['password']
- exp_domain_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
-
- payload = token_formatters.DomainScopedPayload.assemble(
- exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
- exp_audit_ids)
-
- (user_id, methods, domain_id, expires_at, audit_ids) = (
- token_formatters.DomainScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_domain_id, domain_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
+ def test_project_scoped_payload_with_16_char_non_uuid_ids(self):
+ self._test_payload(token_formatters.ProjectScopedPayload,
+ exp_user_id='0123456789abcdef',
+ exp_project_id='0123456789abcdef')
def test_domain_scoped_payload_with_non_uuid_user_id(self):
- self._test_domain_scoped_payload_with_user_id('nonUuidUserId')
+ self._test_payload(token_formatters.DomainScopedPayload,
+ exp_user_id='nonUuidUserId',
+ exp_domain_id=uuid.uuid4().hex)
def test_domain_scoped_payload_with_16_char_non_uuid_user_id(self):
- self._test_domain_scoped_payload_with_user_id('0123456789abcdef')
-
- def _test_trust_scoped_payload_with_ids(self, exp_user_id, exp_project_id):
- exp_methods = ['password']
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
- exp_trust_id = uuid.uuid4().hex
-
- payload = token_formatters.TrustScopedPayload.assemble(
- exp_user_id, exp_methods, exp_project_id, exp_expires_at,
- exp_audit_ids, exp_trust_id)
-
- (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
- token_formatters.TrustScopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_project_id, project_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
- self.assertEqual(exp_trust_id, trust_id)
-
- def test_trust_scoped_payload_with_non_uuid_user_id(self):
- self._test_trust_scoped_payload_with_ids('someNonUuidUserId',
- 'someNonUuidProjectId')
-
- def test_trust_scoped_payload_with_16_char_non_uuid_user_id(self):
- self._test_trust_scoped_payload_with_ids('0123456789abcdef',
- '0123456789abcdef')
+ self._test_payload(token_formatters.DomainScopedPayload,
+ exp_user_id='0123456789abcdef',
+ exp_domain_id=uuid.uuid4().hex)
+
+ def test_trust_scoped_payload_with_non_uuid_ids(self):
+ self._test_payload(token_formatters.TrustScopedPayload,
+ exp_user_id='someNonUuidUserId',
+ exp_project_id='someNonUuidProjectId',
+ exp_trust_id=uuid.uuid4().hex)
+
+ def test_trust_scoped_payload_with_16_char_non_uuid_ids(self):
+ self._test_payload(token_formatters.TrustScopedPayload,
+ exp_user_id='0123456789abcdef',
+ exp_project_id='0123456789abcdef',
+ exp_trust_id=uuid.uuid4().hex)
def _test_federated_payload_with_ids(self, exp_user_id, exp_group_id):
- exp_methods = ['password']
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
exp_federated_info = {'group_ids': [{'id': exp_group_id}],
'idp_id': uuid.uuid4().hex,
'protocol_id': uuid.uuid4().hex}
- payload = token_formatters.FederatedUnscopedPayload.assemble(
- exp_user_id, exp_methods, exp_expires_at, exp_audit_ids,
- exp_federated_info)
-
- (user_id, methods, expires_at, audit_ids, federated_info) = (
- token_formatters.FederatedUnscopedPayload.disassemble(payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
- self.assertEqual(exp_federated_info['group_ids'][0]['id'],
- federated_info['group_ids'][0]['id'])
- self.assertEqual(exp_federated_info['idp_id'],
- federated_info['idp_id'])
- self.assertEqual(exp_federated_info['protocol_id'],
- federated_info['protocol_id'])
+ self._test_payload(token_formatters.FederatedUnscopedPayload,
+ exp_user_id=exp_user_id,
+ exp_federated_info=exp_federated_info)
def test_federated_payload_with_non_uuid_ids(self):
self._test_federated_payload_with_ids('someNonUuidUserId',
@@ -527,56 +447,31 @@ class TestPayloads(unit.TestCase):
'0123456789abcdef')
def test_federated_project_scoped_payload(self):
- exp_user_id = 'someNonUuidUserId'
- exp_methods = ['token']
- exp_project_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
'idp_id': uuid.uuid4().hex,
'protocol_id': uuid.uuid4().hex}
- payload = token_formatters.FederatedProjectScopedPayload.assemble(
- exp_user_id, exp_methods, exp_project_id, exp_expires_at,
- exp_audit_ids, exp_federated_info)
-
- (user_id, methods, project_id, expires_at, audit_ids,
- federated_info) = (
- token_formatters.FederatedProjectScopedPayload.disassemble(
- payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_project_id, project_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
- self.assertDictEqual(exp_federated_info, federated_info)
+ self._test_payload(token_formatters.FederatedProjectScopedPayload,
+ exp_user_id='someNonUuidUserId',
+ exp_methods=['token'],
+ exp_project_id=uuid.uuid4().hex,
+ exp_federated_info=exp_federated_info)
def test_federated_domain_scoped_payload(self):
- exp_user_id = 'someNonUuidUserId'
- exp_methods = ['token']
- exp_domain_id = uuid.uuid4().hex
- exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True)
- exp_audit_ids = [provider.random_urlsafe_str()]
exp_federated_info = {'group_ids': [{'id': 'someNonUuidGroupId'}],
'idp_id': uuid.uuid4().hex,
'protocol_id': uuid.uuid4().hex}
- payload = token_formatters.FederatedDomainScopedPayload.assemble(
- exp_user_id, exp_methods, exp_domain_id, exp_expires_at,
- exp_audit_ids, exp_federated_info)
+ self._test_payload(token_formatters.FederatedDomainScopedPayload,
+ exp_user_id='someNonUuidUserId',
+ exp_methods=['token'],
+ exp_domain_id=uuid.uuid4().hex,
+ exp_federated_info=exp_federated_info)
- (user_id, methods, domain_id, expires_at, audit_ids,
- federated_info) = (
- token_formatters.FederatedDomainScopedPayload.disassemble(
- payload))
-
- self.assertEqual(exp_user_id, user_id)
- self.assertEqual(exp_methods, methods)
- self.assertEqual(exp_domain_id, domain_id)
- self.assertEqual(exp_expires_at, expires_at)
- self.assertEqual(exp_audit_ids, audit_ids)
- self.assertDictEqual(exp_federated_info, federated_info)
+ def test_oauth_scoped_payload(self):
+ self._test_payload(token_formatters.OauthScopedPayload,
+ exp_project_id=uuid.uuid4().hex,
+ exp_access_token_id=uuid.uuid4().hex)
class TestFernetKeyRotation(unit.TestCase):
@@ -610,7 +505,7 @@ class TestFernetKeyRotation(unit.TestCase):
static set of keys, and simply shuffling them, would fail such a test).
"""
- # Load the keys into a list.
+ # Load the keys into a list; each key is six.text_type.
keys = fernet_utils.load_keys()
# Sort the list of keys by the keys themselves (they were previously
@@ -620,7 +515,8 @@ class TestFernetKeyRotation(unit.TestCase):
# Create the thumbprint using all keys in the repository.
signature = hashlib.sha1()
for key in keys:
- signature.update(key)
+ # Need to convert key to six.binary_type for update.
+ signature.update(key.encode('utf-8'))
return signature.hexdigest()
def assertRepositoryState(self, expected_size):
diff --git a/keystone-moon/keystone/tests/unit/token/test_provider.py b/keystone-moon/keystone/tests/unit/token/test_provider.py
index be831484..7093f3ba 100644
--- a/keystone-moon/keystone/tests/unit/token/test_provider.py
+++ b/keystone-moon/keystone/tests/unit/token/test_provider.py
@@ -24,7 +24,7 @@ class TestRandomStrings(unit.BaseTestCase):
def test_strings_can_be_converted_to_bytes(self):
s = provider.random_urlsafe_str()
- self.assertTrue(isinstance(s, six.string_types))
+ self.assertIsInstance(s, six.text_type)
b = provider.random_urlsafe_str_to_bytes(s)
- self.assertTrue(isinstance(b, bytes))
+ self.assertIsInstance(b, six.binary_type)
diff --git a/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py b/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py
index 6114b723..9e8c3889 100644
--- a/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py
+++ b/keystone-moon/keystone/tests/unit/token/test_token_data_helper.py
@@ -28,7 +28,8 @@ class TestTokenDataHelper(unit.TestCase):
def test_v3_token_data_helper_populate_audit_info_string(self):
token_data = {}
- audit_info = base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
+ audit_info_bytes = base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
+ audit_info = audit_info_bytes.decode('utf-8')
self.v3_data_helper._populate_audit_info(token_data, audit_info)
self.assertIn(audit_info, token_data['audit_ids'])
self.assertThat(token_data['audit_ids'], matchers.HasLength(2))
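
The audit id format exercised here is a 22-character urlsafe-base64 string derived from a random uuid, with the fixed '==' padding sliced off. A sketch:

    import base64
    import uuid

    def random_audit_id():
        # uuid4().bytes is 16 bytes -> 24 base64 characters ending in
        # '=='; dropping the padding leaves a 22-character id.
        audit_id_bytes = base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
        # urlsafe_b64encode returns six.binary_type, so decode to text,
        # just as the test above does.
        return audit_id_bytes.decode('utf-8')
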
diff --git a/keystone-moon/keystone/tests/unit/token/test_token_model.py b/keystone-moon/keystone/tests/unit/token/test_token_model.py
index f1398491..1cb0ef55 100644
--- a/keystone-moon/keystone/tests/unit/token/test_token_model.py
+++ b/keystone-moon/keystone/tests/unit/token/test_token_model.py
@@ -17,8 +17,8 @@ from oslo_config import cfg
from oslo_utils import timeutils
from six.moves import range
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants as federation_constants
from keystone.models import token_model
from keystone.tests.unit import core
from keystone.tests.unit import test_token_provider
diff --git a/keystone-moon/keystone/tests/unit/trust/__init__.py b/keystone-moon/keystone/tests/unit/trust/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/trust/__init__.py
diff --git a/keystone-moon/keystone/tests/unit/trust/test_backends.py b/keystone-moon/keystone/tests/unit/trust/test_backends.py
new file mode 100644
index 00000000..05df866f
--- /dev/null
+++ b/keystone-moon/keystone/tests/unit/trust/test_backends.py
@@ -0,0 +1,172 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo_utils import timeutils
+from six.moves import range
+
+from keystone import exception
+
+
+class TrustTests(object):
+ def create_sample_trust(self, new_id, remaining_uses=None):
+ self.trustor = self.user_foo
+ self.trustee = self.user_two
+ expires_at = datetime.datetime.utcnow().replace(year=2032)
+ trust_data = (self.trust_api.create_trust
+ (new_id,
+ {'trustor_user_id': self.trustor['id'],
+ 'trustee_user_id': self.user_two['id'],
+ 'project_id': self.tenant_bar['id'],
+ 'expires_at': expires_at,
+ 'impersonation': True,
+ 'remaining_uses': remaining_uses},
+ roles=[{"id": "member"},
+ {"id": "other"},
+ {"id": "browser"}]))
+ return trust_data
+
+ def test_delete_trust(self):
+ new_id = uuid.uuid4().hex
+ trust_data = self.create_sample_trust(new_id)
+ trust_id = trust_data['id']
+ self.assertIsNotNone(trust_data)
+ trust_data = self.trust_api.get_trust(trust_id)
+ self.assertEqual(new_id, trust_data['id'])
+ self.trust_api.delete_trust(trust_id)
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ trust_id)
+
+ def test_delete_trust_not_found(self):
+ trust_id = uuid.uuid4().hex
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.delete_trust,
+ trust_id)
+
+ def test_get_trust(self):
+ new_id = uuid.uuid4().hex
+ trust_data = self.create_sample_trust(new_id)
+ trust_id = trust_data['id']
+ self.assertIsNotNone(trust_data)
+ trust_data = self.trust_api.get_trust(trust_id)
+ self.assertEqual(new_id, trust_data['id'])
+ self.trust_api.delete_trust(trust_data['id'])
+
+ def test_get_deleted_trust(self):
+ new_id = uuid.uuid4().hex
+ trust_data = self.create_sample_trust(new_id)
+ self.assertIsNotNone(trust_data)
+ self.assertIsNone(trust_data['deleted_at'])
+ self.trust_api.delete_trust(new_id)
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ new_id)
+ deleted_trust = self.trust_api.get_trust(trust_data['id'],
+ deleted=True)
+ self.assertEqual(trust_data['id'], deleted_trust['id'])
+ self.assertIsNotNone(deleted_trust.get('deleted_at'))
+
+ def test_create_trust(self):
+ new_id = uuid.uuid4().hex
+ trust_data = self.create_sample_trust(new_id)
+
+ self.assertEqual(new_id, trust_data['id'])
+ self.assertEqual(self.trustee['id'], trust_data['trustee_user_id'])
+ self.assertEqual(self.trustor['id'], trust_data['trustor_user_id'])
+ self.assertTrue(timeutils.normalize_time(trust_data['expires_at']) >
+ timeutils.utcnow())
+
+ self.assertEqual([{'id': 'member'},
+ {'id': 'other'},
+ {'id': 'browser'}], trust_data['roles'])
+
+ def test_list_trust_by_trustee(self):
+ for i in range(3):
+ self.create_sample_trust(uuid.uuid4().hex)
+ trusts = self.trust_api.list_trusts_for_trustee(self.trustee['id'])
+ self.assertEqual(3, len(trusts))
+ self.assertEqual(trusts[0]["trustee_user_id"], self.trustee['id'])
+ trusts = self.trust_api.list_trusts_for_trustee(self.trustor['id'])
+ self.assertEqual(0, len(trusts))
+
+ def test_list_trust_by_trustor(self):
+ for i in range(3):
+ self.create_sample_trust(uuid.uuid4().hex)
+ trusts = self.trust_api.list_trusts_for_trustor(self.trustor['id'])
+ self.assertEqual(3, len(trusts))
+ self.assertEqual(trusts[0]["trustor_user_id"], self.trustor['id'])
+ trusts = self.trust_api.list_trusts_for_trustor(self.trustee['id'])
+ self.assertEqual(0, len(trusts))
+
+ def test_list_trusts(self):
+ for i in range(3):
+ self.create_sample_trust(uuid.uuid4().hex)
+ trusts = self.trust_api.list_trusts()
+ self.assertEqual(3, len(trusts))
+
+ def test_trust_has_remaining_uses_positive(self):
+ # create a trust with limited uses, check that we have uses left
+ trust_data = self.create_sample_trust(uuid.uuid4().hex,
+ remaining_uses=5)
+ self.assertEqual(5, trust_data['remaining_uses'])
+ # create a trust with unlimited uses, check that we have uses left
+ trust_data = self.create_sample_trust(uuid.uuid4().hex)
+ self.assertIsNone(trust_data['remaining_uses'])
+
+ def test_trust_has_remaining_uses_negative(self):
+ # try to create a trust with no remaining uses, check that it fails
+ self.assertRaises(exception.ValidationError,
+ self.create_sample_trust,
+ uuid.uuid4().hex,
+ remaining_uses=0)
+ # try to create a trust with negative remaining uses,
+ # check that it fails
+ self.assertRaises(exception.ValidationError,
+ self.create_sample_trust,
+ uuid.uuid4().hex,
+ remaining_uses=-12)
+
+ def test_consume_use(self):
+ # consume a trust repeatedly until it has no uses anymore
+ trust_data = self.create_sample_trust(uuid.uuid4().hex,
+ remaining_uses=2)
+ self.trust_api.consume_use(trust_data['id'])
+ t = self.trust_api.get_trust(trust_data['id'])
+ self.assertEqual(1, t['remaining_uses'])
+ self.trust_api.consume_use(trust_data['id'])
+ # This was the last use, so the trust isn't available anymore
+ self.assertRaises(exception.TrustNotFound,
+ self.trust_api.get_trust,
+ trust_data['id'])
+
+ def test_duplicate_trusts_not_allowed(self):
+ self.trustor = self.user_foo
+ self.trustee = self.user_two
+ trust_data = {'trustor_user_id': self.trustor['id'],
+ 'trustee_user_id': self.user_two['id'],
+ 'project_id': self.tenant_bar['id'],
+ 'expires_at': timeutils.parse_isotime(
+ '2032-02-18T18:10:00Z'),
+ 'impersonation': True,
+ 'remaining_uses': None}
+ roles = [{"id": "member"},
+ {"id": "other"},
+ {"id": "browser"}]
+ self.trust_api.create_trust(uuid.uuid4().hex, trust_data, roles)
+ self.assertRaises(exception.Conflict,
+ self.trust_api.create_trust,
+ uuid.uuid4().hex,
+ trust_data,
+ roles)
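
test_consume_use above pins down decrement-and-delete semantics: each use decrements remaining_uses, and the use that brings the count to zero also removes the trust. A dict-backed sketch of those semantics; a real driver does the decrement and delete atomically:

    def consume_use(store, trust_id):
        # store is a plain dict standing in for the trust backend
        trust = store[trust_id]
        if trust['remaining_uses'] is not None:
            trust['remaining_uses'] -= 1
            if trust['remaining_uses'] == 0:
                # last use: the trust disappears, so a later get_trust()
                # raises TrustNotFound in the real implementation
                del store[trust_id]
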
diff --git a/keystone-moon/keystone/tests/unit/utils.py b/keystone-moon/keystone/tests/unit/utils.py
index 17d1de81..e3e49e70 100644
--- a/keystone-moon/keystone/tests/unit/utils.py
+++ b/keystone-moon/keystone/tests/unit/utils.py
@@ -17,13 +17,10 @@ import os
import time
import uuid
-from oslo_log import log
import six
from testtools import testcase
-LOG = log.getLogger(__name__)
-
TZ = None
@@ -72,7 +69,6 @@ def wip(message):
>>> pass
"""
-
def _wip(f):
@six.wraps(f)
def run_test(*args, **kwargs):
diff --git a/keystone-moon/keystone/token/__init__.py b/keystone-moon/keystone/token/__init__.py
index a73e19f9..f85ffc79 100644
--- a/keystone-moon/keystone/token/__init__.py
+++ b/keystone-moon/keystone/token/__init__.py
@@ -15,4 +15,3 @@
from keystone.token import controllers # noqa
from keystone.token import persistence # noqa
from keystone.token import provider # noqa
-from keystone.token import routers # noqa
diff --git a/keystone-moon/keystone/token/_simple_cert.py b/keystone-moon/keystone/token/_simple_cert.py
new file mode 100644
index 00000000..9c369255
--- /dev/null
+++ b/keystone-moon/keystone/token/_simple_cert.py
@@ -0,0 +1,91 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# TODO(morganfainberg): Remove this file and extension in the "O" release as
+# it is only used in support of the PKI/PKIz token providers.
+import functools
+
+from oslo_config import cfg
+import webob
+
+from keystone.common import controller
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone import exception
+
+
+CONF = cfg.CONF
+EXTENSION_DATA = {
+ 'name': 'OpenStack Simple Certificate API',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-SIMPLE-CERT/v1.0',
+ 'alias': 'OS-SIMPLE-CERT',
+ 'updated': '2014-01-20T12:00:0-00:00',
+ 'description': 'OpenStack simple certificate retrieval extension',
+ 'links': [
+ {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://developer.openstack.org/'
+ 'api-ref-identity-v2-ext.html',
+ }
+ ]}
+extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
+
+build_resource_relation = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-SIMPLE-CERT', extension_version='1.0')
+
+
+class Routers(wsgi.RoutersBase):
+
+ def _construct_url(self, suffix):
+ return "/OS-SIMPLE-CERT/%s" % suffix
+
+ def append_v3_routers(self, mapper, routers):
+ controller = SimpleCert()
+
+ self._add_resource(
+ mapper, controller,
+ path=self._construct_url('ca'),
+ get_action='get_ca_certificate',
+ rel=build_resource_relation(resource_name='ca_certificate'))
+ self._add_resource(
+ mapper, controller,
+ path=self._construct_url('certificates'),
+ get_action='list_certificates',
+ rel=build_resource_relation(resource_name='certificates'))
+
+
+@dependency.requires('token_provider_api')
+class SimpleCert(controller.V3Controller):
+
+ def _get_certificate(self, name):
+ try:
+ with open(name, 'r') as f:
+ body = f.read()
+ except IOError:
+ raise exception.CertificateFilesUnavailable()
+
+ # NOTE(jamielennox): We construct the webob Response ourselves here so
+ # that we don't pass through the JSON encoding process.
+ headers = [('Content-Type', 'application/x-pem-file')]
+ return webob.Response(body=body, headerlist=headers, status="200 OK")
+
+ def get_ca_certificate(self, context):
+ return self._get_certificate(CONF.signing.ca_certs)
+
+ def list_certificates(self, context):
+ return self._get_certificate(CONF.signing.certfile)
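
The two routes above stream raw PEM files straight from the signing configuration instead of JSON. A minimal client-side sketch of fetching them (the endpoint URL here is an assumption, not part of this change):

    import requests

    BASE = 'http://localhost:5000/v3'  # assumed Keystone endpoint
    # Both routes answer with Content-Type 'application/x-pem-file'.
    ca_pem = requests.get(BASE + '/OS-SIMPLE-CERT/ca').text
    signing_pem = requests.get(BASE + '/OS-SIMPLE-CERT/certificates').text
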
diff --git a/keystone-moon/keystone/token/controllers.py b/keystone-moon/keystone/token/controllers.py
index ff65e733..6eeb23ec 100644
--- a/keystone-moon/keystone/token/controllers.py
+++ b/keystone-moon/keystone/token/controllers.py
@@ -38,6 +38,7 @@ LOG = log.getLogger(__name__)
class ExternalAuthNotApplicable(Exception):
"""External authentication is not applicable."""
+
pass
@@ -48,19 +49,17 @@ class Auth(controller.V2Controller):
@controller.v2_deprecated
def ca_cert(self, context, auth=None):
- ca_file = open(CONF.signing.ca_certs, 'r')
- data = ca_file.read()
- ca_file.close()
+ with open(CONF.signing.ca_certs, 'r') as ca_file:
+ data = ca_file.read()
return data
@controller.v2_deprecated
def signing_cert(self, context, auth=None):
- cert_file = open(CONF.signing.certfile, 'r')
- data = cert_file.read()
- cert_file.close()
+ with open(CONF.signing.certfile, 'r') as cert_file:
+ data = cert_file.read()
return data
- @controller.v2_deprecated
+ @controller.v2_auth_deprecated
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
@@ -82,7 +81,6 @@ class Auth(controller.V2Controller):
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
"""
-
if auth is None:
raise exception.ValidationError(attribute='auth',
target='request body')
@@ -182,7 +180,8 @@ class Auth(controller.V2Controller):
try:
token_model_ref = token_model.KeystoneToken(
token_id=old_token,
- token_data=self.token_provider_api.validate_token(old_token))
+ token_data=self.token_provider_api.validate_v2_token(old_token)
+ )
except exception.NotFound as e:
raise exception.Unauthorized(e)
@@ -369,6 +368,10 @@ class Auth(controller.V2Controller):
size=CONF.max_param_size)
if tenant_name:
+ if (CONF.resource.project_name_url_safe == 'strict' and
+ utils.is_not_url_safe(tenant_name)):
+ msg = _('Tenant name cannot contain reserved characters.')
+ raise exception.Unauthorized(message=msg)
try:
tenant_ref = self.resource_api.get_project_by_name(
tenant_name, CONF.identity.default_domain_id)
@@ -379,7 +382,6 @@ class Auth(controller.V2Controller):
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
-
tenant_ref = None
role_list = []
if tenant_id:
diff --git a/keystone-moon/keystone/token/persistence/__init__.py b/keystone-moon/keystone/token/persistence/__init__.py
index 89ec875d..9d8e17f2 100644
--- a/keystone-moon/keystone/token/persistence/__init__.py
+++ b/keystone-moon/keystone/token/persistence/__init__.py
@@ -13,4 +13,4 @@
from keystone.token.persistence.core import * # noqa
-__all__ = ['Manager', 'Driver']
+__all__ = ('Manager', 'Driver')
diff --git a/keystone-moon/keystone/token/persistence/backends/kvs.py b/keystone-moon/keystone/token/persistence/backends/kvs.py
index 51931586..3620db58 100644
--- a/keystone-moon/keystone/token/persistence/backends/kvs.py
+++ b/keystone-moon/keystone/token/persistence/backends/kvs.py
@@ -55,10 +55,10 @@ class Token(token.persistence.TokenDriverV8):
if self.__class__ == Token:
# NOTE(morganfainberg): Only warn if the base KVS implementation
# is instantiated.
- LOG.warn(_LW('It is recommended to only use the base '
- 'key-value-store implementation for the token driver '
- "for testing purposes. Please use 'memcache' or "
- "'sql' instead."))
+ LOG.warning(_LW('It is recommended to only use the base '
+ 'key-value-store implementation for the token '
+ 'driver for testing purposes. Please use '
+ "'memcache' or 'sql' instead."))
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
@@ -138,8 +138,10 @@ class Token(token.persistence.TokenDriverV8):
return data_copy
def _get_user_token_list_with_expiry(self, user_key):
- """Return a list of tuples in the format (token_id, token_expiry) for
- the user_key.
+ """Return user token list with token expiry.
+
+ :return: the tuples in the format (token_id, token_expiry)
+ :rtype: list
"""
return self._get_key_or_default(user_key, default=[])
@@ -210,6 +212,15 @@ class Token(token.persistence.TokenDriverV8):
subsecond=True)
revoked_token_data['id'] = data['id']
+ token_data = data['token_data']
+ if 'access' in token_data:
+ # It's a v2 token.
+ audit_ids = token_data['access']['token']['audit_ids']
+ else:
+ # It's a v3 token.
+ audit_ids = token_data['token']['audit_ids']
+ revoked_token_data['audit_id'] = audit_ids[0]
+
token_list = self._get_key_or_default(self.revocation_key, default=[])
if not isinstance(token_list, list):
# NOTE(morganfainberg): In the case that the revocation list is not
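
The audit_id bookkeeping added above keys off the shape of the stored payload: v2.0 token data nests everything under 'access', v3 data under 'token'. The same discrimination as a standalone sketch (the payload dicts are illustrative):

    def first_audit_id(token_data):
        # v2.0 payloads nest under 'access'; v3 payloads under 'token'.
        if 'access' in token_data:
            return token_data['access']['token']['audit_ids'][0]
        return token_data['token']['audit_ids'][0]

    assert first_audit_id({'token': {'audit_ids': ['abc', 'def']}}) == 'abc'
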
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache.py b/keystone-moon/keystone/token/persistence/backends/memcache.py
index 03f27eaf..e6b0fcab 100644
--- a/keystone-moon/keystone/token/persistence/backends/memcache.py
+++ b/keystone-moon/keystone/token/persistence/backends/memcache.py
@@ -14,6 +14,7 @@
# under the License.
from oslo_config import cfg
+from oslo_log import versionutils
from keystone.token.persistence.backends import kvs
@@ -25,6 +26,11 @@ class Token(kvs.Token):
kvs_backend = 'openstack.kvs.Memcached'
memcached_backend = 'memcached'
+ @versionutils.deprecated(
+ what='Memcache Token Persistence Driver',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of='fernet token driver (no-persistence)',
+ remove_in=0)
def __init__(self, *args, **kwargs):
kwargs['memcached_backend'] = self.memcached_backend
kwargs['no_expiry_keys'] = [self.revocation_key]
diff --git a/keystone-moon/keystone/token/persistence/backends/memcache_pool.py b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
index 55f9e8ae..39a5ca65 100644
--- a/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
+++ b/keystone-moon/keystone/token/persistence/backends/memcache_pool.py
@@ -11,6 +11,7 @@
# under the License.
from oslo_config import cfg
+from oslo_log import versionutils
from keystone.token.persistence.backends import memcache
@@ -21,6 +22,11 @@ CONF = cfg.CONF
class Token(memcache.Token):
memcached_backend = 'pooled_memcached'
+ @versionutils.deprecated(
+ what='Memcache Pool Token Persistence Driver',
+ as_of=versionutils.deprecated.MITAKA,
+ in_favor_of='fernet token driver (no-persistence)',
+ remove_in=0)
def __init__(self, *args, **kwargs):
for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize',
'pool_unused_timeout', 'pool_connection_get_timeout'):
diff --git a/keystone-moon/keystone/token/persistence/backends/sql.py b/keystone-moon/keystone/token/persistence/backends/sql.py
index 6fc1d223..4b3439a1 100644
--- a/keystone-moon/keystone/token/persistence/backends/sql.py
+++ b/keystone-moon/keystone/token/persistence/backends/sql.py
@@ -53,7 +53,6 @@ def _expiry_range_batched(session, upper_bound_func, batch_size):
Return the timestamp of the next token that is `batch_size` rows from
being the oldest expired token.
"""
-
# This expiry strategy splits the tokens into roughly equal sized batches
# to be deleted. It does this by finding the timestamp of a token
# `batch_size` rows from the oldest token and yielding that to the caller.
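
The batching described in this comment keeps each DELETE transaction small by yielding intermediate expiry bounds. A toy model of the idea over an in-memory list (the real strategy runs offset queries against the token table and finishes with a final upper bound for any remainder):

    def expiry_batches(expiry_times, batch_size):
        # expiry_times: token expiry timestamps in ascending order.
        # Yield the timestamp found batch_size rows in, so each
        # caller-side DELETE removes roughly batch_size tokens.
        for i in range(batch_size - 1, len(expiry_times), batch_size):
            yield expiry_times[i]

    assert list(expiry_batches([1, 2, 3, 4, 5], 2)) == [2, 4]
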
@@ -79,7 +78,6 @@ def _expiry_range_batched(session, upper_bound_func, batch_size):
def _expiry_range_all(session, upper_bound_func):
"""Expires all tokens in one pass."""
-
yield upper_bound_func()
@@ -88,11 +86,11 @@ class Token(token.persistence.TokenDriverV8):
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id=token_id)
- session = sql.get_session()
- token_ref = session.query(TokenModel).get(token_id)
- if not token_ref or not token_ref.valid:
- raise exception.TokenNotFound(token_id=token_id)
- return token_ref.to_dict()
+ with sql.session_for_read() as session:
+ token_ref = session.query(TokenModel).get(token_id)
+ if not token_ref or not token_ref.valid:
+ raise exception.TokenNotFound(token_id=token_id)
+ return token_ref.to_dict()
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
@@ -103,14 +101,12 @@ class Token(token.persistence.TokenDriverV8):
token_ref = TokenModel.from_dict(data_copy)
token_ref.valid = True
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
session.add(token_ref)
return token_ref.to_dict()
def delete_token(self, token_id):
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
@@ -126,9 +122,8 @@ class Token(token.persistence.TokenDriverV8):
or the trustor's user ID, so will use trust_id to query the tokens.
"""
- session = sql.get_session()
token_list = []
- with session.begin():
+ with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter_by(valid=True)
@@ -169,38 +164,37 @@ class Token(token.persistence.TokenDriverV8):
return False
def _list_tokens_for_trust(self, trust_id):
- session = sql.get_session()
- tokens = []
- now = timeutils.utcnow()
- query = session.query(TokenModel)
- query = query.filter(TokenModel.expires > now)
- query = query.filter(TokenModel.trust_id == trust_id)
-
- token_references = query.filter_by(valid=True)
- for token_ref in token_references:
- token_ref_dict = token_ref.to_dict()
- tokens.append(token_ref_dict['id'])
- return tokens
+ with sql.session_for_read() as session:
+ tokens = []
+ now = timeutils.utcnow()
+ query = session.query(TokenModel)
+ query = query.filter(TokenModel.expires > now)
+ query = query.filter(TokenModel.trust_id == trust_id)
+
+ token_references = query.filter_by(valid=True)
+ for token_ref in token_references:
+ token_ref_dict = token_ref.to_dict()
+ tokens.append(token_ref_dict['id'])
+ return tokens
def _list_tokens_for_user(self, user_id, tenant_id=None):
- session = sql.get_session()
- tokens = []
- now = timeutils.utcnow()
- query = session.query(TokenModel)
- query = query.filter(TokenModel.expires > now)
- query = query.filter(TokenModel.user_id == user_id)
-
- token_references = query.filter_by(valid=True)
- for token_ref in token_references:
- token_ref_dict = token_ref.to_dict()
- if self._tenant_matches(tenant_id, token_ref_dict):
- tokens.append(token_ref['id'])
- return tokens
+ with sql.session_for_read() as session:
+ tokens = []
+ now = timeutils.utcnow()
+ query = session.query(TokenModel)
+ query = query.filter(TokenModel.expires > now)
+ query = query.filter(TokenModel.user_id == user_id)
+
+ token_references = query.filter_by(valid=True)
+ for token_ref in token_references:
+ token_ref_dict = token_ref.to_dict()
+ if self._tenant_matches(tenant_id, token_ref_dict):
+ tokens.append(token_ref['id'])
+ return tokens
def _list_tokens_for_consumer(self, user_id, consumer_id):
tokens = []
- session = sql.get_session()
- with session.begin():
+ with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
@@ -225,19 +219,29 @@ class Token(token.persistence.TokenDriverV8):
return self._list_tokens_for_user(user_id, tenant_id)
def list_revoked_tokens(self):
- session = sql.get_session()
- tokens = []
- now = timeutils.utcnow()
- query = session.query(TokenModel.id, TokenModel.expires)
- query = query.filter(TokenModel.expires > now)
- token_references = query.filter_by(valid=False)
- for token_ref in token_references:
- record = {
- 'id': token_ref[0],
- 'expires': token_ref[1],
- }
- tokens.append(record)
- return tokens
+ with sql.session_for_read() as session:
+ tokens = []
+ now = timeutils.utcnow()
+ query = session.query(TokenModel.id, TokenModel.expires,
+ TokenModel.extra)
+ query = query.filter(TokenModel.expires > now)
+ token_references = query.filter_by(valid=False)
+ for token_ref in token_references:
+ token_data = token_ref[2]['token_data']
+ if 'access' in token_data:
+ # It's a v2 token.
+ audit_ids = token_data['access']['token']['audit_ids']
+ else:
+ # It's a v3 token.
+ audit_ids = token_data['token']['audit_ids']
+
+ record = {
+ 'id': token_ref[0],
+ 'expires': token_ref[1],
+ 'audit_id': audit_ids[0],
+ }
+ tokens.append(record)
+ return tokens
def _expiry_range_strategy(self, dialect):
"""Choose a token range expiration strategy
@@ -245,7 +249,6 @@ class Token(token.persistence.TokenDriverV8):
Based on the DB dialect, select an expiry range callable that is
appropriate.
"""
-
# DB2 and MySQL can both benefit from a batched strategy. On DB2 the
# transaction log can fill up and on MySQL w/Galera, large
# transactions can exceed the maximum write set size.
@@ -266,18 +269,18 @@ class Token(token.persistence.TokenDriverV8):
return _expiry_range_all
def flush_expired_tokens(self):
- session = sql.get_session()
- dialect = session.bind.dialect.name
- expiry_range_func = self._expiry_range_strategy(dialect)
- query = session.query(TokenModel.expires)
- total_removed = 0
- upper_bound_func = timeutils.utcnow
- for expiry_time in expiry_range_func(session, upper_bound_func):
- delete_query = query.filter(TokenModel.expires <=
- expiry_time)
- row_count = delete_query.delete(synchronize_session=False)
- total_removed += row_count
- LOG.debug('Removed %d total expired tokens', total_removed)
-
- session.flush()
- LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
+ with sql.session_for_write() as session:
+ dialect = session.bind.dialect.name
+ expiry_range_func = self._expiry_range_strategy(dialect)
+ query = session.query(TokenModel.expires)
+ total_removed = 0
+ upper_bound_func = timeutils.utcnow
+ for expiry_time in expiry_range_func(session, upper_bound_func):
+ delete_query = query.filter(TokenModel.expires <=
+ expiry_time)
+ row_count = delete_query.delete(synchronize_session=False)
+ total_removed += row_count
+ LOG.debug('Removed %d total expired tokens', total_removed)
+
+ session.flush()
+ LOG.info(_LI('Total expired tokens removed: %d'), total_removed)
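
Every conversion in this file follows the same pattern: the explicit sql.get_session() plus manual session.begin() block becomes a single scoped context manager. A minimal sketch of what such a manager provides, assuming SQLAlchemy semantics (illustrative, not keystone's actual implementation):

    import contextlib

    @contextlib.contextmanager
    def session_for_write(session_factory):
        session = session_factory()
        try:
            with session.begin():
                # Commits on clean exit, rolls back on exception.
                yield session
        finally:
            session.close()
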
diff --git a/keystone-moon/keystone/token/persistence/core.py b/keystone-moon/keystone/token/persistence/core.py
index e68970ac..76c3ff70 100644
--- a/keystone-moon/keystone/token/persistence/core.py
+++ b/keystone-moon/keystone/token/persistence/core.py
@@ -32,9 +32,9 @@ from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='token')
-REVOCATION_MEMOIZE = cache.get_memoization_decorator(
- section='token', expiration_section='revoke')
+MEMOIZE = cache.get_memoization_decorator(group='token')
+REVOCATION_MEMOIZE = cache.get_memoization_decorator(group='token',
+ expiration_group='revoke')
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
@@ -60,11 +60,6 @@ class PersistenceManager(manager.Manager):
raise exception.TokenNotFound(token_id=token_id)
def get_token(self, token_id):
- if not token_id:
- # NOTE(morganfainberg): There are cases when the
- # context['token_id'] will in-fact be None. This also saves
- # a round-trip to the backend if we don't have a token_id.
- raise exception.TokenNotFound(token_id='')
unique_id = utils.generate_unique_id(token_id)
token_ref = self._get_token(unique_id)
# NOTE(morganfainberg): Lift expired checking to the manager, there is
@@ -206,13 +201,13 @@ class Manager(object):
This class is a proxy class to the token_provider_api's persistence
manager.
"""
+
def __init__(self):
# NOTE(morganfainberg): __init__ is required for dependency processing.
super(Manager, self).__init__()
def __getattr__(self, item):
"""Forward calls to the `token_provider_api` persistence manager."""
-
# NOTE(morganfainberg): Prevent infinite recursion, raise an
# AttributeError for 'token_provider_api' ensuring that the dep
# injection doesn't infinitely try and lookup self.token_provider_api
@@ -240,7 +235,7 @@ class TokenDriverV8(object):
:param token_id: identity of the token
:type token_id: string
:returns: token_ref
- :raises: keystone.exception.TokenNotFound
+ :raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -276,7 +271,7 @@ class TokenDriverV8(object):
:param token_id: identity of the token
:type token_id: string
:returns: None.
- :raises: keystone.exception.TokenNotFound
+ :raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -304,7 +299,7 @@ class TokenDriverV8(object):
:param consumer_id: identity of the consumer
:type consumer_id: string
:returns: The tokens that have been deleted.
- :raises: keystone.exception.TokenNotFound
+ :raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
if not CONF.token.revoke_by_id:
@@ -317,7 +312,8 @@ class TokenDriverV8(object):
for token in token_list:
try:
self.delete_token(token)
- except exception.NotFound:
+ except exception.NotFound: # nosec
+ # The token is already gone, good.
pass
return token_list
@@ -354,8 +350,7 @@ class TokenDriverV8(object):
@abc.abstractmethod
def flush_expired_tokens(self):
- """Archive or delete tokens that have expired.
- """
+ """Archive or delete tokens that have expired."""
raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/token/provider.py b/keystone-moon/keystone/token/provider.py
index 1422e41f..7c4166f4 100644
--- a/keystone-moon/keystone/token/provider.py
+++ b/keystone-moon/keystone/token/provider.py
@@ -33,12 +33,13 @@ from keystone.i18n import _, _LE
from keystone.models import token_model
from keystone import notifications
from keystone.token import persistence
+from keystone.token import providers
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-MEMOIZE = cache.get_memoization_decorator(section='token')
+MEMOIZE = cache.get_memoization_decorator(group='token')
# NOTE(morganfainberg): This is for compatibility in case someone was relying
# on the old location of the UnsupportedTokenVersionException for their code.
@@ -51,18 +52,37 @@ VERSIONS = token_model.VERSIONS
def base64_encode(s):
- """Encode a URL-safe string."""
- return base64.urlsafe_b64encode(s).rstrip('=')
+ """Encode a URL-safe string.
+
+ :type s: six.text_type
+ :rtype: six.text_type
+
+ """
+ # urlsafe_b64encode() returns six.binary_type so need to convert to
+ # six.text_type, might as well do it before stripping.
+ return base64.urlsafe_b64encode(s).decode('utf-8').rstrip('=')
def random_urlsafe_str():
- """Generate a random URL-safe string."""
+ """Generate a random URL-safe string.
+
+ :rtype: six.text_type
+
+ """
# chop the padding (==) off the end of the encoding to save space
- return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2]
+ return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2].decode('utf-8')
def random_urlsafe_str_to_bytes(s):
- """Convert a string generated by ``random_urlsafe_str()`` to bytes."""
+ """Convert a string from :func:`random_urlsafe_str()` to six.binary_type.
+
+ :type s: six.text_type
+ :rtype: six.binary_type
+
+ """
+ # urlsafe_b64decode() requires str, unicode isn't accepted.
+ s = str(s)
+
# restore the padding (==) at the end of the string
return base64.urlsafe_b64decode(s + '==')
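
With the type contracts above, the helpers round-trip cleanly between text and bytes on both Python 2 and 3. A quick check, assuming the three functions as defined in this hunk:

    s = random_urlsafe_str()            # six.text_type, no '=' padding
    assert len(s) == 22                 # 16 random bytes -> 24 chars - 2 pad
    b = random_urlsafe_str_to_bytes(s)  # padding restored, then decoded
    assert len(b) == 16
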
@@ -201,14 +221,29 @@ class Manager(manager.Manager):
self.revoke_api.check_token(token_values)
def validate_v2_token(self, token_id, belongs_to=None):
- unique_id = utils.generate_unique_id(token_id)
+ # NOTE(lbragstad): Only go to the persistence backend if the token
+ # provider requires it.
if self._needs_persistence:
# NOTE(morganfainberg): Ensure we never use the long-form token_id
# (PKI) as part of the cache_key.
+ unique_id = utils.generate_unique_id(token_id)
token_ref = self._persistence.get_token(unique_id)
+ token = self._validate_v2_token(token_ref)
else:
- token_ref = token_id
- token = self._validate_v2_token(token_ref)
+ # NOTE(lbragstad): If the token doesn't require persistence, then
+ # it is a fernet token. The fernet token provider doesn't care if
+ # it's creating version 2.0 tokens or v3 tokens, so we use the same
+ # validate_non_persistent_token() method to validate both. Then we
+ # can leverage a separate method to make version 3 token data look
+ # like version 2.0 token data. The pattern we want to move towards
+ # is one where the token providers just handle data and the
+ # controller layers handle interpreting the token data in a format
+ # that makes sense for the request.
+ v3_token_ref = self.validate_non_persistent_token(token_id)
+ v2_token_data_helper = providers.common.V2TokenDataHelper()
+ token = v2_token_data_helper.v3_to_v2_token(v3_token_ref)
+
+ # these are common things that happen regardless of token provider
token['access']['token']['id'] = token_id
self._token_belongs_to(token, belongs_to)
self._is_valid_token(token)
@@ -223,37 +258,52 @@ class Manager(manager.Manager):
self.revoke_api.check_token(token_values)
def check_revocation(self, token):
- version = self.driver.get_token_version(token)
+ version = self.get_token_version(token)
if version == V2:
return self.check_revocation_v2(token)
else:
return self.check_revocation_v3(token)
def validate_v3_token(self, token_id):
- unique_id = utils.generate_unique_id(token_id)
- # NOTE(lbragstad): Only go to persistent storage if we have a token to
- # fetch from the backend. If the Fernet token provider is being used
- # this step isn't necessary. The Fernet token reference is persisted in
- # the token_id, so in this case set the token_ref as the identifier of
- # the token.
- if not self._needs_persistence:
- token_ref = token_id
- else:
- # NOTE(morganfainberg): Ensure we never use the long-form token_id
- # (PKI) as part of the cache_key.
- token_ref = self._persistence.get_token(unique_id)
- token = self._validate_v3_token(token_ref)
- self._is_valid_token(token)
- return token
+ if not token_id:
+ raise exception.TokenNotFound(_('No token in the request'))
+
+ try:
+ # NOTE(lbragstad): Only go to persistent storage if we have a token
+ # to fetch from the backend (the driver persists the token).
+ # Otherwise the information about the token must be in the token
+ # id.
+ if not self._needs_persistence:
+ token_ref = self.validate_non_persistent_token(token_id)
+ else:
+ unique_id = utils.generate_unique_id(token_id)
+ # NOTE(morganfainberg): Ensure we never use the long-form
+ # token_id (PKI) as part of the cache_key.
+ token_ref = self._persistence.get_token(unique_id)
+ token_ref = self._validate_v3_token(token_ref)
+ self._is_valid_token(token_ref)
+ return token_ref
+ except exception.Unauthorized as e:
+ LOG.debug('Unable to validate token: %s', e)
+ raise exception.TokenNotFound(token_id=token_id)
@MEMOIZE
def _validate_token(self, token_id):
+ if not token_id:
+ raise exception.TokenNotFound(_('No token in the request'))
+
if not self._needs_persistence:
- return self.driver.validate_v3_token(token_id)
+ # NOTE(lbragstad): This will validate v2 and v3 non-persistent
+ # tokens.
+ return self.driver.validate_non_persistent_token(token_id)
token_ref = self._persistence.get_token(token_id)
- version = self.driver.get_token_version(token_ref)
+ version = self.get_token_version(token_ref)
if version == self.V3:
- return self.driver.validate_v3_token(token_ref)
+ try:
+ return self.driver.validate_v3_token(token_ref)
+ except exception.Unauthorized as e:
+ LOG.debug('Unable to validate token: %s', e)
+ raise exception.TokenNotFound(token_id=token_id)
elif version == self.V2:
return self.driver.validate_v2_token(token_ref)
raise exception.UnsupportedTokenVersionException()
@@ -268,7 +318,6 @@ class Manager(manager.Manager):
def _is_valid_token(self, token):
"""Verify the token is valid format and has not expired."""
-
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
@@ -490,7 +539,8 @@ class Provider(object):
:param token_data: token_data
:type token_data: dict
:returns: token version string
- :raises: keystone.token.provider.UnsupportedTokenVersionException
+ :raises keystone.exception.UnsupportedTokenVersionException:
+ If the token version is not expected.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -548,8 +598,19 @@ class Provider(object):
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
- :raises: keystone.exception.TokenNotFound
+ :raises keystone.exception.TokenNotFound: If the token doesn't exist.
+
+ """
+ raise exception.NotImplemented() # pragma: no cover
+ @abc.abstractmethod
+ def validate_non_persistent_token(self, token_id):
+ """Validate a given non-persistent token id and return the token_data.
+
+ :param token_id: the token id
+ :type token_id: string
+ :returns: token data
+ :raises keystone.exception.TokenNotFound: When the token is invalid
"""
raise exception.NotImplemented() # pragma: no cover
@@ -560,7 +621,7 @@ class Provider(object):
:param token_ref: the token reference
:type token_ref: dict
:returns: token data
- :raises: keystone.exception.TokenNotFound
+ :raises keystone.exception.TokenNotFound: If the token doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
@@ -570,6 +631,7 @@ class Provider(object):
:param token_data: token information
:type token_data: dict
- returns: token identifier
+ :returns: token identifier
+ :rtype: six.text_type
"""
raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/token/providers/common.py b/keystone-moon/keystone/token/providers/common.py
index b71458cd..94729178 100644
--- a/keystone-moon/keystone/token/providers/common.py
+++ b/keystone-moon/keystone/token/providers/common.py
@@ -14,7 +14,6 @@
from oslo_config import cfg
from oslo_log import log
-from oslo_log import versionutils
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse
@@ -22,8 +21,8 @@ from six.moves.urllib import parse
from keystone.common import controller as common_controller
from keystone.common import dependency
from keystone.common import utils
-from keystone.contrib.federation import constants as federation_constants
from keystone import exception
+from keystone.federation import constants as federation_constants
from keystone.i18n import _, _LE
from keystone import token
from keystone.token import provider
@@ -33,72 +32,69 @@ LOG = log.getLogger(__name__)
CONF = cfg.CONF
-@dependency.requires('catalog_api', 'resource_api')
+@dependency.requires('catalog_api', 'resource_api', 'assignment_api')
class V2TokenDataHelper(object):
"""Creates V2 token data."""
def v3_to_v2_token(self, v3_token_data):
+ """Convert v3 token data into v2.0 token data.
+
+ This method expects a dictionary generated from
+ V3TokenDataHelper.get_token_data() and converts it to look like a v2.0
+ token dictionary.
+
+ :param v3_token_data: dictionary formatted for v3 tokens
+ :returns: dictionary formatted for v2 tokens
+ :raises keystone.exception.Unauthorized: If a specific token type is
+ not supported in v2.
+
+ """
token_data = {}
# Build v2 token
v3_token = v3_token_data['token']
+ # NOTE(lbragstad): Version 2.0 tokens don't know about any domain other
+ # than the default domain specified in the configuration.
+ domain_id = v3_token.get('domain', {}).get('id')
+ if domain_id and CONF.identity.default_domain_id != domain_id:
+ msg = ('Unable to validate domain-scoped tokens outside of the '
+ 'default domain.')
+ raise exception.Unauthorized(msg)
+
token = {}
token['expires'] = v3_token.get('expires_at')
token['issued_at'] = v3_token.get('issued_at')
token['audit_ids'] = v3_token.get('audit_ids')
- # Bail immediately if this is a domain-scoped token, which is not
- # supported by the v2 API at all.
- if 'domain' in v3_token:
- raise exception.Unauthorized(_(
- 'Domains are not supported by the v2 API. Please use the v3 '
- 'API instead.'))
-
- # Bail if this is a project-scoped token outside the default domain,
- # which may result in a namespace collision with a project inside the
- # default domain.
if 'project' in v3_token:
- if (v3_token['project']['domain']['id'] !=
- CONF.identity.default_domain_id):
- raise exception.Unauthorized(_(
- 'Project not found in the default domain (please use the '
- 'v3 API instead): %s') % v3_token['project']['id'])
-
# v3 token_data does not contain all tenant attributes
tenant = self.resource_api.get_project(
v3_token['project']['id'])
- token['tenant'] = common_controller.V2Controller.filter_domain_id(
+ # Drop domain specific fields since v2 calls are not domain-aware.
+ token['tenant'] = common_controller.V2Controller.v3_to_v2_project(
tenant)
token_data['token'] = token
# Build v2 user
v3_user = v3_token['user']
- # Bail if this is a token outside the default domain,
- # which may result in a namespace collision with a project inside the
- # default domain.
- if ('domain' in v3_user and v3_user['domain']['id'] !=
- CONF.identity.default_domain_id):
- raise exception.Unauthorized(_(
- 'User not found in the default domain (please use the v3 API '
- 'instead): %s') % v3_user['id'])
-
user = common_controller.V2Controller.v3_to_v2_user(v3_user)
- # Maintain Trust Data
if 'OS-TRUST:trust' in v3_token:
- v3_trust_data = v3_token['OS-TRUST:trust']
- token_data['trust'] = {
- 'trustee_user_id': v3_trust_data['trustee_user']['id'],
- 'id': v3_trust_data['id'],
- 'trustor_user_id': v3_trust_data['trustor_user']['id'],
- 'impersonation': v3_trust_data['impersonation']
- }
+ msg = ('Unable to validate trust-scoped tokens using the v2.0 '
+ 'API.')
+ raise exception.Unauthorized(msg)
+
+ if 'OS-OAUTH1' in v3_token:
+ msg = ('Unable to validate OAuth tokens using the v2.0 '
+ 'API.')
+ raise exception.Unauthorized(msg)
# Set user roles
user['roles'] = []
role_ids = []
for role in v3_token.get('roles', []):
+ role_ids.append(role.pop('id'))
user['roles'].append(role)
user['roles_links'] = []
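
v3_to_v2_token() is now the single downgrade path, and it rejects anything the v2.0 format cannot express rather than silently converting it. The guard logic in isolation (a sketch; the default domain id is a stand-in for CONF.identity.default_domain_id):

    DEFAULT_DOMAIN_ID = 'default'  # stand-in for the configured default

    def v2_downgrade_blocker(v3_token):
        # Return why a v3 token cannot become a v2.0 token, else None.
        domain_id = v3_token.get('domain', {}).get('id')
        if domain_id and domain_id != DEFAULT_DOMAIN_ID:
            return 'scoped to a non-default domain'
        if 'OS-TRUST:trust' in v3_token:
            return 'trust-scoped'
        if 'OS-OAUTH1' in v3_token:
            return 'OAuth'
        return None

    assert v2_downgrade_blocker({'OS-TRUST:trust': {}}) == 'trust-scoped'
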
@@ -145,7 +141,7 @@ class V2TokenDataHelper(object):
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
- 'issued_at': utils.strtime(),
+ 'issued_at': utils.isotime(subsecond=True),
'audit_ids': audit_info
},
'user': {'id': user_ref['id'],
@@ -186,7 +182,8 @@ class V2TokenDataHelper(object):
@classmethod
def format_catalog(cls, catalog_ref):
- """Munge catalogs from internal to output format
+ """Munge catalogs from internal to output format.
+
Internal catalogs look like::
{$REGION: {
@@ -235,6 +232,7 @@ class V2TokenDataHelper(object):
'identity_api', 'resource_api', 'role_api', 'trust_api')
class V3TokenDataHelper(object):
"""Token data helper."""
+
def __init__(self):
# Keep __init__ around to ensure dependency injection works.
super(V3TokenDataHelper, self).__init__()
@@ -248,8 +246,12 @@ class V3TokenDataHelper(object):
filtered_project = {
'id': project_ref['id'],
'name': project_ref['name']}
- filtered_project['domain'] = self._get_filtered_domain(
- project_ref['domain_id'])
+ if project_ref['domain_id'] is not None:
+ filtered_project['domain'] = (
+ self._get_filtered_domain(project_ref['domain_id']))
+ else:
+ # Projects acting as a domain do not have a domain_id attribute
+ filtered_project['domain'] = None
return filtered_project
def _populate_scope(self, token_data, domain_id, project_id):
@@ -262,6 +264,18 @@ class V3TokenDataHelper(object):
if project_id:
token_data['project'] = self._get_filtered_project(project_id)
+ def _populate_is_admin_project(self, token_data):
+ # TODO(ayoung): Support the ability for a project acting as a domain
+ # to be the admin project once the rest of the code for projects
+ # acting as domains is merged. Code will likely be:
+ # (r.admin_project_name == None and project['is_domain'] == True
+ # and project['name'] == r.admin_project_domain_name)
+ project = token_data['project']
+ r = CONF.resource
+ if (project['name'] == r.admin_project_name and
+ project['domain']['name'] == r.admin_project_domain_name):
+ token_data['is_admin_project'] = True
+
def _get_roles_for_user(self, user_id, domain_id, project_id):
roles = []
if domain_id:
@@ -282,12 +296,12 @@ class V3TokenDataHelper(object):
place.
:param token_data: a dictionary used for building token response
- :group_ids: list of group IDs a user is a member of
- :project_id: project ID to scope to
- :domain_id: domain ID to scope to
- :user_id: user ID
+ :param group_ids: list of group IDs a user is a member of
+ :param project_id: project ID to scope to
+ :param domain_id: domain ID to scope to
+ :param user_id: user ID
- :raises: exception.Unauthorized - when no roles were found for a
+ :raises keystone.exception.Unauthorized: when no roles were found for a
(group_ids, project_id) or (group_ids, domain_id) pair.
"""
@@ -370,7 +384,16 @@ class V3TokenDataHelper(object):
return
if CONF.trust.enabled and trust:
- token_user_id = trust['trustor_user_id']
+ # If redelegated_trust_id is set, then we must traverse the
+ # trust_chain in order to determine who the original trustor is. We
+ # need to do this because the user ID of the original trustor helps
+ # us determine scope in the redelegated context.
+ if trust.get('redelegated_trust_id'):
+ trust_chain = self.trust_api.get_trust_pedigree(trust['id'])
+ token_user_id = trust_chain[-1]['trustor_user_id']
+ else:
+ token_user_id = trust['trustor_user_id']
+
token_project_id = trust['project_id']
# trusts do not support domains yet
token_domain_id = None
@@ -380,21 +403,39 @@ class V3TokenDataHelper(object):
token_domain_id = domain_id
if token_domain_id or token_project_id:
- roles = self._get_roles_for_user(token_user_id,
- token_domain_id,
- token_project_id)
filtered_roles = []
if CONF.trust.enabled and trust:
- for trust_role in trust['roles']:
- match_roles = [x for x in roles
- if x['id'] == trust_role['id']]
+ # First expand out any roles that were in the trust to include
+ # any implied roles, whether global or domain specific
+ refs = [{'role_id': role['id']} for role in trust['roles']]
+ effective_trust_roles = (
+ self.assignment_api.add_implied_roles(refs))
+ # Now get the current role assignments for the trustor,
+ # including any domain specific roles.
+ assignment_list = self.assignment_api.list_role_assignments(
+ user_id=token_user_id,
+ project_id=token_project_id,
+ effective=True, strip_domain_roles=False)
+ current_effective_trustor_roles = (
+ list(set([x['role_id'] for x in assignment_list])))
+ # Go through each of the effective trust roles, making sure the
+ # trustor still has them; if any have been removed, we will
+ # treat the trust as invalid.
+ for trust_role in effective_trust_roles:
+
+ match_roles = [x for x in current_effective_trustor_roles
+ if x == trust_role['role_id']]
if match_roles:
- filtered_roles.append(match_roles[0])
+ role = self.role_api.get_role(match_roles[0])
+ if role['domain_id'] is None:
+ filtered_roles.append(role)
else:
raise exception.Forbidden(
_('Trustee has no delegated roles.'))
else:
- for role in roles:
+ for role in self._get_roles_for_user(token_user_id,
+ token_domain_id,
+ token_project_id):
filtered_roles.append({'id': role['id'],
'name': role['name']})
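
The loop above invalidates the trust as soon as the trustor has lost any role the trust depends on after implied-role expansion (domain-specific roles are additionally filtered out of the token). The core check, reduced to set membership (names illustrative):

    def check_trust_roles(effective_trust_roles, assignment_list):
        # effective_trust_roles: [{'role_id': ...}] after expansion.
        # assignment_list: the trustor's current effective assignments.
        current = {a['role_id'] for a in assignment_list}
        for trust_role in effective_trust_roles:
            if trust_role['role_id'] not in current:
                # Any missing role invalidates the whole delegation.
                raise Exception('Trustee has no delegated roles.')
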
@@ -426,7 +467,6 @@ class V3TokenDataHelper(object):
if project_id or domain_id:
service_catalog = self.catalog_api.get_v3_catalog(
user_id, project_id)
- # TODO(ayoung): Enforce Endpoints for trust
token_data['catalog'] = service_catalog
def _populate_service_providers(self, token_data):
@@ -458,20 +498,11 @@ class V3TokenDataHelper(object):
LOG.error(msg)
raise exception.UnexpectedError(msg)
- def get_token_data(self, user_id, method_names, extras=None,
- domain_id=None, project_id=None, expires=None,
- trust=None, token=None, include_catalog=True,
- bind=None, access_token=None, issued_at=None,
- audit_info=None):
- if extras is None:
- extras = {}
- if extras:
- versionutils.deprecated(
- what='passing token data with "extras"',
- as_of=versionutils.deprecated.KILO,
- in_favor_of='well-defined APIs')(lambda: None)()
- token_data = {'methods': method_names,
- 'extras': extras}
+ def get_token_data(self, user_id, method_names, domain_id=None,
+ project_id=None, expires=None, trust=None, token=None,
+ include_catalog=True, bind=None, access_token=None,
+ issued_at=None, audit_info=None):
+ token_data = {'methods': method_names}
# We've probably already written these to the token
if token:
@@ -479,14 +510,12 @@ class V3TokenDataHelper(object):
if x in token:
token_data[x] = token[x]
- if CONF.trust.enabled and trust:
- if user_id != trust['trustee_user_id']:
- raise exception.Forbidden(_('User is not a trustee.'))
-
if bind:
token_data['bind'] = bind
self._populate_scope(token_data, domain_id, project_id)
+ if token_data.get('project'):
+ self._populate_is_admin_project(token_data)
self._populate_user(token_data, user_id, trust)
self._populate_roles(token_data, user_id, domain_id, project_id, trust,
access_token)
@@ -527,6 +556,11 @@ class BaseProvider(provider.Provider):
def issue_v2_token(self, token_ref, roles_ref=None,
catalog_ref=None):
+ if token_ref.get('bind') and not self._supports_bind_authentication:
+ msg = _('The configured token provider does not support bind '
+ 'authentication.')
+ raise exception.NotImplemented(message=msg)
+
metadata_ref = token_ref['metadata']
trust_ref = None
if CONF.trust.enabled and metadata_ref and 'trust_id' in metadata_ref:
@@ -559,6 +593,10 @@ class BaseProvider(provider.Provider):
'trust_id' in metadata_ref):
trust = self.trust_api.get_trust(metadata_ref['trust_id'])
+ if CONF.trust.enabled and trust:
+ if user_id != trust['trustee_user_id']:
+ raise exception.Forbidden(_('User is not a trustee.'))
+
token_ref = None
if auth_context and self._is_mapped_token(auth_context):
token_ref = self._handle_mapped_tokens(
@@ -572,7 +610,6 @@ class BaseProvider(provider.Provider):
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
- auth_context.get('extras') if auth_context else None,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
@@ -636,21 +673,10 @@ class BaseProvider(provider.Provider):
token.provider.V3):
# this is a V3 token
msg = _('Non-default domain is not supported')
- # user in a non-default is prohibited
- if (token_ref['token_data']['token']['user']['domain']['id'] !=
- CONF.identity.default_domain_id):
- raise exception.Unauthorized(msg)
# domain scoping is prohibited
if token_ref['token_data']['token'].get('domain'):
raise exception.Unauthorized(
_('Domain scoped token is not supported'))
- # project in non-default domain is prohibited
- if token_ref['token_data']['token'].get('project'):
- project = token_ref['token_data']['token']['project']
- project_domain_id = project['domain']['id']
- # scoped to project in non-default domain is prohibited
- if project_domain_id != CONF.identity.default_domain_id:
- raise exception.Unauthorized(msg)
# if token is scoped to trust, both trustor and trustee must
# be in the default domain. Furthermore, the delegated project
# must also be in the default domain
@@ -693,14 +719,58 @@ class BaseProvider(provider.Provider):
trust_id = token_data['access'].get('trust', {}).get('id')
if trust_id:
- # token trust validation
- self.trust_api.get_trust(trust_id)
+ msg = ('Unable to validate trust-scoped tokens using the '
+ 'v2.0 API.')
+ raise exception.Unauthorized(msg)
return token_data
- except exception.ValidationError as e:
+ except exception.ValidationError:
LOG.exception(_LE('Failed to validate token'))
+ token_id = token_ref['token_data']['access']['token']['id']
+ raise exception.TokenNotFound(token_id=token_id)
+
+ def validate_non_persistent_token(self, token_id):
+ try:
+ (user_id, methods, audit_ids, domain_id, project_id, trust_id,
+ federated_info, access_token_id, created_at, expires_at) = (
+ self.token_formatter.validate_token(token_id))
+ except exception.ValidationError as e:
raise exception.TokenNotFound(e)
+ token_dict = None
+ trust_ref = None
+ if federated_info:
+ # NOTE(lbragstad): We need to rebuild information about the
+ # federated token as well as the federated token roles. This is
+ # because when we validate a non-persistent token, we don't have a
+ # token reference to pull the federated token information out of.
+ # As a result, we have to extract it from the token itself and
+ # rebuild the federated context. These private methods currently
+ # live in the keystone.token.providers.fernet.Provider() class.
+ token_dict = self._rebuild_federated_info(federated_info, user_id)
+ if project_id or domain_id:
+ self._rebuild_federated_token_roles(token_dict, federated_info,
+ user_id, project_id,
+ domain_id)
+ if trust_id:
+ trust_ref = self.trust_api.get_trust(trust_id)
+
+ access_token = None
+ if access_token_id:
+ access_token = self.oauth_api.get_access_token(access_token_id)
+
+ return self.v3_token_data_helper.get_token_data(
+ user_id,
+ method_names=methods,
+ domain_id=domain_id,
+ project_id=project_id,
+ issued_at=created_at,
+ expires=expires_at,
+ trust=trust_ref,
+ token=token_dict,
+ access_token=access_token,
+ audit_info=audit_ids)
+
def validate_v3_token(self, token_ref):
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
diff --git a/keystone-moon/keystone/token/providers/fernet/core.py b/keystone-moon/keystone/token/providers/fernet/core.py
index a71c375b..ff6fe9cc 100644
--- a/keystone-moon/keystone/token/providers/fernet/core.py
+++ b/keystone-moon/keystone/token/providers/fernet/core.py
@@ -11,23 +11,18 @@
# under the License.
from oslo_config import cfg
-from oslo_log import log
from keystone.common import dependency
from keystone.common import utils as ks_utils
-from keystone.contrib.federation import constants as federation_constants
-from keystone import exception
-from keystone.i18n import _
-from keystone.token import provider
+from keystone.federation import constants as federation_constants
from keystone.token.providers import common
from keystone.token.providers.fernet import token_formatters as tf
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-@dependency.requires('trust_api')
+@dependency.requires('trust_api', 'oauth_api')
class Provider(common.BaseProvider):
def __init__(self, *args, **kwargs):
super(Provider, self).__init__(*args, **kwargs)
@@ -38,65 +33,10 @@ class Provider(common.BaseProvider):
"""Should the token be written to a backend."""
return False
- def issue_v2_token(self, token_ref, roles_ref=None, catalog_ref=None):
- """Issue a V2 formatted token.
-
- :param token_ref: reference describing the token
- :param roles_ref: reference describing the roles for the token
- :param catalog_ref: reference describing the token's catalog
- :returns: tuple containing the ID of the token and the token data
-
- """
- # TODO(lbragstad): Currently, Fernet tokens don't support bind in the
- # token format. Raise a 501 if we're dealing with bind.
- if token_ref.get('bind'):
- raise exception.NotImplemented()
-
- user_id = token_ref['user']['id']
- # Default to password since methods not provided by token_ref
- method_names = ['password']
- project_id = None
- # Verify that tenant is not None in token_ref
- if token_ref.get('tenant'):
- project_id = token_ref['tenant']['id']
-
- # maintain expiration time across rescopes
- expires = token_ref.get('expires')
-
- parent_audit_id = token_ref.get('parent_audit_id')
- # If parent_audit_id is defined then a token authentication was made
- if parent_audit_id:
- method_names.append('token')
-
- audit_ids = provider.audit_info(parent_audit_id)
-
- # Get v3 token data and exclude building v3 specific catalog. This is
- # due to the fact that the V2TokenDataHelper.format_token() method
- # doesn't build any of the token_reference from other Keystone APIs.
- # Instead, it builds it from what is persisted in the token reference.
- # Here we are going to leverage the V3TokenDataHelper.get_token_data()
- # method written for V3 because it goes through and populates the token
- # reference dynamically. Once we have a V3 token reference, we can
- # attempt to convert it to a V2 token response.
- v3_token_data = self.v3_token_data_helper.get_token_data(
- user_id,
- method_names,
- project_id=project_id,
- token=token_ref,
- include_catalog=False,
- audit_info=audit_ids,
- expires=expires)
-
- expires_at = v3_token_data['token']['expires_at']
- token_id = self.token_formatter.create_token(user_id, expires_at,
- audit_ids,
- methods=method_names,
- project_id=project_id)
- self._build_issued_at_info(token_id, v3_token_data)
- # Convert v3 to v2 token data and build v2 catalog
- token_data = self.v2_token_data_helper.v3_to_v2_token(v3_token_data)
- token_data['access']['token']['id'] = token_id
-
+ def issue_v2_token(self, *args, **kwargs):
+ token_id, token_data = super(Provider, self).issue_v2_token(
+ *args, **kwargs)
+ self._build_issued_at_info(token_id, token_data)
return token_id, token_data
def issue_v3_token(self, *args, **kwargs):
@@ -117,8 +57,12 @@ class Provider(common.BaseProvider):
# that we have to rely on when we validate the token.
fernet_creation_datetime_obj = self.token_formatter.creation_time(
token_id)
- token_data['token']['issued_at'] = ks_utils.isotime(
- at=fernet_creation_datetime_obj, subsecond=True)
+ if token_data.get('access'):
+ token_data['access']['token']['issued_at'] = ks_utils.isotime(
+ at=fernet_creation_datetime_obj, subsecond=True)
+ else:
+ token_data['token']['issued_at'] = ks_utils.isotime(
+ at=fernet_creation_datetime_obj, subsecond=True)
def _build_federated_info(self, token_data):
"""Extract everything needed for federated tokens.
@@ -127,18 +71,18 @@ class Provider(common.BaseProvider):
the values and build federated Fernet tokens.
"""
- idp_id = token_data['token'].get('user', {}).get(
- federation_constants.FEDERATION, {}).get(
- 'identity_provider', {}).get('id')
- protocol_id = token_data['token'].get('user', {}).get(
- federation_constants.FEDERATION, {}).get('protocol', {}).get('id')
- # If we don't have an identity provider ID and a protocol ID, it's safe
- # to assume we aren't dealing with a federated token.
- if not (idp_id and protocol_id):
- return None
-
- group_ids = token_data['token'].get('user', {}).get(
- federation_constants.FEDERATION, {}).get('groups')
+ token_data = token_data['token']
+ try:
+ user = token_data['user']
+ federation = user[federation_constants.FEDERATION]
+ idp_id = federation['identity_provider']['id']
+ protocol_id = federation['protocol']['id']
+ except KeyError:
+ # The token data doesn't have federated info, so we aren't dealing
+ # with a federated token and there is nothing to build.
+ return
+
+ group_ids = federation.get('groups')
return {'group_ids': group_ids,
'idp_id': idp_id,
@@ -195,96 +139,66 @@ class Provider(common.BaseProvider):
self.v3_token_data_helper.populate_roles_for_groups(
token_dict, group_ids, project_id, domain_id, user_id)
- def validate_v2_token(self, token_ref):
- """Validate a V2 formatted token.
-
- :param token_ref: reference describing the token to validate
- :returns: the token data
- :raises keystone.exception.TokenNotFound: if token format is invalid
- :raises keystone.exception.Unauthorized: if v3 token is used
-
- """
- try:
- (user_id, methods,
- audit_ids, domain_id,
- project_id, trust_id,
- federated_info, created_at,
- expires_at) = self.token_formatter.validate_token(token_ref)
- except exception.ValidationError as e:
- raise exception.TokenNotFound(e)
-
- if trust_id or domain_id or federated_info:
- msg = _('This is not a v2.0 Fernet token. Use v3 for trust, '
- 'domain, or federated tokens.')
- raise exception.Unauthorized(msg)
-
- v3_token_data = self.v3_token_data_helper.get_token_data(
- user_id,
- methods,
- project_id=project_id,
- expires=expires_at,
- issued_at=created_at,
- token=token_ref,
- include_catalog=False,
- audit_info=audit_ids)
- token_data = self.v2_token_data_helper.v3_to_v2_token(v3_token_data)
- token_data['access']['token']['id'] = token_ref
- return token_data
-
- def validate_v3_token(self, token):
- """Validate a V3 formatted token.
-
- :param token: a string describing the token to validate
- :returns: the token data
- :raises keystone.exception.TokenNotFound: if token format version isn't
- supported
-
- """
- try:
- (user_id, methods, audit_ids, domain_id, project_id, trust_id,
- federated_info, created_at, expires_at) = (
- self.token_formatter.validate_token(token))
- except exception.ValidationError as e:
- raise exception.TokenNotFound(e)
-
- token_dict = None
- trust_ref = None
- if federated_info:
- token_dict = self._rebuild_federated_info(federated_info, user_id)
- if project_id or domain_id:
- self._rebuild_federated_token_roles(token_dict, federated_info,
- user_id, project_id,
- domain_id)
- if trust_id:
- trust_ref = self.trust_api.get_trust(trust_id)
-
- return self.v3_token_data_helper.get_token_data(
- user_id,
- method_names=methods,
- domain_id=domain_id,
- project_id=project_id,
- issued_at=created_at,
- expires=expires_at,
- trust=trust_ref,
- token=token_dict,
- audit_info=audit_ids)
+ def _extract_v2_token_data(self, token_data):
+ user_id = token_data['access']['user']['id']
+ expires_at = token_data['access']['token']['expires']
+ audit_ids = token_data['access']['token'].get('audit_ids')
+ methods = ['password']
+ # Guard against a missing audit_ids entry before checking length.
+ if audit_ids and len(audit_ids) > 1:
+ methods.append('token')
+ project_id = token_data['access']['token'].get('tenant', {}).get('id')
+ domain_id = None
+ trust_id = None
+ access_token_id = None
+ federated_info = None
+ return (user_id, expires_at, audit_ids, methods, domain_id, project_id,
+ trust_id, access_token_id, federated_info)
+
+ def _extract_v3_token_data(self, token_data):
+ """Extract information from a v3 token reference."""
+ user_id = token_data['token']['user']['id']
+ expires_at = token_data['token']['expires_at']
+ audit_ids = token_data['token']['audit_ids']
+ methods = token_data['token'].get('methods')
+ domain_id = token_data['token'].get('domain', {}).get('id')
+ project_id = token_data['token'].get('project', {}).get('id')
+ trust_id = token_data['token'].get('OS-TRUST:trust', {}).get('id')
+ access_token_id = token_data['token'].get('OS-OAUTH1', {}).get(
+ 'access_token_id')
+ federated_info = self._build_federated_info(token_data)
+
+ return (user_id, expires_at, audit_ids, methods, domain_id, project_id,
+ trust_id, access_token_id, federated_info)
def _get_token_id(self, token_data):
"""Generate the token_id based upon the data in token_data.
:param token_data: token information
:type token_data: dict
- :raises keystone.exception.NotImplemented: when called
+ :rtype: six.text_type
+
"""
+ # NOTE(lbragstad): Only v2.0 token responses include an 'access'
+ # attribute.
+ if token_data.get('access'):
+ (user_id, expires_at, audit_ids, methods, domain_id, project_id,
+ trust_id, access_token_id, federated_info) = (
+ self._extract_v2_token_data(token_data))
+ else:
+ (user_id, expires_at, audit_ids, methods, domain_id, project_id,
+ trust_id, access_token_id, federated_info) = (
+ self._extract_v3_token_data(token_data))
+
return self.token_formatter.create_token(
- token_data['token']['user']['id'],
- token_data['token']['expires_at'],
- token_data['token']['audit_ids'],
- methods=token_data['token'].get('methods'),
- domain_id=token_data['token'].get('domain', {}).get('id'),
- project_id=token_data['token'].get('project', {}).get('id'),
- trust_id=token_data['token'].get('OS-TRUST:trust', {}).get('id'),
- federated_info=self._build_federated_info(token_data)
+ user_id,
+ expires_at,
+ audit_ids,
+ methods=methods,
+ domain_id=domain_id,
+ project_id=project_id,
+ trust_id=trust_id,
+ federated_info=federated_info,
+ access_token_id=access_token_id
)
@property
@@ -292,5 +206,6 @@ class Provider(common.BaseProvider):
"""Return if the token provider supports bind authentication methods.
:returns: False
+
"""
return False
diff --git a/keystone-moon/keystone/token/providers/fernet/token_formatters.py b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
index dbfee6dd..dfdd06e8 100644
--- a/keystone-moon/keystone/token/providers/fernet/token_formatters.py
+++ b/keystone-moon/keystone/token/providers/fernet/token_formatters.py
@@ -20,7 +20,6 @@ import msgpack
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
-import six
from six.moves import map
from six.moves import urllib
@@ -66,14 +65,22 @@ class TokenFormatter(object):
return fernet.MultiFernet(fernet_instances)
def pack(self, payload):
- """Pack a payload for transport as a token."""
+ """Pack a payload for transport as a token.
+
+ :type payload: six.binary_type
+ :rtype: six.text_type
+
+ """
# base64 padding (if any) is not URL-safe
- return self.crypto.encrypt(payload).rstrip('=')
+ return self.crypto.encrypt(payload).rstrip(b'=').decode('utf-8')
def unpack(self, token):
- """Unpack a token, and validate the payload."""
- token = six.binary_type(token)
+ """Unpack a token, and validate the payload.
+ :type token: six.text_type
+ :rtype: six.binary_type
+
+ """
# TODO(lbragstad): Restore padding on token before decoding it.
# Initially in Kilo, Fernet tokens were returned to the user with
# padding appended to the token. Later in Liberty this padding was
@@ -89,16 +96,17 @@ class TokenFormatter(object):
token = TokenFormatter.restore_padding(token)
try:
- return self.crypto.decrypt(token)
+ return self.crypto.decrypt(token.encode('utf-8'))
except fernet.InvalidToken:
raise exception.ValidationError(
- _('This is not a recognized Fernet token'))
_('This is not a recognized Fernet token: %s') % token)
@classmethod
def restore_padding(cls, token):
"""Restore padding based on token size.
:param token: token to restore padding on
+ :type token: six.text_type
:returns: token with correct padding
"""
@@ -106,21 +114,22 @@ class TokenFormatter(object):
mod_returned = len(token) % 4
if mod_returned:
missing_padding = 4 - mod_returned
- token += b'=' * missing_padding
+ token += '=' * missing_padding
return token
@classmethod
def creation_time(cls, fernet_token):
- """Returns the creation time of a valid Fernet token."""
- # tokens may be transmitted as Unicode, but they're just ASCII
- # (pypi/cryptography will refuse to operate on Unicode input)
- fernet_token = six.binary_type(fernet_token)
+ """Returns the creation time of a valid Fernet token.
- # Restore padding on token before decoding it
+ :type fernet_token: six.text_type
+
+ """
fernet_token = TokenFormatter.restore_padding(fernet_token)
+ # fernet_token is six.text_type
- # fernet tokens are base64 encoded, so we need to unpack them first
- token_bytes = base64.urlsafe_b64decode(fernet_token)
+ # Fernet tokens are base64 encoded, so we need to unpack them first
+ # urlsafe_b64decode() requires six.binary_type
+ token_bytes = base64.urlsafe_b64decode(fernet_token.encode('utf-8'))
# slice into the byte array to get just the timestamp
timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]
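
restore_padding() only has to round the token length up to the next multiple of four before base64 decoding. A worked example (the token value is illustrative):

    token = u'gAAAAA'        # len 6; 6 % 4 == 2, so two '=' are missing
    mod = len(token) % 4
    if mod:
        token += '=' * (4 - mod)
    assert token == u'gAAAAA=='
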
@@ -136,66 +145,20 @@ class TokenFormatter(object):
def create_token(self, user_id, expires_at, audit_ids, methods=None,
domain_id=None, project_id=None, trust_id=None,
- federated_info=None):
+ federated_info=None, access_token_id=None):
"""Given a set of payload attributes, generate a Fernet token."""
- if trust_id:
- version = TrustScopedPayload.version
- payload = TrustScopedPayload.assemble(
- user_id,
- methods,
- project_id,
- expires_at,
- audit_ids,
- trust_id)
- elif project_id and federated_info:
- version = FederatedProjectScopedPayload.version
- payload = FederatedProjectScopedPayload.assemble(
- user_id,
- methods,
- project_id,
- expires_at,
- audit_ids,
- federated_info)
- elif domain_id and federated_info:
- version = FederatedDomainScopedPayload.version
- payload = FederatedDomainScopedPayload.assemble(
- user_id,
- methods,
- domain_id,
- expires_at,
- audit_ids,
- federated_info)
- elif federated_info:
- version = FederatedUnscopedPayload.version
- payload = FederatedUnscopedPayload.assemble(
- user_id,
- methods,
- expires_at,
- audit_ids,
- federated_info)
- elif project_id:
- version = ProjectScopedPayload.version
- payload = ProjectScopedPayload.assemble(
- user_id,
- methods,
- project_id,
- expires_at,
- audit_ids)
- elif domain_id:
- version = DomainScopedPayload.version
- payload = DomainScopedPayload.assemble(
- user_id,
- methods,
- domain_id,
- expires_at,
- audit_ids)
- else:
- version = UnscopedPayload.version
- payload = UnscopedPayload.assemble(
- user_id,
- methods,
- expires_at,
- audit_ids)
+ for payload_class in PAYLOAD_CLASSES:
+ if payload_class.create_arguments_apply(
+ project_id=project_id, domain_id=domain_id,
+ trust_id=trust_id, federated_info=federated_info,
+ access_token_id=access_token_id):
+ break
+
+ version = payload_class.version
+ payload = payload_class.assemble(
+ user_id, methods, project_id, domain_id, expires_at, audit_ids,
+ trust_id, federated_info, access_token_id
+ )
versioned_payload = (version,) + payload
serialized_payload = msgpack.packb(versioned_payload)
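# Illustrative result, assuming UnscopedPayload (version 0) was
# selected above: versioned_payload is
# (0, b_user_id, methods_int, expires_at_float, [b_audit_id]),
# which msgpack serializes to bytes before pack() encrypts it.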
@@ -215,44 +178,21 @@ class TokenFormatter(object):
return token
def validate_token(self, token):
- """Validates a Fernet token and returns the payload attributes."""
- # Convert v2 unicode token to a string
- if not isinstance(token, six.binary_type):
- token = token.encode('ascii')
+ """Validates a Fernet token and returns the payload attributes.
+ :type token: six.text_type
+
+ """
serialized_payload = self.unpack(token)
versioned_payload = msgpack.unpackb(serialized_payload)
version, payload = versioned_payload[0], versioned_payload[1:]
- # depending on the formatter, these may or may not be defined
- domain_id = None
- project_id = None
- trust_id = None
- federated_info = None
-
- if version == UnscopedPayload.version:
- (user_id, methods, expires_at, audit_ids) = (
- UnscopedPayload.disassemble(payload))
- elif version == DomainScopedPayload.version:
- (user_id, methods, domain_id, expires_at, audit_ids) = (
- DomainScopedPayload.disassemble(payload))
- elif version == ProjectScopedPayload.version:
- (user_id, methods, project_id, expires_at, audit_ids) = (
- ProjectScopedPayload.disassemble(payload))
- elif version == TrustScopedPayload.version:
- (user_id, methods, project_id, expires_at, audit_ids, trust_id) = (
- TrustScopedPayload.disassemble(payload))
- elif version == FederatedUnscopedPayload.version:
- (user_id, methods, expires_at, audit_ids, federated_info) = (
- FederatedUnscopedPayload.disassemble(payload))
- elif version == FederatedProjectScopedPayload.version:
- (user_id, methods, project_id, expires_at, audit_ids,
- federated_info) = FederatedProjectScopedPayload.disassemble(
- payload)
- elif version == FederatedDomainScopedPayload.version:
- (user_id, methods, domain_id, expires_at, audit_ids,
- federated_info) = FederatedDomainScopedPayload.disassemble(
- payload)
+ for payload_class in PAYLOAD_CLASSES:
+ if version == payload_class.version:
+ (user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id) = (
+ payload_class.disassemble(payload))
+ break
else:
# If the token_format is not recognized, raise ValidationError.
raise exception.ValidationError(_(
@@ -267,7 +207,7 @@ class TokenFormatter(object):
expires_at = ks_utils.isotime(at=expires_at, subsecond=True)
return (user_id, methods, audit_ids, domain_id, project_id, trust_id,
- federated_info, created_at, expires_at)
+ federated_info, access_token_id, created_at, expires_at)
class BasePayload(object):
@@ -275,10 +215,32 @@ class BasePayload(object):
version = None
@classmethod
- def assemble(cls, *args):
+ def create_arguments_apply(cls, **kwargs):
+ """Check the arguments to see if they apply to this payload variant.
+
+ :returns: True if the arguments indicate that this payload class is
+ needed for the token, otherwise returns False.
+ :rtype: bool
+
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
"""Assemble the payload of a token.
- :param args: whatever data should go into the payload
+ :param user_id: identifier of the user in the token request
+ :param methods: list of authentication methods used
+ :param project_id: ID of the project to scope to
+ :param domain_id: ID of the domain to scope to
+ :param expires_at: datetime of the token's expiration
+ :param audit_ids: list of the token's audit IDs
+ :param trust_id: ID of the trust in effect
+ :param federated_info: dictionary containing group IDs, the identity
+ provider ID, protocol ID, and federated domain
+ ID
+ :param access_token_id: ID of the secret in OAuth1 authentication
:returns: the payload of a token
"""
@@ -288,6 +250,17 @@ class BasePayload(object):
def disassemble(cls, payload):
"""Disassemble an unscoped payload into the component data.
+ The tuple consists of::
+
+ (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
+
+ * ``methods`` are the auth methods.
+ * ``federated_info`` is a dict that contains the group IDs, the
+ identity provider ID, the protocol ID, and the federated domain ID
+
+ Fields will be set to None if they didn't apply to this payload type.
+
:param payload: this variant of payload
:returns: a tuple of the payloads component data
@@ -302,9 +275,6 @@ class BasePayload(object):
:returns: a byte representation of the uuid
"""
- # TODO(lbragstad): Wrap this in an exception. Not sure what the case
- # would be where we couldn't handle what we've been given but incase
- # the integrity of the token has been compromised.
uuid_obj = uuid.UUID(uuid_string)
return uuid_obj.bytes
@@ -316,18 +286,15 @@ class BasePayload(object):
:returns: uuid hex formatted string
"""
- # TODO(lbragstad): Wrap this in an exception. Not sure what the case
- # would be where we couldn't handle what we've been given but incase
- # the integrity of the token has been compromised.
uuid_obj = uuid.UUID(bytes=uuid_byte_string)
return uuid_obj.hex
@classmethod
- def _convert_time_string_to_int(cls, time_string):
- """Convert a time formatted string to a timestamp integer.
+ def _convert_time_string_to_float(cls, time_string):
+ """Convert a time formatted string to a float.
:param time_string: time formatted string
- :returns: an integer timestamp
+ :returns: a timestamp as a float
"""
time_object = timeutils.parse_isotime(time_string)
@@ -335,14 +302,14 @@ class BasePayload(object):
datetime.datetime.utcfromtimestamp(0)).total_seconds()
@classmethod
- def _convert_int_to_time_string(cls, time_int):
- """Convert a timestamp integer to a string.
+ def _convert_float_to_time_string(cls, time_float):
+ """Convert a floating point timestamp to a string.
- :param time_int: integer representing timestamp
+ :param time_float: a floating point timestamp
:returns: a time formatted string
"""
- time_object = datetime.datetime.utcfromtimestamp(time_int)
+ time_object = datetime.datetime.utcfromtimestamp(time_float)
return ks_utils.isotime(time_object, subsecond=True)
@classmethod
@@ -361,74 +328,51 @@ class BasePayload(object):
# federation)
return (False, value)
- @classmethod
- def attempt_convert_uuid_bytes_to_hex(cls, value):
- """Attempt to convert value to hex or return value.
-
- :param value: value to attempt to convert to hex
- :returns: uuid value in hex or value
-
- """
- try:
- return cls.convert_uuid_bytes_to_hex(value)
- except ValueError:
- return value
-
class UnscopedPayload(BasePayload):
version = 0
@classmethod
- def assemble(cls, user_id, methods, expires_at, audit_ids):
- """Assemble the payload of an unscoped token.
-
- :param user_id: identifier of the user in the token request
- :param methods: list of authentication methods used
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :returns: the payload of an unscoped token
+ def create_arguments_apply(cls, **kwargs):
+ return True
- """
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
return (b_user_id, methods, expires_at_int, b_audit_ids)
@classmethod
def disassemble(cls, payload):
- """Disassemble an unscoped payload into the component data.
-
- :param payload: the payload of an unscoped token
- :return: a tuple containing the user_id, auth methods, expires_at, and
- audit_ids
-
- """
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
- expires_at_str = cls._convert_int_to_time_string(payload[2])
+ expires_at_str = cls._convert_float_to_time_string(payload[2])
audit_ids = list(map(provider.base64_encode, payload[3]))
- return (user_id, methods, expires_at_str, audit_ids)
+ project_id = None
+ domain_id = None
+ trust_id = None
+ federated_info = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class DomainScopedPayload(BasePayload):
version = 1
@classmethod
- def assemble(cls, user_id, methods, domain_id, expires_at, audit_ids):
- """Assemble the payload of a domain-scoped token.
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['domain_id']
- :param user_id: ID of the user in the token request
- :param methods: list of authentication methods used
- :param domain_id: ID of the domain to scope to
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :returns: the payload of a domain-scoped token
-
- """
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
try:
@@ -439,23 +383,16 @@ class DomainScopedPayload(BasePayload):
b_domain_id = domain_id
else:
raise
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids)
@classmethod
def disassemble(cls, payload):
- """Disassemble a payload into the component data.
-
- :param payload: the payload of a token
- :return: a tuple containing the user_id, auth methods, domain_id,
- expires_at_str, and audit_ids
-
- """
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
try:
domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
@@ -465,79 +402,68 @@ class DomainScopedPayload(BasePayload):
domain_id = payload[2]
else:
raise
- expires_at_str = cls._convert_int_to_time_string(payload[3])
+ expires_at_str = cls._convert_float_to_time_string(payload[3])
audit_ids = list(map(provider.base64_encode, payload[4]))
-
- return (user_id, methods, domain_id, expires_at_str, audit_ids)
+ project_id = None
+ trust_id = None
+ federated_info = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class ProjectScopedPayload(BasePayload):
version = 2
@classmethod
- def assemble(cls, user_id, methods, project_id, expires_at, audit_ids):
- """Assemble the payload of a project-scoped token.
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['project_id']
- :param user_id: ID of the user in the token request
- :param methods: list of authentication methods used
- :param project_id: ID of the project to scope to
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :returns: the payload of a project-scoped token
-
- """
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids)
@classmethod
def disassemble(cls, payload):
- """Disassemble a payload into the component data.
-
- :param payload: the payload of a token
- :return: a tuple containing the user_id, auth methods, project_id,
- expires_at_str, and audit_ids
-
- """
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
(is_stored_as_bytes, project_id) = payload[2]
if is_stored_as_bytes:
- project_id = cls.attempt_convert_uuid_bytes_to_hex(project_id)
- expires_at_str = cls._convert_int_to_time_string(payload[3])
+ project_id = cls.convert_uuid_bytes_to_hex(project_id)
+ expires_at_str = cls._convert_float_to_time_string(payload[3])
audit_ids = list(map(provider.base64_encode, payload[4]))
-
- return (user_id, methods, project_id, expires_at_str, audit_ids)
+ domain_id = None
+ trust_id = None
+ federated_info = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class TrustScopedPayload(BasePayload):
version = 3
@classmethod
- def assemble(cls, user_id, methods, project_id, expires_at, audit_ids,
- trust_id):
- """Assemble the payload of a trust-scoped token.
-
- :param user_id: ID of the user in the token request
- :param methods: list of authentication methods used
- :param project_id: ID of the project to scope to
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :param trust_id: ID of the trust in effect
- :returns: the payload of a trust-scoped token
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['trust_id']
- """
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id)
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
@@ -546,32 +472,31 @@ class TrustScopedPayload(BasePayload):
@classmethod
def disassemble(cls, payload):
- """Validate a trust-based payload.
-
- :param token_string: a string representing the token
- :returns: a tuple containing the user_id, auth methods, project_id,
- expires_at_str, audit_ids, and trust_id
-
- """
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
(is_stored_as_bytes, project_id) = payload[2]
if is_stored_as_bytes:
- project_id = cls.attempt_convert_uuid_bytes_to_hex(project_id)
- expires_at_str = cls._convert_int_to_time_string(payload[3])
+ project_id = cls.convert_uuid_bytes_to_hex(project_id)
+ expires_at_str = cls._convert_float_to_time_string(payload[3])
audit_ids = list(map(provider.base64_encode, payload[4]))
trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
-
- return (user_id, methods, project_id, expires_at_str, audit_ids,
- trust_id)
+ domain_id = None
+ federated_info = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class FederatedUnscopedPayload(BasePayload):
version = 4
@classmethod
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['federated_info']
+
+ @classmethod
def pack_group_id(cls, group_dict):
return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id'])
@@ -579,24 +504,12 @@ class FederatedUnscopedPayload(BasePayload):
def unpack_group_id(cls, group_id_in_bytes):
(is_stored_as_bytes, group_id) = group_id_in_bytes
if is_stored_as_bytes:
- group_id = cls.attempt_convert_uuid_bytes_to_hex(group_id)
+ group_id = cls.convert_uuid_bytes_to_hex(group_id)
return {'id': group_id}
@classmethod
- def assemble(cls, user_id, methods, expires_at, audit_ids, federated_info):
- """Assemble the payload of a federated token.
-
- :param user_id: ID of the user in the token request
- :param methods: list of authentication methods used
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :param federated_info: dictionary containing group IDs, the identity
- provider ID, protocol ID, and federated domain
- ID
- :returns: the payload of a federated token
-
- """
-
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_group_ids = list(map(cls.pack_group_id,
@@ -604,7 +517,7 @@ class FederatedUnscopedPayload(BasePayload):
b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
federated_info['idp_id'])
protocol_id = federated_info['protocol_id']
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
@@ -613,59 +526,43 @@ class FederatedUnscopedPayload(BasePayload):
@classmethod
def disassemble(cls, payload):
- """Validate a federated payload.
-
- :param token_string: a string representing the token
- :return: a tuple containing the user_id, auth methods, audit_ids, and a
- dictionary containing federated information such as the group
- IDs, the identity provider ID, the protocol ID, and the
- federated domain ID
-
- """
-
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
group_ids = list(map(cls.unpack_group_id, payload[2]))
(is_stored_as_bytes, idp_id) = payload[3]
if is_stored_as_bytes:
- idp_id = cls.attempt_convert_uuid_bytes_to_hex(idp_id)
+ idp_id = cls.convert_uuid_bytes_to_hex(idp_id)
protocol_id = payload[4]
- expires_at_str = cls._convert_int_to_time_string(payload[5])
+ expires_at_str = cls._convert_float_to_time_string(payload[5])
audit_ids = list(map(provider.base64_encode, payload[6]))
federated_info = dict(group_ids=group_ids, idp_id=idp_id,
protocol_id=protocol_id)
- return (user_id, methods, expires_at_str, audit_ids, federated_info)
+ project_id = None
+ domain_id = None
+ trust_id = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class FederatedScopedPayload(FederatedUnscopedPayload):
version = None
@classmethod
- def assemble(cls, user_id, methods, scope_id, expires_at, audit_ids,
- federated_info):
- """Assemble the project-scoped payload of a federated token.
-
- :param user_id: ID of the user in the token request
- :param methods: list of authentication methods used
- :param scope_id: ID of the project or domain ID to scope to
- :param expires_at: datetime of the token's expiration
- :param audit_ids: list of the token's audit IDs
- :param federated_info: dictionary containing the identity provider ID,
- protocol ID, federated domain ID and group IDs
- :returns: the payload of a federated token
-
- """
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
- b_scope_id = cls.attempt_convert_uuid_hex_to_bytes(scope_id)
+ b_scope_id = cls.attempt_convert_uuid_hex_to_bytes(
+ project_id or domain_id)
b_group_ids = list(map(cls.pack_group_id,
federated_info['group_ids']))
b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(
federated_info['idp_id'])
protocol_id = federated_info['protocol_id']
- expires_at_int = cls._convert_time_string_to_int(expires_at)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
audit_ids))
@@ -674,39 +571,107 @@ class FederatedScopedPayload(FederatedUnscopedPayload):
@classmethod
def disassemble(cls, payload):
- """Validate a project-scoped federated payload.
-
- :param token_string: a string representing the token
- :returns: a tuple containing the user_id, auth methods, scope_id,
- expiration time (as str), audit_ids, and a dictionary
- containing federated information such as the the identity
- provider ID, the protocol ID, the federated domain ID and
- group IDs
-
- """
(is_stored_as_bytes, user_id) = payload[0]
if is_stored_as_bytes:
- user_id = cls.attempt_convert_uuid_bytes_to_hex(user_id)
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
methods = auth_plugins.convert_integer_to_method_list(payload[1])
(is_stored_as_bytes, scope_id) = payload[2]
if is_stored_as_bytes:
- scope_id = cls.attempt_convert_uuid_bytes_to_hex(scope_id)
+ scope_id = cls.convert_uuid_bytes_to_hex(scope_id)
+ project_id = (
+ scope_id
+ if cls.version == FederatedProjectScopedPayload.version else None)
+ domain_id = (
+ scope_id
+ if cls.version == FederatedDomainScopedPayload.version else None)
group_ids = list(map(cls.unpack_group_id, payload[3]))
(is_stored_as_bytes, idp_id) = payload[4]
if is_stored_as_bytes:
- idp_id = cls.attempt_convert_uuid_bytes_to_hex(idp_id)
+ idp_id = cls.convert_uuid_bytes_to_hex(idp_id)
protocol_id = payload[5]
- expires_at_str = cls._convert_int_to_time_string(payload[6])
+ expires_at_str = cls._convert_float_to_time_string(payload[6])
audit_ids = list(map(provider.base64_encode, payload[7]))
federated_info = dict(idp_id=idp_id, protocol_id=protocol_id,
group_ids=group_ids)
- return (user_id, methods, scope_id, expires_at_str, audit_ids,
- federated_info)
+ trust_id = None
+ access_token_id = None
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
class FederatedProjectScopedPayload(FederatedScopedPayload):
version = 5
+ @classmethod
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['project_id'] and kwargs['federated_info']
+
class FederatedDomainScopedPayload(FederatedScopedPayload):
version = 6
+
+ @classmethod
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['domain_id'] and kwargs['federated_info']
+
+
+class OauthScopedPayload(BasePayload):
+ version = 7
+
+ @classmethod
+ def create_arguments_apply(cls, **kwargs):
+ return kwargs['access_token_id']
+
+ @classmethod
+ def assemble(cls, user_id, methods, project_id, domain_id, expires_at,
+ audit_ids, trust_id, federated_info, access_token_id):
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
+ b_audit_ids = list(map(provider.random_urlsafe_str_to_bytes,
+ audit_ids))
+ b_access_token_id = cls.attempt_convert_uuid_hex_to_bytes(
+ access_token_id)
+ return (b_user_id, methods, b_project_id, b_access_token_id,
+ expires_at_int, b_audit_ids)
+
+ @classmethod
+ def disassemble(cls, payload):
+ (is_stored_as_bytes, user_id) = payload[0]
+ if is_stored_as_bytes:
+ user_id = cls.convert_uuid_bytes_to_hex(user_id)
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ (is_stored_as_bytes, project_id) = payload[2]
+ if is_stored_as_bytes:
+ project_id = cls.convert_uuid_bytes_to_hex(project_id)
+ (is_stored_as_bytes, access_token_id) = payload[3]
+ if is_stored_as_bytes:
+ access_token_id = cls.convert_uuid_bytes_to_hex(access_token_id)
+ expires_at_str = cls._convert_float_to_time_string(payload[4])
+ audit_ids = list(map(provider.base64_encode, payload[5]))
+ domain_id = None
+ trust_id = None
+ federated_info = None
+
+ return (user_id, methods, project_id, domain_id, expires_at_str,
+ audit_ids, trust_id, federated_info, access_token_id)
+
+
+# For now, the order of the classes in the following list is important. This
+# is because the way they test that the payload applies to them in
+# the create_arguments_apply method requires that the previous ones rejected
+# the payload arguments. For example, UnscopedPayload must be last since it's
+# the catch-all after all the other payloads have been checked.
+# TODO(blk-u): Clean up the create_arguments_apply methods so that they don't
+# depend on the previous classes then these can be in any order.
+PAYLOAD_CLASSES = [
+ OauthScopedPayload,
+ TrustScopedPayload,
+ FederatedProjectScopedPayload,
+ FederatedDomainScopedPayload,
+ FederatedUnscopedPayload,
+ ProjectScopedPayload,
+ DomainScopedPayload,
+ UnscopedPayload,
+]
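+# Illustrative selection, hypothetical arguments: for a create_token()
+# call with project_id='d4e5f6' and trust_id, federated_info and
+# access_token_id all None, the classes are consulted in order; the
+# OAuth, trust and federated variants return falsy values, so
+# ProjectScopedPayload (version 2) assembles the payload.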
diff --git a/keystone-moon/keystone/token/providers/fernet/utils.py b/keystone-moon/keystone/token/providers/fernet/utils.py
index 4235eda8..1c3552d4 100644
--- a/keystone-moon/keystone/token/providers/fernet/utils.py
+++ b/keystone-moon/keystone/token/providers/fernet/utils.py
@@ -25,29 +25,33 @@ LOG = log.getLogger(__name__)
CONF = cfg.CONF
-def validate_key_repository():
+def validate_key_repository(requires_write=False):
"""Validate permissions on the key repository directory."""
# NOTE(lbragstad): We shouldn't need to check if the directory was passed
# in as None because we don't set allow_no_values to True.
- # ensure current user has full access to the key repository
- if (not os.access(CONF.fernet_tokens.key_repository, os.R_OK) or not
- os.access(CONF.fernet_tokens.key_repository, os.W_OK) or not
- os.access(CONF.fernet_tokens.key_repository, os.X_OK)):
+ # ensure current user has sufficient access to the key repository
+ is_valid = (os.access(CONF.fernet_tokens.key_repository, os.R_OK) and
+ os.access(CONF.fernet_tokens.key_repository, os.X_OK))
+ if requires_write:
+ is_valid = (is_valid and
+ os.access(CONF.fernet_tokens.key_repository, os.W_OK))
+
+ if not is_valid:
LOG.error(
_LE('Either [fernet_tokens] key_repository does not exist or '
'Keystone does not have sufficient permission to access it: '
'%s'), CONF.fernet_tokens.key_repository)
- return False
-
- # ensure the key repository isn't world-readable
- stat_info = os.stat(CONF.fernet_tokens.key_repository)
- if stat_info.st_mode & stat.S_IROTH or stat_info.st_mode & stat.S_IXOTH:
- LOG.warning(_LW(
- '[fernet_tokens] key_repository is world readable: %s'),
- CONF.fernet_tokens.key_repository)
+ else:
+ # ensure the key repository isn't world-readable
+ stat_info = os.stat(CONF.fernet_tokens.key_repository)
+ if (stat_info.st_mode & stat.S_IROTH or
+ stat_info.st_mode & stat.S_IXOTH):
+ LOG.warning(_LW(
+ '[fernet_tokens] key_repository is world readable: %s'),
+ CONF.fernet_tokens.key_repository)
- return True
+ return is_valid
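+# Behavior sketch, hypothetical modes: with the repository owned by the
+# keystone user and mode 0o700, validate_key_repository() returns True;
+# with mode 0o707 it still returns True but logs the world-readable
+# warning, since S_IROTH/S_IXOTH are set.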
def _convert_to_integers(id_value):
@@ -99,7 +103,7 @@ def _create_new_key(keystone_user_id, keystone_group_id):
Create a new key that is readable by the Keystone group and Keystone user.
"""
- key = fernet.Fernet.generate_key()
+ key = fernet.Fernet.generate_key() # key is bytes
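+ # (A Fernet key is 32 random bytes, url-safe base64-encoded to a
+ # 44-byte value.)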
# This ensures the key created is not world-readable
old_umask = os.umask(0o177)
@@ -117,7 +121,7 @@ def _create_new_key(keystone_user_id, keystone_group_id):
key_file = os.path.join(CONF.fernet_tokens.key_repository, '0')
try:
with open(key_file, 'w') as f:
- f.write(key)
+ f.write(key.decode('utf-8')) # convert key to str for the file.
finally:
# After writing the key, set the umask back to it's original value. Do
# the same with group and user identifiers if a Keystone group or user
@@ -176,7 +180,7 @@ def rotate_keys(keystone_user_id=None, keystone_group_id=None):
if os.path.isfile(path):
try:
key_id = int(filename)
- except ValueError:
+ except ValueError: # nosec : name isn't a number, ignore the file.
pass
else:
key_files[key_id] = path
@@ -243,7 +247,8 @@ def load_keys():
with open(path, 'r') as key_file:
try:
key_id = int(filename)
- except ValueError:
+ except ValueError: # nosec : filename isn't a number, ignore
+ # this file since it's not a key.
pass
else:
keys[key_id] = key_file.read()
diff --git a/keystone-moon/keystone/token/providers/pki.py b/keystone-moon/keystone/token/providers/pki.py
index af8dc739..6a5a2999 100644
--- a/keystone-moon/keystone/token/providers/pki.py
+++ b/keystone-moon/keystone/token/providers/pki.py
@@ -17,6 +17,7 @@
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_serialization import jsonutils
from keystone.common import environment
@@ -31,6 +32,10 @@ CONF = cfg.CONF
LOG = log.getLogger(__name__)
+@versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='the PKI token provider',
+ in_favor_of='the Fernet or UUID token providers')
class Provider(common.BaseProvider):
def _get_token_id(self, token_data):
try:
diff --git a/keystone-moon/keystone/token/providers/pkiz.py b/keystone-moon/keystone/token/providers/pkiz.py
index b4e31918..3e78d2e4 100644
--- a/keystone-moon/keystone/token/providers/pkiz.py
+++ b/keystone-moon/keystone/token/providers/pkiz.py
@@ -15,6 +15,7 @@
from keystoneclient.common import cms
from oslo_config import cfg
from oslo_log import log
+from oslo_log import versionutils
from oslo_serialization import jsonutils
from keystone.common import environment
@@ -30,6 +31,10 @@ LOG = log.getLogger(__name__)
ERROR_MESSAGE = _('Unable to sign token.')
+@versionutils.deprecated(
+ as_of=versionutils.deprecated.MITAKA,
+ what='the PKIZ token provider',
+ in_favor_of='the Fernet or UUID token providers')
class Provider(common.BaseProvider):
def _get_token_id(self, token_data):
try:
diff --git a/keystone-moon/keystone/trust/__init__.py b/keystone-moon/keystone/trust/__init__.py
index e5ee61fb..bd7297ea 100644
--- a/keystone-moon/keystone/trust/__init__.py
+++ b/keystone-moon/keystone/trust/__init__.py
@@ -14,4 +14,3 @@
from keystone.trust import controllers # noqa
from keystone.trust.core import * # noqa
-from keystone.trust import routers # noqa
diff --git a/keystone-moon/keystone/trust/backends/sql.py b/keystone-moon/keystone/trust/backends/sql.py
index a017056b..cb8446b3 100644
--- a/keystone-moon/keystone/trust/backends/sql.py
+++ b/keystone-moon/keystone/trust/backends/sql.py
@@ -14,7 +14,6 @@
import time
-from oslo_log import log
from oslo_utils import timeutils
from six.moves import range
@@ -23,7 +22,6 @@ from keystone import exception
from keystone import trust
-LOG = log.getLogger(__name__)
# The maximum number of iterations that will be attempted for optimistic
# locking on consuming a limited-use trust.
MAXIMUM_CONSUME_ATTEMPTS = 10
@@ -45,6 +43,10 @@ class TrustModel(sql.ModelBase, sql.DictBase):
expires_at = sql.Column(sql.DateTime)
remaining_uses = sql.Column(sql.Integer, nullable=True)
extra = sql.Column(sql.JsonBlob())
+ __table_args__ = (sql.UniqueConstraint(
+ 'trustor_user_id', 'trustee_user_id', 'project_id',
+ 'impersonation', 'expires_at',
+ name='duplicate_trust_constraint'),)
class TrustRole(sql.ModelBase):
@@ -57,7 +59,7 @@ class TrustRole(sql.ModelBase):
class Trust(trust.TrustDriverV8):
@sql.handle_conflicts(conflict_type='trust')
def create_trust(self, trust_id, trust, roles):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
ref = TrustModel.from_dict(trust)
ref['id'] = trust_id
if ref.get('expires_at') and ref['expires_at'].tzinfo is not None:
@@ -70,9 +72,9 @@ class Trust(trust.TrustDriverV8):
trust_role.role_id = role['id']
added_roles.append({'id': role['id']})
session.add(trust_role)
- trust_dict = ref.to_dict()
- trust_dict['roles'] = added_roles
- return trust_dict
+ trust_dict = ref.to_dict()
+ trust_dict['roles'] = added_roles
+ return trust_dict
def _add_roles(self, trust_id, session, trust_dict):
roles = []
@@ -84,7 +86,7 @@ class Trust(trust.TrustDriverV8):
def consume_use(self, trust_id):
for attempt in range(MAXIMUM_CONSUME_ATTEMPTS):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
try:
query_result = (session.query(TrustModel.remaining_uses).
filter_by(id=trust_id).
@@ -130,51 +132,51 @@ class Trust(trust.TrustDriverV8):
raise exception.TrustConsumeMaximumAttempt(trust_id=trust_id)
def get_trust(self, trust_id, deleted=False):
- session = sql.get_session()
- query = session.query(TrustModel).filter_by(id=trust_id)
- if not deleted:
- query = query.filter_by(deleted_at=None)
- ref = query.first()
- if ref is None:
- raise exception.TrustNotFound(trust_id=trust_id)
- if ref.expires_at is not None and not deleted:
- now = timeutils.utcnow()
- if now > ref.expires_at:
+ with sql.session_for_read() as session:
+ query = session.query(TrustModel).filter_by(id=trust_id)
+ if not deleted:
+ query = query.filter_by(deleted_at=None)
+ ref = query.first()
+ if ref is None:
raise exception.TrustNotFound(trust_id=trust_id)
- # Do not return trusts that can't be used anymore
- if ref.remaining_uses is not None and not deleted:
- if ref.remaining_uses <= 0:
- raise exception.TrustNotFound(trust_id=trust_id)
- trust_dict = ref.to_dict()
+ if ref.expires_at is not None and not deleted:
+ now = timeutils.utcnow()
+ if now > ref.expires_at:
+ raise exception.TrustNotFound(trust_id=trust_id)
+ # Do not return trusts that can't be used anymore
+ if ref.remaining_uses is not None and not deleted:
+ if ref.remaining_uses <= 0:
+ raise exception.TrustNotFound(trust_id=trust_id)
+ trust_dict = ref.to_dict()
- self._add_roles(trust_id, session, trust_dict)
- return trust_dict
+ self._add_roles(trust_id, session, trust_dict)
+ return trust_dict
@sql.handle_conflicts(conflict_type='trust')
def list_trusts(self):
- session = sql.get_session()
- trusts = session.query(TrustModel).filter_by(deleted_at=None)
- return [trust_ref.to_dict() for trust_ref in trusts]
+ with sql.session_for_read() as session:
+ trusts = session.query(TrustModel).filter_by(deleted_at=None)
+ return [trust_ref.to_dict() for trust_ref in trusts]
@sql.handle_conflicts(conflict_type='trust')
def list_trusts_for_trustee(self, trustee_user_id):
- session = sql.get_session()
- trusts = (session.query(TrustModel).
- filter_by(deleted_at=None).
- filter_by(trustee_user_id=trustee_user_id))
- return [trust_ref.to_dict() for trust_ref in trusts]
+ with sql.session_for_read() as session:
+ trusts = (session.query(TrustModel).
+ filter_by(deleted_at=None).
+ filter_by(trustee_user_id=trustee_user_id))
+ return [trust_ref.to_dict() for trust_ref in trusts]
@sql.handle_conflicts(conflict_type='trust')
def list_trusts_for_trustor(self, trustor_user_id):
- session = sql.get_session()
- trusts = (session.query(TrustModel).
- filter_by(deleted_at=None).
- filter_by(trustor_user_id=trustor_user_id))
- return [trust_ref.to_dict() for trust_ref in trusts]
+ with sql.session_for_read() as session:
+ trusts = (session.query(TrustModel).
+ filter_by(deleted_at=None).
+ filter_by(trustor_user_id=trustor_user_id))
+ return [trust_ref.to_dict() for trust_ref in trusts]
@sql.handle_conflicts(conflict_type='trust')
def delete_trust(self, trust_id):
- with sql.transaction() as session:
+ with sql.session_for_write() as session:
trust_ref = session.query(TrustModel).get(trust_id)
if not trust_ref:
raise exception.TrustNotFound(trust_id=trust_id)
diff --git a/keystone-moon/keystone/trust/controllers.py b/keystone-moon/keystone/trust/controllers.py
index 39cf0110..00581304 100644
--- a/keystone-moon/keystone/trust/controllers.py
+++ b/keystone-moon/keystone/trust/controllers.py
@@ -14,9 +14,6 @@
import uuid
-from oslo_config import cfg
-from oslo_log import log
-from oslo_log import versionutils
from oslo_utils import timeutils
import six
@@ -31,11 +28,6 @@ from keystone import notifications
from keystone.trust import schema
-CONF = cfg.CONF
-
-LOG = log.getLogger(__name__)
-
-
def _trustor_trustee_only(trust, user_id):
if (user_id != trust.get('trustee_user_id') and
user_id != trust.get('trustor_user_id')):
@@ -47,8 +39,8 @@ def _admin_trustor_only(context, trust, user_id):
raise exception.Forbidden()
-@dependency.requires('assignment_api', 'identity_api', 'role_api',
- 'token_provider_api', 'trust_api')
+@dependency.requires('assignment_api', 'identity_api', 'resource_api',
+ 'role_api', 'token_provider_api', 'trust_api')
class TrustV3(controller.V3Controller):
collection_name = "trusts"
member_name = "trust"
@@ -56,7 +48,6 @@ class TrustV3(controller.V3Controller):
@classmethod
def base_url(cls, context, path=None):
"""Construct a path and pass it to V3Controller.base_url method."""
-
# NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
# V3Controller.base_url handles setting the self link correctly.
path = '/OS-TRUST/' + cls.collection_name
@@ -113,7 +104,7 @@ class TrustV3(controller.V3Controller):
trust_roles.append({'id':
all_role_names[rolename]['id']})
else:
- raise exception.RoleNotFound("role %s is not defined" %
+ raise exception.RoleNotFound(_("role %s is not defined") %
rolename)
else:
raise exception.ValidationError(attribute='id or name',
@@ -128,7 +119,6 @@ class TrustV3(controller.V3Controller):
The user creating the trust must be the trustor.
"""
-
auth_context = context.get('environment',
{}).get('KEYSTONE_AUTH_CONTEXT', {})
@@ -178,17 +168,27 @@ class TrustV3(controller.V3Controller):
raise exception.Forbidden(
_('At least one role should be specified.'))
- def _get_user_role(self, trust):
+ def _get_trustor_roles(self, trust):
+ original_trust = trust.copy()
+ while original_trust.get('redelegated_trust_id'):
+ original_trust = self.trust_api.get_trust(
+ original_trust['redelegated_trust_id'])
+
if not self._attribute_is_empty(trust, 'project_id'):
- return self.assignment_api.get_roles_for_user_and_project(
- trust['trustor_user_id'], trust['project_id'])
+ self.resource_api.get_project(original_trust['project_id'])
+ # Get a list of roles including any domain specific roles
+ assignment_list = self.assignment_api.list_role_assignments(
+ user_id=original_trust['trustor_user_id'],
+ project_id=original_trust['project_id'],
+ effective=True, strip_domain_roles=False)
+ return list(set([x['role_id'] for x in assignment_list]))
else:
return []
def _require_trustor_has_role_in_project(self, trust):
- user_roles = self._get_user_role(trust)
+ trustor_roles = self._get_trustor_roles(trust)
for trust_role in trust['roles']:
- matching_roles = [x for x in user_roles
+ matching_roles = [x for x in trustor_roles
if x == trust_role['id']]
if not matching_roles:
raise exception.RoleNotFound(role_id=trust_role['id'])
@@ -262,12 +262,6 @@ class TrustV3(controller.V3Controller):
return {'roles': trust['roles'],
'links': trust['roles_links']}
- @versionutils.deprecated(
- versionutils.deprecated.KILO,
- remove_in=+2)
- def check_role_for_trust(self, context, trust_id, role_id):
- return self._check_role_for_trust(self, context, trust_id, role_id)
-
@controller.protected()
def get_role_for_trust(self, context, trust_id, role_id):
"""Get a role that has been assigned to a trust."""
diff --git a/keystone-moon/keystone/trust/core.py b/keystone-moon/keystone/trust/core.py
index 7838cb03..43069deb 100644
--- a/keystone-moon/keystone/trust/core.py
+++ b/keystone-moon/keystone/trust/core.py
@@ -17,7 +17,6 @@
import abc
from oslo_config import cfg
-from oslo_log import log
import six
from six.moves import zip
@@ -30,8 +29,6 @@ from keystone import notifications
CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
@dependency.requires('identity_api')
@dependency.provider('trust_api')
@@ -93,14 +90,9 @@ class Manager(manager.Manager):
def get_trust_pedigree(self, trust_id):
trust = self.driver.get_trust(trust_id)
trust_chain = [trust]
- if trust and trust.get('redelegated_trust_id'):
- trusts = self.driver.list_trusts_for_trustor(
- trust['trustor_user_id'])
- while trust_chain[-1].get('redelegated_trust_id'):
- for t in trusts:
- if t['id'] == trust_chain[-1]['redelegated_trust_id']:
- trust_chain.append(t)
- break
+ while trust and trust.get('redelegated_trust_id'):
+ trust = self.driver.get_trust(trust['redelegated_trust_id'])
+ trust_chain.append(trust)
return trust_chain
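# Walk-through, hypothetical chain: if trust C was redelegated from B,
# and B from A, get_trust_pedigree(C's id) now follows the
# redelegated_trust_id links one get_trust() call at a time and
# returns [C, B, A], instead of scanning all of the trustor's trusts.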
@@ -179,7 +171,7 @@ class Manager(manager.Manager):
def delete_trust(self, trust_id, initiator=None):
"""Remove a trust.
- :raises: keystone.exception.TrustNotFound
+ :raises keystone.exception.TrustNotFound: If the trust doesn't exist.
Recursively remove given and redelegated trusts
"""
@@ -192,7 +184,7 @@ class Manager(manager.Manager):
# recursive call to make sure all notifications are sent
try:
self.delete_trust(t['id'])
- except exception.TrustNotFound:
+ except exception.TrustNotFound: # nosec
# if trust was deleted by concurrent process
# consistency must not suffer
pass
@@ -244,11 +236,14 @@ class TrustDriverV8(object):
@abc.abstractmethod
def consume_use(self, trust_id):
- """Consume one use when a trust was created with a limitation on its
- uses, provided there are still uses available.
+ """Consume one use of a trust.
+
+ One use of a trust is consumed when the trust was created with a
+ limitation on its uses, provided there are still uses available.
- :raises: keystone.exception.TrustUseLimitReached,
- keystone.exception.TrustNotFound
+ :raises keystone.exception.TrustUseLimitReached: If no remaining uses
+ for trust.
+ :raises keystone.exception.TrustNotFound: If the trust doesn't exist.
"""
raise exception.NotImplemented() # pragma: no cover
diff --git a/keystone-moon/keystone/v2_crud/__init__.py b/keystone-moon/keystone/v2_crud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/v2_crud/__init__.py
diff --git a/keystone-moon/keystone/v2_crud/admin_crud.py b/keystone-moon/keystone/v2_crud/admin_crud.py
new file mode 100644
index 00000000..86ccfcd8
--- /dev/null
+++ b/keystone-moon/keystone/v2_crud/admin_crud.py
@@ -0,0 +1,240 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone import assignment
+from keystone import catalog
+from keystone.common import extension
+from keystone.common import wsgi
+from keystone import identity
+from keystone import resource
+
+
+extension.register_admin_extension(
+ 'OS-KSADM', {
+ 'name': 'OpenStack Keystone Admin',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-KSADM/v1.0',
+ 'alias': 'OS-KSADM',
+ 'updated': '2013-07-11T17:14:00-00:00',
+ 'description': 'OpenStack extensions to Keystone v2.0 API '
+ 'enabling Administrative Operations.',
+ 'links': [
+ {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://developer.openstack.org/'
+ 'api-ref-identity-v2-ext.html',
+ }
+ ]})
+
+
+class Router(wsgi.ComposableRouter):
+ """Previously known as the OS-KSADM extension.
+
+ Provides a bunch of CRUD operations for internal data types.
+
+ """
+
+ def add_routes(self, mapper):
+ tenant_controller = resource.controllers.Tenant()
+ assignment_tenant_controller = (
+ assignment.controllers.TenantAssignment())
+ user_controller = identity.controllers.User()
+ role_controller = assignment.controllers.Role()
+ assignment_role_controller = assignment.controllers.RoleAssignmentV2()
+ service_controller = catalog.controllers.Service()
+ endpoint_controller = catalog.controllers.Endpoint()
+
+ # Tenant Operations
+ mapper.connect(
+ '/tenants',
+ controller=tenant_controller,
+ action='create_project',
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ '/tenants/{tenant_id}',
+ controller=tenant_controller,
+ action='update_project',
+ conditions=dict(method=['PUT', 'POST']))
+ mapper.connect(
+ '/tenants/{tenant_id}',
+ controller=tenant_controller,
+ action='delete_project',
+ conditions=dict(method=['DELETE']))
+ mapper.connect(
+ '/tenants/{tenant_id}/users',
+ controller=assignment_tenant_controller,
+ action='get_project_users',
+ conditions=dict(method=['GET']))
+
+ # User Operations
+ mapper.connect(
+ '/users',
+ controller=user_controller,
+ action='get_users',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/users',
+ controller=user_controller,
+ action='create_user',
+ conditions=dict(method=['POST']))
+ # NOTE(termie): not in diablo
+ mapper.connect(
+ '/users/{user_id}',
+ controller=user_controller,
+ action='update_user',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/users/{user_id}',
+ controller=user_controller,
+ action='delete_user',
+ conditions=dict(method=['DELETE']))
+
+ # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+ mapper.connect(
+ '/users/{user_id}/password',
+ controller=user_controller,
+ action='set_user_password',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/users/{user_id}/OS-KSADM/password',
+ controller=user_controller,
+ action='set_user_password',
+ conditions=dict(method=['PUT']))
+
+ # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+ mapper.connect(
+ '/users/{user_id}/tenant',
+ controller=user_controller,
+ action='update_user',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/users/{user_id}/OS-KSADM/tenant',
+ controller=user_controller,
+ action='update_user',
+ conditions=dict(method=['PUT']))
+
+ # COMPAT(diablo): the copy with no OS-KSADM is from diablo
+ mapper.connect(
+ '/users/{user_id}/enabled',
+ controller=user_controller,
+ action='set_user_enabled',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/users/{user_id}/OS-KSADM/enabled',
+ controller=user_controller,
+ action='set_user_enabled',
+ conditions=dict(method=['PUT']))
+
+ # User Roles
+ mapper.connect(
+ '/users/{user_id}/roles/OS-KSADM/{role_id}',
+ controller=assignment_role_controller,
+ action='add_role_to_user',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/users/{user_id}/roles/OS-KSADM/{role_id}',
+ controller=assignment_role_controller,
+ action='remove_role_from_user',
+ conditions=dict(method=['DELETE']))
+
+ # COMPAT(diablo): User Roles
+ mapper.connect(
+ '/users/{user_id}/roleRefs',
+ controller=assignment_role_controller,
+ action='get_role_refs',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/users/{user_id}/roleRefs',
+ controller=assignment_role_controller,
+ action='create_role_ref',
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ '/users/{user_id}/roleRefs/{role_ref_id}',
+ controller=assignment_role_controller,
+ action='delete_role_ref',
+ conditions=dict(method=['DELETE']))
+
+ # User-Tenant Roles
+ mapper.connect(
+ '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
+ controller=assignment_role_controller,
+ action='add_role_to_user',
+ conditions=dict(method=['PUT']))
+ mapper.connect(
+ '/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}',
+ controller=assignment_role_controller,
+ action='remove_role_from_user',
+ conditions=dict(method=['DELETE']))
+
+ # Service Operations
+ mapper.connect(
+ '/OS-KSADM/services',
+ controller=service_controller,
+ action='get_services',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/OS-KSADM/services',
+ controller=service_controller,
+ action='create_service',
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ '/OS-KSADM/services/{service_id}',
+ controller=service_controller,
+ action='delete_service',
+ conditions=dict(method=['DELETE']))
+ mapper.connect(
+ '/OS-KSADM/services/{service_id}',
+ controller=service_controller,
+ action='get_service',
+ conditions=dict(method=['GET']))
+
+ # Endpoint Templates
+ mapper.connect(
+ '/endpoints',
+ controller=endpoint_controller,
+ action='get_endpoints',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/endpoints',
+ controller=endpoint_controller,
+ action='create_endpoint',
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ '/endpoints/{endpoint_id}',
+ controller=endpoint_controller,
+ action='delete_endpoint',
+ conditions=dict(method=['DELETE']))
+
+ # Role Operations
+ mapper.connect(
+ '/OS-KSADM/roles',
+ controller=role_controller,
+ action='create_role',
+ conditions=dict(method=['POST']))
+ mapper.connect(
+ '/OS-KSADM/roles',
+ controller=role_controller,
+ action='get_roles',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/OS-KSADM/roles/{role_id}',
+ controller=role_controller,
+ action='get_role',
+ conditions=dict(method=['GET']))
+ mapper.connect(
+ '/OS-KSADM/roles/{role_id}',
+ controller=role_controller,
+ action='delete_role',
+ conditions=dict(method=['DELETE']))
diff --git a/keystone-moon/keystone/v2_crud/user_crud.py b/keystone-moon/keystone/v2_crud/user_crud.py
new file mode 100644
index 00000000..9da7f31f
--- /dev/null
+++ b/keystone-moon/keystone/v2_crud/user_crud.py
@@ -0,0 +1,134 @@
+# Copyright 2012 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from oslo_log import log
+
+from keystone.common import dependency
+from keystone.common import extension
+from keystone.common import wsgi
+from keystone import exception
+from keystone import identity
+from keystone.models import token_model
+
+
+LOG = log.getLogger(__name__)
+
+
+extension.register_public_extension(
+ 'OS-KSCRUD', {
+ 'name': 'OpenStack Keystone User CRUD',
+ 'namespace': 'http://docs.openstack.org/identity/api/ext/'
+ 'OS-KSCRUD/v1.0',
+ 'alias': 'OS-KSCRUD',
+ 'updated': '2013-07-07T12:00:00-00:00',
+ 'description': 'OpenStack extensions to Keystone v2.0 API '
+ 'enabling User Operations.',
+ 'links': [
+ {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://developer.openstack.org/'
+ 'api-ref-identity-v2-ext.html',
+ }
+ ]})
+
+
+@dependency.requires('catalog_api', 'identity_api', 'resource_api',
+ 'token_provider_api')
+class UserController(identity.controllers.User):
+ def set_user_password(self, context, user_id, user):
+ token_id = context.get('token_id')
+ original_password = user.get('original_password')
+
+ token_data = self.token_provider_api.validate_token(token_id)
+ token_ref = token_model.KeystoneToken(token_id=token_id,
+ token_data=token_data)
+
+ if token_ref.user_id != user_id:
+ raise exception.Forbidden('Token belongs to another user')
+ if original_password is None:
+ raise exception.ValidationError(target='user',
+ attribute='original password')
+
+ try:
+ user_ref = self.identity_api.authenticate(
+ context,
+ user_id=token_ref.user_id,
+ password=original_password)
+ if not user_ref.get('enabled', True):
+ # NOTE(dolph): why can't you set a disabled user's password?
+ raise exception.Unauthorized('User is disabled')
+ except AssertionError:
+ raise exception.Unauthorized()
+
+ update_dict = {'password': user['password'], 'id': user_id}
+
+ admin_context = copy.copy(context)
+ admin_context['is_admin'] = True
+ super(UserController, self).set_user_password(admin_context,
+ user_id,
+ update_dict)
+
+ # Issue a new token based upon the original token data. This will
+ # always be a V2.0 token.
+
+ # TODO(morganfainberg): Add a mechanism to issue a new token directly
+ # from a token model so that this code can go away. This is likely
+ # not the norm as most cases do not need to yank apart a token to
+ # issue a new one.
+ new_token_ref = {}
+ metadata_ref = {}
+ roles_ref = None
+
+ new_token_ref['user'] = user_ref
+ if token_ref.bind:
+ new_token_ref['bind'] = token_ref.bind
+ if token_ref.project_id:
+ new_token_ref['tenant'] = self.resource_api.get_project(
+ token_ref.project_id)
+ if token_ref.role_names:
+ roles_ref = [dict(name=value)
+ for value in token_ref.role_names]
+ if token_ref.role_ids:
+ metadata_ref['roles'] = token_ref.role_ids
+ if token_ref.trust_id:
+ metadata_ref['trust'] = {
+ 'id': token_ref.trust_id,
+ 'trustee_user_id': token_ref.trustee_user_id}
+ new_token_ref['metadata'] = metadata_ref
+ new_token_ref['id'] = uuid.uuid4().hex
+
+ catalog_ref = self.catalog_api.get_catalog(user_id,
+ token_ref.project_id)
+
+ new_token_id, new_token_data = self.token_provider_api.issue_v2_token(
+ token_ref=new_token_ref, roles_ref=roles_ref,
+ catalog_ref=catalog_ref)
+ LOG.debug('TOKEN_REF %s', new_token_data)
+ return new_token_data
+
+
+class Router(wsgi.ComposableRouter):
+ """Provides a subset of CRUD operations for internal data types."""
+
+ def add_routes(self, mapper):
+ user_controller = UserController()
+
+ mapper.connect('/OS-KSCRUD/users/{user_id}',
+ controller=user_controller,
+ action='set_user_password',
+ conditions=dict(method=['PATCH']))
diff --git a/keystone-moon/keystone/version/__init__.py b/keystone-moon/keystone/version/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/keystone-moon/keystone/version/__init__.py
diff --git a/keystone-moon/keystone/version/controllers.py b/keystone-moon/keystone/version/controllers.py
new file mode 100644
index 00000000..2a7bacdf
--- /dev/null
+++ b/keystone-moon/keystone/version/controllers.py
@@ -0,0 +1,215 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+import webob
+
+from keystone.common import extension
+from keystone.common import json_home
+from keystone.common import wsgi
+from keystone import exception
+
+
+MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json'
+
+_VERSIONS = []
+
+# NOTE(blk-u): latest_app is set by keystone.version.service.loadapp() to the
+# application that was just loaded. In the case of keystone-all, loadapp() is
+# called twice, once for the public app and once for the admin app. In the
+# case of httpd/keystone, loadapp() is called once: for the public app on the
+# public instance, or for the admin app on the admin instance.
+# latest_app is used to fetch the /v3 JSON Home response, which is the same
+# for the admin and public services, so either one works.
+latest_app = None
+
+
+def request_v3_json_home(new_prefix):
+ if 'v3' not in _VERSIONS:
+ # No V3 support, so return an empty JSON Home document.
+ return {'resources': {}}
+
+ req = webob.Request.blank(
+ '/v3', headers={'Accept': 'application/json-home'})
+ v3_json_home_str = req.get_response(latest_app).body
+ v3_json_home = jsonutils.loads(v3_json_home_str)
+ json_home.translate_urls(v3_json_home, new_prefix)
+
+ return v3_json_home
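+# For illustration: translate_urls() prefixes each resource href in the
+# JSON Home document with new_prefix, so a hypothetical href of
+# '/auth/tokens' becomes '/v3/auth/tokens' when this is called as
+# request_v3_json_home('/v3') from get_versions() below.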
+
+
+class Extensions(wsgi.Application):
+ """Base extensions controller to be extended by public and admin API's."""
+
+ # extend in subclass to specify the set of extensions
+ @property
+ def extensions(self):
+ return None
+
+ def get_extensions_info(self, context):
+ return {'extensions': {'values': list(self.extensions.values())}}
+
+ def get_extension_info(self, context, extension_alias):
+ try:
+ return {'extension': self.extensions[extension_alias]}
+ except KeyError:
+ raise exception.NotFound(target=extension_alias)
+
+
+class AdminExtensions(Extensions):
+ @property
+ def extensions(self):
+ return extension.ADMIN_EXTENSIONS
+
+
+class PublicExtensions(Extensions):
+ @property
+ def extensions(self):
+ return extension.PUBLIC_EXTENSIONS
+
+
+def register_version(version):
+ _VERSIONS.append(version)
+
+
+class MimeTypes(object):
+ JSON = 'application/json'
+ JSON_HOME = 'application/json-home'
+
+
+def v3_mime_type_best_match(context):
+
+ # accept_header is a WebOb MIMEAccept object so supports best_match.
+ accept_header = context['accept_header']
+
+ if not accept_header:
+ return MimeTypes.JSON
+
+ SUPPORTED_TYPES = [MimeTypes.JSON, MimeTypes.JSON_HOME]
+ return accept_header.best_match(SUPPORTED_TYPES)
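+
+# Negotiation sketch: a request sent with 'Accept: application/json-home'
+# best-matches MimeTypes.JSON_HOME, while a request with no Accept header
+# falls back to MimeTypes.JSON.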
+
+
+class Version(wsgi.Application):
+
+ def __init__(self, version_type, routers=None):
+ self.endpoint_url_type = version_type
+ self._routers = routers
+
+ super(Version, self).__init__()
+
+ def _get_identity_url(self, context, version):
+ """Returns a URL to keystone's own endpoint."""
+ url = self.base_url(context, self.endpoint_url_type)
+ return '%s/%s/' % (url, version)
+
+ def _get_versions_list(self, context):
+ """The list of versions is dependent on the context."""
+ versions = {}
+ if 'v2.0' in _VERSIONS:
+ versions['v2.0'] = {
+ 'id': 'v2.0',
+ 'status': 'stable',
+ 'updated': '2014-04-17T00:00:00Z',
+ 'links': [
+ {
+ 'rel': 'self',
+ 'href': self._get_identity_url(context, 'v2.0'),
+ }, {
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'http://docs.openstack.org/'
+ }
+ ],
+ 'media-types': [
+ {
+ 'base': 'application/json',
+ 'type': MEDIA_TYPE_JSON % 'v2.0'
+ }
+ ]
+ }
+
+ if 'v3' in _VERSIONS:
+ versions['v3'] = {
+ 'id': 'v3.6',
+ 'status': 'stable',
+ 'updated': '2016-04-04T00:00:00Z',
+ 'links': [
+ {
+ 'rel': 'self',
+ 'href': self._get_identity_url(context, 'v3'),
+ }
+ ],
+ 'media-types': [
+ {
+ 'base': 'application/json',
+ 'type': MEDIA_TYPE_JSON % 'v3'
+ }
+ ]
+ }
+
+ return versions
+
+ def get_versions(self, context):
+
+ req_mime_type = v3_mime_type_best_match(context)
+ if req_mime_type == MimeTypes.JSON_HOME:
+ v3_json_home = request_v3_json_home('/v3')
+ return wsgi.render_response(
+ body=v3_json_home,
+ headers=(('Content-Type', MimeTypes.JSON_HOME),))
+
+ versions = self._get_versions_list(context)
+ return wsgi.render_response(status=(300, 'Multiple Choices'), body={
+ 'versions': {
+ 'values': list(versions.values())
+ }
+ })
+
+ def get_version_v2(self, context):
+ versions = self._get_versions_list(context)
+ if 'v2.0' in _VERSIONS:
+ return wsgi.render_response(body={
+ 'version': versions['v2.0']
+ })
+ else:
+ raise exception.VersionNotFound(version='v2.0')
+
+ def _get_json_home_v3(self):
+
+ def all_resources():
+ for router in self._routers:
+ for resource in router.v3_resources:
+ yield resource
+
+ return {
+ 'resources': dict(all_resources())
+ }
+
+ def get_version_v3(self, context):
+ versions = self._get_versions_list(context)
+ if 'v3' in _VERSIONS:
+ req_mime_type = v3_mime_type_best_match(context)
+
+ if req_mime_type == MimeTypes.JSON_HOME:
+ return wsgi.render_response(
+ body=self._get_json_home_v3(),
+ headers=(('Content-Type', MimeTypes.JSON_HOME),))
+
+ return wsgi.render_response(body={
+ 'version': versions['v3']
+ })
+ else:
+ raise exception.VersionNotFound(version='v3')
diff --git a/keystone-moon/keystone/version/routers.py b/keystone-moon/keystone/version/routers.py
new file mode 100644
index 00000000..5da4951c
--- /dev/null
+++ b/keystone-moon/keystone/version/routers.py
@@ -0,0 +1,80 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The only routers in this file should be ``ComposableRouter`` subclasses.
+
+The routers for the backends should be in the backend-specific router modules.
+For example, the ``ComposableRouter`` for ``identity`` belongs in::
+
+ keystone.identity.routers
+
+"""
+
+
+from keystone.common import wsgi
+from keystone.version import controllers
+
+
+class Extension(wsgi.ComposableRouter):
+ def __init__(self, is_admin=True):
+ if is_admin:
+ self.controller = controllers.AdminExtensions()
+ else:
+ self.controller = controllers.PublicExtensions()
+
+ def add_routes(self, mapper):
+ extensions_controller = self.controller
+ mapper.connect('/extensions',
+ controller=extensions_controller,
+ action='get_extensions_info',
+ conditions=dict(method=['GET']))
+ mapper.connect('/extensions/{extension_alias}',
+ controller=extensions_controller,
+ action='get_extension_info',
+ conditions=dict(method=['GET']))
+
+
+class VersionV2(wsgi.ComposableRouter):
+ def __init__(self, description):
+ self.description = description
+
+ def add_routes(self, mapper):
+ version_controller = controllers.Version(self.description)
+ mapper.connect('/',
+ controller=version_controller,
+ action='get_version_v2')
+
+
+class VersionV3(wsgi.ComposableRouter):
+ def __init__(self, description, routers):
+ self.description = description
+ self._routers = routers
+
+ def add_routes(self, mapper):
+ version_controller = controllers.Version(self.description,
+ routers=self._routers)
+ mapper.connect('/',
+ controller=version_controller,
+ action='get_version_v3')
+
+
+class Versions(wsgi.ComposableRouter):
+ def __init__(self, description):
+ self.description = description
+
+ def add_routes(self, mapper):
+ version_controller = controllers.Version(self.description)
+ mapper.connect('/',
+ controller=version_controller,
+ action='get_versions')
diff --git a/keystone-moon/keystone/version/service.py b/keystone-moon/keystone/version/service.py
new file mode 100644
index 00000000..b0ed3b76
--- /dev/null
+++ b/keystone-moon/keystone/version/service.py
@@ -0,0 +1,161 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import sys
+
+from oslo_config import cfg
+from oslo_log import log
+from paste import deploy
+import routes
+
+from keystone.assignment import routers as assignment_routers
+from keystone.auth import routers as auth_routers
+from keystone.catalog import routers as catalog_routers
+from keystone.common import wsgi
+from keystone.credential import routers as credential_routers
+from keystone.endpoint_policy import routers as endpoint_policy_routers
+from keystone.federation import routers as federation_routers
+from keystone.i18n import _LW
+from keystone.identity import routers as identity_routers
+from keystone.oauth1 import routers as oauth1_routers
+from keystone.policy import routers as policy_routers
+from keystone.resource import routers as resource_routers
+from keystone.revoke import routers as revoke_routers
+from keystone.token import _simple_cert as simple_cert_ext
+from keystone.token import routers as token_routers
+from keystone.trust import routers as trust_routers
+from keystone.v2_crud import admin_crud
+from keystone.v2_crud import user_crud
+from keystone.version import controllers
+from keystone.version import routers
+
+
+CONF = cfg.CONF
+LOG = log.getLogger(__name__)
+
+
+def loadapp(conf, name):
+ # NOTE(blk-u): Save the application being loaded in the controllers module.
+ # This is similar to how public_app_factory() and v3_app_factory()
+ # register the version with the controllers module.
+ controllers.latest_app = deploy.loadapp(conf, name=name)
+ return controllers.latest_app
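+
+# Usage sketch (path and section name are illustrative):
+#   loadapp('config:/etc/keystone/keystone-paste.ini', name='main')
+# where 'main' names a composite section in the paste INI.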
+
+
+def fail_gracefully(f):
+ """Logs exceptions and aborts."""
+ @functools.wraps(f)
+ def wrapper(*args, **kw):
+ try:
+ return f(*args, **kw)
+ except Exception as e:
+ LOG.debug(e, exc_info=True)
+
+ # exception message is printed to all logs
+ LOG.critical(e)
+ sys.exit(1)
+
+ return wrapper
+
+
+def warn_local_conf(f):
+ @functools.wraps(f)
+ def wrapper(*args, **local_conf):
+ if local_conf:
+ LOG.warning(_LW('\'local conf\' from PasteDeploy INI is being '
+ 'ignored.'))
+ return f(*args, **local_conf)
+ return wrapper
+
+
+@fail_gracefully
+@warn_local_conf
+def public_app_factory(global_conf, **local_conf):
+ controllers.register_version('v2.0')
+ return wsgi.ComposingRouter(routes.Mapper(),
+ [assignment_routers.Public(),
+ token_routers.Router(),
+ user_crud.Router(),
+ routers.VersionV2('public'),
+ routers.Extension(False)])
+
+
+@fail_gracefully
+@warn_local_conf
+def admin_app_factory(global_conf, **local_conf):
+ controllers.register_version('v2.0')
+ return wsgi.ComposingRouter(routes.Mapper(),
+ [identity_routers.Admin(),
+ assignment_routers.Admin(),
+ token_routers.Router(),
+ resource_routers.Admin(),
+ admin_crud.Router(),
+ routers.VersionV2('admin'),
+ routers.Extension()])
+
+
+@fail_gracefully
+@warn_local_conf
+def public_version_app_factory(global_conf, **local_conf):
+ return wsgi.ComposingRouter(routes.Mapper(),
+ [routers.Versions('public')])
+
+
+@fail_gracefully
+@warn_local_conf
+def admin_version_app_factory(global_conf, **local_conf):
+ return wsgi.ComposingRouter(routes.Mapper(),
+ [routers.Versions('admin')])
+
+
+@fail_gracefully
+@warn_local_conf
+def v3_app_factory(global_conf, **local_conf):
+ controllers.register_version('v3')
+ mapper = routes.Mapper()
+ sub_routers = []
+ _routers = []
+
+ # NOTE(dstanek): Routers should be ordered by their frequency of use in
+ # a live system. This is due to the routes implementation. The most
+ # frequently used routers should appear first.
+ all_api_routers = [auth_routers,
+ assignment_routers,
+ catalog_routers,
+ credential_routers,
+ identity_routers,
+ policy_routers,
+ resource_routers,
+ revoke_routers,
+ federation_routers,
+ oauth1_routers,
+ # TODO(morganfainberg): Remove the simple_cert router
+ # when PKI and PKIZ tokens are removed.
+ simple_cert_ext]
+
+ if CONF.trust.enabled:
+ all_api_routers.append(trust_routers)
+
+ if CONF.endpoint_policy.enabled:
+ all_api_routers.append(endpoint_policy_routers)
+
+ for api_routers in all_api_routers:
+ routers_instance = api_routers.Routers()
+ _routers.append(routers_instance)
+ routers_instance.append_v3_routers(mapper, sub_routers)
+
+ # Add in the v3 version api
+ sub_routers.append(routers.VersionV3('public', _routers))
+ return wsgi.ComposingRouter(mapper, sub_routers)
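+
+# Wiring sketch (assumed to mirror the sample keystone-paste.ini): these
+# factories are referenced via the paste.app_factory entry points declared
+# in setup.cfg, e.g.
+#   [app:service_v3]
+#   use = egg:keystone#service_v3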
diff --git a/keystone-moon/rally-jobs/keystone.yaml b/keystone-moon/rally-jobs/keystone.yaml
index 9e656aad..d997404a 100644
--- a/keystone-moon/rally-jobs/keystone.yaml
+++ b/keystone-moon/rally-jobs/keystone.yaml
@@ -2,8 +2,6 @@
KeystoneBasic.create_user:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 100
@@ -14,8 +12,6 @@
KeystoneBasic.create_delete_user:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 100
@@ -26,8 +22,6 @@
KeystoneBasic.create_and_list_users:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 100
@@ -39,7 +33,6 @@
KeystoneBasic.create_user_update_password:
-
args:
- name_length: 10
password_length: 10
runner:
type: "constant"
@@ -51,8 +44,6 @@
KeystoneBasic.create_and_list_tenants:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 100
@@ -111,8 +102,6 @@
KeystoneBasic.create_tenant:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 50
@@ -124,7 +113,6 @@
KeystoneBasic.create_tenant_with_users:
-
args:
- name_length: 10
users_per_tenant: 10
runner:
type: "constant"
@@ -136,8 +124,6 @@
KeystoneBasic.create_update_and_delete_tenant:
-
- args:
- name_length: 10
runner:
type: "constant"
times: 50
diff --git a/keystone-moon/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml b/keystone-moon/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml
new file mode 100644
index 00000000..89ef1082
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml
@@ -0,0 +1,13 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ The V8 Assignment driver interface is deprecated. Support for the V8
+ Assignment driver interface is planned to be removed in the 'O' release of
+ OpenStack.
+other:
+ - The list_project_ids_for_user(), list_domain_ids_for_user(),
+ list_user_ids_for_project(), list_project_ids_for_groups(),
+ list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and
+ list_role_ids_for_groups_on_domain() methods have been removed from the
+ V9 version of the Assignment driver.
diff --git a/keystone-moon/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml b/keystone-moon/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml
new file mode 100644
index 00000000..98306f3e
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - >
+ [`blueprint domain-specific-roles <https://blueprints.launchpad.net/keystone/+spec/domain-specific-roles>`_]
+ Roles can now be optionally defined as domain specific. Domain specific
+ roles are not referenced in policy files; rather, they can be used to allow
+ a domain to build its own private inference rules with implied roles. A
+ domain specific role can be assigned to a domain or project within its
+ domain, and any subset of global roles it implies will appear in a token
+ scoped to the respective domain or project. The domain specific role
+ itself, however, will not appear in the token.
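+# Creation sketch (v3 API; the name and domain ID are illustrative):
+#   POST /v3/roles
+#   {"role": {"name": "support", "domain_id": "d0000000000000000000000001"}}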
diff --git a/keystone-moon/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml b/keystone-moon/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml
new file mode 100644
index 00000000..08bda86f
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ The V8 Role driver interface is deprecated. Support for the V8 Role driver
+ interface is planned to be removed in the 'O' release of OpenStack.
diff --git a/keystone-moon/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml b/keystone-moon/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml
new file mode 100644
index 00000000..8003b702
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - The V8 Resource driver interface is deprecated. Support for the V8
+ Resource driver interface is planned to be removed in the 'O' release of
+ OpenStack.
diff --git a/keystone-moon/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml b/keystone-moon/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml
new file mode 100644
index 00000000..997ee64a
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - >
+ [`blueprint bootstrap <https://blueprints.launchpad.net/keystone/+spec/bootstrap>`_]
+ keystone-manage now supports the bootstrap command
+ on the CLI so that a keystone install can be
+ initialized without the need for the admin_token
+ filter in the paste-ini.
+security:
+ - The use of the admin_token filter is insecure compared
+ to using a proper username/password. Historically,
+ the admin_token filter has been left enabled in
+ Keystone after initialization due to the way CMS
+ systems work. Moving to an out-of-band initialization using
+ ``keystone-manage bootstrap`` will eliminate the security concerns around
+ a static shared string that conveys admin access to keystone
+ and therefore to the entire installation.
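+# Usage sketch (the password value is illustrative):
+#   keystone-manage bootstrap --bootstrap-password s3cr3t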
diff --git a/keystone-moon/releasenotes/notes/admin_token-a5678d712783c145.yaml b/keystone-moon/releasenotes/notes/admin_token-a5678d712783c145.yaml
new file mode 100644
index 00000000..8547c6d3
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/admin_token-a5678d712783c145.yaml
@@ -0,0 +1,14 @@
+---
+upgrade:
+ - >
+ [`bug 1473553 <https://bugs.launchpad.net/keystone/+bug/1473553>`_]
+ The `keystone-paste.ini` must be updated to put the ``admin_token_auth``
+ middleware before ``build_auth_context``. See the sample
+ `keystone-paste.ini` for the correct `pipeline` value. Having
+ ``admin_token_auth`` after ``build_auth_context`` is deprecated and will
+ not be supported in a future release.
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ The ``admin_token_auth`` filter must now be placed before the
+ ``build_auth_context`` filter in `keystone-paste.ini`.
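+# Ordering sketch (abridged; see the sample file for the full pipeline):
+#   pipeline = ... admin_token_auth build_auth_context token_auth ...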
diff --git a/keystone-moon/releasenotes/notes/admin_token-c634ec12fc714255.yaml b/keystone-moon/releasenotes/notes/admin_token-c634ec12fc714255.yaml
new file mode 100644
index 00000000..69b70dbb
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/admin_token-c634ec12fc714255.yaml
@@ -0,0 +1,11 @@
+---
+security:
+ - The admin_token method of authentication was never intended to be
+ used for any purpose other than bootstrapping an install. However,
+ many deployments had to leave the admin_token method enabled due
+ to restrictions on editing the paste file used to configure the
+ web pipelines. To minimize the risk from this mechanism, the
+ `admin_token` configuration value now defaults to a Python `None`
+ value. In addition, if the value is set to `None`, either explicitly or
+ implicitly, the `admin_token` will not be enabled, and an attempt to
+ use it will lead to a failed authentication.
diff --git a/keystone-moon/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml b/keystone-moon/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml
new file mode 100644
index 00000000..a78f831f
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`blueprint domain-config-default <https://blueprints.launchpad.net/keystone/+spec/domain-config-default>`_]
+ The Identity API now supports retrieving the default values for the
+ configuration options that can be overridden via the domain specific
+ configuration API.
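+# Retrieval sketch (v3 API):
+#   GET /v3/domains/config/default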
diff --git a/keystone-moon/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml b/keystone-moon/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml
new file mode 100644
index 00000000..1c81d866
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`blueprint url-safe-naming <https://blueprints.launchpad.net/keystone/+spec/url-safe-naming>`_]
+ The names of projects and domains can now optionally be required to be URL
+ safe,
+ to support the future ability to specify projects using hierarchical
+ naming.
diff --git a/keystone-moon/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml b/keystone-moon/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml
new file mode 100644
index 00000000..0d5c2034
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - >
+ [`bug 1490804 <https://bugs.launchpad.net/keystone/+bug/1490804>`_]
+ Audit IDs are included in the token revocation list.
+security:
+ - >
+ [`bug 1490804 <https://bugs.launchpad.net/keystone/+bug/1490804>`_]
+ [`CVE-2015-7546 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-7546>`_]
+ A bug is fixed where an attacker could avoid token revocation when the PKI
+ or PKIZ token provider is used. The complete remediation for this
+ vulnerability requires the corresponding fix in the keystonemiddleware
+ project.
diff --git a/keystone-moon/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml b/keystone-moon/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml
new file mode 100644
index 00000000..0b7192b1
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`bug 1519210 <https://bugs.launchpad.net/keystone/+bug/1519210>`_]
+ A user may now opt out of notifications by specifying a list of
+ event types using the `notification_opt_out` option in `keystone.conf`.
+ These events are never sent to a messaging service.
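+# Configuration sketch (the event type listed is illustrative):
+#   [DEFAULT]
+#   notification_opt_out = identity.authenticate.success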
diff --git a/keystone-moon/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml b/keystone-moon/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml
new file mode 100644
index 00000000..68cb7e1d
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - >
+ [`bug 1535878 <https://bugs.launchpad.net/keystone/+bug/1535878>`_]
+ Originally, to perform GET /projects/{project_id}, the provided policy
+ files required a user to have at least project-admin-level permissions.
+ They have been updated to allow it to be performed by any user who has a
+ role on the project.
diff --git a/keystone-moon/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml b/keystone-moon/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml
new file mode 100644
index 00000000..bc6ec728
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - >
+ [`bug 1542417 <https://bugs.launchpad.net/keystone/+bug/1542417>`_]
+ Added support for a `user_description_attribute` mapping
+ to the LDAP driver configuration.
+upgrade:
+ - >
+ The LDAP driver now also maps the user description attribute after
+ user retrieval from LDAP.
+ If this is undesired behavior for your setup, please add `description`
+ to the `user_attribute_ignore` LDAP driver config setting.
+
+ The default mapping of the description attribute is set to `description`.
+ Please adjust the LDAP driver config setting `user_description_attribute`
+ if your LDAP uses a different attribute name (for instance `displayName`
+ in the case of an AD-backed LDAP).
+
+ If your `user_additional_attribute_mapping` setting contains
+ `description:description` you can remove this mapping, since this is
+ now the default behavior.
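+# Configuration sketch for an AD-backed LDAP:
+#   [ldap]
+#   user_description_attribute = displayName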
diff --git a/keystone-moon/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml b/keystone-moon/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml
new file mode 100644
index 00000000..0befecd3
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - >
+ [`bug 1526462 <https://bugs.launchpad.net/keystone/+bug/1526462>`_]
+ Added support for posixGroups with OpenDirectory and UNIX when using
+ the LDAP identity driver.
diff --git a/keystone-moon/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml b/keystone-moon/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml
new file mode 100644
index 00000000..785fb3cf
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`bug 1489061 <https://bugs.launchpad.net/keystone/+bug/1489061>`_]
+ Caching has been added to catalog retrieval on a per user ID and project
+ ID basis. This affects both the v2 and v3 APIs. As a result, this should
+ provide a performance benefit to Fernet-based deployments.
diff --git a/keystone-moon/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml b/keystone-moon/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml
new file mode 100644
index 00000000..e0c381d9
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml
@@ -0,0 +1,9 @@
+---
+deprecations:
+ - Use of ``$(tenant_id)s`` in the catalog endpoints is deprecated in favor
+ of ``$(project_id)s``.
+features:
+ - Keystone supports ``$(project_id)s`` in the catalog. It works the same as
+ ``$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalog
+ endpoints should be updated to use ``$(project_id)s``.
+
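+# Endpoint URL sketch (host and port are illustrative):
+#   http://controller:8774/v2.1/$(project_id)s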
diff --git a/keystone-moon/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml b/keystone-moon/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml
new file mode 100644
index 00000000..ce372ede
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ Deprecate the ``enabled`` option from ``[endpoint_policy]``, it will be
+ removed in the 'O' release, and the extension will always be enabled.
diff --git a/keystone-moon/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml b/keystone-moon/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml
new file mode 100644
index 00000000..7b9c8e08
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ The token memcache and memcache_pool persistence
+ backends have been deprecated in favor of using
+ Fernet tokens (which require no persistence).
diff --git a/keystone-moon/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml b/keystone-moon/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml
new file mode 100644
index 00000000..59680274
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml
@@ -0,0 +1,8 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ Deprecated all v2.0 APIs. The keystone team recommends using v3 APIs instead.
+ Most v2.0 APIs will be removed in the 'Q' release. However, the authentication
+ APIs and EC2 APIs are indefinitely deprecated and will not be removed in
+ the 'Q' release.
diff --git a/keystone-moon/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml b/keystone-moon/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml
new file mode 100644
index 00000000..31c7ff85
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml
@@ -0,0 +1,26 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ As of the Mitaka release, the PKI and PKIz token formats have been
+ deprecated. They will be removed in the 'O' release. Due to this change,
+ the `hash_algorithm` option in the `[token]` section of the
+ configuration file has also been deprecated, as has the
+ ``keystone-manage pki_setup`` command.
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ As of the Mitaka release, write support for the LDAP driver of the Identity
+ backend has been deprecated. This includes the following operations: create user,
+ create group, delete user, delete group, update user, update group,
+ add user to group, and remove user from group. These operations will be
+ removed in the 'O' release.
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ As of the Mitaka release, the auth plugin `keystone.auth.plugins.saml2.Saml2`
+ has been deprecated. It is recommended to use `keystone.auth.plugins.mapped.Mapped`
+ instead. The ``saml2`` plugin will be removed in the 'O' release.
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ As of the Mitaka release, the simple_cert_extension is deprecated since it
+ is only used in support of the PKI and PKIz token formats. It will be
+ removed in the 'O' release.
diff --git a/keystone-moon/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml b/keystone-moon/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml
new file mode 100644
index 00000000..f4c1bbe7
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - >
+ [`bug 1525317 <https://bugs.launchpad.net/keystone/+bug/1525317>`_]
+ Enable filtering of identity providers based on the `id` and `enabled`
+ attributes.
+ - >
+ [`bug 1555830 <https://bugs.launchpad.net/keystone/+bug/1555830>`_]
+ Enable filtering of service providers based on the `id` and `enabled`
+ attributes.
\ No newline at end of file
diff --git a/keystone-moon/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml b/keystone-moon/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml
new file mode 100644
index 00000000..8346285a
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - >
+ The default setting for the `os_inherit` configuration option is
+ changed to True. If you need to keep this portion
+ of the API disabled, override the default setting by explicitly
+ setting the os_inherit option to False.
+deprecations:
+ - The `os_inherit` configuration option is deprecated. In the future, this
+ option will be removed and this portion of the API will always be enabled.
diff --git a/keystone-moon/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml b/keystone-moon/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml
new file mode 100644
index 00000000..d94db3ba
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - >
+ [`bug 1516469 <https://bugs.launchpad.net/keystone/+bug/1516469>`_]
+ Endpoints filtered by endpoint_group project association will be
+ included in the service catalog when a project scoped token is issued and
+ ``endpoint_filter.sql`` is used for the catalog driver.
diff --git a/keystone-moon/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml b/keystone-moon/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml
new file mode 100644
index 00000000..ced7d5a7
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml
@@ -0,0 +1,25 @@
+---
+upgrade:
+ - >
+ The `keystone-paste.ini` file must be updated to remove extension
+ filters, and their use in ``[pipeline:api_v3]``.
+ Remove the following filters: ``[filter:oauth1_extension]``,
+ ``[filter:federation_extension]``, ``[filter:endpoint_filter_extension]``,
+ and ``[filter:revoke_extension]``. See the sample `keystone-paste.ini
+ <https://git.openstack.org/cgit/openstack/keystone/tree/etc/keystone-paste.ini>`_
+ file for guidance.
+ - >
+ The `keystone-paste.ini` file must be updated to remove extension filters,
+ and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` pipelines.
+ Remove the following filters: ``[filter:user_crud_extension]``,
+ ``[filter:crud_extension]``. See the sample `keystone-paste.ini
+ <https://git.openstack.org/cgit/openstack/keystone/tree/etc/keystone-paste.ini>`_
+ file for guidance.
+other:
+ - >
+ [`blueprint move-extensions <https://blueprints.launchpad.net/keystone/+spec/move-extensions>`_]
+ If any extension migrations are run, for example: ``keystone-manage db_sync
+ --extension endpoint_policy``, an error will be returned. This is working as
+ designed. To run these migrations, simply run ``keystone-manage db_sync``.
+ The complete list of affected extensions is: ``oauth1``, ``federation``,
+ ``endpoint_filter``, ``endpoint_policy``, and ``revoke``.
diff --git a/keystone-moon/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml b/keystone-moon/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml
new file mode 100644
index 00000000..04d45dae
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - >
+ [`blueprint federation-group-ids-mapped-without-domain-reference <https://blueprints.launchpad.net/keystone/+spec/federation-group-ids-mapped-without-domain-reference>`_]
+ Enhanced the federation mapping engine to allow for group IDs to be
+ referenced without a domain ID.
diff --git a/keystone-moon/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml b/keystone-moon/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml
new file mode 100644
index 00000000..86bb378e
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ The file ``httpd/keystone.py`` has been deprecated in favor of
+ ``keystone-wsgi-admin`` and ``keystone-wsgi-public`` and may be
+ removed in the 'O' release.
diff --git a/keystone-moon/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml b/keystone-moon/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml
new file mode 100644
index 00000000..3afd9159
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - >
+ [`bug 1367113 <https://bugs.launchpad.net/keystone/+bug/1367113>`_]
+ The "get entity" and "list entities" functionality for the KVS catalog
+ backend has been reimplemented to use the data from the catalog template.
+ Previously this would only act on temporary data that was created at
+ runtime. The create, update and delete entity functionality now raises
+ an exception.
diff --git a/keystone-moon/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml b/keystone-moon/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml
new file mode 100644
index 00000000..065fd541
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - >
+ [`blueprint implied-roles <https://blueprints.launchpad.net/keystone/+spec/implied-roles>`_]
+ Keystone now supports creating implied roles. Role inference rules can now
+ be added to indicate when the assignment of one role implies the assignment
+ of another. The rules are of the form `prior_role` implies
+ `implied_role`. At token generation time, user/group assignments of roles
+ that have implied roles will be expanded to also include such roles in the
+ token. The expansion of implied roles is controlled by the
+ `prohibited_implied_role` option in the `[assignment]`
+ section of `keystone.conf`.
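+# Inference rule sketch (v3 API; role IDs are placeholders):
+#   PUT /v3/roles/{prior_role_id}/implies/{implied_role_id}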
diff --git a/keystone-moon/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml b/keystone-moon/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml
new file mode 100644
index 00000000..ba11ab2a
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - A new config option, `insecure_debug`, is added to control whether debug
+ information is returned to clients. This used to be controlled by the
+ `debug` option. If you'd like to return extra information to clients,
+ set the value to ``true``. This extra information may help an attacker.
+
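+# Configuration sketch:
+#   [DEFAULT]
+#   insecure_debug = true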
diff --git a/keystone-moon/releasenotes/notes/is-admin-24b34238c83b3a82.yaml b/keystone-moon/releasenotes/notes/is-admin-24b34238c83b3a82.yaml
new file mode 100644
index 00000000..a0c2b3bb
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/is-admin-24b34238c83b3a82.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - >
+ [`bug 968696 <https://bugs.launchpad.net/keystone/+bug/968696>`_]
+ A pair of configuration options have been added to the ``[resource]``
+ section to specify a special ``admin`` project:
+ ``admin_project_domain_name`` and ``admin_project_name``. If these are
+ defined, any scoped token issued for that project will have an additional
+ identifier ``is_admin_project`` added to the token. This identifier can then
+ be checked by the policy rules in the policy files of the services when
+ evaluating access control policy for an API. Keystone does not yet
+ support the ability for a project acting as a domain to be the
+ admin project. That will be added once the rest of the code for
+ projects acting as domains is merged.
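+# Configuration sketch (names are illustrative):
+#   [resource]
+#   admin_project_domain_name = Default
+#   admin_project_name = admin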
diff --git a/keystone-moon/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml b/keystone-moon/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml
new file mode 100644
index 00000000..c26eeb3f
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - >
+ The configuration options for LDAP connection pooling, `[ldap] use_pool`
+ and `[ldap] use_auth_pool`, are now both enabled by default. Only
+ deployments using LDAP drivers are affected. Additional configuration
+ options are available in the `[ldap]` section to tune connection pool size,
+ etc.
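+# To opt out of connection pooling (sketch):
+#   [ldap]
+#   use_pool = false
+#   use_auth_pool = false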
diff --git a/keystone-moon/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml b/keystone-moon/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml
new file mode 100644
index 00000000..1d097ae3
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - >
+ [`bug 1515302 <https://bugs.launchpad.net/keystone/+bug/1515302>`_]
+ Two new configuration options have been added to the `[ldap]` section.
+ `user_enabled_emulation_use_group_config` and
+ `project_enabled_emulation_use_group_config`, which allow deployers to
+ choose if they want to override the default group LDAP schema option.
diff --git a/keystone-moon/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml b/keystone-moon/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml
new file mode 100644
index 00000000..4e5f5458
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - >
+ [`bug 1501698 <https://bugs.launchpad.net/keystone/+bug/1501698>`_]
+ Added support for the `list_limit` parameter when LDAP is used as
+ the identity backend.
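+# Configuration sketch (the limit value is illustrative):
+#   [identity]
+#   list_limit = 100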
diff --git a/keystone-moon/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml b/keystone-moon/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml
new file mode 100644
index 00000000..267ece71
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`bug 1479569 <https://bugs.launchpad.net/keystone/+bug/1479569>`_]
+ Names have been added to the list role assignments call
+ (GET /role_assignments?include_names=True); rather than returning
+ just the internal IDs of the objects, the names are also returned.
diff --git a/keystone-moon/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml b/keystone-moon/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml
new file mode 100644
index 00000000..c7d9d412
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - >
+ [`bug 1541092 <https://bugs.launchpad.net/keystone/+bug/1541092>`_]
+ Only database upgrades from Kilo and newer are supported.
diff --git a/keystone-moon/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml b/keystone-moon/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml
new file mode 100644
index 00000000..a449ad67
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml
@@ -0,0 +1,7 @@
+---
+other:
+ - >
+ ``keystone-manage db_sync`` will no longer create the Default domain. This
+ domain is used as the domain for any users created using the legacy v2.0
+ API. A default domain is created by ``keystone-manage bootstrap`` and when
+ a user or project is created using the legacy v2.0 API.
diff --git a/keystone-moon/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml b/keystone-moon/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml
new file mode 100644
index 00000000..d80ab826
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - Support has now been added to send notification events
+ on user/group membership. When a user is added to or removed
+ from a group, a notification will be sent including the
+ identifiers of both the user and the group.
diff --git a/keystone-moon/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml b/keystone-moon/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml
new file mode 100644
index 00000000..dc989154
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - >
+ Keystone now uses oslo.cache. Update the `[cache]` section of
+ `keystone.conf` to point to oslo.cache backends:
+ ``oslo_cache.memcache_pool`` or ``oslo_cache.mongo``. Refer to the
+ sample configuration file for examples. See `oslo.cache
+ <http://docs.openstack.org/developer/oslo.cache>`_ for additional
+ documentation.
+deprecations:
+ - >
+ [`blueprint deprecated-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/deprecated-as-of-mitaka>`_]
+ ``keystone.common.cache.backends.memcache_pool``,
+ ``keystone.common.cache.backends.mongo``, and
+ ``keystone.common.cache.backends.noop`` are deprecated in favor of
+ oslo.cache backends. The keystone backends will be removed in the 'O'
+ release.
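+# Configuration sketch (the server address is illustrative):
+#   [cache]
+#   enabled = true
+#   backend = oslo_cache.memcache_pool
+#   memcache_servers = localhost:11211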
diff --git a/keystone-moon/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml b/keystone-moon/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml
new file mode 100644
index 00000000..7845df9a
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - Domains are now represented as top-level projects with the attribute
+ `is_domain` set to true. Such projects will appear as parents for any
+ previous top level projects. Projects acting as domains can be created,
+ read, updated, and deleted via either the project API or the domain API
+ (V3 only).
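+# Query sketch (v3 API):
+#   GET /v3/projects?is_domain=true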
diff --git a/keystone-moon/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml b/keystone-moon/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml
new file mode 100644
index 00000000..0c591dcc
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml
@@ -0,0 +1,4 @@
+---
+other:
+ - The ability to validate a trust-scoped token against the v2.0 API has been
+ removed, in favor of using version 3 of the API.
diff --git a/keystone-moon/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml b/keystone-moon/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml
new file mode 100644
index 00000000..b0964c95
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml
@@ -0,0 +1,44 @@
+---
+other:
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Removed ``extras`` from token responses. These fields should not be
+ necessary and a well-defined API makes this field redundant. This was
+ deprecated in the Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Removed ``RequestBodySizeLimiter`` from keystone middleware. The keystone
+ team suggests using ``oslo_middleware.sizelimit.RequestBodySizeLimiter``
+ instead. This was deprecated in the Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Notifications with event_type ``identity.created.role_assignment`` and
+ ``identity.deleted.role_assignment`` have been removed. The keystone team
+ suggests listening for ``identity.role_assignment.created`` and
+ ``identity.role_assignment.deleted`` instead. This was deprecated in the
+ Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Removed ``check_role_for_trust`` from the trust controller, ensure policy
+ files do not refer to this target. This was deprecated in the Kilo
+ release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Removed Catalog KVS backend (``keystone.catalog.backends.kvs.Catalog``).
+ This was deprecated in the Icehouse release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ The LDAP backend for Assignment has been removed. This was deprecated in
+ the Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ The LDAP backend for Resource has been removed. This was deprecated in
+ the Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ The LDAP backend for Role has been removed. This was deprecated in the
+ Kilo release.
+ - >
+ [`blueprint removed-as-of-mitaka <https://blueprints.launchpad.net/keystone/+spec/removed-as-of-mitaka>`_]
+ Removed Revoke KVS backend (``keystone.revoke.backends.kvs.Revoke``).
+ This was deprecated in the Juno release.
diff --git a/keystone-moon/releasenotes/notes/request_context-e143ba9c446a5952.yaml b/keystone-moon/releasenotes/notes/request_context-e143ba9c446a5952.yaml
new file mode 100644
index 00000000..b00153db
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/request_context-e143ba9c446a5952.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - >
+ [`bug 1500222 <https://bugs.launchpad.net/keystone/+bug/1500222>`_]
+ Added information such as user ID, project ID, and domain ID to log
+ entries. As a side effect of this change, both the user's domain ID and
+ project's domain ID are now included in the auth context.
diff --git a/keystone-moon/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml b/keystone-moon/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml
new file mode 100644
index 00000000..cc28c7f3
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml
@@ -0,0 +1,12 @@
+fixes:
+ - >
+ [`bug 1527759 <https://bugs.launchpad.net/keystone/+bug/1527759>`_]
+ Reverted the change that eliminates the ability to get
+ a V2 token with a user or project that is not in the
+ default domain. This change broke real-world deployments
+ that utilized the ability to authenticate via V2 API
+ with a user not in the default domain or with a
+ project not in the default domain. Deployers are
+ still encouraged to update their code to properly handle
+ V3 auth, but the original fix broke expected and tested
+ behavior.
diff --git a/keystone-moon/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml b/keystone-moon/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml
new file mode 100644
index 00000000..85fcd6d8
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - >
+ [`bug 1473042 <https://bugs.launchpad.net/keystone/+bug/1473042>`_]
+ Keystone's S3 compatibility support can now authenticate using AWS
+ Signature Version 4.
diff --git a/keystone-moon/releasenotes/notes/totp-40d93231714c6a20.yaml b/keystone-moon/releasenotes/notes/totp-40d93231714c6a20.yaml
new file mode 100644
index 00000000..fcfdb049
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/totp-40d93231714c6a20.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - >
+ [`blueprint totp-auth <https://blueprints.launchpad.net/keystone/+spec/totp-auth>`_]
+ Keystone now supports authenticating via Time-based One-time Password (TOTP).
+ To enable this feature, add the ``totp`` auth plugin to the `methods`
+ option in the `[auth]` section of `keystone.conf`. More information
+ about using TOTP can be found in `keystone's developer documentation
+ <http://docs.openstack.org/developer/keystone/auth-totp.html>`_.
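+# Configuration sketch (your method list may differ):
+#   [auth]
+#   methods = external,password,token,totp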
diff --git a/keystone-moon/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml b/keystone-moon/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml
new file mode 100644
index 00000000..ae184605
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - >
+ [`bug 1480270 <https://bugs.launchpad.net/keystone/+bug/1480270>`_]
+ Endpoints created when using v3 of the keystone REST API will now be
+ included when listing endpoints via the v2.0 API.
diff --git a/keystone-moon/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml b/keystone-moon/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml
new file mode 100644
index 00000000..7db04c81
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - The V8 Federation driver interface is deprecated in favor of the V9
+ Federation driver interface. Support for the V8 Federation driver
+ interface is planned to be removed in the 'O' release of OpenStack.
diff --git a/keystone-moon/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml b/keystone-moon/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml
new file mode 100644
index 00000000..421acd6d
--- /dev/null
+++ b/keystone-moon/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - >
+ [`blueprint x509-ssl-client-cert-authn <https://blueprints.launchpad.net/keystone/+spec/x509-ssl-client-cert-authn>`_]
+ Keystone now supports tokenless client SSL X.509 certificate authentication
+ and authorization.
diff --git a/keystone-moon/requirements.txt b/keystone-moon/requirements.txt
index 8bc177b1..ed941a12 100644
--- a/keystone-moon/requirements.txt
+++ b/keystone-moon/requirements.txt
@@ -2,37 +2,38 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.6
-WebOb>=1.2.3
-eventlet>=0.17.4
-greenlet>=0.3.2
-PasteDeploy>=1.5.0
-Paste
-Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7'
-Routes!=2.0,>=1.12.3;python_version!='2.7'
-cryptography>=1.0 # Apache-2.0
-six>=1.9.0
-SQLAlchemy<1.1.0,>=0.9.9
-sqlalchemy-migrate>=0.9.6
+pbr>=1.6 # Apache-2.0
+WebOb>=1.2.3 # MIT
+eventlet!=0.18.3,>=0.18.2 # MIT
+greenlet>=0.3.2 # MIT
+PasteDeploy>=1.5.0 # MIT
+Paste # MIT
+Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
+Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
+cryptography!=1.3.0,>=1.0 # BSD/Apache-2.0
+six>=1.9.0 # MIT
+SQLAlchemy<1.1.0,>=1.0.10 # MIT
+sqlalchemy-migrate>=0.9.6 # Apache-2.0
stevedore>=1.5.0 # Apache-2.0
-passlib>=1.6
-python-keystoneclient!=1.8.0,>=1.6.0
-keystonemiddleware!=2.4.0,>=2.0.0
-oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=2.3.0 # Apache-2.0
+passlib>=1.6 # BSD
+python-keystoneclient!=1.8.0,!=2.1.0,<3.0.0,>=1.6.0 # Apache-2.0
+keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
+oslo.cache>=1.5.0 # Apache-2.0
+oslo.concurrency>=3.5.0 # Apache-2.0
+oslo.config>=3.7.0 # Apache-2.0
oslo.context>=0.2.0 # Apache-2.0
-oslo.messaging!=1.17.0,!=1.17.1,!=2.6.0,!=2.6.1,!=2.7.0,!=2.8.0,!=2.8.1,!=2.9.0,>=1.16.0 # Apache-2.0
-oslo.db>=2.4.1 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
-oslo.log>=1.8.0 # Apache-2.0
-oslo.middleware>=2.8.0 # Apache-2.0
+oslo.messaging>=4.0.0 # Apache-2.0
+oslo.db>=4.1.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
+oslo.log>=1.14.0 # Apache-2.0
+oslo.middleware>=3.0.0 # Apache-2.0
oslo.policy>=0.5.0 # Apache-2.0
-oslo.serialization>=1.4.0 # Apache-2.0
-oslo.service>=0.7.0 # Apache-2.0
-oslo.utils!=2.6.0,>=2.0.0 # Apache-2.0
-oauthlib>=0.6
-pysaml2>=2.4.0
-dogpile.cache>=0.5.4
-jsonschema!=2.5.0,<3.0.0,>=2.0.0
-pycadf>=1.1.0
-msgpack-python>=0.4.0
+oslo.serialization>=1.10.0 # Apache-2.0
+oslo.service>=1.0.0 # Apache-2.0
+oslo.utils>=3.5.0 # Apache-2.0
+oauthlib>=0.6 # BSD
+pysaml2<4.0.3,>=2.4.0 # Apache-2.0
+dogpile.cache>=0.5.7 # BSD
+jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
+pycadf!=2.0.0,>=1.1.0 # Apache-2.0
+msgpack-python>=0.4.0 # Apache-2.0
diff --git a/keystone-moon/setup.cfg b/keystone-moon/setup.cfg
index 5290d40c..2ac7436f 100644
--- a/keystone-moon/setup.cfg
+++ b/keystone-moon/setup.cfg
@@ -5,7 +5,7 @@ description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
+home-page = http://docs.openstack.org/developer/keystone/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -22,14 +22,14 @@ packages =
[extras]
ldap =
- python-ldap>=2.4:python_version=='2.7'
+ python-ldap>=2.4:python_version=='2.7' # PSF
ldappool>=1.0:python_version=='2.7' # MPL
memcache =
- python-memcached>=1.56
+ python-memcached>=1.56 # PSF
mongodb =
- pymongo>=3.0.2
+ pymongo!=3.1,>=3.0.2 # Apache-2.0
bandit =
- bandit>=0.13.2
+ bandit>=0.17.3 # Apache-2.0
[global]
setup-hooks =
@@ -103,11 +103,13 @@ keystone.auth.saml2 =
keystone.auth.token =
default = keystone.auth.plugins.token:Token
+keystone.auth.totp =
+ default = keystone.auth.plugins.totp:TOTP
+
keystone.auth.x509 =
default = keystone.auth.plugins.mapped:Mapped
keystone.catalog =
- kvs = keystone.catalog.backends.kvs:Catalog
sql = keystone.catalog.backends.sql:Catalog
templated = keystone.catalog.backends.templated:Catalog
endpoint_filter.sql = keystone.contrib.endpoint_filter.backends.catalog_sql:EndpointFilterCatalog
@@ -125,19 +127,20 @@ keystone.identity.id_generator =
keystone.identity.id_mapping =
sql = keystone.identity.mapping_backends.sql:Mapping
+keystone.identity.shadow_users =
+ sql = keystone.identity.shadow_backends.sql:ShadowUsers
+
keystone.policy =
rules = keystone.policy.backends.rules:Policy
sql = keystone.policy.backends.sql:Policy
keystone.resource =
- ldap = keystone.resource.backends.ldap:Resource
sql = keystone.resource.backends.sql:Resource
keystone.resource.domain_config =
sql = keystone.resource.config_backends.sql:DomainConfig
keystone.role =
- ldap = keystone.assignment.role_backends.ldap:Role
sql = keystone.assignment.role_backends.sql:Role
keystone.token.persistence =
@@ -156,20 +159,19 @@ keystone.trust =
sql = keystone.trust.backends.sql:Trust
keystone.endpoint_filter =
- sql = keystone.contrib.endpoint_filter.backends.sql:EndpointFilter
+ sql = keystone.catalog.backends.sql:Catalog
keystone.endpoint_policy =
sql = keystone.endpoint_policy.backends.sql:EndpointPolicy
keystone.federation =
- sql = keystone.contrib.federation.backends.sql:Federation
+ sql = keystone.federation.backends.sql:Federation
keystone.oauth1 =
- sql = keystone.contrib.oauth1.backends.sql:OAuth1
+ sql = keystone.oauth1.backends.sql:OAuth1
keystone.revoke =
- kvs = keystone.contrib.revoke.backends.kvs:Revoke
- sql = keystone.contrib.revoke.backends.sql:Revoke
+ sql = keystone.revoke.backends.sql:Revoke
keystone.moon.configuration =
ram = keystone.contrib.moon.backends.memory:ConfigurationConnector
@@ -187,11 +189,14 @@ oslo.config.opts =
keystone = keystone.common.config:list_opts
keystone.notifications = keystone.notifications:list_opts
+oslo.config.opts.defaults =
+ keystone = keystone.common.config:set_middleware_defaults
+
paste.filter_factory =
admin_token_auth = keystone.middleware:AdminTokenAuthMiddleware.factory
build_auth_context = keystone.middleware:AuthContextMiddleware.factory
crud_extension = keystone.contrib.admin_crud:CrudExtension.factory
- debug = keystone.common.wsgi:Debug.factory
+ debug = oslo_middleware:Debug.factory
endpoint_filter_extension = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
ec2_extension = keystone.contrib.ec2:Ec2Extension.factory
ec2_extension_v3 = keystone.contrib.ec2:Ec2ExtensionV3.factory
@@ -208,9 +213,9 @@ paste.filter_factory =
user_crud_extension = keystone.contrib.user_crud:CrudExtension.factory
paste.app_factory =
- admin_service = keystone.service:admin_app_factory
- admin_version_service = keystone.service:admin_version_app_factory
- public_service = keystone.service:public_app_factory
- public_version_service = keystone.service:public_version_app_factory
- service_v3 = keystone.service:v3_app_factory
+ admin_service = keystone.version.service:admin_app_factory
+ admin_version_service = keystone.version.service:admin_version_app_factory
+ public_service = keystone.version.service:public_app_factory
+ public_version_service = keystone.version.service:public_version_app_factory
+ service_v3 = keystone.version.service:v3_app_factory
moon_service = keystone.contrib.moon.service:moon_app_factory
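
The remaining setup.cfg changes track two Mitaka refactors: federation, oauth1 and revoke move out of keystone.contrib into first-class packages (endpoint filtering is now served by the regular SQL catalog backend), and the paste app factories move from keystone.service to keystone.version.service. A post-upgrade sketch to catch stale paths (the paste.ini location is an assumption):

    # The new module path should import cleanly...
    python -c 'import keystone.version.service'
    # ...and a deployed paste config should no longer name the moved modules.
    grep -nE 'keystone\.contrib\.(federation|oauth1|revoke)' \
        /etc/keystone/keystone-paste.ini || echo 'paste config looks clean'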
diff --git a/keystone-moon/test-requirements.txt b/keystone-moon/test-requirements.txt
index f10b9929..96992343 100644
--- a/keystone-moon/test-requirements.txt
+++ b/keystone-moon/test-requirements.txt
@@ -3,35 +3,38 @@
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.0
+pep257==0.7.0 # MIT License
+flake8-docstrings==0.2.1.post1 # MIT
bashate>=0.2 # Apache-2.0
+os-testr>=0.4.1 # Apache-2.0
# computes code coverage percentages
-coverage>=3.6
+coverage>=3.6 # Apache-2.0
# fixture stubbing
-fixtures>=1.3.1
+fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
# xml parsing
-lxml>=2.3
+lxml>=2.3 # BSD
# mock object framework
-mock>=1.2
+mock>=1.2 # BSD
oslotest>=1.10.0 # Apache-2.0
# required to build documentation
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
# test wsgi apps without starting an http server
-WebTest>=2.0
+WebTest>=2.0 # MIT
# mox was removed in favor of mock. We should not re-enable this module. See
# discussion: http://lists.openstack.org/pipermail/openstack-dev/2013-July/012484.html
#mox>=0.5.3
-python-subunit>=0.0.18
-testrepository>=0.0.18
-testtools>=1.4.0
+python-subunit>=0.0.18 # Apache-2.0/BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
# For documentation
-oslosphinx>=2.5.0 # Apache-2.0
+oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
reno>=0.1.1 # Apache2
-tempest-lib>=0.8.0
+tempest-lib>=0.14.0 # Apache-2.0
# Functional tests.
-requests!=2.8.0,>=2.5.2
+requests!=2.9.0,>=2.8.1 # Apache-2.0
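
Beyond the license tags, several pins gain exclusions for known-bad releases (requests 2.9.0, oslosphinx 3.4.0, and pymongo 3.1 above). pip treats such specifiers as an intersection of constraints:

    # '!=' carves a single broken release out of an otherwise open range.
    pip install 'requests!=2.9.0,>=2.8.1'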
diff --git a/keystone-moon/tests-py3-blacklist.txt b/keystone-moon/tests-py3-blacklist.txt
new file mode 100644
index 00000000..75836221
--- /dev/null
+++ b/keystone-moon/tests-py3-blacklist.txt
@@ -0,0 +1,10 @@
+keystone.tests.unit.common.test_ldap
+keystone.tests.unit.common.test_notifications
+keystone.tests.unit.test_backend_ldap
+keystone.tests.unit.test_backend_ldap_pool
+keystone.tests.unit.test_v2
+keystone.tests.unit.test_v3_auth
+keystone.tests.unit.test_v3_credential
+keystone.tests.unit.test_v3_federation
+keystone.tests.unit.test_v3_oauth1
+keystone.tests.unit.token.test_fernet_provider
diff --git a/keystone-moon/tools/pretty_tox_py3.sh b/keystone-moon/tools/pretty_tox_py3.sh
new file mode 100755
index 00000000..9bb72bb8
--- /dev/null
+++ b/keystone-moon/tools/pretty_tox_py3.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -o pipefail
+
+TESTRARGS=`python -c 'print("^((?!%s).)*$" % "|".join(f.strip() for f in open("tests-py3-blacklist.txt")))'`
+python setup.py testr --testr-args="--subunit $TESTRARGS" | subunit-trace -f
+retval=$?
+# NOTE(mtreinish): the pipe above eats the slowest-tests display from pbr's
+# testr wrapper, so print the slowest tests manually.
+echo -e "\nSlowest Tests:\n"
+testr slowest
+exit $retval
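
The helper folds the blacklist into one negative-lookahead regex, so testr runs every test whose id contains none of the listed modules. The expansion can be previewed directly:

    # Prints ^((?!keystone.tests.unit.common.test_ldap|...).)*$ -- the
    # pattern handed to testr via --testr-args above.
    python -c 'print("^((?!%s).)*$" % "|".join(f.strip() for f in open("tests-py3-blacklist.txt")))'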
diff --git a/keystone-moon/tools/sample_data.sh b/keystone-moon/tools/sample_data.sh
index bb1eada8..ce407431 100755
--- a/keystone-moon/tools/sample_data.sh
+++ b/keystone-moon/tools/sample_data.sh
@@ -32,10 +32,11 @@
# Tenant User Roles
# -------------------------------------------------------
# demo admin admin
-# service glance admin
-# service nova admin
-# service ec2 admin
-# service swift admin
+# service glance service
+# service nova service
+# service ec2 service
+# service swift service
+# service neutron service
# By default, passwords used are those in the OpenStack Install and Deploy Manual.
# One can override these (publicly known, and hence, insecure) passwords by setting the appropriate
@@ -53,6 +54,7 @@ NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}}
GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}}
EC2_PASSWORD=${EC2_PASSWORD:-${SERVICE_PASSWORD:-ec2}}
SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}}
+NEUTRON_PASSWORD=${NEUTRON_PASSWORD:-${SERVICE_PASSWORD:-neutron}}
CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost}
CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost}
@@ -99,6 +101,14 @@ function get_id () {
}
#
+# Roles
+#
+
+openstack role create admin
+
+openstack role create service
+
+#
# Default tenant
#
openstack project create demo \
@@ -107,8 +117,6 @@ openstack project create demo \
openstack user create admin --project demo \
--password "${ADMIN_PASSWORD}"
-openstack role create admin
-
openstack role add --user admin \
--project demo\
admin
@@ -124,28 +132,35 @@ openstack user create glance --project service\
openstack role add --user glance \
--project service \
- admin
+ service
openstack user create nova --project service\
--password "${NOVA_PASSWORD}"
openstack role add --user nova \
--project service \
- admin
+ service
openstack user create ec2 --project service \
--password "${EC2_PASSWORD}"
openstack role add --user ec2 \
--project service \
- admin
+ service
openstack user create swift --project service \
--password "${SWIFT_PASSWORD}" \
openstack role add --user swift \
--project service \
- admin
+ service
+
+openstack user create neutron --project service \
+ --password "${NEUTRON_PASSWORD}"
+
+openstack role add --user neutron \
+ --project service \
+ service
#
# Keystone service
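
The sample data now creates both roles up front and grants service accounts the dedicated "service" role instead of "admin", trimming each service user to the privileges it needs. With a reasonably recent python-openstackclient the result can be checked afterwards (--names support is an assumption about the installed client):

    # Show what each service user was actually granted on the service project.
    openstack role assignment list --project service --names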
@@ -231,6 +246,20 @@ if [[ -z "$DISABLE_ENDPOINTS" ]]; then
swift
fi
+#
+# Neutron service
+#
+openstack service create --name=neutron \
+ --description="Neutron Network Service" \
+ network
+if [[ -z "$DISABLE_ENDPOINTS" ]]; then
+ openstack endpoint create --region RegionOne \
+ --publicurl "http://$CONTROLLER_PUBLIC_ADDRESS:9696" \
+ --adminurl "http://$CONTROLLER_ADMIN_ADDRESS:9696" \
+ --internalurl "http://$CONTROLLER_INTERNAL_ADDRESS:9696" \
+ neutron
+fi
+
# create ec2 creds and parse the secret and access key returned
ADMIN_USER=$(get_id openstack user show admin)
RESULT=$(openstack ec2 credentials create --project service --user $ADMIN_USER)
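
With the neutron user, role, service and endpoints in place, a quick smoke test of the new catalog entry might be (host and port follow the defaults above; neutron-server must be running for the curl to answer):

    # The catalog should list the network service...
    openstack endpoint list | grep -i network
    # ...and the API root should respond on 9696.
    curl -s "http://localhost:9696/" | head -n 5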
diff --git a/keystone-moon/tox.ini b/keystone-moon/tox.ini
index af1d9b82..13ca0d84 100644
--- a/keystone-moon/tox.ini
+++ b/keystone-moon/tox.ini
@@ -1,92 +1,131 @@
[tox]
minversion = 1.6
skipsdist = True
-envlist = py27,py34,pep8,docs,genconfig,releasenotes
+envlist = py34,py27,pep8,docs,genconfig,releasenotes
[testenv]
usedevelop = True
-install_command = pip install -U {opts} {packages}
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/mitaka} {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/test-requirements.txt
.[ldap,memcache,mongodb]
-commands = bash tools/pretty_tox.sh '{posargs}'
-whitelist_externals = bash
-passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
+commands =
+ find keystone -type f -name "*.pyc" -delete
+ bash tools/pretty_tox.sh '{posargs}'
+whitelist_externals =
+ bash
+ find
+passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY PBR_VERSION
[testenv:py34]
+commands =
+ find keystone -type f -name "*.pyc" -delete
+ bash tools/pretty_tox_py3.sh
+
+[testenv:legacy_drivers]
deps = -r{toxinidir}/test-requirements.txt
nose
- .[memcache,mongodb]
+ .[ldap,memcache,mongodb]
commands =
- nosetests --with-coverage --cover-package=keystone \
- keystone/tests/unit/auth/test_controllers.py \
- keystone/tests/unit/catalog/test_core.py \
- keystone/tests/unit/common/test_base64utils.py \
- keystone/tests/unit/common/test_injection.py \
- keystone/tests/unit/common/test_json_home.py \
- keystone/tests/unit/common/test_sql_core.py \
- keystone/tests/unit/common/test_utils.py \
- keystone/tests/unit/test_auth_plugin.py \
- keystone/tests/unit/test_backend.py \
- keystone/tests/unit/test_backend_endpoint_policy.py \
- keystone/tests/unit/test_backend_rules.py \
- keystone/tests/unit/test_cache_backend_mongo.py \
- keystone/tests/unit/test_config.py \
- keystone/tests/unit/test_contrib_s3_core.py \
- keystone/tests/unit/test_driver_hints.py \
- keystone/tests/unit/test_exception.py \
- keystone/tests/unit/test_policy.py \
- keystone/tests/unit/test_singular_plural.py \
- keystone/tests/unit/test_sql_livetest.py \
- keystone/tests/unit/test_sql_migrate_extensions.py \
- keystone/tests/unit/test_sql_upgrade.py \
- keystone/tests/unit/test_ssl.py \
- keystone/tests/unit/test_token_bind.py \
- keystone/tests/unit/test_url_middleware.py \
- keystone/tests/unit/test_v3_controller.py \
- keystone/tests/unit/test_validation.py \
- keystone/tests/unit/test_wsgi.py \
- keystone/tests/unit/tests/test_core.py \
- keystone/tests/unit/tests/test_utils.py \
- keystone/tests/unit/token/test_pki_provider.py \
- keystone/tests/unit/token/test_pkiz_provider.py \
- keystone/tests/unit/token/test_token_model.py \
- keystone/tests/unit/token/test_uuid_provider.py
+ # Run each legacy test separately, to avoid SQL model redefinitions
+ find keystone -type f -name "*.pyc" -delete
+ nosetests -v \
+ keystone/tests/unit/backend/legacy_drivers/assignment/V8/sql.py
+ nosetests -v \
+ keystone/tests/unit/backend/legacy_drivers/role/V8/sql.py
+ nosetests -v \
+ keystone/tests/unit/backend/legacy_drivers/federation/V8/api_v3.py
+ nosetests -v \
+ keystone/tests/unit/backend/legacy_drivers/resource/V8/sql.py
[testenv:pep8]
+deps =
+ .[bandit]
+ {[testenv]deps}
commands =
- flake8 {posargs}
+ flake8
# Run bash8 during pep8 runs to ensure violations are caught by
# the check and gate queues
bashate examples/pki/gen_pki.sh
# Check that .po and .pot files are valid.
bash -c "find keystone -type f -regex '.*\.pot?' -print0| \
xargs -0 -n 1 msgfmt --check-format -o /dev/null"
+ # Run security linter
+ bandit -r keystone -x tests
-[tox:jenkins]
-downloadcache = ~/cache/pip
+[testenv:bandit]
+# NOTE(browne): This is required for the integration test job of the bandit
+# project. Please do not remove.
+deps = .[bandit]
+commands = bandit -r keystone -x tests
[testenv:cover]
-commands = python setup.py testr --coverage --testr-args='{posargs}'
+# Do not run test_coverage_ext tests while gathering coverage, as those
+# tests conflict with coverage.
+# NOTE(sdague): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
+commands =
+ find keystone -type f -name "*.pyc" -delete
+ python setup.py testr --coverage --testr-args='{posargs}'
[testenv:venv]
+# NOTE(jaegerandi): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
commands = {posargs}
[testenv:debug]
-commands = oslo_debug_helper {posargs}
+commands =
+ find keystone -type f -name "*.pyc" -delete
+ oslo_debug_helper {posargs}
+passenv =
+ KSTEST_ADMIN_URL
+ KSTEST_ADMIN_USERNAME
+ KSTEST_ADMIN_PASSWORD
+ KSTEST_ADMIN_DOMAIN_ID
+ KSTEST_PUBLIC_URL
+ KSTEST_USER_USERNAME
+ KSTEST_USER_PASSWORD
+ KSTEST_USER_DOMAIN_ID
+ KSTEST_PROJECT_ID
[testenv:functional]
basepython = python3.4
deps = -r{toxinidir}/test-requirements.txt
setenv = OS_TEST_PATH=./keystone/tests/functional
-commands = python setup.py testr --slowest --testr-args='{posargs}'
+commands =
+ find keystone -type f -name "*.pyc" -delete
+ python setup.py testr --slowest --testr-args='{posargs}'
+passenv =
+ KSTEST_ADMIN_URL
+ KSTEST_ADMIN_USERNAME
+ KSTEST_ADMIN_PASSWORD
+ KSTEST_ADMIN_DOMAIN_ID
+ KSTEST_PUBLIC_URL
+ KSTEST_USER_USERNAME
+ KSTEST_USER_PASSWORD
+ KSTEST_USER_DOMAIN_ID
+ KSTEST_PROJECT_ID
[flake8]
filename= *.py,keystone-all,keystone-manage
show-source = true
-# H405 multi line docstring summary not separated with an empty line
-ignore = H405
+# D100: Missing docstring in public module
+# D101: Missing docstring in public class
+# D102: Missing docstring in public method
+# D103: Missing docstring in public function
+# D104: Missing docstring in public package
+# D105: Missing docstring in magic method
+# D202: No blank lines allowed after docstring.
+# D203: 1 blank line required before class docstring.
+# D205: Blank line required between one-line summary and description.
+# D400: First line should end with a period.
+# D401: First line should be in imperative mood.
+ignore = D100,D101,D102,D103,D104,D105,D202,D203,D205,D400,D401
exclude=.venv,.git,.tox,build,dist,doc,*openstack/common*,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot
max-complexity=24
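
The tox rework pins every default environment to the stable/mitaka upper-constraints file, clears stale .pyc files before each run, routes py34 through the blacklist runner, and adds a legacy_drivers environment that noses each V8 compatibility test separately. The constraints URL is only a default and can be overridden per invocation:

    # Use a local requirements checkout instead of the git URL baked into
    # install_command (the path is an assumption).
    UPPER_CONSTRAINTS_FILE=/opt/requirements/upper-constraints.txt tox -e py27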
@@ -98,15 +137,15 @@ commands=
python setup.py build_sphinx
[testenv:releasenotes]
-commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+# NOTE(sdague): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
+commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:genconfig]
commands = oslo-config-generator --config-file=config-generator/keystone.conf
-[testenv:bandit]
-deps = .[bandit]
-commands = bandit -c bandit.yaml -r keystone -n5 -p gate
-
[hacking]
import_exceptions =
keystone.i18n
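
Finally, bandit is folded into the pep8 environment so the security linter runs in the normal check and gate queues, while a standalone bandit target survives for the bandit project's own integration job:

    # Full style run: flake8, bashate, .po/.pot validation, bandit.
    tox -e pep8
    # Security linter alone, as the bandit gate job invokes it.
    tox -e bandit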