summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--INFO14
-rw-r--r--api/AUTHORS11
-rw-r--r--api/ChangeLog28
-rw-r--r--api/LICENSE176
-rw-r--r--api/MANIFEST.in10
-rw-r--r--api/README.rst0
-rw-r--r--api/babel.cfg1
-rw-r--r--api/escalator/__init__.py0
-rw-r--r--api/escalator/api/__init__.py20
-rw-r--r--api/escalator/api/middleware/__init__.py0
-rw-r--r--api/escalator/api/middleware/context.py137
-rw-r--r--api/escalator/api/policy.py97
-rw-r--r--api/escalator/api/v1/__init__.py14
-rw-r--r--api/escalator/api/v1/router.py25
-rw-r--r--api/escalator/api/versions.py78
-rw-r--r--api/escalator/cmd/__init__.py16
-rw-r--r--api/escalator/cmd/api.py87
-rw-r--r--api/escalator/common/__init__.py0
-rw-r--r--api/escalator/common/auth.py294
-rw-r--r--api/escalator/common/client.py594
-rw-r--r--api/escalator/common/config.py204
-rw-r--r--api/escalator/common/crypt.py68
-rw-r--r--api/escalator/common/exception.py521
-rw-r--r--api/escalator/common/rpc.py279
-rw-r--r--api/escalator/common/utils.py938
-rw-r--r--api/escalator/common/wsgi.py903
-rw-r--r--api/escalator/context.py60
-rw-r--r--api/escalator/i18n.py31
-rw-r--r--api/escalator/notifier.py66
-rw-r--r--api/escalator/opts.py62
-rw-r--r--api/escalator/version.py18
-rw-r--r--api/etc/escalator-api-paste.ini23
-rw-r--r--api/etc/escalator-api.conf216
-rw-r--r--api/etc/oslo-config-generator/escalator-api.conf10
-rw-r--r--api/etc/policy.json5
-rw-r--r--api/etc/property-protections-policies.conf.sample34
-rw-r--r--api/etc/property-protections-roles.conf.sample32
-rw-r--r--api/pylintrc27
-rw-r--r--api/requirements.txt33
-rw-r--r--api/setup.cfg45
-rw-r--r--api/setup.py30
-rw-r--r--api/tox.ini49
-rwxr-xr-xci/build.sh8
-rw-r--r--ci/build_rpm/Dockerfile29
-rwxr-xr-xci/build_rpm/build_rpms.sh58
-rwxr-xr-xci/build_rpm/build_rpms_docker.sh46
-rw-r--r--client/AUTHORS11
-rw-r--r--client/ChangeLog8
-rw-r--r--client/LICENSE175
-rw-r--r--client/MANIFEST.in10
-rw-r--r--client/PKG-INFO30
-rw-r--r--client/README.rst6
-rw-r--r--client/babel.cfg1
-rw-r--r--client/doc/Makefile90
-rw-r--r--client/doc/source/apiv2.rst27
-rw-r--r--client/doc/source/conf.py72
-rw-r--r--client/doc/source/index.rst37
-rw-r--r--client/doc/source/man/escalator.rst87
-rw-r--r--client/escalatorclient/__init__.py31
-rw-r--r--client/escalatorclient/_i18n.py34
-rw-r--r--client/escalatorclient/client.py39
-rw-r--r--client/escalatorclient/common/__init__.py0
-rw-r--r--client/escalatorclient/common/base.py34
-rw-r--r--client/escalatorclient/common/http.py288
-rw-r--r--client/escalatorclient/common/https.py349
-rw-r--r--client/escalatorclient/common/utils.py462
-rw-r--r--client/escalatorclient/exc.py201
-rw-r--r--client/escalatorclient/openstack/__init__.py0
-rw-r--r--client/escalatorclient/openstack/common/__init__.py0
-rw-r--r--client/escalatorclient/openstack/common/_i18n.py45
-rw-r--r--client/escalatorclient/openstack/common/apiclient/__init__.py0
-rw-r--r--client/escalatorclient/openstack/common/apiclient/auth.py234
-rw-r--r--client/escalatorclient/openstack/common/apiclient/base.py532
-rw-r--r--client/escalatorclient/openstack/common/apiclient/client.py388
-rw-r--r--client/escalatorclient/openstack/common/apiclient/exceptions.py479
-rw-r--r--client/escalatorclient/openstack/common/apiclient/utils.py100
-rw-r--r--client/escalatorclient/shell.py714
-rw-r--r--client/escalatorclient/v1/__init__.py16
-rw-r--r--client/escalatorclient/v1/client.py36
-rw-r--r--client/escalatorclient/v1/shell.py178
-rw-r--r--client/escalatorclient/v1/versions.py294
-rw-r--r--client/pylintrc27
-rw-r--r--client/requirements.txt14
-rw-r--r--client/run_tests.sh49
-rw-r--r--client/setup.cfg46
-rw-r--r--client/setup.py30
-rw-r--r--client/test-requirements.txt13
-rw-r--r--client/tox.ini39
-rw-r--r--docs/design/etc/conf.py19
-rw-r--r--docs/etc/conf.py20
-rw-r--r--docs/gap_analysis/etc/conf.py19
-rw-r--r--docs/requirements/102-Terminologies.rst151
-rw-r--r--docs/requirements/104-Requirements.rst210
-rw-r--r--docs/requirements/105-Use_Cases.rst23
-rw-r--r--docs/requirements/etc/conf.py19
95 files changed, 10786 insertions, 298 deletions
diff --git a/INFO b/INFO
index a6b3570..8d0f92e 100644
--- a/INFO
+++ b/INFO
@@ -11,24 +11,12 @@ Mailing list tag: [escalator]
Meetings: Every Thursday 1200-1300 UTC
IRC : #opnfv-meeting @ Freenode.net
-
Committers:
hu.jie@zte.com.cn
fuqiao@chinamobile.com
ulrich.kleber@huawei.com
maria.toeroe@ericsson.com
-
-Contributors:
-sama@docomolab-euro.com
-chao.zhong@zte.com.cn
-zhang.jun3g@zte.com.cn
-yuan.yue@zte.com.cn
-huangzhipeng@huawei.com
-meng.jia@zte.com.cn
-liyi.meng@ericsson.com
-pasi.vaananen@stratus.com
-wang.guobing1@zte.com.cn
-jianfei.zhang@nokia.com
+kong.wei2@zte.com.cn
Link to TSC approval of the project:
http://meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-04-21-14.00.html
diff --git a/api/AUTHORS b/api/AUTHORS
new file mode 100644
index 0000000..ad0c219
--- /dev/null
+++ b/api/AUTHORS
@@ -0,0 +1,11 @@
+Aric Gardner <agardner@linuxfoundation.org>
+Jie Hu <hu.jie@zte.com.cn>
+Jing Sun <sun.jing22@zte.com.cn>
+Liyi Meng <liyi.meng@ericsson.com>
+Maria Toeroe <Maria.Toeroe@ericsson.com>
+Ryota MIBU <r-mibu@cq.jp.nec.com>
+SerenaFeng <feng.xiaoewi@zte.com.cn>
+chaozhong-zte <chao.zhong@zte.com.cn>
+hujie <hu.jie@zte.com.cn>
+wangguobing <wang.guobing1@zte.com.cn>
+zhang-jun3g <zhang.jun3g@zte.com.cn>
diff --git a/api/ChangeLog b/api/ChangeLog
new file mode 100644
index 0000000..9e09288
--- /dev/null
+++ b/api/ChangeLog
@@ -0,0 +1,28 @@
+CHANGES
+=======
+
+* add escalator frame
+* Fix the trailing white spaces in Line 14 and 20
+* Add license information to conf.py files
+* Upgrade duration requirement
+* delete HA and non-HA description in 105-User_Cases.rst
+* Refact Terminology chapter
+* ESCALATOR-31 Adjusting documentation
+* updates to use new doc toolchain
+* Revise Terminology section
+* ESCALATOR-29: Reference Architecture format bugfix
+* ESCALATOR-28: Revise the Section of Reference Architecture
+* Move files from doc to docs
+* Contribute a RA/Information flows from ZTE's implementation
+* ESCALATOR-18: Use Cases
+* Incorporate software dimensions and other comments
+* Add jianfei.zhang@nokia.com to the contributor(ESCALATOR-22)
+* Transfer late changes from etherpad to rst
+* ESCALATOR-5: Fix syntax error of rst files in doc
+* JIRA ESCALATOR-6 Add a new contributor to INFO file Signed-off-by: Jie Hu <hu.jie@zte.com.cn>
+* JIRA ESCALATOR-3
+* Example as code, documentation template for sphinx build
+* Convert Requirement to .rst. Just for uploading to repo JIRA: ESCALATOR-1 Signed-off-by: Jie Hu <hu.jie@zte.com.cn>
+* Add modified INFO to escalator repo
+* Adding .gitreview to escalator JIRA:0000 more information at https://wiki.opendaylight.org/view/Git-review_Workflow
+* Initial empty repository
diff --git a/api/LICENSE b/api/LICENSE
new file mode 100644
index 0000000..68c771a
--- /dev/null
+++ b/api/LICENSE
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/api/MANIFEST.in b/api/MANIFEST.in
new file mode 100644
index 0000000..ae484e5
--- /dev/null
+++ b/api/MANIFEST.in
@@ -0,0 +1,10 @@
+include ChangeLog
+include MANIFEST.in pylintrc
+include AUTHORS
+include LICENSE
+include ChangeLog
+include babel.cfg tox.ini
+graft docs
+graft etc
+graft escalator/locale
+global-exclude *.pyc
diff --git a/api/README.rst b/api/README.rst
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/api/README.rst
diff --git a/api/babel.cfg b/api/babel.cfg
new file mode 100644
index 0000000..efceab8
--- /dev/null
+++ b/api/babel.cfg
@@ -0,0 +1 @@
+[python: **.py]
diff --git a/api/escalator/__init__.py b/api/escalator/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/api/escalator/__init__.py
diff --git a/api/escalator/api/__init__.py b/api/escalator/api/__init__.py
new file mode 100644
index 0000000..e7ebaab
--- /dev/null
+++ b/api/escalator/api/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import paste.urlmap
+
+
+def root_app_factory(loader, global_conf, **local_conf):
+ return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
diff --git a/api/escalator/api/middleware/__init__.py b/api/escalator/api/middleware/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/api/escalator/api/middleware/__init__.py
diff --git a/api/escalator/api/middleware/context.py b/api/escalator/api/middleware/context.py
new file mode 100644
index 0000000..b921289
--- /dev/null
+++ b/api/escalator/api/middleware/context.py
@@ -0,0 +1,137 @@
+# Copyright 2016 OPNFV Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+from oslo_config import cfg
+from oslo_log import log as logging
+import webob.exc
+
+from escalator.api import policy
+from escalator.common import wsgi
+import escalator.context
+from escalator import i18n
+
+_ = i18n._
+
+context_opts = [
+ cfg.BoolOpt('owner_is_tenant', default=True,
+ help=_('When true, this option sets the owner of an image '
+ 'to be the tenant. Otherwise, the owner of the '
+ ' image will be the authenticated user issuing the '
+ 'request.')),
+ cfg.StrOpt('admin_role', default='admin',
+ help=_('Role used to identify an authenticated user as '
+ 'administrator.')),
+ cfg.BoolOpt('allow_anonymous_access', default=False,
+ help=_('Allow unauthenticated users to access the API with '
+ 'read-only privileges. This only applies when using '
+ 'ContextMiddleware.')),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(context_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseContextMiddleware(wsgi.Middleware):
+ def process_response(self, resp):
+ try:
+ request_id = resp.request.context.request_id
+ except AttributeError:
+ LOG.warn(_('Unable to retrieve request id from context'))
+ else:
+ resp.headers['x-openstack-request-id'] = 'req-%s' % request_id
+ return resp
+
+
+class ContextMiddleware(BaseContextMiddleware):
+ def __init__(self, app):
+ self.policy_enforcer = policy.Enforcer()
+ super(ContextMiddleware, self).__init__(app)
+
+ def process_request(self, req):
+ """Convert authentication information into a request context
+
+ Generate a escalator.context.RequestContext object from the available
+ authentication headers and store on the 'context' attribute
+ of the req object.
+
+ :param req: wsgi request object that will be given the context object
+ :raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status
+ header is not 'Confirmed' and
+ anonymous access is disallowed
+ """
+ if req.headers.get('X-Identity-Status') == 'Confirmed':
+ req.context = self._get_authenticated_context(req)
+ elif CONF.allow_anonymous_access:
+ req.context = self._get_anonymous_context()
+ else:
+ raise webob.exc.HTTPUnauthorized()
+
+ def _get_anonymous_context(self):
+ kwargs = {
+ 'user': None,
+ 'tenant': None,
+ 'roles': [],
+ 'is_admin': False,
+ 'read_only': True,
+ 'policy_enforcer': self.policy_enforcer,
+ }
+ return escalator.context.RequestContext(**kwargs)
+
+ def _get_authenticated_context(self, req):
+ # NOTE(bcwaldon): X-Roles is a csv string, but we need to parse
+ # it into a list to be useful
+ roles_header = req.headers.get('X-Roles', '')
+ roles = [r.strip().lower() for r in roles_header.split(',')]
+
+ # NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token
+ deprecated_token = req.headers.get('X-Storage-Token')
+
+ service_catalog = None
+ if req.headers.get('X-Service-Catalog') is not None:
+ try:
+ catalog_header = req.headers.get('X-Service-Catalog')
+ service_catalog = jsonutils.loads(catalog_header)
+ except ValueError:
+ raise webob.exc.HTTPInternalServerError(
+ _('Invalid service catalog json.'))
+
+ kwargs = {
+ 'user': req.headers.get('X-User-Id'),
+ 'tenant': req.headers.get('X-Tenant-Id'),
+ 'roles': roles,
+ 'is_admin': CONF.admin_role.strip().lower() in roles,
+ 'auth_token': req.headers.get('X-Auth-Token', deprecated_token),
+ 'owner_is_tenant': CONF.owner_is_tenant,
+ 'service_catalog': service_catalog,
+ 'policy_enforcer': self.policy_enforcer,
+ }
+
+ return escalator.context.RequestContext(**kwargs)
+
+
+class UnauthenticatedContextMiddleware(BaseContextMiddleware):
+ def process_request(self, req):
+ """Create a context without an authorized user."""
+ kwargs = {
+ 'user': None,
+ 'tenant': None,
+ 'roles': [],
+ 'is_admin': True,
+ }
+
+ req.context = escalator.context.RequestContext(**kwargs)
diff --git a/api/escalator/api/policy.py b/api/escalator/api/policy.py
new file mode 100644
index 0000000..4d94f51
--- /dev/null
+++ b/api/escalator/api/policy.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Policy Engine For Escalator"""
+
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_policy import policy
+
+from escalator.common import exception
+from escalator import i18n
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+DEFAULT_RULES = policy.Rules.from_dict({
+ 'context_is_admin': 'role:admin',
+ 'default': '@',
+ 'manage_image_cache': 'role:admin',
+})
+
+_ = i18n._
+_LI = i18n._LI
+_LW = i18n._LW
+
+
+class Enforcer(policy.Enforcer):
+ """Responsible for loading and enforcing rules"""
+
+ def __init__(self):
+ if CONF.find_file(CONF.oslo_policy.policy_file):
+ kwargs = dict(rules=None, use_conf=True)
+ else:
+ kwargs = dict(rules=DEFAULT_RULES, use_conf=False)
+ super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs)
+
+ def add_rules(self, rules):
+ """Add new rules to the Rules object"""
+ self.set_rules(rules, overwrite=False, use_conf=self.use_conf)
+
+ def enforce(self, context, action, target):
+ """Verifies that the action is valid on the target in this context.
+
+ :param context: Escalator request context
+ :param action: String representing the action to be checked
+ :param target: Dictionary representing the object of the action.
+ :raises: `escalator.common.exception.Forbidden`
+ :returns: A non-False value if access is allowed.
+ """
+ credentials = {
+ 'roles': context.roles,
+ 'user': context.user,
+ 'tenant': context.tenant,
+ }
+ return super(Enforcer, self).enforce(action, target, credentials,
+ do_raise=True,
+ exc=exception.Forbidden,
+ action=action)
+
+ def check(self, context, action, target):
+ """Verifies that the action is valid on the target in this context.
+
+ :param context: Escalator request context
+ :param action: String representing the action to be checked
+ :param target: Dictionary representing the object of the action.
+ :returns: A non-False value if access is allowed.
+ """
+ credentials = {
+ 'roles': context.roles,
+ 'user': context.user,
+ 'tenant': context.tenant,
+ }
+ return super(Enforcer, self).enforce(action, target, credentials)
+
+ def check_is_admin(self, context):
+ """Check if the given context is associated with an admin role,
+ as defined via the 'context_is_admin' RBAC rule.
+
+ :param context: Escalator request context
+ :returns: A non-False value if context role is admin.
+ """
+ return self.check(context, 'context_is_admin', context.to_dict())
diff --git a/api/escalator/api/v1/__init__.py b/api/escalator/api/v1/__init__.py
new file mode 100644
index 0000000..31285c4
--- /dev/null
+++ b/api/escalator/api/v1/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/api/escalator/api/v1/router.py b/api/escalator/api/v1/router.py
new file mode 100644
index 0000000..54b09c4
--- /dev/null
+++ b/api/escalator/api/v1/router.py
@@ -0,0 +1,25 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from escalator.common import wsgi
+
+
+class API(wsgi.Router):
+
+ """WSGI router for Escalator v1 API requests."""
+
+ def __init__(self, mapper):
+ wsgi.Resource(wsgi.RejectMethodController())
+
+ super(API, self).__init__(mapper)
diff --git a/api/escalator/api/versions.py b/api/escalator/api/versions.py
new file mode 100644
index 0000000..751fc76
--- /dev/null
+++ b/api/escalator/api/versions.py
@@ -0,0 +1,78 @@
+# Copyright 2012 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib
+
+from oslo_serialization import jsonutils
+from oslo_config import cfg
+import webob.dec
+
+from escalator.common import wsgi
+from escalator import i18n
+
+_ = i18n._
+
+versions_opts = [
+ cfg.StrOpt('public_endpoint', default=None,
+ help=_('Public url to use for versions endpoint. The default '
+ 'is None, which will use the request\'s host_url '
+ 'attribute to populate the URL base. If Escalator is '
+ 'operating behind a proxy, you will want to change '
+ 'this to represent the proxy\'s URL.')),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(versions_opts)
+
+
+class Controller(object):
+
+ """A wsgi controller that reports which API versions are supported."""
+
+ def index(self, req):
+ """Respond to a request for all OpenStack API versions."""
+ def build_version_object(version, path, status):
+ url = CONF.public_endpoint or req.host_url
+ return {
+ 'id': 'v%s' % version,
+ 'status': status,
+ 'links': [
+ {
+ 'rel': 'self',
+ 'href': '%s/%s/' % (url, path),
+ },
+ ],
+ }
+
+ version_objs = []
+ if CONF.enable_v1_api:
+ version_objs.extend([
+ build_version_object(1.1, 'v1', 'SUPPORTED'),
+ build_version_object(1.0, 'v1', 'SUPPORTED'),
+ ])
+
+ response = webob.Response(request=req,
+ status=httplib.MULTIPLE_CHOICES,
+ content_type='application/json')
+ response.body = jsonutils.dumps(dict(versions=version_objs))
+ return response
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ return self.index(req)
+
+
+def create_resource(conf):
+ return wsgi.Resource(Controller())
diff --git a/api/escalator/cmd/__init__.py b/api/escalator/cmd/__init__.py
new file mode 100644
index 0000000..871224d
--- /dev/null
+++ b/api/escalator/cmd/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 OPNFV Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from escalator import i18n
+i18n.enable_lazy()
diff --git a/api/escalator/cmd/api.py b/api/escalator/cmd/api.py
new file mode 100644
index 0000000..a0f63dc
--- /dev/null
+++ b/api/escalator/cmd/api.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Escalator API Server
+"""
+
+import os
+import sys
+
+import eventlet
+
+from oslo_service import systemd
+from oslo_config import cfg
+from oslo_log import log as logging
+import osprofiler.notifier
+import osprofiler.web
+
+from escalator.common import config
+from escalator.common import wsgi
+from escalator import notifier
+
+
+# Monkey patch socket, time, select, threads
+eventlet.patcher.monkey_patch(all=False, socket=True, time=True,
+ select=True, thread=True, os=True)
+
+# If ../escalator/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'escalator', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+CONF = cfg.CONF
+CONF.import_group("profiler", "escalator.common.wsgi")
+logging.register_options(CONF)
+
+
def fail(e):
    """Report a fatal startup error and terminate the process.

    :param e: the exception that aborted server startup
    """
    # Write the reason to stderr before exiting; previously the exception
    # was silently discarded, which made startup failures undebuggable.
    sys.stderr.write("ERROR: %s\n" % e)
    sys.exit(100)
+
+
def main():
    """Entry point for the escalator-api server.

    Parses configuration, sets up eventlet/logging and (optionally)
    osprofiler notifications, then serves the paste-deployed WSGI app
    until shutdown.  Any startup failure exits with status 100 via
    fail().
    """
    try:
        config.parse_args()
        wsgi.set_eventlet_hub()
        logging.setup(CONF, 'escalator')

        if cfg.CONF.profiler.enabled:
            # Route profiling traces through the messaging notifier so
            # they can be collected centrally.
            _notifier = osprofiler.notifier.create("Messaging",
                                                   notifier.messaging, {},
                                                   notifier.get_transport(),
                                                   "escalator", "api",
                                                   cfg.CONF.bind_host)
            osprofiler.notifier.set(_notifier)
        else:
            osprofiler.web.disable()

        server = wsgi.Server()
        server.start(config.load_paste_app('escalator-api'), default_port=9393)
        # Tell systemd the service is ready (no-op outside systemd).
        systemd.notify_once()
        server.wait()
    except Exception as e:
        # NOTE(review): this swallows the traceback; fail() only sets the
        # exit status.
        fail(e)
+
+
# Allow running the API server directly (outside of a console script).
if __name__ == '__main__':
    main()
diff --git a/api/escalator/common/__init__.py b/api/escalator/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/api/escalator/common/__init__.py
diff --git a/api/escalator/common/auth.py b/api/escalator/common/auth.py
new file mode 100644
index 0000000..d3e2893
--- /dev/null
+++ b/api/escalator/common/auth.py
@@ -0,0 +1,294 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This auth module is intended to allow OpenStack client-tools to select from a
+variety of authentication strategies, including NoAuth (the default), and
+Keystone (an identity management system).
+
+ > auth_plugin = AuthPlugin(creds)
+
+ > auth_plugin.authenticate()
+
+ > auth_plugin.auth_token
+ abcdefg
+
+ > auth_plugin.management_url
+ http://service_endpoint/
+"""
+import httplib2
+from oslo_serialization import jsonutils
+from oslo_log import log as logging
+# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
+from six.moves import range
+import six.moves.urllib.parse as urlparse
+
+from escalator.common import exception
+from escalator import i18n
+
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+
+
class BaseStrategy(object):
    """Abstract base class for authentication strategies.

    Concrete strategies populate ``auth_token`` and ``management_url``
    during a successful authenticate() call.
    """

    def __init__(self):
        # No token is held until authenticate() succeeds.
        self.auth_token = None
        # TODO(sirp): Should expose selecting public/internal/admin URL.
        self.management_url = None

    def authenticate(self):
        """Perform authentication; subclasses must override."""
        raise NotImplementedError

    @property
    def is_authenticated(self):
        """Whether a valid token is held; subclasses must override."""
        raise NotImplementedError

    @property
    def strategy(self):
        """Short name identifying this strategy; subclasses must override."""
        raise NotImplementedError
+
+
class NoAuthStrategy(BaseStrategy):
    """Strategy used when no authentication is required."""

    def authenticate(self):
        """No-op: there is nothing to authenticate against."""
        return None

    @property
    def is_authenticated(self):
        # With auth disabled, every caller counts as authenticated.
        return True

    @property
    def strategy(self):
        return 'noauth'
+
+
class KeystoneStrategy(BaseStrategy):
    """Authenticate against Keystone (v1 header-based or v2.0 JSON)."""

    # Upper bound on 305/failure retry hops before MaxRedirectsExceeded.
    MAX_REDIRECTS = 10

    def __init__(self, creds, insecure=False, configure_via_auth=True):
        # creds: dict supplying 'username', 'password', 'auth_url' and
        # 'strategy' (plus 'tenant' for Keystone v2.0 endpoints).
        self.creds = creds
        self.insecure = insecure
        self.configure_via_auth = configure_via_auth
        super(KeystoneStrategy, self).__init__()

    def check_auth_params(self):
        """Validate the credential dict.

        :raises exception.MissingCredentialError: a required field is absent
        :raises exception.BadAuthStrategy: creds are not for keystone
        """
        # Ensure that supplied credential parameters are as required
        for required in ('username', 'password', 'auth_url',
                         'strategy'):
            if self.creds.get(required) is None:
                raise exception.MissingCredentialError(required=required)
        if self.creds['strategy'] != 'keystone':
            raise exception.BadAuthStrategy(expected='keystone',
                                            received=self.creds['strategy'])
        # For v2.0 also check tenant is present
        if self.creds['auth_url'].rstrip('/').endswith('v2.0'):
            if self.creds.get("tenant") is None:
                raise exception.MissingCredentialError(required='tenant')

    def authenticate(self):
        """Authenticate with the Keystone service.

        There are a few scenarios to consider here:

        1. Which version of Keystone are we using? v1 which uses headers to
           pass the credentials, or v2 which uses a JSON encoded request body?

        2. Keystone may respond back with a redirection using a 305 status
           code.

        3. We may attempt a v1 auth when v2 is what's called for. In this
           case, we rewrite the url to contain /v2.0/ and retry using the v2
           protocol.
        """
        def _authenticate(auth_url):
            # If OS_AUTH_URL is missing a trailing slash add one
            if not auth_url.endswith('/'):
                auth_url += '/'
            token_url = urlparse.urljoin(auth_url, "tokens")
            # 1. Check Keystone version
            is_v2 = auth_url.rstrip('/').endswith('v2.0')
            if is_v2:
                self._v2_auth(token_url)
            else:
                self._v1_auth(token_url)

        self.check_auth_params()
        auth_url = self.creds['auth_url']
        for _ in range(self.MAX_REDIRECTS):
            try:
                _authenticate(auth_url)
            except exception.AuthorizationRedirect as e:
                # 2. Keystone may redirect us
                # NOTE(review): relies on the exception exposing a 'url'
                # attribute -- confirm against exception.py.
                auth_url = e.url
            except exception.AuthorizationFailure:
                # 3. In some configurations nova makes redirection to
                # v2.0 keystone endpoint. Also, new location does not
                # contain real endpoint, only hostname and port.
                if 'v2.0' not in auth_url:
                    auth_url = urlparse.urljoin(auth_url, 'v2.0/')
            else:
                # If we successfully auth'd, then memorize the correct auth_url
                # for future use.
                self.creds['auth_url'] = auth_url
                break
        else:
            # Guard against a redirection loop
            raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS)

    def _v1_auth(self, token_url):
        """Authenticate against a v1-style endpoint using auth headers."""
        creds = self.creds

        headers = {}
        headers['X-Auth-User'] = creds['username']
        headers['X-Auth-Key'] = creds['password']

        tenant = creds.get('tenant')
        if tenant:
            headers['X-Auth-Tenant'] = tenant

        resp, resp_body = self._do_request(token_url, 'GET', headers=headers)

        def _management_url(self, resp):
            # Probe the known header names for a management endpoint; if
            # none is present, re-raise the last KeyError.
            for url_header in ('x-image-management-url',
                               'x-server-management-url',
                               'x-escalator'):
                try:
                    return resp[url_header]
                except KeyError as e:
                    not_found = e
            raise not_found

        if resp.status in (200, 204):
            try:
                if self.configure_via_auth:
                    # _management_url is a local function, not a method;
                    # self is passed explicitly.
                    self.management_url = _management_url(self, resp)
                self.auth_token = resp['x-auth-token']
            except KeyError:
                raise exception.AuthorizationFailure()
        elif resp.status == 305:
            # NOTE(review): raised with keyword 'uri' while authenticate()
            # reads 'e.url' from AuthorizationRedirect -- verify the
            # exception class provides a 'url' attribute for this keyword.
            raise exception.AuthorizationRedirect(uri=resp['location'])
        elif resp.status == 400:
            raise exception.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exception.NotAuthenticated()
        elif resp.status == 404:
            raise exception.AuthUrlNotFound(url=token_url)
        else:
            raise Exception(_('Unexpected response: %s') % resp.status)

    def _v2_auth(self, token_url):
        """Authenticate against a Keystone v2.0 endpoint with a JSON body."""

        creds = self.creds

        # Rebind creds to the JSON request structure expected by v2.0.
        creds = {
            "auth": {
                "tenantName": creds['tenant'],
                "passwordCredentials": {
                    "username": creds['username'],
                    "password": creds['password']
                }
            }
        }

        headers = {}
        headers['Content-Type'] = 'application/json'
        req_body = jsonutils.dumps(creds)

        resp, resp_body = self._do_request(
            token_url, 'POST', headers=headers, body=req_body)

        if resp.status == 200:
            resp_auth = jsonutils.loads(resp_body)['access']
            creds_region = self.creds.get('region')
            if self.configure_via_auth:
                # Select this region's image endpoint from the catalog.
                endpoint = get_endpoint(resp_auth['serviceCatalog'],
                                        endpoint_region=creds_region)
                self.management_url = endpoint
            self.auth_token = resp_auth['token']['id']
        elif resp.status == 305:
            raise exception.RedirectException(resp['location'])
        elif resp.status == 400:
            raise exception.AuthBadRequest(url=token_url)
        elif resp.status == 401:
            raise exception.NotAuthenticated()
        elif resp.status == 404:
            raise exception.AuthUrlNotFound(url=token_url)
        else:
            raise Exception(_('Unexpected response: %s') % resp.status)

    @property
    def is_authenticated(self):
        # A token is only set after a successful authenticate().
        return self.auth_token is not None

    @property
    def strategy(self):
        return 'keystone'

    def _do_request(self, url, method, headers=None, body=None):
        """Issue an HTTP request via httplib2; return (response, body).

        Transport errors are converted to status codes, and server
        certificate validation is skipped when 'insecure' was requested.
        """
        headers = headers or {}
        conn = httplib2.Http()
        conn.force_exception_to_status_code = True
        conn.disable_ssl_certificate_validation = self.insecure
        headers['User-Agent'] = 'escalator-client'
        resp, resp_body = conn.request(url, method, headers=headers, body=body)
        return resp, resp_body
+
+
def get_plugin_from_strategy(strategy, creds=None, insecure=False,
                             configure_via_auth=True):
    """Return an instantiated auth plugin for the named strategy.

    :param strategy: 'noauth' or 'keystone'
    :raises Exception: the strategy name is not recognized
    """
    if strategy == 'noauth':
        return NoAuthStrategy()
    if strategy == 'keystone':
        return KeystoneStrategy(creds, insecure,
                                configure_via_auth=configure_via_auth)
    raise Exception(_("Unknown auth strategy '%s'") % strategy)
+
+
def get_endpoint(service_catalog, service_type='image', endpoint_region=None,
                 endpoint_type='publicURL'):
    """
    Select an endpoint from the service catalog

    We search the full service catalog for services
    matching both type and region. If the client
    supplied no region then any 'image' endpoint
    is considered a match. There must be one -- and
    only one -- successful match in the catalog,
    otherwise we will raise an exception.
    """
    match = None
    for service in service_catalog:
        if 'type' not in service:
            # The 'type' value is unavailable here, so the message always
            # formats with None (mirrors the historical behaviour).
            LOG.warn(_('Encountered service with no "type": %s') % None)
            continue

        if service['type'] != service_type:
            continue

        for ep in service['endpoints']:
            if endpoint_region is not None and endpoint_region != ep['region']:
                continue
            if match is not None:
                # A second candidate makes the choice ambiguous.
                raise exception.RegionAmbiguity(region=endpoint_region)
            match = ep

    if match and match.get(endpoint_type):
        return match[endpoint_type]
    raise exception.NoServiceEndpoint()
diff --git a/api/escalator/common/client.py b/api/escalator/common/client.py
new file mode 100644
index 0000000..586d638
--- /dev/null
+++ b/api/escalator/common/client.py
@@ -0,0 +1,594 @@
+# Copyright 2010-2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# HTTPSClientAuthConnection code comes courtesy of ActiveState website:
+# http://code.activestate.com/recipes/
+# 577548-https-httplib-client-connection-with-certificate-v/
+
+import collections
+import copy
+import errno
+import functools
+import httplib
+import os
+import re
+
+try:
+ from eventlet.green import socket
+ from eventlet.green import ssl
+except ImportError:
+ import socket
+ import ssl
+
+import osprofiler.web
+
+try:
+ import sendfile # noqa
+ SENDFILE_SUPPORTED = True
+except ImportError:
+ SENDFILE_SUPPORTED = False
+
+from oslo_log import log as logging
+from oslo_utils import encodeutils
+import six
+# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
+from six.moves import range
+import six.moves.urllib.parse as urlparse
+
+from escalator.common import auth
+from escalator.common import exception
+from escalator.common import utils
+from escalator import i18n
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+
+# common chunk size for get and put
+CHUNKSIZE = 65536
+
+VERSION_REGEX = re.compile(r"/?v[0-9\.]+")
+
+
def handle_unauthenticated(func):
    """Decorator: on NotAuthenticated, re-authenticate once and retry.

    Wraps a client method so that a rejected token triggers a single
    forced re-authentication followed by one retry of the original call.
    """
    @functools.wraps(func)
    def _retry_once(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except exception.NotAuthenticated:
            # Token was rejected: refresh it (bypassing any cached token)
            # and retry the call exactly once.
            self._authenticate(force_reauth=True)
            return func(self, *args, **kwargs)
    return _retry_once
+
+
def handle_redirects(func):
    """Decorator for _do_request that follows HTTP redirects.

    Re-issues the request at each redirect target, up to MAX_REDIRECTS
    attempts, then raises MaxRedirectsExceeded.
    """
    MAX_REDIRECTS = 5

    @functools.wraps(func)
    def _follow(self, method, url, body, headers):
        attempts = MAX_REDIRECTS
        while attempts:
            attempts -= 1
            try:
                return func(self, method, url, body, headers)
            except exception.RedirectException as redirect:
                if redirect.url is None:
                    # A redirect without a Location target is malformed.
                    raise exception.InvalidRedirect()
                url = redirect.url
        raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS)
    return _follow
+
+
class HTTPSClientAuthConnection(httplib.HTTPSConnection):
    """
    Class to make a HTTPS connection, with support for
    full client-based SSL Authentication

    :see http://code.activestate.com/recipes/
        577548-https-httplib-client-connection-with-certificate-v/
    """

    def __init__(self, host, port, key_file, cert_file,
                 ca_file, timeout=None, insecure=False):
        # key_file/cert_file implement client-side SSL authentication;
        # ca_file verifies the server's certificate unless 'insecure'.
        httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_file = ca_file
        self.timeout = timeout
        self.insecure = insecure

    def connect(self):
        """
        Connect to a host on a given (SSL) port.
        If ca_file is pointing somewhere, use it to check Server Certificate.

        Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
        This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
        ssl.wrap_socket(), which forces SSL to check server certificate against
        our client certificate.
        """
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        # Check CA file unless 'insecure' is specified
        if self.insecure is True:
            # NOTE(review): CERT_NONE disables server-certificate checking
            # entirely; only for explicitly-insecure deployments.
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=ssl.CERT_NONE)
        else:
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        ca_certs=self.ca_file,
                                        cert_reqs=ssl.CERT_REQUIRED)
+
+
class BaseClient(object):

    """A base client class"""

    # Used when the caller supplies no port.
    DEFAULT_PORT = 80
    DEFAULT_DOC_ROOT = None
    # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora,
    # Suse, FreeBSD/OpenBSD
    DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:'
                            '/etc/pki/tls/certs/ca-bundle.crt:'
                            '/etc/ssl/ca-bundle.pem:'
                            '/etc/ssl/cert.pem')

    # Response codes treated as success by _do_request.
    OK_RESPONSE_CODES = (
        httplib.OK,
        httplib.CREATED,
        httplib.ACCEPTED,
        httplib.NO_CONTENT,
    )

    # Response codes that trigger the redirect-following logic.
    REDIRECT_RESPONSE_CODES = (
        httplib.MOVED_PERMANENTLY,
        httplib.FOUND,
        httplib.SEE_OTHER,
        httplib.USE_PROXY,
        httplib.TEMPORARY_REDIRECT,
    )
+
    def __init__(self, host, port=None, timeout=None, use_ssl=False,
                 auth_token=None, creds=None, doc_root=None, key_file=None,
                 cert_file=None, ca_file=None, insecure=False,
                 configure_via_auth=True):
        """
        Creates a new client to some service.

        :param host: The host where service resides
        :param port: The port where service resides
        :param timeout: Connection timeout.
        :param use_ssl: Should we use HTTPS?
        :param auth_token: The auth token to pass to the server
        :param creds: The credentials to pass to the auth plugin
        :param doc_root: Prefix for all URLs we request from host
        :param key_file: Optional PEM-formatted file that contains the private
                         key.
                         If use_ssl is True, and this param is None (the
                         default), then an environ variable
                         ESCALATOR_CLIENT_KEY_FILE is looked for. If no such
                         environ variable is found, ClientConnectionError
                         will be raised.
        :param cert_file: Optional PEM-formatted certificate chain file.
                          If use_ssl is True, and this param is None (the
                          default), then an environ variable
                          ESCALATOR_CLIENT_CERT_FILE is looked for. If no such
                          environ variable is found, ClientConnectionError
                          will be raised.
        :param ca_file: Optional CA cert file to use in SSL connections
                        If use_ssl is True, and this param is None (the
                        default), then an environ variable
                        ESCALATOR_CLIENT_CA_FILE is looked for.
        :param insecure: Optional. If set then the server's certificate
                         will not be verified.
        :param configure_via_auth: Optional. Defaults to True. If set, the
                                   URL returned from the service catalog for
                                   the image endpoint will **override** the
                                   URL supplied to in the host parameter.
        """
        self.host = host
        self.port = port or self.DEFAULT_PORT
        self.timeout = timeout
        # A value of '0' implies never timeout
        if timeout == 0:
            self.timeout = None
        self.use_ssl = use_ssl
        self.auth_token = auth_token
        self.creds = creds or {}
        self.connection = None
        self.configure_via_auth = configure_via_auth
        # doc_root can be a nullstring, which is valid, and why we
        # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below.
        self.doc_root = (doc_root if doc_root is not None
                         else self.DEFAULT_DOC_ROOT)

        self.key_file = key_file
        self.cert_file = cert_file
        self.ca_file = ca_file
        self.insecure = insecure
        # The auth plugin is chosen from creds['strategy'] and may later
        # rewrite host/port/doc_root via configure_from_url().
        self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure)
        self.connect_kwargs = self.get_connect_kwargs()
+
    def get_connect_kwargs(self):
        """Assemble the keyword arguments for the connection class.

        For SSL connections, key/cert/CA paths are resolved from the
        constructor arguments or the ESCALATOR_CLIENT_*_FILE environment
        variables and checked for existence.

        :returns: dict of kwargs for the connection class
        :raises exception.ClientConnectionError: inconsistent or missing
            SSL file configuration
        """
        connect_kwargs = {}

        # Both secure and insecure connections have a timeout option
        connect_kwargs['timeout'] = self.timeout

        if self.use_ssl:
            if self.key_file is None:
                self.key_file = os.environ.get('ESCALATOR_CLIENT_KEY_FILE')
            if self.cert_file is None:
                self.cert_file = os.environ.get('ESCALATOR_CLIENT_CERT_FILE')
            if self.ca_file is None:
                self.ca_file = os.environ.get('ESCALATOR_CLIENT_CA_FILE')

            # Check that key_file/cert_file are either both set or both unset
            if self.cert_file is not None and self.key_file is None:
                msg = _("You have selected to use SSL in connecting, "
                        "and you have supplied a cert, "
                        "however you have failed to supply either a "
                        "key_file parameter or set the "
                        "ESCALATOR_CLIENT_KEY_FILE environ variable")
                raise exception.ClientConnectionError(msg)

            if self.key_file is not None and self.cert_file is None:
                msg = _("You have selected to use SSL in connecting, "
                        "and you have supplied a key, "
                        "however you have failed to supply either a "
                        "cert_file parameter or set the "
                        "ESCALATOR_CLIENT_CERT_FILE environ variable")
                raise exception.ClientConnectionError(msg)

            if (self.key_file is not None and
                    not os.path.exists(self.key_file)):
                msg = _("The key file you specified %s does not "
                        "exist") % self.key_file
                raise exception.ClientConnectionError(msg)
            connect_kwargs['key_file'] = self.key_file

            if (self.cert_file is not None and
                    not os.path.exists(self.cert_file)):
                msg = _("The cert file you specified %s does not "
                        "exist") % self.cert_file
                raise exception.ClientConnectionError(msg)
            connect_kwargs['cert_file'] = self.cert_file

            if (self.ca_file is not None and
                    not os.path.exists(self.ca_file)):
                msg = _("The CA file you specified %s does not "
                        "exist") % self.ca_file
                raise exception.ClientConnectionError(msg)

            # No explicit CA file: fall back to the first well-known
            # distro bundle that exists on this system.
            if self.ca_file is None:
                for ca in self.DEFAULT_CA_FILE_PATH.split(":"):
                    if os.path.exists(ca):
                        self.ca_file = ca
                        break

            connect_kwargs['ca_file'] = self.ca_file
            connect_kwargs['insecure'] = self.insecure

        return connect_kwargs
+
+ def configure_from_url(self, url):
+ """
+ Setups the connection based on the given url.
+
+ The form is:
+
+ <http|https>://<host>:port/doc_root
+ """
+ LOG.debug("Configuring from URL: %s", url)
+ parsed = urlparse.urlparse(url)
+ self.use_ssl = parsed.scheme == 'https'
+ self.host = parsed.hostname
+ self.port = parsed.port or 80
+ self.doc_root = parsed.path.rstrip('/')
+
+ # We need to ensure a version identifier is appended to the doc_root
+ if not VERSION_REGEX.match(self.doc_root):
+ if self.DEFAULT_DOC_ROOT:
+ doc_root = self.DEFAULT_DOC_ROOT.lstrip('/')
+ self.doc_root += '/' + doc_root
+ msg = ("Appending doc_root %(doc_root)s to URL %(url)s" %
+ {'doc_root': doc_root, 'url': url})
+ LOG.debug(msg)
+
+ # ensure connection kwargs are re-evaluated after the service catalog
+ # publicURL is parsed for potential SSL usage
+ self.connect_kwargs = self.get_connect_kwargs()
+
+ def make_auth_plugin(self, creds, insecure):
+ """
+ Returns an instantiated authentication plugin.
+ """
+ strategy = creds.get('strategy', 'noauth')
+ plugin = auth.get_plugin_from_strategy(strategy, creds, insecure,
+ self.configure_via_auth)
+ return plugin
+
+ def get_connection_type(self):
+ """
+ Returns the proper connection type
+ """
+ if self.use_ssl:
+ return HTTPSClientAuthConnection
+ else:
+ return httplib.HTTPConnection
+
    def _authenticate(self, force_reauth=False):
        """
        Use the authentication plugin to authenticate and set the auth token.

        :param force_reauth: For re-authentication to bypass cache.
        """
        auth_plugin = self.auth_plugin

        # Skip the round trip when a token is already held, unless the
        # caller explicitly wants a fresh one.
        if not auth_plugin.is_authenticated or force_reauth:
            auth_plugin.authenticate()

        self.auth_token = auth_plugin.auth_token

        # The auth service may hand back the real endpoint; honor it when
        # configure_via_auth is set (overrides host/port/doc_root).
        management_url = auth_plugin.management_url
        if management_url and self.configure_via_auth:
            self.configure_from_url(management_url)
+
    @handle_unauthenticated
    def do_request(self, method, action, body=None, headers=None,
                   params=None):
        """
        Make a request, returning an HTTP response object.

        :param method: HTTP verb (GET, POST, PUT, etc.)
        :param action: Requested path to append to self.doc_root
        :param body: Data to send in the body of the request
        :param headers: Headers to send with the request
        :param params: Key/value pairs to use in query string
        :returns: HTTP response object
        """
        # Authenticate lazily on first use.
        if not self.auth_token:
            self._authenticate()

        url = self._construct_url(action, params)
        # NOTE(ameade): We need to copy these kwargs since they can be altered
        # in _do_request but we need the originals if handle_unauthenticated
        # calls this function again.
        return self._do_request(method=method, url=url,
                                body=copy.deepcopy(body),
                                headers=copy.deepcopy(headers))
+
+ def _construct_url(self, action, params=None):
+ """
+ Create a URL object we can use to pass to _do_request().
+ """
+ action = urlparse.quote(action)
+ path = '/'.join([self.doc_root or '', action.lstrip('/')])
+ scheme = "https" if self.use_ssl else "http"
+ netloc = "%s:%d" % (self.host, self.port)
+
+ if isinstance(params, dict):
+ for (key, value) in params.items():
+ if value is None:
+ del params[key]
+ continue
+ if not isinstance(value, six.string_types):
+ value = str(value)
+ params[key] = encodeutils.safe_encode(value)
+ query = urlparse.urlencode(params)
+ else:
+ query = None
+
+ url = urlparse.ParseResult(scheme, netloc, path, '', query, '')
+ log_msg = _("Constructed URL: %s")
+ LOG.debug(log_msg, url.geturl())
+ return url
+
+ def _encode_headers(self, headers):
+ """
+ Encodes headers.
+
+ Note: This should be used right before
+ sending anything out.
+
+ :param headers: Headers to encode
+ :returns: Dictionary with encoded headers'
+ names and values
+ """
+ to_str = encodeutils.safe_encode
+ return dict([(to_str(h), to_str(v)) for h, v in
+ six.iteritems(headers)])
+
    @handle_redirects
    def _do_request(self, method, url, body, headers):
        """
        Connects to the server and issues a request. Handles converting
        any returned HTTP error status codes to ESCALATOR exceptions
        and closing the server connection. Returns the result data, or
        raises an appropriate exception.

        :param method: HTTP method ("GET", "POST", "PUT", etc...)
        :param url: urlparse.ParsedResult object with URL information
        :param body: data to send (as string, filelike or iterable),
                     or None (default)
        :param headers: mapping of key/value pairs to add as headers

        :note

        If the body param has a read attribute, and method is either
        POST or PUT, this method will automatically conduct a chunked-transfer
        encoding and use the body as a file object or iterable, transferring
        chunks of data using the connection's send() method. This allows large
        objects to be transferred efficiently without buffering the entire
        body in memory.
        """
        if url.query:
            path = url.path + "?" + url.query
        else:
            path = url.path

        try:
            connection_type = self.get_connection_type()
            headers = self._encode_headers(headers or {})
            # Propagate osprofiler trace context to the remote service.
            headers.update(osprofiler.web.get_trace_id_headers())

            if 'x-auth-token' not in headers and self.auth_token:
                headers['x-auth-token'] = self.auth_token

            c = connection_type(url.hostname, url.port, **self.connect_kwargs)

            def _pushing(method):
                # Only POST/PUT carry a request body worth streaming.
                return method.lower() in ('post', 'put')

            def _simple(body):
                return body is None or isinstance(body, six.string_types)

            def _filelike(body):
                return hasattr(body, 'read')

            def _sendbody(connection, iter):
                connection.endheaders()
                for sent in iter:
                    # iterator has done the heavy lifting
                    pass

            def _chunkbody(connection, iter):
                # Hand-rolled HTTP/1.1 chunked transfer encoding.
                connection.putheader('Transfer-Encoding', 'chunked')
                connection.endheaders()
                for chunk in iter:
                    connection.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                connection.send('0\r\n\r\n')

            # Do a simple request or a chunked request, depending
            # on whether the body param is file-like or iterable and
            # the method is PUT or POST
            #
            if not _pushing(method) or _simple(body):
                # Simple request...
                c.request(method, path, body, headers)
            elif _filelike(body) or self._iterable(body):
                c.putrequest(method, path)

                use_sendfile = self._sendable(body)

                # According to HTTP/1.1, Content-Length and Transfer-Encoding
                # conflict.
                for header, value in headers.items():
                    if use_sendfile or header.lower() != 'content-length':
                        c.putheader(header, str(value))

                iter = utils.chunkreadable(body)

                if use_sendfile:
                    # send actual file without copying into userspace
                    _sendbody(c, iter)
                else:
                    # otherwise iterate and chunk
                    _chunkbody(c, iter)
            else:
                raise TypeError('Unsupported image type: %s' % body.__class__)

            res = c.getresponse()

            def _retry(res):
                # Honors the server's Retry-After hint, if any.
                return res.getheader('Retry-After')

            # Map the HTTP status onto the client exception hierarchy.
            status_code = self.get_status_code(res)
            if status_code in self.OK_RESPONSE_CODES:
                return res
            elif status_code in self.REDIRECT_RESPONSE_CODES:
                raise exception.RedirectException(res.getheader('Location'))
            elif status_code == httplib.UNAUTHORIZED:
                raise exception.NotAuthenticated(res.read())
            elif status_code == httplib.FORBIDDEN:
                raise exception.Forbidden(res.read())
            elif status_code == httplib.NOT_FOUND:
                raise exception.NotFound(res.read())
            elif status_code == httplib.CONFLICT:
                raise exception.Duplicate(res.read())
            elif status_code == httplib.BAD_REQUEST:
                raise exception.Invalid(res.read())
            elif status_code == httplib.MULTIPLE_CHOICES:
                raise exception.MultipleChoices(body=res.read())
            elif status_code == httplib.REQUEST_ENTITY_TOO_LARGE:
                raise exception.LimitExceeded(retry=_retry(res),
                                              body=res.read())
            elif status_code == httplib.INTERNAL_SERVER_ERROR:
                raise exception.ServerError()
            elif status_code == httplib.SERVICE_UNAVAILABLE:
                raise exception.ServiceUnavailable(retry=_retry(res))
            else:
                raise exception.UnexpectedStatus(status=status_code,
                                                 body=res.read())

        except (socket.error, IOError) as e:
            raise exception.ClientConnectionError(e)
+
+ def _seekable(self, body):
+ # pipes are not seekable, avoids sendfile() failure on e.g.
+ # cat /path/to/image | escalator add ...
+ # or where add command is launched via popen
+ try:
+ os.lseek(body.fileno(), 0, os.SEEK_CUR)
+ return True
+ except OSError as e:
+ return (e.errno != errno.ESPIPE)
+
+ def _sendable(self, body):
+ return (SENDFILE_SUPPORTED and
+ hasattr(body, 'fileno') and
+ self._seekable(body) and
+ not self.use_ssl)
+
    def _iterable(self, body):
        # True for anything we can iterate over in chunks (lists,
        # generators, file-like objects, ...).
        # NOTE(review): collections.Iterable moved to collections.abc in
        # Python 3 and was removed from the collections namespace in 3.10.
        return isinstance(body, collections.Iterable)
+
+ def get_status_code(self, response):
+ """
+ Returns the integer status code from the response, which
+ can be either a Webob.Response (used in testing) or httplib.Response
+ """
+ if hasattr(response, 'status_int'):
+ return response.status_int
+ else:
+ return response.status
+
+ def _extract_params(self, actual_params, allowed_params):
+ """
+ Extract a subset of keys from a dictionary. The filters key
+ will also be extracted, and each of its values will be returned
+ as an individual param.
+
+ :param actual_params: dict of keys to filter
+ :param allowed_params: list of keys that 'actual_params' will be
+ reduced to
+ :retval subset of 'params' dict
+ """
+ try:
+ # expect 'filters' param to be a dict here
+ result = dict(actual_params.get('filters'))
+ except TypeError:
+ result = {}
+
+ for allowed_param in allowed_params:
+ if allowed_param in actual_params:
+ result[allowed_param] = actual_params[allowed_param]
+
+ return result
diff --git a/api/escalator/common/config.py b/api/escalator/common/config.py
new file mode 100644
index 0000000..66a59f1
--- /dev/null
+++ b/api/escalator/common/config.py
@@ -0,0 +1,204 @@
+
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Routines for configuring Escalator
+"""
+
+import logging
+import logging.config
+import logging.handlers
+import os
+import tempfile
+
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+from oslo_policy import policy
+from paste import deploy
+
+from escalator import i18n
+from escalator.version import version_info as version
+
+_ = i18n._
+
# Options selecting which paste-deploy pipeline/config file to load.
paste_deploy_opts = [
    cfg.StrOpt('flavor',
               help=_('Partial name of a pipeline in your paste configuration '
                      'file with the service name removed. For example, if '
                      'your paste section name is '
                      '[pipeline:escalator-api-keystone] use the value '
                      '"keystone"')),
    cfg.StrOpt('config_file',
               help=_('Name of the paste configuration file.')),
]
# Options controlling asynchronous task execution (registered under [task]).
task_opts = [
    cfg.IntOpt('task_time_to_live',
               default=48,
               help=_("Time in hours for which a task lives after, either "
                      "succeeding or failing"),
               deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live',
                                                  group='DEFAULT')]),
    cfg.StrOpt('task_executor',
               default='taskflow',
               help=_("Specifies which task executor to be used to run the "
                      "task scripts.")),
    cfg.StrOpt('work_dir',
               default=None,
               help=_('Work dir for asynchronous task operations. '
                      'The directory set here will be used to operate over '
                      'images - normally before they are imported in the '
                      'destination store. When providing work dir, make sure '
                      'enough space is provided for concurrent tasks to run '
                      'efficiently without running out of space. A rough '
                      'estimation can be done by multiplying the number of '
                      '`max_workers` - or the N of workers running - by an '
                      'average image size (e.g 500MB). The image size '
                      'estimation should be done based on the average size in '
                      'your deployment. Note that depending on the tasks '
                      'running you may need to multiply this number by some '
                      'factor depending on what the task does. For example, '
                      'you may want to double the available size if image '
                      'conversion is enabled. All this being said, remember '
                      'these are just estimations and you should do them '
                      'based on the worst case scenario and be prepared to '
                      'act in case they were wrong.')),
]
# General API behaviour options (registered in the DEFAULT group).
common_opts = [
    cfg.IntOpt('limit_param_default', default=25,
               help=_('Default value for the number of items returned by a '
                      'request if not specified explicitly in the request')),
    cfg.IntOpt('api_limit_max', default=1000,
               help=_('Maximum permissible number of items that could be '
                      'returned by a request')),
    cfg.BoolOpt('enable_v1_api', default=True,
                help=_("Deploy the v1 OPNFV Escalator API.")),
    cfg.BoolOpt('enable_v2_api', default=True,
                help=_("Deploy the v2 OpenStack Images API.")),
    cfg.StrOpt('pydev_worker_debug_host',
               help=_('The hostname/IP of the pydev process listening for '
                      'debug connections')),
    cfg.IntOpt('pydev_worker_debug_port', default=5678,
               help=_('The port on which a pydev process is listening for '
                      'connections.')),
    cfg.StrOpt('digest_algorithm', default='sha1',
               help=_('Digest algorithm which will be used for digital '
                      'signature; the default is sha1 the default in Kilo '
                      'for a smooth upgrade process, and it will be updated '
                      'with sha256 in next release(L). Use the command '
                      '"openssl list-message-digest-algorithms" to get the '
                      'available algorithms supported by the version of '
                      'OpenSSL on the platform. Examples are "sha1", '
                      '"sha256", "sha512", etc.')),
]

CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(task_opts, group='task')
CONF.register_opts(common_opts)
# NOTE(review): this constructs an oslo.policy Enforcer at import time and
# immediately discards it; presumably it was meant to be assigned to a
# module attribute - confirm before removing or changing.
policy.Enforcer(CONF)
+
+
def parse_args(args=None, usage=None, default_config_files=None):
    """Parse command line / config file arguments into the global CONF.

    Also gives oslo.concurrency a usable default lock path when the
    operator has not exported OSLO_LOCK_PATH.

    :param args: argument list (defaults to sys.argv within oslo.config)
    :param usage: usage string shown in --help output
    :param default_config_files: config files to load when none are given
    """
    if "OSLO_LOCK_PATH" not in os.environ:
        lockutils.set_defaults(tempfile.gettempdir())

    CONF(args=args, project='escalator',
         version=version.cached_version_string(),
         usage=usage, default_config_files=default_config_files)
+
+
def _get_deployment_flavor(flavor=None):
    """
    Retrieve the paste_deploy.flavor config item, formatted appropriately
    for appending to the application name.

    :param flavor: if specified, use this setting rather than the
                   paste_deploy.flavor configuration setting
    :returns: '' when no flavor is set, otherwise '-<flavor>'
    """
    flavor = flavor or CONF.paste_deploy.flavor
    return ('-' + flavor) if flavor else ''
+
+
def _get_paste_config_path():
    """Locate the paste config file via oslo.config's search path.

    The paste config is assumed to live in a '<name>-paste.ini' file
    named after the last loaded config file (or after the program name
    when no config file was loaded).
    """
    paste_suffix = '-paste.ini'
    conf_suffix = '.conf'
    if CONF.config_file:
        # Assume paste config is in a paste.ini file corresponding
        # to the last config file
        candidate = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
    else:
        candidate = CONF.prog + paste_suffix
    return CONF.find_file(os.path.basename(candidate))
+
+
def _get_deployment_config_file():
    """
    Retrieve the deployment_config_file config item, formatted as an
    absolute pathname.

    :raises RuntimeError: when no paste config file can be located
    """
    # Explicit paste_deploy.config_file wins; otherwise search for a
    # conventionally named file next to the loaded config files.
    path = CONF.paste_deploy.config_file or _get_paste_config_path()
    if not path:
        msg = _("Unable to locate paste config file for %s.") % CONF.prog
        raise RuntimeError(msg)
    return os.path.abspath(path)
+
+
def load_paste_app(app_name, flavor=None, conf_file=None):
    """
    Builds and returns a WSGI app from a paste config file.

    We assume the last config file specified in the supplied ConfigOpts
    object is the paste config file, if conf_file is None.

    :param app_name: name of the application to load
    :param flavor: name of the variant of the application to load
    :param conf_file: path to the paste config file

    :raises RuntimeError when config file cannot be located or application
            cannot be loaded from config file
    """
    # The deployment flavor selects the paste pipeline, e.g. app name
    # "escalator-api" plus flavor "keystone" selects
    # [pipeline:escalator-api-keystone].
    app_name += _get_deployment_flavor(flavor)
    conf_file = conf_file or _get_deployment_config_file()

    logger = logging.getLogger(__name__)
    try:
        logger.debug("Loading %(app_name)s from %(conf_file)s",
                     {'conf_file': conf_file, 'app_name': app_name})

        app = deploy.loadapp("config:%s" % conf_file, name=app_name)

        # Log the options used when starting if we're in debug mode...
        if CONF.debug:
            CONF.log_opt_values(logger, logging.DEBUG)

        return app
    except (LookupError, ImportError) as e:
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(conf_file)s."
                 "\nGot: %(e)r") % {'app_name': app_name,
                                    'conf_file': conf_file,
                                    'e': e})
        logger.error(msg)
        raise RuntimeError(msg)
diff --git a/api/escalator/common/crypt.py b/api/escalator/common/crypt.py
new file mode 100644
index 0000000..3638f11
--- /dev/null
+++ b/api/escalator/common/crypt.py
@@ -0,0 +1,68 @@
+
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Routines for URL-safe encrypting/decrypting
+"""
+
+import base64
+
+from Crypto.Cipher import AES
+from Crypto import Random
+from Crypto.Random import random
+# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
+from six.moves import range
+
+
def urlsafe_encrypt(key, plaintext, blocksize=16):
    """
    Encrypts plaintext. Resulting ciphertext will contain URL-safe characters
    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)

    :returns : Resulting ciphertext
    """
    def pad(text):
        """Pad text so its length becomes a multiple of the blocksize."""
        pad_length = blocksize - len(text) % blocksize
        strong = random.StrongRandom()
        # We use chr(0) as a delimiter between text and padding; pad
        # bytes are drawn from 1..0xFF so the delimiter is unambiguous.
        filler = ''.join(chr(strong.randint(1, 0xFF))
                         for _ in range(pad_length - 1))
        return text + chr(0) + filler

    # random initial 16 bytes for CBC
    init_vector = Random.get_random_bytes(16)
    cipher = AES.new(key, AES.MODE_CBC, init_vector)
    encrypted = cipher.encrypt(pad(str(plaintext)))
    return base64.urlsafe_b64encode(init_vector + encrypted)
+
+
def urlsafe_decrypt(key, ciphertext):
    """
    Decrypts URL-safe base64 encoded ciphertext
    :param key: AES secret key
    :param ciphertext: The encrypted text to decrypt

    :returns : Resulting plaintext
    """
    # Cast from unicode
    raw = base64.urlsafe_b64decode(str(ciphertext))
    # First 16 bytes are the CBC initialization vector.
    init_vector, payload = raw[:16], raw[16:]
    cipher = AES.new(key, AES.MODE_CBC, init_vector)
    padded = cipher.decrypt(payload)
    # Everything after the last chr(0) delimiter is random padding.
    return padded[:padded.rfind(chr(0))]
diff --git a/api/escalator/common/exception.py b/api/escalator/common/exception.py
new file mode 100644
index 0000000..6905074
--- /dev/null
+++ b/api/escalator/common/exception.py
@@ -0,0 +1,521 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Escalator exception subclasses"""
+
+import six
+import six.moves.urllib.parse as urlparse
+
+from escalator import i18n
+
+_ = i18n._
+
+_FATAL_EXCEPTION_FORMAT_ERRORS = False
+
+
class RedirectException(Exception):
    """Exception carrying a redirect target as a parsed URL."""

    def __init__(self, url):
        # Store the urlparse()d form of the redirect target.
        self.url = urlparse.urlparse(url)
+
+
class EscalatorException(Exception):
    """
    Base Escalator Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred")

    def __init__(self, message=None, *args, **kwargs):
        message = message or self.message
        if kwargs:
            try:
                message = message % kwargs
            except Exception:
                if _FATAL_EXCEPTION_FORMAT_ERRORS:
                    raise
                # at least get the core message out if something happened
        self.msg = message
        self.message = message
        super(EscalatorException, self).__init__(message)

    def __unicode__(self):
        # NOTE(flwang): By default, self.msg is an instance of Message, which
        # can't be converted by str(). Based on the definition of
        # __unicode__, it should return unicode always.
        return six.text_type(self.msg)
+
+
# Authentication / authorization failures and protected-resource errors.

class MissingCredentialError(EscalatorException):
    message = _("Missing required credential: %(required)s")


class BadAuthStrategy(EscalatorException):
    message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
                "received \"%(received)s\"")


class NotFound(EscalatorException):
    # Generic base for all "no such object" errors in this module.
    message = _("An object with the specified identifier was not found.")


class BadStoreUri(EscalatorException):
    message = _("The Store URI was malformed.")


class Duplicate(EscalatorException):
    # Generic base for all "already exists" errors in this module.
    message = _("An object with the same identifier already exists.")


class Conflict(EscalatorException):
    message = _("An object with the same identifier is currently being "
                "operated on.")


class AuthBadRequest(EscalatorException):
    message = _("Connect error/bad request to Auth service at URL %(url)s.")


class AuthUrlNotFound(EscalatorException):
    message = _("Auth service at URL %(url)s not found.")


class AuthorizationFailure(EscalatorException):
    message = _("Authorization failed.")


class NotAuthenticated(EscalatorException):
    message = _("You are not authenticated.")


class Forbidden(EscalatorException):
    # Generic base for all "not allowed" errors in this module.
    message = _("You are not authorized to complete this action.")


class ProtectedMetadefNamespaceDelete(Forbidden):
    message = _("Metadata definition namespace %(namespace)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefNamespacePropDelete(Forbidden):
    message = _("Metadata definition property %(property_name)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefObjectDelete(Forbidden):
    message = _("Metadata definition object %(object_name)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefResourceTypeAssociationDelete(Forbidden):
    message = _("Metadata definition resource-type-association"
                " %(resource_type)s is protected and cannot be deleted.")


class ProtectedMetadefResourceTypeSystemDelete(Forbidden):
    message = _("Metadata definition resource-type %(resource_type_name)s is"
                " a seeded-system type and cannot be deleted.")


class ProtectedMetadefTagDelete(Forbidden):
    message = _("Metadata definition tag %(tag_name)s is protected"
                " and cannot be deleted.")
+
+
# Validation errors.

class Invalid(EscalatorException):
    # Generic base for all validation errors in this module.
    message = _("Data supplied was not valid.")


class InvalidSortKey(Invalid):
    message = _("Sort key supplied was not valid.")


class InvalidSortDir(Invalid):
    message = _("Sort direction supplied was not valid.")


class InvalidPropertyProtectionConfiguration(Invalid):
    message = _("Invalid configuration in property protection file.")


class InvalidFilterRangeValue(Invalid):
    message = _("Unable to filter using the specified range.")


class InvalidOptionValue(Invalid):
    message = _("Invalid value for option %(option)s: %(value)s")


class ReadonlyProperty(Forbidden):
    message = _("Attribute '%(property)s' is read-only.")


class ReservedProperty(Forbidden):
    message = _("Attribute '%(property)s' is reserved.")
+
+
# Client-side / HTTP transport errors.

class AuthorizationRedirect(EscalatorException):
    message = _("Redirecting to %(uri)s for authorization.")


class ClientConnectionError(EscalatorException):
    message = _("There was an error connecting to a server")


class ClientConfigurationError(EscalatorException):
    message = _("There was an error configuring the client.")


class MultipleChoices(EscalatorException):
    message = _("The request returned a 302 Multiple Choices. This generally "
                "means that you have not included a version indicator in a "
                "request URI.\n\nThe body of response returned:\n%(body)s")


class LimitExceeded(EscalatorException):
    message = _("The request returned a 413 Request Entity Too Large. This "
                "generally means that rate limiting or a quota threshold was "
                "breached.\n\nThe response body:\n%(body)s")

    def __init__(self, *args, **kwargs):
        # 'retry' (presumably a Retry-After value in seconds - confirm
        # against callers) is exposed as an int; None when absent.
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(LimitExceeded, self).__init__(*args, **kwargs)


class ServiceUnavailable(EscalatorException):
    message = _("The request returned 503 Service Unavailable. This "
                "generally occurs on service overload or other transient "
                "outage.")

    def __init__(self, *args, **kwargs):
        # Same 'retry' handling as LimitExceeded above.
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(ServiceUnavailable, self).__init__(*args, **kwargs)


class ServerError(EscalatorException):
    message = _("The request returned 500 Internal Server Error.")


class UnexpectedStatus(EscalatorException):
    message = _("The request returned an unexpected status: %(status)s."
                "\n\nThe response body:\n%(body)s")
+
+
# Server-side configuration and protocol errors.

class InvalidContentType(EscalatorException):
    message = _("Invalid content type %(content_type)s")


class BadRegistryConnectionConfiguration(EscalatorException):
    message = _("Registry was not configured correctly on API server. "
                "Reason: %(reason)s")


class BadDriverConfiguration(EscalatorException):
    message = _("Driver %(driver_name)s could not be configured correctly. "
                "Reason: %(reason)s")


class MaxRedirectsExceeded(EscalatorException):
    message = _("Maximum redirects (%(redirects)s) was exceeded.")


class InvalidRedirect(EscalatorException):
    message = _("Received invalid HTTP redirect.")


class NoServiceEndpoint(EscalatorException):
    # NOTE(review): message still says "Glance" - looks inherited from the
    # Glance codebase; confirm whether it should read "Escalator".
    message = _("Response from Keystone does not contain a Glance endpoint.")


class RegionAmbiguity(EscalatorException):
    message = _("Multiple 'image' service matches for region %(region)s. This "
                "generally means that a region is required and you have not "
                "supplied one.")


class WorkerCreationFailure(EscalatorException):
    message = _("Server worker creation failed: %(reason)s.")


class SchemaLoadError(EscalatorException):
    message = _("Unable to load schema: %(reason)s")


class InvalidObject(EscalatorException):
    message = _("Provided object does not match schema "
                "'%(schema)s': %(reason)s")


class UnsupportedHeaderFeature(EscalatorException):
    message = _("Provided header feature is unsupported: %(feature)s")


class InUseByStore(EscalatorException):
    message = _("The image cannot be deleted because it is in use through "
                "the backend store outside of escalator.")


class SIGHUPInterrupt(EscalatorException):
    message = _("System SIGHUP signal received.")


class RPCError(EscalatorException):
    message = _("%(cls)s exception was raised in the last rpc call: %(val)s")
+
+
# Asynchronous-task errors.

class TaskException(EscalatorException):
    message = _("An unknown task exception occurred")


class BadTaskConfiguration(EscalatorException):
    message = _("Task was not configured properly")


class TaskNotFound(TaskException, NotFound):
    message = _("Task with the given id %(task_id)s was not found")


class InvalidTaskStatus(TaskException, Invalid):
    message = _("Provided status of task is unsupported: %(status)s")


class InvalidTaskType(TaskException, Invalid):
    message = _("Provided type of task is unsupported: %(type)s")


class InvalidTaskStatusTransition(TaskException, Invalid):
    message = _("Status transition from %(cur_status)s to"
                " %(new_status)s is not allowed")


class DuplicateLocation(Duplicate):
    message = _("The location %(location)s already exists")


class InvalidParameterValue(Invalid):
    message = _("Invalid value '%(value)s' for parameter '%(param)s': "
                "%(extra_msg)s")
+
+
# Metadata-definition (metadef) duplicates and access errors.

class MetadefDuplicateNamespace(Duplicate):
    message = _("The metadata definition namespace=%(namespace_name)s"
                " already exists.")


class MetadefDuplicateObject(Duplicate):
    message = _("A metadata definition object with name=%(object_name)s"
                " already exists in namespace=%(namespace_name)s.")


class MetadefDuplicateProperty(Duplicate):
    message = _("A metadata definition property with name=%(property_name)s"
                " already exists in namespace=%(namespace_name)s.")


class MetadefDuplicateResourceType(Duplicate):
    message = _("A metadata definition resource-type with"
                " name=%(resource_type_name)s already exists.")


class MetadefDuplicateResourceTypeAssociation(Duplicate):
    message = _("The metadata definition resource-type association of"
                " resource-type=%(resource_type_name)s to"
                " namespace=%(namespace_name)s"
                " already exists.")


class MetadefDuplicateTag(Duplicate):
    message = _("A metadata tag with name=%(name)s"
                " already exists in namespace=%(namespace_name)s.")


class MetadefForbidden(Forbidden):
    message = _("You are not authorized to complete this action.")


class MetadefIntegrityError(Forbidden):
    message = _("The metadata definition %(record_type)s with"
                " name=%(record_name)s not deleted."
                " Other records still refer to it.")
+
+
class MetadefNamespaceNotFound(NotFound):
    # NOTE: the two adjacent literals previously concatenated to
    # "...%(namespace_name)swas not found." - missing separator space.
    message = _("Metadata definition namespace=%(namespace_name)s"
                " was not found.")
+
+
# Metadata-definition (metadef) "not found" errors.

class MetadefObjectNotFound(NotFound):
    message = _("The metadata definition object with"
                " name=%(object_name)s was not found in"
                " namespace=%(namespace_name)s.")


class MetadefPropertyNotFound(NotFound):
    message = _("The metadata definition property with"
                " name=%(property_name)s was not found in"
                " namespace=%(namespace_name)s.")


class MetadefResourceTypeNotFound(NotFound):
    message = _("The metadata definition resource-type with"
                " name=%(resource_type_name)s, was not found.")


class MetadefResourceTypeAssociationNotFound(NotFound):
    message = _("The metadata definition resource-type association of"
                " resource-type=%(resource_type_name)s to"
                " namespace=%(namespace_name)s,"
                " was not found.")


class MetadefTagNotFound(NotFound):
    message = _("The metadata definition tag with"
                " name=%(name)s was not found in"
                " namespace=%(namespace_name)s.")
+
+
# Versioning, artifact and JSON-patch errors.

class InvalidVersion(Invalid):
    message = _("Version is invalid: %(reason)s")


class InvalidArtifactTypePropertyDefinition(Invalid):
    message = _("Invalid property definition")


class InvalidArtifactTypeDefinition(Invalid):
    message = _("Invalid type definition")


class InvalidArtifactPropertyValue(Invalid):
    message = _("Property '%(name)s' may not have value '%(val)s': %(msg)s")

    def __init__(self, message=None, *args, **kwargs):
        super(InvalidArtifactPropertyValue, self).__init__(message, *args,
                                                           **kwargs)
        # Keep the offending property name/value available to callers.
        self.name = kwargs.get('name')
        self.value = kwargs.get('val')


class ArtifactNotFound(NotFound):
    message = _("Artifact with id=%(id)s was not found")


class ArtifactForbidden(Forbidden):
    message = _("Artifact with id=%(id)s is not accessible")


class ArtifactDuplicateNameTypeVersion(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already exists")


class InvalidArtifactStateTransition(Invalid):
    message = _("Artifact cannot change state from %(source)s to %(target)s")


class ArtifactDuplicateDirectDependency(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already has the direct dependency=%(dep)s")


class ArtifactDuplicateTransitiveDependency(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already has the transitive dependency=%(dep)s")


class ArtifactUnsupportedPropertyOperator(Invalid):
    message = _("Operator %(op)s is not supported")


class ArtifactUnsupportedShowLevel(Invalid):
    message = _("Show level %(shl)s is not supported in this operation")


class ArtifactPropertyValueNotFound(NotFound):
    message = _("Property's %(prop)s value has not been found")


class ArtifactInvalidProperty(Invalid):
    message = _("Artifact has no property %(prop)s")


class ArtifactInvalidPropertyParameter(Invalid):
    message = _("Cannot use this parameter with the operator %(op)s")


class ArtifactLoadError(EscalatorException):
    message = _("Cannot load artifact '%(name)s'")


class ArtifactNonMatchingTypeName(ArtifactLoadError):
    message = _(
        "Plugin name '%(plugin)s' should match artifact typename '%(name)s'")


class ArtifactPluginNotFound(NotFound):
    message = _("No plugin for '%(name)s' has been loaded")


class UnknownArtifactType(NotFound):
    message = _("Artifact type with name '%(name)s' and version '%(version)s' "
                "is not known")


class ArtifactInvalidStateTransition(Invalid):
    message = _("Artifact state cannot be changed from %(curr)s to %(to)s")


class JsonPatchException(EscalatorException):
    message = _("Invalid jsonpatch request")


class InvalidJsonPatchBody(JsonPatchException):
    message = _("The provided body %(body)s is invalid "
                "under given schema: %(schema)s")


class InvalidJsonPatchPath(JsonPatchException):
    message = _("The provided path '%(path)s' is invalid: %(explanation)s")

    def __init__(self, message=None, *args, **kwargs):
        # Expose the human-readable explanation separately from the message.
        self.explanation = kwargs.get("explanation")
        super(InvalidJsonPatchPath, self).__init__(message, *args, **kwargs)


class ThreadBinException(EscalatorException):
    # NOTE(review): this __init__ adds nothing over the inherited one and
    # the class defines no custom 'message'; presumably a placeholder -
    # confirm intent before simplifying.

    def __init__(self, *args):
        super(ThreadBinException, self).__init__(*args)
+
+
class SubprocessCmdFailed(EscalatorException):
    # Message previously read "suprocess" - typo in user-facing text.
    message = _("subprocess command failed.")
+
+
class DeleteConstrainted(EscalatorException):
    # NOTE(review): class name misspells "Constrained"; kept as-is since
    # external callers reference it by this name.
    message = _("delete is not allowed.")


class TrustMeFailed(EscalatorException):
    message = _("Trust me script failed.")
diff --git a/api/escalator/common/rpc.py b/api/escalator/common/rpc.py
new file mode 100644
index 0000000..4d50461
--- /dev/null
+++ b/api/escalator/common/rpc.py
@@ -0,0 +1,279 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+RPC Controller
+"""
+import datetime
+import traceback
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import oslo_utils.importutils as imp
+from oslo_utils import timeutils
+import six
+from webob import exc
+
+from escalator.common import client
+from escalator.common import exception
+from escalator.common import utils
+from escalator.common import wsgi
+from escalator import i18n
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+_LE = i18n._LE
+
+
# Options controlling which exception modules the RPC layer may rebuild
# exceptions from when deserializing server-side errors.
rpc_opts = [
    # NOTE(flaper87): Shamelessly copied
    # from oslo rpc.
    cfg.ListOpt('allowed_rpc_exception_modules',
                default=['openstack.common.exception',
                         'escalator.common.exception',
                         'exceptions',
                         ],
                help='Modules of exceptions that are permitted to be recreated'
                     ' upon receiving exception data from an rpc call.'),
]

CONF = cfg.CONF
CONF.register_opts(rpc_opts)
+
+
class RPCJSONSerializer(wsgi.JSONResponseSerializer):
    """JSON serializer that encodes datetimes as tagged primitives."""

    def _sanitizer(self, obj):
        # Encode datetime objects as {"_type": ..., "_value": ...} so the
        # peer's RPCJSONDeserializer can rebuild them; defer everything
        # else to the base serializer.
        if isinstance(obj, datetime.datetime):
            return {"_type": "datetime",
                    "_value": timeutils.strtime(obj)}
        return super(RPCJSONSerializer, self)._sanitizer(obj)
+
+
class RPCJSONDeserializer(wsgi.JSONRequestDeserializer):
    """JSON deserializer that rebuilds tagged primitives (datetimes)."""

    def _to_datetime(self, obj):
        return timeutils.parse_strtime(obj)

    def _sanitizer(self, obj):
        # Tagged values look like {"_type": "datetime", "_value": "..."};
        # the tag selects a _to_<type> converter. Anything without a tag
        # (or without a converter) is returned untouched.
        try:
            return getattr(self, "_to_" + obj["_type"])(obj["_value"])
        except (KeyError, AttributeError):
            return obj
+
+
class Controller(object):
    """
    Base RPCController.

    This is the base controller for RPC based APIs. Commands
    handled by this controller respect the following form:

        [{
            'command': 'method_name',
            'kwargs': {...}
        }]

    The controller is capable of processing more than one command
    per request and will always return a list of results.

    :params raise_exc: Boolean that specifies whether to raise
        exceptions instead of "serializing" them.
    """

    def __init__(self, raise_exc=False):
        self._registered = {}
        self.raise_exc = raise_exc

    def register(self, resource, filtered=None, excluded=None, refiner=None):
        """
        Exports methods through the RPC Api.

        :params resource: Resource's instance to register.
        :params filtered: List of methods that *can* be registered. Read
            as "Method must be in this list".
        :params excluded: List of methods to exclude.
        :params refiner: Callable to use as filter for methods.

        :raises AssertionError: If refiner is not callable.
        """
        # Public attributes only; leading-underscore names stay private.
        funcs = filter(lambda x: not x.startswith("_"), dir(resource))

        if filtered:
            funcs = [f for f in funcs if f in filtered]

        if excluded:
            funcs = [f for f in funcs if f not in excluded]

        if refiner:
            assert callable(refiner), "Refiner must be callable"
            funcs = filter(refiner, funcs)

        for name in funcs:
            meth = getattr(resource, name)

            # Non-callable attributes are not exposed as commands.
            if not callable(meth):
                continue

            self._registered[name] = meth

    def __call__(self, req, body):
        """
        Executes the command
        """
        if not isinstance(body, list):
            msg = _("Request must be a list of commands")
            raise exc.HTTPBadRequest(explanation=msg)

        def validate(cmd):
            # Each command must be a dict with a string 'command' and,
            # optionally, a dict 'kwargs'; unknown commands are 404s.
            if not isinstance(cmd, dict):
                msg = _("Bad Command: %s") % str(cmd)
                raise exc.HTTPBadRequest(explanation=msg)

            command, kwargs = cmd.get("command"), cmd.get("kwargs")

            if (not command or not isinstance(command, six.string_types) or
                    (kwargs and not isinstance(kwargs, dict))):
                msg = _("Wrong command structure: %s") % (str(cmd))
                raise exc.HTTPBadRequest(explanation=msg)

            method = self._registered.get(command)
            if not method:
                # Just raise 404 if the user tries to
                # access a private method. No need for
                # 403 here since logically the command
                # is not registered to the rpc dispatcher
                raise exc.HTTPNotFound(explanation=_("Command not found"))

            return True

        # If more than one command were sent then they might
        # be intended to be executed sequentially, that for,
        # lets first verify they're all valid before executing
        # them.
        # NOTE: filter() is lazy on Python 3, which would interleave
        # validation with execution below; materialize the list so every
        # command is validated *before* any command runs.
        commands = list(filter(validate, body))

        results = []
        for cmd in commands:
            # kwargs is not required
            command, kwargs = cmd["command"], cmd.get("kwargs", {})
            method = self._registered[command]
            try:
                result = method(req.context, **kwargs)
            except Exception as e:
                if self.raise_exc:
                    raise

                cls, val = e.__class__, utils.exception_to_str(e)
                msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") %
                       dict(val=val, tb=traceback.format_exc()))
                LOG.error(msg)

                # NOTE(flaper87): Don't propagate all exceptions
                # but the ones allowed by the user.
                module = cls.__module__
                if module not in CONF.allowed_rpc_exception_modules:
                    cls = exception.RPCError
                    val = six.text_type(exception.RPCError(cls=cls, val=val))

                cls_path = "%s.%s" % (cls.__module__, cls.__name__)
                result = {"_error": {"cls": cls_path, "val": val}}
            results.append(result)
        return results
+
+
class RPCClient(client.BaseClient):
    """Client that forwards attribute access as RPC commands to a server."""

    def __init__(self, *args, **kwargs):
        self._serializer = RPCJSONSerializer()
        self._deserializer = RPCJSONDeserializer()

        # raise_exc: when True, server-side errors are re-raised locally.
        # base_path: URL path where the RPC controller is mounted.
        self.raise_exc = kwargs.pop("raise_exc", True)
        self.base_path = kwargs.pop("base_path", '/rpc')
        super(RPCClient, self).__init__(*args, **kwargs)

    @client.handle_unauthenticated
    def bulk_request(self, commands):
        """
        Execute multiple commands in a single request.

        :params commands: List of commands to send. Commands
        must respect the following form:

            {
                'command': 'method_name',
                'kwargs': method_kwargs
            }
        """
        body = self._serializer.to_json(commands)
        response = super(RPCClient, self).do_request('POST',
                                                     self.base_path,
                                                     body)
        return self._deserializer.from_json(response.read())

    def do_request(self, method, **kwargs):
        """
        Simple do_request override. This method serializes
        the outgoing body and builds the command that will
        be sent.

        :params method: The remote python method to call
        :params kwargs: Dynamic parameters that will be
        passed to the remote method.
        """
        content = self.bulk_request([{'command': method,
                                      'kwargs': kwargs}])

        # NOTE(flaper87): Return the first result if
        # a single command was executed.
        content = content[0]

        # NOTE(flaper87): Check if content is an error
        # and re-raise it if raise_exc is True. Before
        # checking if content contains the '_error' key,
        # verify if it is an instance of dict - since the
        # RPC call may have returned something different.
        if self.raise_exc and (isinstance(content, dict) and
                               '_error' in content):
            error = content['_error']
            try:
                exc_cls = imp.import_class(error['cls'])
                raise exc_cls(error['val'])
            except ImportError:
                # NOTE(flaper87): The exception
                # class couldn't be imported, using
                # a generic exception.
                raise exception.RPCError(**error)
        return content

    def __getattr__(self, item):
        """
        This method returns a method_proxy that
        will execute the rpc call in the registry
        service.
        """
        # Underscore-prefixed names are treated as local attributes so
        # normal AttributeError behaviour is preserved for internals.
        if item.startswith('_'):
            raise AttributeError(item)

        def method_proxy(**kw):
            return self.do_request(item, **kw)

        return method_proxy
diff --git a/api/escalator/common/utils.py b/api/escalator/common/utils.py
new file mode 100644
index 0000000..6cb1784
--- /dev/null
+++ b/api/escalator/common/utils.py
@@ -0,0 +1,938 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2014 SoftLayer Technologies, Inc.
+# Copyright 2015 Mirantis, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+System-level utilities and helper functions.
+"""
+
+import errno
+from functools import reduce
+
+try:
+ from eventlet import sleep
+except ImportError:
+ from time import sleep
+from eventlet.green import socket
+
+import functools
+import os
+import platform
+import re
+import subprocess
+import sys
+import uuid
+import copy
+
+from OpenSSL import crypto
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import encodeutils
+from oslo_utils import excutils
+import six
+from webob import exc
+from escalator.common import exception
+from escalator import i18n
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+_LE = i18n._LE
+
+
+ESCALATOR_TEST_SOCKET_FD_STR = 'ESCALATOR_TEST_SOCKET_FD'
+
+
def chunkreadable(iter, chunk_size=65536):
    """
    Wrap a readable iterator with a reader yielding chunks of
    a preferred size, otherwise leave iterator unchanged.

    :param iter: an iter which may also be readable
    :param chunk_size: maximum size of chunk
    """
    if hasattr(iter, 'read'):
        return chunkiter(iter, chunk_size)
    return iter
+
+
def chunkiter(fp, chunk_size=65536):
    """
    Yield fixed-size chunks read from a file-like object until EOF.

    :param fp: a file-like object
    :param chunk_size: maximum size of chunk
    """
    chunk = fp.read(chunk_size)
    while chunk:
        yield chunk
        chunk = fp.read(chunk_size)
+
+
def cooperative_iter(iter):
    """
    Yield each chunk of *iter*, sleeping(0) before each one so other
    eventlet greenthreads get a chance to run (prevents starvation).

    :param iter: an iterator to wrap
    """
    try:
        for chunk in iter:
            sleep(0)
            yield chunk
    except Exception as err:
        # Log, then let save_and_reraise_exception propagate the original.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error: cooperative_iter exception %s") % err)
+
+
def cooperative_read(fd):
    """
    Return a read() replacement that delegates to *fd* but sleeps(0)
    after every read so other eventlet greenthreads can be scheduled.

    :param fd: a file descriptor to wrap
    """
    def readfn(*args):
        data = fd.read(*args)
        sleep(0)
        return data
    return readfn
+
+
+MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit
+
+
class CooperativeReader(object):

    """
    An eventlet thread friendly class for reading in image data.

    When accessing data either through the iterator or the read method
    we perform a sleep to allow a co-operative yield. When there is more than
    one image being uploaded/downloaded this prevents eventlet thread
    starvation, ie allows all threads to be scheduled periodically rather than
    having the same thread be continuously active.
    """

    def __init__(self, fd):
        """
        :param fd: Underlying image file object
        """
        self.fd = fd
        self.iterator = None
        # NOTE(markwash): if the underlying supports read(), overwrite the
        # default iterator-based implementation with cooperative_read which
        # is more straightforward
        if hasattr(fd, 'read'):
            self.read = cooperative_read(fd)
        else:
            # Buffer holding the part of the current chunk not yet consumed.
            self.buffer = ''
            self.position = 0

    def read(self, length=None):
        """Return the requested amount of bytes, fetching the next chunk of
        the underlying iterator when needed.

        This is replaced with cooperative_read in __init__ if the underlying
        fd already supports read().
        """
        if length is None:
            if len(self.buffer) - self.position > 0:
                # if no length specified but some data exists in buffer,
                # return that data and clear the buffer
                result = self.buffer[self.position:]
                self.buffer = ''
                self.position = 0
                return str(result)
            else:
                # otherwise read the next chunk from the underlying iterator
                # and return it as a whole. Reset the buffer, as subsequent
                # calls may specify the length
                try:
                    if self.iterator is None:
                        self.iterator = self.__iter__()
                    # Use the builtin next() instead of the Python-2-only
                    # iterator.next() method (works on 2.6+ and 3.x).
                    return next(self.iterator)
                except StopIteration:
                    return ''
                finally:
                    self.buffer = ''
                    self.position = 0
        else:
            result = bytearray()
            while len(result) < length:
                if self.position < len(self.buffer):
                    to_read = length - len(result)
                    chunk = self.buffer[self.position:self.position + to_read]
                    result.extend(chunk)

                    # This check is here to prevent potential OOM issues if
                    # this code is called with unreasonably high values of read
                    # size. Currently it is only called from the HTTP clients
                    # of Glance backend stores, which use httplib for data
                    # streaming, which has readsize hardcoded to 8K, so this
                    # check should never fire. Regardless it is still worth
                    # making the check, as the code may be reused somewhere
                    # else.
                    if len(result) >= MAX_COOP_READER_BUFFER_SIZE:
                        raise exception.LimitExceeded()
                    self.position += len(chunk)
                else:
                    try:
                        if self.iterator is None:
                            self.iterator = self.__iter__()
                        self.buffer = next(self.iterator)
                        self.position = 0
                    except StopIteration:
                        # Underlying iterator exhausted: return what we have,
                        # possibly fewer bytes than requested.
                        self.buffer = ''
                        self.position = 0
                        return str(result)
            return str(result)

    def __iter__(self):
        return cooperative_iter(self.fd.__iter__())
+
+
class LimitingReader(object):

    """
    Reader designed to fail when reading image data past the configured
    allowable amount.
    """

    def __init__(self, data, limit):
        """
        :param data: Underlying image data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        # Running total of bytes handed out so far.
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                raise exception.ImageSizeLimitExceeded()
            yield chunk

    def read(self, i):
        chunk = self.data.read(i)
        self.bytes_read += len(chunk)
        if self.bytes_read > self.limit:
            raise exception.ImageSizeLimitExceeded()
        return chunk
+
+
def get_dict_meta(response):
    """Return a plain dict copy of the response's JSON body."""
    return {key: value for key, value in response.json.items()}
+
+
def create_mashup_dict(image_meta):
    """
    Returns a dictionary-like mashup of the image core properties
    and the image custom properties from given image metadata.

    Nested dicts are flattened recursively; a nested key is only lifted
    to the top level when it does not collide with a core key.

    :param image_meta: metadata of image with core and custom properties
    """
    mashup = {}
    for key, value in six.iteritems(image_meta):
        if isinstance(value, dict):
            for subkey, subvalue in six.iteritems(create_mashup_dict(value)):
                if subkey not in image_meta:
                    mashup[subkey] = subvalue
        else:
            mashup[key] = value
    return mashup
+
+
def safe_mkdirs(path):
    """Create *path* (with parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
+
+
def safe_remove(path):
    """Remove *path*, ignoring 'no such file' errors."""
    try:
        os.remove(path)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
+
+
class PrettyTable(object):

    """Creates an ASCII art table for use in bin/escalator

    Columns are declared first with add_column(); make_header() and
    make_row() then render lines with single-space separators.
    """

    def __init__(self):
        self.columns = []

    def add_column(self, width, label="", just='l'):
        """Add a column to the table

        :param width: number of characters wide the column should be
        :param label: column heading
        :param just: justification for the column, 'l' for left,
                     'r' for right
        """
        self.columns.append((width, label, just))

    def make_header(self):
        # NOTE(sirp): headers are always left justified
        labels = [self._clip_and_justify(label, width, 'l')
                  for width, label, _ in self.columns]
        rules = ['-' * width for width, _, _ in self.columns]
        return '\n'.join([' '.join(labels), ' '.join(rules)])

    def make_row(self, *args):
        cells = [self._clip_and_justify(value, width, just)
                 for value, (width, _, just) in zip(args, self.columns)]
        return ' '.join(cells)

    @staticmethod
    def _clip_and_justify(data, width, just):
        # clip field to column width, then pad to exactly that width
        clipped = str(data)[:width]
        if just == 'r':
            return clipped.rjust(width)
        return clipped.ljust(width)
+
+
def get_terminal_size():
    """Return the (height, width) of the controlling terminal.

    Probes the terminal in an OS-appropriate way (ioctl/stty on POSIX,
    the console API on win32) and raises exception.Invalid when the size
    cannot be determined or is not a pair of positive integers.
    """

    def _get_terminal_size_posix():
        import fcntl
        import struct
        import termios

        height_width = None

        try:
            # TIOCGWINSZ fills in (rows, cols) as two signed shorts.
            height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(),
                                         termios.TIOCGWINSZ,
                                         struct.pack(
                                             'HH', 0, 0)))
        except Exception:
            pass

        if not height_width:
            try:
                # Fall back to asking the stty utility.
                p = subprocess.Popen(['stty', 'size'],
                                     shell=False,
                                     stdout=subprocess.PIPE,
                                     stderr=open(os.devnull, 'w'))
                result = p.communicate()
                if p.returncode == 0:
                    return tuple(int(x) for x in result[0].split())
            except Exception:
                pass

        return height_width

    def _get_terminal_size_win32():
        try:
            from ctypes import create_string_buffer
            from ctypes import windll
            # -12 is STD_ERROR_HANDLE.
            handle = windll.kernel32.GetStdHandle(-12)
            csbi = create_string_buffer(22)
            res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        except Exception:
            return None
        if res:
            import struct
            unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw)
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom, maxx, maxy) = unpack_tmp
            # Size of the visible window, not the whole screen buffer.
            height = bottom - top + 1
            width = right - left + 1
            return (height, width)
        else:
            return None

    def _get_terminal_size_unknownOS():
        raise NotImplementedError

    func = {'posix': _get_terminal_size_posix,
            'win32': _get_terminal_size_win32}

    height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)()

    if height_width is None:
        raise exception.Invalid()

    for i in height_width:
        if not isinstance(i, int) or i <= 0:
            raise exception.Invalid()

    return height_width[0], height_width[1]
+
+
def mutating(func):
    """Decorator to enforce read-only logic: rejects the wrapped request
    handler with HTTP 403 when the request context is read-only."""
    @functools.wraps(func)
    def wrapped(self, req, *args, **kwargs):
        if not req.context.read_only:
            return func(self, req, *args, **kwargs)
        LOG.debug("Read-only access")
        raise exc.HTTPForbidden("Read-only access", request=req,
                                content_type="text/plain")
    return wrapped
+
+
def setup_remote_pydev_debug(host, port):
    """Attach this process to a remote pydev debug server.

    :param host: hostname/IP where the pydev server listens
    :param port: its listening port
    :returns: True on success; otherwise logs and re-raises the error.
    """
    try:
        try:
            from pydev import pydevd
        except ImportError:
            import pydevd

        pydevd.settrace(host, port=port, stdoutToServer=True,
                        stderrToServer=True)
        return True
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(
                _LE('Error setting up the debug environment. Verify that the'
                    ' option pydev_worker_debug_host is pointing to a valid '
                    'hostname or IP on which a pydev server is listening on'
                    ' the port indicated by pydev_worker_debug_port.'))
+
+
def validate_key_cert(key_file, cert_file):
    """Verify that *key_file* and *cert_file* form a usable PEM key pair.

    Loads both files with pyOpenSSL, then signs and verifies a probe
    string using the configured digest algorithm.

    :raises RuntimeError: when either file is unreadable/unparsable, or
        when the key and certificate do not belong together.
    """
    try:
        # Track which file we're working on so error messages can name it.
        error_key_name = "private key"
        error_filename = key_file
        with open(key_file, 'r') as keyfile:
            key_str = keyfile.read()
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str)

        error_key_name = "certificate"
        error_filename = cert_file
        with open(cert_file, 'r') as certfile:
            cert_str = certfile.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
    except IOError as ioe:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it."
                             " Error: %(ioe)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ioe': ioe})
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your %(error_key_name)s "
                             "%(error_filename)s. Please verify it. OpenSSL"
                             " error: %(ce)s") %
                           {'error_key_name': error_key_name,
                            'error_filename': error_filename,
                            'ce': ce})

    try:
        # Round-trip a random probe value through sign/verify to prove the
        # key and certificate match.
        data = str(uuid.uuid4())
        digest = CONF.digest_algorithm
        if digest == 'sha1':
            LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)'
                     ' state that the SHA-1 is not suitable for'
                     ' general-purpose digital signature applications (as'
                     ' specified in FIPS 186-3) that require 112 bits of'
                     ' security. The default value is sha1 in Kilo for a'
                     ' smooth upgrade process, and it will be updated'
                     ' with sha256 in next release(L).')
        out = crypto.sign(key, data, digest)
        crypto.verify(cert, out, data, digest)
    except crypto.Error as ce:
        raise RuntimeError(_("There is a problem with your key pair. "
                             "Please verify that cert %(cert_file)s and "
                             "key %(key_file)s belong together. OpenSSL "
                             "error %(ce)s") % {'cert_file': cert_file,
                                                'key_file': key_file,
                                                'ce': ce})
+
+
def get_test_suite_socket():
    """Return a listening socket inherited from the test suite, if any.

    The functional test runner may pass an already-bound file descriptor
    through the ESCALATOR_TEST_SOCKET_FD environment variable. The env
    var is deleted after use so the fd is only consumed once; returns
    None when no fd was provided.
    """
    global ESCALATOR_TEST_SOCKET_FD_STR
    if ESCALATOR_TEST_SOCKET_FD_STR in os.environ:
        fd = int(os.environ[ESCALATOR_TEST_SOCKET_FD_STR])
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        # NOTE: socket.SocketType(_sock=...) is Python-2-only; fromfd()
        # already duplicated the fd, so the original is closed below.
        sock = socket.SocketType(_sock=sock)
        sock.listen(CONF.backlog)
        del os.environ[ESCALATOR_TEST_SOCKET_FD_STR]
        os.close(fd)
        return sock
    return None
+
+
def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        canonical = str(uuid.UUID(val))
    except (TypeError, ValueError, AttributeError):
        return False
    return canonical == val
+
+
def exception_to_str(exc):
    """Best-effort conversion of an exception to a safely-encoded string."""
    for converter in (six.text_type, str):
        try:
            error = converter(exc)
            break
        except UnicodeError:
            continue
    else:
        # Neither conversion worked; fall back to naming the class.
        error = ("Caught '%(exception)s' exception." %
                 {"exception": exc.__class__.__name__})
    return encodeutils.safe_encode(error, errors='ignore')
+
+
# Matches any character outside the Basic Multilingual Plane (i.e. one
# that needs 4 bytes in UTF-8). On narrow (UCS-2) Python builds such
# characters are stored as surrogate pairs, so match those instead.
try:
    REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
    # UCS-2 build case
    REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
+
+
def no_4byte_params(f):
    """
    Checks that no 4 byte unicode characters are allowed
    in dicts' keys/values and string's parameters

    Raises exception.Invalid when one is found.
    NOTE: Python 2 only -- relies on `unicode` and dict.iteritems().
    """
    def wrapper(*args, **kwargs):

        def _is_match(some_str):
            # True when the value is a unicode string containing at least
            # one character outside the Basic Multilingual Plane.
            return (isinstance(some_str, unicode) and
                    REGEX_4BYTE_UNICODE.findall(some_str) != [])

        def _check_dict(data_dict):
            # a dict of dicts has to be checked recursively
            for key, value in data_dict.iteritems():
                if isinstance(value, dict):
                    _check_dict(value)
                else:
                    if _is_match(key):
                        msg = _("Property names can't contain 4 byte unicode.")
                        raise exception.Invalid(msg)
                    if _is_match(value):
                        msg = (_("%s can't contain 4 byte unicode characters.")
                               % key.title())
                        raise exception.Invalid(msg)

        for data_dict in [arg for arg in args if isinstance(arg, dict)]:
            _check_dict(data_dict)
        # now check args for str values
        for arg in args:
            if _is_match(arg):
                msg = _("Param values can't contain 4 byte unicode.")
                raise exception.Invalid(msg)
        # check kwargs as well, as params are passed as kwargs via
        # registry calls
        _check_dict(kwargs)
        return f(*args, **kwargs)
    return wrapper
+
+
def stash_conf_values():
    """
    Make a copy of some of the current global CONF's settings.
    Allows determining if any of these values have changed
    when the config is reloaded.

    :returns: dict mapping option name to its current CONF value.
    """
    # Bug fix: 'tcp_keepidle' previously stashed CONF.cert_file, so a
    # changed tcp_keepidle was never detected on reload (and a changed
    # cert_file was mis-reported as a tcp_keepidle change).
    return {
        'bind_host': CONF.bind_host,
        'bind_port': CONF.bind_port,
        'tcp_keepidle': CONF.tcp_keepidle,
        'backlog': CONF.backlog,
        'key_file': CONF.key_file,
        'cert_file': CONF.cert_file,
    }
+
+
def validate_ip_format(ip_str):
    '''
    valid ip_str format = '10.43.178.9'
    invalid ip_str format : '123. 233.42.12', spaces existed in field
                            '3234.23.453.353', out of range
                            '-2.23.24.234', negative number in field
                            '1.2.3.4d', letter in field
                            '10.43.1789', invalid format

    Raises HTTPBadRequest when the string is empty or malformed;
    returns None for a well-formed dotted-quad address.
    '''
    if not ip_str:
        msg = (_("No ip given when check ip"))
        LOG.error(msg)
        raise exc.HTTPBadRequest(msg, content_type="text/plain")

    fields = ip_str.rstrip().split('.')
    well_formed = (ip_str.count('.') == 3 and
                   all(field.isdigit() and 0 <= int(field) < 256
                       for field in fields))
    if not well_formed:
        msg = (_("%s invalid ip format!") % ip_str)
        LOG.error(msg)
        raise exc.HTTPBadRequest(msg, content_type="text/plain")
+
+
def valid_cidr(cidr):
    """Validate a CIDR string such as '192.168.0.0/24'.

    :param cidr: candidate CIDR string
    :raises webob HTTPBadRequest: when empty, malformed, or when the
        netmask is not an integer in [0, 32]; the address part is then
        checked via validate_ip_format().
    """
    if not cidr:
        msg = (_("No CIDR given."))
        LOG.error(msg)
        raise exc.HTTPBadRequest(explanation=msg)

    cidr_division = cidr.split('/')
    if (len(cidr_division) != 2 or
            not cidr_division[0] or
            not cidr_division[1]):
        msg = (_("CIDR format error."))
        LOG.error(msg)
        raise exc.HTTPBadRequest(explanation=msg)

    netmask_err_msg = (_("CIDR netmask error, "
                         "it should be a integer between 0-32."))
    try:
        netmask_cidr = int(cidr_division[1])
    except ValueError:
        LOG.warn(netmask_err_msg)
        raise exc.HTTPBadRequest(explanation=netmask_err_msg)

    # Bug fix: the original used 'and' here, a condition that can never
    # be true, so out-of-range netmasks were silently accepted.
    if netmask_cidr < 0 or netmask_cidr > 32:
        LOG.warn(netmask_err_msg)
        raise exc.HTTPBadRequest(explanation=netmask_err_msg)

    validate_ip_format(cidr_division[0])
+
+
def ip_into_int(ip):
    """
    Switch ip string to decimalism integer..
    :param ip: ip string
    :return: decimalism integer
    """
    value = 0
    for octet in ip.split('.'):
        value = (value << 8) + int(octet)
    return value
+
+
def int_into_ip(num):
    """Convert a 32-bit integer to its dotted-quad IP string.

    :param num: integer IP value, e.g. 3232235521
    :return: dotted-quad string, e.g. '192.168.0.1'

    Bug fix: uses floor division (//=). The original ``num /= 256``
    produces floats under Python 3 and would yield values like '0.0'.
    """
    octets = []
    for _unused in range(4):
        octets.append(str(num % 256))
        num //= 256
    return '.'.join(octets[::-1])
+
+
def is_ip_in_cidr(ip, cidr):
    """
    Check ip is in cidr
    :param ip: Ip will be checked, like:192.168.1.2.
    :param cidr: Ip range,like:192.168.0.0/24.
    :return: If ip in cidr, return True, else return False.
    """
    if not ip:
        raise exc.HTTPBadRequest(explanation="Error, ip is empty")
    if not cidr:
        raise exc.HTTPBadRequest(explanation="Error, CIDR is empty")
    parts = cidr.split('/')
    # Negative Python int works as an arbitrary-precision netmask here.
    mask = ~(2 ** (32 - int(parts[1])) - 1)
    return (ip_into_int(ip) & mask) == (ip_into_int(parts[0]) & mask)
+
+
def is_ip_in_ranges(ip, ip_ranges):
    """
    Check ip is in range
    : ip: Ip will be checked, like:192.168.1.2.
    : ip_ranges : Ip ranges, like:
        [{'start':'192.168.0.10', 'end':'192.168.0.20'}
         {'start':'192.168.0.50', 'end':'192.168.0.60'}]
    :return: If ip in ip_ranges, return True, else return False.
             An empty ip_ranges means "no restriction" and returns True.
    """
    if not ip:
        raise exc.HTTPBadRequest(explanation="Error, ip is empty")

    if not ip_ranges:
        return True

    ip_int = ip_into_int(ip)
    for ip_range in ip_ranges:
        lower = ip_into_int(ip_range['start'])
        upper = ip_into_int(ip_range['end'])
        if lower <= ip_int <= upper:
            return True
    return False
+
+
def merge_ip_ranges(ip_ranges):
    """Collapse IP ranges whose boundaries are exactly adjacent.

    :param ip_ranges: list of dicts like
        [{'start': '192.168.0.10', 'end': '192.168.0.20'}, ...]
    :returns: list of merged range dicts. As before, input dicts may be
        mutated ('end' rewritten) and ranges sharing a start address
        collapse to the last one seen.

    Fixes two defects of the original implementation:
    * ranges were sorted by their *string* start key, which orders
      numerically different addresses incorrectly;
    * the running "last end" was only recorded for the first range, so a
      chain of three or more adjacent ranges was not fully merged.
    """
    if not ip_ranges:
        return ip_ranges

    # Sort numerically by start address (int keys, not str keys).
    ranges_by_start = {}
    for ip_range in ip_ranges:
        ranges_by_start[ip_into_int(ip_range['start'])] = ip_range
    sorted_ranges = [ranges_by_start[key] for key in sorted(ranges_by_start)]

    merged_ip_ranges = []
    last_end_int = None
    for ip_range in sorted_ranges:
        if (last_end_int is not None and
                last_end_int + 1 == ip_into_int(ip_range['start'])):
            # Exactly adjacent to the previous merged range: extend it.
            merged_ip_ranges[-1]['end'] = ip_range['end']
        else:
            merged_ip_ranges.append(ip_range)
        # Track the end of the range we just extended or appended.
        last_end_int = ip_into_int(merged_ip_ranges[-1]['end'])
    return merged_ip_ranges
+
+
+def _split_ip_ranges(ip_ranges):
+ ip_ranges_start = set()
+ ip_ranges_end = set()
+ if not ip_ranges:
+ return (ip_ranges_start, ip_ranges_end)
+
+ for ip_range in ip_ranges:
+ ip_ranges_start.add(ip_range['start'])
+ ip_ranges_end.add(ip_range['end'])
+
+ return (ip_ranges_start, ip_ranges_end)
+
+
+# [{'start':'192.168.0.10', 'end':'192.168.0.20'},
+# {'start':'192.168.0.21', 'end':'192.168.0.22'}] and
+# [{'start':'192.168.0.10', 'end':'192.168.0.22'}] is equal here
def is_ip_ranges_equal(ip_ranges1, ip_ranges2):
    """Compare two range lists after merging adjacent ranges.

    [{'start':'192.168.0.10', 'end':'192.168.0.21'},
     {'start':'192.168.0.22', 'end':'192.168.0.30'}] and
    [{'start':'192.168.0.10', 'end':'192.168.0.30'}] count as equal.
    Inputs are deep-copied first because merging mutates range dicts.
    """
    if not ip_ranges1 and not ip_ranges2:
        return True
    if not ip_ranges1 or not ip_ranges2:
        return False
    merged1 = merge_ip_ranges(copy.deepcopy(ip_ranges1))
    merged2 = merge_ip_ranges(copy.deepcopy(ip_ranges2))
    starts1, ends1 = _split_ip_ranges(merged1)
    starts2, ends2 = _split_ip_ranges(merged2)
    return starts1 == starts2 and ends1 == ends2
+
+
def get_dvs_interfaces(host_interfaces):
    """Return the interfaces whose 'vswitch_type' is 'dvs'.

    *host_interfaces* may be a list of dicts, or a string (or list of
    strings) containing their repr.
    NOTE(review): eval() on serialized data is unsafe if the input can
    come from an untrusted source; ast.literal_eval would be safer.
    """
    if not isinstance(host_interfaces, list):
        host_interfaces = eval(host_interfaces)
    dvs_interfaces = []
    for iface in host_interfaces:
        if not isinstance(iface, dict):
            iface = eval(iface)
        if iface.get('vswitch_type') == 'dvs':
            dvs_interfaces.append(iface)
    return dvs_interfaces
+
+
def get_clc_pci_info(pci_info):
    """Return the PCI addresses of Coleto Creek (CLC) devices.

    A line matches when it mentions the device by name or by its
    vendor:device id 8086:0435; the first whitespace-separated token of
    a matching line is taken as the PCI address.
    """
    flags = ('Intel Corporation Coleto Creek PCIe Endpoint', '8086:0435')
    return [pci.split()[0] for pci in pci_info
            if any(flag in pci for flag in flags)]
+
+
def cpu_str_to_list(spec):
    """Parse a CPU set specification.

    :param spec: cpu set string eg "1-4,^3,6"

    Each element in the list is either a single
    CPU number, a range of CPU numbers, or a
    caret followed by a CPU number to be excluded
    from a previous range.

    :returns: sorted list of included CPU ids
    :raises exception.Invalid: on a malformed element
    """
    if not spec:
        return []

    included = set()
    excluded = set()
    for token in spec.split(','):
        token = token.strip()
        # Tolerate empty fields produced by consecutive commas.
        if not token:
            continue
        # Note the count limit in the .split() call
        bounds = token.split('-', 1)
        if len(bounds) > 1:
            # A "low-high" range; convert both ends to ints.
            try:
                low, high = [int(part.strip()) for part in bounds]
            except ValueError:
                raise exception.Invalid(_("Invalid range expression %r")
                                        % token)
            if low > high:
                raise exception.Invalid(_("Invalid range expression %r")
                                        % token)
            included |= set(range(low, high + 1))
        elif token[0] == '^':
            # Exclusion rule for a single CPU id.
            try:
                excluded.add(int(token[1:].strip()))
            except ValueError:
                raise exception.Invalid(_("Invalid exclusion "
                                          "expression %r") % token)
        else:
            # A single CPU id to include.
            try:
                included.add(int(token))
            except ValueError:
                raise exception.Invalid(_("Invalid inclusion "
                                          "expression %r") % token)

    # Exclusions win over inclusions.
    return sorted(included - excluded)
+
+
def cpu_list_to_str(cpu_list):
    """Render a list of CPU ids as a compact range string.

    :param cpu_list: eg [1, 2, 3, 4, 6, 7]; note it is sorted in place

    :returns: a string of CPU ranges, eg "1-4,6,7" (runs of three or
        more become "low-high"; shorter runs are comma-separated)
    """
    if not cpu_list:
        return ''

    cpu_list.sort()
    # Gather runs of consecutive CPU ids.
    runs = [[cpu_list[0]]]
    for cpu in cpu_list[1:]:
        if cpu == runs[-1][-1] + 1:
            runs[-1].append(cpu)
        else:
            runs.append([cpu])

    pieces = []
    for run in runs:
        if len(run) > 2:
            pieces.append('%s-%s' % (run[0], run[-1]))
        else:
            pieces.append(','.join(str(cpu) for cpu in run))
    return ','.join(pieces)
+
+
def simple_subprocess_call(cmd):
    """Run *cmd* through the shell, discarding output.

    :param cmd: shell command string
    :returns: the command's exit status
    """
    return subprocess.call(cmd, shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
+
+
def translate_quotation_marks_for_shell(orig_str):
    """Backslash-escape double quotes so *orig_str* can be embedded in a
    double-quoted shell argument; strings without quotes pass through."""
    if '"' not in orig_str:
        return orig_str
    return orig_str.replace('"', '\\"')
+
+
def translate_marks_4_sed_command(ori_str):
    """Backslash-escape '/', '.' and '"' so *ori_str* can be used inside
    a sed expression."""
    escaped = ori_str
    for mark, replacement in (('/', '\\/'), ('.', '\\.'), ('"', '\\"')):
        escaped = escaped.replace(mark, replacement)
    return escaped
diff --git a/api/escalator/common/wsgi.py b/api/escalator/common/wsgi.py
new file mode 100644
index 0000000..8884ea4
--- /dev/null
+++ b/api/escalator/common/wsgi.py
@@ -0,0 +1,903 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack Foundation
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility methods for working with WSGI servers
+"""
+from __future__ import print_function
+
+import errno
+import functools
+import os
+import signal
+import sys
+import time
+
+import eventlet
+from eventlet.green import socket
+from eventlet.green import ssl
+import eventlet.greenio
+import eventlet.wsgi
+from oslo_serialization import jsonutils
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_log import loggers
+import routes
+import routes.middleware
+import six
+import webob.dec
+import webob.exc
+from webob import multidict
+
+from escalator.common import exception
+from escalator.common import utils
+from escalator import i18n
+
+
+_ = i18n._
+_LE = i18n._LE
+_LI = i18n._LI
+_LW = i18n._LW
+
# Options controlling the address/port the API server binds to.
bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server.  Useful when '
                      'selecting a particular network interface.')),
    cfg.IntOpt('bind_port',
               help=_('The port on which the server will listen.')),
]

# Options for the listening TCP/SSL socket itself.
socket_opts = [
    cfg.IntOpt('backlog', default=4096,
               help=_('The backlog value that will be used when creating the '
                      'TCP listener socket.')),
    cfg.IntOpt('tcp_keepidle', default=600,
               help=_('The value for the socket option TCP_KEEPIDLE.  This is '
                      'the time in seconds that the connection must be idle '
                      'before TCP starts sending keepalive probes.')),
    cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify '
                                 'connecting clients.')),
    cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API '
                                   'server securely.')),
    cfg.StrOpt('key_file', help=_('Private key file to use when starting API '
                                  'server securely.')),
]

# Options for the eventlet-based worker processes.
eventlet_opts = [
    cfg.IntOpt('workers', default=processutils.get_worker_count(),
               help=_('The number of child process workers that will be '
                      'created to service requests. The default will be '
                      'equal to the number of CPUs available.')),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      'Keystone v3 API with big service catalogs')),
    cfg.BoolOpt('http_keepalive', default=True,
                help=_('If False, server will return the header '
                       '"Connection: close", '
                       'If True, server will return "Connection: Keep-Alive" '
                       'in its responses. In order to close the client socket '
                       'connection explicitly after the response is sent and '
                       'read successfully by the client, you simply have to '
                       'set this option to False when you create a wsgi '
                       'server.')),
]

# Options for the (optional) request profiler, registered under the
# [profiler] config group below.
profiler_opts = [
    cfg.BoolOpt("enabled", default=False,
                help=_('If False fully disable profiling feature.')),
    cfg.BoolOpt("trace_sqlalchemy", default=False,
                help=_("If False doesn't trace SQL requests."))
]


LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
CONF.register_opts(profiler_opts, group="profiler")

# Pools created by get_asynchronous_eventlet_pool(); kept so a graceful
# shutdown can wait on outstanding work.
ASYNC_EVENTLET_THREAD_POOL_LIST = []
+
+
def get_bind_addr(default_port=None):
    """Return the host and port to bind to."""
    port = CONF.bind_port or default_port
    return (CONF.bind_host, port)
+
+
def ssl_wrap_socket(sock):
    """
    Wrap an existing socket in SSL

    :param sock: non-SSL socket to wrap

    :returns: An SSL wrapped socket
    """
    # Fail early if the configured key/cert pair is unusable.
    utils.validate_key_cert(CONF.key_file, CONF.cert_file)

    ssl_kwargs = {
        'server_side': True,
        'certfile': CONF.cert_file,
        'keyfile': CONF.key_file,
        # Client certificates are only required when a CA file is given.
        'cert_reqs': ssl.CERT_REQUIRED if CONF.ca_file else ssl.CERT_NONE,
    }
    if CONF.ca_file:
        ssl_kwargs['ca_certs'] = CONF.ca_file

    return ssl.wrap_socket(sock, **ssl_kwargs)
+
+
def get_socket(default_port):
    """
    Bind socket to bind ip:port in conf

    note: Mostly comes from Swift with a few small changes...

    :param default_port: port to bind to if none is specified in conf

    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = get_bind_addr(default_port)

    # TODO(jaypipes): eventlet's greened socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    address_family = [
        addr[0] for addr in socket.getaddrinfo(bind_addr[0],
                                               bind_addr[1],
                                               socket.AF_UNSPEC,
                                               socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]

    # SSL mode requires both halves of the key pair to be configured.
    use_ssl = CONF.key_file or CONF.cert_file
    if use_ssl and (not CONF.key_file or not CONF.cert_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    # The functional test suite may hand us a pre-bound socket through an
    # environment variable; prefer it when present.
    sock = utils.get_test_suite_socket()
    retry_until = time.time() + 30

    # Retry for up to 30s: a restarting predecessor may not have released
    # the address yet (EADDRINUSE).
    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr,
                                   backlog=CONF.backlog,
                                   family=address_family)
        except socket.error as err:
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError(_("Could not bind to %(host)s:%(port)s after"
                             " trying for 30 seconds") %
                           {'host': bind_addr[0],
                            'port': bind_addr[1]})

    return sock
+
+
def set_eventlet_hub():
    """Select an eventlet hub, preferring 'poll' with a 'selects'
    fallback; raise WorkerCreationFailure when neither is usable."""
    for hub in ('poll', 'selects'):
        try:
            eventlet.hubs.use_hub(hub)
            return
        except Exception:
            continue
    msg = _("eventlet 'poll' nor 'selects' hubs are available "
            "on this platform")
    raise exception.WorkerCreationFailure(
        reason=msg)
+
+
def get_asynchronous_eventlet_pool(size=1000):
    """Return eventlet pool to caller.

    Also store pools created in global list, to wait on
    it after getting signal for graceful shutdown.

    :param size: eventlet pool size
    :returns: eventlet pool
    """
    pool = eventlet.GreenPool(size=size)
    # Remember the pool so shutdown code can drain it later.
    ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool)
    return pool
+
+
+class Server(object):
+ """Server class to manage multiple WSGI sockets and applications.
+ """
+
    def __init__(self, threads=1000):
        """
        :param threads: size of the greenthread pool used to service
            requests within a single worker process
        """
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        # Adapter letting eventlet.wsgi write its log lines to our logger.
        self._wsgi_logger = loggers.WritableLogger(self._logger)
        self.threads = threads
        # Worker pids; stale_children presumably holds workers left over
        # from a config reload -- populated elsewhere in this class.
        self.children = set()
        self.stale_children = set()
        self.running = True
        self.pgid = os.getpid()
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            # NOTE(flaper87): When running escalator-control,
            # (escalator's functional tests, for example)
            # setpgid fails with EPERM as escalator-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running escalator-(api) is safe and
            # shouldn't raise any error here.
            self.pgid = 0
+
+ def hup(self, *args):
+ """
+ Reloads configuration files with zero down time
+ """
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ raise exception.SIGHUPInterrupt
+
+ def kill_children(self, *args):
+ """Kills the entire process group."""
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ self.running = False
+ os.killpg(self.pgid, signal.SIGTERM)
+
+ def start(self, application, default_port):
+ """
+ Run a WSGI server with the given application.
+
+ :param application: The application to be run in the WSGI server
+ :param default_port: Port to bind to if none is specified in conf
+ """
+ self.application = application
+ self.default_port = default_port
+ self.configure()
+ self.start_wsgi()
+
+ def start_wsgi(self):
+
+ if CONF.workers == 0:
+ # Useful for profiling, test, debug etc.
+ self.pool = self.create_pool()
+ self.pool.spawn_n(self._single_run, self.application, self.sock)
+ return
+ else:
+ LOG.info(_LI("Starting %d workers") % CONF.workers)
+ signal.signal(signal.SIGTERM, self.kill_children)
+ signal.signal(signal.SIGINT, self.kill_children)
+ signal.signal(signal.SIGHUP, self.hup)
+ while len(self.children) < CONF.workers:
+ self.run_child()
+
+ def create_pool(self):
+ return eventlet.GreenPool(size=self.threads)
+
+ def _remove_children(self, pid):
+ if pid in self.children:
+ self.children.remove(pid)
+ LOG.info(_LI('Removed dead child %s') % pid)
+ elif pid in self.stale_children:
+ self.stale_children.remove(pid)
+ LOG.info(_LI('Removed stale child %s') % pid)
+ else:
+ LOG.warn(_LW('Unrecognised child %s') % pid)
+
+ def _verify_and_respawn_children(self, pid, status):
+ if len(self.stale_children) == 0:
+ LOG.debug('No stale children')
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
+ LOG.error(_LE('Not respawning child %d, cannot '
+ 'recover from termination') % pid)
+ if not self.children and not self.stale_children:
+ LOG.info(
+ _LI('All workers have terminated. Exiting'))
+ self.running = False
+ else:
+ if len(self.children) < CONF.workers:
+ self.run_child()
+
+ def wait_on_children(self):
+ while self.running:
+ try:
+ pid, status = os.wait()
+ if os.WIFEXITED(status) or os.WIFSIGNALED(status):
+ self._remove_children(pid)
+ self._verify_and_respawn_children(pid, status)
+ except OSError as err:
+ if err.errno not in (errno.EINTR, errno.ECHILD):
+ raise
+ except KeyboardInterrupt:
+ LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
+ break
+ except exception.SIGHUPInterrupt:
+ self.reload()
+ continue
+ eventlet.greenio.shutdown_safe(self.sock)
+ self.sock.close()
+ LOG.debug('Exited')
+
+ def configure(self, old_conf=None, has_changed=None):
+ """
+ Apply configuration settings
+
+ :param old_conf: Cached old configuration settings (if any)
+ :param has changed: callable to determine if a parameter has changed
+ """
+ eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
+ self.configure_socket(old_conf, has_changed)
+
+ def reload(self):
+ """
+ Reload and re-apply configuration settings
+
+ Existing child processes are sent a SIGHUP signal
+ and will exit after completing existing requests.
+ New child processes, which will have the updated
+ configuration, are spawned. This allows preventing
+ interruption to the service.
+ """
+ def _has_changed(old, new, param):
+ old = old.get(param)
+ new = getattr(new, param)
+ return (new != old)
+
+ old_conf = utils.stash_conf_values()
+ has_changed = functools.partial(_has_changed, old_conf, CONF)
+ CONF.reload_config_files()
+ os.killpg(self.pgid, signal.SIGHUP)
+ self.stale_children = self.children
+ self.children = set()
+
+ # Ensure any logging config changes are picked up
+ logging.setup(CONF, 'escalator')
+
+ self.configure(old_conf, has_changed)
+ self.start_wsgi()
+
+ def wait(self):
+ """Wait until all servers have completed running."""
+ try:
+ if self.children:
+ self.wait_on_children()
+ else:
+ self.pool.waitall()
+ except KeyboardInterrupt:
+ pass
+
+ def run_child(self):
+ def child_hup(*args):
+ """Shuts down child processes, existing requests are handled."""
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ eventlet.wsgi.is_accepting = False
+ self.sock.close()
+
+ pid = os.fork()
+ if pid == 0:
+ signal.signal(signal.SIGHUP, child_hup)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ # ignore the interrupt signal to avoid a race whereby
+ # a child worker receives the signal before the parent
+ # and is respawned unnecessarily as a result
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ # The child has no need to stash the unwrapped
+ # socket, and the reference prevents a clean
+ # exit on sighup
+ self._sock = None
+ self.run_server()
+ LOG.info(_LI('Child %d exiting normally') % os.getpid())
+ # self.pool.waitall() is now called in wsgi's server so
+ # it's safe to exit here
+ sys.exit(0)
+ else:
+ LOG.info(_LI('Started child %s') % pid)
+ self.children.add(pid)
+
+ def run_server(self):
+ """Run a WSGI server."""
+ if cfg.CONF.pydev_worker_debug_host:
+ utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
+ cfg.CONF.pydev_worker_debug_port)
+
+ eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
+ self.pool = self.create_pool()
+ try:
+ eventlet.wsgi.server(self.sock,
+ self.application,
+ log=self._wsgi_logger,
+ custom_pool=self.pool,
+ debug=False,
+ keepalive=CONF.http_keepalive)
+ except socket.error as err:
+ if err[0] != errno.EINVAL:
+ raise
+
+ # waiting on async pools
+ if ASYNC_EVENTLET_THREAD_POOL_LIST:
+ for pool in ASYNC_EVENTLET_THREAD_POOL_LIST:
+ pool.waitall()
+
+ def _single_run(self, application, sock):
+ """Start a WSGI server in a new green thread."""
+ LOG.info(_LI("Starting single process server"))
+ eventlet.wsgi.server(sock, application, custom_pool=self.pool,
+ log=self._wsgi_logger,
+ debug=False,
+ keepalive=CONF.http_keepalive)
+
+ def configure_socket(self, old_conf=None, has_changed=None):
+ """
+ Ensure a socket exists and is appropriately configured.
+
+ This function is called on start up, and can also be
+ called in the event of a configuration reload.
+
+ When called for the first time a new socket is created.
+ If reloading and either bind_host or bind port have been
+ changed the existing socket must be closed and a new
+ socket opened (laws of physics).
+
+ In all other cases (bind_host/bind_port have not changed)
+ the existing socket is reused.
+
+ :param old_conf: Cached old configuration settings (if any)
+ :param has changed: callable to determine if a parameter has changed
+ """
+ # Do we need a fresh socket?
+ new_sock = (old_conf is None or (
+ has_changed('bind_host') or
+ has_changed('bind_port')))
+ # Will we be using https?
+ use_ssl = not (not CONF.cert_file or not CONF.key_file)
+ # Were we using https before?
+ old_use_ssl = (old_conf is not None and not (
+ not old_conf.get('key_file') or
+ not old_conf.get('cert_file')))
+ # Do we now need to perform an SSL wrap on the socket?
+ wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
+ # Do we now need to perform an SSL unwrap on the socket?
+ unwrap_sock = use_ssl is False and old_use_ssl is True
+
+ if new_sock:
+ self._sock = None
+ if old_conf is not None:
+ self.sock.close()
+ _sock = get_socket(self.default_port)
+ _sock.setsockopt(socket.SOL_SOCKET,
+ socket.SO_REUSEADDR, 1)
+ # sockets can hang around forever without keepalive
+ _sock.setsockopt(socket.SOL_SOCKET,
+ socket.SO_KEEPALIVE, 1)
+ self._sock = _sock
+
+ if wrap_sock:
+ self.sock = ssl_wrap_socket(self._sock)
+
+ if unwrap_sock:
+ self.sock = self._sock
+
+ if new_sock and not use_ssl:
+ self.sock = self._sock
+
+ # Pick up newly deployed certs
+ if old_conf is not None and use_ssl is True and old_use_ssl is True:
+ if has_changed('cert_file') or has_changed('key_file'):
+ utils.validate_key_cert(CONF.key_file, CONF.cert_file)
+ if has_changed('cert_file'):
+ self.sock.certfile = CONF.cert_file
+ if has_changed('key_file'):
+ self.sock.keyfile = CONF.key_file
+
+ if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
+ # This option isn't available in the OS X version of eventlet
+ if hasattr(socket, 'TCP_KEEPIDLE'):
+ self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
+ CONF.tcp_keepidle)
+
+ if old_conf is not None and has_changed('backlog'):
+ self.sock.listen(CONF.backlog)
+
+
+class Middleware(object):
+ """
+ Base WSGI middleware wrapper. These classes require an application to be
+ initialized that will be called next. By default the middleware will
+ simply call its wrapped app, or you can override __call__ to customize its
+ behavior.
+ """
+
+ def __init__(self, application):
+ self.application = application
+
+ @classmethod
+ def factory(cls, global_conf, **local_conf):
+ def filter(app):
+ return cls(app)
+ return filter
+
+ def process_request(self, req):
+ """
+ Called on each request.
+
+ If this returns None, the next application down the stack will be
+ executed. If it returns a response then that response will be returned
+ and execution will stop here.
+
+ """
+ return None
+
+ def process_response(self, response):
+ """Do whatever you'd like to the response."""
+ return response
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ response = self.process_request(req)
+ if response:
+ return response
+ response = req.get_response(self.application)
+ response.request = req
+ try:
+ return self.process_response(response)
+ except webob.exc.HTTPException as e:
+ return e
+
+
+class Debug(Middleware):
+ """
+ Helper class that can be inserted into any WSGI application chain
+ to get information about the request and response.
+ """
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ print(("*" * 40) + " REQUEST ENVIRON")
+ for key, value in req.environ.items():
+ print(key, "=", value)
+ print('')
+ resp = req.get_response(self.application)
+
+ print(("*" * 40) + " RESPONSE HEADERS")
+ for (key, value) in six.iteritems(resp.headers):
+ print(key, "=", value)
+ print('')
+
+ resp.app_iter = self.print_generator(resp.app_iter)
+
+ return resp
+
+ @staticmethod
+ def print_generator(app_iter):
+ """
+ Iterator that prints the contents of a wrapper string iterator
+ when iterated.
+ """
+ print(("*" * 40) + " BODY")
+ for part in app_iter:
+ sys.stdout.write(part)
+ sys.stdout.flush()
+ yield part
+ print()
+
+
+class APIMapper(routes.Mapper):
+ """
+ Handle route matching when url is '' because routes.Mapper returns
+ an error in this case.
+ """
+
+ def routematch(self, url=None, environ=None):
+ if url is "":
+ result = self._match("", environ)
+ return result[0], result[1]
+ return routes.Mapper.routematch(self, url, environ)
+
+
+class RejectMethodController(object):
+
+ def reject(self, req, allowed_methods, *args, **kwargs):
+ LOG.debug("The method %s is not allowed for this resource" %
+ req.environ['REQUEST_METHOD'])
+ raise webob.exc.HTTPMethodNotAllowed(
+ headers=[('Allow', allowed_methods)])
+
+
+class Router(object):
+    """
+    WSGI middleware that maps incoming requests to WSGI apps.
+    """
+
+    def __init__(self, mapper):
+        """
+        Create a router for the given routes.Mapper.
+
+        Each route in `mapper` must specify a 'controller', which is a
+        WSGI app to call. You'll probably want to specify an 'action' as
+        well and have your controller be a wsgi.Controller, who will route
+        the request to the action method.
+
+        Examples:
+          mapper = routes.Mapper()
+          sc = ServerController()
+
+          # Explicit mapping of one route to a controller+action
+          mapper.connect(None, "/svrlist", controller=sc, action="list")
+
+          # Actions are all implicitly defined
+          mapper.resource("server", "servers", controller=sc)
+
+          # Pointing to an arbitrary WSGI app. You can specify the
+          # {path_info:.*} parameter so the target app can be handed just that
+          # section of the URL.
+          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
+        """
+        # Redirect the empty path to "/" so the mapper can match it.
+        mapper.redirect("", "/")
+        self.map = mapper
+        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+                                                          self.map)
+
+    @classmethod
+    def factory(cls, global_conf, **local_conf):
+        # Paste-deploy entry point: build a Router over an empty APIMapper.
+        return cls(APIMapper())
+
+    @webob.dec.wsgify
+    def __call__(self, req):
+        """
+        Route the incoming request to a controller based on self.map.
+        If no match, return either a 404(Not Found) or 501(Not Implemented).
+        """
+        # Returning the RoutesMiddleware lets it perform the match and
+        # then invoke _dispatch with the routing args in req.environ.
+        return self._router
+
+    @staticmethod
+    @webob.dec.wsgify
+    def _dispatch(req):
+        """
+        Called by self._router after matching the incoming request to a route
+        and putting the information into req.environ. Either returns 404,
+        501, or the routed WSGI app's response.
+        """
+        match = req.environ['wsgiorg.routing_args'][1]
+        if not match:
+            implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT',
+                                        'DELETE', 'PATCH']
+            if req.environ['REQUEST_METHOD'] not in implemented_http_methods:
+                return webob.exc.HTTPNotImplemented()
+            else:
+                return webob.exc.HTTPNotFound()
+        app = match['controller']
+        return app
+
+
+class Request(webob.Request):
+ """Add some OpenStack API-specific logic to the base webob.Request."""
+
+ def best_match_content_type(self):
+ """Determine the requested response content-type."""
+ supported = ('application/json',)
+ bm = self.accept.best_match(supported)
+ return bm or 'application/json'
+
+ def get_content_type(self, allowed_content_types):
+ """Determine content type of the request body."""
+ if "Content-Type" not in self.headers:
+ raise exception.InvalidContentType(content_type=None)
+
+ content_type = self.content_type
+
+ if content_type not in allowed_content_types:
+ raise exception.InvalidContentType(content_type=content_type)
+ else:
+ return content_type
+
+ def best_match_language(self):
+ """Determines best available locale from the Accept-Language header.
+
+ :returns: the best language match or None if the 'Accept-Language'
+ header was not available in the request.
+ """
+ if not self.accept_language:
+ return None
+ langs = i18n.get_available_languages('escalator')
+ return self.accept_language.best_match(langs)
+
+ def get_content_range(self):
+ """Return the `Range` in a request."""
+ range_str = self.headers.get('Content-Range')
+ if range_str is not None:
+ range_ = webob.byterange.ContentRange.parse(range_str)
+ if range_ is None:
+ msg = _('Malformed Content-Range header: %s') % range_str
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+ return range_
+
+
+class JSONRequestDeserializer(object):
+ valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate',
+ 'gzip', 'identity'])
+
+ def has_body(self, request):
+ """
+ Returns whether a Webob.Request object will possess an entity body.
+
+ :param request: Webob.Request object
+ """
+ request_encoding = request.headers.get('transfer-encoding', '').lower()
+ is_valid_encoding = request_encoding in self.valid_transfer_encoding
+ if is_valid_encoding and request.is_body_readable:
+ return True
+ elif request.content_length > 0:
+ return True
+
+ return False
+
+ @staticmethod
+ def _sanitizer(obj):
+ """Sanitizer method that will be passed to jsonutils.loads."""
+ return obj
+
+ def from_json(self, datastring):
+ try:
+ return jsonutils.loads(datastring, object_hook=self._sanitizer)
+ except ValueError:
+ msg = _('Malformed JSON in request body.')
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ def default(self, request):
+ if self.has_body(request):
+ return {'body': self.from_json(request.body)}
+ else:
+ return {}
+
+
+class JSONResponseSerializer(object):
+
+ def _sanitizer(self, obj):
+ """Sanitizer method that will be passed to jsonutils.dumps."""
+ if hasattr(obj, "to_dict"):
+ return obj.to_dict()
+ if isinstance(obj, multidict.MultiDict):
+ return obj.mixed()
+ return jsonutils.to_primitive(obj)
+
+ def to_json(self, data):
+ return jsonutils.dumps(data, default=self._sanitizer)
+
+ def default(self, response, result):
+ response.content_type = 'application/json'
+ response.body = self.to_json(result)
+
+
+def translate_exception(req, e):
+    """Translates all translatable elements of the given exception."""
+
+    # The RequestClass attribute in the webob.dec.wsgify decorator
+    # does not guarantee that the request object will be a particular
+    # type; this check is therefore necessary.
+    if not hasattr(req, "best_match_language"):
+        return e
+
+    locale = req.best_match_language()
+
+    if isinstance(e, webob.exc.HTTPError):
+        e.explanation = i18n.translate(e.explanation, locale)
+        e.detail = i18n.translate(e.detail, locale)
+    # NOTE(review): this check sits OUTSIDE the HTTPError branch, so a
+    # body_template attribute is translated on any exception type.
+    # Upstream Glance nests it under the isinstance check — confirm the
+    # dedent here is intentional.
+    if getattr(e, 'body_template', None):
+        e.body_template = i18n.translate(e.body_template, locale)
+    return e
+
+
+class Resource(object):
+    """
+    WSGI app that handles (de)serialization and controller dispatch.
+
+    Reads routing information supplied by RoutesMiddleware and calls
+    the requested action method upon its deserializer, controller,
+    and serializer. Those three objects may implement any of the basic
+    controller action methods (create, update, show, index, delete)
+    along with any that may be specified in the api router. A 'default'
+    method may also be implemented to be used in place of any
+    non-implemented actions. Deserializer methods must accept a request
+    argument and return a dictionary. Controller methods must accept a
+    request argument. Additionally, they must also accept keyword
+    arguments that represent the keys returned by the Deserializer. They
+    may raise a webob.exc exception or return a dict, which will be
+    serialized by requested content type.
+    """
+
+    def __init__(self, controller, deserializer=None, serializer=None):
+        """
+        :param controller: object that implement methods created by routes lib
+        :param deserializer: object that supports webob request deserialization
+                             through controller-like actions
+        :param serializer: object that supports webob response serialization
+                           through controller-like actions
+        """
+        self.controller = controller
+        self.serializer = serializer or JSONResponseSerializer()
+        self.deserializer = deserializer or JSONRequestDeserializer()
+
+    @webob.dec.wsgify(RequestClass=Request)
+    def __call__(self, request):
+        """WSGI method that controls (de)serialization and method dispatch."""
+        action_args = self.get_action_args(request.environ)
+        action = action_args.pop('action', None)
+
+        try:
+            deserialized_request = self.dispatch(self.deserializer,
+                                                 action, request)
+            action_args.update(deserialized_request)
+            action_result = self.dispatch(self.controller, action,
+                                          request, **action_args)
+        except webob.exc.WSGIHTTPException as e:
+            exc_info = sys.exc_info()
+            # NOTE(review): Python 2-only three-expression raise below;
+            # under Python 3 this must become six.reraise() or
+            # raise ...with_traceback(). Confirm target runtime.
+            raise translate_exception(request, e), None, exc_info[2]
+
+        try:
+            response = webob.Response(request=request)
+            self.dispatch(self.serializer, action, response, action_result)
+            return response
+        except webob.exc.WSGIHTTPException as e:
+            return translate_exception(request, e)
+        except webob.exc.HTTPException as e:
+            return e
+        # return unserializable result (typically a webob exc)
+        except Exception:
+            return action_result
+
+    def dispatch(self, obj, action, *args, **kwargs):
+        """Find action-specific method on self and call it."""
+        try:
+            method = getattr(obj, action)
+        except AttributeError:
+            # Fall back to the object's 'default' action handler.
+            method = getattr(obj, 'default')
+
+        return method(*args, **kwargs)
+
+    def get_action_args(self, request_environment):
+        """Parse dictionary created by routes library."""
+        try:
+            args = request_environment['wsgiorg.routing_args'][1].copy()
+        except Exception:
+            # No routing args at all -- nothing to dispatch on.
+            return {}
+
+        try:
+            del args['controller']
+        except KeyError:
+            pass
+
+        try:
+            del args['format']
+        except KeyError:
+            pass
+
+        return args
diff --git a/api/escalator/context.py b/api/escalator/context.py
new file mode 100644
index 0000000..0fc8e3d
--- /dev/null
+++ b/api/escalator/context.py
@@ -0,0 +1,60 @@
+# Copyright 2011-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_context import context
+
+from escalator.api import policy
+
+
+class RequestContext(context.RequestContext):
+    """Stores information about the security context.
+
+    Stores how the user accesses the system, as well as additional request
+    information.
+
+    """
+
+    def __init__(self, roles=None,
+                 owner_is_tenant=True, service_catalog=None,
+                 policy_enforcer=None, **kwargs):
+        super(RequestContext, self).__init__(**kwargs)
+        self.roles = roles or []
+        # When owner_is_tenant is True, `owner` resolves to the tenant,
+        # otherwise to the user (see the owner property below).
+        self.owner_is_tenant = owner_is_tenant
+        self.service_catalog = service_catalog
+        self.policy_enforcer = policy_enforcer or policy.Enforcer()
+        # A non-admin context may still be elevated to admin by the
+        # policy engine's check_is_admin rule.
+        if not self.is_admin:
+            self.is_admin = self.policy_enforcer.check_is_admin(self)
+
+    def to_dict(self):
+        # Serialize for transport; policy_enforcer is deliberately not
+        # included (it is rebuilt by __init__ on from_dict()).
+        d = super(RequestContext, self).to_dict()
+        d.update({
+            'roles': self.roles,
+            'service_catalog': self.service_catalog,
+        })
+        return d
+
+    @classmethod
+    def from_dict(cls, values):
+        """Rebuild a context from the dict produced by to_dict()."""
+        return cls(**values)
+
+    @property
+    def owner(self):
+        """Return the owner to correlate with an image."""
+        return self.tenant if self.owner_is_tenant else self.user
+
+    @property
+    def can_see_deleted(self):
+        """Admins can see deleted by default"""
+        return self.show_deleted or self.is_admin
diff --git a/api/escalator/i18n.py b/api/escalator/i18n.py
new file mode 100644
index 0000000..56bfde3
--- /dev/null
+++ b/api/escalator/i18n.py
@@ -0,0 +1,31 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_i18n import TranslatorFactory # noqa
+
+# Module-wide translator factory for the "escalator" gettext domain.
+_translators = TranslatorFactory(domain='escalator')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
diff --git a/api/escalator/notifier.py b/api/escalator/notifier.py
new file mode 100644
index 0000000..1e6ea02
--- /dev/null
+++ b/api/escalator/notifier.py
@@ -0,0 +1,66 @@
+# Copyright 2011, OpenStack Foundation
+# Copyright 2012, Red Hat, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import oslo_messaging
+
+from escalator import i18n
+
+_ = i18n._
+_LE = i18n._LE
+
+# Options controlling outgoing oslo.messaging notifications; registered
+# on the global CONF immediately below.
+notifier_opts = [
+    cfg.StrOpt('default_publisher_id', default="image.localhost",
+               help='Default publisher_id for outgoing notifications.'),
+    cfg.ListOpt('disabled_notifications', default=[],
+                help='List of disabled notifications. A notification can be '
+                     'given either as a notification type to disable a single '
+                     'event, or as a notification group prefix to disable all '
+                     'events within a group. Example: if this config option '
+                     'is set to ["image.create", "metadef_namespace"], then '
+                     '"image.create" notification will not be sent after '
+                     'image is created and none of the notifications for '
+                     'metadefinition namespaces will be sent.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(notifier_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+def get_transport():
+    """Return an oslo.messaging transport built from the global CONF."""
+    return oslo_messaging.get_transport(CONF)
+
+
+class Notifier(object):
+ """Uses a notification strategy to send out messages about events."""
+
+ def __init__(self):
+ publisher_id = CONF.default_publisher_id
+ self._transport = get_transport()
+ self._notifier = oslo_messaging.Notifier(self._transport,
+ publisher_id=publisher_id)
+
+ def warn(self, event_type, payload):
+ self._notifier.warn({}, event_type, payload)
+
+ def info(self, event_type, payload):
+ self._notifier.info({}, event_type, payload)
+
+ def error(self, event_type, payload):
+ self._notifier.error({}, event_type, payload)
diff --git a/api/escalator/opts.py b/api/escalator/opts.py
new file mode 100644
index 0000000..21639e6
--- /dev/null
+++ b/api/escalator/opts.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2014 OpenStack Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import itertools
+
+import escalator.api.middleware.context
+import escalator.api.versions
+import escalator.common.config
+import escalator.common.rpc
+import escalator.common.wsgi
+import escalator.notifier
+
+__all__ = [
+ 'list_api_opts',
+]
+
+
+# (group, options) pairs consumed by list_api_opts(); a group of None
+# maps to oslo.config's [DEFAULT] section.
+_api_opts = [
+    (None, list(itertools.chain(
+        escalator.api.middleware.context.context_opts,
+        escalator.api.versions.versions_opts,
+        escalator.common.config.common_opts,
+        escalator.common.rpc.rpc_opts,
+        escalator.common.wsgi.bind_opts,
+        escalator.common.wsgi.eventlet_opts,
+        escalator.common.wsgi.socket_opts,
+        escalator.notifier.notifier_opts))),
+    ('task', escalator.common.config.task_opts),
+    ('paste_deploy', escalator.common.config.paste_deploy_opts)
+]
+
+
+def list_api_opts():
+ """Return a list of oslo_config options available in Escalator API service.
+
+ Each element of the list is a tuple. The first element is the name of the
+ group under which the list of elements in the second element will be
+ registered. A group name of None corresponds to the [DEFAULT] group in
+ config files.
+
+ This function is also discoverable via the 'escalator.api' entry point
+ under the 'oslo_config.opts' namespace.
+
+ The purpose of this is to allow tools like the Oslo sample config file
+ generator to discover the options exposed to users by escalator.
+
+ :returns: a list of (group_name, opts) tuples
+ """
+
+ return [(g, copy.deepcopy(o)) for g, o in _api_opts]
diff --git a/api/escalator/version.py b/api/escalator/version.py
new file mode 100644
index 0000000..ededbe6
--- /dev/null
+++ b/api/escalator/version.py
@@ -0,0 +1,18 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import pbr.version
+
+# Version metadata for the 'escalator' package, resolved by pbr from
+# package/git metadata at build time.
+version_info = pbr.version.VersionInfo('escalator')
diff --git a/api/etc/escalator-api-paste.ini b/api/etc/escalator-api-paste.ini
new file mode 100644
index 0000000..d8b1940
--- /dev/null
+++ b/api/etc/escalator-api-paste.ini
@@ -0,0 +1,23 @@
+# Use this pipeline for no auth - DEFAULT
+[pipeline:escalator-api]
+pipeline = unauthenticated-context rootapp
+
+[pipeline:escalator-api-keystone]
+pipeline = authtoken context rootapp
+
+[composite:rootapp]
+paste.composite_factory = escalator.api:root_app_factory
+/v1: apiv1app
+
+[app:apiv1app]
+paste.app_factory = escalator.api.v1.router:API.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = escalator.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+delay_auth_decision = true
+
+[filter:context]
+paste.filter_factory = escalator.api.middleware.context:ContextMiddleware.factory
diff --git a/api/etc/escalator-api.conf b/api/etc/escalator-api.conf
new file mode 100644
index 0000000..5287777
--- /dev/null
+++ b/api/etc/escalator-api.conf
@@ -0,0 +1,216 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the API server
+bind_host = 0.0.0.0
+
+# Port to bind the API server to
+bind_port = 19393
+
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/escalator/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# The number of child process workers that will be
+# created to service API requests. The default will be
+# equal to the number of CPUs available. (integer value)
+workers = 1
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the escalator-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# Public url to use for versions endpoint. The default is None,
+# which will use the request's host_url attribute to populate the URL base.
+# If Escalator is operating behind a proxy, you will want to change this to
+# represent the proxy's URL.
+#public_endpoint=<None>
+
+# http_keepalive option. If False, server will return the header
+# "Connection: close", If True, server will return "Connection: Keep-Alive"
+# in its responses. In order to close the client socket connection
+# explicitly after the response is sent and read successfully by the client,
+# you simply have to set this option to False when you create a wsgi server.
+#http_keepalive = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+
+# Digest algorithm which will be used for digital signature, the default is
+# sha1 in Kilo for a smooth upgrade process, and it will be updated with
+# sha256 in next release(L). Use command
+# "openssl list-message-digest-algorithms" to get the available algorithms
+# supported by the version of OpenSSL on the platform. Examples are 'sha1',
+# 'sha256', 'sha512', etc.
+#digest_algorithm = sha1
+
+
+
+# ============ Notification System Options =====================
+
+# Driver or drivers to handle sending notifications. Set to
+# 'messaging' to send notifications to a message queue.
+# notification_driver = noop
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# List of disabled notifications. A notification can be given either as a
+# notification type to disable a single event, or as a notification group
+# prefix to disable all events within a group.
+# Example: if this config option is set to
+# ["image.create", "metadef_namespace"], then "image.create" notification will
+# not be sent after image is created and none of the notifications for
+# metadefinition namespaces will be sent.
+# disabled_notifications = []
+
+# Messaging driver used for 'messaging' notifications driver
+# rpc_backend = 'rabbit'
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = guest
+rabbit_password = guest
+rabbit_virtual_host = /
+rabbit_notification_exchange = escalator
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = escalator
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# =============== Policy Options ==================================
+
+[oslo_policy]
+# The JSON file that defines policies.
+# Deprecated group/name - [DEFAULT]/policy_file
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# Deprecated group/name - [DEFAULT]/policy_default_rule
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths.
+# The file defined by policy_file must exist for these
+# directories to be searched.
+# Deprecated group/name - [DEFAULT]/policy_dirs
+#policy_dirs = policy.d
+
+# =============== Keystone Auth Options ============================
+
+identity_uri = http://127.0.0.1:35357
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USER%
+admin_password = %SERVICE_PASSWORD%
+revocation_cache_time = 10
+
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:escalator-api-keystone], you would configure the flavor below
+# as 'keystone'.
+#flavor=
+
+[profiler]
+# If False fully disable profiling feature.
+#enabled = False
+
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
diff --git a/api/etc/oslo-config-generator/escalator-api.conf b/api/etc/oslo-config-generator/escalator-api.conf
new file mode 100644
index 0000000..7f3bd46
--- /dev/null
+++ b/api/etc/oslo-config-generator/escalator-api.conf
@@ -0,0 +1,10 @@
+[DEFAULT]
+output_file = etc/escalator-api.conf.sample
+namespace = escalator.api
+namespace = oslo_concurrency
+namespace = oslo_messaging
+namespace = oslo_db
+namespace = oslo_db.concurrency
+namespace = oslo_policy
+namespace = keystoneclient.middleware.auth_token
+namespace = oslo_log
diff --git a/api/etc/policy.json b/api/etc/policy.json
new file mode 100644
index 0000000..4bea22d
--- /dev/null
+++ b/api/etc/policy.json
@@ -0,0 +1,5 @@
+{
+ "context_is_admin": "role:admin",
+ "default": ""
+
+}
diff --git a/api/etc/property-protections-policies.conf.sample b/api/etc/property-protections-policies.conf.sample
new file mode 100644
index 0000000..324daab
--- /dev/null
+++ b/api/etc/property-protections-policies.conf.sample
@@ -0,0 +1,34 @@
+# property-protections-policies.conf.sample
+#
+# This file is an example config file for when
+# property_protection_rule_format=policies is enabled.
+#
+# Specify regular expression for which properties will be protected in []
+# For each section, specify CRUD permissions. You may refer to policies defined
+# in policy.json.
+# The property rules will be applied in the order specified. Once
+# a match is found the remaining property rules will not be applied.
+#
+# WARNING:
+# * If the reg ex specified below does not compile, then
+# the escalator-api service fails to start. (Guide for reg ex python compiler
+# used:
+# http://docs.python.org/2/library/re.html#regular-expression-syntax)
+# * If an operation(create, read, update, delete) is not specified or misspelt
+# then the escalator-api service fails to start.
+# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY!
+#
+# NOTE: Only one policy can be specified per action. If multiple policies are
+# specified, then the escalator-api service fails to start.
+
+[^x_.*]
+create = default
+read = default
+update = default
+delete = default
+
+[.*]
+create = context_is_admin
+read = context_is_admin
+update = context_is_admin
+delete = context_is_admin
diff --git a/api/etc/property-protections-roles.conf.sample b/api/etc/property-protections-roles.conf.sample
new file mode 100644
index 0000000..3f9d6ef
--- /dev/null
+++ b/api/etc/property-protections-roles.conf.sample
@@ -0,0 +1,32 @@
+# property-protections-roles.conf.sample
+#
+# This file is an example config file for when
+# property_protection_rule_format=roles is enabled.
+#
+# Specify regular expression for which properties will be protected in []
+# For each section, specify CRUD permissions.
+# The property rules will be applied in the order specified. Once
+# a match is found the remaining property rules will not be applied.
+#
+# WARNING:
+# * If the reg ex specified below does not compile, then
+# escalator-api service will not start. (Guide for reg ex python compiler used:
+# http://docs.python.org/2/library/re.html#regular-expression-syntax)
+# * If an operation(create, read, update, delete) is not specified or misspelt
+# then the escalator-api service will not start.
+# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY!
+#
+# NOTE: Multiple roles can be specified for a given operation. These roles must
+# be comma separated.
+
+[^x_.*]
+create = admin,member
+read = admin,member
+update = admin,member
+delete = admin,member
+
+[.*]
+create = admin
+read = admin
+update = admin
+delete = admin
diff --git a/api/pylintrc b/api/pylintrc
new file mode 100644
index 0000000..2afb2e3
--- /dev/null
+++ b/api/pylintrc
@@ -0,0 +1,27 @@
+[Messages Control]
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+# W0622: Redefining id is fine.
+disable-msg=W0511,W0142,W0622
+
+[Basic]
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
+
+[Design]
+max-public-methods=100
+min-public-methods=0
+max-args=6
diff --git a/api/requirements.txt b/api/requirements.txt
new file mode 100644
index 0000000..46ef21f
--- /dev/null
+++ b/api/requirements.txt
@@ -0,0 +1,33 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pbr>=1.6 # Apache-2.0
+
+# < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983
+eventlet>=0.16.1,!=0.17.0
+PasteDeploy>=1.5.0
+Routes>=1.12.3,!=2.0
+WebOb>=1.2.3
+httplib2>=0.7.5
+oslo.concurrency>=3.7 # Apache-2.0
+oslo.context>=0.2.0 # Apache-2.0
+oslo.utils>=3.7 # Apache-2.0
+taskflow>=0.7.1
+
+# For paste.util.template used in keystone.common.template
+Paste
+
+pyOpenSSL>=0.11
+# Required by openstack.common libraries
+six>=1.9.0
+
+oslo.db>=1.7.0 # Apache-2.0
+oslo.i18n>=1.5.0 # Apache-2.0
+oslo.log>=1.0.0 # Apache-2.0
+oslo.messaging>=1.8.0 # Apache-2.0
+oslo.policy>=0.3.1 # Apache-2.0
+oslo.serialization>=1.4.0 # Apache-2.0
+oslo.service>=0.1.0 # Apache-2.0
+osprofiler>=0.3.0 # Apache-2.0
+
diff --git a/api/setup.cfg b/api/setup.cfg
new file mode 100644
index 0000000..af7a2da
--- /dev/null
+++ b/api/setup.cfg
@@ -0,0 +1,45 @@
+[metadata]
+name = escalator
+summary = OPNFV smooth upgrade service
+description-file =
+ README.rst
+author = OPNFV
+author-email = opnfv-tech-discuss@lists.opnfv.org
+home-page = http://www.opnfv.org/
+classifier =
+ Environment :: OPNFV
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
+
+[entry_points]
+console_scripts =
+ escalator-api = escalator.cmd.api:main
+oslo_config.opts =
+ escalator.api = escalator.opts:list_api_opts
+
+[build_sphinx]
+all_files = 1
+
+[compile_catalog]
+directory = escalator/locale
+domain = escalator
+
+[update_catalog]
+domain = escalator
+output_dir = escalator/locale
+input_file = escalator/locale/escalator.pot
+
+[extract_messages]
+keywords = _ gettext ngettext l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = escalator/locale/escalator.pot
+
diff --git a/api/setup.py b/api/setup.py
new file mode 100644
index 0000000..7363757
--- /dev/null
+++ b/api/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/api/tox.ini b/api/tox.ini
new file mode 100644
index 0000000..65f48a8
--- /dev/null
+++ b/api/tox.ini
@@ -0,0 +1,49 @@
+[tox]
+minversion = 1.6
+envlist = py27,py33,py34,pep8
+skipsdist = True
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+usedevelop = True
+install_command = pip install -U {opts} {packages}
+deps = -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+commands = lockutils-wrapper python setup.py testr --slowest --testr-args='{posargs}'
+whitelist_externals = bash
+
+[tox:jenkins]
+downloadcache = ~/cache/pip
+
+[testenv:pep8]
+commands =
+ flake8 {posargs}
+ # Check that .po and .pot files are valid:
+ bash -c "find escalator -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null"
+
+[testenv:cover]
+setenv = VIRTUAL_ENV={envdir}
+commands = python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$'
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:genconfig]
+commands =
+ oslo-config-generator --config-file etc/oslo-config-generator/escalator-api.conf
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[flake8]
+# TODO(dmllr): Analyze or fix the warnings blacklisted below
+# E711 comparison to None should be 'if cond is not None:'
+# E712 comparison to True should be 'if cond is True:' or 'if cond:'
+# H404 multi line docstring should start with a summary
+# H405 multi line docstring summary not separated with an empty line
+# H904 Wrap long lines in parentheses instead of a backslash
+ignore = E711,E712,H404,H405,H904,F841,F821,E265,F812,F402,E226,E731
+exclude = .venv,.git,.tox,dist,doc,etc,*escalator/locale*,*openstack/common*,*lib/python*,*egg,build,escalator/i18n.py
+
+[hacking]
+local-check-factory = escalator.hacking.checks.factory
diff --git a/ci/build.sh b/ci/build.sh
new file mode 100755
index 0000000..5a6ad73
--- /dev/null
+++ b/ci/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# TODO: Let JJB pass $WORKDIR instead of $BUILD_OUTPUT
+ESCALATORDIR=$1/../
+OPNFV_ARTIFACT_VERSION=$2
+
+cd ci/build_rpm
+./build_rpms.sh $ESCALATORDIR $OPNFV_ARTIFACT_VERSION
diff --git a/ci/build_rpm/Dockerfile b/ci/build_rpm/Dockerfile
new file mode 100644
index 0000000..48e254f
--- /dev/null
+++ b/ci/build_rpm/Dockerfile
@@ -0,0 +1,29 @@
+FROM centos:7
+LABEL escalator_image_version=1.0
+RUN yum -y update
+RUN yum -y install centos-release-openstack-newton
+RUN yum -y install \
+ wget \
+ coreutils \
+ which \
+ gawk \
+ grep \
+ git \
+ intltool \
+ make \
+ rpm \
+ rpm-build \
+ python-sphinx \
+ python2-oslo-sphinx \
+ python2-oslo-messaging \
+ python2-oslo-concurrency \
+ python2-oslo-sphinx \
+ python-devel \
+ python-d2to1 \
+ python-django-compressor \
+ python-django-openstack-auth \
+ python-django-pyscss \
+ python-lesscpy \
+ python-migrate \
+ python-pint \
+ python-routes
diff --git a/ci/build_rpm/build_rpms.sh b/ci/build_rpm/build_rpms.sh
new file mode 100755
index 0000000..81443ca
--- /dev/null
+++ b/ci/build_rpm/build_rpms.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+set -eux
+ESCALATORDIR=$1
+OPNFV_ARTIFACT_VERSION=$2
+
+function build_rpm_pkg {
+ # Clean up previous build results
+ rm -rf $ESCALATORDIR/build_output
+ mkdir -p $ESCALATORDIR/build_output
+
+ sudo docker build -t escalator_rpm .
+ sudo docker run --rm -v $ESCALATORDIR:/opt/escalator -v $CACHE_DIRECTORY:/home/cache -t escalator_rpm \
+ /opt/escalator/ci/build_rpm/build_rpms_docker.sh $OPNFV_ARTIFACT_VERSION
+
+ # Here to collect build result from $ESCALATORDIR/build_output
+}
+
+function cleanup_container {
+ containers_to_kill=$(sudo docker ps --filter "label=escalator_image_version" \
+ --format "{{.Names}}" -a)
+
+ if [[ -z "$containers_to_kill" ]]; then
+ echo "No containers to cleanup."
+ else
+ volumes_to_remove=$(sudo docker inspect -f \
+ '{{range .Mounts}} {{printf "%s\n" .Name }}{{end}}' \
+ ${containers_to_kill} | egrep -v '(^\s*$)' | sort | uniq)
+ echo "Stopping containers... $containers_to_kill"
+ sudo docker stop -t 2 ${containers_to_kill} >/dev/null 2>&1
+
+ echo "Removing containers... $containers_to_kill"
+ sudo docker rm -v -f ${containers_to_kill} >/dev/null 2>&1
+
+ if [[ -z "$volumes_to_remove" ]]; then
+ echo "No volumes to cleanup."
+ else
+ echo "Removing volumes... $volumes_to_remove"
+ sudo docker volume rm ${volumes_to_remove} >/dev/null 2>&1
+ fi
+ fi
+}
+
+function cleanup_docker_image {
+ images_to_delete=$(sudo docker images -a --filter "label=escalator_image_version" \
+ --format "{{.ID}}")
+
+ echo "Removing images... $images_to_delete"
+ if [[ -z "$images_to_delete" ]]; then
+ echo "No images to cleanup"
+ else
+ sudo docker rmi -f ${images_to_delete} >/dev/null 2>&1
+ fi
+}
+
+cleanup_container
+cleanup_docker_image
+build_rpm_pkg
diff --git a/ci/build_rpm/build_rpms_docker.sh b/ci/build_rpm/build_rpms_docker.sh
new file mode 100755
index 0000000..9b2454d
--- /dev/null
+++ b/ci/build_rpm/build_rpms_docker.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+OPNFV_ARTIFACT_VERSION=$1
+rpm_build_dir=/opt/escalator
+rpm_output_dir=$rpm_build_dir/build_output
+tmp_rpm_build_dir=/root/escalator
+
+cp -r $rpm_build_dir $tmp_rpm_build_dir
+cd $tmp_rpm_build_dir
+
+echo "#########################################################"
+echo " systemctl info: "
+echo "#########################################################"
+echo "###Uname: $(uname)"
+echo "###Hostname: $(hostname)"
+
+maxcount=3
+cnt=0
+rc=1
+while [ $cnt -lt $maxcount ] && [ $rc -ne 0 ]
+do
+ cnt=$[cnt + 1]
+ echo -e "\n\n\n*** Starting build attempt # $cnt"
+
+ cd api
+ python setup.py sdist
+
+ cd ..
+ cd client
+ python setup.py sdist
+
+ echo "######################################################"
+ echo " done "
+ echo "######################################################"
+ if [ $rc -ne 0 ]; then
+ echo "### Build failed with rc $rc ###"
+ else
+ echo "### Build successfully at attempt # $cnt"
+ fi
+done
+cd $rpm_output_dir
+mkdir upload_artifacts
+cp api/dist/escalator-* $rpm_output_dir/upload_artifacts
+cp client/dist/escalatorclient-* $rpm_output_dir/upload_artifacts
+tar zcvf opnfv-$OPNFV_ARTIFACT_VERSION.tar.gz upload_artifacts
+exit $rc
diff --git a/client/AUTHORS b/client/AUTHORS
new file mode 100644
index 0000000..22952a6
--- /dev/null
+++ b/client/AUTHORS
@@ -0,0 +1,11 @@
+Aric Gardner <agardner@linuxfoundation.org>
+Jie Hu <hu.jie@zte.com.cn>
+Zhou Ya <zhou.ya@zte.com.cn>
+Liyi Meng <liyi.meng@ericsson.com>
+Maria Toeroe <Maria.Toeroe@ericsson.com>
+Ryota MIBU <r-mibu@cq.jp.nec.com>
+SerenaFeng <feng.xiaoewi@zte.com.cn>
+chaozhong-zte <chao.zhong@zte.com.cn>
+hujie <hu.jie@zte.com.cn>
+wangguobing <wang.guobing1@zte.com.cn>
+zhang-jun3g <zhang.jun3g@zte.com.cn>
diff --git a/client/ChangeLog b/client/ChangeLog
new file mode 100644
index 0000000..5ea65ab
--- /dev/null
+++ b/client/ChangeLog
@@ -0,0 +1,8 @@
+CHANGES
+=======
+
+1.0.0
+------
+
+* Add escalatorclient
+
diff --git a/client/LICENSE b/client/LICENSE
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/client/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/client/MANIFEST.in b/client/MANIFEST.in
new file mode 100644
index 0000000..ae484e5
--- /dev/null
+++ b/client/MANIFEST.in
@@ -0,0 +1,10 @@
+include ChangeLog
+include MANIFEST.in pylintrc
+include AUTHORS
+include LICENSE
+include ChangeLog
+include babel.cfg tox.ini
+graft docs
+graft etc
+graft escalator/locale
+global-exclude *.pyc
diff --git a/client/PKG-INFO b/client/PKG-INFO
new file mode 100644
index 0000000..ecd5763
--- /dev/null
+++ b/client/PKG-INFO
@@ -0,0 +1,30 @@
+Metadata-Version: 1.1
+Name: escalatorclient
+Version: 1.0.0
+Summary: Escalator Client Library
+Home-page: http://www.opnfv.org/
+Author: OPNFV
+Author-email: opnfv-tech-discuss@lists.opnfv.org
+License: Apache License, Version 2.0
+Description: Python bindings to the Escalator Client
+ =============================================
+
+ This is a client library for escalator built on the Escalator Client. It provides a Python API (the ``escalatorclient`` module) and a command-line tool (``escalator``). This library fully supports the v1 Escalator Client, while support for the v2 Client is in progress.
+
+ Development takes place via the usual OPNFV processes as outlined in the `developer guide <http://docs.openstack.org/infra/manual/developers.html>`_.
+
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: OPNFV
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
diff --git a/client/README.rst b/client/README.rst
new file mode 100644
index 0000000..ba5a5d7
--- /dev/null
+++ b/client/README.rst
@@ -0,0 +1,6 @@
+Python bindings to the Escalator Client
+=============================================
+
+This is a client library for the Escalator service.
+It provides a Python API (the ``escalatorclient`` module) and a command-line tool (``escalator``).
+This library fully supports the Escalator Client.
diff --git a/client/babel.cfg b/client/babel.cfg
new file mode 100644
index 0000000..efceab8
--- /dev/null
+++ b/client/babel.cfg
@@ -0,0 +1 @@
+[python: **.py]
diff --git a/client/doc/Makefile b/client/doc/Makefile
new file mode 100644
index 0000000..430e5a3
--- /dev/null
+++ b/client/doc/Makefile
@@ -0,0 +1,90 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXSOURCE = source
+PAPER =
+BUILDDIR = build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE)
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-keystoneclient.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-keystoneclient.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/client/doc/source/apiv2.rst b/client/doc/source/apiv2.rst
new file mode 100644
index 0000000..0695b2a
--- /dev/null
+++ b/client/doc/source/apiv2.rst
@@ -0,0 +1,27 @@
+Python API v2
+=============
+
+To create a client::
+
+ from keystoneclient.auth.identity import v2 as identity
+ from keystoneclient import session
+ from escalatorclient import Client
+
+ auth = identity.Password(auth_url=AUTH_URL,
+ username=USERNAME,
+ password=PASSWORD,
+ tenant_name=PROJECT_ID)
+
+ sess = session.Session(auth=auth)
+ token = auth.get_token(sess)
+
+ escalator = Client('1', endpoint=OS_IMAGE_ENDPOINT, token=token)
+
+
+List
+----
+List nodes you can access::
+
+ for node in escalator.nodes.list():
+ print node
+
diff --git a/client/doc/source/conf.py b/client/doc/source/conf.py
new file mode 100644
index 0000000..1cfaad2
--- /dev/null
+++ b/client/doc/source/conf.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+#
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '..', '..')))
+
+
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'oslosphinx']
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'python-escalatorclient'
+copyright = u'OpenStack Foundation'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# Grouping the document tree for man pages.
+# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
+
+man_pages = [
+ ('man/escalator', 'escalator', u'Client for OpenStack Images API',
+ [u'OpenStack Foundation'], 1),
+]
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme = 'nature'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+ (
+ 'index',
+ '%s.tex' % project,
+ u'%s Documentation' % project,
+ u'OpenStack Foundation',
+ 'manual'
+ ),
+]
diff --git a/client/doc/source/index.rst b/client/doc/source/index.rst
new file mode 100644
index 0000000..ec7d4ef
--- /dev/null
+++ b/client/doc/source/index.rst
@@ -0,0 +1,37 @@
+Python API
+==========
+In order to use the python api directly, you must first obtain an auth token and identify which endpoint you wish to speak to. Once you have done so, you can use the API like so::
+
+ >>> from escalatorclient import Client
+ >>> escalator = Client('1', endpoint=OS_IMAGE_ENDPOINT, token=OS_AUTH_TOKEN)
+ >>> image = escalator.images.create(name="My Test Image")
+ >>> print image.status
+ 'queued'
+ >>> image.update(data=open('/tmp/myimage.iso', 'rb'))
+ >>> print image.status
+ 'active'
+ >>> image.update(properties=dict(my_custom_property='value'))
+ >>> with open('/tmp/copyimage.iso', 'wb') as f:
+ for chunk in image.data:
+ f.write(chunk)
+ >>> image.delete()
+
+For an API v2 example see also :doc:`apiv2`.
+
+Command-line Tool
+=================
+In order to use the CLI, you must provide your OpenStack username, password, tenant, and auth endpoint. Use the corresponding configuration options (``--os-username``, ``--os-password``, ``--os-tenant-id``, and ``--os-auth-url``) or set them in environment variables::
+
+ export OS_USERNAME=user
+ export OS_PASSWORD=pass
+ export OS_TENANT_ID=b363706f891f48019483f8bd6503c54b
+ export OS_AUTH_URL=http://auth.example.com:5000/v2.0
+
+The command line tool will attempt to reauthenticate using your provided credentials for every request. You can override this behavior by manually supplying an auth token using ``--os-image-url`` and ``--os-auth-token``. You can alternatively set these environment variables::
+
+ export OS_IMAGE_URL=http://escalator.example.org:9292/
+ export OS_AUTH_TOKEN=3bcc3d3a03f44e3d8377f9247b0ad155
+
+Once you've configured your authentication parameters, you can run ``escalator help`` to see a complete listing of available commands.
+
+See also :doc:`/man/escalator`. \ No newline at end of file
diff --git a/client/doc/source/man/escalator.rst b/client/doc/source/man/escalator.rst
new file mode 100644
index 0000000..40536ec
--- /dev/null
+++ b/client/doc/source/man/escalator.rst
@@ -0,0 +1,87 @@
+=================================
+:program:`escalator` CLI man page
+=================================
+
+.. program:: escalator
+.. highlight:: bash
+
+SYNOPSIS
+========
+
+:program:`escalator` [options] <command> [command-options]
+
+:program:`escalator help`
+
+:program:`escalator help` <command>
+
+
+DESCRIPTION
+===========
+
+The :program:`escalator` command line utility interacts with the OPNFV
+Escalator service.
+
+In order to use the CLI, you must provide your OpenStack username, password,
+project (historically called tenant), and auth endpoint. You can use
+configuration options :option:`--os-username`, :option:`--os-password`,
+:option:`--os-tenant-id`, and :option:`--os-auth-url` or set corresponding
+environment variables::
+
+ export OS_USERNAME=user
+ export OS_PASSWORD=pass
+ export OS_TENANT_ID=b363706f891f48019483f8bd6503c54b
+ export OS_AUTH_URL=http://auth.example.com:5000/v2.0
+
+The command line tool will attempt to reauthenticate using provided
+credentials for every request. You can override this behavior by manually
+supplying an auth token using :option:`--os-image-url` and
+:option:`--os-auth-token` or by setting corresponding environment variables::
+
+ export OS_IMAGE_URL=http://escalator.example.org:9292/
+ export OS_AUTH_TOKEN=3bcc3d3a03f44e3d8377f9247b0ad155
+
+
+You can select an API version to use by :option:`--os-image-api-version`
+option or by setting corresponding environment variable::
+
+ export OS_IMAGE_API_VERSION=2
+
+OPTIONS
+=======
+
+To get a list of available commands and options run::
+
+ escalator help
+
+To get usage and options of a command::
+
+ escalator help <command>
+
+
+EXAMPLES
+========
+
+Get information about image-create command::
+
+ escalator help image-create
+
+See available images::
+
+ escalator image-list
+
+Create new image::
+
+ escalator image-create --name foo --disk-format=qcow2 \
+ --container-format=bare --is-public=True \
+ --copy-from http://somewhere.net/foo.img
+
+Describe a specific image::
+
+ escalator image-show foo
+
+
+BUGS
+====
+
+The escalator client is hosted on Launchpad, so you can view current bugs at
+https://bugs.launchpad.net/python-escalatorclient/.
diff --git a/client/escalatorclient/__init__.py b/client/escalatorclient/__init__.py
new file mode 100644
index 0000000..4b95f8a
--- /dev/null
+++ b/client/escalatorclient/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 OPNFV Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(bcwaldon): this try/except block is needed to run setup.py due to
+# its need to import local code before installing required dependencies
+try:
+ import escalatorclient.client
+ Client = escalatorclient.client.Client
+except ImportError:
+ import warnings
+ warnings.warn("Could not import escalatorclient.client", ImportWarning)
+
+import pbr.version
+
+version_info = pbr.version.VersionInfo('python-escalatorclient')
+
+try:
+ __version__ = version_info.version_string()
+except AttributeError:
+ __version__ = None
diff --git a/client/escalatorclient/_i18n.py b/client/escalatorclient/_i18n.py
new file mode 100644
index 0000000..bbabb98
--- /dev/null
+++ b/client/escalatorclient/_i18n.py
@@ -0,0 +1,34 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+try:
+ import oslo_i18n as i18n
+except ImportError:
+ from oslo import i18n
+
+
+_translators = i18n.TranslatorFactory(domain='escalatorclient')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
diff --git a/client/escalatorclient/client.py b/client/escalatorclient/client.py
new file mode 100644
index 0000000..b11e23b
--- /dev/null
+++ b/client/escalatorclient/client.py
@@ -0,0 +1,39 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import warnings
+
+from escalatorclient.common import utils
+
+
+def Client(version=None, endpoint=None, *args, **kwargs):
+ if version is not None:
+ warnings.warn(("`version` keyword is being deprecated. Please pass the"
+ " version as part of the URL. "
+ "http://$HOST:$PORT/v$VERSION_NUMBER"),
+ DeprecationWarning)
+
+ endpoint, url_version = utils.strip_version(endpoint)
+
+ if not url_version and not version:
+ msg = ("Please provide either the version or an url with the form "
+ "http://$HOST:$PORT/v$VERSION_NUMBER")
+ raise RuntimeError(msg)
+
+ version = int(version or url_version)
+
+ module = utils.import_versioned_module(version, 'client')
+ client_class = getattr(module, 'Client')
+ return client_class(endpoint, *args, **kwargs)
diff --git a/client/escalatorclient/common/__init__.py b/client/escalatorclient/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/client/escalatorclient/common/__init__.py
diff --git a/client/escalatorclient/common/base.py b/client/escalatorclient/common/base.py
new file mode 100644
index 0000000..b85dc19
--- /dev/null
+++ b/client/escalatorclient/common/base.py
@@ -0,0 +1,34 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Base utilities to build API operation managers and objects on top of.
+
+DEPRECATED post v.0.12.0. Use 'escalatorclient.openstack.common.apiclient.base'
+instead of this module."
+"""
+
+import warnings
+
+from escalatorclient.openstack.common.apiclient import base
+
+
+warnings.warn("The 'escalatorclient.common.base' module is deprecated",
+ DeprecationWarning)
+
+
+getid = base.getid
+Manager = base.ManagerWithFind
+Resource = base.Resource
diff --git a/client/escalatorclient/common/http.py b/client/escalatorclient/common/http.py
new file mode 100644
index 0000000..301eedb
--- /dev/null
+++ b/client/escalatorclient/common/http.py
@@ -0,0 +1,288 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import logging
+import socket
+from oslo_utils import encodeutils
+from escalatorclient.common import https
+from escalatorclient.common.utils import safe_header
+from escalatorclient import exc
+from oslo_utils import importutils
+from oslo_utils import netutils
+from simplejson import decoder
+import requests
+try:
+ from requests.packages.urllib3.exceptions import ProtocolError
+except ImportError:
+ ProtocolError = requests.exceptions.ConnectionError
+import six
+from six.moves.urllib import parse
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# Python 2.5 compat fix
+if not hasattr(parse, 'parse_qsl'):
+ import cgi
+ parse.parse_qsl = cgi.parse_qsl
+
+
+osprofiler_web = importutils.try_import("osprofiler.web")
+
+LOG = logging.getLogger(__name__)
+USER_AGENT = 'python-escalatorclient'
+CHUNKSIZE = 1024 * 64 # 64kB
+
+
+class HTTPClient(object):
+
+ def __init__(self, endpoint, **kwargs):
+ self.endpoint = endpoint
+ self.identity_headers = kwargs.get('identity_headers')
+ self.auth_token = kwargs.get('token')
+ if self.identity_headers:
+ if self.identity_headers.get('X-Auth-Token'):
+ self.auth_token = self.identity_headers.get('X-Auth-Token')
+ del self.identity_headers['X-Auth-Token']
+
+ self.session = requests.Session()
+ self.session.headers["User-Agent"] = USER_AGENT
+
+ if self.auth_token:
+ self.session.headers["X-Auth-Token"] = self.auth_token
+
+ self.timeout = float(kwargs.get('timeout', 600))
+
+ if self.endpoint.startswith("https"):
+ compression = kwargs.get('ssl_compression', True)
+
+ if not compression:
+ self.session.mount("escalator+https://", https.HTTPSAdapter())
+ self.endpoint = 'escalator+' + self.endpoint
+
+ self.session.verify = (
+ kwargs.get('cacert', requests.certs.where()),
+ kwargs.get('insecure', False))
+
+ else:
+ if kwargs.get('insecure', False) is True:
+ self.session.verify = False
+ else:
+ if kwargs.get('cacert', None) is not '':
+ self.session.verify = kwargs.get('cacert', True)
+
+ self.session.cert = (kwargs.get('cert_file'),
+ kwargs.get('key_file'))
+
+ @staticmethod
+ def parse_endpoint(endpoint):
+ return netutils.urlsplit(endpoint)
+
+ def log_curl_request(self, method, url, headers, data, kwargs):
+ curl = ['curl -g -i -X %s' % method]
+
+ headers = copy.deepcopy(headers)
+ headers.update(self.session.headers)
+
+ for (key, value) in six.iteritems(headers):
+ header = '-H \'%s: %s\'' % safe_header(key, value)
+ curl.append(header)
+
+ if not self.session.verify:
+ curl.append('-k')
+ else:
+ if isinstance(self.session.verify, six.string_types):
+ curl.append(' --cacert %s' % self.session.verify)
+
+ if self.session.cert:
+ curl.append(' --cert %s --key %s' % self.session.cert)
+
+ if data and isinstance(data, six.string_types):
+ curl.append('-d \'%s\'' % data)
+
+ curl.append(url)
+
+ msg = ' '.join([encodeutils.safe_decode(item, errors='ignore')
+ for item in curl])
+ LOG.debug(msg)
+
+ @staticmethod
+ def log_http_response(resp, body=None):
+ status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
+ dump = ['\nHTTP/%.1f %s %s' % status]
+ headers = resp.headers.items()
+ dump.extend(['%s: %s' % safe_header(k, v) for k, v in headers])
+ dump.append('')
+ if body:
+ body = encodeutils.safe_decode(body)
+ dump.extend([body, ''])
+ LOG.debug('\n'.join([encodeutils.safe_decode(x, errors='ignore')
+ for x in dump]))
+
+ @staticmethod
+ def encode_headers(headers):
+ """Encodes headers.
+
+ Note: This should be used right before
+ sending anything out.
+
+ :param headers: Headers to encode
+ :returns: Dictionary with encoded headers'
+ names and values
+ """
+ return dict((encodeutils.safe_encode(h), encodeutils.safe_encode(v))
+ for h, v in six.iteritems(headers) if v is not None)
+
+ def _request(self, method, url, **kwargs):
+ """Send an http request with the specified characteristics.
+ Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
+ as setting headers and error handling.
+ """
+ # Copy the kwargs so we can reuse the original in case of redirects
+ headers = kwargs.pop("headers", {})
+ headers = headers and copy.deepcopy(headers) or {}
+
+ if self.identity_headers:
+ for k, v in six.iteritems(self.identity_headers):
+ headers.setdefault(k, v)
+
+ # Default Content-Type is octet-stream
+ content_type = headers.get('Content-Type', 'application/octet-stream')
+
+ def chunk_body(body):
+ chunk = body
+ while chunk:
+ chunk = body.read(CHUNKSIZE)
+ if chunk == '':
+ break
+ yield chunk
+
+ data = kwargs.pop("data", None)
+ if data is not None and not isinstance(data, six.string_types):
+ try:
+ data = json.dumps(data)
+ content_type = 'application/json'
+ except TypeError:
+ # Here we assume it's
+ # a file-like object
+ # and we'll chunk it
+ data = chunk_body(data)
+
+ headers['Content-Type'] = content_type
+ stream = True if content_type == 'application/octet-stream' else False
+
+ if osprofiler_web:
+ headers.update(osprofiler_web.get_trace_id_headers())
+
+ # Note(flaper87): Before letting headers / url fly,
+ # they should be encoded otherwise httplib will
+ # complain.
+ headers = self.encode_headers(headers)
+
+ try:
+ if self.endpoint.endswith("/") or url.startswith("/"):
+ conn_url = "%s%s" % (self.endpoint, url)
+ else:
+ conn_url = "%s/%s" % (self.endpoint, url)
+ self.log_curl_request(method, conn_url, headers, data, kwargs)
+ resp = self.session.request(method,
+ conn_url,
+ data=data,
+ stream=stream,
+ headers=headers,
+ **kwargs)
+ except requests.exceptions.Timeout as e:
+ message = ("Error communicating with %(endpoint)s %(e)s" %
+ dict(url=conn_url, e=e))
+ raise exc.InvalidEndpoint(message=message)
+ except (requests.exceptions.ConnectionError, ProtocolError) as e:
+ message = ("Error finding address for %(url)s: %(e)s" %
+ dict(url=conn_url, e=e))
+ raise exc.CommunicationError(message=message)
+ except socket.gaierror as e:
+ message = "Error finding address for %s: %s" % (
+ self.endpoint_hostname, e)
+ raise exc.InvalidEndpoint(message=message)
+ except (socket.error, socket.timeout) as e:
+ endpoint = self.endpoint
+ message = ("Error communicating with %(endpoint)s %(e)s" %
+ {'endpoint': endpoint, 'e': e})
+ raise exc.CommunicationError(message=message)
+
+ if not resp.ok:
+ LOG.debug("Request returned failure status %s." % resp.status_code)
+ raise exc.from_response(resp, resp.text)
+ elif resp.status_code == requests.codes.MULTIPLE_CHOICES:
+ raise exc.from_response(resp)
+
+ content_type = resp.headers.get('Content-Type')
+
+ # Read body into string if it isn't obviously image data
+ if content_type == 'application/octet-stream':
+ # Do not read all response in memory when
+ # downloading an image.
+ body_iter = _close_after_stream(resp, CHUNKSIZE)
+ self.log_http_response(resp)
+ else:
+ content = resp.text
+ self.log_http_response(resp, content)
+ if content_type and content_type.startswith('application/json'):
+ # Let's use requests json method,
+ # it should take care of response
+ # encoding
+ try:
+ body_iter = resp.json()
+ except decoder.JSONDecodeError:
+ status_body = {'status_code': resp.status_code}
+ return resp, status_body
+ else:
+ body_iter = six.StringIO(content)
+ try:
+ body_iter = json.loads(''.join([c for c in body_iter]))
+ except ValueError:
+ body_iter = None
+ return resp, body_iter
+
+ def head(self, url, **kwargs):
+ return self._request('HEAD', url, **kwargs)
+
+ def get(self, url, **kwargs):
+ return self._request('GET', url, **kwargs)
+
+ def post(self, url, **kwargs):
+ return self._request('POST', url, **kwargs)
+
+ def put(self, url, **kwargs):
+ return self._request('PUT', url, **kwargs)
+
+ def patch(self, url, **kwargs):
+ return self._request('PATCH', url, **kwargs)
+
+ def delete(self, url, **kwargs):
+ return self._request('DELETE', url, **kwargs)
+
+
+def _close_after_stream(response, chunk_size):
+ """Iterate over the content and ensure the response is closed after."""
+ # Yield each chunk in the response body
+ for chunk in response.iter_content(chunk_size=chunk_size):
+ yield chunk
+ # Once we're done streaming the body, ensure everything is closed.
+ # This will return the connection to the HTTPConnectionPool in urllib3
+ # and ideally reduce the number of HTTPConnectionPool full warnings.
+ response.close()
diff --git a/client/escalatorclient/common/https.py b/client/escalatorclient/common/https.py
new file mode 100644
index 0000000..55769a0
--- /dev/null
+++ b/client/escalatorclient/common/https.py
@@ -0,0 +1,349 @@
+# Copyright 2014 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import ssl
+import struct
+
+import OpenSSL
+from requests import adapters
+from requests import compat
+try:
+ from requests.packages.urllib3 import connectionpool
+except ImportError:
+ from urllib3 import connectionpool
+
+from oslo_utils import encodeutils
+import six
+# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
+from six.moves import range
+
+from escalatorclient.common import utils
+
+try:
+ from eventlet import patcher
+ # Handle case where we are running in a monkey patched environment
+ if patcher.is_monkey_patched('socket'):
+ from eventlet.green.httplib import HTTPSConnection
+ from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
+ from eventlet.greenio import GreenSocket
+ # TODO(mclaren): A getsockopt workaround: see 'getsockopt' doc string
+ GreenSocket.getsockopt = utils.getsockopt
+ else:
+ raise ImportError
+except ImportError:
+ try:
+ from httplib import HTTPSConnection
+ except ImportError:
+ from http.client import HTTPSConnection
+ from OpenSSL.SSL import Connection as Connection
+
+
+from escalatorclient import exc
+
+
def verify_callback(host=None):
    """Return a pyOpenSSL verification callback bound to *host*.

    A closure stashes the host value so the callback does not hold a
    reference to the VerifiedHTTPSConnection itself.
    """
    def _callback(connection, x509, errnum, depth, preverify_ok, host=host):
        return do_verify_callback(connection, x509, errnum,
                                  depth, preverify_ok, host=host)
    return _callback
+
+
def do_verify_callback(connection, x509, errnum,
                       depth, preverify_ok, host=None):
    """Verify the server's SSL certificate.

    Standalone function (not a method) to avoid socket-close issues when
    a reference is held on a VerifiedHTTPSConnection by the callback.

    :raises exc.SSLCertificateError: when the certificate has expired.
    """
    if x509.has_expired():
        raise exc.SSLCertificateError(
            "SSL Certificate expired on '%s'" % x509.get_notAfter())

    if depth != 0 or not preverify_ok:
        # Intermediate certificate, or leaf that already failed:
        # pass through OpenSSL's default result.
        return preverify_ok

    # Leaf certificate that passed OpenSSL's checks: additionally verify
    # that it matches the host we intended to reach.
    return host_matches_cert(host, x509)
+
+
def host_matches_cert(host, x509):
    """Verify that *x509* identifies the server at *host*.

    The certificate matches when its Common Name or one of its DNS
    Subject Alternative Names equals *host* (single-level wildcards
    '*.example.com' are supported).

    :raises exc.SSLCertificateError: when nothing matches.
    """
    def _matches(name):
        if name == host:
            return True
        # Single wildcard: '*.suffix' matches 'label.suffix'.
        if name.startswith('*.') and host.find('.') > 0:
            return name[2:] == host.split('.', 1)[1]

    common_name = x509.get_subject().commonName

    # Try the Common Name first.
    if _matches(common_name):
        return True

    # Fall back to DNS entries in the subjectAltName extension.
    san_list = None
    for idx in range(x509.get_extension_count()):
        ext = x509.get_extension(idx)
        if ext.get_short_name() == b'subjectAltName':
            san_list = str(ext)
            for entry in ''.join(san_list.split()).split(','):
                if entry.startswith('DNS:'):
                    if _matches(entry.split(':', 1)[1]):
                        return True

    # Server certificate does not match host.
    msg = ('Host "%s" does not match x509 certificate contents: '
           'CommonName "%s"' % (host, common_name))
    if san_list is not None:
        msg = msg + ', subjectAltName "%s"' % san_list
    raise exc.SSLCertificateError(msg)
+
+
def to_bytes(s):
    """Encode *s* to bytes when it is a text string; pass through otherwise."""
    return six.b(s) if isinstance(s, six.string_types) else s
+
+
class HTTPSAdapter(adapters.HTTPAdapter):
    """
    This adapter will be used just when
    ssl compression should be disabled.

    The init method overwrites the default
    https pool by setting escalatorclient's
    one.
    """

    def request_url(self, request, proxies):
        # NOTE(flaper87): Make sure the url is encoded, otherwise
        # python's standard httplib will fail with a TypeError.
        url = super(HTTPSAdapter, self).request_url(request, proxies)
        return encodeutils.safe_encode(url)

    def _create_escalator_httpsconnectionpool(self, url):
        """Build an HTTPSConnectionPool for *url* and cache it in urllib3's
        pool manager under its (scheme, host, port) key."""
        kw = self.poolmanager.connection_kw
        # Parse the url to get the scheme, host, and port
        parsed = compat.urlparse(url)
        # If there is no port specified, we should use the standard HTTPS port
        port = parsed.port or 443
        pool = HTTPSConnectionPool(parsed.host, port, **kw)

        # Register under the pool container's lock so concurrent requests
        # don't race on pool creation.
        with self.poolmanager.pools.lock:
            self.poolmanager.pools[(parsed.scheme, parsed.host, port)] = pool

        return pool

    def get_connection(self, url, proxies=None):
        """Return the connection pool for *url*; create one on demand for the
        custom 'escalator+https' scheme urllib3 doesn't know about."""
        try:
            return super(HTTPSAdapter, self).get_connection(url, proxies)
        except KeyError:
            # NOTE(sigamvirus24): This works around modifying a module global
            # which fixes bug #1396550
            # The scheme is most likely escalator+https but check anyway
            if not url.startswith('escalator+https://'):
                raise

            return self._create_escalator_httpsconnectionpool(url)

    def cert_verify(self, conn, url, verify, cert):
        # After standard requests cert setup, propagate the
        # (ca_certs, insecure) pair onto the connection object.
        super(HTTPSAdapter, self).cert_verify(conn, url, verify, cert)
        conn.ca_certs = verify[0]
        conn.insecure = verify[1]
+
+
class HTTPSConnectionPool(connectionpool.HTTPSConnectionPool):
    """
    HTTPSConnectionPool will be instantiated when a new
    connection is requested to the HTTPSAdapter. This
    implementation overwrites the _new_conn method and
    returns instances of escalatorclient's VerifiedHTTPSConnection
    which handles no compression.

    ssl_compression is hard-coded to False because this will
    be used just when the user sets --no-ssl-compression.
    """

    # Custom scheme so these pools don't collide with plain https pools.
    scheme = 'escalator+https'

    def _new_conn(self):
        """Create a fresh VerifiedHTTPSConnection for this pool."""
        self.num_connections += 1
        return VerifiedHTTPSConnection(host=self.host,
                                       port=self.port,
                                       key_file=self.key_file,
                                       cert_file=self.cert_file,
                                       cacert=self.ca_certs,
                                       insecure=self.insecure,
                                       ssl_compression=False)
+
+
class OpenSSLConnectionDelegator(object):
    """
    An OpenSSL.SSL.Connection delegator.

    Supplies an additional 'makefile' method which httplib requires
    and is not present in OpenSSL.SSL.Connection.

    Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
    a delegator must be used.
    """
    def __init__(self, *args, **kwargs):
        # Wrap a real OpenSSL connection; all other attribute access is
        # forwarded to it via __getattr__.
        self.connection = Connection(*args, **kwargs)

    def __getattr__(self, name):
        return getattr(self.connection, name)

    def makefile(self, *args, **kwargs):
        # NOTE(review): socket._fileobject is a Python 2-only private API;
        # this path presumably only runs under py2/eventlet — confirm before
        # relying on it under Python 3.
        return socket._fileobject(self.connection, *args, **kwargs)
+
+
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Extended HTTPSConnection which uses the OpenSSL library
    for enhanced SSL support.
    Note: Much of this functionality can eventually be replaced
    with native Python 3.3 code.
    """
    # Restrict the set of client supported cipher suites.  OpenSSL cipher
    # keywords are case-sensitive: the keyword is 'ECDH' (the previous
    # 'eCDH+AES128' entry was a typo and silently dropped those suites).
    CIPHERS = 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:'\
              'ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:'\
              'RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS'

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 cacert=None, timeout=None, insecure=False,
                 ssl_compression=True):
        """Set up the connection and its OpenSSL context.

        :param cacert: path to a CA bundle; defaults to system paths.
        :param insecure: when True, skip all certificate verification.
        :param ssl_compression: when False, disable TLS compression.
        :raises exc.SSLConfigurationError: on any SSL setup failure.
        """
        # NOTE(jamespage): Since pep-0476 (python >= 2.7.9) both Python 2
        # and Python 3 report SSL setup failures with the same exception
        # kinds, so a single tuple covers both interpreters.
        excp_lst = (TypeError, IOError, ssl.SSLError)
        try:
            HTTPSConnection.__init__(self, host, port,
                                     key_file=key_file,
                                     cert_file=cert_file)
            self.key_file = key_file
            self.cert_file = cert_file
            self.timeout = timeout
            self.insecure = insecure
            # NOTE(flaper87): `is_verified` is needed for
            # requests' urllib3. If insecure is True then
            # the request is not `verified`, hence `not insecure`
            self.is_verified = not insecure
            self.ssl_compression = ssl_compression
            self.cacert = None if cacert is None else str(cacert)
            self.set_context()
        # ssl exceptions are reported in various form in Python 3
        # so to be compatible, we report the same kind as under
        # Python2
        except excp_lst as e:
            raise exc.SSLConfigurationError(str(e))

    def set_context(self):
        """
        Set up the OpenSSL context.
        """
        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
        self.context.set_cipher_list(self.CIPHERS)

        if self.ssl_compression is False:
            self.context.set_options(0x20000)  # SSL_OP_NO_COMPRESSION

        if self.insecure is not True:
            # Verify the peer certificate, including our hostname check.
            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
                                    verify_callback(host=self.host))
        else:
            # Insecure mode: accept any certificate.
            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
                                    lambda *args: True)

        if self.cert_file:
            try:
                self.context.use_certificate_file(self.cert_file)
            except Exception as e:
                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
                raise exc.SSLConfigurationError(msg)
            if self.key_file is None:
                # We support having key and cert in same file
                try:
                    self.context.use_privatekey_file(self.cert_file)
                except Exception as e:
                    msg = ('No key file specified and unable to load key '
                           'from "%s" %s' % (self.cert_file, e))
                    raise exc.SSLConfigurationError(msg)

        if self.key_file:
            try:
                self.context.use_privatekey_file(self.key_file)
            except Exception as e:
                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
                raise exc.SSLConfigurationError(msg)

        if self.cacert:
            try:
                self.context.load_verify_locations(to_bytes(self.cacert))
            except Exception as e:
                msg = 'Unable to load CA from "%s" %s' % (self.cacert, e)
                raise exc.SSLConfigurationError(msg)
        else:
            # No explicit CA bundle: fall back to the system default paths.
            self.context.set_default_verify_paths()

    def connect(self):
        """
        Connect to an SSL port using the OpenSSL library and apply
        per-connection parameters.
        """
        result = socket.getaddrinfo(self.host, self.port, 0,
                                    socket.SOCK_STREAM)
        if result:
            socket_family = result[0][0]
            if socket_family == socket.AF_INET6:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            # If due to some reason the address lookup fails - we still
            # connect to IPv4 socket. This retains the older behavior.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            # '0' microseconds.  NOTE(review): SO_RCVTIMEO is not exposed by
            # the socket module on every platform — confirm target platforms.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
                            struct.pack('LL', self.timeout, 0))
        self.sock = OpenSSLConnectionDelegator(self.context, sock)
        self.sock.connect((self.host, self.port))
diff --git a/client/escalatorclient/common/utils.py b/client/escalatorclient/common/utils.py
new file mode 100644
index 0000000..0156d31
--- /dev/null
+++ b/client/escalatorclient/common/utils.py
@@ -0,0 +1,462 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+
+import errno
+import hashlib
+import json
+import os
+import re
+import sys
+import threading
+import uuid
+from oslo_utils import encodeutils
+from oslo_utils import strutils
+import prettytable
+import six
+
+from escalatorclient import exc
+from oslo_utils import importutils
+
+if os.name == 'nt':
+ import msvcrt
+else:
+ msvcrt = None
+
+
+_memoized_property_lock = threading.Lock()
+
+SENSITIVE_HEADERS = ('X-Auth-Token', )
+
+
+# Decorator for cli-args
def arg(*args, **kwargs):
    """Decorator that attaches an argparse argument spec to a CLI function.

    Specs accumulate on ``func.arguments``; each new spec is inserted at
    the front because stacked decorators apply bottom-up, and this keeps
    positional options in declaration order.
    """
    def _attach(func):
        func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
        return func
    return _attach
+
+
def schema_args(schema_getter, omit=None):
    """Decorator factory that derives CLI arguments from a JSON schema.

    *schema_getter* is invoked lazily at decoration time; when it returns
    None (no server connection available), a single placeholder argument
    with explanatory help text is attached instead.

    :param omit: property names to skip when generating arguments.
    """
    omit = omit or []
    # Map JSON-schema type names to argparse 'type' callables.
    typemap = {
        'string': str,
        'integer': int,
        'boolean': strutils.bool_from_string,
        'array': list
    }

    def _decorator(func):
        schema = schema_getter()
        if schema is None:
            param = '<unavailable>'
            kwargs = {
                'help': ("Please run with connection parameters set to "
                         "retrieve the schema for generating help for this "
                         "command")
            }
            func.__dict__.setdefault('arguments', []).insert(0, ((param, ),
                                                                 kwargs))
        else:
            properties = schema.get('properties', {})
            for name, property in six.iteritems(properties):
                if name in omit:
                    continue
                param = '--' + name.replace('_', '-')
                kwargs = {}

                type_str = property.get('type', 'string')

                if isinstance(type_str, list):
                    # NOTE(flaper87): This means the server has
                    # returned something like `['null', 'string']`,
                    # therefore we use the first non-`null` type as
                    # the valid type.
                    for t in type_str:
                        if t != 'null':
                            type_str = t
                            break

                if type_str == 'array':
                    items = property.get('items')
                    kwargs['type'] = typemap.get(items.get('type'))
                    kwargs['nargs'] = '+'
                else:
                    kwargs['type'] = typemap.get(type_str)

                if type_str == 'boolean':
                    kwargs['metavar'] = '[True|False]'
                else:
                    kwargs['metavar'] = '<%s>' % name.upper()

                description = property.get('description', "")
                if 'enum' in property:
                    if len(description):
                        description += " "

                    # NOTE(flaper87): Make sure all values are `str/unicode`
                    # for the `join` to succeed. Enum types can also be `None`
                    # therefore, join's call would fail without the following
                    # list comprehension
                    vals = [six.text_type(val) for val in property.get('enum')]
                    description += ('Valid values: ' + ', '.join(vals))
                kwargs['help'] = description

                func.__dict__.setdefault('arguments',
                                         []).insert(0, ((param, ), kwargs))
        return func

    return _decorator
+
+
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of single-quoted items."""
    quoted = ["'%s'" % item for item in l]
    return ', '.join(quoted)
+
+
def print_list(objs, fields, formatters=None, field_settings=None,
               conver_field=True):
    """Print *objs* as a left-aligned table with *fields* as columns.

    :param formatters: optional mapping field -> callable(obj) producing
        the cell value for that column.
    :param field_settings: optional mapping field -> prettytable attribute
        overrides (e.g. alignment) applied per column.
    :param conver_field: when True, field names are lower-cased before
        attribute lookup on each object.  (Name kept as-is for backward
        compatibility.)
    """
    formatters = formatters or {}
    field_settings = field_settings or {}
    pt = prettytable.PrettyTable([f for f in fields], caching=False)
    pt.align = 'l'

    for o in objs:
        row = []
        for field in fields:
            if field in field_settings:
                for setting, value in six.iteritems(field_settings[field]):
                    setting_dict = getattr(pt, setting)
                    setting_dict[field] = value

            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Derive the attribute name from the column title.
                if conver_field:
                    field_name = field.lower().replace(' ', '_')
                else:
                    field_name = field.replace(' ', '_')
                data = getattr(o, field_name, None)
                row.append(data)
        pt.add_row(row)

    print(encodeutils.safe_decode(pt.get_string()))
+
+
def print_dict(d, max_column_width=80):
    """Print dict *d* as a two-column Property/Value table sorted by key."""
    pt = prettytable.PrettyTable(['Property', 'Value'], caching=False)
    pt.align = 'l'
    pt.max_width = max_column_width
    for k, v in six.iteritems(d):
        # Serialize nested structures so they render inside a single cell.
        if isinstance(v, (dict, list)):
            v = json.dumps(v)
        pt.add_row([k, v])
    print(encodeutils.safe_decode(pt.get_string(sortby='Property')))
+
+
def find_resource(manager, id):
    """Helper for the _find_* methods.

    Try *id* first as an integer id, then as a UUID, and finally look it
    up by name.

    :raises exc.CommandError: when nothing matches, or when the name
        lookup is ambiguous.
    """
    # first try to get entity as integer id
    try:
        if isinstance(id, int) or id.isdigit():
            return manager.get(int(id))
    except exc.NotFound:
        pass

    # now try to get entity as uuid
    try:
        # This must be unicode for Python 3 compatibility.
        # If you pass a bytestring to uuid.UUID, you will get a TypeError
        uuid.UUID(encodeutils.safe_decode(id))
        return manager.get(id)
    except (ValueError, exc.NotFound):
        # Not a UUID (or not found by UUID): fall through to the name
        # lookup below.  Raising here previously made the name search
        # unreachable dead code.
        pass

    # finally try to find entity by name
    matches = list(manager.list(filters={'name': id}))
    num_matches = len(matches)
    if num_matches == 0:
        msg = "No %s with a name or ID of '%s' exists." % \
              (manager.resource_class.__name__.lower(), id)
        raise exc.CommandError(msg)
    elif num_matches > 1:
        msg = ("Multiple %s matches found for '%s', use an ID to be more"
               " specific." % (manager.resource_class.__name__.lower(),
                               id))
        raise exc.CommandError(msg)
    else:
        return matches[0]
+
+
def skip_authentication(f):
    """Function decorator used to indicate a caller may be unauthenticated.

    Sets the marker attribute consumed by is_authentication_required().
    """
    setattr(f, 'require_authentication', False)
    return f
+
+
def is_authentication_required(f):
    """Checks to see if the function requires authentication.

    Use the skip_authentication decorator to indicate a caller may
    skip the authentication step.  Functions without the marker
    attribute default to requiring authentication.
    """
    required = getattr(f, 'require_authentication', True)
    return required
+
+
def env(*vars, **kwargs):
    """Return the first non-empty environment variable among *vars*.

    Falls back to the 'default' keyword argument (itself defaulting to '')
    when none of the named variables is set to a truthy value.
    """
    for var_name in vars:
        val = os.environ.get(var_name)
        if val:
            return val
    return kwargs.get('default', '')
+
+
def import_versioned_module(version, submodule=None):
    """Import and return escalatorclient.v<version>[.<submodule>]."""
    name = 'escalatorclient.v%s' % version
    if submodule:
        name = '.'.join((name, submodule))
    return importutils.import_module(name)
+
+
def exit(msg='', exit_code=1):
    """Print *msg* to stderr (when non-empty) and terminate the process.

    :param exit_code: process exit status, defaults to 1 (failure).
    """
    if msg:
        print(encodeutils.safe_decode(msg), file=sys.stderr)
    sys.exit(exit_code)
+
+
def save_image(data, path):
    """
    Save an image to the specified path.

    :param data: binary data of the image (an iterable of chunks)
    :param path: path to save the image to; None streams to stdout,
        which is deliberately left open.
    """
    if path is None:
        for chunk in data:
            sys.stdout.write(chunk)
    else:
        # Context manager guarantees the file is closed even on error,
        # matching the original try/finally behaviour.
        with open(path, 'wb') as image:
            for chunk in data:
                image.write(chunk)
+
+
def make_size_human_readable(size):
    """Return *size* (in bytes) as a short human-readable string.

    e.g. 1536 -> '1.5kB'.  One decimal place at most; trailing zeros
    and dots are stripped.
    """
    units = ('B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB')

    idx = 0
    value = size
    while value >= 1024.0:
        value = value / 1024.0
        idx += 1

    text = ('%.1f' % value).rstrip('0').rstrip('.')
    return '%s%s' % (text, units[idx])
+
+
def getsockopt(self, *args, **kwargs):
    """
    A function which allows us to monkey patch eventlet's
    GreenSocket, adding a required 'getsockopt' method.
    TODO: (mclaren) we can remove this once the eventlet fix
    (https://bitbucket.org/eventlet/eventlet/commits/609f230)
    lands in mainstream packages.
    """
    # Delegate to the real underlying socket wrapped by the green socket.
    return self.fd.getsockopt(*args, **kwargs)
+
+
def exception_to_str(exc):
    """Best-effort conversion of an exception into a safe, decoded string.

    Falls back to the exception class name when both unicode and str
    conversion fail with UnicodeError.
    """
    # NOTE: the parameter shadows the module-level 'exc' import inside
    # this function only; kept for backward compatibility.
    try:
        text = six.text_type(exc)
    except UnicodeError:
        try:
            text = str(exc)
        except UnicodeError:
            text = ("Caught '%(exception)s' exception." %
                    {"exception": exc.__class__.__name__})
    return encodeutils.safe_decode(text, errors='ignore')
+
+
def get_file_size(file_obj):
    """
    Analyze file-like object and attempt to determine its size.

    :param file_obj: file-like object.
    :retval The file's size or None if it cannot be determined.
    """
    has_seek_tell = hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell')
    # On Python 3 we additionally require the object to report seekable().
    if has_seek_tell and (six.PY2 or six.PY3 and file_obj.seekable()):
        try:
            start = file_obj.tell()
            file_obj.seek(0, os.SEEK_END)
            end = file_obj.tell()
            file_obj.seek(start)
            return end
        except IOError as err:
            if err.errno != errno.ESPIPE:
                raise
            # Illegal seek. This means the file object
            # is a pipe (e.g. the user is trying
            # to pipe image data to the client,
            # echo testdata | bin/escalator add blah...), or
            # that file object is empty, or that a file-like
            # object which doesn't support 'seek/tell' has
            # been supplied.
            return
+
+
def get_data_file(args):
    """Return a readable file object for image data, or None.

    Prefers the --file argument; otherwise falls back to stdin when data
    is being piped in.
    """
    if args.file:
        return open(args.file, 'rb')
    else:
        # distinguish cases where:
        # (1) stdin is not valid (as in cron jobs):
        #     escalator ... <&-
        # (2) image data is provided through standard input:
        #     escalator ... < /tmp/file or cat /tmp/file | escalator ...
        # (3) no image data provided:
        #     escalator ...
        try:
            os.fstat(0)
        except OSError:
            # (1) stdin is not valid (closed...)
            return None
        if not sys.stdin.isatty():
            # (2) image data is provided through standard input
            if msvcrt:
                # Windows: switch stdin to binary mode so bytes pass
                # through unmangled.
                msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
            return sys.stdin
        else:
            # (3) no image data provided
            return None
+
+
def strip_version(endpoint):
    """Strip version from the last component of endpoint if present.

    :returns: (endpoint_without_version, version) where version is a
        float parsed from a trailing 'vN' or 'vN.M' path component, or
        None when no version component is present.
    :raises ValueError: if *endpoint* is not a string.
    """
    # NOTE(flaper87): This shouldn't be necessary if
    # we make endpoint the first argument. However, we
    # can't do that just yet because we need to keep
    # backwards compatibility.
    if not isinstance(endpoint, six.string_types):
        raise ValueError("Expected endpoint")

    version = None
    # Get rid of trailing '/' if present
    endpoint = endpoint.rstrip('/')
    url_bits = endpoint.split('/')
    # Raw string avoids the invalid '\d' escape-sequence warning;
    # matches 'v1' or 'v2.0' etc.
    if re.match(r'v\d+\.?\d*', url_bits[-1]):
        version = float(url_bits[-1].lstrip('v'))
        endpoint = '/'.join(url_bits[:-1])
    return endpoint, version
+
+
def print_image(image_obj, max_col_width=None):
    """Pretty-print an image mapping, omitting internal bookkeeping keys."""
    hidden = ['self', 'access', 'file', 'schema']
    image = dict([item for item in six.iteritems(image_obj)
                  if item[0] not in hidden])
    # Only pass the width through when it looks like a number.
    if str(max_col_width).isdigit():
        print_dict(image, max_column_width=max_col_width)
    else:
        print_dict(image)
+
+
def integrity_iter(iter, checksum):
    """
    Check image data integrity.

    Yields chunks unchanged while folding them into an MD5 digest; after
    the stream is exhausted the digest is compared against *checksum*.

    :raises: IOError
    """
    digest = hashlib.md5()
    for chunk in iter:
        yield chunk
        if isinstance(chunk, six.string_types):
            chunk = six.b(chunk)
        digest.update(chunk)
    actual = digest.hexdigest()
    if actual != checksum:
        raise IOError(errno.EPIPE,
                      'Corrupt image download. Checksum was %s expected %s' %
                      (actual, checksum))
+
+
def memoized_property(fn):
    """Property decorator that computes its value once per instance.

    The result is cached on the instance under '_lazy_once_<name>'.
    Uses double-checked locking on a module-level lock so concurrent
    first accesses compute the value only once.
    """
    attr_name = '_lazy_once_' + fn.__name__

    @property
    def _memoized_property(self):
        if hasattr(self, attr_name):
            # Fast path: already cached, no lock needed.
            return getattr(self, attr_name)
        else:
            with _memoized_property_lock:
                # Re-check under the lock: another thread may have
                # populated the cache while we were waiting.
                if not hasattr(self, attr_name):
                    setattr(self, attr_name, fn(self))
            return getattr(self, attr_name)
    return _memoized_property
+
+
def safe_header(name, value):
    """Return (name, value), masking values of sensitive headers.

    Sensitive values (e.g. auth tokens) are replaced by their SHA1 hash
    so logs stay useful without leaking credentials.
    """
    if name not in SENSITIVE_HEADERS:
        return name, value
    digest = hashlib.sha1(value.encode('utf-8')).hexdigest()
    return name, "{SHA1}%s" % digest
+
+
def to_str(value):
    """Coerce *value* to str, passing through None and existing strings."""
    if value is None or isinstance(value, six.string_types):
        return value
    return str(value)
+
+
def get_host_min_mac(host_interfaces):
    """Return the smallest 'mac' value among *host_interfaces*, or None.

    Interfaces without a truthy 'mac' entry are ignored.
    """
    macs = [iface['mac'] for iface in host_interfaces if iface.get('mac')]
    return min(macs) if macs else None
+
+
class IterableWithLength(object):
    """Wrap an iterator so it also reports a predetermined length.

    Useful for streaming bodies whose total size is known up front
    without materializing the data.
    """

    def __init__(self, iterable, length):
        # iterable: the underlying iterator; length: its known item count.
        self.iterable = iterable
        self.length = length

    def __iter__(self):
        return self.iterable

    def next(self):
        # Python 2 iterator protocol.
        return next(self.iterable)

    # Python 3 iterator protocol: without this alias the wrapper was not
    # a valid iterator under py3 (next(obj) raised TypeError).
    __next__ = next

    def __len__(self):
        return self.length
diff --git a/client/escalatorclient/exc.py b/client/escalatorclient/exc.py
new file mode 100644
index 0000000..06a9126
--- /dev/null
+++ b/client/escalatorclient/exc.py
@@ -0,0 +1,201 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import sys
+
+
class BaseException(Exception):
    """An error occurred."""
    # NOTE(review): this deliberately shadows the builtin BaseException
    # within this module; kept for backward compatibility with callers.
    def __init__(self, message=None):
        self.message = message

    def __str__(self):
        # Fall back to the subclass docstring when no message was given.
        return self.message or self.__class__.__doc__
+
+
# --- Client-side (non-HTTP) errors -----------------------------------------

class CommandError(BaseException):
    """Invalid usage of CLI."""


class InvalidEndpoint(BaseException):
    """The provided endpoint is invalid."""


class CommunicationError(BaseException):
    """Unable to communicate with server."""


class ClientException(Exception):
    """DEPRECATED!"""
+
+
class HTTPException(ClientException):
    """Base exception for all HTTP-derived exceptions.

    Subclasses override ``code`` with the numeric HTTP status they
    represent; the base keeps the 'N/A' placeholder.
    """
    code = 'N/A'

    def __init__(self, details=None):
        # Empty/None details fall back to the exception class name.
        self.details = details or self.__class__.__name__

    def __str__(self):
        return "%s (HTTP %s)" % (self.details, self.code)
+
+
class HTTPMultipleChoices(HTTPException):
    """HTTP 300: the requested API version is not available."""
    code = 300

    def __str__(self):
        # Note: deliberately (re)sets self.details before formatting so
        # callers inspecting .details after str() see the same message.
        self.details = ("Requested version of OpenStack Images API is not "
                        "available.")
        return "%s (HTTP %s) %s" % (self.__class__.__name__, self.code,
                                    self.details)
+
+
# --- HTTP status exceptions -------------------------------------------------
# One class per status code.  The bare names (BadRequest, Unauthorized, ...)
# are deprecated aliases kept for backward compatibility; new code should
# use the HTTP-prefixed classes, which are also what _code_map collects.

class BadRequest(HTTPException):
    """DEPRECATED!"""
    code = 400


class HTTPBadRequest(BadRequest):
    pass


class Unauthorized(HTTPException):
    """DEPRECATED!"""
    code = 401


class HTTPUnauthorized(Unauthorized):
    pass


class Forbidden(HTTPException):
    """DEPRECATED!"""
    code = 403


class HTTPForbidden(Forbidden):
    pass


class NotFound(HTTPException):
    """DEPRECATED!"""
    code = 404


class HTTPNotFound(NotFound):
    pass


class HTTPMethodNotAllowed(HTTPException):
    code = 405


class Conflict(HTTPException):
    """DEPRECATED!"""
    code = 409


class HTTPConflict(Conflict):
    pass


class OverLimit(HTTPException):
    """DEPRECATED!"""
    code = 413


class HTTPOverLimit(OverLimit):
    pass


class HTTPInternalServerError(HTTPException):
    code = 500


class HTTPNotImplemented(HTTPException):
    code = 501


class HTTPBadGateway(HTTPException):
    code = 502


class ServiceUnavailable(HTTPException):
    """DEPRECATED!"""
    code = 503


class HTTPServiceUnavailable(ServiceUnavailable):
    pass
+
+
# NOTE(bcwaldon): Build a mapping of HTTP codes to corresponding exception
# classes
_code_map = {}
for obj_name in dir(sys.modules[__name__]):
    # Every class following the 'HTTP<Name>' convention participates;
    # HTTPException itself lands under its placeholder 'N/A' code.
    if obj_name.startswith('HTTP'):
        obj = getattr(sys.modules[__name__], obj_name)
        _code_map[obj.code] = obj
+
+
def from_response(response, body=None):
    """Return an instance of an HTTPException based on httplib response.

    The exception class is chosen from _code_map by status code; the
    detail message is extracted from *body* according to the response's
    Content-Type.
    """
    cls = _code_map.get(response.status_code, HTTPException)
    # The Content-Type header may be absent entirely; default to '' so
    # the substring checks below cannot raise KeyError.
    content_type = response.headers.get('content-type', '')
    if body and 'json' in content_type:
        # Iterate over the nested objects and retrieve the "message"
        # attribute.
        messages = [obj.get('message') for obj in response.json().values()]
        # Join all of the messages together nicely and filter out any
        # objects that don't have a "message" attr.
        details = '\n'.join(i for i in messages if i is not None)
        return cls(details=details)
    elif body and 'html' in content_type:
        # Split the lines, strip whitespace and inline HTML from the response.
        details = [re.sub(r'<.+?>', '', i.strip())
                   for i in response.text.splitlines()]
        details = [i for i in details if i]
        # Remove duplicates from the list while preserving order.
        details_seen = set()
        details_temp = []
        for i in details:
            if i not in details_seen:
                details_temp.append(i)
                details_seen.add(i)
        # Return joined string separated by colons.
        details = ': '.join(details_temp)
        return cls(details=details)
    elif body:
        details = body.replace('\n\n', '\n')
        return cls(details=details)

    return cls()
+
+
class NoTokenLookupException(Exception):
    """DEPRECATED!"""
    pass


class EndpointNotFound(Exception):
    """DEPRECATED!"""
    pass


class SSLConfigurationError(BaseException):
    # Raised when SSL certificates/keys/CA bundles cannot be loaded.
    pass


class SSLCertificateError(BaseException):
    # Raised when a server certificate is expired or does not match the host.
    pass
diff --git a/client/escalatorclient/openstack/__init__.py b/client/escalatorclient/openstack/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/client/escalatorclient/openstack/__init__.py
diff --git a/client/escalatorclient/openstack/common/__init__.py b/client/escalatorclient/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/client/escalatorclient/openstack/common/__init__.py
diff --git a/client/escalatorclient/openstack/common/_i18n.py b/client/escalatorclient/openstack/common/_i18n.py
new file mode 100644
index 0000000..95d1792
--- /dev/null
+++ b/client/escalatorclient/openstack/common/_i18n.py
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html
+
+"""
+
+try:
+ import oslo.i18n
+
+ # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
+ # application name when this module is synced into the separate
+ # repository. It is OK to have more than one translation function
+ # using the same domain, since there will still only be one message
+ # catalog.
+ _translators = oslo.i18n.TranslatorFactory(domain='escalatorclient')
+
+ # The primary translation function using the well-known name "_"
+ _ = _translators.primary
+
+ # Translators for log levels.
+ #
+ # The abbreviated names are meant to reflect the usual use of a short
+ # name like '_'. The "L" is for "log" and the other letter comes from
+ # the level.
+ _LI = _translators.log_info
+ _LW = _translators.log_warning
+ _LE = _translators.log_error
+ _LC = _translators.log_critical
+except ImportError:
+ # NOTE(dims): Support for cases where a project wants to use
+ # code from oslo-incubator, but is not ready to be internationalized
+ # (like tempest)
+ _ = _LI = _LW = _LE = _LC = lambda x: x
diff --git a/client/escalatorclient/openstack/common/apiclient/__init__.py b/client/escalatorclient/openstack/common/apiclient/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/__init__.py
diff --git a/client/escalatorclient/openstack/common/apiclient/auth.py b/client/escalatorclient/openstack/common/apiclient/auth.py
new file mode 100644
index 0000000..4d29dcf
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/auth.py
@@ -0,0 +1,234 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 Spanish National Research Council.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# E0202: An attribute inherited from %s hide this method
+# pylint: disable=E0202
+
+########################################################################
+#
+# THIS MODULE IS DEPRECATED
+#
+# Please refer to
+# https://etherpad.openstack.org/p/kilo-escalatorclient-library-proposals for
+# the discussion leading to this deprecation.
+#
+# We recommend checking out the python-openstacksdk project
+# (https://launchpad.net/python-openstacksdk) instead.
+#
+########################################################################
+
+import abc
+import argparse
+import os
+
+import six
+from stevedore import extension
+
+from escalatorclient.openstack.common.apiclient import exceptions
+
+
# Registry of discovered auth plugins, keyed by entry-point name.
_discovered_plugins = {}


def discover_auth_systems():
    """Discover the available auth-systems.

    This won't take into account the old style auth-systems.
    """
    global _discovered_plugins
    _discovered_plugins = {}

    def _register(ext):
        # Stash each plugin class under its entry-point name.
        _discovered_plugins[ext.name] = ext.plugin

    namespace = "escalatorclient.openstack.common.apiclient.auth"
    extension.ExtensionManager(namespace).map(_register)
+
+
def load_auth_system_opts(parser):
    """Load options needed by the available auth-systems into a parser.

    Common options go into one argument group; each discovered plugin
    gets its own group for its plugin-specific options.
    """
    common_group = parser.add_argument_group("Common auth options")
    BaseAuthPlugin.add_common_opts(common_group)
    for plugin_name, plugin_cls in six.iteritems(_discovered_plugins):
        plugin_group = parser.add_argument_group(
            "Auth-system '%s' options" % plugin_name,
            conflict_handler="resolve")
        plugin_cls.add_opts(plugin_group)
+
+
def load_plugin(auth_system):
    """Instantiate the plugin registered under ``auth_system``.

    :raises: exceptions.AuthSystemNotFound if no plugin with that name
        was discovered
    """
    if auth_system not in _discovered_plugins:
        raise exceptions.AuthSystemNotFound(auth_system)
    return _discovered_plugins[auth_system](auth_system=auth_system)
+
+
def load_plugin_from_args(args):
    """Load required plugin and populate it with options.

    Try to guess auth system if it is not specified. Systems are tried in
    alphabetical order.

    :type args: argparse.Namespace
    :raises: AuthPluginOptionsMissing
    """
    requested = args.os_auth_system
    if requested:
        plugin = load_plugin(requested)
        plugin.parse_opts(args)
        plugin.sufficient_options()
        return plugin

    # No explicit choice: pick the first discovered system (sorted by
    # name) whose required options are all present.
    for name in sorted(six.iterkeys(_discovered_plugins)):
        candidate = _discovered_plugins[name]()
        candidate.parse_opts(args)
        try:
            candidate.sufficient_options()
        except exceptions.AuthPluginOptionsMissing:
            continue
        else:
            return candidate
    raise exceptions.AuthPluginOptionsMissing(["auth_system"])
+
+
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
    """Base class for authentication plugins.

    An authentication plugin needs to override at least the authenticate
    method to be a valid plugin.
    """

    # Name of this auth system (set per subclass or per instance).
    auth_system = None
    # Subclasses list here every option name they consume.
    opt_names = []
    # Options shared by all plugins; registered once by add_common_opts().
    common_opt_names = [
        "auth_system",
        "username",
        "password",
        "tenant_name",
        "token",
        "auth_url",
    ]

    def __init__(self, auth_system=None, **kwargs):
        """Bind the auth-system name and seed self.opts from kwargs.

        :param auth_system: overrides the class-level auth_system
        :param kwargs: initial values for the names in opt_names
            (missing ones default to None)
        """
        self.auth_system = auth_system or self.auth_system
        self.opts = dict((name, kwargs.get(name))
                         for name in self.opt_names)

    @staticmethod
    def _parser_add_opt(parser, opt):
        """Add an option to parser in two variants.

        Registers both ``--os-foo-bar`` (visible, defaulting to the
        OS_FOO_BAR environment variable) and the hidden legacy spelling
        ``--os_foo_bar``.

        :param opt: option name (with underscores)
        """
        dashed_opt = opt.replace("_", "-")
        env_var = "OS_%s" % opt.upper()
        arg_default = os.environ.get(env_var, "")
        arg_help = "Defaults to env[%s]." % env_var
        parser.add_argument(
            "--os-%s" % dashed_opt,
            metavar="<%s>" % dashed_opt,
            default=arg_default,
            help=arg_help)
        parser.add_argument(
            "--os_%s" % opt,
            metavar="<%s>" % dashed_opt,
            help=argparse.SUPPRESS)

    @classmethod
    def add_opts(cls, parser):
        """Populate the parser with the plugin-specific options
        (the common ones are handled by add_common_opts).
        """
        for opt in cls.opt_names:
            # use `BaseAuthPlugin.common_opt_names` since it is never
            # changed in child classes
            if opt not in BaseAuthPlugin.common_opt_names:
                cls._parser_add_opt(parser, opt)

    @classmethod
    def add_common_opts(cls, parser):
        """Add options that are common for several plugins.
        """
        for opt in cls.common_opt_names:
            cls._parser_add_opt(parser, opt)

    @staticmethod
    def get_opt(opt_name, args):
        """Return option name and value.

        :param opt_name: name of the option, e.g., "username"
        :param args: parsed arguments
        :returns: (opt_name, value) where value comes from the
            ``os_<opt_name>`` attribute of args (None when absent)
        """
        return (opt_name, getattr(args, "os_%s" % opt_name, None))

    def parse_opts(self, args):
        """Parse the actual auth-system options if any.

        This method is expected to populate the attribute `self.opts` with a
        dict containing the options and values needed to make authentication.
        """
        self.opts.update(dict(self.get_opt(opt_name, args)
                              for opt_name in self.opt_names))

    def authenticate(self, http_client):
        """Authenticate using plugin defined method.

        The method usually analyses `self.opts` and performs
        a request to authentication server.

        :param http_client: client object that needs authentication
        :type http_client: HTTPClient
        :raises: AuthorizationFailure
        """
        # Validate options before attempting the (possibly remote) auth.
        self.sufficient_options()
        self._do_authenticate(http_client)

    @abc.abstractmethod
    def _do_authenticate(self, http_client):
        """Protected method for authentication.

        Subclasses implement the actual auth exchange here.
        """

    def sufficient_options(self):
        """Check if all required options are present.

        :raises: AuthPluginOptionsMissing listing every unset option
        """
        missing = [opt
                   for opt in self.opt_names
                   if not self.opts.get(opt)]
        if missing:
            raise exceptions.AuthPluginOptionsMissing(missing)

    @abc.abstractmethod
    def token_and_endpoint(self, endpoint_type, service_type):
        """Return token and endpoint.

        :param service_type: Service type of the endpoint
        :type service_type: string
        :param endpoint_type: Type of endpoint.
                              Possible values: public or publicURL,
                              internal or internalURL,
                              admin or adminURL
        :type endpoint_type: string
        :returns: tuple of token and endpoint strings
        :raises: EndpointException
        """
diff --git a/client/escalatorclient/openstack/common/apiclient/base.py b/client/escalatorclient/openstack/common/apiclient/base.py
new file mode 100644
index 0000000..eb7218b
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/base.py
@@ -0,0 +1,532 @@
+# Copyright 2010 Jacob Kaplan-Moss
+# Copyright 2011 OpenStack Foundation
+# Copyright 2012 Grid Dynamics
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Base utilities to build API operation managers and objects on top of.
+"""
+
+########################################################################
+#
+# THIS MODULE IS DEPRECATED
+#
+# Please refer to
+# https://etherpad.openstack.org/p/kilo-escalatorclient-library-proposals for
+# the discussion leading to this deprecation.
+#
+# We recommend checking out the python-openstacksdk project
+# (https://launchpad.net/python-openstacksdk) instead.
+#
+########################################################################
+
+
+# E1102: %s is not callable
+# pylint: disable=E1102
+
+import abc
+import copy
+
+from oslo_utils import strutils
+import six
+from six.moves.urllib import parse
+
+from escalatorclient.openstack.common._i18n import _
+from escalatorclient.openstack.common.apiclient import exceptions
+
+
def getid(obj):
    """Return the identifier of *obj*, or *obj* itself.

    Allows callers to pass either a Resource or a bare ID (UUID) wherever
    a relationship is referenced: a truthy ``uuid`` attribute wins, then
    an ``id`` attribute, otherwise the argument is assumed to already be
    an identifier.
    """
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    return getattr(obj, 'id', obj)
+
+
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
    """Mixin that lets classes register and execute named hooks."""

    # Shared registry: hook type -> list of callables. Note this dict is
    # a class attribute, so hooks are shared across all subclasses.
    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Register ``hook_func`` under ``hook_type``.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param hook_func: hook function
        """
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Invoke every hook registered under ``hook_type``.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param args: args to be passed to every hook function
        :param kwargs: kwargs to be passed to every hook function
        """
        for func in cls._hooks_map.get(hook_type) or []:
            func(*args, **kwargs)
+
+
class BaseManager(HookableMixin):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors, images,
    etc.) and provide CRUD operations for them.
    """
    # Subclasses set this to the Resource subclass wrapping response data.
    resource_class = None

    def __init__(self, client):
        """Initializes BaseManager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(BaseManager, self).__init__()
        self.client = client

    def _list(self, url, response_key=None, obj_class=None, json=None):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :returns: list of obj_class instances built from the response
        """
        if json:
            body = self.client.post(url, json=json).json()
        else:
            body = self.client.get(url).json()

        if obj_class is None:
            obj_class = self.resource_class

        data = body[response_key] if response_key is not None else body
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass

        # Falsy entries are skipped, so e.g. empty dicts never become objects.
        return [obj_class(self, res, loaded=True) for res in data if res]

    def _get(self, url, response_key=None):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        :returns: a single self.resource_class instance
        """
        body = self.client.get(url).json()
        data = body[response_key] if response_key is not None else body
        return self.resource_class(self, data, loaded=True)

    def _head(self, url):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        :returns: True when the server answered 204 (object exists)
        """
        resp = self.client.head(url)
        return resp.status_code == 204

    def _post(self, url, json, response_key=None, return_raw=False):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'. If response_key is None - all response body
            will be used.
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        """
        body = self.client.post(url, json=json).json()
        data = body[response_key] if response_key is not None else body
        if return_raw:
            return data
        return self.resource_class(self, data)

    def _put(self, url, json=None, response_key=None):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        :returns: a resource_class instance, or None when the server
            sends no body back
        """
        resp = self.client.put(url, json=json)
        # PUT requests may not return a body
        if resp.content:
            body = resp.json()
            if response_key is not None:
                return self.resource_class(self, body[response_key])
            else:
                return self.resource_class(self, body)

    def _patch(self, url, json=None, response_key=None):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'. If response_key is None - all response body
            will be used.
        """
        body = self.client.patch(url, json=json).json()
        if response_key is not None:
            return self.resource_class(self, body[response_key])
        else:
            return self.resource_class(self, body)

    def _delete(self, url):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        :returns: the raw response object from the client
        """
        return self.client.delete(url)
+
+
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
    """Manager that adds client-side `find()`/`findall()` filtering."""

    @abc.abstractmethod
    def list(self):
        pass

    def find(self, **kwargs):
        """Return the single item whose attributes match ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.

        :raises: exceptions.NotFound when nothing matches
        :raises: exceptions.NoUniqueMatch when several items match
        """
        matches = self.findall(**kwargs)
        if not matches:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(msg)
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch()
        return matches[0]

    def findall(self, **kwargs):
        """Return every item whose attributes match ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        wanted = kwargs.items()
        hits = []
        for candidate in self.list():
            try:
                if all(getattr(candidate, attr) == expected
                       for (attr, expected) in wanted):
                    hits.append(candidate)
            except AttributeError:
                # Items lacking one of the attributes simply don't match.
                continue
        return hits
+
+
class CrudManager(BaseManager):
    """Base manager class for manipulating entities.

    Children of this class are expected to define a `collection_key` and `key`.

    - `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer collections in both URL's (e.g.  `/v3/entities`) and JSON
      objects containing a list of member resources (e.g. `{'entities': [{},
      {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.

    """
    collection_key = None
    key = None

    def build_url(self, base_url=None, **kwargs):
        """Builds a resource URL for the given kwargs.

        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.

        By default, the URL will represent a collection of entities, e.g.::

            /entities

        If kwargs contains an `entity_id`, then the URL will represent a
        specific member, e.g.::

            /entities/{entity_id}

        :param base_url: if provided, the generated URL will be appended to it
        """
        url = base_url if base_url is not None else ''

        url += '/%s' % self.collection_key

        # do we have a specific entity?
        entity_id = kwargs.get('%s_id' % self.key)
        if entity_id is not None:
            url += '/%s' % entity_id

        return url

    def _filter_kwargs(self, kwargs):
        """Drop null values and handle ids.

        Mutates and returns kwargs: None values are removed, and any
        Resource-valued argument ``foo=<Resource>`` is rewritten to
        ``foo_id=<id>`` via getid().
        """
        for key, ref in six.iteritems(kwargs.copy()):
            if ref is None:
                kwargs.pop(key)
            else:
                if isinstance(ref, Resource):
                    kwargs.pop(key)
                    kwargs['%s_id' % key] = getid(ref)
        return kwargs

    def create(self, **kwargs):
        """Create a new collection member from ``kwargs``."""
        kwargs = self._filter_kwargs(kwargs)
        return self._post(
            self.build_url(**kwargs),
            {self.key: kwargs},
            self.key)

    def get(self, **kwargs):
        """Fetch a single member identified by ``<key>_id`` in kwargs."""
        kwargs = self._filter_kwargs(kwargs)
        return self._get(
            self.build_url(**kwargs),
            self.key)

    def head(self, **kwargs):
        """Return True when the member identified by kwargs exists."""
        kwargs = self._filter_kwargs(kwargs)
        return self._head(self.build_url(**kwargs))

    def list(self, base_url=None, **kwargs):
        """List the collection.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)

        # Remaining kwargs become the query string.
        return self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)

    def put(self, base_url=None, **kwargs):
        """Update an element.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)

        return self._put(self.build_url(base_url=base_url, **kwargs))

    def update(self, **kwargs):
        """PATCH the member identified by ``<key>_id`` with the rest of
        kwargs as the new attribute values.
        """
        kwargs = self._filter_kwargs(kwargs)
        params = kwargs.copy()
        # The id selects the URL; it must not appear in the request body.
        params.pop('%s_id' % self.key)

        return self._patch(
            self.build_url(**kwargs),
            {self.key: params},
            self.key)

    def delete(self, **kwargs):
        """Delete the member identified by kwargs."""
        kwargs = self._filter_kwargs(kwargs)

        return self._delete(
            self.build_url(**kwargs))

    def find(self, base_url=None, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        Filtering happens server-side via the query string (unlike
        ManagerWithFind.find, which filters client-side).

        :param base_url: if provided, the generated URL will be appended to it
        :raises: exceptions.NotFound / exceptions.NoUniqueMatch
        """
        kwargs = self._filter_kwargs(kwargs)

        rl = self._list(
            '%(base_url)s%(query)s' % {
                'base_url': self.build_url(base_url=base_url, **kwargs),
                'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
            },
            self.collection_key)
        num = len(rl)

        if num == 0:
            msg = _("No %(name)s matching %(args)s.") % {
                'name': self.resource_class.__name__,
                'args': kwargs
            }
            raise exceptions.NotFound(404, msg)
        elif num > 1:
            raise exceptions.NoUniqueMatch
        else:
            return rl[0]
+
+
class Extension(HookableMixin):
    """Extension descriptor: wraps a module that may supply hooks and a
    manager class.
    """

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
    manager_class = None

    def __init__(self, name, module):
        super(Extension, self).__init__()
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        """Scan the module for hook functions and a BaseManager subclass."""
        self.manager_class = None
        for member_name, member in self.module.__dict__.items():
            if member_name in self.SUPPORTED_HOOKS:
                self.add_hook(member_name, member)
                continue
            try:
                is_manager = issubclass(member, BaseManager)
            except TypeError:
                # Not a class at all; ignore.
                continue
            if is_manager:
                self.manager_class = member

    def __repr__(self):
        return "<Extension '%s'>" % self.name
+
+
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes.
    """

    # When True, human_id derives a slug from the NAME_ATTR attribute.
    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.

        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._loaded = loaded

    def __repr__(self):
        # Show all public attributes except the manager back-reference.
        reprkeys = sorted(k
                          for k in self.__dict__.keys()
                          if k[0] != '_' and k != 'manager')
        info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
        return "<%s %s>" % (self.__class__.__name__, info)

    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion.
        Returns None unless HUMAN_ID is enabled and a name is present.
        """
        if self.HUMAN_ID:
            name = getattr(self, self.NAME_ATTR, None)
            if name is not None:
                return strutils.to_slug(name)
        return None

    def _add_details(self, info):
        # Mirror each key of info both as an instance attribute and into
        # the backing _info dict.
        for (k, v) in six.iteritems(info):
            try:
                setattr(self, k, v)
                self._info[k] = v
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass

    def __getattr__(self, k):
        # Only called when normal attribute lookup fails; may trigger a
        # one-shot lazy load of the full resource details.
        if k not in self.__dict__:
            # NOTE(bcwaldon): disallow lazy-loading if already loaded once
            if not self.is_loaded():
                self.get()
                # Retry after loading; raises AttributeError if still absent.
                return self.__getattr__(k)

            raise AttributeError(k)
        else:
            return self.__dict__[k]

    def get(self):
        """Support for lazy loading details.

        Some clients, such as novaclient have the option to lazy load the
        details, details which can be loaded with this function.
        """
        # set_loaded() first ... so if we have to bail, we know we tried.
        self.set_loaded(True)
        if not hasattr(self.manager, 'get'):
            return

        new = self.manager.get(self.id)
        if new:
            self._add_details(new._info)
            self._add_details(
                {'x_request_id': self.manager.client.last_request_id})

    def __eq__(self, other):
        # NOTE(review): __eq__ is defined without __hash__, which makes
        # instances unhashable on Python 3 — confirm no caller uses
        # resources as set members or dict keys.
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        if hasattr(self, 'id') and hasattr(other, 'id'):
            return self.id == other.id
        return self._info == other._info

    def is_loaded(self):
        # True once details were fetched (or supplied fully at creation).
        return self._loaded

    def set_loaded(self, val):
        self._loaded = val

    def to_dict(self):
        """Return a deep copy of the raw resource attributes."""
        return copy.deepcopy(self._info)
diff --git a/client/escalatorclient/openstack/common/apiclient/client.py b/client/escalatorclient/openstack/common/apiclient/client.py
new file mode 100644
index 0000000..d478989
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/client.py
@@ -0,0 +1,388 @@
+# Copyright 2010 Jacob Kaplan-Moss
+# Copyright 2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# Copyright 2013 Alessio Ababilov
+# Copyright 2013 Grid Dynamics
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+OpenStack Client interface. Handles the REST calls and responses.
+"""
+
+# E0202: An attribute inherited from %s hide this method
+# pylint: disable=E0202
+
+import hashlib
+import logging
+import time
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from oslo_utils import encodeutils
+from oslo_utils import importutils
+import requests
+
+from escalatorclient.openstack.common._i18n import _
+from escalatorclient.openstack.common.apiclient import exceptions
+
# Module-level logger, one per module per stdlib convention.
_logger = logging.getLogger(__name__)
# Header names whose values are hashed before logging (see _safe_header).
SENSITIVE_HEADERS = ('X-Auth-Token', 'X-Subject-Token',)
+
+
class HTTPClient(object):
    """This client handles sending HTTP requests to OpenStack servers.

    Features:

    - share authentication information between several clients to different
      services (e.g., for compute and image clients);
    - reissue authentication request for expired tokens;
    - encode/decode JSON bodies;
    - raise exceptions on HTTP errors;
    - pluggable authentication;
    - store authentication information in a keyring;
    - store time spent for requests;
    - register clients for particular services, so one can use
      `http_client.identity` or `http_client.compute`;
    - log requests and responses in a format that is easy to copy-and-paste
      into terminal and send the same request with curl.
    """

    user_agent = "escalatorclient.openstack.common.apiclient"

    def __init__(self,
                 auth_plugin,
                 region_name=None,
                 endpoint_type="publicURL",
                 original_ip=None,
                 verify=True,
                 cert=None,
                 timeout=None,
                 timings=False,
                 keyring_saver=None,
                 debug=False,
                 user_agent=None,
                 http=None):
        """Set up transport and authentication state.

        :param auth_plugin: plugin providing token_and_endpoint()/
            authenticate() (BaseAuthPlugin interface)
        :param region_name: preferred region name, stored for plugins
        :param endpoint_type: default endpoint type, e.g. "publicURL"
        :param original_ip: client IP recorded in the Forwarded header
        :param verify: TLS verification flag or CA bundle (requests)
        :param cert: client certificate (requests)
        :param timeout: per-request timeout in seconds, or None
        :param timings: when True, record (label, start, end) per request
        :param keyring_saver: object whose save(self) is called after
            successful authentication
        :param debug: when True, emit curl-style request/response logs
        :param user_agent: overrides the class-level User-Agent string
        :param http: pre-built requests.Session (a fresh one by default)
        """
        self.auth_plugin = auth_plugin

        self.endpoint_type = endpoint_type
        self.region_name = region_name

        self.original_ip = original_ip
        self.timeout = timeout
        self.verify = verify
        self.cert = cert

        self.keyring_saver = keyring_saver
        self.debug = debug
        self.user_agent = user_agent or self.user_agent

        self.times = []  # [("item", starttime, endtime), ...]
        self.timings = timings

        # requests within the same session can reuse TCP connections from pool
        self.http = http or requests.Session()

        self.cached_token = None
        self.last_request_id = None

    def _safe_header(self, name, value):
        """Return a (name, value) pair safe for logging: values of
        SENSITIVE_HEADERS are replaced by their SHA1 digest.
        """
        if name in SENSITIVE_HEADERS:
            # because in python3 byte string handling is ... ug
            v = value.encode('utf-8')
            h = hashlib.sha1(v)
            d = h.hexdigest()
            return encodeutils.safe_decode(name), "{SHA1}%s" % d
        else:
            return (encodeutils.safe_decode(name),
                    encodeutils.safe_decode(value))

    def _http_log_req(self, method, url, kwargs):
        """Log the outgoing request as a copy-pasteable curl command."""
        if not self.debug:
            return

        string_parts = [
            "curl -g -i",
            "-X '%s'" % method,
            "'%s'" % url,
        ]

        for element in kwargs['headers']:
            header = ("-H '%s: %s'" %
                      self._safe_header(element, kwargs['headers'][element]))
            string_parts.append(header)

        _logger.debug("REQ: %s" % " ".join(string_parts))
        if 'data' in kwargs:
            _logger.debug("REQ BODY: %s\n" % (kwargs['data']))

    def _http_log_resp(self, resp):
        """Log response status/headers and, if already read, the body."""
        if not self.debug:
            return
        _logger.debug(
            "RESP: [%s] %s\n",
            resp.status_code,
            resp.headers)
        # Only log the body once consumed, so logging never forces a
        # streamed response to be downloaded.
        if resp._content_consumed:
            _logger.debug(
                "RESP BODY: %s\n",
                resp.text)

    def serialize(self, kwargs):
        """Encode kwargs['json'] into kwargs['data'] in place.

        Sets the JSON Content-Type header and always removes the 'json'
        key, so kwargs can be handed to requests as-is.
        """
        if kwargs.get('json') is not None:
            kwargs['headers']['Content-Type'] = 'application/json'
            kwargs['data'] = json.dumps(kwargs['json'])
        try:
            del kwargs['json']
        except KeyError:
            pass

    def get_timings(self):
        """Return the recorded (label, start, end) tuples."""
        return self.times

    def reset_timings(self):
        """Drop all recorded timings."""
        self.times = []

    def request(self, method, url, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around `requests.Session.request` to handle tasks such as
        setting headers, JSON encoding/decoding, and error handling.

        :param method: method of HTTP request
        :param url: URL of HTTP request
        :param kwargs: any other parameter that can be passed to
            requests.Session.request (such as `headers`) or `json`
            that will be encoded as JSON and used as `data` argument
        :raises: an exception from exceptions.from_response for any
            response with status >= 400
        """
        kwargs.setdefault("headers", {})
        kwargs["headers"]["User-Agent"] = self.user_agent
        if self.original_ip:
            kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % (
                self.original_ip, self.user_agent)
        if self.timeout is not None:
            kwargs.setdefault("timeout", self.timeout)
        kwargs.setdefault("verify", self.verify)
        if self.cert is not None:
            kwargs.setdefault("cert", self.cert)
        self.serialize(kwargs)

        self._http_log_req(method, url, kwargs)
        if self.timings:
            start_time = time.time()
        resp = self.http.request(method, url, **kwargs)
        if self.timings:
            self.times.append(("%s %s" % (method, url),
                               start_time, time.time()))
        self._http_log_resp(resp)

        self.last_request_id = resp.headers.get('x-openstack-request-id')

        if resp.status_code >= 400:
            _logger.debug(
                "Request returned failure status: %s",
                resp.status_code)
            raise exceptions.from_response(resp, method, url)

        return resp

    @staticmethod
    def concat_url(endpoint, url):
        """Concatenate endpoint and final URL.

        E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
        "http://keystone/v2.0/tokens".

        :param endpoint: the base URL
        :param url: the final URL
        """
        return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))

    def client_request(self, client, method, url, **kwargs):
        """Send an http request using `client`'s endpoint and specified `url`.

        If request was rejected as unauthorized (possibly because the token is
        expired), issue one authorization attempt and send the request once
        again.

        :param client: instance of BaseClient descendant
        :param method: method of HTTP request
        :param url: URL of HTTP request
        :param kwargs: any other parameter that can be passed to
            `HTTPClient.request`
        """

        filter_args = {
            "endpoint_type": client.endpoint_type or self.endpoint_type,
            "service_type": client.service_type,
        }
        # Prefer cached credentials; fall back to the auth plugin, and
        # finally to a full authentication round-trip.
        token, endpoint = (self.cached_token, client.cached_endpoint)
        just_authenticated = False
        if not (token and endpoint):
            try:
                token, endpoint = self.auth_plugin.token_and_endpoint(
                    **filter_args)
            except exceptions.EndpointException:
                pass
            if not (token and endpoint):
                self.authenticate()
                just_authenticated = True
                token, endpoint = self.auth_plugin.token_and_endpoint(
                    **filter_args)
                if not (token and endpoint):
                    raise exceptions.AuthorizationFailure(
                        _("Cannot find endpoint or token for request"))

        old_token_endpoint = (token, endpoint)
        kwargs.setdefault("headers", {})["X-Auth-Token"] = token
        self.cached_token = token
        client.cached_endpoint = endpoint
        # Perform the request once. If we get Unauthorized, then it
        # might be because the auth token expired, so try to
        # re-authenticate and try again. If it still fails, bail.
        try:
            return self.request(
                method, self.concat_url(endpoint, url), **kwargs)
        except exceptions.Unauthorized as unauth_ex:
            if just_authenticated:
                # We authenticated during this very call; retrying with
                # the same credentials cannot do better.
                raise
            self.cached_token = None
            client.cached_endpoint = None
            if self.auth_plugin.opts.get('token'):
                self.auth_plugin.opts['token'] = None
            if self.auth_plugin.opts.get('endpoint'):
                self.auth_plugin.opts['endpoint'] = None
            self.authenticate()
            try:
                token, endpoint = self.auth_plugin.token_and_endpoint(
                    **filter_args)
            except exceptions.EndpointException:
                raise unauth_ex
            if (not (token and endpoint) or
                    old_token_endpoint == (token, endpoint)):
                # Re-authentication produced nothing new; surface the
                # original 401.
                raise unauth_ex
            self.cached_token = token
            client.cached_endpoint = endpoint
            kwargs["headers"]["X-Auth-Token"] = token
            return self.request(
                method, self.concat_url(endpoint, url), **kwargs)

    def add_client(self, base_client_instance):
        """Add a new instance of :class:`BaseClient` descendant.

        `self` will store a reference to `base_client_instance`.

        Example:

        >>> def test_clients():
        ...     from keystoneclient.auth import keystone
        ...     from openstack.common.apiclient import client
        ...     auth = keystone.KeystoneAuthPlugin(
        ...         username="user", password="pass", tenant_name="tenant",
        ...         auth_url="http://auth:5000/v2.0")
        ...     openstack_client = client.HTTPClient(auth)
        ...     # create nova client
        ...     from novaclient.v1_1 import client
        ...     client.Client(openstack_client)
        ...     # create keystone client
        ...     from keystoneclient.v2_0 import client
        ...     client.Client(openstack_client)
        ...     # use them
        ...     openstack_client.identity.tenants.list()
        ...     openstack_client.compute.servers.list()
        """
        service_type = base_client_instance.service_type
        # Never shadow an existing attribute (or a previous client).
        if service_type and not hasattr(self, service_type):
            setattr(self, service_type, base_client_instance)

    def authenticate(self):
        """Run the auth plugin and persist the result via keyring_saver."""
        self.auth_plugin.authenticate(self)
        # Store the authentication results in the keyring for later requests
        if self.keyring_saver:
            self.keyring_saver.save(self)
+
+
class BaseClient(object):
    """Top-level object to access the OpenStack API.

    All HTTP traffic is delegated to an :class:`HTTPClient`, which deals
    with authentication and the other transport-level concerns.
    """

    service_type = None
    endpoint_type = None  # "publicURL" will be used
    cached_endpoint = None

    def __init__(self, http_client, extensions=None):
        self.http_client = http_client
        http_client.add_client(self)

        # Attach a manager attribute for each extension that defines one.
        for ext in (extensions or ()):
            if ext.manager_class:
                setattr(self, ext.name, ext.manager_class(self))

    def client_request(self, method, url, **kwargs):
        """Forward the call to the shared HTTP client, binding ``self``."""
        return self.http_client.client_request(
            self, method, url, **kwargs)

    @property
    def last_request_id(self):
        return self.http_client.last_request_id

    # Thin verb-specific wrappers around client_request().
    def head(self, url, **kwargs):
        return self.client_request("HEAD", url, **kwargs)

    def get(self, url, **kwargs):
        return self.client_request("GET", url, **kwargs)

    def post(self, url, **kwargs):
        return self.client_request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.client_request("PUT", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.client_request("DELETE", url, **kwargs)

    def patch(self, url, **kwargs):
        return self.client_request("PATCH", url, **kwargs)

    @staticmethod
    def get_class(api_name, version, version_map):
        """Returns the client class for the requested API version

        :param api_name: the name of the API, e.g. 'compute', 'image', etc
        :param version: the requested API version
        :param version_map: a dict of client classes keyed by version
        :rtype: a client class for the requested API version
        """
        try:
            class_path = version_map[str(version)]
        except (KeyError, ValueError):
            raise exceptions.UnsupportedVersion(
                _("Invalid %(api_name)s client version '%(version)s'. "
                  "Must be one of: %(version_map)s") % {
                    'api_name': api_name,
                    'version': version,
                    'version_map': ', '.join(version_map.keys())})
        return importutils.import_class(class_path)
diff --git a/client/escalatorclient/openstack/common/apiclient/exceptions.py b/client/escalatorclient/openstack/common/apiclient/exceptions.py
new file mode 100644
index 0000000..bcda21d
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/exceptions.py
@@ -0,0 +1,479 @@
+# Copyright 2010 Jacob Kaplan-Moss
+# Copyright 2011 Nebula, Inc.
+# Copyright 2013 Alessio Ababilov
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception definitions.
+"""
+
+########################################################################
+#
+# THIS MODULE IS DEPRECATED
+#
+# Please refer to
+# https://etherpad.openstack.org/p/kilo-escalatorclient-library-proposals for
+# the discussion leading to this deprecation.
+#
+# We recommend checking out the python-openstacksdk project
+# (https://launchpad.net/python-openstacksdk) instead.
+#
+########################################################################
+
+import inspect
+import sys
+
+import six
+
+from escalatorclient.openstack.common._i18n import _
+
+
# Root of the hierarchy: everything this library raises derives from
# ClientException.
class ClientException(Exception):
    """The base exception class for all exceptions this library raises.
    """
    pass


class ValidationError(ClientException):
    """Error in validation on API client side."""
    pass


class UnsupportedVersion(ClientException):
    """User is trying to use an unsupported version of the API."""
    pass


class CommandError(ClientException):
    """Error in CLI tool."""
    pass


class AuthorizationFailure(ClientException):
    """Cannot authorize API client."""
    pass


# NOTE: this name shadows the Python 3 builtin ConnectionError inside
# this module; callers must catch the apiclient class, not the builtin.
class ConnectionError(ClientException):
    """Cannot connect to API service."""
    pass


class ConnectionRefused(ConnectionError):
    """Connection refused while trying to connect to API service."""
    pass


class AuthPluginOptionsMissing(AuthorizationFailure):
    """Auth plugin misses some options."""
    def __init__(self, opt_names):
        super(AuthPluginOptionsMissing, self).__init__(
            _("Authentication failed. Missing options: %s") %
            ", ".join(opt_names))
        # Keep the raw list so callers can inspect which options were absent.
        self.opt_names = opt_names


class AuthSystemNotFound(AuthorizationFailure):
    """User has specified an AuthSystem that is not installed."""
    def __init__(self, auth_system):
        super(AuthSystemNotFound, self).__init__(
            _("AuthSystemNotFound: %r") % auth_system)
        # Name of the requested (unavailable) auth system.
        self.auth_system = auth_system


class NoUniqueMatch(ClientException):
    """Multiple entities found instead of one."""
    pass


class EndpointException(ClientException):
    """Something is rotten in Service Catalog."""
    pass


class EndpointNotFound(EndpointException):
    """Could not find requested endpoint in Service Catalog."""
    pass


class AmbiguousEndpoints(EndpointException):
    """Found more than one matching endpoint in Service Catalog."""
    def __init__(self, endpoints=None):
        super(AmbiguousEndpoints, self).__init__(
            _("AmbiguousEndpoints: %r") % endpoints)
        # The conflicting endpoint records, kept for caller inspection.
        self.endpoints = endpoints
+
class HttpError(ClientException):
    """The base exception class for all HTTP exceptions.

    Instances format themselves as ``"<message> (HTTP <status>)"`` with an
    optional ``(Request-ID: ...)`` suffix.
    """
    # Class-level defaults; subclasses override them, and __init__ may
    # override again per instance.
    http_status = 0
    message = _("HTTP Error")

    def __init__(self, message=None, details=None,
                 response=None, request_id=None,
                 url=None, method=None, http_status=None):
        # Fall back to the class-level message/status when not given.
        self.http_status = http_status or self.http_status
        self.message = message or self.message
        self.details = details
        self.request_id = request_id
        self.response = response
        self.url = url
        self.method = method
        formatted_string = "%s (HTTP %s)" % (self.message, self.http_status)
        if request_id:
            formatted_string += " (Request-ID: %s)" % request_id
        super(HttpError, self).__init__(formatted_string)


class HTTPRedirection(HttpError):
    """HTTP Redirection."""
    message = _("HTTP Redirection")


class HTTPClientError(HttpError):
    """Client-side HTTP error.

    Exception for cases in which the client seems to have erred.
    """
    message = _("HTTP Client Error")


class HttpServerError(HttpError):
    """Server-side HTTP error.

    Exception for cases in which the server is aware that it has
    erred or is incapable of performing the request.
    """
    message = _("HTTP Server Error")
+
+
# Concrete status classes.  The http_status attribute both registers each
# class in _code_map (used by from_response() below) and feeds
# HttpError.__init__'s formatted message.
class MultipleChoices(HTTPRedirection):
    """HTTP 300 - Multiple Choices.

    Indicates multiple options for the resource that the client may follow.
    """

    http_status = 300
    message = _("Multiple Choices")


class BadRequest(HTTPClientError):
    """HTTP 400 - Bad Request.

    The request cannot be fulfilled due to bad syntax.
    """
    http_status = 400
    message = _("Bad Request")


class Unauthorized(HTTPClientError):
    """HTTP 401 - Unauthorized.

    Similar to 403 Forbidden, but specifically for use when authentication
    is required and has failed or has not yet been provided.
    """
    http_status = 401
    message = _("Unauthorized")


class PaymentRequired(HTTPClientError):
    """HTTP 402 - Payment Required.

    Reserved for future use.
    """
    http_status = 402
    message = _("Payment Required")


class Forbidden(HTTPClientError):
    """HTTP 403 - Forbidden.

    The request was a valid request, but the server is refusing to respond
    to it.
    """
    http_status = 403
    message = _("Forbidden")


class NotFound(HTTPClientError):
    """HTTP 404 - Not Found.

    The requested resource could not be found but may be available again
    in the future.
    """
    http_status = 404
    message = _("Not Found")


class MethodNotAllowed(HTTPClientError):
    """HTTP 405 - Method Not Allowed.

    A request was made of a resource using a request method not supported
    by that resource.
    """
    http_status = 405
    message = _("Method Not Allowed")


class NotAcceptable(HTTPClientError):
    """HTTP 406 - Not Acceptable.

    The requested resource is only capable of generating content not
    acceptable according to the Accept headers sent in the request.
    """
    http_status = 406
    message = _("Not Acceptable")


class ProxyAuthenticationRequired(HTTPClientError):
    """HTTP 407 - Proxy Authentication Required.

    The client must first authenticate itself with the proxy.
    """
    http_status = 407
    message = _("Proxy Authentication Required")


class RequestTimeout(HTTPClientError):
    """HTTP 408 - Request Timeout.

    The server timed out waiting for the request.
    """
    http_status = 408
    message = _("Request Timeout")


class Conflict(HTTPClientError):
    """HTTP 409 - Conflict.

    Indicates that the request could not be processed because of conflict
    in the request, such as an edit conflict.
    """
    http_status = 409
    message = _("Conflict")


class Gone(HTTPClientError):
    """HTTP 410 - Gone.

    Indicates that the resource requested is no longer available and will
    not be available again.
    """
    http_status = 410
    message = _("Gone")


class LengthRequired(HTTPClientError):
    """HTTP 411 - Length Required.

    The request did not specify the length of its content, which is
    required by the requested resource.
    """
    http_status = 411
    message = _("Length Required")


class PreconditionFailed(HTTPClientError):
    """HTTP 412 - Precondition Failed.

    The server does not meet one of the preconditions that the requester
    put on the request.
    """
    http_status = 412
    message = _("Precondition Failed")
+
+
class RequestEntityTooLarge(HTTPClientError):
    """HTTP 413 - Request Entity Too Large.

    The request is larger than the server is willing or able to process.
    """
    http_status = 413
    message = _("Request Entity Too Large")

    def __init__(self, *args, **kwargs):
        # 'retry_after' carries the Retry-After response header value (see
        # from_response()); it may be absent, non-numeric, or None, so
        # fall back to 0 instead of failing.  TypeError is included
        # because int(None) raises it, which the original
        # (KeyError, ValueError) clause let escape.
        try:
            self.retry_after = int(kwargs.pop('retry_after'))
        except (KeyError, ValueError, TypeError):
            self.retry_after = 0

        super(RequestEntityTooLarge, self).__init__(*args, **kwargs)
+
+
# Remaining 4xx classes plus the 5xx family; same registration-by-
# http_status convention as above.
class RequestUriTooLong(HTTPClientError):
    """HTTP 414 - Request-URI Too Long.

    The URI provided was too long for the server to process.
    """
    http_status = 414
    message = _("Request-URI Too Long")


class UnsupportedMediaType(HTTPClientError):
    """HTTP 415 - Unsupported Media Type.

    The request entity has a media type which the server or resource does
    not support.
    """
    http_status = 415
    message = _("Unsupported Media Type")


class RequestedRangeNotSatisfiable(HTTPClientError):
    """HTTP 416 - Requested Range Not Satisfiable.

    The client has asked for a portion of the file, but the server cannot
    supply that portion.
    """
    http_status = 416
    message = _("Requested Range Not Satisfiable")


class ExpectationFailed(HTTPClientError):
    """HTTP 417 - Expectation Failed.

    The server cannot meet the requirements of the Expect request-header field.
    """
    http_status = 417
    message = _("Expectation Failed")


class UnprocessableEntity(HTTPClientError):
    """HTTP 422 - Unprocessable Entity.

    The request was well-formed but was unable to be followed due to semantic
    errors.
    """
    http_status = 422
    message = _("Unprocessable Entity")


class InternalServerError(HttpServerError):
    """HTTP 500 - Internal Server Error.

    A generic error message, given when no more specific message is suitable.
    """
    http_status = 500
    message = _("Internal Server Error")


# NotImplemented is a python keyword.
class HttpNotImplemented(HttpServerError):
    """HTTP 501 - Not Implemented.

    The server either does not recognize the request method, or it lacks
    the ability to fulfill the request.
    """
    http_status = 501
    message = _("Not Implemented")


class BadGateway(HttpServerError):
    """HTTP 502 - Bad Gateway.

    The server was acting as a gateway or proxy and received an invalid
    response from the upstream server.
    """
    http_status = 502
    message = _("Bad Gateway")


class ServiceUnavailable(HttpServerError):
    """HTTP 503 - Service Unavailable.

    The server is currently unavailable.
    """
    http_status = 503
    message = _("Service Unavailable")


class GatewayTimeout(HttpServerError):
    """HTTP 504 - Gateway Timeout.

    The server was acting as a gateway or proxy and did not receive a timely
    response from the upstream server.
    """
    http_status = 504
    message = _("Gateway Timeout")


class HttpVersionNotSupported(HttpServerError):
    """HTTP 505 - HttpVersion Not Supported.

    The server does not support the HTTP protocol version used in the request.
    """
    http_status = 505
    message = _("HTTP Version Not Supported")
+
+
# _code_map contains all the classes that have http_status attribute.
# The truthiness filter deliberately excludes the abstract bases
# (HttpError and friends declare http_status = 0, which is falsy), so
# only concrete status classes are dispatchable from from_response().
_code_map = dict(
    (getattr(obj, 'http_status', None), obj)
    for name, obj in six.iteritems(vars(sys.modules[__name__]))
    if inspect.isclass(obj) and getattr(obj, 'http_status', False)
)
+
+
def from_response(response, method, url):
    """Returns an instance of :class:`HttpError` or subclass based on response.

    :param response: instance of `requests.Response` class
    :param method: HTTP method used for request
    :param url: URL used for request
    """

    req_id = response.headers.get("x-openstack-request-id")
    # NOTE(hdd) true for older versions of nova and cinder
    if not req_id:
        req_id = response.headers.get("x-compute-request-id")
    kwargs = {
        "http_status": response.status_code,
        "response": response,
        "method": method,
        "url": url,
        "request_id": req_id,
    }

    content_type = response.headers.get("Content-Type", "")
    if content_type.startswith("application/json"):
        try:
            body = response.json()
        except ValueError:
            pass
        else:
            if isinstance(body, dict):
                # Error bodies conventionally wrap the fault under a single
                # top-level key (e.g. {"badRequest": {...}}); take the value
                # of the first key.  TODO confirm multi-key bodies never occur.
                error = body.get(list(body)[0])
                if isinstance(error, dict):
                    kwargs["message"] = (error.get("message") or
                                         error.get("faultstring"))
                    kwargs["details"] = (error.get("details") or
                                         six.text_type(body))
    elif content_type.startswith("text/"):
        kwargs["details"] = response.text

    try:
        cls = _code_map[response.status_code]
    except KeyError:
        # Unknown status: fall back to the generic class for its range.
        if 500 <= response.status_code < 600:
            cls = HttpServerError
        elif 400 <= response.status_code < 500:
            cls = HTTPClientError
        else:
            cls = HttpError

    # BUG FIX: only RequestEntityTooLarge accepts a 'retry_after' keyword;
    # the original code added it for *any* response carrying a Retry-After
    # header (legal on e.g. 503 or 3xx), which made HttpError.__init__
    # raise TypeError.  Gate it on the resolved class instead.
    if ("retry-after" in response.headers and
            issubclass(cls, RequestEntityTooLarge)):
        kwargs["retry_after"] = response.headers["retry-after"]

    return cls(**kwargs)
diff --git a/client/escalatorclient/openstack/common/apiclient/utils.py b/client/escalatorclient/openstack/common/apiclient/utils.py
new file mode 100644
index 0000000..c0f612a
--- /dev/null
+++ b/client/escalatorclient/openstack/common/apiclient/utils.py
@@ -0,0 +1,100 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+########################################################################
+#
+# THIS MODULE IS DEPRECATED
+#
+# Please refer to
+# https://etherpad.openstack.org/p/kilo-escalatorclient-library-proposals for
+# the discussion leading to this deprecation.
+#
+# We recommend checking out the python-openstacksdk project
+# (https://launchpad.net/python-openstacksdk) instead.
+#
+########################################################################
+
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+import six
+
+from escalatorclient.openstack.common._i18n import _
+from escalatorclient.openstack.common.apiclient import exceptions
+
+
def find_resource(manager, name_or_id, **find_args):
    """Look for resource in a given manager.

    Used as a helper for the _find_* methods.
    Example:

    .. code-block:: python

        def _find_hypervisor(cs, hypervisor):
            #Get a hypervisor by name or ID.
            return cliutils.find_resource(cs.hypervisors, hypervisor)

    Lookup order: integer id -> UUID -> raw string id (if the manager
    allows it) -> human_id -> name attribute.  Raises CommandError when
    nothing matches or more than one entity matches.
    """
    # first try to get entity as integer id
    try:
        return manager.get(int(name_or_id))
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # now try to get entity as uuid
    try:
        # Normalize to the native string type before the UUID check
        # (bytes on Python 2, text on Python 3).
        if six.PY2:
            tmp_id = encodeutils.safe_encode(name_or_id)
        else:
            tmp_id = encodeutils.safe_decode(name_or_id)

        if uuidutils.is_uuid_like(tmp_id):
            return manager.get(tmp_id)
    except (TypeError, ValueError, exceptions.NotFound):
        pass

    # for str id which is not uuid
    if getattr(manager, 'is_alphanum_id_allowed', False):
        try:
            return manager.get(name_or_id)
        except exceptions.NotFound:
            pass

    # The outer try converts a NoUniqueMatch from either find() call
    # below into a CommandError with a disambiguation hint.
    try:
        try:
            return manager.find(human_id=name_or_id, **find_args)
        except exceptions.NotFound:
            pass

        # finally try to find entity by name
        try:
            # Resource classes may advertise a custom name attribute via
            # NAME_ATTR; default to 'name'.
            resource = getattr(manager, 'resource_class', None)
            name_attr = resource.NAME_ATTR if resource else 'name'
            kwargs = {name_attr: name_or_id}
            kwargs.update(find_args)
            return manager.find(**kwargs)
        except exceptions.NotFound:
            msg = _("No %(name)s with a name or "
                    "ID of '%(name_or_id)s' exists.") % \
                {
                    "name": manager.resource_class.__name__.lower(),
                    "name_or_id": name_or_id
                }
            raise exceptions.CommandError(msg)
    except exceptions.NoUniqueMatch:
        msg = _("Multiple %(name)s matches found for "
                "'%(name_or_id)s', use an ID to be more specific.") % \
            {
                "name": manager.resource_class.__name__.lower(),
                "name_or_id": name_or_id
            }
        raise exceptions.CommandError(msg)
diff --git a/client/escalatorclient/shell.py b/client/escalatorclient/shell.py
new file mode 100644
index 0000000..782106c
--- /dev/null
+++ b/client/escalatorclient/shell.py
@@ -0,0 +1,714 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Command-line interface to the OpenStack Images API.
+"""
+
+from __future__ import print_function
+
+import argparse
+import copy
+import getpass
+import json
+import logging
+import os
+from os.path import expanduser
+import sys
+import traceback
+
+from oslo_utils import encodeutils
+from oslo_utils import importutils
+import six.moves.urllib.parse as urlparse
+
+import escalatorclient
+from escalatorclient import _i18n
+from escalatorclient.common import utils
+from escalatorclient import exc
+
+from keystoneclient.auth.identity import v2 as v2_auth
+from keystoneclient.auth.identity import v3 as v3_auth
+from keystoneclient import discover
+from keystoneclient.openstack.common.apiclient import exceptions as ks_exc
+from keystoneclient import session
+
+osprofiler_profiler = importutils.try_import("osprofiler.profiler")
+_ = _i18n._
+
+
+class escalatorShell(object):
+
    def _append_global_identity_args(self, parser):
        """Add the shared Keystone/identity CLI options to *parser*.

        Options default to the corresponding OS_* environment variables;
        the ``--os_xxx`` underscore spellings are kept as hidden
        (SUPPRESS) aliases for backward compatibility.
        """
        # FIXME(bobt): these are global identity (Keystone) arguments which
        # should be consistent and shared by all service clients. Therefore,
        # they should be provided by python-keystoneclient. We will need to
        # refactor this code once this functionality is available in
        # python-keystoneclient. See
        #
        # https://bugs.launchpad.net/python-keystoneclient/+bug/1332337
        #
        parser.add_argument('-k', '--insecure',
                            default=False,
                            action='store_true',
                            help='Explicitly allow escalatorclient to perform '
                            '\"insecure SSL\" (https) requests. The server\'s '
                            'certificate will not be verified against any '
                            'certificate authorities. This option should '
                            'be used with caution.')

        parser.add_argument('--os-cert',
                            help='Path of certificate file to use in SSL '
                            'connection. This file can optionally be '
                            'prepended with the private key.')

        parser.add_argument('--cert-file',
                            dest='os_cert',
                            help='DEPRECATED! Use --os-cert.')

        parser.add_argument('--os-key',
                            help='Path of client key to use in SSL '
                            'connection. This option is not necessary '
                            'if your key is prepended to your cert file.')

        parser.add_argument('--key-file',
                            dest='os_key',
                            help='DEPRECATED! Use --os-key.')

        parser.add_argument('--os-cacert',
                            metavar='<ca-certificate-file>',
                            dest='os_cacert',
                            default=utils.env('OS_CACERT'),
                            help='Path of CA TLS certificate(s) used to '
                            'verify the remote server\'s certificate. '
                            'Without this option escalator looks for the '
                            'default system CA certificates.')

        parser.add_argument('--ca-file',
                            dest='os_cacert',
                            help='DEPRECATED! Use --os-cacert.')

        parser.add_argument('--os-username',
                            default=utils.env('OS_USERNAME'),
                            help='Defaults to env[OS_USERNAME].')

        parser.add_argument('--os_username',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-user-id',
                            default=utils.env('OS_USER_ID'),
                            help='Defaults to env[OS_USER_ID].')

        parser.add_argument('--os-user-domain-id',
                            default=utils.env('OS_USER_DOMAIN_ID'),
                            help='Defaults to env[OS_USER_DOMAIN_ID].')

        parser.add_argument('--os-user-domain-name',
                            default=utils.env('OS_USER_DOMAIN_NAME'),
                            help='Defaults to env[OS_USER_DOMAIN_NAME].')

        parser.add_argument('--os-project-id',
                            default=utils.env('OS_PROJECT_ID'),
                            help='Another way to specify tenant ID. '
                            'This option is mutually exclusive with '
                            ' --os-tenant-id. '
                            'Defaults to env[OS_PROJECT_ID].')

        parser.add_argument('--os-project-name',
                            default=utils.env('OS_PROJECT_NAME'),
                            help='Another way to specify tenant name. '
                            'This option is mutually exclusive with '
                            ' --os-tenant-name. '
                            'Defaults to env[OS_PROJECT_NAME].')

        parser.add_argument('--os-project-domain-id',
                            default=utils.env('OS_PROJECT_DOMAIN_ID'),
                            help='Defaults to env[OS_PROJECT_DOMAIN_ID].')

        parser.add_argument('--os-project-domain-name',
                            default=utils.env('OS_PROJECT_DOMAIN_NAME'),
                            help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')

        parser.add_argument('--os-password',
                            default=utils.env('OS_PASSWORD'),
                            help='Defaults to env[OS_PASSWORD].')

        parser.add_argument('--os_password',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-tenant-id',
                            default=utils.env('OS_TENANT_ID'),
                            help='Defaults to env[OS_TENANT_ID].')

        parser.add_argument('--os_tenant_id',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-tenant-name',
                            default=utils.env('OS_TENANT_NAME'),
                            help='Defaults to env[OS_TENANT_NAME].')

        parser.add_argument('--os_tenant_name',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-auth-url',
                            default=utils.env('OS_AUTH_URL'),
                            help='Defaults to env[OS_AUTH_URL].')

        parser.add_argument('--os_auth_url',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-region-name',
                            default=utils.env('OS_REGION_NAME'),
                            help='Defaults to env[OS_REGION_NAME].')

        parser.add_argument('--os_region_name',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-auth-token',
                            default=utils.env('OS_AUTH_TOKEN'),
                            help='Defaults to env[OS_AUTH_TOKEN].')

        parser.add_argument('--os_auth_token',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-service-type',
                            default=utils.env('OS_SERVICE_TYPE'),
                            help='Defaults to env[OS_SERVICE_TYPE].')

        parser.add_argument('--os_service_type',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-endpoint-type',
                            default=utils.env('OS_ENDPOINT_TYPE'),
                            help='Defaults to env[OS_ENDPOINT_TYPE].')

        parser.add_argument('--os_endpoint_type',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-endpoint',
                            default=utils.env('OS_ENDPOINT'),
                            help='Defaults to env[OS_ENDPOINT].')

        parser.add_argument('--os_endpoint',
                            help=argparse.SUPPRESS)
+
    def get_base_parser(self):
        """Build and return the top-level argument parser.

        Registers the global (non-subcommand) options, the optional
        osprofiler flag when the library is importable, and the shared
        identity arguments.
        """
        parser = argparse.ArgumentParser(
            prog='escalator',
            description=__doc__.strip(),
            epilog='See "escalator help COMMAND" '
                   'for help on a specific command.',
            add_help=False,
            formatter_class=HelpFormatter,
        )

        # Global arguments
        parser.add_argument('-h', '--help',
                            action='store_true',
                            help=argparse.SUPPRESS,
                            )

        parser.add_argument('-d', '--debug',
                            default=bool(utils.env('ESCALATORCLIENT_DEBUG')),
                            action='store_true',
                            help='Defaults to env[ESCALATORCLIENT_DEBUG].')

        parser.add_argument('-v', '--verbose',
                            default=False, action="store_true",
                            help="Print more verbose output")

        parser.add_argument('--get-schema',
                            default=False, action="store_true",
                            dest='get_schema',
                            help='Ignores cached copy and forces retrieval '
                                 'of schema that generates portions of the '
                                 'help text. Ignored with API version 1.')

        parser.add_argument('--timeout',
                            default=600,
                            help='Number of seconds to wait for a response')

        parser.add_argument('--no-ssl-compression',
                            dest='ssl_compression',
                            default=True, action='store_false',
                            help='Disable SSL compression when using https.')

        parser.add_argument('-f', '--force',
                            dest='force',
                            default=False, action='store_true',
                            help='Prevent select actions from requesting '
                                 'user confirmation.')

        parser.add_argument('--os-image-url',
                            default=utils.env('OS_IMAGE_URL'),
                            help=('Defaults to env[OS_IMAGE_URL]. '
                                  'If the provided image url contains '
                                  'a version number and '
                                  '`--os-image-api-version` is omitted '
                                  'the version of the URL will be picked as '
                                  'the image api version to use.'))

        parser.add_argument('--os_image_url',
                            help=argparse.SUPPRESS)

        parser.add_argument('--os-image-api-version',
                            default=utils.env('OS_IMAGE_API_VERSION',
                                              default=None),
                            help='Defaults to env[OS_IMAGE_API_VERSION] or 1.')

        parser.add_argument('--os_image_api_version',
                            help=argparse.SUPPRESS)

        # Only offer --profile when the osprofiler library is installed.
        if osprofiler_profiler:
            parser.add_argument('--profile',
                                metavar='HMAC_KEY',
                                help='HMAC key to use for encrypting context '
                                'data for performance profiling of operation. '
                                'This key should be the value of HMAC key '
                                'configured in osprofiler middleware in '
                                'escalator, it is specified in paste '
                                'configuration file at '
                                '/etc/escalator/api-paste.ini and '
                                '/etc/escalator/registry-paste.ini. '
                                'Without key '
                                'the profiling will not be triggered even '
                                'if osprofiler is enabled on server side.')

        # FIXME(bobt): this method should come from python-keystoneclient
        self._append_global_identity_args(parser)

        return parser
+
    def get_subcommand_parser(self, version):
        """Return the base parser extended with version-specific subcommands.

        Imports ``escalatorclient.v<version>.shell``, registers its
        ``do_*`` actions along with this object's own, and adds the
        hidden bash_completion helper.  Exits the process when *version*
        has no shell module.
        """
        parser = self.get_base_parser()

        self.subcommands = {}
        subparsers = parser.add_subparsers(metavar='<subcommand>')
        try:
            submodule = utils.import_versioned_module(version, 'shell')
        except ImportError:
            print('"%s" is not a supported API version. Example '
                  'values are "1" or "2".' % version)
            utils.exit()

        self._find_actions(subparsers, submodule)
        self._find_actions(subparsers, self)

        self._add_bash_completion_subparser(subparsers)

        return parser
+
+ def _find_actions(self, subparsers, actions_module):
+ for attr in (a for a in dir(actions_module) if a.startswith('do_')):
+ # I prefer to be hypen-separated instead of underscores.
+ command = attr[3:].replace('_', '-')
+ callback = getattr(actions_module, attr)
+ desc = callback.__doc__ or ''
+ help = desc.strip().split('\n')[0]
+ arguments = getattr(callback, 'arguments', [])
+
+ subparser = subparsers.add_parser(command,
+ help=help,
+ description=desc,
+ add_help=False,
+ formatter_class=HelpFormatter
+ )
+ subparser.add_argument('-h', '--help',
+ action='help',
+ help=argparse.SUPPRESS,
+ )
+ self.subcommands[command] = subparser
+ for (args, kwargs) in arguments:
+ subparser.add_argument(*args, **kwargs)
+ subparser.set_defaults(func=callback)
+
    def _add_bash_completion_subparser(self, subparsers):
        # Hidden helper subcommand used by shell-completion scripts;
        # dispatches to do_bash_completion.
        subparser = subparsers.add_parser('bash_completion',
                                          add_help=False,
                                          formatter_class=HelpFormatter)
        self.subcommands['bash_completion'] = subparser
        subparser.set_defaults(func=self.do_bash_completion)
+
+ def _get_image_url(self, args):
+ """Translate the available url-related options into a single string.
+
+ Return the endpoint that should be used to talk to escalator if a
+ clear decision can be made. Otherwise, return None.
+ """
+ if args.os_image_url:
+ return args.os_image_url
+ else:
+ return None
+
    def _discover_auth_versions(self, session, auth_url):
        """Return ``(v2_auth_url, v3_auth_url)`` supported by the server.

        Tries the Keystone discovery API first; if the server does not
        support it, falls back to guessing from the version segment of
        *auth_url* itself.  Either element of the tuple may be None.

        :raises exc.CommandError: when the version cannot be determined
            either way.
        """
        # discover the API versions the server is supporting based on the
        # given URL
        v2_auth_url = None
        v3_auth_url = None
        try:
            ks_discover = discover.Discover(session=session, auth_url=auth_url)
            v2_auth_url = ks_discover.url_for('2.0')
            v3_auth_url = ks_discover.url_for('3.0')
        except ks_exc.ClientException as e:
            # Identity service may not support discover API version.
            # Lets trying to figure out the API version from the original URL.
            url_parts = urlparse.urlparse(auth_url)
            (scheme, netloc, path, params, query, fragment) = url_parts
            path = path.lower()
            if path.startswith('/v3'):
                v3_auth_url = auth_url
            elif path.startswith('/v2'):
                v2_auth_url = auth_url
            else:
                # not enough information to determine the auth version
                msg = ('Unable to determine the Keystone version '
                       'to authenticate with using the given '
                       'auth_url. Identity service may not support API '
                       'version discovery. Please provide a versioned '
                       'auth_url instead. error=%s') % (e)
                raise exc.CommandError(msg)

        return (v2_auth_url, v3_auth_url)
+
+ def _get_keystone_session(self, **kwargs):
+ ks_session = session.Session.construct(kwargs)
+
+ # discover the supported keystone versions using the given auth url
+ auth_url = kwargs.pop('auth_url', None)
+ (v2_auth_url, v3_auth_url) = self._discover_auth_versions(
+ session=ks_session,
+ auth_url=auth_url)
+
+ # Determine which authentication plugin to use. First inspect the
+ # auth_url to see the supported version. If both v3 and v2 are
+ # supported, then use the highest version if possible.
+ user_id = kwargs.pop('user_id', None)
+ username = kwargs.pop('username', None)
+ password = kwargs.pop('password', None)
+ user_domain_name = kwargs.pop('user_domain_name', None)
+ user_domain_id = kwargs.pop('user_domain_id', None)
+ # project and tenant can be used interchangeably
+ project_id = (kwargs.pop('project_id', None) or
+ kwargs.pop('tenant_id', None))
+ project_name = (kwargs.pop('project_name', None) or
+ kwargs.pop('tenant_name', None))
+ project_domain_id = kwargs.pop('project_domain_id', None)
+ project_domain_name = kwargs.pop('project_domain_name', None)
+ auth = None
+
+ use_domain = (user_domain_id or
+ user_domain_name or
+ project_domain_id or
+ project_domain_name)
+ use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
+ use_v2 = v2_auth_url and not use_domain
+
+ if use_v3:
+ auth = v3_auth.Password(
+ v3_auth_url,
+ user_id=user_id,
+ username=username,
+ password=password,
+ user_domain_id=user_domain_id,
+ user_domain_name=user_domain_name,
+ project_id=project_id,
+ project_name=project_name,
+ project_domain_id=project_domain_id,
+ project_domain_name=project_domain_name)
+ elif use_v2:
+ auth = v2_auth.Password(
+ v2_auth_url,
+ username,
+ password,
+ tenant_id=project_id,
+ tenant_name=project_name)
+ else:
+ # if we get here it means domain information is provided
+ # (caller meant to use Keystone V3) but the auth url is
+ # actually Keystone V2. Obviously we can't authenticate a V3
+ # user using V2.
+ exc.CommandError("Credential and auth_url mismatch. The given "
+ "auth_url is using Keystone V2 endpoint, which "
+ "may not able to handle Keystone V3 credentials. "
+ "Please provide a correct Keystone V3 auth_url.")
+
+ ks_session.auth = auth
+ return ks_session
+
    def _get_endpoint_and_token(self, args, force_auth=False):
        """Resolve the service endpoint and auth token for this invocation.

        When a token and image URL are already supplied (and the action
        does not force authentication), they are used directly; otherwise
        a Keystone session is built from the CLI/env credentials and the
        endpoint is looked up in the service catalog.

        :param args: parsed argparse namespace of global options.
        :param force_auth: always run the Keystone flow, even when a
            token/endpoint pair was provided.
        :returns: tuple of (endpoint, token).
        :raises exc.CommandError: when required credentials are missing.
        """
        image_url = self._get_image_url(args)
        auth_token = args.os_auth_token

        auth_reqd = force_auth or\
            (utils.is_authentication_required(args.func) and not
             (auth_token and image_url))

        if not auth_reqd:
            endpoint = image_url
            token = args.os_auth_token
        else:

            if not args.os_username:
                raise exc.CommandError(
                    _("You must provide a username via"
                      " either --os-username or "
                      "env[OS_USERNAME]"))

            if not args.os_password:
                # No password, If we've got a tty, try prompting for it
                if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty():
                    # Check for Ctl-D
                    try:
                        args.os_password = getpass.getpass('OS Password: ')
                    except EOFError:
                        pass
                # No password because we didn't have a tty or the
                # user Ctl-D when prompted.
                if not args.os_password:
                    raise exc.CommandError(
                        _("You must provide a password via "
                          "either --os-password, "
                          "env[OS_PASSWORD], "
                          "or prompted response"))

            # Validate password flow auth: a project may be identified by
            # tenant id/name, by project id, or by project name plus a
            # domain qualifier.
            project_info = (
                args.os_tenant_name or args.os_tenant_id or (
                    args.os_project_name and (
                        args.os_project_domain_name or
                        args.os_project_domain_id
                    )
                ) or args.os_project_id
            )

            if not project_info:
                # tenant is deprecated in Keystone v3. Use the latest
                # terminology instead.
                raise exc.CommandError(
                    _("You must provide a project_id or project_name ("
                      "with project_domain_name or project_domain_id) "
                      "via "
                      "  --os-project-id (env[OS_PROJECT_ID])"
                      "  --os-project-name (env[OS_PROJECT_NAME]),"
                      "  --os-project-domain-id "
                      "(env[OS_PROJECT_DOMAIN_ID])"
                      "  --os-project-domain-name "
                      "(env[OS_PROJECT_DOMAIN_NAME])"))

            if not args.os_auth_url:
                raise exc.CommandError(
                    _("You must provide an auth url via"
                      " either --os-auth-url or "
                      "via env[OS_AUTH_URL]"))

            kwargs = {
                'auth_url': args.os_auth_url,
                'username': args.os_username,
                'user_id': args.os_user_id,
                'user_domain_id': args.os_user_domain_id,
                'user_domain_name': args.os_user_domain_name,
                'password': args.os_password,
                'tenant_name': args.os_tenant_name,
                'tenant_id': args.os_tenant_id,
                'project_name': args.os_project_name,
                'project_id': args.os_project_id,
                'project_domain_name': args.os_project_domain_name,
                'project_domain_id': args.os_project_domain_id,
                'insecure': args.insecure,
                'cacert': args.os_cacert,
                'cert': args.os_cert,
                'key': args.os_key
            }
            ks_session = self._get_keystone_session(**kwargs)
            # An explicitly supplied token wins over a freshly issued one.
            token = args.os_auth_token or ks_session.get_token()

            endpoint_type = args.os_endpoint_type or 'public'
            service_type = args.os_service_type or 'image'
            endpoint = args.os_image_url or ks_session.get_endpoint(
                service_type=service_type,
                interface=endpoint_type,
                region_name=args.os_region_name)

        return endpoint, token
+
+    def _get_versioned_client(self, api_version, args, force_auth=False):
+        # NOTE(review): authentication is bypassed here — the endpoint is
+        # taken directly from --os-endpoint and no keystone token is
+        # obtained via _get_endpoint_and_token; confirm this is intended.
+        endpoint = args.os_endpoint
+        # Build HTTPClient options from the parsed command-line args.
+        kwargs = {
+            # NOTE(review): 'token' is deliberately omitted — unauthenticated.
+            'insecure': args.insecure,
+            'timeout': args.timeout,
+            'cacert': args.os_cacert,
+            'cert': args.os_cert,
+            'key': args.os_key,
+            'ssl_compression': args.ssl_compression
+        }
+        client = escalatorclient.Client(api_version, endpoint, **kwargs)
+        return client
+
+ def _cache_schemas(self, options, home_dir='~/.escalatorclient'):
+ homedir = expanduser(home_dir)
+ if not os.path.exists(homedir):
+ os.makedirs(homedir)
+
+ resources = ['image', 'metadefs/namespace', 'metadefs/resource_type']
+ schema_file_paths = [homedir + os.sep + x + '_schema.json'
+ for x in ['image', 'namespace', 'resource_type']]
+
+ client = None
+ for resource, schema_file_path in zip(resources, schema_file_paths):
+ if (not os.path.exists(schema_file_path)) or options.get_schema:
+ try:
+ if not client:
+ client = self._get_versioned_client('2', options,
+ force_auth=True)
+ schema = client.schemas.get(resource)
+
+ with open(schema_file_path, 'w') as f:
+ f.write(json.dumps(schema.raw()))
+ except Exception:
+ # NOTE(esheffield) do nothing here, we'll get a message
+ # later if the schema is missing
+ pass
+
+ def main(self, argv):
+ # Parse args once to find version
+
+ # NOTE(flepied) Under Python3, parsed arguments are removed
+ # from the list so make a copy for the first parsing
+ base_argv = copy.deepcopy(argv)
+ parser = self.get_base_parser()
+ (options, args) = parser.parse_known_args(base_argv)
+
+ try:
+ # NOTE(flaper87): Try to get the version from the
+ # image-url first. If no version was specified, fallback
+ # to the api-image-version arg. If both of these fail then
+ # fallback to the minimum supported one and let keystone
+ # do the magic.
+ endpoint = self._get_image_url(options)
+ endpoint, url_version = utils.strip_version(endpoint)
+ except ValueError:
+            # NOTE(flaper87): ValueError is raised if no endpoint is provided
+ url_version = None
+
+ # build available subcommands based on version
+ try:
+ api_version = int(options.os_image_api_version or url_version or 1)
+ except ValueError:
+ print("Invalid API version parameter")
+ utils.exit()
+
+ if api_version == 2:
+ self._cache_schemas(options)
+
+ subcommand_parser = self.get_subcommand_parser(api_version)
+ self.parser = subcommand_parser
+
+ # Handle top-level --help/-h before attempting to parse
+ # a command off the command line
+ if options.help or not argv:
+ self.do_help(options)
+ return 0
+
+ # Parse args again and call whatever callback was selected
+ args = subcommand_parser.parse_args(argv)
+
+ # Short-circuit and deal with help command right away.
+ if args.func == self.do_help:
+ self.do_help(args)
+ return 0
+ elif args.func == self.do_bash_completion:
+ self.do_bash_completion(args)
+ return 0
+
+ LOG = logging.getLogger('escalatorclient')
+ LOG.addHandler(logging.StreamHandler())
+ LOG.setLevel(logging.DEBUG if args.debug else logging.INFO)
+
+ profile = osprofiler_profiler and options.profile
+ if profile:
+ osprofiler_profiler.init(options.profile)
+
+ client = self._get_versioned_client(api_version, args,
+ force_auth=False)
+
+ try:
+ args.func(client, args)
+ except exc.Unauthorized:
+ raise exc.CommandError("Invalid OpenStack Identity credentials.")
+ except Exception:
+ # NOTE(kragniz) Print any exceptions raised to stderr if the
+ # --debug flag is set
+ if args.debug:
+ traceback.print_exc()
+ raise
+ finally:
+ if profile:
+ trace_id = osprofiler_profiler.get().get_base_id()
+ print("Profiling trace ID: %s" % trace_id)
+ print("To display trace use next command:\n"
+ "osprofiler trace show --html %s " % trace_id)
+
+ @utils.arg('command', metavar='<subcommand>', nargs='?',
+ help='Display help for <subcommand>.')
+ def do_help(self, args):
+ """
+ Display help about this program or one of its subcommands.
+ """
+ if getattr(args, 'command', None):
+ if args.command in self.subcommands:
+ self.subcommands[args.command].print_help()
+ else:
+ raise exc.CommandError("'%s' is not a valid subcommand" %
+ args.command)
+ else:
+ self.parser.print_help()
+
+ def do_bash_completion(self, _args):
+ """Prints arguments for bash_completion.
+
+ Prints all of the commands and options to stdout so that the
+ escalator.bash_completion script doesn't have to hard code them.
+ """
+ commands = set()
+ options = set()
+ for sc_str, sc in self.subcommands.items():
+ commands.add(sc_str)
+ for option in sc._optionals._option_string_actions.keys():
+ options.add(option)
+
+ commands.remove('bash_completion')
+ commands.remove('bash-completion')
+ print(' '.join(commands | options))
+
+
+class HelpFormatter(argparse.HelpFormatter):
+
+ def start_section(self, heading):
+ # Title-case the headings
+ heading = '%s%s' % (heading[0].upper(), heading[1:])
+ super(HelpFormatter, self).start_section(heading)
+
+
+def main():
+ try:
+ escalatorShell().main(map(encodeutils.safe_decode, sys.argv[1:]))
+ except KeyboardInterrupt:
+ utils.exit('... terminating escalator client', exit_code=130)
+ except Exception as e:
+ utils.exit(utils.exception_to_str(e))
diff --git a/client/escalatorclient/v1/__init__.py b/client/escalatorclient/v1/__init__.py
new file mode 100644
index 0000000..cd35765
--- /dev/null
+++ b/client/escalatorclient/v1/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from escalatorclient.v1.client import Client # noqa
diff --git a/client/escalatorclient/v1/client.py b/client/escalatorclient/v1/client.py
new file mode 100644
index 0000000..f74300f
--- /dev/null
+++ b/client/escalatorclient/v1/client.py
@@ -0,0 +1,36 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from escalatorclient.common import http
+from escalatorclient.common import utils
+from escalatorclient.v1.versions import VersionManager
+
+
+class Client(object):
+ """Client for the escalator v1 API.
+
+ :param string endpoint: A user-supplied endpoint URL for the escalator
+ service.
+ :param string token: Token for authentication.
+ :param integer timeout: Allows customization of the timeout for client
+ http requests. (optional)
+ """
+
+ def __init__(self, endpoint, *args, **kwargs):
+ """Initialize a new client for the escalator v1 API."""
+ endpoint, version = utils.strip_version(endpoint)
+ self.version = version or 1.0
+ self.http_client = http.HTTPClient(endpoint, *args, **kwargs)
+ self.node = VersionManager(self.http_client)
diff --git a/client/escalatorclient/v1/shell.py b/client/escalatorclient/v1/shell.py
new file mode 100644
index 0000000..f926a8a
--- /dev/null
+++ b/client/escalatorclient/v1/shell.py
@@ -0,0 +1,178 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+
+import copy
+import functools
+from oslo_utils import encodeutils
+from oslo_utils import strutils
+import escalatorclient.v1.versions
+from escalatorclient.common import utils
+from escalatorclient import exc
+
+_bool_strict = functools.partial(strutils.bool_from_string, strict=True)
+
+
+def _escalator_show(escalator, max_column_width=80):
+ info = copy.deepcopy(escalator._info)
+ exclusive_field = ('deleted', 'deleted_at')
+ for field in exclusive_field:
+ if field in info:
+ info.pop(field)
+ utils.print_dict(info, max_column_width=max_column_width)
+
+
+@utils.arg('--type', metavar='<TYPE>',
+ help='Type of escalator version, supported type are "internal": '
+ 'the internal version of escalator.')
+def do_version(dc, args):
+ """Get version of escalator."""
+ fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
+
+ # Filter out values we can't use
+ VERSION_PARAMS = escalatorclient.v1.version.VERSION_PARAMS
+ fields = dict(filter(lambda x: x[0] in VERSION_PARAMS, fields.items()))
+ version = dc.version.version(**fields)
+ _escalator_show(version)
+
+
+@utils.arg('id', metavar='<ID>',
+ help='Filter version to those that have this id.')
+def do_version_detail(dc, args):
+ """Get backend_types of escalator."""
+ version = utils.find_resource(dc.versions, args.id)
+ _escalator_show(version)
+
+
+@utils.arg('name', metavar='<NAME>',
+ help='name of version.')
+@utils.arg('type', metavar='<TYPE>',
+ help='version type.eg redhat7.0...')
+@utils.arg('--size', metavar='<SIZE>',
+ help='size of the version file.')
+@utils.arg('--checksum', metavar='<CHECKSUM>',
+ help='md5 of version file')
+@utils.arg('--version', metavar='<VERSION>',
+ help='version number of version file')
+@utils.arg('--description', metavar='<DESCRIPTION>',
+ help='description of version file')
+@utils.arg('--status', metavar='<STATUS>',
+ help='version file status.default:init')
+def do_version_add(dc, args):
+ """Add a version."""
+
+ fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
+
+ # Filter out values we can't use
+ CREATE_PARAMS = escalatorclient.v1.versions.CREATE_PARAMS
+ fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
+
+ version = dc.versions.add(**fields)
+ _escalator_show(version)
+
+
+@utils.arg('id', metavar='<ID>',
+ help='ID of versions.')
+@utils.arg('--name', metavar='<NAME>',
+ help='name of version.')
+@utils.arg('--type', metavar='<TYPE>',
+ help='version type.eg redhat7.0...')
+@utils.arg('--size', metavar='<SIZE>',
+ help='size of the version file.')
+@utils.arg('--checksum', metavar='<CHECKSUM>',
+ help='md5 of version file')
+@utils.arg('--version', metavar='<VERSION>',
+ help='version number of version file')
+@utils.arg('--description', metavar='<DESCRIPTION>',
+ help='description of version file')
+@utils.arg('--status', metavar='<STATUS>',
+ help='version file status.default:init')
+def do_version_update(dc, args):
+ """Add a version."""
+
+ fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
+
+ # Filter out values we can't use
+ CREATE_PARAMS = escalatorclient.v1.versions.CREATE_PARAMS
+ fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items()))
+ version_id = fields.get('id', None)
+ version = dc.versions.update(version_id, **fields)
+ _escalator_show(version)
+
+
+@utils.arg('id', metavar='<ID>', nargs='+',
+ help='ID of versions.')
+def do_version_delete(dc, args):
+ """Delete specified template(s)."""
+ fields = dict(filter(lambda x: x[1] is not None, vars(args).items()))
+ versions = fields.get('id', None)
+ for version in versions:
+ try:
+ if args.verbose:
+ print('Requesting version delete for %s ...' %
+ encodeutils.safe_decode(version), end=' ')
+ dc.versions.delete(version)
+ if args.verbose:
+ print('[Done]')
+ except exc.HTTPException as e:
+ if args.verbose:
+ print('[Fail]')
+ print('%s: Unable to delete version %s' % (e, version))
+
+
+@utils.arg('--name', metavar='<NAME>',
+ help='Filter version to those that have this name.')
+@utils.arg('--status', metavar='<STATUS>',
+ help='Filter version status.')
+@utils.arg('--type', metavar='<type>',
+ help='Filter by type.')
+@utils.arg('--version', metavar='<version>',
+ help='Filter by version number.')
+@utils.arg('--page-size', metavar='<SIZE>', default=None, type=int,
+ help='Number to request in each paginated request.')
+@utils.arg('--sort-key', default='name',
+ choices=escalatorclient.v1.versions.SORT_KEY_VALUES,
+ help='Sort version list by specified field.')
+@utils.arg('--sort-dir', default='asc',
+ choices=escalatorclient.v1.versions.SORT_DIR_VALUES,
+ help='Sort version list in specified direction.')
+def do_version_list(dc, args):
+ """List hosts you can access."""
+ filter_keys = ['name', 'type', 'status', 'version']
+ filter_items = [(key, getattr(args, key)) for key in filter_keys]
+ filters = dict([item for item in filter_items if item[1] is not None])
+
+ kwargs = {'filters': filters}
+ if args.page_size is not None:
+ kwargs['page_size'] = args.page_size
+
+ kwargs['sort_key'] = args.sort_key
+ kwargs['sort_dir'] = args.sort_dir
+
+ versions = dc.versions.list(**kwargs)
+
+ columns = ['ID', 'NAME', 'TYPE', 'VERSION', 'size',
+ 'checksum', 'description', 'status', 'VERSION_PATCH']
+
+ utils.print_list(versions, columns)
+
+
+@utils.arg('id', metavar='<ID>',
+ help='Filter version patch to those that have this id.')
+def do_version_patch_detail(dc, args):
+ """Get version_patch of escalator."""
+ version = utils.find_resource(dc.version_patchs, args.id)
+ _escalator_show(version)
diff --git a/client/escalatorclient/v1/versions.py b/client/escalatorclient/v1/versions.py
new file mode 100644
index 0000000..f54ea23
--- /dev/null
+++ b/client/escalatorclient/v1/versions.py
@@ -0,0 +1,294 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo_utils import encodeutils
+from oslo_utils import strutils
+import six
+import six.moves.urllib.parse as urlparse
+
+from escalatorclient.common import utils
+from escalatorclient.openstack.common.apiclient import base
+
+CREATE_PARAMS = ('id', 'name', 'description', 'type', 'version', 'size',
+ 'checksum', 'status', 'os_status', 'version_patch')
+
+DEFAULT_PAGE_SIZE = 200
+VERSION_PARAMS = ('type')
+SORT_DIR_VALUES = ('asc', 'desc')
+SORT_KEY_VALUES = (
+ 'name', 'id', 'cluster_id', 'created_at', 'updated_at', 'status')
+
+OS_REQ_ID_HDR = 'x-openstack-request-id'
+
+
+class Version(base.Resource):
+
+ def __repr__(self):
+ return "<Version %s>" % self._info
+
+ def update(self, **fields):
+ self.manager.update(self, **fields)
+
+ def delete(self, **kwargs):
+ return self.manager.delete(self)
+
+ def data(self, **kwargs):
+ return self.manager.data(self, **kwargs)
+
+
+class VersionManager(base.ManagerWithFind):
+ resource_class = Version
+
+ def _list(self, url, response_key, obj_class=None, body=None):
+ resp, body = self.client.get(url)
+
+ if obj_class is None:
+ obj_class = self.resource_class
+
+ data = body[response_key]
+ return ([obj_class(self, res, loaded=True) for res in data if res],
+ resp)
+
+ def _version_meta_from_headers(self, headers):
+ meta = {'properties': {}}
+ safe_decode = encodeutils.safe_decode
+ for key, value in six.iteritems(headers):
+ value = safe_decode(value, incoming='utf-8')
+ if key.startswith('x-image-meta-property-'):
+ _key = safe_decode(key[22:], incoming='utf-8')
+ meta['properties'][_key] = value
+ elif key.startswith('x-image-meta-'):
+ _key = safe_decode(key[13:], incoming='utf-8')
+ meta[_key] = value
+
+ for key in ['is_public', 'protected', 'deleted']:
+ if key in meta:
+ meta[key] = strutils.bool_from_string(meta[key])
+
+ return self._format_version_meta_for_user(meta)
+
+ def _version_meta_to_headers(self, fields):
+ headers = {}
+ fields_copy = copy.deepcopy(fields)
+ for key, value in six.iteritems(fields_copy):
+ headers['%s' % key] = utils.to_str(value)
+ return headers
+
+ @staticmethod
+ def _format_version_meta_for_user(meta):
+ for key in ['size', 'min_ram', 'min_disk']:
+ if key in meta:
+ try:
+ meta[key] = int(meta[key]) if meta[key] else 0
+ except ValueError:
+ pass
+ return meta
+
+ def get(self, version, **kwargs):
+ """Get the metadata for a specific version.
+
+ :param version: image object or id to look up
+ :rtype: :class:`version`
+ """
+ version_id = base.getid(version)
+ resp, body = self.client.get('/v1/versions/%s'
+ % urlparse.quote(str(version_id)))
+ # meta = self._version_meta_from_headers(resp.headers)
+ return_request_id = kwargs.get('return_req_id', None)
+ if return_request_id is not None:
+ return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+ # return version(self, meta)
+ return Version(self, self._format_version_meta_for_user(
+ body['version']))
+
+ def _build_params(self, parameters):
+ params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)}
+
+ if 'marker' in parameters:
+ params['marker'] = parameters['marker']
+
+ sort_key = parameters.get('sort_key')
+ if sort_key is not None:
+ if sort_key in SORT_KEY_VALUES:
+ params['sort_key'] = sort_key
+ else:
+ raise ValueError('sort_key must be one of the following: %s.'
+ % ', '.join(SORT_KEY_VALUES))
+
+ sort_dir = parameters.get('sort_dir')
+ if sort_dir is not None:
+ if sort_dir in SORT_DIR_VALUES:
+ params['sort_dir'] = sort_dir
+ else:
+ raise ValueError('sort_dir must be one of the following: %s.'
+ % ', '.join(SORT_DIR_VALUES))
+
+ filters = parameters.get('filters', {})
+ params.update(filters)
+
+ return params
+
+ def list(self, **kwargs):
+ """Get a list of versions.
+
+ :param page_size: number of items to request in each paginated request
+ :param limit: maximum number of versions to return
+ :param marker:begin returning versions that appear later in version
+ list than that represented by this version id
+ :param filters: dict of direct comparison filters that mimics the
+ structure of an version object
+ :param return_request_id: If an empty list is provided, populate this
+ list with the request ID value from the header
+ x-openstack-request-id
+ :rtype: list of :class:`version`
+ """
+ absolute_limit = kwargs.get('limit')
+ page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE)
+
+ def paginate(qp, return_request_id=None):
+ for param, value in six.iteritems(qp):
+ if isinstance(value, six.string_types):
+ # Note(flaper87) Url encoding should
+ # be moved inside http utils, at least
+ # shouldn't be here.
+ #
+ # Making sure all params are str before
+ # trying to encode them
+ qp[param] = encodeutils.safe_decode(value)
+
+ url = '/v1/versions?%s' % urlparse.urlencode(qp)
+ versions, resp = self._list(url, "versions")
+
+ if return_request_id is not None:
+ return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+ for version in versions:
+ yield version
+
+ return_request_id = kwargs.get('return_req_id', None)
+
+ params = self._build_params(kwargs)
+
+ seen = 0
+ while True:
+ seen_last_page = 0
+ filtered = 0
+ for version in paginate(params, return_request_id):
+ last_version = version.id
+
+ if (absolute_limit is not None and
+ seen + seen_last_page >= absolute_limit):
+ # Note(kragniz): we've seen enough images
+ return
+ else:
+ seen_last_page += 1
+ yield version
+
+ seen += seen_last_page
+
+ if seen_last_page + filtered == 0:
+ # Note(kragniz): we didn't get any versions in the last page
+ return
+
+ if absolute_limit is not None and seen >= absolute_limit:
+ # Note(kragniz): reached the limit of versions to return
+ return
+
+ if page_size and seen_last_page + filtered < page_size:
+ # Note(kragniz): we've reached the last page of the versions
+ return
+
+ # Note(kragniz): there are more versions to come
+ params['marker'] = last_version
+ seen_last_page = 0
+
+ def add(self, **kwargs):
+ """Add a version
+
+ TODO(bcwaldon): document accepted params
+ """
+
+ fields = {}
+ for field in kwargs:
+ if field in CREATE_PARAMS:
+ fields[field] = kwargs[field]
+ elif field == 'return_req_id':
+ continue
+ else:
+ msg = 'create() got an unexpected keyword argument \'%s\''
+ raise TypeError(msg % field)
+
+ hdrs = self._version_meta_to_headers(fields)
+
+ resp, body = self.client.post('/v1/versions',
+ headers=None,
+ data=hdrs)
+ return_request_id = kwargs.get('return_req_id', None)
+ if return_request_id is not None:
+ return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+ return Version(self, self._format_version_meta_for_user(
+ body['version']))
+
+ def delete(self, version, **kwargs):
+ """Delete an version."""
+ url = "/v1/versions/%s" % base.getid(version)
+ resp, body = self.client.delete(url)
+ return_request_id = kwargs.get('return_req_id', None)
+ if return_request_id is not None:
+ return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+ def update(self, version, **kwargs):
+ """Update an version
+
+ TODO(bcwaldon): document accepted params
+ """
+ hdrs = {}
+ fields = {}
+ for field in kwargs:
+ if field in CREATE_PARAMS:
+ fields[field] = kwargs[field]
+ elif field == 'return_req_id':
+ continue
+ hdrs.update(self._version_meta_to_headers(fields))
+
+ url = '/v1/versions/%s' % base.getid(version)
+ resp, body = self.client.put(url, headers=None, data=hdrs)
+ return_request_id = kwargs.get('return_req_id', None)
+ if return_request_id is not None:
+ return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+ return Version(self, self._format_version_meta_for_user(
+ body['version_meta']))
+
+ def version(self, **kwargs):
+ """Get internal or external version of escalator.
+
+ TODO(bcwaldon): document accepted params
+ """
+ fields = {}
+ for field in kwargs:
+ if field in VERSION_PARAMS:
+ fields[field] = kwargs[field]
+ else:
+ msg = 'install() got an unexpected keyword argument \'%s\''
+ raise TypeError(msg % field)
+
+ url = '/v1/version'
+ hdrs = self._restore_meta_to_headers(fields)
+ resp, body = self.client.post(url, headers=None, data=hdrs)
+ return Version(self, body)
diff --git a/client/pylintrc b/client/pylintrc
new file mode 100644
index 0000000..6b073fd
--- /dev/null
+++ b/client/pylintrc
@@ -0,0 +1,27 @@
+[Messages Control]
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+# W0622: Redefining id is fine.
+disable-msg=W0511,W0142,W0622
+
+[Basic]
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Module names matching nova-* are ok (files in bin/)
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
+
+[Design]
+max-public-methods=100
+min-public-methods=0
+max-args=6
diff --git a/client/requirements.txt b/client/requirements.txt
new file mode 100644
index 0000000..c34e04a
--- /dev/null
+++ b/client/requirements.txt
@@ -0,0 +1,14 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr>=0.6,!=0.7,<1.0
+Babel>=1.3
+argparse
+PrettyTable>=0.7,<0.8
+python-keystoneclient>=1.0.0
+pyOpenSSL>=0.11
+requests>=2.2.0,!=2.4.0
+warlock>=1.0.1,<2
+six>=1.7.0
+oslo.utils>=1.2.0 # Apache-2.0
+oslo.i18n>=1.3.0 # Apache-2.0
diff --git a/client/run_tests.sh b/client/run_tests.sh
new file mode 100644
index 0000000..80edda6
--- /dev/null
+++ b/client/run_tests.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+function usage {
+ echo "Usage: $0 [OPTION]..."
+ echo "Run python-escalatorclient's test suite(s)"
+ echo ""
+ echo " -p, --pep8 Just run flake8"
+ echo " -h, --help Print this usage message"
+ echo ""
+ echo "This script is deprecated and currently retained for compatibility."
+ echo 'You can run the full test suite for multiple environments by running "tox".'
+ echo 'You can run tests for only python 2.7 by running "tox -e py27", or run only'
+ echo 'the flake8 tests with "tox -e pep8".'
+ exit
+}
+
+command -v tox > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+ echo 'This script requires "tox" to run.'
+ echo 'You can install it with "pip install tox".'
+ exit 1;
+fi
+
+just_pep8=0
+
+function process_option {
+ case "$1" in
+ -h|--help) usage;;
+ -p|--pep8) let just_pep8=1;;
+ esac
+}
+
+for arg in "$@"; do
+ process_option $arg
+done
+
+if [ $just_pep8 -eq 1 ]; then
+ tox -e pep8
+ exit
+fi
+
+tox -e py27 $toxargs 2>&1 | tee run_tests.err.log || exit
+if [ ${PIPESTATUS[0]} -ne 0 ]; then
+ exit ${PIPESTATUS[0]}
+fi
+
+if [ -z "$toxargs" ]; then
+ tox -e pep8
+fi
diff --git a/client/setup.cfg b/client/setup.cfg
new file mode 100644
index 0000000..165fb68
--- /dev/null
+++ b/client/setup.cfg
@@ -0,0 +1,46 @@
+[metadata]
+name = escalatorclient
+summary = Escalator Client Library
+description-file =
+ README.rst
+license = Apache License, Version 2.0
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+ Development Status :: 5 - Production/Stable
+ Environment :: Console
+ Environment :: OpenStack
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 2.6
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.3
+
+[files]
+packages =
+ escalatorclient
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
+
+[entry_points]
+console_scripts =
+ escalator = escalatorclient.shell:main
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[wheel]
+universal = 1
diff --git a/client/setup.py b/client/setup.py
new file mode 100644
index 0000000..7363757
--- /dev/null
+++ b/client/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/client/test-requirements.txt b/client/test-requirements.txt
new file mode 100644
index 0000000..06cb4aa
--- /dev/null
+++ b/client/test-requirements.txt
@@ -0,0 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+hacking>=0.8.0,<0.9
+
+coverage>=3.6
+discover
+mox3>=0.7.0
+mock>=1.0
+oslosphinx>=2.2.0 # Apache-2.0
+sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
+testrepository>=0.0.18
+testtools>=0.9.36,!=1.2.0
diff --git a/client/tox.ini b/client/tox.ini
new file mode 100644
index 0000000..6a6ea7d
--- /dev/null
+++ b/client/tox.ini
@@ -0,0 +1,39 @@
+[tox]
+envlist = py26,py27,py33,py34,pypy,pep8
+minversion = 1.6
+skipsdist = True
+
+[testenv]
+usedevelop = True
+install_command = pip install -U {opts} {packages}
+setenv = VIRTUAL_ENV={envdir}
+ OS_STDOUT_NOCAPTURE=False
+ OS_STDERR_NOCAPTURE=False
+ PYTHONHASHSEED=0
+
+deps = -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+commands = python setup.py testr --testr-args='{posargs}'
+
+[testenv:pep8]
+commands = flake8
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:cover]
+commands = python setup.py testr --coverage --testr-args='{posargs}'
+
+[testenv:docs]
+commands=
+ python setup.py build_sphinx
+
+[tox:jenkins]
+downloadcache = ~/cache/pip
+
+[flake8]
+# H233 Python 3.x incompatible use of print operator
+# H404 multi line docstring should start with a summary
+ignore = F403,F812,H233,H404,F401,E731
+show-source = True
+exclude = .venv,.tox,dist,doc,*egg,build
diff --git a/docs/design/etc/conf.py b/docs/design/etc/conf.py
index 0066035..c933038 100644
--- a/docs/design/etc/conf.py
+++ b/docs/design/etc/conf.py
@@ -1,6 +1,17 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import datetime
-import sys
-import os
try:
__import__('imp').find_module('sphinx.ext.numfig')
@@ -20,9 +31,7 @@ html_use_index = False
pdf_documents = [('index', u'OPNFV', u'OPNFV Project', u'OPNFV')]
pdf_fit_mode = "shrink"
-pdf_stylesheets = ['sphinx','kerning','a4']
-#latex_domain_indices = False
-#latex_use_modindex = False
+pdf_stylesheets = ['sphinx', 'kerning', 'a4']
latex_elements = {
'printindex': '',
diff --git a/docs/etc/conf.py b/docs/etc/conf.py
index 0066035..e2579d9 100644
--- a/docs/etc/conf.py
+++ b/docs/etc/conf.py
@@ -1,6 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
import datetime
-import sys
-import os
try:
__import__('imp').find_module('sphinx.ext.numfig')
@@ -20,9 +32,7 @@ html_use_index = False
pdf_documents = [('index', u'OPNFV', u'OPNFV Project', u'OPNFV')]
pdf_fit_mode = "shrink"
-pdf_stylesheets = ['sphinx','kerning','a4']
-#latex_domain_indices = False
-#latex_use_modindex = False
+pdf_stylesheets = ['sphinx', 'kerning', 'a4']
latex_elements = {
'printindex': '',
diff --git a/docs/gap_analysis/etc/conf.py b/docs/gap_analysis/etc/conf.py
index 0066035..c933038 100644
--- a/docs/gap_analysis/etc/conf.py
+++ b/docs/gap_analysis/etc/conf.py
@@ -1,6 +1,17 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import datetime
-import sys
-import os
try:
__import__('imp').find_module('sphinx.ext.numfig')
@@ -20,9 +31,7 @@ html_use_index = False
pdf_documents = [('index', u'OPNFV', u'OPNFV Project', u'OPNFV')]
pdf_fit_mode = "shrink"
-pdf_stylesheets = ['sphinx','kerning','a4']
-#latex_domain_indices = False
-#latex_use_modindex = False
+pdf_stylesheets = ['sphinx', 'kerning', 'a4']
latex_elements = {
'printindex': '',
diff --git a/docs/requirements/102-Terminologies.rst b/docs/requirements/102-Terminologies.rst
index 221196b..f065bca 100644
--- a/docs/requirements/102-Terminologies.rst
+++ b/docs/requirements/102-Terminologies.rst
@@ -5,30 +5,59 @@ Terminology
Terminologies
=============
-Operator
- The term refers to network service providers and Virtual Network
- Function (VNF) providers.
+Backup
+ The term refers to making a copy of the system persistent data to a storage,
+ so that it can be used to restore the system or a given part of it to the same
+ state as it was when the backup was created. Restoring from backup will lose
+ volatile states like CPU and memory content. Changes made to the system from
+ the moment the backup was created to the moment it is used to restore the
+ (sub)system are also lost in the restoration process.
+
+Carrier Grade
+ The term refers to a system, or a hardware or software component that is extremely
+ reliable, well tested and proven in its capabilities. Carrier grade systems are
+ tested and engineered to meet or exceed "five nines" high availability standards,
+ and provide very fast fault recovery through redundancy (normally less than 50
+ milliseconds). Sometimes, Carrier grade is also referred as Carrier Class.
+
+Downgrade
+ The term refers to an upgrade operation in which an earlier version of the
+ software is restored through the upgrade procedure. Compared to rollback,
+ Downgrade is normally initiated by the Operator, and it is allowed to select any
+ earlier version, provided the compatibility of the versions is met or upgrade
+ strategies are allowed (whether service outage or data loss can be tolerated.)
End-User
The term refers to a subscriber of the Operator's services.
-Network Service
- The term refers to a service provided by an Operator to its
- end-users using a set of (virtualized) Network Functions
+High Availability(HA)
+ High Availability refers to a system or component that is continuously
+ operational for a desirably long length of time even when a part of it is out of
+ service. Carrier Grade Availability is a typical HA example. HA systems are popular
+ in Operators' data centers for critical tasks. Non-HA systems are normally deployed
+ for experimental or non-critical tasks in favor of their simplicity.
Infrastructure Services
The term refers to services provided by the NFV Infrastructure to the VNFs
as required by the Management & Orchestration functions and especially the VIM.
I.e. these are the virtual resources as perceived by the VNFs.
-Smooth Upgrade
- The term refers to an upgrade that results in no service outage
- for the end-users.
+Infrastructure Resource Model
+ The term refers to the representation of infrastructure resources,
+ namely: the physical resources, the virtualization
+ facility resources and the virtual resources.
-Rolling Upgrade
- The term refers to an upgrade strategy, which upgrades a node or a subset
- of nodes at a time in a wave style rolling through the data centre. It
- is a popular upgrade strategy to maintain service availability.
+Network Service
+ The term refers to a service provided by an Operator to its
+ end-users using a set of (virtualized) Network Functions
+
+Operator
+ The term refers to network service providers and Virtual Network
+ Function (VNF) providers.
+
+Outage
+ The term refers to the period of time when a given service is not available
+ to End-Users.
Parallel Universe Upgrade
The term refers to an upgrade strategy, which creates and deploys
@@ -36,25 +65,42 @@ Parallel Universe Upgrade
system continues running. The state of the old system is transferred
to the new system after sufficient testing of the new system.
-Infrastructure Resource Model
- The term refers to the representation of infrastructure resources,
- namely: the physical resources, the virtualization
- facility resources and the virtual resources.
-
Physical Resource
The term refers to a piece of hardware in the NFV infrastructure that may
also include firmware enabling this piece of hardware.
-Virtual Resource
- The term refers to a resource, which is provided as services built on top
- of the physical resources via the virtualization facilities; in particular,
- virtual resources are the resources on which VNFs are deployed. Examples of
- virtual resources are: VMs, virtual switches, virtual routers, virtual disks.
+Restore
+ The term refers to a failure handling strategy that reverts the changes
+ done, for example, by an upgrade by restoring the system from some backup
+ data. This results in the loss of any change and data persisted after the
+ backup has been taken. To recover those, additional measures need to be taken
+ if necessary (e.g. Rollforward).
-Visualization Facility
- The term refers to a resource that enables the creation
- of virtual environments on top of the physical resources, e.g.
- hypervisor, OpenStack, etc.
+Rollback
+ The term refers to a failure handling strategy that reverts the changes
+ done by a potentially failed upgrade execution one by one in a reverse order.
+ I.e. it is like undoing the changes done by the upgrade.
+
+Rollforward
+ The term refers to a failure handling strategy applied after a restore
+ (from a backup) operation to recover any loss of data persisted between
+ the time the backup has been taken and the moment it is restored. Rollforward
+ requires that data that needs to survive the restore operation is logged at
+ a location not impacted by the restore so that it can be re-applied to the
+ system after its restoration from the backup.
+
+Rolling Upgrade
+ The term refers to an upgrade strategy, which upgrades a node or a subset
+ of nodes at a time in a wave style rolling through the data centre. It
+ is a popular upgrade strategy to maintain service availability.
+
+Smooth Upgrade
+ The term refers to an upgrade that results in no service outage
+ for the end-users.
+
+Snapshot
+ The term refers to the state of a system at a particular point in time, or
+ the action of capturing such a state.
Upgrade Campaign
The term refers to a choreography that describes how the upgrade should
@@ -69,48 +115,18 @@ Upgrade Duration
upgrade campaign has started until it has been committed. Depending on
the upgrade strategy, the state of the configuration and the upgrade target
some parts of the system may be in a more vulnerable state with respect to
- service availbility.
-
-Outage
- The period of time during which a given service is not provided is referred
- as the outage of that given service. If a subsystem or the entire system
- does not provide any service, it is the outage of the given subsystem or the
- system. Smooth upgrade means upgrade with no outage for the user plane, i.e.
- no VNF should experience service outage.
-
-Rollback
- The term refers to a failure handling strategy that reverts the changes
- done by a potentially failed upgrade execution one by one in a reverse order.
- I.e. it is like undoing the changes done by the upgrade.
+ service availability.
-Backup
- The term refers to data persisted to a storage, so that it can be used to
- restore the system or a given part of it in the same state as it was when the
- backup was created assuming a cold restart. Changes made to the system from
- the moment the backup was created till the moment it is used to restore the
- (sub)system are lost in the restoration process.
-
-Restore
- The term refers to a failure handling strategy that reverts the changes
- done, for example, by an upgrade by restoring the system from some backup
- data. This results in the loss of any change and data persisted after the
- backup was been taken. To recover those additional measures need to be taken
- if necessary (e.g. rollforward).
-
-Rollforward
- The term refers to a failure handling strategy applied after a restore
- (from a backup) opertaion to recover any loss of data persisted between
- the time the backup has been taken and the moment it is restored. Rollforward
- requires that data that needs to survive the restore operation is logged at
- a location not impacted by the restore so that it can be re-applied to the
- system after its restoration from the backup.
+Virtualization Facility
+ The term refers to a resource that enables the creation
+ of virtual environments on top of the physical resources, e.g.
+ hypervisor, OpenStack, etc.
-Downgrade
- The term refers to an upgrade in which an earlier version of the software
- is restored through the upgrade procedure. A system can be downgraded to any
- earlier version and the compatibility of the versions will determine the
- applicable upgrade strategies and whether service outage can be avoided.
- In particular any data conversion needs special attention.
+Virtual Resource
+ The term refers to a resource, which is provided as services built on top
+ of the physical resources via the virtualization facilities; in particular,
+ virtual resources are the resources on which VNFs are deployed. Examples of
+ virtual resources are: VMs, virtual switches, virtual routers, virtual disks.
Abbreviations
=============
@@ -126,4 +142,3 @@ VIM
sometimes it is also referred as control plane in this document.
The VIM controls and manages the NFVI compute, network and storage
resources to provide the required virtual resources to the VNFs.
-
diff --git a/docs/requirements/104-Requirements.rst b/docs/requirements/104-Requirements.rst
index b6e7f57..3dd66dc 100644
--- a/docs/requirements/104-Requirements.rst
+++ b/docs/requirements/104-Requirements.rst
@@ -5,180 +5,42 @@ Requirements
Upgrade duration
================
-As the OPNFV end-users are primarily Telecom operators, the network
-services provided by the VNFs deployed on the NFVI should meet the
-requirement of 'Carrier Grade'.::
-
- In telecommunication, a "carrier grade" or"carrier class" refers to a
- system, or a hardware or software component that is extremely reliable,
- well tested and proven in its capabilities. Carrier grade systems are
- tested and engineered to meet or exceed "five nines" high availability
- standards, and provide very fast fault recovery through redundancy
- (normally less than 50 milliseconds). [from wikipedia.org]
-
-"five nines" means working all the time in ONE YEAR except 5'15".
-
-::
-
- We have learnt that a well prepared upgrade of OpenStack needs 10
- minutes. The major time slot in the outage time is used spent on
- synchronizing the database. [from ' Ten minutes OpenStack Upgrade? Done!
- ' by Symantec]
-
-This 10 minutes of downtime of the OpenStack services however did not impact the
-users, i.e. the VMs running on the compute nodes. This was the outage of
-the control plane only. On the other hand with respect to the
-preparations this was a manually tailored upgrade specific to the
-particular deployment and the versions of each OpenStack service.
-
-The project targets to achieve a more generic methodology, which however
-requires that the upgrade objects fulfil certain requirements. Since
-this is only possible on the long run we target first the upgrade
-of the different VIM services from version to version.
-
-**Questions:**
-
-1. Can we manage to upgrade OPNFV in only 5 minutes?
-
-.. <MT> The first question is whether we have the same carrier grade
- requirement on the control plane as on the user plane. I.e. how
- much control plane outage we can/willing to tolerate?
- In the above case probably if the database is only half of the size
- we can do the upgrade in 5 minutes, but is that good? It also means
- that if the database is twice as much then the outage is 20
- minutes.
- For the user plane we should go for less as with two release yearly
- that means 10 minutes outage per year.
-
-.. <Malla> 10 minutes outage per year to the users? Plus, if we take
- control plane into the consideration, then total outage will be
- more than 10 minute in whole network, right?
-
-.. <MT> The control plane outage does not have to cause outage to
- the users, but it may of course depending on the size of the system
- as it's more likely that there's a failure that needs to be handled
- by the control plane.
-
-2. Is it acceptable for end users ? Such as a planed service
- interruption will lasting more than ten minutes for software
- upgrade.
-
-.. <MT> For user plane, no it's not acceptable in case of
- carrier-grade. The 5' 15" downtime should include unplanned and
- planned downtimes.
-
-.. <Malla> I go agree with Maria, it is not acceptable.
-
-3. Will any VNFs still working well when VIM is down?
-
-.. <MT> In case of OpenStack it seems yes. .:)
-
-The maximum duration of an upgrade
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The duration of an upgrade is related to and proportional with the
-scale and the complexity of the OPNFV platform as well as the
-granularity (in function and in space) of the upgrade.
-
-.. <Malla> Also, if is a partial upgrade like module upgrade, it depends
- also on the OPNFV modules and their tight connection entities as well.
-
-.. <MT> Since the maintenance window is shrinking and becoming non-existent
- the duration of the upgrade is secondary to the requirement of smooth upgrade.
- But probably we want to be able to put a time constraint on each upgrade
- during which it must complete otherwise it is considered failed and the system
- should be rolled back. I.e. in case of automatic execution it might not be clear
- if an upgrade is long or just hanging. The time constraints may be a function
- of the size of the system in terms of the upgrade object(s).
-
-The maximum duration of a roll back when an upgrade is failed
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The duration of a roll back is short than the corresponding upgrade. It
-depends on the duration of restore the software and configure data from
-pre-upgrade backup / snapshot.
-
-.. <MT> During the upgrade process two types of failure may happen:
- In case we can recover from the failure by undoing the upgrade
- actions it is possible to roll back the already executed part of the
- upgrade in graceful manner introducing no more service outage than
- what was introduced during the upgrade. Such a graceful roll back
- requires typically the same amount of time as the executed portion of
- the upgrade and impose minimal state/data loss.
-
-.. <MT> Requirement: It should be possible to roll back gracefully the
- failed upgrade of stateful services of the control plane.
- In case we cannot recover from the failure by just undoing the
- upgrade actions, we have to restore the upgraded entities from their
- backed up state. In other terms the system falls back to an earlier
- state, which is typically a faster recovery procedure than graceful
- roll back and depending on the statefulness of the entities involved it
- may result in significant state/data loss.
-
-.. <MT> Two possible types of failures can happen during an upgrade
-
-.. <MT> We can recover from the failure that occurred in the upgrade process:
- In this case, a graceful rolling back of the executed part of the
- upgrade may be possible which would "undo" the executed part in a
- similar fashion. Thus, such a roll back introduces no more service
- outage during an upgrade than the executed part introduced. This
- process typically requires the same amount of time as the executed
- portion of the upgrade and impose minimal state/data loss.
-
-.. <MT> We cannot recover from the failure that occurred in the upgrade
- process: In this case, the system needs to fall back to an earlier
- consistent state by reloading this backed-up state. This is typically
- a faster recovery procedure than the graceful roll back, but can cause
- state/data loss. The state/data loss usually depends on the
- statefulness of the entities whose state is restored from the backup.
-
-The maximum duration of a VNF interruption (Service outage)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Since not the entire process of a smooth upgrade will affect the VNFs,
-the duration of the VNF interruption may be shorter than the duration
-of the upgrade. In some cases, the VNF running without the control
-from of the VIM is acceptable.
-
-.. <MT> Should require explicitly that the NFVI should be able to
- provide its services to the VNFs independent of the control plane?
-
-.. <MT> Requirement: The upgrade of the control plane must not cause
- interruption of the NFVI services provided to the VNFs.
-
-.. <MT> With respect to carrier-grade the yearly service outage of the
- VNF should not exceed 5' 15" regardless whether it is planned or
- unplanned outage. Considering the HA requirements TL-9000 requires an
- end-to-end service recovery time of 15 seconds based on which the ETSI
- GS NFV-REL 001 V1.1.1 (2015-01) document defines three service
- availability levels (SAL). The proposed example service recovery times
- for these levels are:
-
-.. <MT> SAL1: 5-6 seconds
-
-.. <MT> SAL2: 10-15 seconds
-
-.. <MT> SAL3: 20-25 seconds
-
-.. <Pva> my comment was actually that the downtime metrics of the
- underlying elements, components and services are small fraction of the
- total E2E service availability time. No-one on the E2E service path
- will get the whole downtime allocation (in this context it includes
- upgrade process related outages for the services provided by VIM etc.
- elements that are subject to upgrade process).
-
-.. <MT> So what you are saying is that the upgrade of any entity
- (component, service) shouldn't cause even this much service
- interruption. This was the reason I brought these figures here as well
- that they are posing some kind of upper-upper boundary. Ideally the
- interruption is in the millisecond range i.e. no more than a
- switch-over or a live migration.
-
-.. <MT> Requirement: Any interruption caused to the VNF by the upgrade
- of the NFVI should be in the sub-second range.
-
-.. <MT]> In the future we also need to consider the upgrade of the NFVI,
- i.e. HW, firmware, hypervisors, host OS etc.
+Being a telecom service system, OPNFV shall target carrier grade availability,
+which allows only about 5 minutes of outage in a year. Based on this input
+and discussions of the current solutions, the following requirements are defined
+from the perspective of time constraints:
+
+- OPNFV platform must be deployed with HA to make live upgrade possible. Considering
+ the scale, complexity, and life cycle of the OPNFV system, allocating less than
+ 5 minutes out of a year for upgrade is unrealistic. Therefore OPNFV should
+ be deployed with HA, allowing part of the system to be upgraded while its
+ redundant parts continue to serve End-Users. This hopefully relieves the time
+ constraint on the upgrade operation to an achievable level.
+
+- VNF service interruption for each switchover should be in the sub-second range. In
+ an HA system, switching from an in-service system/component to the redundant
+ one normally causes a service interruption. For example, live-migrating a
+ virtual machine from one hypervisor to another typically takes the virtual
+ machine out of service for about 500ms. The sum of all these interruptions in
+ a year shall be less than 5 minutes in order to fulfill the five-nines carrier
+ grade availability. In addition, when an interruption exceeds a second, the End-User
+ experience is likely impacted. This document therefore recommends that service
+ switching should take less than a second.
+
+- VIM interruption shall not result in NFVI interruption. The VIM in general has more
+ logic built-in, and is therefore more complicated and likely less reliable than the NFVI.
+ To minimize the impact of the VIM on the NFVI, unless the VIM explicitly orders the NFVI
+ to stop functioning, the NFVI shall continue working as it should.
+
+- Total upgrade duration should be less than 2 hours. Even though the time constraint is
+ relieved by the HA design, the total time for the upgrade operation is recommended
+ to be limited to 2 hours. The reason is that an upgrade might interfere with End-Users
+ unexpectedly, and a shorter maintenance window poses less risk. In this
+ document, the upgrade duration starts at the moment that End-User services
+ are possibly impacted and ends at the moment that the upgrade is concluded with either
+ commit or rollback. Given the scale and complexity of the OPNFV system,
+ this requirement looks challenging; however, OPNFV implementations should
+ target it by introducing novel designs and solutions.
Pre-upgrading Environment
=========================
diff --git a/docs/requirements/105-Use_Cases.rst b/docs/requirements/105-Use_Cases.rst
index 9f13110..9183f0b 100644
--- a/docs/requirements/105-Use_Cases.rst
+++ b/docs/requirements/105-Use_Cases.rst
@@ -5,29 +5,6 @@ Use Cases
This section describes the use cases in different system configuration
to verify the requirements of Escalator.
-System Configurations
-=====================
-
-HA configuration
-^^^^^^^^^^^^^^^^
-
-A HA configuration system is very popular in the operator's data centre.
-It is a typical product environment. It is always running 7\*24 with VNFs
-running on it to provide services to the end users.
-
-
-Non-HA configuration
-^^^^^^^^^^^^^^^^^^^^
-
-A non-HA configuration system is normally deployed for experimental or
-development usages, such as a Vagrant/VM environment.
-
-Escalator supports the upgrade system in this configuration, but it may
-not guarantee a smooth upgrade.
-
-Use cases
-=========
-
Use case #1: Smooth upgrade in a HA configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For a system with HA configuration, the operator can use Escalator to
diff --git a/docs/requirements/etc/conf.py b/docs/requirements/etc/conf.py
index 0066035..c933038 100644
--- a/docs/requirements/etc/conf.py
+++ b/docs/requirements/etc/conf.py
@@ -1,6 +1,17 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import datetime
-import sys
-import os
try:
__import__('imp').find_module('sphinx.ext.numfig')
@@ -20,9 +31,7 @@ html_use_index = False
pdf_documents = [('index', u'OPNFV', u'OPNFV Project', u'OPNFV')]
pdf_fit_mode = "shrink"
-pdf_stylesheets = ['sphinx','kerning','a4']
-#latex_domain_indices = False
-#latex_use_modindex = False
+pdf_stylesheets = ['sphinx', 'kerning', 'a4']
latex_elements = {
'printindex': '',