From a55651eb098da2e1aa90c93294a59857711b48c1 Mon Sep 17 00:00:00 2001
From: SerenaFeng
Date: Wed, 1 Jun 2016 15:36:17 +0800
Subject: project-ize testAPI

JIRA: FUNCTEST-284

Change-Id: I219e934bb11f50de84df2aa0345ecc7885223491
Signed-off-by: SerenaFeng
---
 utils/test/result_collection_api/README.md | 17 -
 utils/test/result_collection_api/README.rst | 64 +
 .../test/result_collection_api/common/__init__.py | 8 -
 utils/test/result_collection_api/common/config.py | 91 -
 .../test/result_collection_api/common/constants.py | 15 -
 utils/test/result_collection_api/config.ini | 12 -
 .../result_collection_api/dashboard/__init__.py | 8 -
 .../dashboard/bottlenecks2Dashboard.py | 199 --
 .../dashboard/dashboard_utils.py | 78 -
 .../dashboard/doctor2Dashboard.py | 105 -
 .../dashboard/functest2Dashboard.py | 472 ----
 .../dashboard/promise2Dashboard.py | 103 -
 .../dashboard/qtip2Dashboard.py | 121 -
 .../dashboard/vsperf2Dashboard.py | 121 -
 .../dashboard/yardstick2Dashboard.py | 210 --
 utils/test/result_collection_api/etc/config.ini | 12 +
 .../opnfv_testapi/__init__.py | 0
 .../opnfv_testapi/cmd/__init__.py | 0
 .../opnfv_testapi/cmd/result_collection_api.py | 112 +
 .../opnfv_testapi/common/__init__.py | 8 +
 .../opnfv_testapi/common/config.py | 91 +
 .../opnfv_testapi/common/constants.py | 15 +
 .../opnfv_testapi/dashboard/__init__.py | 8 +
 .../dashboard/bottlenecks2Dashboard.py | 199 ++
 .../opnfv_testapi/dashboard/dashboard_utils.py | 78 +
 .../opnfv_testapi/dashboard/doctor2Dashboard.py | 105 +
 .../opnfv_testapi/dashboard/functest2Dashboard.py | 472 ++++
 .../opnfv_testapi/dashboard/promise2Dashboard.py | 103 +
 .../opnfv_testapi/dashboard/qtip2Dashboard.py | 121 +
 .../opnfv_testapi/dashboard/vsperf2Dashboard.py | 121 +
 .../opnfv_testapi/dashboard/yardstick2Dashboard.py | 210 ++
 .../opnfv_testapi/resources/__init__.py | 8 +
 .../opnfv_testapi/resources/dashboard_handlers.py | 100 +
 .../opnfv_testapi/resources/handlers.py | 230 ++
 .../opnfv_testapi/resources/models.py | 70 +
 .../opnfv_testapi/resources/pod_handlers.py | 79 +
 .../opnfv_testapi/resources/pod_models.py | 79 +
 .../opnfv_testapi/resources/project_handlers.py | 84 +
 .../opnfv_testapi/resources/project_models.py | 88 +
 .../opnfv_testapi/resources/result_handlers.py | 162 ++
 .../opnfv_testapi/resources/result_models.py | 162 ++
 .../opnfv_testapi/resources/testcase_handlers.py | 107 +
 .../opnfv_testapi/resources/testcase_models.py | 99 +
 .../opnfv_testapi/tests/__init__.py | 1 +
 .../opnfv_testapi/tests/unit/__init__.py | 1 +
 .../opnfv_testapi/tests/unit/fake_pymongo.py | 144 +
 .../opnfv_testapi/tests/unit/test_base.py | 148 ++
 .../opnfv_testapi/tests/unit/test_dashboard.py | 71 +
 .../opnfv_testapi/tests/unit/test_fake_pymongo.py | 68 +
 .../opnfv_testapi/tests/unit/test_pod.py | 81 +
 .../opnfv_testapi/tests/unit/test_project.py | 133 +
 .../opnfv_testapi/tests/unit/test_result.py | 267 ++
 .../opnfv_testapi/tests/unit/test_testcase.py | 183 ++
 .../opnfv_testapi/tests/unit/test_version.py | 25 +
 .../opnfv_testapi/tornado_swagger_ui/LICENSE | 20 +
 .../opnfv_testapi/tornado_swagger_ui/README | 1 +
 .../opnfv_testapi/tornado_swagger_ui/README.md | 277 ++
 .../opnfv_testapi/tornado_swagger_ui/__init__.py | 4 +
 .../tornado_swagger_ui/example/basic.py | 219 ++
 .../opnfv_testapi/tornado_swagger_ui/setup.py | 30 +
 .../tornado_swagger_ui/tornado_swagger/__init__.py | 4 +
 .../tornado_swagger_ui/tornado_swagger/handlers.py | 39 +
 .../tornado_swagger_ui/tornado_swagger/settings.py | 26 +
 .../tornado_swagger/static/.gitignore | 1 +
 .../static/css/highlight.default.css | 135 +
 .../static/css/hightlight.default.css | 135 +
 .../tornado_swagger/static/css/screen.css | 1224 +++++++++
 .../tornado_swagger/static/endpoint.html | 77 +
 .../static/images/explorer_icons.png | Bin 0 -> 5763 bytes
 .../tornado_swagger/static/images/logo_small.png | Bin 0 -> 770 bytes
 .../static/images/pet_store_api.png | Bin 0 -> 824 bytes
 .../tornado_swagger/static/images/throbber.gif | Bin 0 -> 9257 bytes
 .../tornado_swagger/static/images/wordnik_api.png | Bin 0 -> 980 bytes
 .../tornado_swagger/static/index.html | 85 +
 .../tornado_swagger/static/lib/backbone-min.js | 38 +
 .../tornado_swagger/static/lib/handlebars-1.0.0.js | 2278 ++++++++++++++++
 .../static/lib/highlight.7.3.pack.js | 1 +
 .../tornado_swagger/static/lib/jquery-1.8.0.min.js | 2 +
 .../static/lib/jquery.ba-bbq.min.js | 18 +
 .../static/lib/jquery.slideto.min.js | 1 +
 .../static/lib/jquery.wiggle.min.js | 8 +
 .../tornado_swagger/static/lib/shred.bundle.js | 2765 ++++++++++++++++++++
 .../tornado_swagger/static/lib/shred/content.js | 193 ++
 .../tornado_swagger/static/lib/swagger-oauth.js | 211 ++
 .../tornado_swagger/static/lib/swagger.js | 1417 ++++++++++
 .../tornado_swagger/static/lib/underscore-min.js | 32 +
 .../tornado_swagger/static/o2c.html | 15 +
 .../tornado_swagger/static/swagger-ui.js | 2247 ++++++++++++++++
 .../tornado_swagger/static/swagger-ui.min.js | 1 +
 .../tornado_swagger_ui/tornado_swagger/swagger.py | 285 ++
 .../tornado_swagger_ui/tornado_swagger/views.py | 130 +
 utils/test/result_collection_api/requirements.txt | 8 +
 .../result_collection_api/resources/__init__.py | 8 -
 .../resources/dashboard_handlers.py | 99 -
 .../result_collection_api/resources/handlers.py | 230 --
 .../test/result_collection_api/resources/models.py | 70 -
 .../resources/pod_handlers.py | 79 -
 .../result_collection_api/resources/pod_models.py | 79 -
 .../resources/project_handlers.py | 84 -
 .../resources/project_models.py | 88 -
 .../resources/result_handlers.py | 162 --
 .../resources/result_models.py | 162 --
 .../resources/testcase_handlers.py | 107 -
 .../resources/testcase_models.py | 99 -
 .../result_collection_api/result_collection_api.py | 109 -
 utils/test/result_collection_api/run_test.sh | 4 +-
 .../samples/sample.json.postman_collection | 1159 --------
 utils/test/result_collection_api/setup.cfg | 45 +
 utils/test/result_collection_api/setup.py | 12 +
 .../result_collection_api/test-requirements.txt | 7 +
 utils/test/result_collection_api/tests/__init__.py | 1 -
 .../result_collection_api/tests/unit/__init__.py | 1 -
 .../tests/unit/fake_pymongo.py | 144 -
 .../result_collection_api/tests/unit/test_base.py | 145 -
 .../tests/unit/test_dashboard.py | 71 -
 .../tests/unit/test_fake_pymongo.py | 68 -
 .../result_collection_api/tests/unit/test_pod.py | 81 -
 .../tests/unit/test_project.py | 132 -
 .../tests/unit/test_result.py | 267 --
 .../tests/unit/test_testcase.py | 183 --
 .../tests/unit/test_version.py | 24 -
 .../tornado_swagger_ui/LICENSE | 20 -
 .../tornado_swagger_ui/README | 1 -
 .../tornado_swagger_ui/README.md | 277 --
 .../tornado_swagger_ui/__init__.py | 4 -
 .../tornado_swagger_ui/example/basic.py | 219 --
 .../tornado_swagger_ui/setup.py | 30 -
 .../tornado_swagger_ui/tornado_swagger/__init__.py | 4 -
 .../tornado_swagger_ui/tornado_swagger/handlers.py | 39 -
 .../tornado_swagger_ui/tornado_swagger/settings.py | 26 -
 .../tornado_swagger/static/.gitignore | 1 -
 .../static/css/highlight.default.css | 135 -
 .../static/css/hightlight.default.css | 135 -
 .../tornado_swagger/static/css/screen.css | 1224 ---------
 .../tornado_swagger/static/endpoint.html | 77 -
 .../static/images/explorer_icons.png | Bin 5763 -> 0 bytes
 .../tornado_swagger/static/images/logo_small.png | Bin 770 -> 0 bytes
 .../static/images/pet_store_api.png | Bin 824 -> 0 bytes
 .../tornado_swagger/static/images/throbber.gif | Bin 9257 -> 0 bytes
 .../tornado_swagger/static/images/wordnik_api.png | Bin 980 -> 0 bytes
 .../tornado_swagger/static/index.html | 85 -
 .../tornado_swagger/static/lib/backbone-min.js | 38 -
 .../tornado_swagger/static/lib/handlebars-1.0.0.js | 2278 ----------------
 .../static/lib/highlight.7.3.pack.js | 1 -
 .../tornado_swagger/static/lib/jquery-1.8.0.min.js | 2 -
 .../static/lib/jquery.ba-bbq.min.js | 18 -
 .../static/lib/jquery.slideto.min.js | 1 -
 .../static/lib/jquery.wiggle.min.js | 8 -
 .../tornado_swagger/static/lib/shred.bundle.js | 2765 --------------------
 .../tornado_swagger/static/lib/shred/content.js | 193 --
 .../tornado_swagger/static/lib/swagger-oauth.js | 211 --
 .../tornado_swagger/static/lib/swagger.js | 1417 ----------
 .../tornado_swagger/static/lib/underscore-min.js | 32 -
 .../tornado_swagger/static/o2c.html | 15 -
 .../tornado_swagger/static/swagger-ui.js | 2247 ----------------
 .../tornado_swagger/static/swagger-ui.min.js | 1 -
 .../tornado_swagger_ui/tornado_swagger/swagger.py | 285 --
 .../tornado_swagger_ui/tornado_swagger/views.py | 130 -
 158 files changed, 16102 insertions(+), 17133 deletions(-)
 delete mode 100644 utils/test/result_collection_api/README.md
 create mode 100644 utils/test/result_collection_api/README.rst
 delete mode 100644 utils/test/result_collection_api/common/__init__.py
 delete mode 100644 utils/test/result_collection_api/common/config.py
 delete mode 100644 utils/test/result_collection_api/common/constants.py
 delete mode 100644 utils/test/result_collection_api/config.ini
 delete mode 100644 utils/test/result_collection_api/dashboard/__init__.py
 delete mode 100755 utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
 delete mode 100644 utils/test/result_collection_api/dashboard/dashboard_utils.py
 delete mode 100644 utils/test/result_collection_api/dashboard/doctor2Dashboard.py
 delete mode 100644 utils/test/result_collection_api/dashboard/functest2Dashboard.py
 delete mode 100644 utils/test/result_collection_api/dashboard/promise2Dashboard.py
 delete mode 100644 utils/test/result_collection_api/dashboard/qtip2Dashboard.py
 delete mode 100755 utils/test/result_collection_api/dashboard/vsperf2Dashboard.py
 delete mode 100644 utils/test/result_collection_api/dashboard/yardstick2Dashboard.py
 create mode 100644 utils/test/result_collection_api/etc/config.ini
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/cmd/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/cmd/result_collection_api.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/common/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/common/config.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/common/constants.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
 create mode 100755 utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py
 create mode 100755 utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/models.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/pod_handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/pod_models.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/project_models.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/resources/testcase_models.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_base.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_pod.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_testcase.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tests/unit/test_version.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/LICENSE
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README.md
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/example/basic.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/setup.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/__init__.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/handlers.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/settings.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/.gitignore
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/highlight.default.css
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/hightlight.default.css
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/screen.css
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/endpoint.html
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/explorer_icons.png
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/logo_small.png
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/pet_store_api.png
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/throbber.gif
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/wordnik_api.png
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/index.html
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/backbone-min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/handlebars-1.0.0.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/highlight.7.3.pack.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/jquery-1.8.0.min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/jquery.ba-bbq.min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/jquery.slideto.min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/jquery.wiggle.min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/shred.bundle.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/shred/content.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/swagger-oauth.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/swagger.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/underscore-min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/o2c.html
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/swagger-ui.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/swagger-ui.min.js
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/swagger.py
 create mode 100644 utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/views.py
 create mode 100644 utils/test/result_collection_api/requirements.txt
 delete mode 100644 utils/test/result_collection_api/resources/__init__.py
 delete mode 100644 utils/test/result_collection_api/resources/dashboard_handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/models.py
 delete mode 100644 utils/test/result_collection_api/resources/pod_handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/pod_models.py
 delete mode 100644 utils/test/result_collection_api/resources/project_handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/project_models.py
 delete mode 100644 utils/test/result_collection_api/resources/result_handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/result_models.py
 delete mode 100644 utils/test/result_collection_api/resources/testcase_handlers.py
 delete mode 100644 utils/test/result_collection_api/resources/testcase_models.py
 delete mode 100644 utils/test/result_collection_api/result_collection_api.py
 delete mode 100644 utils/test/result_collection_api/samples/sample.json.postman_collection
 create mode 100644 utils/test/result_collection_api/setup.cfg
 create mode 100644 utils/test/result_collection_api/setup.py
 create mode 100644 utils/test/result_collection_api/test-requirements.txt
 delete mode 100644 utils/test/result_collection_api/tests/__init__.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/__init__.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/fake_pymongo.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_base.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_dashboard.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_fake_pymongo.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_pod.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_project.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_result.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_testcase.py
 delete mode 100644 utils/test/result_collection_api/tests/unit/test_version.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/LICENSE
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/README
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/README.md
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/__init__.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/example/basic.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/setup.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/__init__.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/handlers.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/settings.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/.gitignore
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/css/highlight.default.css
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/css/hightlight.default.css
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/css/screen.css
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/endpoint.html
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/images/explorer_icons.png
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/images/logo_small.png
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/images/pet_store_api.png
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/images/throbber.gif
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/images/wordnik_api.png
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/index.html
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/backbone-min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/handlebars-1.0.0.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/highlight.7.3.pack.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/jquery-1.8.0.min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/jquery.ba-bbq.min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/jquery.slideto.min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/jquery.wiggle.min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/shred.bundle.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/shred/content.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/swagger-oauth.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/swagger.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/lib/underscore-min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/o2c.html
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/swagger-ui.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/static/swagger-ui.min.js
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/swagger.py
 delete mode 100644 utils/test/result_collection_api/tornado_swagger_ui/tornado_swagger/views.py

diff --git a/utils/test/result_collection_api/README.md b/utils/test/result_collection_api/README.md
deleted file mode 100644
index 2798db6e3..000000000
--- a/utils/test/result_collection_api/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# result_collection_api
-
-## prepare:
-Install:
-
-```
-pip install testtools
-pip install discover
-pip install futures
-```
-
-## How to:
-run_test.sh:
-
-```
-bash ./run_test.sh
-```
diff --git a/utils/test/result_collection_api/README.rst b/utils/test/result_collection_api/README.rst
new file mode 100644
index 000000000..c0075bc76
--- /dev/null
+++ b/utils/test/result_collection_api/README.rst
@@ -0,0 +1,64 @@
+=============
+opnfv-testapi
+=============
+
+Test Results Collector of OPNFV Test Projects
+
+Start Server
+==============
+
+Getting setup
+^^^^^^^^^^^^^
+
+Requirements for opnfv-testapi:
+
+* tornado
+* epydoc
+
+These requirements are expressed in the requirements.txt file and may be
+installed by running the following (from within a virtual environment)::
+
+    pip install -r requirements.txt
+
+How to install
+^^^^^^^^^^^^^^
+
+From within your environment, just run::
+
+    python setup.py install
+
+How to run
+^^^^^^^^^^
+
+From within your environment, just run::
+
+    opnfv-testapi
+
+This will start a server on port 8000. Just visit http://localhost:8000
+
+For the swagger website, just visit http://localhost:8000/swagger/spec.html
+
+Unittest
+=====================
+
+Getting setup
+^^^^^^^^^^^^^
+
+Requirements for unittest:
+
+* testtools
+* discover
+* futures
+
+These requirements are expressed in the test-requirements.txt file and may be
+installed by running the following (from within a virtual environment)::
+
+    pip install -r test-requirements.txt
+
+How to run
+^^^^^^^^^^
+
+From within your environment, just run::
+
+    bash run_test.sh
+
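Once the server is up, collected results can also be fetched over plain HTTP.
A minimal sketch, assuming the /results endpoint and the project/case query
parameters used by the dashboard helpers later in this patch (the project and
case values here are hypothetical)::

    import requests

    # GET /results filtered by project and test case
    r = requests.get("http://localhost:8000/results"
                     "?project=functest&case=vPing")
    print r.json()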
diff --git a/utils/test/result_collection_api/common/__init__.py b/utils/test/result_collection_api/common/__init__.py
deleted file mode 100644
index 05c0c9392..000000000
--- a/utils/test/result_collection_api/common/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/utils/test/result_collection_api/common/config.py b/utils/test/result_collection_api/common/config.py
deleted file mode 100644
index 369bdd248..000000000
--- a/utils/test/result_collection_api/common/config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# feng.xiaowei@zte.com.cn remove prepare_put_request 5-30-2016
-##############################################################################
-
-
-from ConfigParser import SafeConfigParser, NoOptionError
-
-
-class ParseError(Exception):
-    """
-    Custom exception class for config file
-    """
-
-    def __init__(self, message):
-        self.msg = message
-
-    def __str__(self):
-        return 'error parsing config file : %s' % self.msg
-
-
-class APIConfig:
-    """
-    The purpose of this class is to load values correctly from the config file.
-    Each key is declared as an attribute in __init__() and linked in parse()
-    """
-
-    def __init__(self):
-        self._default_config_location = "config.ini"
-        self.mongo_url = None
-        self.mongo_dbname = None
-        self.api_port = None
-        self.api_debug_on = None
-        self._parser = None
-
-    def _get_parameter(self, section, param):
-        try:
-            return self._parser.get(section, param)
-        except NoOptionError:
-            raise ParseError("[%s.%s] parameter not found" % (section, param))
-
-    def _get_int_parameter(self, section, param):
-        try:
-            return int(self._get_parameter(section, param))
-        except ValueError:
-            raise ParseError("[%s.%s] not an int" % (section, param))
-
-    def _get_bool_parameter(self, section, param):
-        result = self._get_parameter(section, param)
-        if str(result).lower() == 'true':
-            return True
-        if str(result).lower() == 'false':
-            return False
-
-        raise ParseError(
-            "[%s.%s : %s] not a boolean" % (section, param, result))
-
-    @staticmethod
-    def parse(config_location=None):
-        obj = APIConfig()
-
-        if config_location is None:
-            config_location = obj._default_config_location
-
-        obj._parser = SafeConfigParser()
-        obj._parser.read(config_location)
-        if not obj._parser:
-            raise ParseError("%s not found" % config_location)
-
-        # Linking attributes to keys from file with their sections
-        obj.mongo_url = obj._get_parameter("mongo", "url")
-        obj.mongo_dbname = obj._get_parameter("mongo", "dbname")
-
-        obj.api_port = obj._get_int_parameter("api", "port")
-        obj.api_debug_on = obj._get_bool_parameter("api", "debug")
-
-        return obj
-
-    def __str__(self):
-        return "mongo_url = %s \n" \
-               "mongo_dbname = %s \n" \
-               "api_port = %s \n" \
-               "api_debug_on = %s \n" % (self.mongo_url,
-                                         self.mongo_dbname,
-                                         self.api_port,
-                                         self.api_debug_on)
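For illustration, the APIConfig class removed above (and re-added under
opnfv_testapi/common/ elsewhere in this patch) is driven by the config.ini
shown below. A minimal sketch of how a caller might load it — the import path
reflects the old, pre-move layout and the file path is hypothetical::

    from common.config import APIConfig

    CONF = APIConfig.parse("config.ini")
    print CONF.mongo_url      # e.g. mongodb://127.0.0.1:27017/
    print CONF.api_port       # e.g. 8000
    print CONF.api_debug_on   # e.g. True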
diff --git a/utils/test/result_collection_api/common/constants.py b/utils/test/result_collection_api/common/constants.py
deleted file mode 100644
index 4d39a142d..000000000
--- a/utils/test/result_collection_api/common/constants.py
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-DEFAULT_REPRESENTATION = "application/json"
-HTTP_BAD_REQUEST = 400
-HTTP_FORBIDDEN = 403
-HTTP_NOT_FOUND = 404
-HTTP_OK = 200
diff --git a/utils/test/result_collection_api/config.ini b/utils/test/result_collection_api/config.ini
deleted file mode 100644
index f703cc6c4..000000000
--- a/utils/test/result_collection_api/config.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-# to add a new parameter in the config file,
-# the CONF object in config.ini must be updated
-[mongo]
-# URL of the mongo DB
-# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
-url = mongodb://127.0.0.1:27017/
-dbname = test_results_collection
-[api]
-# Listening port
-port = 8000
-# With debug_on set to true, error traces will be shown in HTTP responses
-debug = True
\ No newline at end of file
diff --git a/utils/test/result_collection_api/dashboard/__init__.py b/utils/test/result_collection_api/dashboard/__init__.py
deleted file mode 100644
index 05c0c9392..000000000
--- a/utils/test/result_collection_api/dashboard/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Orange
-# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
deleted file mode 100755
index 2e106bec8..000000000
--- a/utils/test/result_collection_api/dashboard/bottlenecks2Dashboard.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/python
-#
-##############################################################################
-# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# This script is used to build dashboard ready json results
-# It may be used for all the test cases of the Bottlenecks project;
-# each test case needs a new method format_<testcase>_for_dashboard(results)
-# v0.1: basic example with methods for Rubbos.
-#
-import os
-import requests
-import json
-
-
-def get_bottlenecks_cases():
-    """
-    get the list of the supported test cases
-    TODO: update the list when adding a new test case for the dashboard
-    """
-    return ["rubbos", "tu1", "tu3"]
-
-
-def check_bottlenecks_case_exist(case):
-    """
-    check if the testcase exists
-    if the test case is not defined or not declared in the list
-    return False
-    """
-    bottlenecks_cases = get_bottlenecks_cases()
-
-    if case is None or case not in bottlenecks_cases:
-        return False
-    else:
-        return True
-
-
-def format_bottlenecks_for_dashboard(case, results):
-    """
-    generic method calling the method corresponding to the test case
-    check that the testcase is properly declared first
-    then build the call to the specific method
-    """
-    if check_bottlenecks_case_exist(case):
-        cmd = "format_" + case + "_for_dashboard(results)"
-        res = eval(cmd)
-    else:
-        res = []
-        print "Test cases not declared"
-    return res
-
-
-def format_rubbos_for_dashboard(results):
-    """
-    Post processing for the Rubbos test case
-    """
-    test_data = [{'description': 'Rubbos results'}]
-
-    # Graph 1: Rubbos maximal throughput
-    # ********************************
-    #new_element = []
-    #for each_result in results:
-    #    throughput_data = [record['throughput'] for record in each_result['details']]
-    #    new_element.append({'x': each_result['start_date'],
-    #                        'y': max(throughput_data)})
-
-    #test_data.append({'name': "Rubbos max throughput",
-    #                  'info': {'type': "graph",
-    #                           'xlabel': 'time',
-    #                           'ylabel': 'maximal throughput'},
-    #                  'data_set': new_element})
-
-    # Graph 2: Rubbos last record
-    # ********************************
-    new_element = []
-    latest_result = results[-1]["details"]
-    for data in latest_result:
-        client_num = int(data["client"])
-        throughput = int(data["throughput"])
-        new_element.append({'x': client_num,
-                            'y': throughput})
-    test_data.append({'name': "Rubbos throughput vs client number",
-                      'info': {'type': "graph",
-                               'xlabel': 'client number',
-                               'ylabel': 'throughput'},
-                      'data_set': new_element})
-
-    return test_data
-
-
-def format_tu1_for_dashboard(results):
-    test_data = [{'description': 'Tu-1 performance result'}]
-    line_element = []
-    bar_element = {}
-    last_result = results[-1]["details"]
-    for key in sorted(last_result):
-        bandwith = last_result[key]["Bandwidth"]
-        pktsize = int(key)
-        line_element.append({'x': pktsize,
-                             'y': bandwith * 1000})
-        bar_element[key] = bandwith * 1000
-    # graph1, line
-    test_data.append({'name': "VM2VM max single directional throughput",
-                      'info': {'type': "graph",
-                               'xlabel': 'pktsize',
-                               'ylabel': 'bandwith(kpps)'},
-                      'data_set': line_element})
-    # graph2, bar
-    test_data.append({'name': "VM2VM max single directional throughput",
                      'info': {"type": "bar"},
-                      'data_set': bar_element})
-    return test_data
-
-
-def format_tu3_for_dashboard(results):
-    test_data = [{'description': 'Tu-3 performance result'}]
-    new_element = []
-    bar_element = {}
-    last_result = results[-1]["details"]
-    for key in sorted(last_result):
-        bandwith = last_result[key]["Bandwidth"]
-        pktsize = int(key)
-        new_element.append({'x': pktsize,
-                            'y': bandwith * 1000})
-        bar_element[key] = bandwith * 1000
-    # graph1, line
-    test_data.append({'name': "VM2VM max bidirectional throughput",
-                      'info': {'type': "graph",
-                               'xlabel': 'pktsize',
-                               'ylabel': 'bandwith(kpps)'},
-                      'data_set': new_element})
-    # graph2, bar
-    test_data.append({'name': "VM2VM max single directional throughput",
-                      'info': {"type": "bar"},
-                      'data_set': bar_element})
-    return test_data
-
-
-############################ For local test ################################
-
-def _read_sample_output(filename):
-    curr_path = os.path.dirname(os.path.abspath(__file__))
-    output = os.path.join(curr_path, filename)
-    with open(output) as f:
-        sample_output = f.read()
-
-    result = json.loads(sample_output)
-    return result
-
-
-# Copy from functest/testcases/Dashboard/dashboard_utils.py
-# and did some minor modification for local test.
-def _get_results(db_url, test_criteria):
-    test_project = test_criteria["project"]
-    testcase = test_criteria["testcase"]
-
-    # Build headers
-    headers = {'Content-Type': 'application/json'}
-
-    # build the request
-    # if criteria is all => remove criteria
-    url = db_url + "/results?project=" + test_project + "&case=" + testcase
-
-    # Send Request to Test DB
-    myData = requests.get(url, headers=headers)
-
-    # Get result as a json object
-    myNewData = json.loads(myData.text)
-
-    # Get results
-    myDataResults = myNewData['test_results']
-    return myDataResults
-
-#only for local test
-def _test():
-    db_url = "http://testresults.opnfv.org/testapi"
-    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
-    test_result = format_rubbos_for_dashboard(results)
-    print json.dumps(test_result, indent=4)
-
-    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu1"})
-    #results = _read_sample_output("sample")
-    #print json.dumps(results, indent=4)
-    test_result = format_tu1_for_dashboard(results)
-    print json.dumps(test_result, indent=4)
-    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu3"})
-    test_result = format_tu3_for_dashboard(results)
-    print json.dumps(test_result, indent=4)
-
-
-if __name__ == '__main__':
-    _test()
-
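For reference, format_rubbos_for_dashboard() above consumes raw results whose
details field is a list of client/throughput records; a sketch with
hypothetical values (json is already imported in that module)::

    results = [{"start_date": "2016-06-01 12:00",
                "details": [{"client": "200", "throughput": "1500"},
                            {"client": "400", "throughput": "2800"}]}]
    # yields the description entry plus one graph data set
    print json.dumps(format_rubbos_for_dashboard(results), indent=4)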
diff --git a/utils/test/result_collection_api/dashboard/dashboard_utils.py b/utils/test/result_collection_api/dashboard/dashboard_utils.py
deleted file mode 100644
index 9de5b191c..000000000
--- a/utils/test/result_collection_api/dashboard/dashboard_utils.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to retrieve data from the test DB
-# and format them into a json format adapted for a dashboard
-#
-# v0.1: basic example
-#
-import os
-import re
-import sys
-from functest2Dashboard import format_functest_for_dashboard, \
-    check_functest_case_exist
-from yardstick2Dashboard import format_yardstick_for_dashboard, \
-    check_yardstick_case_exist
-from vsperf2Dashboard import format_vsperf_for_dashboard, \
-    check_vsperf_case_exist
-from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
-    check_bottlenecks_case_exist
-from qtip2Dashboard import format_qtip_for_dashboard, \
-    check_qtip_case_exist
-from promise2Dashboard import format_promise_for_dashboard, \
-    check_promise_case_exist
-from doctor2Dashboard import format_doctor_for_dashboard, \
-    check_doctor_case_exist
-
-# any test project wishing to provide dashboard ready values
-# must include at least 2 methods
-# - format_<project>_for_dashboard
-# - check_<project>_case_exist
-
-
-def check_dashboard_ready_project(test_project):
-    # Check that the first param corresponds to a project
-    # for which dashboard processing is available
-    # print("test_project: %s" % test_project)
-    project_module = 'dashboard.' + test_project + '2Dashboard'
-    return True if project_module in sys.modules else False
-
-
-def check_dashboard_ready_case(project, case):
-    cmd = "check_" + project + "_case_exist(case)"
-    return eval(cmd)
-
-
-def get_dashboard_cases():
-    # Retrieve all the test cases that could provide
-    # Dashboard ready graphs
-    # look in the releng repo
-    # search all the <project>2Dashboard.py files
-    # we assume that dashboard processing of a project
-    # is performed in the <project>2Dashboard.py file
-    modules = []
-    cp = re.compile('dashboard.*2Dashboard')
-    for module in sys.modules:
-        if re.match(cp, module):
-            modules.append(module)
-
-    return modules
-
-
-def get_dashboard_result(project, case, results=None):
-    # get the dashboard ready results
-    # parameters are:
-    # project: project name
-    # results: array of raw results pre-filtered
-    #          according to the parameters of the request
-    cmd = "format_" + project + "_for_dashboard(case,results)"
-    res = eval(cmd)
-    return res
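The two-method convention documented above means a new project only has to
ship a <project>2Dashboard module exposing both entry points to be picked up
by this dispatcher. A hypothetical skeleton (module, case, and project names
are invented for illustration)::

    def get_newproject_cases():
        return ["case1"]


    def check_newproject_case_exist(case):
        return case in get_newproject_cases()


    def format_newproject_for_dashboard(case, results):
        if not check_newproject_case_exist(case):
            return []
        return [{'description': 'newproject results for Dashboard'}]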
- cmd = "format_" + case.replace('-','_') + "_case_for_dashboard(results)" - res = eval(cmd) - else: - res = [] - return res - - -def check_doctor_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - doctor_cases = get_doctor_cases() - - if (case is None or case not in doctor_cases): - return False - else: - return True - - -def format_doctor_mark_down_case_for_dashboard(results): - """ - Post processing for the doctor test case - """ - test_data = [{'description': 'doctor-mark-down results for Dashboard'}] - return test_data - - -def format_doctor_notification_case_for_dashboard(results): - """ - Post processing for the doctor-notification test case - """ - test_data = [{'description': 'doctor results for Dashboard'}] - # Graph 1: (duration)=f(time) - # *************************************** - new_element = [] - - # default duration 0:00:08.999904 - # consider only seconds => 09 - for data in results: - t = data['details']['duration'] - new_element.append({'x': data['start_date'], - 'y': t}) - - test_data.append({'name': "doctor-notification duration ", - 'info': {'type': "graph", - 'xlabel': 'time (s)', - 'ylabel': 'duration (s)'}, - 'data_set': new_element}) - - # Graph 2: bar - # ************ - nbTest = 0 - nbTestOk = 0 - - for data in results: - nbTest += 1 - if data['details']['status'] == "OK": - nbTestOk += 1 - - test_data.append({'name': "doctor-notification status", - 'info': {"type": "bar"}, - 'data_set': [{'Nb tests': nbTest, - 'Nb Success': nbTestOk}]}) - - return test_data diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py deleted file mode 100644 index 86521b984..000000000 --- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py +++ /dev/null @@ -1,472 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2015 Orange -# morgan.richomme@orange.com -# -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# This script is used to build dashboard ready json results -# It may be used for all the test case of the Functest project -# a new method format__for_dashboard(results) -# v0.1: basic example with methods for odl, Tempest, Rally and vPing -# -import datetime -import re - - -def get_functest_cases(): - """ - get the list of the supported test cases - TODO: update the list when adding a new test case for the dashboard - """ - return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL", - "ONOS", "Rally"] - - -def format_functest_for_dashboard(case, results): - """ - generic method calling the method corresponding to the test case - check that the testcase is properly declared first - then build the call to the specific method - """ - if check_functest_case_exist(case): - cmd = "format_" + case + "_for_dashboard(results)" - res = eval(cmd) - else: - res = [] - print "Test cases not declared" - return res - - -def check_functest_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - functest_cases = get_functest_cases() - - if (case is None or case not in functest_cases): - return False - else: - return True - - -def format_status_for_dashboard(results): - test_data = [{'description': 'Functest status'}] - - # define magic 
diff --git a/utils/test/result_collection_api/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/dashboard/functest2Dashboard.py
deleted file mode 100644
index 86521b984..000000000
--- a/utils/test/result_collection_api/dashboard/functest2Dashboard.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 Orange
-# morgan.richomme@orange.com
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# This script is used to build dashboard ready json results
-# It may be used for all the test cases of the Functest project;
-# each test case needs a new method format_<testcase>_for_dashboard(results)
-# v0.1: basic example with methods for odl, Tempest, Rally and vPing
-#
-import datetime
-import re
-
-
-def get_functest_cases():
-    """
-    get the list of the supported test cases
-    TODO: update the list when adding a new test case for the dashboard
-    """
-    return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL",
-            "ONOS", "Rally"]
-
-
-def format_functest_for_dashboard(case, results):
-    """
-    generic method calling the method corresponding to the test case
-    check that the testcase is properly declared first
-    then build the call to the specific method
-    """
-    if check_functest_case_exist(case):
-        cmd = "format_" + case + "_for_dashboard(results)"
-        res = eval(cmd)
-    else:
-        res = []
-        print "Test cases not declared"
-    return res
-
-
-def check_functest_case_exist(case):
-    """
-    check if the testcase exists
-    if the test case is not defined or not declared in the list
-    return False
-    """
-    functest_cases = get_functest_cases()
-
-    if (case is None or case not in functest_cases):
-        return False
-    else:
-        return True
-
-
-def format_status_for_dashboard(results):
-    test_data = [{'description': 'Functest status'}]
-
-    # define magic equation for the status....
-    # 5 suites: vPing, odl, Tempest, vIMS, Rally
-    # Which overall KPI makes sense...
-
-    # TODO to be done and discussed
-    testcases = get_functest_cases()
-    test_data.append({'nb test suite(s) run': len(testcases)-1})
-    test_data.append({'vPing': '100%'})
-    test_data.append({'VIM status': '82%'})
-    test_data.append({'SDN Controllers': {'odl': '92%',
-                                          'onos': '95%',
-                                          'ocl': '93%'}})
-    test_data.append({'VNF deployment': '95%'})
-
-    return test_data
-
-
-def format_vIMS_for_dashboard(results):
-    """
-    Post processing for the vIMS test case
-    """
-    test_data = [{'description': 'vIMS results for Dashboard'}]
-
-    # Graph 1: (duration_deployment_orchestrator,
-    #           duration_deployment_vnf,
-    #           duration_test) = f(time)
-    # ********************************
-    new_element = []
-
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y1': data['details']['orchestrator']['duration'],
-                            'y2': data['details']['vIMS']['duration'],
-                            'y3': data['details']['sig_test']['duration']})
-
-    test_data.append({'name': "vIMS orchestrator/VNF/test duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'orchestration deployment duration',
-                               'y2label': 'vIMS deployment duration',
-                               'y3label': 'vIMS test duration'},
-                      'data_set': new_element})
-
-    # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
-    # **************************************************
-    new_element = []
-
-    for data in results:
-        # Retrieve all the tests
-        nbTests = 0
-        nbFailures = 0
-        nbSkipped = 0
-        vIMS_test = data['details']['sig_test']['result']
-
-        for data_test in vIMS_test:
-            # Calculate nb of tests run and nb of tests failed
-            # vIMS_results = get_vIMSresults(vIMS_test)
-            # print vIMS_results
-            try:
-                if data_test['result'] == "Passed":
-                    nbTests += 1
-                elif data_test['result'] == "Failed":
-                    nbFailures += 1
-                elif data_test['result'] == "Skipped":
-                    nbSkipped += 1
-            except:
-                nbTests = 0
-
-        new_element.append({'x': data['start_date'],
-                            'y1': nbTests,
-                            'y2': nbFailures,
-                            'y3': nbSkipped})
-
-    test_data.append({'name': "vIMS nb tests passed/failed/skipped",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Number of tests passed',
-                               'y2label': 'Number of tests failed',
-                               'y3label': 'Number of tests skipped'},
-                      'data_set': new_element})
-
-    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
-    # ********************************************************
-    nbTests = 0
-    nbFailures = 0
-
-    for data in results:
-        vIMS_test = data['details']['sig_test']['result']
-
-        for data_test in vIMS_test:
-            nbTestsOK = 0
-            nbTestsKO = 0
-
-            try:
-                if data_test['result'] == "Passed":
-                    nbTestsOK += 1
-                elif data_test['result'] == "Failed":
-                    nbTestsKO += 1
-            except:
-                nbTestsOK = 0
-
-            nbTests += nbTestsOK + nbTestsKO
-            nbFailures += nbTestsKO
-
-    test_data.append({'name': "Total number of tests run/failure tests",
-                      'info': {"type": "bar"},
-                      'data_set': [{'Run': nbTests,
-                                    'Failed': nbFailures}]})
-
-    return test_data
-
-
-def format_Tempest_for_dashboard(results):
-    """
-    Post processing for the Tempest test case
-    """
-    test_data = [{'description': 'Tempest results for Dashboard'}]
-
-    # Graph 1: Test_Duration = f(time)
-    # ********************************
-    new_element = []
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y': data['details']['duration']})
-
-    test_data.append({'name': "Tempest duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 2: (Nb test, nb failure)=f(time)
-    # ***************************************
-    new_element = []
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y1': data['details']['tests'],
-                            'y2': data['details']['failures']})
-
-    test_data.append({'name': "Tempest nb tests/nb failures",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Number of tests',
-                               'y2label': 'Number of failures'},
-                      'data_set': new_element})
-
-    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
-    # ********************************************************
-    nbTests = 0
-    nbFailures = 0
-
-    for data in results:
-        nbTests += data['details']['tests']
-        nbFailures += data['details']['failures']
-
-    test_data.append({'name': "Total number of tests run/failure tests",
-                      'info': {"type": "bar"},
-                      'data_set': [{'Run': nbTests,
-                                    'Failed': nbFailures}]})
-
-    # Graph 4: (Success rate)=f(time)
-    # ***************************************
-    new_element = []
-    for data in results:
-        try:
-            diff = (int(data['details']['tests']) - int(data['details']['failures']))
-            success_rate = 100*diff/int(data['details']['tests'])
-        except:
-            success_rate = 0
-
-        new_element.append({'x': data['start_date'],
-                            'y1': success_rate})
-
-    test_data.append({'name': "Tempest success rate",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Success rate'},
-                      'data_set': new_element})
-
-    return test_data
-
-
-def format_ODL_for_dashboard(results):
-    """
-    Post processing for the ODL test case
-    """
-    test_data = [{'description': 'ODL results for Dashboard'}]
-
-    # Graph 1: (Nb test, nb failure)=f(time)
-    # ***************************************
-    new_element = []
-
-    for data in results:
-        odl_results = data['details']['details']
-        nbFailures = 0
-        for odl in odl_results:
-            if (odl['test_status']['@status'] == "FAIL"):
-                nbFailures += 1
-        new_element.append({'x': data['start_date'],
-                            'y1': len(odl_results),
-                            'y2': nbFailures})
-
-    test_data.append({'name': "ODL nb tests/nb failures",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Number of tests',
-                               'y2label': 'Number of failures'},
-                      'data_set': new_element})
-    return test_data
-
-
-def format_ONOS_for_dashboard(results):
-    """
-    Post processing for the ONOS test case
-    """
-    test_data = [{'description': 'ONOS results for Dashboard'}]
-    # Graph 1: (duration FUNCvirtNet)=f(time)
-    # ***************************************
-    new_element = []
-
-    # default duration 0:00:08.999904
-    # consider only seconds => 09
-    for data in results:
-        t = data['details']['FUNCvirNet']['duration']
-        h, m, s = re.split(':', t)
-        s = round(float(s))
-        new_duration = int(datetime.timedelta(hours=int(h),
-                                              minutes=int(m),
-                                              seconds=int(s)).total_seconds())
-        new_element.append({'x': data['start_date'],
-                            'y': new_duration})
-
-    test_data.append({'name': "ONOS FUNCvirNet duration ",
-                      'info': {'type': "graph",
-                               'xlabel': 'time (s)',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time)
-    # ***************************************
-    new_element = []
-
-    for data in results:
-        onos_results = data['details']['FUNCvirNet']['status']
-        nbFailures = 0
-        for onos in onos_results:
-            if (onos['Case result'] == "FAIL"):
-                nbFailures += 1
-        new_element.append({'x': data['start_date'],
-                            'y1': len(onos_results),
-                            'y2': nbFailures})
-
-    test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Number of tests',
-                               'y2label': 'Number of failures'},
-                      'data_set': new_element})
-
-    # Graph 3: (duration FUNCvirtNetL3)=f(time)
-    # ***************************************
-    new_element = []
-
-    # default duration 0:00:08.999904
-    # consider only seconds => 09
-    for data in results:
-        t = data['details']['FUNCvirNetL3']['duration']
-        h, m, s = re.split(':', t)
-        s = round(float(s))
-        new_duration = int(datetime.timedelta(hours=int(h),
-                                              minutes=int(m),
-                                              seconds=int(s)).total_seconds())
-        new_element.append({'x': data['start_date'],
-                            'y': new_duration})
-
-    test_data.append({'name': "ONOS FUNCvirNetL3 duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time (s)',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time)
-    # ***************************************
-    new_element = []
-
-    for data in results:
-        onos_results = data['details']['FUNCvirNetL3']['status']
-        nbFailures = 0
-        for onos in onos_results:
-            if (onos['Case result'] == "FAIL"):
-                nbFailures += 1
-        new_element.append({'x': data['start_date'],
-                            'y1': len(onos_results),
-                            'y2': nbFailures})
-
-    test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'y1label': 'Number of tests',
-                               'y2label': 'Number of failures'},
-                      'data_set': new_element})
-    return test_data
-
-
-def format_Rally_for_dashboard(results):
-    """
-    Post processing for the Rally test case
-    """
-    test_data = [{'description': 'Rally results for Dashboard'}]
-    # Graph 1: Test_Duration = f(time)
-    # ********************************
-    new_element = []
-    for data in results:
-        summary_cursor = len(data['details']) - 1
-        new_element.append({'x': data['start_date'],
-                            'y': int(data['details'][summary_cursor]['summary']['duration'])})
-
-    test_data.append({'name': "rally duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 2: Success rate = f(time)
-    # ********************************
-    new_element = []
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y': float(data['details'][summary_cursor]['summary']['nb success'])})
-
-    test_data.append({'name': "rally success rate",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'ylabel': 'success rate (%)'},
-                      'data_set': new_element})
-
-    return test_data
-
-
-def format_vPing_for_dashboard(results):
-    """
-    Post processing for the vPing test case
-    """
-    test_data = [{'description': 'vPing results for Dashboard'}]
-
-    # Graph 1: Test_Duration = f(time)
-    # ********************************
-    new_element = []
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y': data['details']['duration']})
-
-    test_data.append({'name': "vPing duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 2: bar
-    # ************
-    nbTest = 0
-    nbTestOk = 0
-
-    for data in results:
-        nbTest += 1
-        if data['details']['status'] == "OK":
-            nbTestOk += 1
-
-    test_data.append({'name': "vPing status",
-                      'info': {"type": "bar"},
-                      'data_set': [{'Nb tests': nbTest,
-                                    'Nb Success': nbTestOk}]})
-
-    return test_data
-
-
-def format_vPing_userdata_for_dashboard(results):
-    """
-    Post processing for the vPing_userdata test case
-    """
-    test_data = [{'description': 'vPing_userdata results for Dashboard'}]
-
-    # Graph 1: Test_Duration = f(time)
-    # ********************************
-    new_element = []
-    for data in results:
-        new_element.append({'x': data['start_date'],
-                            'y': data['details']['duration']})
-
-    test_data.append({'name': "vPing_userdata duration",
-                      'info': {'type': "graph",
-                               'xlabel': 'time',
-                               'ylabel': 'duration (s)'},
-                      'data_set': new_element})
-
-    # Graph 2: bar
-    # ************
-    nbTest = 0
-    nbTestOk = 0
-
-    for data in results:
-        nbTest += 1
-        if data['details']['status'] == "OK":
-            nbTestOk += 1
-
-    test_data.append({'name': "vPing_userdata status",
-                      'info': {"type": "bar"},
-                      'data_set': [{'Nb tests': nbTest,
-                                    'Nb Success': nbTestOk}]})
-
-    return test_data
- cmd = "format_" + case + "_case_for_dashboard(results)" - res = eval(cmd) - else: - res = [] - print "Test cases not declared" - return res - - -def check_promise_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - promise_cases = get_promise_cases() - - if (case is None or case not in promise_cases): - return False - else: - return True - - - - - -def format_promise_case_for_dashboard(results): - """ - Post processing for the promise test case - """ - test_data = [{'description': 'Promise results for Dashboard'}] - # Graph 1: (duration)=f(time) - # *************************************** - new_element = [] - - # default duration 0:00:08.999904 - # consider only seconds => 09 - for data in results: - t = data['details']['duration'] - new_element.append({'x': data['creation_date'], - 'y': t}) - - test_data.append({'name': "Promise duration ", - 'info': {'type': "graph", - 'xlabel': 'time (s)', - 'ylabel': 'duration (s)'}, - 'data_set': new_element}) - - # Graph 2: (Nb test, nb failure)=f(time) - # *************************************** - new_element = [] - - for data in results: - promise_results = data['details'] - new_element.append({'x': data['creation_date'], - 'y1': promise_results['tests'], - 'y2': promise_results['failures']}) - - test_data.append({'name': "Promise nb tests/nb failures", - 'info': {'type': "graph", - 'xlabel': 'time', - 'y1label': 'Number of tests', - 'y2label': 'Number of failures'}, - 'data_set': new_element}) - - return test_data diff --git a/utils/test/result_collection_api/dashboard/qtip2Dashboard.py b/utils/test/result_collection_api/dashboard/qtip2Dashboard.py deleted file mode 100644 index 6ceccd374..000000000 --- a/utils/test/result_collection_api/dashboard/qtip2Dashboard.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python - -############################################################################## -# Copyright (c) 2015 Dell Inc and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - - -def get_qtip_cases(): - """ - get the list of the supported test cases - TODO: update the list when adding a new test case for the dashboard - """ - return ["compute_test_suite","storage_test_suite","network_test_suite"] - -def check_qtip_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - qtip_cases = get_qtip_cases() - if (case is None or case not in qtip_cases): - return False - else: - return True - -def format_qtip_for_dashboard(case, results): - """ - generic method calling the method corresponding to the test case - check that the testcase is properly declared first - then build the call to the specific method - """ - if check_qtip_case_exist(case): - res = format_common_for_dashboard(case, results) - else: - res = [] - print "Test cases not declared" - return res - -def format_common_for_dashboard(case, results): - """ - Common post processing - """ - test_data_description = case + " results for Dashboard" - test_data = [{'description': test_data_description}] - - graph_name = '' - if "network_test_suite" in case: - graph_name = "Throughput index" - else: - graph_name = "Index" - - # Graph 1: - # ******************************** - new_element = [] - for date, index in results: - new_element.append({'x': date, - 'y1': index, - }) - - test_data.append({'name': graph_name, - 'info': {'type': "graph", - 'xlabel': 'time', - 'y1label': 'Index Number'}, - 'data_set': new_element}) - - return test_data - - -############################ For local test ################################ -import os -import requests -import json -from collections import defaultdict - -def _get_results(db_url, testcase): - - testproject = testcase["project"] - testcase = testcase["testcase"] - resultarray = defaultdict() - #header - header = {'Content-Type': 'application/json'} - #url - url = db_url + "/results?project="+testproject+"&case="+testcase - data = requests.get(url,header) - datajson = data.json() - for x in range(0, len(datajson['test_results'])): - - rawresults = datajson['test_results'][x]['details'] - index = rawresults['index'] - resultarray[str(datajson['test_results'][x]['start_date'])]=index - - return resultarray - -def _test(): - - db_url = "http://testresults.opnfv.org/testapi" - raw_result = defaultdict() - - raw_result = _get_results(db_url, {"project": "qtip", "testcase": "compute_test_suite"}) - resultitems= raw_result.items() - result = format_qtip_for_dashboard("compute_test_suite", resultitems) - print result - - raw_result = _get_results(db_url, {"project": "qtip", "testcase": "storage_test_suite"}) - resultitems= raw_result.items() - result = format_qtip_for_dashboard("storage_test_suite", resultitems) - print result - - raw_result = _get_results(db_url, {"project": "qtip", "testcase": "network_test_suite"}) - resultitems= raw_result.items() - result = format_qtip_for_dashboard("network_test_suite", resultitems) - print result - -if __name__ == '__main__': - _test() diff --git a/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py deleted file mode 100755 index 5a6882da4..000000000 --- 
a/utils/test/result_collection_api/dashboard/vsperf2Dashboard.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python - -# Copyright 2015 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"), -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -def get_vsperf_cases(): - """ - get the list of the supported test cases - TODO: update the list when adding a new test case for the dashboard - """ - return ["tput_ovsdpdk", "tput_ovs", - "b2b_ovsdpdk", "b2b_ovs", - "tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs", - "cont_ovsdpdk", "cont_ovs", - "pvp_cont_ovsdpdkuser", "pvp_cont_ovsdpdkcuse", "pvp_cont_ovsvirtio", - "pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsdpdkcuse", "pvvp_cont_ovsvirtio", - "scalability_ovsdpdk", "scalability_ovs", - "pvp_tput_ovsdpdkuser", "pvp_tput_ovsdpdkcuse", "pvp_tput_ovsvirtio", - "pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsdpdkcuse", "pvp_b2b_ovsvirtio", - "pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsdpdkcuse", "pvvp_tput_ovsvirtio", - "pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsdpdkcuse", "pvvp_b2b_ovsvirtio", - "cpu_load_ovsdpdk", "cpu_load_ovs", - "mem_load_ovsdpdk", "mem_load_ovs"] - - -def check_vsperf_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - vsperf_cases = get_vsperf_cases() - - if (case is None or case not in vsperf_cases): - return False - else: - return True - - -def format_vsperf_for_dashboard(case, results): - """ - generic method calling the method corresponding to the test case - check that the testcase is properly declared first - then build the call to the specific method - """ - if check_vsperf_case_exist(case): - res = format_common_for_dashboard(case, results) - else: - res = [] - print "Test cases not declared" - return res - - -def format_common_for_dashboard(case, results): - """ - Common post processing - """ - test_data_description = case + " results for Dashboard" - test_data = [{'description': test_data_description}] - - graph_name = '' - if "b2b" in case: - graph_name = "B2B frames" - else: - graph_name = "Rx frames per second" - - # Graph 1: Rx fps = f(time) - # ******************************** - new_element = [] - for data in results: - new_element.append({'x': data['start_date'], - 'y1': data['details']['64'], - 'y2': data['details']['128'], - 'y3': data['details']['512'], - 'y4': data['details']['1024'], - 'y5': data['details']['1518']}) - - test_data.append({'name': graph_name, - 'info': {'type': "graph", - 'xlabel': 'time', - 'y1label': 'frame size 64B', - 'y2label': 'frame size 128B', - 'y3label': 'frame size 512B', - 'y4label': 'frame size 1024B', - 'y5label': 'frame size 1518B'}, - 'data_set': new_element}) - - return test_data - - - - -############################ For local test ################################ -import os - -def _test(): - ans = [{'start_date': '2015-09-12', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '26.804', '1024': '1097.284', '512': '178.137', '1518': '12635.860', '128': 
'100.564'}}, - {'start_date': '2015-09-33', 'project_name': 'vsperf', 'version': 'ovs_master', 'pod_name': 'pod1-vsperf', 'case_name': 'tput_ovsdpdk', 'installer': 'build_sie', 'details': {'64': '16.804', '1024': '1087.284', '512': '168.137', '1518': '12625.860', '128': '99.564'}}] - - result = format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans) - print result - - result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans) - print result - - result = format_vsperf_for_dashboard("non_existing", ans) - print result - -if __name__ == '__main__': - _test() diff --git a/utils/test/result_collection_api/dashboard/yardstick2Dashboard.py b/utils/test/result_collection_api/dashboard/yardstick2Dashboard.py deleted file mode 100644 index 4f022d5b9..000000000 --- a/utils/test/result_collection_api/dashboard/yardstick2Dashboard.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/python -# -############################################################################## -# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# This script is used to build dashboard ready json results -# It may be used for all the test case of the Yardstick project -# a new method format__for_dashboard(results) -# v0.1: basic example with methods for Ping, Iperf, Netperf, Pktgen, -# Fio, Lmbench, Perf, Cyclictest. -# - - -def get_yardstick_cases(): - """ - get the list of the supported test cases - TODO: update the list when adding a new test case for the dashboard - """ - return ["Ping", "Iperf", "Netperf", "Pktgen", "Fio", "Lmbench", - "Perf", "Cyclictest"] - - -def format_yardstick_for_dashboard(case, results): - """ - generic method calling the method corresponding to the test case - check that the testcase is properly declared first - then build the call to the specific method - """ - if check_yardstick_case_exist(case): - cmd = "format_" + case + "_for_dashboard(results)" - res = eval(cmd) - else: - res = [] - print "Test cases not declared" - return res - - -def check_yardstick_case_exist(case): - """ - check if the testcase exists - if the test case is not defined or not declared in the list - return False - """ - yardstick_cases = get_yardstick_cases() - - if (case is None or case not in yardstick_cases): - return False - else: - return True - - -def _get_test_status_bar(results): - nbTest = 0 - nbTestOk = 0 - - for data in results: - nbTest += 1 - records = [record for record in data['details'] - if "benchmark" in record - and record["benchmark"]["errors"] != ""] - if len(records) == 0: - nbTestOk += 1 - return nbTest, nbTestOk - - -def format_Ping_for_dashboard(results): - """ - Post processing for the Ping test case - """ - test_data = [{'description': 'Ping results for Dashboard'}] - - # Graph 1: Test_Duration = f(time) - # ******************************** - new_element = [] - for data in results: - records = [record["benchmark"]["data"]["rtt"] - for record in data['details'] - if "benchmark" in record] - - avg_rtt = sum(records) / len(records) - new_element.append({'x': data['start_date'], - 'y': avg_rtt}) - - test_data.append({'name': "ping duration", - 'info': {'type': "graph", - 'xlabel': 'time', - 'ylabel': 'duration (s)'}, - 'data_set': new_element}) - - # Graph 2: bar - # 
************ - nbTest, nbTestOk = _get_test_status_bar(results) - - test_data.append({'name': "ping status", - 'info': {"type": "bar"}, - 'data_set': [{'Nb tests': nbTest, - 'Nb Success': nbTestOk}]}) - - return test_data - - -def format_iperf_for_dashboard(results): - """ - Post processing for the Iperf test case - """ - test_data = [{'description': 'Iperf results for Dashboard'}] - return test_data - - -def format_netperf_for_dashboard(results): - """ - Post processing for the Netperf test case - """ - test_data = [{'description': 'Netperf results for Dashboard'}] - return test_data - - -def format_pktgen_for_dashboard(results): - """ - Post processing for the Pktgen test case - """ - test_data = [{'description': 'Pktgen results for Dashboard'}] - return test_data - - -def format_fio_for_dashboard(results): - """ - Post processing for the Fio test case - """ - test_data = [{'description': 'Fio results for Dashboard'}] - return test_data - - -def format_lmbench_for_dashboard(results): - """ - Post processing for the Lmbench test case - """ - test_data = [{'description': 'Lmbench results for Dashboard'}] - return test_data - - -def format_perf_for_dashboard(results): - """ - Post processing for the Perf test case - """ - test_data = [{'description': 'Perf results for Dashboard'}] - return test_data - - -def format_cyclictest_for_dashboard(results): - """ - Post processing for the Cyclictest test case - """ - test_data = [{'description': 'Cyclictest results for Dashboard'}] - return test_data - - -############################ For local test ################################ -import json -import os -import requests - -def _read_sample_output(filename): - curr_path = os.path.dirname(os.path.abspath(__file__)) - output = os.path.join(curr_path, filename) - with open(output) as f: - sample_output = f.read() - - result = json.loads(sample_output) - return result - -# Copy form functest/testcases/Dashboard/dashboard_utils.py -# and did some minor modification for local test. 
-def _get_results(db_url, test_criteria):
-
-    test_project = test_criteria["project"]
-    testcase = test_criteria["testcase"]
-
-    # Build headers
-    headers = {'Content-Type': 'application/json'}
-
-    # build the request
-    # if criteria is all => remove criteria
-    url = db_url + "/results?project=" + test_project + "&case=" + testcase
-
-    # Send Request to Test DB
-    myData = requests.get(url, headers=headers)
-
-    # Get result as a json object
-    myNewData = json.loads(myData.text)
-
-    # Get results
-    myDataResults = myNewData['test_results']
-
-    return myDataResults
-
-def _test():
-    db_url = "http://213.77.62.197"
-    result = _get_results(db_url,
-                          {"project": "yardstick", "testcase": "Ping"})
-    print format_ping_for_dashboard(result)
-
-if __name__ == '__main__':
-    _test()
diff --git a/utils/test/result_collection_api/etc/config.ini b/utils/test/result_collection_api/etc/config.ini
new file mode 100644
index 000000000..f703cc6c4
--- /dev/null
+++ b/utils/test/result_collection_api/etc/config.ini
@@ -0,0 +1,12 @@
+# to add a new parameter in this config file,
+# the CONF object in common/config.py must be updated accordingly
+[mongo]
+# URL of the mongo DB
+# Mongo auth url => mongodb://user1:pwd1@host1/?authSource=db1
+url = mongodb://127.0.0.1:27017/
+dbname = test_results_collection
+[api]
+# Listening port
+port = 8000
+# With debug_on set to true, error traces will be shown in HTTP responses
+debug = True
\ No newline at end of file
diff --git a/utils/test/result_collection_api/opnfv_testapi/__init__.py b/utils/test/result_collection_api/opnfv_testapi/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/result_collection_api/opnfv_testapi/cmd/__init__.py b/utils/test/result_collection_api/opnfv_testapi/cmd/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/result_collection_api/opnfv_testapi/cmd/result_collection_api.py b/utils/test/result_collection_api/opnfv_testapi/cmd/result_collection_api.py
new file mode 100644
index 000000000..e59e28c28
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/cmd/result_collection_api.py
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+"""
+Pre-requisites:
+    pip install motor
+    pip install tornado
+
+We can launch the API with this file
+
+TODOs:
+  - logging
+  - json args validation with schemes
+  - POST/PUT/DELETE for PODs
+  - POST/PUT/GET/DELETE for installers, platforms (enrich results info)
+  - count cases for GET on projects
+  - count results for GET on cases
+  - include objects
+  - swagger documentation
+  - setup file
+  - results pagination
+  - unit tests
+
+"""
+
+import argparse
+
+import tornado.ioloop
+import motor
+
+from opnfv_testapi.resources.handlers import VersionHandler
+from opnfv_testapi.resources.testcase_handlers import TestcaseCLHandler, \
+    TestcaseGURHandler
+from opnfv_testapi.resources.pod_handlers import PodCLHandler, PodGURHandler
+from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \
+    ProjectGURHandler
+from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \
+    ResultsGURHandler
+from opnfv_testapi.resources.dashboard_handlers import DashboardHandler
+from opnfv_testapi.common.config import APIConfig
+from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger
+
+# optionally get config file from command line
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--config-file", dest='config_file',
+                    help="Config file location")
+args = parser.parse_args()
+CONF = APIConfig().parse(args.config_file)
+
+# connecting to MongoDB server, and choosing database
+client = motor.MotorClient(CONF.mongo_url)
+db = client[CONF.mongo_dbname]
+
+
+def make_app():
+    return swagger.Application(
+        [
+            # GET /versions => GET API version
+            (r"/versions", VersionHandler),
+
+            # few examples:
+            # GET /api/v1/pods => Get all pods
+            # GET /api/v1/pods/1 => Get details on POD 1
+            (r"/api/v1/pods", PodCLHandler),
+            (r"/api/v1/pods/([^/]+)", PodGURHandler),
+
+            # few examples:
+            # GET /projects
+            # GET /projects/yardstick
+            (r"/api/v1/projects", ProjectCLHandler),
+            (r"/api/v1/projects/([^/]+)", ProjectGURHandler),
+
+            # few examples
+            # GET /projects/qtip/cases => Get cases for qtip
+            (r"/api/v1/projects/([^/]+)/cases", TestcaseCLHandler),
+            (r"/api/v1/projects/([^/]+)/cases/([^/]+)", TestcaseGURHandler),
+
+            # new path to avoid a long depth
+            # GET /results?project=functest&case=keystone.catalog&pod=1
+            # => get results with optional filters
+            # POST /results =>
+            # Push results with mandatory request payload parameters
+            # (project, case, and pod)
+            (r"/api/v1/results", ResultsCLHandler),
+            (r"/api/v1/results/([^/]+)", ResultsGURHandler),
+
+            # Method to manage Dashboard ready results
+            # GET /dashboard?project=functest&case=vPing&pod=opnfv-jump2
+            # => get results in dashboard ready format
+            # GET /dashboard
+            # => get the list of projects with dashboard ready results
+            (r"/dashboard/v1/results", DashboardHandler),
+        ],
+        db=db,
+        debug=CONF.api_debug_on,
+    )


def main():
+    application = make_app()
+    application.listen(CONF.api_port)
+    tornado.ioloop.IOLoop.current().start()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/test/result_collection_api/opnfv_testapi/common/__init__.py b/utils/test/result_collection_api/opnfv_testapi/common/__init__.py
new file mode 100644
index 000000000..05c0c9392
--- /dev/null
+++ 
b/utils/test/result_collection_api/opnfv_testapi/common/__init__.py @@ -0,0 +1,8 @@ +############################################################################## +# Copyright (c) 2015 Orange +# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## diff --git a/utils/test/result_collection_api/opnfv_testapi/common/config.py b/utils/test/result_collection_api/opnfv_testapi/common/config.py new file mode 100644 index 000000000..c444e67e3 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/common/config.py @@ -0,0 +1,91 @@ +############################################################################## +# Copyright (c) 2015 Orange +# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# feng.xiaowei@zte.com.cn remove prepare_put_request 5-30-2016 +############################################################################## + + +from ConfigParser import SafeConfigParser, NoOptionError + + +class ParseError(Exception): + """ + Custom exception class for config file + """ + + def __init__(self, message): + self.msg = message + + def __str__(self): + return 'error parsing config file : %s' % self.msg + + +class APIConfig: + """ + The purpose of this class is to load values correctly from the config file. 
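+
+    Illustrative usage (a sketch mirroring cmd/result_collection_api.py;
+    the path shown is just an example):
+        CONF = APIConfig().parse("etc/config.ini")
+        # CONF.mongo_url, CONF.mongo_dbname, CONF.api_port and
+        # CONF.api_debug_on are then available as plain attributes
+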
+    Each key is declared as an attribute in __init__() and linked in parse()
+    """
+
+    def __init__(self):
+        self._default_config_location = "/etc/opnfv_testapi/config.ini"
+        self.mongo_url = None
+        self.mongo_dbname = None
+        self.api_port = None
+        self.api_debug_on = None
+        self._parser = None
+
+    def _get_parameter(self, section, param):
+        try:
+            return self._parser.get(section, param)
+        except NoOptionError:
+            raise ParseError("[%s.%s] parameter not found" % (section, param))
+
+    def _get_int_parameter(self, section, param):
+        try:
+            return int(self._get_parameter(section, param))
+        except ValueError:
+            raise ParseError("[%s.%s] not an int" % (section, param))
+
+    def _get_bool_parameter(self, section, param):
+        result = self._get_parameter(section, param)
+        if str(result).lower() == 'true':
+            return True
+        if str(result).lower() == 'false':
+            return False
+
+        raise ParseError(
+            "[%s.%s : %s] not a boolean" % (section, param, result))
+
+    @staticmethod
+    def parse(config_location=None):
+        obj = APIConfig()
+
+        if config_location is None:
+            config_location = obj._default_config_location
+
+        obj._parser = SafeConfigParser()
+        parsed = obj._parser.read(config_location)
+        if not parsed:
+            raise ParseError("%s not found" % config_location)
+
+        # Linking attributes to keys from file with their sections
+        obj.mongo_url = obj._get_parameter("mongo", "url")
+        obj.mongo_dbname = obj._get_parameter("mongo", "dbname")
+
+        obj.api_port = obj._get_int_parameter("api", "port")
+        obj.api_debug_on = obj._get_bool_parameter("api", "debug")
+
+        return obj
+
+    def __str__(self):
+        return "mongo_url = %s \n" \
+               "mongo_dbname = %s \n" \
+               "api_port = %s \n" \
+               "api_debug_on = %s \n" % (self.mongo_url,
+                                         self.mongo_dbname,
+                                         self.api_port,
+                                         self.api_debug_on)
diff --git a/utils/test/result_collection_api/opnfv_testapi/common/constants.py b/utils/test/result_collection_api/opnfv_testapi/common/constants.py
new file mode 100644
index 000000000..4d39a142d
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/common/constants.py
@@ -0,0 +1,15 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+DEFAULT_REPRESENTATION = "application/json"
+HTTP_BAD_REQUEST = 400
+HTTP_FORBIDDEN = 403
+HTTP_NOT_FOUND = 404
+HTTP_OK = 200
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
new file mode 100644
index 000000000..05c0c9392
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py new file mode 100755 index 000000000..2e106bec8 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# +############################################################################## +# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# This script is used to build dashboard ready json results +# It may be used for all the test case of the Bottlenecks project +# a new method format__for_dashboard(results) +# v0.1: basic example with methods for Rubbos. +# +import os +import requests +import json + + +def get_bottlenecks_cases(): + """ + get the list of the supported test cases + TODO: update the list when adding a new test case for the dashboard + """ + return ["rubbos", "tu1", "tu3"] + + +def check_bottlenecks_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + bottlenecks_cases = get_bottlenecks_cases() + + if case is None or case not in bottlenecks_cases: + return False + else: + return True + + +def format_bottlenecks_for_dashboard(case, results): + """ + generic method calling the method corresponding to the test case + check that the testcase is properly declared first + then build the call to the specific method + """ + if check_bottlenecks_case_exist(case): + cmd = "format_" + case + "_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + print "Test cases not declared" + return res + + +def format_rubbos_for_dashboard(results): + """ + Post processing for the Rubbos test case + """ + test_data = [{'description': 'Rubbos results'}] + + # Graph 1:Rubbos maximal throughput + # ******************************** + #new_element = [] + #for each_result in results: + # throughput_data = [record['throughput'] for record in each_result['details']] + # new_element.append({'x': each_result['start_date'], + # 'y': max(throughput_data)}) + + #test_data.append({'name': "Rubbos max throughput", + # 'info': {'type': "graph", + # 'xlabel': 'time', + # 'ylabel': 'maximal throughput'}, + # 'data_set': new_element}) + + # Graph 2: Rubbos last record + # ******************************** + new_element = [] + latest_result = results[-1]["details"] + for data in latest_result: + client_num = int(data["client"]) + throughput = int(data["throughput"]) + new_element.append({'x': client_num, + 'y': throughput}) + test_data.append({'name': "Rubbos throughput vs client number", + 'info': {'type': "graph", + 'xlabel': 'client number', + 'ylabel': 'throughput'}, + 'data_set': new_element}) + + return test_data + + +def format_tu1_for_dashboard(results): + test_data = [{'description': 'Tu-1 performance result'}] + 
line_element = []
+    bar_element = {}
+    last_result = results[-1]["details"]
+    for key in sorted(last_result):
+        bandwidth = last_result[key]["Bandwidth"]
+        pktsize = int(key)
+        line_element.append({'x': pktsize,
+                             'y': bandwidth * 1000})
+        bar_element[key] = bandwidth * 1000
+    # graph1, line
+    test_data.append({'name': "VM2VM max single directional throughput",
+                      'info': {'type': "graph",
+                               'xlabel': 'pktsize',
+                               'ylabel': 'bandwidth (kpps)'},
+                      'data_set': line_element})
+    # graph2, bar
+    test_data.append({'name': "VM2VM max single directional throughput",
+                      'info': {"type": "bar"},
+                      'data_set': bar_element})
+    return test_data
+
+
+def format_tu3_for_dashboard(results):
+    test_data = [{'description': 'Tu-3 performance result'}]
+    new_element = []
+    bar_element = {}
+    last_result = results[-1]["details"]
+    for key in sorted(last_result):
+        bandwidth = last_result[key]["Bandwidth"]
+        pktsize = int(key)
+        new_element.append({'x': pktsize,
+                            'y': bandwidth * 1000})
+        bar_element[key] = bandwidth * 1000
+    # graph1, line
+    test_data.append({'name': "VM2VM max bidirectional throughput",
+                      'info': {'type': "graph",
+                               'xlabel': 'pktsize',
+                               'ylabel': 'bandwidth (kpps)'},
+                      'data_set': new_element})
+    # graph2, bar
+    test_data.append({'name': "VM2VM max bidirectional throughput",
+                      'info': {"type": "bar"},
+                      'data_set': bar_element})
+    return test_data
+
+
+############################ For local test ################################
+
+def _read_sample_output(filename):
+    curr_path = os.path.dirname(os.path.abspath(__file__))
+    output = os.path.join(curr_path, filename)
+    with open(output) as f:
+        sample_output = f.read()
+
+    result = json.loads(sample_output)
+    return result
+
+
+# Copied from functest/testcases/Dashboard/dashboard_utils.py
+# with some minor modifications for local test.
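+# _get_results below issues
+#     GET <db_url>/results?project=<project>&case=<testcase>
+# against the result collection API and returns the decoded
+# 'test_results' list from the JSON response.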
+def _get_results(db_url, test_criteria):
+    test_project = test_criteria["project"]
+    testcase = test_criteria["testcase"]
+
+    # Build headers
+    headers = {'Content-Type': 'application/json'}
+
+    # build the request
+    # if criteria is all => remove criteria
+    url = db_url + "/results?project=" + test_project + "&case=" + testcase
+
+    # Send Request to Test DB
+    myData = requests.get(url, headers=headers)
+
+    # Get result as a json object
+    myNewData = json.loads(myData.text)
+
+    # Get results
+    myDataResults = myNewData['test_results']
+    return myDataResults
+
+# only for local test
+def _test():
+    db_url = "http://testresults.opnfv.org/testapi"
+    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "rubbos"})
+    test_result = format_rubbos_for_dashboard(results)
+    print json.dumps(test_result, indent=4)
+
+    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu1"})
+    #results = _read_sample_output("sample")
+    #print json.dumps(results, indent=4)
+    test_result = format_tu1_for_dashboard(results)
+    print json.dumps(test_result, indent=4)
+    results = _get_results(db_url, {"project": "bottlenecks", "testcase": "tu3"})
+    test_result = format_tu3_for_dashboard(results)
+    print json.dumps(test_result, indent=4)
+
+
+if __name__ == '__main__':
+    _test()
+
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
new file mode 100644
index 000000000..090aaa5b4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to retrieve data from the test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import os
+import re
+import sys
+from functest2Dashboard import format_functest_for_dashboard, \
+    check_functest_case_exist
+from yardstick2Dashboard import format_yardstick_for_dashboard, \
+    check_yardstick_case_exist
+from vsperf2Dashboard import format_vsperf_for_dashboard, \
+    check_vsperf_case_exist
+from bottlenecks2Dashboard import format_bottlenecks_for_dashboard, \
+    check_bottlenecks_case_exist
+from qtip2Dashboard import format_qtip_for_dashboard, \
+    check_qtip_case_exist
+from promise2Dashboard import format_promise_for_dashboard, \
+    check_promise_case_exist
+from doctor2Dashboard import format_doctor_for_dashboard, \
+    check_doctor_case_exist
+
+# any test project wishing to provide dashboard ready values
+# must include at least 2 methods
+# - format__for_dashboard
+# - check__case_exist
+
+
+def check_dashboard_ready_project(test_project):
+    # Check that the first param corresponds to a project
+    # for which dashboard processing is available
+    # print("test_project: %s" % test_project)
+    project_module = 'opnfv_testapi.dashboard.' + test_project + '2Dashboard'
+    return True if project_module in sys.modules else False
+
+
+def check_dashboard_ready_case(project, case):
+    cmd = "check_" + project + "_case_exist(case)"
+    return eval(cmd)
+
+
+def get_dashboard_cases():
+    # Retrieve all the test cases that could provide
+    # Dashboard ready graphs
+    # look in the releng repo
+    # search all the project2Dashboard.py files
+    # we assume that dashboard processing of a project
+    # is performed in the 2Dashboard.py file
+    modules = []
+    cp = re.compile('dashboard.*2Dashboard')
+    for module in sys.modules:
+        if re.match(cp, module):
+            modules.append(module)
+
+    return modules
+
+
+def get_dashboard_result(project, case, results=None):
+    # get the dashboard ready results
+    # parameters are:
+    #   project: project name
+    #   results: array of raw results pre-filtered
+    #   according to the parameters of the request
+    cmd = "format_" + project + "_for_dashboard(case,results)"
+    res = eval(cmd)
+    return res
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
new file mode 100644
index 000000000..38b23abb4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test cases of the Doctor project
+# a new method format__for_dashboard(results)
+#
+import re
+import datetime
+
+
+def get_doctor_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["doctor-notification", "doctor-mark-down"]
+
+
+def format_doctor_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+
+    if check_doctor_case_exist(case):
+        # note we add _case because testcase and project had the same name
+        # TODO refactoring... looked fine at the beginning with only 1
+        # project, but it is not very pretty now and clearly not optimized...
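+        # e.g. case "doctor-notification" dispatches to
+        # format_doctor_notification_case_for_dashboard(results)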
+ cmd = "format_" + case.replace('-','_') + "_case_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + return res + + +def check_doctor_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + doctor_cases = get_doctor_cases() + + if (case is None or case not in doctor_cases): + return False + else: + return True + + +def format_doctor_mark_down_case_for_dashboard(results): + """ + Post processing for the doctor test case + """ + test_data = [{'description': 'doctor-mark-down results for Dashboard'}] + return test_data + + +def format_doctor_notification_case_for_dashboard(results): + """ + Post processing for the doctor-notification test case + """ + test_data = [{'description': 'doctor results for Dashboard'}] + # Graph 1: (duration)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['duration'] + new_element.append({'x': data['start_date'], + 'y': t}) + + test_data.append({'name': "doctor-notification duration ", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: bar + # ************ + nbTest = 0 + nbTestOk = 0 + + for data in results: + nbTest += 1 + if data['details']['status'] == "OK": + nbTestOk += 1 + + test_data.append({'name': "doctor-notification status", + 'info': {"type": "bar"}, + 'data_set': [{'Nb tests': nbTest, + 'Nb Success': nbTestOk}]}) + + return test_data diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py new file mode 100644 index 000000000..86521b984 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py @@ -0,0 +1,472 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 Orange +# morgan.richomme@orange.com +# +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# This script is used to build dashboard ready json results +# It may be used for all the test case of the Functest project +# a new method format__for_dashboard(results) +# v0.1: basic example with methods for odl, Tempest, Rally and vPing +# +import datetime +import re + + +def get_functest_cases(): + """ + get the list of the supported test cases + TODO: update the list when adding a new test case for the dashboard + """ + return ["status", "vPing", "vPing_userdata", "vIMS", "Tempest", "ODL", + "ONOS", "Rally"] + + +def format_functest_for_dashboard(case, results): + """ + generic method calling the method corresponding to the test case + check that the testcase is properly declared first + then build the call to the specific method + """ + if check_functest_case_exist(case): + cmd = "format_" + case + "_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + print "Test cases not declared" + return res + + +def check_functest_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + functest_cases = get_functest_cases() + + if (case is None or case not in functest_cases): + return False + else: + return True + + +def format_status_for_dashboard(results): + test_data = [{'description': 
'Functest status'}]
+
+    # define magic equation for the status....
+    # 5 suites: vPing, odl, Tempest, vIMS, Rally
+    # Which overall KPIs make sense...
+
+    # TODO to be done and discussed
+    testcases = get_functest_cases()
+    test_data.append({'nb test suite(s) run': len(testcases)-1})
+    test_data.append({'vPing': '100%'})
+    test_data.append({'VIM status': '82%'})
+    test_data.append({'SDN Controllers': {'odl': '92%',
+                                          'onos': '95%',
+                                          'ocl': '93%'}})
+    test_data.append({'VNF deployment': '95%'})
+
+    return test_data
+
+
+def format_vIMS_for_dashboard(results):
+    """
+    Post processing for the vIMS test case
+    """
+    test_data = [{'description': 'vIMS results for Dashboard'}]
+
+    # Graph 1: (duration_deployment_orchestrator,
+    #           duration_deployment_vnf,
+    #           duration_test) = f(time)
+    # ********************************
+    new_element = []
+
+    for data in results:
+        new_element.append({'x': data['start_date'],
+                            'y1': data['details']['orchestrator']['duration'],
+                            'y2': data['details']['vIMS']['duration'],
+                            'y3': data['details']['sig_test']['duration']})
+
+    test_data.append({'name': "vIMS orchestrator/VNF/test duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'orchestration deployment duration',
+                               'y2label': 'vIMS deployment duration',
+                               'y3label': 'vIMS test duration'},
+                      'data_set': new_element})
+
+    # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
+    # **************************************************
+    new_element = []
+
+    for data in results:
+        # Retrieve all the tests
+        nbTests = 0
+        nbFailures = 0
+        nbSkipped = 0
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            # Calculate nb of tests run and nb of tests failed
+            # vIMS_results = get_vIMSresults(vIMS_test)
+            # print vIMS_results
+            try:
+                if data_test['result'] == "Passed":
+                    nbTests += 1
+                elif data_test['result'] == "Failed":
+                    nbFailures += 1
+                elif data_test['result'] == "Skipped":
+                    nbSkipped += 1
+            except:
+                nbTests = 0
+
+        new_element.append({'x': data['start_date'],
+                            'y1': nbTests,
+                            'y2': nbFailures,
+                            'y3': nbSkipped})
+
+    test_data.append({'name': "vIMS nb tests passed/failed/skipped",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests passed',
+                               'y2label': 'Number of tests failed',
+                               'y3label': 'Number of tests skipped'},
+                      'data_set': new_element})
+
+    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+    # ********************************************************
+    nbTests = 0
+    nbFailures = 0
+
+    for data in results:
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            nbTestsOK = 0
+            nbTestsKO = 0
+
+            try:
+                if data_test['result'] == "Passed":
+                    nbTestsOK += 1
+                elif data_test['result'] == "Failed":
+                    nbTestsKO += 1
+            except:
+                nbTestsOK = 0
+
+            nbTests += nbTestsOK + nbTestsKO
+            nbFailures += nbTestsKO
+
+    test_data.append({'name': "Total number of tests run/failure tests",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Run': nbTests,
+                                    'Failed': nbFailures}]})
+
+    return test_data
+
+
+def format_Tempest_for_dashboard(results):
+    """
+    Post processing for the Tempest test case
+    """
+    test_data = [{'description': 'Tempest results for Dashboard'}]
+
+    # Graph 1: Test_Duration = f(time)
+    # ********************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['start_date'],
+                            'y': data['details']['duration']})
+
+    test_data.append({'name': "Tempest duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'ylabel': 'duration (s)'},
+                      'data_set': new_element})
+
+    # Graph 2: (Nb test, nb failure)=f(time)
+    # ***************************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['start_date'],
+                            'y1': data['details']['tests'],
+                            'y2': data['details']['failures']})
+
+    test_data.append({'name': "Tempest nb tests/nb failures",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests',
+                               'y2label': 'Number of failures'},
+                      'data_set': new_element})
+
+    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+    # ********************************************************
+    nbTests = 0
+    nbFailures = 0
+
+    for data in results:
+        nbTests += data['details']['tests']
+        nbFailures += data['details']['failures']
+
+    test_data.append({'name': "Total number of tests run/failure tests",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Run': nbTests,
+                                    'Failed': nbFailures}]})
+
+    # Graph 4: (Success rate)=f(time)
+    # ***************************************
+    new_element = []
+    for data in results:
+        try:
+            diff = (int(data['details']['tests']) - int(data['details']['failures']))
+            success_rate = 100*diff/int(data['details']['tests'])
+        except:
+            success_rate = 0
+
+        new_element.append({'x': data['start_date'],
+                            'y1': success_rate})
+
+    test_data.append({'name': "Tempest success rate",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Success rate'},
+                      'data_set': new_element})
+
+    return test_data
+
+
+def format_ODL_for_dashboard(results):
+    """
+    Post processing for the ODL test case
+    """
+    test_data = [{'description': 'ODL results for Dashboard'}]
+
+    # Graph 1: (Nb test, nb failure)=f(time)
+    # ***************************************
+    new_element = []
+
+    for data in results:
+        odl_results = data['details']['details']
+        nbFailures = 0
+        for odl in odl_results:
+            if (odl['test_status']['@status'] == "FAIL"):
+                nbFailures += 1
+        new_element.append({'x': data['start_date'],
+                            'y1': len(odl_results),
+                            'y2': nbFailures})
+
+    test_data.append({'name': "ODL nb tests/nb failures",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests',
+                               'y2label': 'Number of failures'},
+                      'data_set': new_element})
+    return test_data
+
+
+def format_ONOS_for_dashboard(results):
+    """
+    Post processing for the ONOS test case
+    """
+    test_data = [{'description': 'ONOS results for Dashboard'}]
+    # Graph 1: (duration FUNCvirtNet)=f(time)
+    # ***************************************
+    new_element = []
+
+    # default duration 0:00:08.999904
+    # consider only seconds => 09
+    for data in results:
+        t = data['details']['FUNCvirNet']['duration']
+        h, m, s = re.split(':', t)
+        s = round(float(s))
+        new_duration = int(datetime.timedelta(hours=int(h),
+                                              minutes=int(m),
+                                              seconds=int(s)).total_seconds())
+        new_element.append({'x': data['start_date'],
+                            'y': new_duration})
+
+    test_data.append({'name': "ONOS FUNCvirNet duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time (s)',
+                               'ylabel': 'duration (s)'},
+                      'data_set': new_element})
+
+    # Graph 2: (Nb test, nb failure)FuncvirtNet=f(time)
+    # ***************************************
+    new_element = []
+
+    for data in results:
+        onos_results = data['details']['FUNCvirNet']['status']
+        nbFailures = 0
+        for onos in onos_results:
+            if (onos['Case result'] == "FAIL"):
+                nbFailures += 1
+        new_element.append({'x': data['start_date'],
+                            'y1': len(onos_results),
+                            'y2': nbFailures})
+
+    test_data.append({'name': "ONOS FUNCvirNet nb tests/nb failures",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests',
+
'y2label': 'Number of failures'}, + 'data_set': new_element}) + + # Graph 3: (duration FUNCvirtNetL3)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['FUNCvirNetL3']['duration'] + h, m, s = re.split(':', t) + s = round(float(s)) + new_duration = int(datetime.timedelta(hours=int(h), + minutes=int(m), + seconds=int(s)).total_seconds()) + new_element.append({'x': data['start_date'], + 'y': new_duration}) + + test_data.append({'name': "ONOS FUNCvirNetL3 duration", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 4: (Nb test, nb failure)FuncvirtNetL3=f(time) + # *************************************** + new_element = [] + + for data in results: + onos_results = data['details']['FUNCvirNetL3']['status'] + nbFailures = 0 + for onos in onos_results: + if (onos['Case result'] == "FAIL"): + nbFailures += 1 + new_element.append({'x': data['start_date'], + 'y1': len(onos_results), + 'y2': nbFailures}) + + test_data.append({'name': "ONOS FUNCvirNetL3 nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) + return test_data + + +def format_Rally_for_dashboard(results): + """ + Post processing for the Rally test case + """ + test_data = [{'description': 'Rally results for Dashboard'}] + # Graph 1: Test_Duration = f(time) + # ******************************** + new_element = [] + for data in results: + summary_cursor = len(data['details']) - 1 + new_element.append({'x': data['start_date'], + 'y': int(data['details'][summary_cursor]['summary']['duration'])}) + + test_data.append({'name': "rally duration", + 'info': {'type': "graph", + 'xlabel': 'time', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: Success rate = f(time) + # ******************************** + new_element = [] + for data in results: + new_element.append({'x': data['start_date'], + 'y': float(data['details'][summary_cursor]['summary']['nb success'])}) + + test_data.append({'name': "rally success rate", + 'info': {'type': "graph", + 'xlabel': 'time', + 'ylabel': 'success rate (%)'}, + 'data_set': new_element}) + + return test_data + + +def format_vPing_for_dashboard(results): + """ + Post processing for the vPing test case + """ + test_data = [{'description': 'vPing results for Dashboard'}] + + # Graph 1: Test_Duration = f(time) + # ******************************** + new_element = [] + for data in results: + new_element.append({'x': data['start_date'], + 'y': data['details']['duration']}) + + test_data.append({'name': "vPing duration", + 'info': {'type': "graph", + 'xlabel': 'time', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: bar + # ************ + nbTest = 0 + nbTestOk = 0 + + for data in results: + nbTest += 1 + if data['details']['status'] == "OK": + nbTestOk += 1 + + test_data.append({'name': "vPing status", + 'info': {"type": "bar"}, + 'data_set': [{'Nb tests': nbTest, + 'Nb Success': nbTestOk}]}) + + return test_data + + +def format_vPing_userdata_for_dashboard(results): + """ + Post processing for the vPing_userdata test case + """ + test_data = [{'description': 'vPing_userdata results for Dashboard'}] + + # Graph 1: Test_Duration = f(time) + # ******************************** + new_element = [] + for data in results: + new_element.append({'x': data['start_date'], + 
'y': data['details']['duration']})
+
+    test_data.append({'name': "vPing_userdata duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'ylabel': 'duration (s)'},
+                      'data_set': new_element})
+
+    # Graph 2: bar
+    # ************
+    nbTest = 0
+    nbTestOk = 0
+
+    for data in results:
+        nbTest += 1
+        if data['details']['status'] == "OK":
+            nbTestOk += 1
+
+    test_data.append({'name': "vPing_userdata status",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Nb tests': nbTest,
+                                    'Nb Success': nbTestOk}]})
+
+    return test_data
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
new file mode 100644
index 000000000..84f43a7d1
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test cases of the Promise project
+# a new method format__for_dashboard(results)
+# v0.1: basic example with a method for the promise test case
+#
+import re
+import datetime
+
+
+def get_promise_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["promise"]
+
+
+def format_promise_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_promise_case_exist(case):
+        # note we add _case because testcase and project had the same name
+        # TODO refactoring... looked fine at the beginning with only 1
+        # project, but it is not very pretty now and clearly not optimized...
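+        # e.g. case "promise" dispatches to
+        # format_promise_case_for_dashboard(results)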
+ cmd = "format_" + case + "_case_for_dashboard(results)" + res = eval(cmd) + else: + res = [] + print "Test cases not declared" + return res + + +def check_promise_case_exist(case): + """ + check if the testcase exists + if the test case is not defined or not declared in the list + return False + """ + promise_cases = get_promise_cases() + + if (case is None or case not in promise_cases): + return False + else: + return True + + + + + +def format_promise_case_for_dashboard(results): + """ + Post processing for the promise test case + """ + test_data = [{'description': 'Promise results for Dashboard'}] + # Graph 1: (duration)=f(time) + # *************************************** + new_element = [] + + # default duration 0:00:08.999904 + # consider only seconds => 09 + for data in results: + t = data['details']['duration'] + new_element.append({'x': data['creation_date'], + 'y': t}) + + test_data.append({'name': "Promise duration ", + 'info': {'type': "graph", + 'xlabel': 'time (s)', + 'ylabel': 'duration (s)'}, + 'data_set': new_element}) + + # Graph 2: (Nb test, nb failure)=f(time) + # *************************************** + new_element = [] + + for data in results: + promise_results = data['details'] + new_element.append({'x': data['creation_date'], + 'y1': promise_results['tests'], + 'y2': promise_results['failures']}) + + test_data.append({'name': "Promise nb tests/nb failures", + 'info': {'type': "graph", + 'xlabel': 'time', + 'y1label': 'Number of tests', + 'y2label': 'Number of failures'}, + 'data_set': new_element}) + + return test_data diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py new file mode 100644 index 000000000..6ceccd374 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/qtip2Dashboard.py @@ -0,0 +1,121 @@ +#!/usr/bin/python + +############################################################################## +# Copyright (c) 2015 Dell Inc and others. +# +# All rights reserved. 
This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+def get_qtip_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["compute_test_suite", "storage_test_suite", "network_test_suite"]
+
+def check_qtip_case_exist(case):
+    """
+    check if the testcase exists
+    if the test case is not defined or not declared in the list
+    return False
+    """
+    qtip_cases = get_qtip_cases()
+    if (case is None or case not in qtip_cases):
+        return False
+    else:
+        return True
+
+def format_qtip_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_qtip_case_exist(case):
+        res = format_common_for_dashboard(case, results)
+    else:
+        res = []
+        print "Test cases not declared"
+    return res
+
+def format_common_for_dashboard(case, results):
+    """
+    Common post processing
+    """
+    test_data_description = case + " results for Dashboard"
+    test_data = [{'description': test_data_description}]
+
+    graph_name = ''
+    if "network_test_suite" in case:
+        graph_name = "Throughput index"
+    else:
+        graph_name = "Index"
+
+    # Graph 1:
+    # ********************************
+    new_element = []
+    for date, index in results:
+        new_element.append({'x': date,
+                            'y1': index,
+                            })
+
+    test_data.append({'name': graph_name,
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Index Number'},
+                      'data_set': new_element})
+
+    return test_data
+
+
+############################ For local test ################################
+import os
+import requests
+import json
+from collections import defaultdict
+
+def _get_results(db_url, testcase):
+
+    testproject = testcase["project"]
+    testcase = testcase["testcase"]
+    resultarray = defaultdict()
+    # headers
+    header = {'Content-Type': 'application/json'}
+    # url
+    url = db_url + "/results?project="+testproject+"&case="+testcase
+    data = requests.get(url, headers=header)
+    datajson = data.json()
+    for x in range(0, len(datajson['test_results'])):
+
+        rawresults = datajson['test_results'][x]['details']
+        index = rawresults['index']
+        resultarray[str(datajson['test_results'][x]['start_date'])] = index
+
+    return resultarray
+
+def _test():
+
+    db_url = "http://testresults.opnfv.org/testapi"
+    raw_result = defaultdict()
+
+    raw_result = _get_results(db_url, {"project": "qtip", "testcase": "compute_test_suite"})
+    resultitems = raw_result.items()
+    result = format_qtip_for_dashboard("compute_test_suite", resultitems)
+    print result
+
+    raw_result = _get_results(db_url, {"project": "qtip", "testcase": "storage_test_suite"})
+    resultitems = raw_result.items()
+    result = format_qtip_for_dashboard("storage_test_suite", resultitems)
+    print result
+
+    raw_result = _get_results(db_url, {"project": "qtip", "testcase": "network_test_suite"})
+    resultitems = raw_result.items()
+    result = format_qtip_for_dashboard("network_test_suite", resultitems)
+    print result
+
+if __name__ == '__main__':
+    _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
new file mode 100755
index 000000000..5a6882da4
--- /dev/null
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
new file mode 100755
index 000000000..5a6882da4
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/vsperf2Dashboard.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+
+# Copyright 2015 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def get_vsperf_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["tput_ovsdpdk", "tput_ovs",
+            "b2b_ovsdpdk", "b2b_ovs",
+            "tput_mod_vlan_ovsdpdk", "tput_mod_vlan_ovs",
+            "cont_ovsdpdk", "cont_ovs",
+            "pvp_cont_ovsdpdkuser", "pvp_cont_ovsdpdkcuse", "pvp_cont_ovsvirtio",
+            "pvvp_cont_ovsdpdkuser", "pvvp_cont_ovsdpdkcuse", "pvvp_cont_ovsvirtio",
+            "scalability_ovsdpdk", "scalability_ovs",
+            "pvp_tput_ovsdpdkuser", "pvp_tput_ovsdpdkcuse", "pvp_tput_ovsvirtio",
+            "pvp_b2b_ovsdpdkuser", "pvp_b2b_ovsdpdkcuse", "pvp_b2b_ovsvirtio",
+            "pvvp_tput_ovsdpdkuser", "pvvp_tput_ovsdpdkcuse", "pvvp_tput_ovsvirtio",
+            "pvvp_b2b_ovsdpdkuser", "pvvp_b2b_ovsdpdkcuse", "pvvp_b2b_ovsvirtio",
+            "cpu_load_ovsdpdk", "cpu_load_ovs",
+            "mem_load_ovsdpdk", "mem_load_ovs"]
+
+
+def check_vsperf_case_exist(case):
+    """
+    check if the testcase exists
+    if the test case is not defined or not declared in the list
+    return False
+    """
+    vsperf_cases = get_vsperf_cases()
+
+    if (case is None or case not in vsperf_cases):
+        return False
+    else:
+        return True
+
+
+def format_vsperf_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_vsperf_case_exist(case):
+        res = format_common_for_dashboard(case, results)
+    else:
+        res = []
+        print "Test case not declared"
+    return res
+
+
+def format_common_for_dashboard(case, results):
+    """
+    Common post processing
+    """
+    test_data_description = case + " results for Dashboard"
+    test_data = [{'description': test_data_description}]
+
+    if "b2b" in case:
+        graph_name = "B2B frames"
+    else:
+        graph_name = "Rx frames per second"
+
+    # Graph 1: Rx fps = f(time)
+    # ********************************
+    new_element = []
+    for data in results:
+        new_element.append({'x': data['start_date'],
+                            'y1': data['details']['64'],
+                            'y2': data['details']['128'],
+                            'y3': data['details']['512'],
+                            'y4': data['details']['1024'],
+                            'y5': data['details']['1518']})
+
+    test_data.append({'name': graph_name,
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'frame size 64B',
+                               'y2label': 'frame size 128B',
+                               'y3label': 'frame size 512B',
+                               'y4label': 'frame size 1024B',
+                               'y5label': 'frame size 1518B'},
+                      'data_set': new_element})
+
+    return test_data
+
+
+############################ For local test ################################
+def _test():
+    ans = [{'start_date': '2015-09-12',
+            'project_name': 'vsperf',
+            'version': 'ovs_master',
+            'pod_name': 'pod1-vsperf',
+            'case_name': 'tput_ovsdpdk',
+            'installer': 'build_sie',
+            'details': {'64': '26.804', '128': '100.564', '512': '178.137',
+                        '1024': '1097.284', '1518': '12635.860'}},
+           {'start_date': '2015-09-13',
+            'project_name': 'vsperf',
+            'version': 'ovs_master',
+            'pod_name': 'pod1-vsperf',
+            'case_name': 'tput_ovsdpdk',
+            'installer': 'build_sie',
+            'details': {'64': '16.804', '128': '99.564', '512': '168.137',
+                        '1024': '1087.284', '1518': '12625.860'}}]
+
+    result = format_vsperf_for_dashboard("pvp_cont_ovsdpdkcuse", ans)
+    print result
+
+    result = format_vsperf_for_dashboard("b2b_ovsdpdk", ans)
+    print result
+
+    result = format_vsperf_for_dashboard("non_existing", ans)
+    print result
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
new file mode 100644
index 000000000..4f022d5b9
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+#
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co., Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# This script is used to build dashboard ready json results
+# It may be used for all the test cases of the Yardstick project
+# a new method format_<testcase>_for_dashboard(results) is required
+# for each new test case
+# v0.1: basic example with methods for Ping, Iperf, Netperf, Pktgen,
+#       Fio, Lmbench, Perf, Cyclictest.
+#
+
+
+def get_yardstick_cases():
+    """
+    get the list of the supported test cases
+    TODO: update the list when adding a new test case for the dashboard
+    """
+    return ["Ping", "Iperf", "Netperf", "Pktgen", "Fio", "Lmbench",
+            "Perf", "Cyclictest"]
+
+
+def format_yardstick_for_dashboard(case, results):
+    """
+    generic method calling the method corresponding to the test case
+    check that the testcase is properly declared first
+    then build the call to the specific method
+    """
+    if check_yardstick_case_exist(case):
+        # the declared case names are capitalized while the formatter
+        # functions below are lower-case, hence the lower()
+        cmd = "format_" + case.lower() + "_for_dashboard(results)"
+        res = eval(cmd)
+    else:
+        res = []
+        print "Test case not declared"
+    return res
+
+
+def check_yardstick_case_exist(case):
+    """
+    check if the testcase exists
+    if the test case is not defined or not declared in the list
+    return False
+    """
+    yardstick_cases = get_yardstick_cases()
+
+    if (case is None or case not in yardstick_cases):
+        return False
+    else:
+        return True
+
+
+def _get_test_status_bar(results):
+    nbTest = 0
+    nbTestOk = 0
+
+    for data in results:
+        nbTest += 1
+        records = [record for record in data['details']
+                   if "benchmark" in record
+                   and record["benchmark"]["errors"] != ""]
+        if len(records) == 0:
+            nbTestOk += 1
+    return nbTest, nbTestOk
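+
+
+# Illustrative record shape consumed by the Ping formatter below (a
+# minimal sketch; values are fabricated and only the fields actually
+# read are shown):
+#
+#   sample = [{'start_date': '2016-05-28 14:42:58',
+#              'details': [{'benchmark': {'errors': '',
+#                                         'data': {'rtt': 0.25}}}]}]
+#   format_yardstick_for_dashboard("Ping", sample)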
+def format_ping_for_dashboard(results):
+    """
+    Post processing for the Ping test case
+    """
+    test_data = [{'description': 'Ping results for Dashboard'}]
+
+    # Graph 1: Test_Duration = f(time)
+    # ********************************
+    new_element = []
+    for data in results:
+        records = [record["benchmark"]["data"]["rtt"]
+                   for record in data['details']
+                   if "benchmark" in record]
+
+        avg_rtt = sum(records) / len(records)
+        new_element.append({'x': data['start_date'],
+                            'y': avg_rtt})
+
+    test_data.append({'name': "ping duration",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'ylabel': 'duration (s)'},
+                      'data_set': new_element})
+
+    # Graph 2: bar
+    # ************
+    nbTest, nbTestOk = _get_test_status_bar(results)
+
+    test_data.append({'name': "ping status",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Nb tests': nbTest,
+                                    'Nb Success': nbTestOk}]})
+
+    return test_data
+
+
+def format_iperf_for_dashboard(results):
+    """
+    Post processing for the Iperf test case
+    """
+    test_data = [{'description': 'Iperf results for Dashboard'}]
+    return test_data
+
+
+def format_netperf_for_dashboard(results):
+    """
+    Post processing for the Netperf test case
+    """
+    test_data = [{'description': 'Netperf results for Dashboard'}]
+    return test_data
+
+
+def format_pktgen_for_dashboard(results):
+    """
+    Post processing for the Pktgen test case
+    """
+    test_data = [{'description': 'Pktgen results for Dashboard'}]
+    return test_data
+
+
+def format_fio_for_dashboard(results):
+    """
+    Post processing for the Fio test case
+    """
+    test_data = [{'description': 'Fio results for Dashboard'}]
+    return test_data
+
+
+def format_lmbench_for_dashboard(results):
+    """
+    Post processing for the Lmbench test case
+    """
+    test_data = [{'description': 'Lmbench results for Dashboard'}]
+    return test_data
+
+
+def format_perf_for_dashboard(results):
+    """
+    Post processing for the Perf test case
+    """
+    test_data = [{'description': 'Perf results for Dashboard'}]
+    return test_data
+
+
+def format_cyclictest_for_dashboard(results):
+    """
+    Post processing for the Cyclictest test case
+    """
+    test_data = [{'description': 'Cyclictest results for Dashboard'}]
+    return test_data
+
+
+############################ For local test ################################
+import json
+import os
+import requests
+
+
+def _read_sample_output(filename):
+    curr_path = os.path.dirname(os.path.abspath(__file__))
+    output = os.path.join(curr_path, filename)
+    with open(output) as f:
+        sample_output = f.read()
+
+    result = json.loads(sample_output)
+    return result
+
+
+# Copied from functest/testcases/Dashboard/dashboard_utils.py
+# with some minor modifications for local test.
+def _get_results(db_url, test_criteria):
+    test_project = test_criteria["project"]
+    testcase = test_criteria["testcase"]
+
+    # Build headers
+    headers = {'Content-Type': 'application/json'}
+
+    # build the request
+    # if criteria is all => remove criteria
+    url = db_url + "/results?project=" + test_project + "&case=" + testcase
+
+    # Send Request to Test DB
+    myData = requests.get(url, headers=headers)
+
+    # Get result as a json object
+    myNewData = json.loads(myData.text)
+
+    # Get results
+    myDataResults = myNewData['test_results']
+
+    return myDataResults
+
+
+def _test():
+    db_url = "http://213.77.62.197"
+    result = _get_results(db_url,
+                          {"project": "yardstick", "testcase": "Ping"})
+    print format_ping_for_dashboard(result)
+
+
+if __name__ == '__main__':
+    _test()
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/__init__.py b/utils/test/result_collection_api/opnfv_testapi/resources/__init__.py
new file mode 100644
index 000000000..05c0c9392
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/__init__.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py new file mode 100644 index 000000000..00abbb92b --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/dashboard_handlers.py @@ -0,0 +1,100 @@ +from tornado.web import HTTPError + +from opnfv_testapi.common.constants import HTTP_NOT_FOUND +from opnfv_testapi.dashboard.dashboard_utils import \ + check_dashboard_ready_project, \ + check_dashboard_ready_case, get_dashboard_result +from opnfv_testapi.resources.result_handlers import GenericResultHandler +from opnfv_testapi.resources.result_models import TestResult +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + + +class GenericDashboardHandler(GenericResultHandler): + def __init__(self, application, request, **kwargs): + super(GenericDashboardHandler, self).__init__(application, + request, + **kwargs) + self.table = self.db_results + self.table_cls = TestResult + + +class DashboardHandler(GenericDashboardHandler): + @swagger.operation(nickname='query') + def get(self): + """ + @description: Retrieve dashboard ready result(s) + for a test project + @notes: Retrieve dashboard ready result(s) for a test project + Available filters for this request are : + - project : project name + - case : case name + - pod : pod name + - version : platform version (Arno-R1, ...) + - installer (fuel, ...) + - period : x (x last days) + + GET /dashboard?project=functest&case=vPing&version=Colorado \ + &pod=pod_name&period=15 + @rtype: L{string} + @param pod: pod name + @type pod: L{string} + @in pod: query + @required pod: False + @param project: project name + @type project: L{string} + @in project: query + @required project: True + @param case: case name + @type case: L{string} + @in case: query + @required case: True + @param version: i.e. 
Colorado
+        @type version: L{string}
+        @in version: query
+        @required version: False
+        @param installer: fuel/apex/joid/compass
+        @type installer: L{string}
+        @in installer: query
+        @required installer: False
+        @param period: last days
+        @type period: L{string}
+        @in period: query
+        @required period: False
+        @return 200: test result exist
+        @raise 400: period is not int
+        @raise 404: project or case name missing,
+                    or project or case is not dashboard ready
+        """
+
+        project_arg = self.get_query_argument("project", None)
+        case_arg = self.get_query_argument("case", None)
+
+        # on /dashboard retrieve the list of projects and testcases
+        # ready for dashboard
+        if project_arg is None:
+            raise HTTPError(HTTP_NOT_FOUND, "Project name missing")
+
+        if not check_dashboard_ready_project(project_arg):
+            raise HTTPError(HTTP_NOT_FOUND,
+                            'Project [{}] not dashboard ready'
+                            .format(project_arg))
+
+        if case_arg is None:
+            raise HTTPError(
+                HTTP_NOT_FOUND,
+                'Test case missing for project [{}]'.format(project_arg))
+
+        if not check_dashboard_ready_case(project_arg, case_arg):
+            raise HTTPError(
+                HTTP_NOT_FOUND,
+                'Test case [{}] not dashboard ready for project [{}]'
+                .format(case_arg, project_arg))
+
+        # special case of status for project
+        if case_arg == 'status':
+            self.finish_request(get_dashboard_result(project_arg, case_arg))
+        else:
+            def get_result(res, project, case):
+                return get_dashboard_result(project, case, res)
+
+            self._list(self.set_query(), get_result, project_arg, case_arg)
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
new file mode 100644
index 000000000..4b39b247a
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
@@ -0,0 +1,230 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved.
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# feng.xiaowei@zte.com.cn refactor db.pod to db.pods 5-19-2016 +# feng.xiaowei@zte.com.cn refactor test_project to project 5-19-2016 +# feng.xiaowei@zte.com.cn refactor response body 5-19-2016 +# feng.xiaowei@zte.com.cn refactor pod/project response info 5-19-2016 +# feng.xiaowei@zte.com.cn refactor testcase related handler 5-20-2016 +# feng.xiaowei@zte.com.cn refactor result related handler 5-23-2016 +# feng.xiaowei@zte.com.cn refactor dashboard related handler 5-24-2016 +# feng.xiaowei@zte.com.cn add methods to GenericApiHandler 5-26-2016 +# feng.xiaowei@zte.com.cn remove PodHandler 5-26-2016 +# feng.xiaowei@zte.com.cn remove ProjectHandler 5-26-2016 +# feng.xiaowei@zte.com.cn remove TestcaseHandler 5-27-2016 +# feng.xiaowei@zte.com.cn remove ResultHandler 5-29-2016 +# feng.xiaowei@zte.com.cn remove DashboardHandler 5-30-2016 +############################################################################## + +import json +from datetime import datetime + +from tornado.web import RequestHandler, asynchronous, HTTPError +from tornado import gen + +from models import CreateResponse +from opnfv_testapi.common.constants import DEFAULT_REPRESENTATION, \ + HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_FORBIDDEN +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + + +class GenericApiHandler(RequestHandler): + def __init__(self, application, request, **kwargs): + super(GenericApiHandler, self).__init__(application, request, **kwargs) + self.db = self.settings["db"] + self.json_args = None + self.table = None + self.table_cls = None + self.db_projects = 'projects' + self.db_pods = 'pods' + self.db_testcases = 'testcases' + self.db_results = 'results' + + def prepare(self): + if self.request.method != "GET" and self.request.method != "DELETE": + if self.request.headers.get("Content-Type") is not None: + if self.request.headers["Content-Type"].startswith( + DEFAULT_REPRESENTATION): + try: + self.json_args = json.loads(self.request.body) + except (ValueError, KeyError, TypeError) as error: + raise HTTPError(HTTP_BAD_REQUEST, + "Bad Json format [{}]". 
+                                    format(error))
+
+    def finish_request(self, json_object=None):
+        if json_object:
+            self.write(json.dumps(json_object))
+        self.set_header("Content-Type", DEFAULT_REPRESENTATION)
+        self.finish()
+
+    def _create_response(self, resource):
+        href = self.request.full_url() + '/' + str(resource)
+        return CreateResponse(href=href).format()
+
+    def format_data(self, data):
+        cls_data = self.table_cls.from_dict(data)
+        return cls_data.format_http()
+
+    @asynchronous
+    @gen.coroutine
+    def _create(self, miss_checks, db_checks, **kwargs):
+        """
+        :param miss_checks: [miss1, miss2]
+        :param db_checks: [(table, exist, query, error)]
+        """
+        if self.json_args is None:
+            raise HTTPError(HTTP_BAD_REQUEST, "no body")
+
+        data = self.table_cls.from_dict(self.json_args)
+        for miss in miss_checks:
+            miss_data = data.__getattribute__(miss)
+            if miss_data is None or miss_data == '':
+                raise HTTPError(HTTP_BAD_REQUEST,
+                                '{} missing'.format(miss))
+
+        for k, v in kwargs.iteritems():
+            data.__setattr__(k, v)
+
+        for table, exist, query, error in db_checks:
+            check = yield self._eval_db_find_one(query(data), table)
+            if (exist and not check) or (not exist and check):
+                code, message = error(data)
+                raise HTTPError(code, message)
+
+        data.creation_date = datetime.now()
+        _id = yield self._eval_db(self.table, 'insert', data.format())
+        if 'name' in self.json_args:
+            resource = data.name
+        else:
+            resource = _id
+        self.finish_request(self._create_response(resource))
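+
+    # Illustrative caller of the _create() contract above (a sketch
+    # only; see the POST handlers in pod_handlers.py and
+    # project_handlers.py in this patch for the real usage):
+    #
+    #   def query(data):
+    #       return {'name': data.name}
+    #
+    #   def error(data):
+    #       return HTTP_FORBIDDEN, '{} already exists'.format(data.name)
+    #
+    #   self._create(['name'], [(self.table, False, query, error)])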
+
+    @asynchronous
+    @gen.coroutine
+    def _list(self, query=None, res_op=None, *args):
+        if query is None:
+            query = {}
+        data = []
+        cursor = self._eval_db(self.table, 'find', query)
+        while (yield cursor.fetch_next):
+            data.append(self.format_data(cursor.next_object()))
+        if res_op is None:
+            res = {self.table: data}
+        else:
+            res = res_op(data, *args)
+        self.finish_request(res)
+
+    @asynchronous
+    @gen.coroutine
+    def _get_one(self, query):
+        data = yield self._eval_db_find_one(query)
+        if data is None:
+            raise HTTPError(HTTP_NOT_FOUND,
+                            "[{}] not exist in table [{}]"
+                            .format(query, self.table))
+        self.finish_request(self.format_data(data))
+
+    @asynchronous
+    @gen.coroutine
+    def _delete(self, query):
+        data = yield self._eval_db_find_one(query)
+        if data is None:
+            raise HTTPError(HTTP_NOT_FOUND,
+                            "[{}] not exist in table [{}]"
+                            .format(query, self.table))
+
+        yield self._eval_db(self.table, 'remove', query)
+        self.finish_request()
+
+    @asynchronous
+    @gen.coroutine
+    def _update(self, query, db_keys):
+        if self.json_args is None:
+            raise HTTPError(HTTP_BAD_REQUEST, "No payload")
+
+        # check old data exist
+        from_data = yield self._eval_db_find_one(query)
+        if from_data is None:
+            raise HTTPError(HTTP_NOT_FOUND,
+                            "{} could not be found in table [{}]"
+                            .format(query, self.table))
+
+        data = self.table_cls.from_dict(from_data)
+        # check new data exist
+        equal, new_query = self._update_query(db_keys, data)
+        if not equal:
+            to_data = yield self._eval_db_find_one(new_query)
+            if to_data is not None:
+                raise HTTPError(HTTP_FORBIDDEN,
+                                "{} already exists in table [{}]"
+                                .format(new_query, self.table))
+
+        # we merge the whole document
+        edit_request = data.format()
+        edit_request.update(self._update_requests(data))
+
+        # update the DB
+        yield self._eval_db(self.table, 'update', query, edit_request)
+        edit_request['_id'] = str(data._id)
+        self.finish_request(edit_request)
+
+    def _update_requests(self, data):
+        request = dict()
+        for k, v in self.json_args.iteritems():
+            request = self._update_request(request, k, v,
+                                           data.__getattribute__(k))
+        if not request:
+            raise HTTPError(HTTP_FORBIDDEN, "Nothing to update")
+        return request
+
+    @staticmethod
+    def _update_request(edit_request, key, new_value, old_value):
+        """
+        This function serves to prepare the elements in the update request.
+        We try to avoid replacing the exact values in the db
+        edit_request should be a dict in which we add an entry (key) after
+        comparing values
+        """
+        if new_value is not None:
+            if len(new_value) > 0:
+                if new_value != old_value:
+                    edit_request[key] = new_value
+
+        return edit_request
+
+    def _update_query(self, keys, data):
+        query = dict()
+        equal = True
+        for key in keys:
+            new = self.json_args.get(key)
+            old = data.__getattribute__(key)
+            if new is None:
+                new = old
+            elif new != old:
+                equal = False
+            query[key] = new
+        return equal, query
+
+    def _eval_db(self, table, method, *args):
+        return eval('self.db.%s.%s(*args)' % (table, method))
+
+    def _eval_db_find_one(self, query, table=None):
+        if table is None:
+            table = self.table
+        return self._eval_db(table, 'find_one', query)
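+
+    # Illustrative expansion of the eval-based dispatch above (a
+    # sketch; 'zte-1' is a fabricated pod name):
+    #
+    #   self._eval_db('pods', 'find_one', {'name': 'zte-1'})
+    #   -> self.db.pods.find_one({'name': 'zte-1'})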
+
+
+class VersionHandler(GenericApiHandler):
+    @swagger.operation(nickname='list')
+    def get(self):
+        """
+        @description: list all supported versions
+        @rtype: L{Versions}
+        """
+        versions = [{'version': 'v1.0', 'description': 'basics'}]
+        self.finish_request({'versions': versions})
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/models.py b/utils/test/result_collection_api/opnfv_testapi/resources/models.py
new file mode 100644
index 000000000..881f65dd3
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/models.py
@@ -0,0 +1,70 @@
+##############################################################################
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# feng.xiaowei@zte.com.cn mv Pod to pod_models.py 5-18-2016
+# feng.xiaowei@zte.com.cn add MetaCreateResponse/MetaGetResponse 5-18-2016
+# feng.xiaowei@zte.com.cn mv TestProject to project_models.py 5-19-2016
+# feng.xiaowei@zte.com.cn delete meta class 5-19-2016
+# feng.xiaowei@zte.com.cn add CreateResponse 5-19-2016
+# feng.xiaowei@zte.com.cn mv TestCase to testcase_models.py 5-20-2016
+# feng.xiaowei@zte.com.cn mv TestResut to result_models.py 5-23-2016
+##############################################################################
+from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger
+
+
+class CreateResponse(object):
+    def __init__(self, href=''):
+        self.href = href
+
+    @staticmethod
+    def from_dict(res_dict):
+        if res_dict is None:
+            return None
+
+        res = CreateResponse()
+        res.href = res_dict.get('href')
+        return res
+
+    def format(self):
+        return {'href': self.href}
+
+
+@swagger.model()
+class Versions(object):
+    """
+    @property versions:
+    @ptype versions: C{list} of L{Version}
+    """
+    def __init__(self):
+        self.versions = list()
+
+    @staticmethod
+    def from_dict(res_dict):
+        if res_dict is None:
+            return None
+
+        res = Versions()
+        for version in res_dict.get('versions'):
+            res.versions.append(Version.from_dict(version))
+        return res
+
+
+@swagger.model()
+class Version(object):
+    def __init__(self, version=None, description=None):
+        self.version = version
+        self.description = description
+
+    @staticmethod
+    def from_dict(a_dict):
+        if a_dict is None:
+            return None
+
+        ver = Version()
+        ver.version = a_dict.get('version')
+        ver.description = str(a_dict.get('description'))
+        return ver
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/pod_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/pod_handlers.py
new file mode 100644
index 000000000..5a4b55506
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/pod_handlers.py
@@ -0,0 +1,79 @@
+from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger
+from handlers import GenericApiHandler
+from pod_models import Pod
+from opnfv_testapi.common.constants import HTTP_FORBIDDEN
+
+
+class GenericPodHandler(GenericApiHandler):
+    def __init__(self, application, request, **kwargs):
+        super(GenericPodHandler, self).__init__(application, request, **kwargs)
+        self.table = 'pods'
+        self.table_cls = Pod
+
+
+class PodCLHandler(GenericPodHandler):
+    @swagger.operation(nickname='list-all')
+    def get(self):
+        """
+        @description: list all pods
+        @return 200: list all pods, empty list if no pod exists
+        @rtype: L{Pods}
+        """
+        self._list()
+
+    @swagger.operation(nickname='create')
+    def post(self):
+        """
+        @description: create a pod
+        @param body: pod to be created
+        @type body: L{PodCreateRequest}
+        @in body: body
+        @rtype: L{Pod}
+        @return 200: pod is created.
+ @raise 403: pod already exists + @raise 400: body or name not provided + """ + def query(data): + return {'name': data.name} + + def error(data): + message = '{} already exists as a pod'.format(data.name) + return HTTP_FORBIDDEN, message + + miss_checks = ['name'] + db_checks = [(self.table, False, query, error)] + self._create(miss_checks, db_checks) + + +class PodGURHandler(GenericPodHandler): + @swagger.operation(nickname='get-one') + def get(self, pod_name): + """ + @description: get a single pod by pod_name + @rtype: L{Pod} + @return 200: pod exist + @raise 404: pod not exist + """ + query = dict() + query['name'] = pod_name + self._get_one(query) + + def delete(self, pod_name): + """ Remove a POD + + # check for an existing pod to be deleted + mongo_dict = yield self.db.pods.find_one( + {'name': pod_name}) + pod = TestProject.pod(mongo_dict) + if pod is None: + raise HTTPError(HTTP_NOT_FOUND, + "{} could not be found as a pod to be deleted" + .format(pod_name)) + + # just delete it, or maybe save it elsewhere in a future + res = yield self.db.projects.remove( + {'name': pod_name}) + + self.finish_request(answer) + """ + pass diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/pod_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/pod_models.py new file mode 100644 index 000000000..2e645032c --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/pod_models.py @@ -0,0 +1,79 @@ +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + +__author__ = '__serena__' + +# name: name of the POD e.g. zte-1 +# mode: metal or virtual +# details: any detail +# role: ci-pod or community-pod or single-node + + +@swagger.model() +class PodCreateRequest(object): + def __init__(self, name, mode='', details='', role=""): + self.name = name + self.mode = mode + self.details = details + self.role = role + + def format(self): + return { + "name": self.name, + "mode": self.mode, + "details": self.details, + "role": self.role, + } + + +@swagger.model() +class Pod(PodCreateRequest): + def __init__(self, + name='', mode='', details='', + role="", _id='', create_date=''): + super(Pod, self).__init__(name, mode, details, role) + self._id = _id + self.creation_date = create_date + + @staticmethod + def from_dict(pod_dict): + if pod_dict is None: + return None + + p = Pod() + p._id = pod_dict.get('_id') + p.creation_date = str(pod_dict.get('creation_date')) + p.name = pod_dict.get('name') + p.mode = pod_dict.get('mode') + p.details = pod_dict.get('details') + p.role = pod_dict.get('role') + return p + + def format(self): + f = super(Pod, self).format() + f['creation_date'] = str(self.creation_date) + return f + + def format_http(self): + f = self.format() + f['_id'] = str(self._id) + return f + + +@swagger.model() +class Pods(object): + """ + @property pods: + @ptype pods: C{list} of L{Pod} + """ + def __init__(self): + self.pods = list() + + @staticmethod + def from_dict(res_dict): + if res_dict is None: + return None + + res = Pods() + for pod in res_dict.get('pods'): + res.pods.append(Pod.from_dict(pod)) + return res diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py new file mode 100644 index 000000000..191a93347 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/project_handlers.py @@ -0,0 +1,84 @@ +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger +from handlers import 
GenericApiHandler
+from opnfv_testapi.common.constants import HTTP_FORBIDDEN
+from project_models import Project
+
+
+class GenericProjectHandler(GenericApiHandler):
+    def __init__(self, application, request, **kwargs):
+        super(GenericProjectHandler, self).__init__(application,
+                                                    request,
+                                                    **kwargs)
+        self.table = 'projects'
+        self.table_cls = Project
+
+
+class ProjectCLHandler(GenericProjectHandler):
+    @swagger.operation(nickname="list-all")
+    def get(self):
+        """
+        @description: list all projects
+        @return 200: return all projects, empty list if no project exists
+        @rtype: L{Projects}
+        """
+        self._list()
+
+    @swagger.operation(nickname="create")
+    def post(self):
+        """
+        @description: create a project
+        @param body: project to be created
+        @type body: L{ProjectCreateRequest}
+        @in body: body
+        @rtype: L{Project}
+        @return 200: project is created.
+        @raise 403: project already exists
+        @raise 400: body or name not provided
+        """
+        def query(data):
+            return {'name': data.name}
+
+        def error(data):
+            message = '{} already exists as a project'.format(data.name)
+            return HTTP_FORBIDDEN, message
+
+        miss_checks = ['name']
+        db_checks = [(self.table, False, query, error)]
+        self._create(miss_checks, db_checks)
+
+
+class ProjectGURHandler(GenericProjectHandler):
+    @swagger.operation(nickname='get-one')
+    def get(self, project_name):
+        """
+        @description: get a single project by project_name
+        @rtype: L{Project}
+        @return 200: project exist
+        @raise 404: project not exist
+        """
+        self._get_one({'name': project_name})
+
+    @swagger.operation(nickname="update")
+    def put(self, project_name):
+        """
+        @description: update a single project by project_name
+        @param body: project to be updated
+        @type body: L{ProjectUpdateRequest}
+        @in body: body
+        @rtype: L{Project}
+        @return 200: update success
+        @raise 404: project not exist
+        @raise 403: new project name already exist or nothing to update
+        """
+        query = {'name': project_name}
+        db_keys = ['name']
+        self._update(query, db_keys)
+
+    @swagger.operation(nickname='delete')
+    def delete(self, project_name):
+        """
+        @description: delete a project by project_name
+        @return 200: delete success
+        @raise 404: project not exist
+        """
+        self._delete({'name': project_name})
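+
+
+# Illustrative REST usage of the handlers above (a sketch; the
+# /api/v1 base path matches the routing set up in
+# tests/unit/test_base.py in this patch, payload values fabricated):
+#
+#   POST   /api/v1/projects           {"name": "functest",
+#                                      "description": "..."}
+#   GET    /api/v1/projects/functest
+#   PUT    /api/v1/projects/functest  {"description": "updated"}
+#   DELETE /api/v1/projects/functest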
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/project_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/project_models.py
new file mode 100644
index 000000000..fbb6beba3
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/project_models.py
@@ -0,0 +1,88 @@
+from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger
+
+__author__ = '__serena__'
+
+
+@swagger.model()
+class ProjectCreateRequest(object):
+    def __init__(self, name, description=''):
+        self.name = name
+        self.description = description
+
+    def format(self):
+        return {
+            "name": self.name,
+            "description": self.description,
+        }
+
+
+@swagger.model()
+class ProjectUpdateRequest(object):
+    def __init__(self, name='', description=''):
+        self.name = name
+        self.description = description
+
+    def format(self):
+        return {
+            "name": self.name,
+            "description": self.description,
+        }
+
+
+@swagger.model()
+class Project(object):
+    def __init__(self,
+                 name=None, _id=None, description=None, create_date=None):
+        self._id = _id
+        self.name = name
+        self.description = description
+        self.creation_date = create_date
+
+    @staticmethod
+    def from_dict(res_dict):
+
+        if res_dict is None:
+            return None
+
+        t = Project()
+        t._id = res_dict.get('_id')
+        t.creation_date = res_dict.get('creation_date')
+        t.name = res_dict.get('name')
+        t.description = res_dict.get('description')
+
+        return t
+
+    def format(self):
+        return {
+            "name": self.name,
+            "description": self.description,
+            "creation_date": str(self.creation_date)
+        }
+
+    def format_http(self):
+        return {
+            "_id": str(self._id),
+            "name": self.name,
+            "description": self.description,
+            "creation_date": str(self.creation_date),
+        }
+
+
+@swagger.model()
+class Projects(object):
+    """
+    @property projects:
+    @ptype projects: C{list} of L{Project}
+    """
+    def __init__(self):
+        self.projects = list()
+
+    @staticmethod
+    def from_dict(res_dict):
+        if res_dict is None:
+            return None
+
+        res = Projects()
+        for project in res_dict.get('projects'):
+            res.projects.append(Project.from_dict(project))
+        return res
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
new file mode 100644
index 000000000..a9aa17bba
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
@@ -0,0 +1,162 @@
+from datetime import datetime, timedelta
+
+from bson.objectid import ObjectId
+from tornado.web import HTTPError
+
+from opnfv_testapi.common.constants import HTTP_BAD_REQUEST, HTTP_NOT_FOUND
+from opnfv_testapi.resources.handlers import GenericApiHandler
+from opnfv_testapi.resources.result_models import TestResult
+from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger
+
+
+class GenericResultHandler(GenericApiHandler):
+    def __init__(self, application, request, **kwargs):
+        super(GenericResultHandler, self).__init__(application,
+                                                   request,
+                                                   **kwargs)
+        self.table = self.db_results
+        self.table_cls = TestResult
+
+    def set_query(self):
+        query = dict()
+        for k in self.request.query_arguments.keys():
+            v = self.get_query_argument(k)
+            if k == 'project' or k == 'pod' or k == 'case':
+                query[k + '_name'] = v
+            elif k == 'period':
+                try:
+                    v = int(v)
+                except ValueError:
+                    raise HTTPError(HTTP_BAD_REQUEST, 'period must be int')
+                if v > 0:
+                    period = datetime.now() - timedelta(days=v)
+                    obj = {"$gte": str(period)}
+                    query['start_date'] = obj
+            elif k == 'trust_indicator':
+                query[k] = float(v)
+            else:
+                query[k] = v
+        return query
+
+
+class ResultsCLHandler(GenericResultHandler):
+    @swagger.operation(nickname="list-all")
+    def get(self):
+        """
+        @description: Retrieve result(s) for a test project
+                      on a specific pod.
+        @notes: Retrieve result(s) for a test project on a specific pod.
+            Available filters for this request are :
+             - project : project name
+             - case : case name
+             - pod : pod name
+             - version : platform version (Arno-R1, ...)
+             - installer (fuel, ...)
+             - build_tag : Jenkins build tag name
+             - period : x (x last days)
+             - scenario : the test scenario (previously version)
+             - criteria : the global criteria status passed or failed
+             - trust_indicator : evaluate the stability of the test case
+               to avoid running systematically long and stable test case
+
+            GET /results?project=functest&case=vPing&version=Arno-R1 \
+            &pod=pod_name&period=15
+        @return 200: all test results consistent with the query,
+                     empty list if no result is found
+        @rtype: L{TestResults}
+        @param pod: pod name
+        @type pod: L{string}
+        @in pod: query
+        @required pod: False
+        @param project: project name
+        @type project: L{string}
+        @in project: query
+        @required project: True
+        @param case: case name
+        @type case: L{string}
+        @in case: query
+        @required case: True
+        @param version: i.e.
Colorado + @type version: L{string} + @in version: query + @required version: False + @param installer: fuel/apex/joid/compass + @type installer: L{string} + @in installer: query + @required installer: False + @param build_tag: i.e. v3.0 + @type build_tag: L{string} + @in build_tag: query + @required build_tag: False + @param scenario: i.e. odl + @type scenario: L{string} + @in scenario: query + @required scenario: False + @param criteria: i.e. passed + @type criteria: L{string} + @in criteria: query + @required criteria: False + @param period: last days + @type period: L{string} + @in period: query + @required period: False + @param trust_indicator: must be int/long/float + @type trust_indicator: L{string} + @in trust_indicator: query + @required trust_indicator: False + """ + self._list(self.set_query()) + + @swagger.operation(nickname="create") + def post(self): + """ + @description: create a test result + @param body: result to be created + @type body: L{ResultCreateRequest} + @in body: body + @rtype: L{TestResult} + @return 200: result is created. + @raise 404: pod/project/testcase not exist + @raise 400: body/pod_name/project_name/case_name not provided + """ + def pod_query(data): + return {'name': data.pod_name} + + def pod_error(data): + message = 'Could not find pod [{}]'.format(data.pod_name) + return HTTP_NOT_FOUND, message + + def project_query(data): + return {'name': data.project_name} + + def project_error(data): + message = 'Could not find project [{}]'.format(data.project_name) + return HTTP_NOT_FOUND, message + + def testcase_query(data): + return {'project_name': data.project_name, 'name': data.case_name} + + def testcase_error(data): + message = 'Could not find testcase [{}] in project [{}]'\ + .format(data.case_name, data.project_name) + return HTTP_NOT_FOUND, message + + miss_checks = ['pod_name', 'project_name', 'case_name'] + db_checks = [('pods', True, pod_query, pod_error), + ('projects', True, project_query, project_error), + ('testcases', True, testcase_query, testcase_error)] + self._create(miss_checks, db_checks) + + +class ResultsGURHandler(GenericResultHandler): + @swagger.operation(nickname='get-one') + def get(self, result_id): + """ + @description: get a single result by result_id + @rtype: L{TestResult} + @return 200: test result exist + @raise 404: test result not exist + """ + query = dict() + query["_id"] = ObjectId(result_id) + self._get_one(query) diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py new file mode 100644 index 000000000..cf896735f --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/result_models.py @@ -0,0 +1,162 @@ +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + + +@swagger.model() +class ResultCreateRequest(object): + def __init__(self, + pod_name=None, + project_name=None, + case_name=None, + installer=None, + version=None, + start_date=None, + stop_date=None, + details=None, + build_tag=None, + scenario=None, + criteria=None, + trust_indicator=None): + self.pod_name = pod_name + self.project_name = project_name + self.case_name = case_name + self.installer = installer + self.version = version + self.start_date = start_date + self.stop_date = stop_date + self.details = details + self.build_tag = build_tag + self.scenario = scenario + self.criteria = criteria + self.trust_indicator = trust_indicator + + def format(self): + return { + "pod_name": self.pod_name, + 
"project_name": self.project_name, + "case_name": self.case_name, + "installer": self.installer, + "version": self.version, + "start_date": self.start_date, + "stop_date": self.stop_date, + "details": self.details, + "build_tag": self.build_tag, + "scenario": self.scenario, + "criteria": self.criteria, + "trust_indicator": self.trust_indicator + } + + +@swagger.model() +class TestResult(object): + """ + @property trust_indicator: must be int/long/float + @ptype trust_indicator: L{float} + """ + def __init__(self, _id=None, case_name=None, project_name=None, + pod_name=None, installer=None, version=None, + start_date=None, stop_date=None, details=None, + build_tag=None, scenario=None, criteria=None, + trust_indicator=None): + self._id = _id + self.case_name = case_name + self.project_name = project_name + self.pod_name = pod_name + self.installer = installer + self.version = version + self.start_date = start_date + self.stop_date = stop_date + self.details = details + self.build_tag = build_tag + self.scenario = scenario + self.criteria = criteria + self.trust_indicator = trust_indicator + + @staticmethod + def from_dict(a_dict): + + if a_dict is None: + return None + + t = TestResult() + t._id = a_dict.get('_id') + t.case_name = a_dict.get('case_name') + t.pod_name = a_dict.get('pod_name') + t.project_name = a_dict.get('project_name') + t.description = a_dict.get('description') + t.start_date = str(a_dict.get('start_date')) + t.stop_date = str(a_dict.get('stop_date')) + t.details = a_dict.get('details') + t.version = a_dict.get('version') + t.installer = a_dict.get('installer') + t.build_tag = a_dict.get('build_tag') + t.scenario = a_dict.get('scenario') + t.criteria = a_dict.get('criteria') + # 0 < trust indicator < 1 + # if bad value => set this indicator to 0 + t.trust_indicator = a_dict.get('trust_indicator') + if t.trust_indicator is not None: + if isinstance(t.trust_indicator, (int, long, float)): + if t.trust_indicator < 0: + t.trust_indicator = 0 + elif t.trust_indicator > 1: + t.trust_indicator = 1 + else: + t.trust_indicator = 0 + else: + t.trust_indicator = 0 + return t + + def format(self): + return { + "case_name": self.case_name, + "project_name": self.project_name, + "pod_name": self.pod_name, + "description": self.description, + "start_date": str(self.start_date), + "stop_date": str(self.stop_date), + "version": self.version, + "installer": self.installer, + "details": self.details, + "build_tag": self.build_tag, + "scenario": self.scenario, + "criteria": self.criteria, + "trust_indicator": self.trust_indicator + } + + def format_http(self): + return { + "_id": str(self._id), + "case_name": self.case_name, + "project_name": self.project_name, + "pod_name": self.pod_name, + "description": self.description, + "start_date": str(self.start_date), + "stop_date": str(self.stop_date), + "version": self.version, + "installer": self.installer, + "details": self.details, + "build_tag": self.build_tag, + "scenario": self.scenario, + "criteria": self.criteria, + "trust_indicator": self.trust_indicator + } + + +@swagger.model() +class TestResults(object): + """ + @property results: + @ptype results: C{list} of L{TestResult} + """ + def __init__(self): + self.results = list() + + @staticmethod + def from_dict(a_dict): + if a_dict is None: + return None + + res = TestResults() + for result in a_dict.get('results'): + res.results.append(TestResult.from_dict(result)) + return res diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py 
b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py new file mode 100644 index 000000000..dbeebeb98 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_handlers.py @@ -0,0 +1,107 @@ +from opnfv_testapi.common.constants import HTTP_FORBIDDEN +from opnfv_testapi.resources.handlers import GenericApiHandler +from opnfv_testapi.resources.testcase_models import Testcase +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + + +class GenericTestcaseHandler(GenericApiHandler): + def __init__(self, application, request, **kwargs): + super(GenericTestcaseHandler, self).__init__(application, + request, + **kwargs) + self.table = self.db_testcases + self.table_cls = Testcase + + +class TestcaseCLHandler(GenericTestcaseHandler): + @swagger.operation(nickname="list-all") + def get(self, project_name): + """ + @description: list all testcases of a project by project_name + @return 200: return all testcases of this project, + empty list is no testcase exist in this project + @rtype: L{TestCases} + """ + query = dict() + query['project_name'] = project_name + self._list(query) + + @swagger.operation(nickname="create") + def post(self, project_name): + """ + @description: create a testcase of a project by project_name + @param body: testcase to be created + @type body: L{TestcaseCreateRequest} + @in body: body + @rtype: L{Testcase} + @return 200: testcase is created in this project. + @raise 403: project not exist + or testcase already exists in this project + @raise 400: body or name not provided + """ + def p_query(data): + return {'name': data.project_name} + + def tc_query(data): + return { + 'project_name': data.project_name, + 'name': data.name + } + + def p_error(data): + message = 'Could not find project [{}]'.format(data.project_name) + return HTTP_FORBIDDEN, message + + def tc_error(data): + message = '{} already exists as a testcase in project {}'\ + .format(data.name, data.project_name) + return HTTP_FORBIDDEN, message + + miss_checks = ['name'] + db_checks = [(self.db_projects, True, p_query, p_error), + (self.db_testcases, False, tc_query, tc_error)] + self._create(miss_checks, db_checks, project_name=project_name) + + +class TestcaseGURHandler(GenericTestcaseHandler): + @swagger.operation(nickname='get-one') + def get(self, project_name, case_name): + """ + @description: get a single testcase + by case_name and project_name + @rtype: L{Testcase} + @return 200: testcase exist + @raise 404: testcase not exist + """ + query = dict() + query['project_name'] = project_name + query["name"] = case_name + self._get_one(query) + + @swagger.operation(nickname="update") + def put(self, project_name, case_name): + """ + @description: update a single testcase + by project_name and case_name + @param body: testcase to be updated + @type body: L{TestcaseUpdateRequest} + @in body: body + @rtype: L{Project} + @return 200: update success + @raise 404: testcase or project not exist + @raise 403: new testcase name already exist in project + or nothing to update + """ + query = {'project_name': project_name, 'name': case_name} + db_keys = ['name', 'project_name'] + self._update(query, db_keys) + + @swagger.operation(nickname='delete') + def delete(self, project_name, case_name): + """ + @description: delete a testcase by project_name and case_name + @return 200: delete success + @raise 404: testcase not exist + """ + query = {'project_name': project_name, 'name': case_name} + self._delete(query) diff --git 
a/utils/test/result_collection_api/opnfv_testapi/resources/testcase_models.py b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_models.py new file mode 100644 index 000000000..aa5789e70 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/resources/testcase_models.py @@ -0,0 +1,99 @@ +from opnfv_testapi.tornado_swagger_ui.tornado_swagger import swagger + +__author__ = '__serena__' + + +@swagger.model() +class TestcaseCreateRequest(object): + def __init__(self, name, url=None, description=None): + self.name = name + self.url = url + self.description = description + + def format(self): + return { + "name": self.name, + "description": self.description, + "url": self.url, + } + + +@swagger.model() +class TestcaseUpdateRequest(object): + def __init__(self, name=None, description=None, project_name=None): + self.name = name + self.description = description + self.project_name = project_name + + def format(self): + return { + "name": self.name, + "description": self.description, + "project_name": self.project_name, + } + + +@swagger.model() +class Testcase(object): + def __init__(self): + self._id = None + self.name = None + self.project_name = None + self.description = None + self.url = None + self.creation_date = None + + @staticmethod + def from_dict(a_dict): + + if a_dict is None: + return None + + t = Testcase() + t._id = a_dict.get('_id') + t.project_name = a_dict.get('project_name') + t.creation_date = a_dict.get('creation_date') + t.name = a_dict.get('name') + t.description = a_dict.get('description') + t.url = a_dict.get('url') + + return t + + def format(self): + return { + "name": self.name, + "description": self.description, + "project_name": self.project_name, + "creation_date": str(self.creation_date), + "url": self.url + } + + def format_http(self): + return { + "_id": str(self._id), + "name": self.name, + "project_name": self.project_name, + "description": self.description, + "creation_date": str(self.creation_date), + "url": self.url, + } + + +@swagger.model() +class Testcases(object): + """ + @property testcases: + @ptype testcases: C{list} of L{Testcase} + """ + def __init__(self): + self.testcases = list() + + @staticmethod + def from_dict(res_dict): + if res_dict is None: + return None + + res = Testcases() + for testcase in res_dict.get('testcases'): + res.testcases.append(Testcase.from_dict(testcase)) + return res diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/__init__.py b/utils/test/result_collection_api/opnfv_testapi/tests/__init__.py new file mode 100644 index 000000000..9f28b0bfa --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/__init__.py @@ -0,0 +1 @@ +__author__ = 'serena' diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/__init__.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/__init__.py new file mode 100644 index 000000000..3ed9fd0f3 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/__init__.py @@ -0,0 +1 @@ +__author__ = 'root' diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py new file mode 100644 index 000000000..bebb9e8b3 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py @@ -0,0 +1,144 @@ +from bson.objectid import ObjectId +from concurrent.futures import ThreadPoolExecutor + + +__author__ = 'serena' + + +def thread_execute(method, *args, **kwargs): + with 
ThreadPoolExecutor(max_workers=2) as executor:
+        result = executor.submit(method, *args, **kwargs)
+    return result
+
+
+class MemCursor(object):
+    def __init__(self, collection):
+        self.collection = collection
+        self.count = len(self.collection)
+
+    def _is_next_exist(self):
+        return self.count != 0
+
+    @property
+    def fetch_next(self):
+        return thread_execute(self._is_next_exist)
+
+    def next_object(self):
+        self.count -= 1
+        return self.collection.pop()
+
+
+class MemDb(object):
+
+    def __init__(self):
+        self.contents = []
+
+    def _find_one(self, spec_or_id=None, *args):
+        if spec_or_id is not None and not isinstance(spec_or_id, dict):
+            spec_or_id = {"_id": spec_or_id}
+        if '_id' in spec_or_id:
+            spec_or_id['_id'] = str(spec_or_id['_id'])
+        cursor = self._find(spec_or_id, *args)
+        for result in cursor:
+            return result
+        return None
+
+    def find_one(self, spec_or_id=None, *args):
+        return thread_execute(self._find_one, spec_or_id, *args)
+
+    def _insert(self, doc_or_docs, check_keys=True):
+
+        docs = doc_or_docs
+        return_one = False
+        if isinstance(docs, dict):
+            return_one = True
+            docs = [docs]
+
+        ids = []
+        for doc in docs:
+            if '_id' not in doc:
+                doc['_id'] = str(ObjectId())
+            if not check_keys or not self._find_one(doc['_id']):
+                ids.append(doc['_id'])
+                # append the single document, not the doc_or_docs
+                # argument (which may be the whole list)
+                self.contents.append(doc)
+
+        if len(ids) == 0:
+            return None
+        if return_one:
+            return ids[0]
+        else:
+            return ids
+
+    def insert(self, doc_or_docs, check_keys=True):
+        return thread_execute(self._insert, doc_or_docs, check_keys)
+
+    @staticmethod
+    def _compare_date(spec, value):
+        for k, v in spec.iteritems():
+            if k == '$gte' and value >= v:
+                return True
+        return False
+
+    @staticmethod
+    def _in(content, *args):
+        for arg in args:
+            for k, v in arg.iteritems():
+                if k == 'start_date':
+                    if not MemDb._compare_date(v, content.get(k)):
+                        return False
+                elif k == 'trust_indicator':
+                    if float(content.get(k)) != float(v):
+                        return False
+                elif content.get(k, None) != v:
+                    return False
+
+        return True
+
+    def _find(self, *args):
+        res = []
+        for content in self.contents:
+            if self._in(content, *args):
+                res.append(content)
+
+        return res
+
+    def find(self, *args):
+        return MemCursor(self._find(*args))
+
+    def _update(self, spec, document):
+        updated = False
+        for index in range(len(self.contents)):
+            content = self.contents[index]
+            if self._in(content, spec):
+                for k, v in document.iteritems():
+                    updated = True
+                    content[k] = v
+            self.contents[index] = content
+        return updated
+
+    def update(self, spec, document):
+        return thread_execute(self._update, spec, document)
+
+    def _remove(self, spec_or_id=None):
+        if spec_or_id is None:
+            self.contents = []
+        if not isinstance(spec_or_id, dict):
+            spec_or_id = {'_id': spec_or_id}
+        for index in range(len(self.contents)):
+            content = self.contents[index]
+            if self._in(content, spec_or_id):
+                del self.contents[index]
+                return True
+        return False
+
+    def remove(self, spec_or_id=None):
+        return thread_execute(self._remove, spec_or_id)
+
+    def clear(self):
+        self._remove()
+
+
+pods = MemDb()
+projects = MemDb()
+testcases = MemDb()
+results = MemDb()
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_base.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_base.py
new file mode 100644
index 000000000..8f0c28488
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_base.py
@@ -0,0 +1,148 @@
+import json
+
+from tornado.web import Application
+from tornado.testing import AsyncHTTPTestCase
+
+from
opnfv_testapi.resources.pod_handlers import PodCLHandler, PodGURHandler +from opnfv_testapi.resources.project_handlers import ProjectCLHandler, \ + ProjectGURHandler +from opnfv_testapi.resources.handlers import VersionHandler +from opnfv_testapi.resources.testcase_handlers import TestcaseCLHandler, \ + TestcaseGURHandler +from opnfv_testapi.resources.result_handlers import ResultsCLHandler, \ + ResultsGURHandler +from opnfv_testapi.resources.dashboard_handlers import DashboardHandler +from opnfv_testapi.resources.models import CreateResponse +import fake_pymongo + + +class TestBase(AsyncHTTPTestCase): + headers = {'Content-Type': 'application/json; charset=UTF-8'} + + def setUp(self): + self.basePath = '' + self.create_res = CreateResponse + self.get_res = None + self.list_res = None + self.update_res = None + self.req_d = None + self.req_e = None + self.addCleanup(self._clear) + super(TestBase, self).setUp() + + def get_app(self): + return Application( + [ + (r"/versions", VersionHandler), + (r"/api/v1/pods", PodCLHandler), + (r"/api/v1/pods/([^/]+)", PodGURHandler), + (r"/api/v1/projects", ProjectCLHandler), + (r"/api/v1/projects/([^/]+)", ProjectGURHandler), + (r"/api/v1/projects/([^/]+)/cases", TestcaseCLHandler), + (r"/api/v1/projects/([^/]+)/cases/([^/]+)", + TestcaseGURHandler), + (r"/api/v1/results", ResultsCLHandler), + (r"/api/v1/results/([^/]+)", ResultsGURHandler), + (r"/dashboard/v1/results", DashboardHandler), + ], + db=fake_pymongo, + debug=True, + ) + + def create_d(self, *args): + return self.create(self.req_d, *args) + + def create_e(self, *args): + return self.create(self.req_e, *args) + + def create(self, req=None, *args): + return self.create_help(self.basePath, req, *args) + + def create_help(self, uri, req, *args): + if req: + req = req.format() + res = self.fetch(self._update_uri(uri, *args), + method='POST', + body=json.dumps(req), + headers=self.headers) + + return self._get_return(res, self.create_res) + + def get(self, *args): + res = self.fetch(self._get_uri(*args), + method='GET', + headers=self.headers) + + def inner(): + new_args, num = self._get_valid_args(*args) + return self.get_res \ + if num != self._need_arg_num(self.basePath) else self.list_res + return self._get_return(res, inner()) + + def query(self, query): + res = self.fetch(self._get_query_uri(query), + method='GET', + headers=self.headers) + return self._get_return(res, self.list_res) + + def update(self, new=None, *args): + if new: + new = new.format() + res = self.fetch(self._get_uri(*args), + method='PUT', + body=json.dumps(new), + headers=self.headers) + return self._get_return(res, self.update_res) + + def delete(self, *args): + res = self.fetch(self._get_uri(*args), + method='DELETE', + headers=self.headers) + return res.code, res.body + + @staticmethod + def _get_valid_args(*args): + new_args = tuple(['%s' % arg for arg in args if arg is not None]) + return new_args, len(new_args) + + def _need_arg_num(self, uri): + return uri.count('%s') + + def _get_query_uri(self, query): + return self.basePath + '?' 
+ query + + def _get_uri(self, *args): + return self._update_uri(self.basePath, *args) + + def _update_uri(self, uri, *args): + r_uri = uri + new_args, num = self._get_valid_args(*args) + if num != self._need_arg_num(uri): + r_uri += '/%s' + + return r_uri % tuple(['%s' % arg for arg in new_args]) + + def _get_return(self, res, cls): + code = res.code + body = res.body + return code, self._get_return_body(code, body, cls) + + @staticmethod + def _get_return_body(code, body, cls): + return cls.from_dict(json.loads(body)) if code < 300 and cls else body + + def assert_href(self, body): + self.assertIn(self.basePath, body.href) + + def assert_create_body(self, body, req=None, *args): + if not req: + req = self.req_d + new_args = args + tuple([req.name]) + self.assertIn(self._get_uri(*new_args), body.href) + + @staticmethod + def _clear(): + fake_pymongo.pods.clear() + fake_pymongo.projects.clear() + fake_pymongo.testcases.clear() + fake_pymongo.results.clear() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py new file mode 100644 index 000000000..16a3140d8 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py @@ -0,0 +1,71 @@ +import unittest + +from test_result import TestResultBase +from opnfv_testapi.common.constants import HTTP_NOT_FOUND, HTTP_OK + +__author__ = '__serena__' + + +class TestDashboardBase(TestResultBase): + def setUp(self): + super(TestDashboardBase, self).setUp() + self.basePath = '/dashboard/v1/results' + self.create_help('/api/v1/results', self.req_d) + self.create_help('/api/v1/results', self.req_d) + self.list_res = None + + +class TestDashboardQuery(TestDashboardBase): + def test_projectMissing(self): + code, body = self.query(self._set_query(project='missing')) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Project name missing', body) + + def test_projectNotReady(self): + code, body = self.query(self._set_query(project='notReadyProject')) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Project [notReadyProject] not dashboard ready', body) + + def test_testcaseMissing(self): + code, body = self.query(self._set_query(case='missing')) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Test case missing for project [{}]' + .format(self.project), + body) + + def test_testcaseNotReady(self): + code, body = self.query(self._set_query(case='notReadyCase')) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn( + 'Test case [notReadyCase] not dashboard ready for project [%s]' + % self.project, + body) + + def test_success(self): + code, body = self.query(self._set_query()) + self.assertEqual(code, HTTP_OK) + self.assertIn('{"description": "vPing results for Dashboard"}', body) + + def test_caseIsStatus(self): + code, body = self.query(self._set_query(case='status')) + self.assertEqual(code, HTTP_OK) + self.assertIn('{"description": "Functest status"}', body) + + def _set_query(self, project=None, case=None): + uri = '' + for k, v in list(locals().iteritems()): + if k == 'self' or k == 'uri': + continue + if v is None: + v = eval('self.' 
+ k) + if v != 'missing': + uri += '{}={}&'.format(k, v) + uri += 'pod={}&'.format(self.pod) + uri += 'version={}&'.format(self.version) + uri += 'installer={}&'.format(self.installer) + uri += 'period={}&'.format(5) + return uri[0:-1] + + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py new file mode 100644 index 000000000..6920fcad8 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py @@ -0,0 +1,68 @@ +import unittest +from tornado.web import Application +from tornado import gen +from tornado.testing import AsyncHTTPTestCase, gen_test + +import fake_pymongo + + +class MyTest(AsyncHTTPTestCase): + def setUp(self): + super(MyTest, self).setUp() + self.db = fake_pymongo + self.addCleanup(self._clear) + self.io_loop.run_sync(self.fixture_setup) + + def get_app(self): + return Application() + + @gen.coroutine + def fixture_setup(self): + self.test1 = {'_id': '1', 'name': 'test1'} + self.test2 = {'name': 'test2'} + yield self.db.pods.insert({'_id': '1', 'name': 'test1'}) + yield self.db.pods.insert({'name': 'test2'}) + + @gen_test + def test_find_one(self): + user = yield self.db.pods.find_one({'name': 'test1'}) + self.assertEqual(user, self.test1) + self.db.pods.remove() + + @gen_test + def test_find(self): + cursor = self.db.pods.find() + names = [] + while (yield cursor.fetch_next): + ob = cursor.next_object() + names.append(ob.get('name')) + self.assertItemsEqual(names, ['test1', 'test2']) + + @gen_test + def test_update(self): + yield self.db.pods.update({'_id': '1'}, {'name': 'new_test1'}) + user = yield self.db.pods.find_one({'_id': '1'}) + self.assertEqual(user.get('name', None), 'new_test1') + + @gen_test + def test_remove(self): + yield self.db.pods.remove({'_id': '1'}) + user = yield self.db.pods.find_one({'_id': '1'}) + self.assertIsNone(user) + + @gen_test + def test_insert_check_keys(self): + yield self.db.pods.insert({'_id': '1', 'name': 'test1'}, + check_keys=False) + cursor = self.db.pods.find({'_id': '1'}) + names = [] + while (yield cursor.fetch_next): + ob = cursor.next_object() + names.append(ob.get('name')) + self.assertItemsEqual(names, ['test1', 'test1']) + + def _clear(self): + self.db.pods.clear() + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_pod.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_pod.py new file mode 100644 index 000000000..2f5d84d8d --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_pod.py @@ -0,0 +1,81 @@ +import unittest + +from test_base import TestBase +from opnfv_testapi.resources.pod_models import PodCreateRequest, Pod, Pods +from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ + HTTP_FORBIDDEN, HTTP_NOT_FOUND + + +class TestPodBase(TestBase): + def setUp(self): + super(TestPodBase, self).setUp() + self.req_d = PodCreateRequest('zte-1', 'virtual', + 'zte pod 1', 'ci-pod') + self.req_e = PodCreateRequest('zte-2', 'metal', 'zte pod 2') + self.get_res = Pod + self.list_res = Pods + self.basePath = '/api/v1/pods' + + def assert_get_body(self, pod, req=None): + if not req: + req = self.req_d + self.assertEqual(pod.name, req.name) + self.assertEqual(pod.mode, req.mode) + self.assertEqual(pod.details, req.details) + self.assertEqual(pod.role, req.role) + self.assertIsNotNone(pod.creation_date) + 
self.assertIsNotNone(pod._id) + + +class TestPodCreate(TestPodBase): + def test_withoutBody(self): + (code, body) = self.create() + self.assertEqual(code, HTTP_BAD_REQUEST) + + def test_emptyName(self): + req_empty = PodCreateRequest('') + (code, body) = self.create(req_empty) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_noneName(self): + req_none = PodCreateRequest(None) + (code, body) = self.create(req_none) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_success(self): + code, body = self.create_d() + self.assertEqual(code, HTTP_OK) + self.assert_create_body(body) + + def test_alreadyExist(self): + self.create_d() + code, body = self.create_d() + self.assertEqual(code, HTTP_FORBIDDEN) + self.assertIn('already exists', body) + + +class TestPodGet(TestPodBase): + def test_notExist(self): + code, body = self.get('notExist') + self.assertEqual(code, HTTP_NOT_FOUND) + + def test_getOne(self): + self.create_d() + code, body = self.get(self.req_d.name) + self.assert_get_body(body) + + def test_list(self): + self.create_d() + self.create_e() + code, body = self.get() + self.assertEqual(len(body.pods), 2) + for pod in body.pods: + if self.req_d.name == pod.name: + self.assert_get_body(pod) + else: + self.assert_get_body(pod, self.req_e) + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py new file mode 100644 index 000000000..1b4af916c --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py @@ -0,0 +1,133 @@ +import unittest + +from test_base import TestBase +from opnfv_testapi.resources.project_models import ProjectCreateRequest, \ + Project, Projects +from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ + HTTP_FORBIDDEN, HTTP_NOT_FOUND + + +class TestProjectBase(TestBase): + def setUp(self): + super(TestProjectBase, self).setUp() + self.req_d = ProjectCreateRequest('vping', 'vping-ssh test') + self.req_e = ProjectCreateRequest('doctor', 'doctor test') + self.get_res = Project + self.list_res = Projects + self.update_res = Project + self.basePath = '/api/v1/projects' + + def assert_body(self, project, req=None): + if not req: + req = self.req_d + self.assertEqual(project.name, req.name) + self.assertEqual(project.description, req.description) + self.assertIsNotNone(project._id) + self.assertIsNotNone(project.creation_date) + + +class TestProjectCreate(TestProjectBase): + def test_withoutBody(self): + (code, body) = self.create() + self.assertEqual(code, HTTP_BAD_REQUEST) + + def test_emptyName(self): + req_empty = ProjectCreateRequest('') + (code, body) = self.create(req_empty) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_noneName(self): + req_none = ProjectCreateRequest(None) + (code, body) = self.create(req_none) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_success(self): + (code, body) = self.create_d() + self.assertEqual(code, HTTP_OK) + self.assert_create_body(body) + + def test_alreadyExist(self): + self.create_d() + (code, body) = self.create_d() + self.assertEqual(code, HTTP_FORBIDDEN) + self.assertIn('already exists', body) + + +class TestProjectGet(TestProjectBase): + def test_notExist(self): + code, body = self.get('notExist') + self.assertEqual(code, HTTP_NOT_FOUND) + + def 
test_getOne(self): + self.create_d() + code, body = self.get(self.req_d.name) + self.assertEqual(code, HTTP_OK) + self.assert_body(body) + + def test_list(self): + self.create_d() + self.create_e() + code, body = self.get() + for project in body.projects: + if self.req_d.name == project.name: + self.assert_body(project) + else: + self.assert_body(project, self.req_e) + + +class TestProjectUpdate(TestProjectBase): + def test_withoutBody(self): + code, _ = self.update(None, 'noBody') + self.assertEqual(code, HTTP_BAD_REQUEST) + + def test_notFound(self): + code, _ = self.update(self.req_e, 'notFound') + self.assertEqual(code, HTTP_NOT_FOUND) + + def test_newNameExist(self): + self.create_d() + self.create_e() + code, body = self.update(self.req_e, self.req_d.name) + self.assertEqual(code, HTTP_FORBIDDEN) + self.assertIn("already exists", body) + + def test_noUpdate(self): + self.create_d() + code, body = self.update(self.req_d, self.req_d.name) + self.assertEqual(code, HTTP_FORBIDDEN) + self.assertIn("Nothing to update", body) + + def test_success(self): + self.create_d() + code, body = self.get(self.req_d.name) + _id = body._id + + req = ProjectCreateRequest('newName', 'new description') + code, body = self.update(req, self.req_d.name) + self.assertEqual(code, HTTP_OK) + self.assertEqual(_id, body._id) + self.assert_body(body, req) + + _, new_body = self.get(req.name) + self.assertEqual(_id, new_body._id) + self.assert_body(new_body, req) + + +class TestProjectDelete(TestProjectBase): + def test_notFound(self): + code, body = self.delete('notFound') + self.assertEqual(code, HTTP_NOT_FOUND) + + def test_success(self): + self.create_d() + code, body = self.delete(self.req_d.name) + self.assertEqual(code, HTTP_OK) + self.assertEqual(body, '') + + code, body = self.get(self.req_d.name) + self.assertEqual(code, HTTP_NOT_FOUND) + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py new file mode 100644 index 000000000..5e424f8f0 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py @@ -0,0 +1,267 @@ +import unittest +import copy + +from test_base import TestBase +from opnfv_testapi.resources.pod_models import PodCreateRequest +from opnfv_testapi.resources.project_models import ProjectCreateRequest +from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest +from opnfv_testapi.resources.result_models import ResultCreateRequest, \ + TestResult, TestResults +from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND + +__author__ = '__serena__' + + +class Details(object): + def __init__(self, timestart=None, duration=None, status=None): + self.timestart = timestart + self.duration = duration + self.status = status + + def format(self): + return { + "timestart": self.timestart, + "duration": self.duration, + "status": self.status + } + + @staticmethod + def from_dict(a_dict): + + if a_dict is None: + return None + + t = Details() + t.timestart = a_dict.get('timestart') + t.duration = a_dict.get('duration') + t.status = a_dict.get('status') + return t + + +class TestResultBase(TestBase): + def setUp(self): + self.pod = 'zte-pod1' + self.project = 'functest' + self.case = 'vPing' + self.installer = 'fuel' + self.version = 'C' + self.build_tag = 'v3.0' + self.scenario = 'odl-l2' + self.criteria = 'passed' + self.trust_indicator = 0.7 + self.start_date = "2016-05-23 
07:16:09.477097" + self.stop_date = "2016-05-23 07:16:19.477097" + super(TestResultBase, self).setUp() + self.details = Details(timestart='0', duration='9s', status='OK') + self.req_d = ResultCreateRequest(pod_name=self.pod, + project_name=self.project, + case_name=self.case, + installer=self.installer, + version=self.version, + start_date=self.start_date, + stop_date=self.stop_date, + details=self.details.format(), + build_tag=self.build_tag, + scenario=self.scenario, + criteria=self.criteria, + trust_indicator=self.trust_indicator) + self.get_res = TestResult + self.list_res = TestResults + self.basePath = '/api/v1/results' + self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1') + self.req_project = ProjectCreateRequest(self.project, 'vping test') + self.req_testcase = TestcaseCreateRequest(self.case, + '/cases/vping', + 'vping-ssh test') + self.create_help('/api/v1/pods', self.req_pod) + self.create_help('/api/v1/projects', self.req_project) + self.create_help('/api/v1/projects/%s/cases', + self.req_testcase, + self.project) + + def assert_res(self, code, result, req=None): + self.assertEqual(code, HTTP_OK) + if req is None: + req = self.req_d + self.assertEqual(result.pod_name, req.pod_name) + self.assertEqual(result.project_name, req.project_name) + self.assertEqual(result.case_name, req.case_name) + self.assertEqual(result.installer, req.installer) + self.assertEqual(result.version, req.version) + details_req = Details.from_dict(req.details) + details_res = Details.from_dict(result.details) + self.assertEqual(details_res.duration, details_req.duration) + self.assertEqual(details_res.timestart, details_req.timestart) + self.assertEqual(details_res.status, details_req.status) + self.assertEqual(result.build_tag, req.build_tag) + self.assertEqual(result.scenario, req.scenario) + self.assertEqual(result.criteria, req.criteria) + self.assertEqual(result.trust_indicator, req.trust_indicator) + self.assertEqual(result.start_date, req.start_date) + self.assertEqual(result.stop_date, req.stop_date) + self.assertIsNotNone(result._id) + + +class TestResultCreate(TestResultBase): + def test_nobody(self): + (code, body) = self.create(None) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('no body', body) + + def test_podNotProvided(self): + req = self.req_d + req.pod_name = None + (code, body) = self.create(req) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('pod_name missing', body) + + def test_projectNotProvided(self): + req = self.req_d + req.project_name = None + (code, body) = self.create(req) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('project_name missing', body) + + def test_testcaseNotProvided(self): + req = self.req_d + req.case_name = None + (code, body) = self.create(req) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('case_name missing', body) + + def test_noPod(self): + req = self.req_d + req.pod_name = 'notExistPod' + (code, body) = self.create(req) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Could not find pod', body) + + def test_noProject(self): + req = self.req_d + req.project_name = 'notExistProject' + (code, body) = self.create(req) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Could not find project', body) + + def test_noTestcase(self): + req = self.req_d + req.case_name = 'notExistTestcase' + (code, body) = self.create(req) + self.assertEqual(code, HTTP_NOT_FOUND) + self.assertIn('Could not find testcase', body) + + def test_success(self): + (code, body) = self.create_d() + 
self.assertEqual(code, HTTP_OK) + self.assert_href(body) + + def test_createSameResults(self): + req_again = copy.deepcopy(self.req_d) + req_again.start_date = "2016-05-23 08:16:09.477097" + req_again.stop_date = "2016-05-23 08:16:19.477097" + + (code, body) = self.create(req_again) + self.assertEqual(code, HTTP_OK) + self.assert_href(body) + + +class TestResultGet(TestResultBase): + def test_getOne(self): + _, res = self.create_d() + _id = res.href.split('/')[-1] + code, body = self.get(_id) + self.assert_res(code, body) + + def test_queryPod(self): + self._query_and_assert(self._set_query('pod')) + + def test_queryProject(self): + self._query_and_assert(self._set_query('project')) + + def test_queryTestcase(self): + self._query_and_assert(self._set_query('case')) + + def test_queryVersion(self): + self._query_and_assert(self._set_query('version')) + + def test_queryInstaller(self): + self._query_and_assert(self._set_query('installer')) + + def test_queryBuildTag(self): + self._query_and_assert(self._set_query('build_tag')) + + def test_queryScenario(self): + self._query_and_assert(self._set_query('scenario')) + + def test_queryTrustIndicator(self): + self._query_and_assert(self._set_query('trust_indicator')) + + def test_queryCriteria(self): + self._query_and_assert(self._set_query('criteria')) + + def test_queryPeriodFail(self): + self._query_and_assert(self._set_query('period=1'), + aheadof=True, + found=False) + + def test_queryPeriodSuccess(self): + self._query_and_assert(self._set_query('period=1'), + aheadof=False, + found=True) + + def test_combination(self): + self._query_and_assert(self._set_query('pod', + 'project', + 'case', + 'version', + 'installer', + 'build_tag', + 'scenario', + 'trust_indicator', + 'criteria', + 'period=1')) + + def test_notFound(self): + self._query_and_assert(self._set_query('pod=notExistPod', + 'project', + 'case', + 'version', + 'installer', + 'build_tag', + 'scenario', + 'trust_indicator', + 'criteria', + 'period=1'), + found=False) + + def _query_and_assert(self, query, aheadof=False, found=True): + import copy + from datetime import datetime, timedelta + req = copy.deepcopy(self.req_d) + if aheadof: + req.start_date = datetime.now() - timedelta(days=10) + else: + req.start_date = datetime.now() + req.stop_date = str(req.start_date + timedelta(minutes=10)) + req.start_date = str(req.start_date) + _, res = self.create(req) + code, body = self.query(query) + if not found: + self.assertEqual(code, HTTP_OK) + self.assertEqual(0, len(body.results)) + else: + self.assertEqual(1, len(body.results)) + for result in body.results: + self.assert_res(code, result, req) + + def _set_query(self, *args): + uri = '' + for arg in args: + if '=' in arg: + uri += arg + '&' + else: + uri += '{}={}&'.format(arg, eval('self.' 
+ arg)) + return uri[0: -1] + +if __name__ == '__main__': + unittest.main() diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_testcase.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_testcase.py new file mode 100644 index 000000000..dc2082100 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_testcase.py @@ -0,0 +1,183 @@ +import unittest + +from test_base import TestBase +from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest, \ + Testcase, Testcases, TestcaseUpdateRequest +from opnfv_testapi.resources.project_models import ProjectCreateRequest +from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \ + HTTP_FORBIDDEN, HTTP_NOT_FOUND + + +__author__ = '__serena__' + + +class TestCaseBase(TestBase): + def setUp(self): + super(TestCaseBase, self).setUp() + self.req_d = TestcaseCreateRequest('vping_1', + '/cases/vping_1', + 'vping-ssh test') + self.req_e = TestcaseCreateRequest('doctor_1', + '/cases/doctor_1', + 'create doctor') + self.update_d = TestcaseUpdateRequest('vping_1', + 'vping-ssh test', + 'functest') + self.update_e = TestcaseUpdateRequest('doctor_1', + 'create doctor', + 'functest') + self.get_res = Testcase + self.list_res = Testcases + self.update_res = Testcase + self.basePath = '/api/v1/projects/%s/cases' + self.create_project() + + def assert_body(self, case, req=None): + if not req: + req = self.req_d + self.assertEqual(case.name, req.name) + self.assertEqual(case.description, req.description) + self.assertEqual(case.url, req.url) + self.assertIsNotNone(case._id) + self.assertIsNotNone(case.creation_date) + + def assert_update_body(self, old, new, req=None): + if not req: + req = self.req_d + self.assertEqual(new.name, req.name) + self.assertEqual(new.description, req.description) + self.assertEqual(new.url, old.url) + self.assertIsNotNone(new._id) + self.assertIsNotNone(new.creation_date) + + def create_project(self): + req_p = ProjectCreateRequest('functest', 'vping-ssh test') + self.create_help('/api/v1/projects', req_p) + self.project = req_p.name + + def create_d(self): + return super(TestCaseBase, self).create_d(self.project) + + def create_e(self): + return super(TestCaseBase, self).create_e(self.project) + + def get(self, case=None): + return super(TestCaseBase, self).get(self.project, case) + + def update(self, new=None, case=None): + return super(TestCaseBase, self).update(new, self.project, case) + + def delete(self, case): + return super(TestCaseBase, self).delete(self.project, case) + + +class TestCaseCreate(TestCaseBase): + def test_noBody(self): + (code, body) = self.create(None, 'vping') + self.assertEqual(code, HTTP_BAD_REQUEST) + + def test_noProject(self): + code, body = self.create(self.req_d, 'noProject') + self.assertEqual(code, HTTP_FORBIDDEN) + self.assertIn('Could not find project', body) + + def test_emptyName(self): + req_empty = TestcaseCreateRequest('') + (code, body) = self.create(req_empty, self.project) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_noneName(self): + req_none = TestcaseCreateRequest(None) + (code, body) = self.create(req_none, self.project) + self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertIn('name missing', body) + + def test_success(self): + code, body = self.create_d() + self.assertEqual(code, HTTP_OK) + self.assert_create_body(body, None, self.project) + + def test_alreadyExist(self): + self.create_d() + code, body = self.create_d() + self.assertEqual(code, 
HTTP_FORBIDDEN)
+        self.assertIn('already exists', body)
+
+
+class TestCaseGet(TestCaseBase):
+    def test_notExist(self):
+        code, body = self.get('notExist')
+        self.assertEqual(code, HTTP_NOT_FOUND)
+
+    def test_getOne(self):
+        self.create_d()
+        code, body = self.get(self.req_d.name)
+        self.assertEqual(code, HTTP_OK)
+        self.assert_body(body)
+
+    def test_list(self):
+        self.create_d()
+        self.create_e()
+        code, body = self.get()
+        for case in body.testcases:
+            if self.req_d.name == case.name:
+                self.assert_body(case)
+            else:
+                self.assert_body(case, self.req_e)
+
+
+class TestCaseUpdate(TestCaseBase):
+    def test_noBody(self):
+        code, _ = self.update(case='noBody')
+        self.assertEqual(code, HTTP_BAD_REQUEST)
+
+    def test_notFound(self):
+        code, _ = self.update(self.update_e, 'notFound')
+        self.assertEqual(code, HTTP_NOT_FOUND)
+
+    def test_newNameExist(self):
+        self.create_d()
+        self.create_e()
+        code, body = self.update(self.update_e, self.req_d.name)
+        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertIn("already exists", body)
+
+    def test_noUpdate(self):
+        self.create_d()
+        code, body = self.update(self.update_d, self.req_d.name)
+        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertIn("Nothing to update", body)
+
+    def test_success(self):
+        self.create_d()
+        code, body = self.get(self.req_d.name)
+        _id = body._id
+
+        code, body = self.update(self.update_e, self.req_d.name)
+        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(_id, body._id)
+        self.assert_update_body(self.req_d, body, self.update_e)
+
+        _, new_body = self.get(self.req_e.name)
+        self.assertEqual(_id, new_body._id)
+        self.assert_update_body(self.req_d, new_body, self.update_e)
+
+
+class TestCaseDelete(TestCaseBase):
+    def test_notFound(self):
+        code, body = self.delete('notFound')
+        self.assertEqual(code, HTTP_NOT_FOUND)
+
+    def test_success(self):
+        self.create_d()
+        code, body = self.delete(self.req_d.name)
+        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(body, '')
+
+        code, body = self.get(self.req_d.name)
+        self.assertEqual(code, HTTP_NOT_FOUND)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_version.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_version.py
new file mode 100644
index 000000000..626b29083
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_version.py
@@ -0,0 +1,25 @@
+import unittest
+
+from test_base import TestBase
+from opnfv_testapi.resources.models import Versions
+
+__author__ = 'serena'
+
+
+class TestVersionBase(TestBase):
+    def setUp(self):
+        super(TestVersionBase, self).setUp()
+        self.list_res = Versions
+        self.basePath = '/versions'
+
+
+class TestVersion(TestVersionBase):
+    def test_success(self):
+        code, body = self.get()
+        self.assertEqual(200, code)
+        self.assertEqual(len(body.versions), 1)
+        self.assertEqual(body.versions[0].version, 'v1.0')
+        self.assertEqual(body.versions[0].description, 'basics')
+
+if __name__ == '__main__':
+    unittest.main()
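A note on the pattern these unit files share: each one subclasses TestBase, points `basePath` at the resource route registered in `get_app()`, and plugs its request/response models into `req_d`, `get_res` and `list_res`; the shared `create_d()`/`get()`/`update()`/`delete()` helpers then drive the fake-pymongo-backed application. A minimal sketch of a new resource test following that contract (the `foo_models` import and the `Foo*` names are hypothetical placeholders, not part of this patch):

```python
import unittest

from test_base import TestBase
# Hypothetical models, used only for illustration; a real resource test
# would import its own models, as test_pod.py and test_project.py do above.
from opnfv_testapi.resources.foo_models import FooCreateRequest, Foo, Foos
from opnfv_testapi.common.constants import HTTP_OK


class TestFooBase(TestBase):
    def setUp(self):
        super(TestFooBase, self).setUp()
        self.req_d = FooCreateRequest('foo-1')  # default create payload
        self.get_res = Foo                      # model for GET-one responses
        self.list_res = Foos                    # model for GET-list responses
        self.basePath = '/api/v1/foos'          # route assumed in get_app()


class TestFooCreate(TestFooBase):
    def test_success(self):
        # create_d() POSTs req_d to basePath and decodes a CreateResponse
        code, body = self.create_d()
        self.assertEqual(code, HTTP_OK)
        self.assert_create_body(body)  # href should echo /api/v1/foos/foo-1

if __name__ == '__main__':
    unittest.main()
```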
"Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README new file mode 100644 index 000000000..1bcc1145e --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README @@ -0,0 +1 @@ +Please see documentation here: https://github.com/SerenaFeng/tornado-swagger diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README.md b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README.md new file mode 100644 index 000000000..e90e1309a --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/README.md @@ -0,0 +1,277 @@ +# tornado-swagger + +## What is tornado-swagger? +tornado is a wrapper for tornado which enables swagger-ui support. + +In essense, you just need to wrap the Api instance and add a few python decorators to get full swagger support. + +## How to: +Install: + +``` +python setup.py install +``` +(This installs tornado and epydoc as well) + + +And in your program, where you'd usually just use tornado, add just a little bit of sauce and get a swagger spec out. + + +```python +from tornado.web import RequestHandler, HTTPError +from tornado_swagger import swagger + +swagger.docs() + +# You may decorate your operation with @swagger.operation and use docs to inform information +class ItemNoParamHandler(GenericApiHandler): + @swagger.operation(nickname='create') + def post(self): + """ + @param body: create test results for a item. + @type body: L{Item} + @return 200: item is created. + @raise 400: invalid input + """ + +# Operations not decorated with @swagger.operation do not get added to the swagger docs + +class ItemNoParamHandler(GenericApiHandler): + def options(self): + """ + I'm not visible in the swagger docs + """ + pass + + +# Then you use swagger.Application instead of tornado.web.Application +# and do other operations as usual + +def make_app(): + return swagger.Application([ + (r"/items", ItemNoParamHandler), + (r"/items/([^/]+)", ItemHandler), + (r"/items/([^/]+)/cases/([^/]+)", ItemOptionParamHandler), + ]) + +# You define models like this: +@swagger.model +class Item: + """ + @descriptin: + This is an example of a model class that has parameters in its constructor + and the fields in the swagger spec are derived from the parameters to __init__. + @notes: + In this case we would have property1, property2 as required parameters and property3 as optional parameter. 
+    @property property3: Item description
+    @ptype property3: L{PropertySubclass}
+    """
+    def __init__(self, property1, property2=None):
+        self.property1 = property1
+        self.property2 = property2
+
+# Swagger json:
+    "models": {
+        "Item": {
+            "description": "A description...",
+            "id": "Item",
+            "required": [
+                "property1"
+            ],
+            "properties": {
+                "property1": {
+                    "type": "string"
+                },
+                "property2": {
+                    "type": "string",
+                    "default": null
+                }
+            }
+        }
+    }
+
+# If you declare an __init__ method with meaningful arguments,
+# then those args could be used to deduce the swagger model fields,
+# just as shown above.
+
+# If you declare an @property in the docstring, that property (property3 here)
+# will also be used to deduce the swagger model fields
+class Item:
+    """
+    @property property3: Item description
+    """
+    def __init__(self, property1, property2):
+        self.property1 = property1
+        self.property2 = property2
+
+# Swagger json:
+    "models": {
+        "Item": {
+            "description": "A description...",
+            "id": "Item",
+            "required": [
+                "property1"
+            ],
+            "properties": {
+                "property1": {
+                    "type": "string"
+                },
+                "property2": {
+                    "type": "string"
+                },
+                "property3": {
+                    "type": "string"
+                }
+            }
+        }
+    }
+
+# If you declare an argument with @ptype, the type of this argument will be
+# specified rather than the default 'string'
class Item:
+    """
+    @ptype property3: L{PropertySubclass}
+    """
+    def __init__(self, property1, property2, property3=None):
+        self.property1 = property1
+        self.property2 = property2
+        self.property3 = property3
+
+# Swagger json:
+    "models": {
+        "Item": {
+            "description": "A description...",
+            "id": "Item",
+            "required": [
+                "property1"
+            ],
+            "properties": {
+                "property1": {
+                    "type": "string"
+                },
+                "property2": {
+                    "type": "string"
+                },
+                "property3": {
+                    "type": "PropertySubclass",
+                    "default": null
+                }
+            }
+        }
+    }
+
+# If you want to declare a list property, you can do it like this:
+class Item:
+    """
+    @ptype property3: L{PropertySubclass}
+    @ptype property4: C{list} of L{PropertySubclass}
+    """
+    def __init__(self, property1, property2, property3, property4=None):
+        self.property1 = property1
+        self.property2 = property2
+        self.property3 = property3
+        self.property4 = property4
+
+# Swagger json:
+    "models": {
+        "Item": {
+            "description": "A description...",
+            "id": "Item",
+            "required": [
+                "property1"
+            ],
+            "properties": {
+                "property1": {
+                    "type": "string"
+                },
+                "property2": {
+                    "type": "string"
+                },
+                "property3": {
+                    "type": "PropertySubclass",
+                    "default": null
+                },
+                "property4": {
+                    "default": null,
+                    "items": {
+                        "type": "PropertySubclass"
+                    },
+                    "type": "array"
+                }
+            }
+        }
+    }
+
+# If it is a query:
+class ItemQueryHandler(GenericApiHandler):
+    @swagger.operation(nickname='query')
+    def get(self):
+        """
+        @param property1:
+        @type property1: L{string}
+        @in property1: query
+        @required property1: False
+
+        @param property2:
+        @type property2: L{string}
+        @in property2: query
+        @required property2: True
+        @rtype: L{Item}
+
+        @notes: GET /item?property1=1&property2=1
+        """
+
+# Swagger json:
+    "apis": [
+        {
+            "operations": [
+                {
+                    "parameters": [
+                        {
+                            "name": "property1",
+                            "dataType": "string",
+                            "paramType": "query",
+                            "description": ""
+                        },
+                        {
+                            "name": "property2",
+                            "dataType": "string",
+                            "paramType": "query",
+                            "required": true,
+                            "description": ""
+                        }
+                    ],
+                    "responseClass": "Item",
+                    "notes": null,
+                    "responseMessages": [],
+                    "summary": null,
+                    "httpMethod": "GET",
+                    "nickname": "query"
+                }
+            ],
+            "path": "/item",
+            "description": null
+        },
+        ....
+    ]
+```
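+
+# Checking the generated spec
+
+The "Swagger json" fragments above are excerpts of what the spec endpoint returns once your app is running (see the next section). A quick way to eyeball the real output (a minimal sketch; it assumes the example app is listening on port 711 and that the spec exposes a top-level "models" key, as in the fragments above):
+
+```python
+import json
+import urllib2  # Python 2, matching this codebase
+
+# Fetch the machine-readable spec and pretty-print the model definitions.
+spec = json.load(urllib2.urlopen('http://localhost:711/swagger/spec'))
+print json.dumps(spec.get('models', {}), indent=2)
+```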
+
+# Running and testing
+
+Now run your tornado app
+
+```
+python basic.py
+```
+
+And fetch the spec:
+
+```
+curl http://localhost:711/swagger/spec
+```
+
+Or open the web UI:
+```
+http://localhost:711/swagger/spec.html
+```
+
+# Passing more metadata to swagger
+Customized arguments used in creating the 'swagger.docs' object will be supported later.
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/__init__.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/__init__.py
new file mode 100644
index 000000000..031a4a20e
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+__author__ = 'serena'
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/example/basic.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/example/basic.py
new file mode 100644
index 000000000..93ff00b43
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/example/basic.py
@@ -0,0 +1,219 @@
+import json
+
+import tornado.ioloop
+from tornado.web import RequestHandler, HTTPError
+from tornado_swagger_ui.tornado_swagger import swagger
+
+DEFAULT_REPRESENTATION = "application/json"
+HTTP_BAD_REQUEST = 400
+HTTP_FORBIDDEN = 403
+HTTP_NOT_FOUND = 404
+
+swagger.docs()
+
+
+@swagger.model()
+class PropertySubclass:
+    def __init__(self, sub_property=None):
+        self.sub_property = sub_property
+
+
+@swagger.model()
+class Item:
+    """
+    @description:
+        This is an example of a model class that has parameters in its
+        constructor and the fields in the swagger spec are derived from
+        the parameters to __init__.
+    @notes:
+        In this case we would have property1 as a required parameter
+        and property3 as an optional parameter.
+    @property property3: Item description
+    @ptype property3: L{PropertySubclass}
+    @ptype property4: C{list} of L{PropertySubclass}
+    """
+    def __init__(self,
+                 property1,
+                 property2=None,
+                 property3=None,
+                 property4=None):
+        self.property1 = property1
+        self.property2 = property2
+        self.property3 = property3
+        self.property4 = property4
+
+    def format_http(self):
+        return {
+            "property1": self.property1,
+            "property2": self.property2,
+            "property3": self.property3,
+            "property4": self.property4,
+        }
+
+    @staticmethod
+    def item_from_dict(item_dict):
+
+        if item_dict is None:
+            return None
+
+        t = Item(None)
+        t.property1 = item_dict.get('property1')
+        t.property2 = item_dict.get('property2')
+        t.property3 = item_dict.get('property3')
+        t.property4 = item_dict.get('property4')
+
+        return t
+
+    @classmethod
+    def test_classmethod(cls):
+        pass
+
+
+items = {}
+
+
+class GenericApiHandler(RequestHandler):
+    """
+    The purpose of this class is to take benefit of inheritance and prepare
+    a set of common functions for
+    the handlers
+    """
+
+    def initialize(self):
+        """ Prepares the database for the entire class """
+        pass
+
+    def prepare(self):
+        if self.request.method != "GET" and self.request.method != "DELETE":
+            self.json_args = None
+            content_type = self.request.headers.get("Content-Type")
+            if content_type is not None:
+                if content_type.startswith(DEFAULT_REPRESENTATION):
+                    try:
+                        self.json_args = json.loads(self.request.body)
+                    except (ValueError, KeyError, TypeError) as error:
+                        raise HTTPError(HTTP_BAD_REQUEST,
+                                        "Bad Json format [{}]".
+ format(error)) + + def finish_request(self, json_object): + self.write(json.dumps(json_object)) + self.set_header("Content-Type", DEFAULT_REPRESENTATION) + self.finish() + + +class ItemNoParamHandler(GenericApiHandler): + @swagger.operation(nickname='create') + def post(self): + """ + @param body: create a item. + @type body: L{Item} + @in body: body + @return 200: item is created. + @raise 400: invalid input + """ + property1 = self.json_args.get('property1') + item = Item.item_from_dict(self.json_args) + items[property1] = item + Item.test_classmethod() + self.finish_request(item.format_http()) + + @swagger.operation(nickname='list') + def get(self): + """ + @rtype: L{Item} + """ + res = [] + for key, value in items.iteritems(): + res.append(value.format_http()) + self.finish_request(res) + + def options(self): + """ + I'm not visible in the swagger docs + """ + self.finish_request("I'm invisible in the swagger docs") + + +class ItemHandler(GenericApiHandler): + @swagger.operation(nickname='get') + def get(self, arg): + """ + @rtype: L{Item} + @description: get information of a item + @notes: + get a item, + + This will be added to the Implementation Notes. + It lets you put very long text in your api. + """ + self.finish_request(items[arg].format_http()) + + @swagger.operation(nickname='delete') + def delete(self, arg): + """ + @description: delete a item + @notes: + delete a item in items + """ + del items[arg] + self.finish_request("success") + + +class ItemOptionParamHandler(GenericApiHandler): + @swagger.operation(nickname='create') + def post(self, arg1, arg2=''): + """ + @return 200: case is created + """ + fs = open("/home/%s/%s" % (arg1, arg2), "wb") + fs.write(self.request.body) + self.write("success") + + +class ItemQueryHandler(GenericApiHandler): + @swagger.operation(nickname='query') + def get(self): + """ + @param property1: + @type property1: L{string} + @in property1: query + @required property1: False + + @param property2: + @type property2: L{string} + @in property2: query + @required property2: True + @rtype: L{Item} + @notes: GET /item?property1=1&property2=1 + """ + property1 = self.get_query_argument("property1", None) + property2 = self.get_query_argument("property2", None) + + res = [] + if property1 is None: + for key, value in items.iteritems(): + if property2 is None: + res.append(value.format_http()) + elif value.property2 == property2: + res.append(value.format_http()) + elif property1 in items: + if items.get(property1).property2 == property2: + res.append(items.get(property1).format_http()) + + self.finish_request(res) + + +def make_app(): + return swagger.Application([ + (r"/item", ItemQueryHandler), + (r"/items", ItemNoParamHandler), + (r"/items/([^/]+)", ItemHandler), + (r"/items/([^/]+)/cases/([^/]+)", ItemOptionParamHandler), + ]) + + +if __name__ == "__main__": + app = make_app() + app.listen(711) + tornado.ioloop.IOLoop.current().start() diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/setup.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/setup.py new file mode 100644 index 000000000..57dc48a9a --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/setup.py @@ -0,0 +1,30 @@ +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + +with open('README') as f: + long_description = f.read() + +setup(name='tornado-swagger', + version='1.0', + url='https://github.com/SerenaFeng/tornado-swagger', + zip_safe=False, + 
packages=['tornado_swagger'], + package_data={ + 'tornado_swagger': [ + 'static/*.*', + 'static/css/*.*', + 'static/images/*.*', + 'static/lib/*.*', + 'static/lib/shred/*.*' + ] + }, + description='Extract swagger specs from your tornado project', + author='Serena Feng', + license='MIT', + long_description=long_description, + install_requires=[ + 'tornado>=3.1', + 'epydoc>=0.3.1' + ]) diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/__init__.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/__init__.py new file mode 100644 index 000000000..031a4a20e --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +__author__ = 'serena' diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/handlers.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/handlers.py new file mode 100644 index 000000000..8bcb9668f --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/handlers.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from tornado.web import URLSpec, StaticFileHandler + +from settings import default_settings, \ + SWAGGER_API_DOCS, SWAGGER_API_LIST, SWAGGER_API_SPEC +from views import SwaggerUIHandler, SwaggerResourcesHandler, SwaggerApiHandler + +__author__ = 'serena' + + +def swagger_handlers(): + prefix = default_settings.get('swagger_prefix', '/swagger') + if prefix[-1] != '/': + prefix += '/' + + def _path(suffix): + return prefix + suffix + return [ + URLSpec( + _path(r'spec.html$'), + SwaggerUIHandler, + default_settings, + name=SWAGGER_API_DOCS), + URLSpec( + _path(r'spec.json$'), + SwaggerResourcesHandler, + default_settings, + name=SWAGGER_API_LIST), + URLSpec( + _path(r'spec$'), + SwaggerApiHandler, + default_settings, + name=SWAGGER_API_SPEC), + ( + _path(r'(.*\.(css|png|gif|js))'), + StaticFileHandler, + {'path': default_settings.get('static_path')}), + ] diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/settings.py b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/settings.py new file mode 100644 index 000000000..8f43c4a96 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/settings.py @@ -0,0 +1,26 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import os.path + +__author__ = 'serena' + +SWAGGER_VERSION = '1.2' + +SWAGGER_API_DOCS = 'swagger-api-docs' +SWAGGER_API_LIST = 'swagger-api-list' +SWAGGER_API_SPEC = 'swagger-api-spec' + +STATIC_PATH = os.path.join(os.path.dirname(os.path.normpath(__file__)), + 'static') + +default_settings = { + 'base_url': '/', + 'static_path': STATIC_PATH, + 'swagger_prefix': '/swagger', + 'api_version': 'v1.0', + 'api_key': '', + 'enabled_methods': ['get', 'post', 'put', 'patch', 'delete'], + 'exclude_namespaces': [], +} + +models = [] diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/.gitignore b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/.gitignore new file mode 100644 index 000000000..ebf4281dc --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/.gitignore @@ -0,0 +1 @@ +!lib diff --git 
a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/highlight.default.css b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/highlight.default.css new file mode 100644 index 000000000..e417fc179 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/highlight.default.css @@ -0,0 +1,135 @@ +/* + +Original style from softwaremaniacs.org (c) Ivan Sagalaev + +*/ + +pre code { + display: block; padding: 0.5em; + background: #F0F0F0; +} + +pre code, +pre .subst, +pre .tag .title, +pre .lisp .title, +pre .clojure .built_in, +pre .nginx .title { + color: black; +} + +pre .string, +pre .title, +pre .constant, +pre .parent, +pre .tag .value, +pre .rules .value, +pre .rules .value .number, +pre .preprocessor, +pre .ruby .symbol, +pre .ruby .symbol .string, +pre .aggregate, +pre .template_tag, +pre .django .variable, +pre .smalltalk .class, +pre .addition, +pre .flow, +pre .stream, +pre .bash .variable, +pre .apache .tag, +pre .apache .cbracket, +pre .tex .command, +pre .tex .special, +pre .erlang_repl .function_or_atom, +pre .markdown .header { + color: #800; +} + +pre .comment, +pre .annotation, +pre .template_comment, +pre .diff .header, +pre .chunk, +pre .markdown .blockquote { + color: #888; +} + +pre .number, +pre .date, +pre .regexp, +pre .literal, +pre .smalltalk .symbol, +pre .smalltalk .char, +pre .go .constant, +pre .change, +pre .markdown .bullet, +pre .markdown .link_url { + color: #080; +} + +pre .label, +pre .javadoc, +pre .ruby .string, +pre .decorator, +pre .filter .argument, +pre .localvars, +pre .array, +pre .attr_selector, +pre .important, +pre .pseudo, +pre .pi, +pre .doctype, +pre .deletion, +pre .envvar, +pre .shebang, +pre .apache .sqbracket, +pre .nginx .built_in, +pre .tex .formula, +pre .erlang_repl .reserved, +pre .prompt, +pre .markdown .link_label, +pre .vhdl .attribute, +pre .clojure .attribute, +pre .coffeescript .property { + color: #88F +} + +pre .keyword, +pre .id, +pre .phpdoc, +pre .title, +pre .built_in, +pre .aggregate, +pre .css .tag, +pre .javadoctag, +pre .phpdoc, +pre .yardoctag, +pre .smalltalk .class, +pre .winutils, +pre .bash .variable, +pre .apache .tag, +pre .go .typename, +pre .tex .command, +pre .markdown .strong, +pre .request, +pre .status { + font-weight: bold; +} + +pre .markdown .emphasis { + font-style: italic; +} + +pre .nginx .built_in { + font-weight: normal; +} + +pre .coffeescript .javascript, +pre .javascript .xml, +pre .tex .formula, +pre .xml .javascript, +pre .xml .vbscript, +pre .xml .css, +pre .xml .cdata { + opacity: 0.5; +} diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/hightlight.default.css b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/hightlight.default.css new file mode 100644 index 000000000..e417fc179 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/hightlight.default.css @@ -0,0 +1,135 @@ +/* + +Original style from softwaremaniacs.org (c) Ivan Sagalaev + +*/ + +pre code { + display: block; padding: 0.5em; + background: #F0F0F0; +} + +pre code, +pre .subst, +pre .tag .title, +pre .lisp .title, +pre .clojure .built_in, +pre .nginx .title { + color: black; +} + +pre .string, +pre .title, +pre .constant, +pre .parent, +pre .tag .value, +pre .rules .value, +pre .rules .value .number, +pre .preprocessor, +pre .ruby 
.symbol, +pre .ruby .symbol .string, +pre .aggregate, +pre .template_tag, +pre .django .variable, +pre .smalltalk .class, +pre .addition, +pre .flow, +pre .stream, +pre .bash .variable, +pre .apache .tag, +pre .apache .cbracket, +pre .tex .command, +pre .tex .special, +pre .erlang_repl .function_or_atom, +pre .markdown .header { + color: #800; +} + +pre .comment, +pre .annotation, +pre .template_comment, +pre .diff .header, +pre .chunk, +pre .markdown .blockquote { + color: #888; +} + +pre .number, +pre .date, +pre .regexp, +pre .literal, +pre .smalltalk .symbol, +pre .smalltalk .char, +pre .go .constant, +pre .change, +pre .markdown .bullet, +pre .markdown .link_url { + color: #080; +} + +pre .label, +pre .javadoc, +pre .ruby .string, +pre .decorator, +pre .filter .argument, +pre .localvars, +pre .array, +pre .attr_selector, +pre .important, +pre .pseudo, +pre .pi, +pre .doctype, +pre .deletion, +pre .envvar, +pre .shebang, +pre .apache .sqbracket, +pre .nginx .built_in, +pre .tex .formula, +pre .erlang_repl .reserved, +pre .prompt, +pre .markdown .link_label, +pre .vhdl .attribute, +pre .clojure .attribute, +pre .coffeescript .property { + color: #88F +} + +pre .keyword, +pre .id, +pre .phpdoc, +pre .title, +pre .built_in, +pre .aggregate, +pre .css .tag, +pre .javadoctag, +pre .phpdoc, +pre .yardoctag, +pre .smalltalk .class, +pre .winutils, +pre .bash .variable, +pre .apache .tag, +pre .go .typename, +pre .tex .command, +pre .markdown .strong, +pre .request, +pre .status { + font-weight: bold; +} + +pre .markdown .emphasis { + font-style: italic; +} + +pre .nginx .built_in { + font-weight: normal; +} + +pre .coffeescript .javascript, +pre .javascript .xml, +pre .tex .formula, +pre .xml .javascript, +pre .xml .vbscript, +pre .xml .css, +pre .xml .cdata { + opacity: 0.5; +} diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/screen.css b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/screen.css new file mode 100644 index 000000000..2882b8d66 --- /dev/null +++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/css/screen.css @@ -0,0 +1,1224 @@ +/* http://meyerweb.com/eric/tools/css/reset/ v2.0 | 20110126 */ +html, +body, +div, +span, +applet, +object, +iframe, +h1, +h2, +h3, +h4, +h5, +h6, +p, +blockquote, +pre, +a, +abbr, +acronym, +address, +big, +cite, +code, +del, +dfn, +em, +img, +ins, +kbd, +q, +s, +samp, +small, +strike, +strong, +sub, +sup, +tt, +var, +b, +u, +i, +center, +dl, +dt, +dd, +ol, +ul, +li, +fieldset, +form, +label, +legend, +table, +caption, +tbody, +tfoot, +thead, +tr, +th, +td, +article, +aside, +canvas, +details, +embed, +figure, +figcaption, +footer, +header, +hgroup, +menu, +nav, +output, +ruby, +section, +summary, +time, +mark, +audio, +video { + margin: 0; + padding: 0; + border: 0; + font-size: 100%; + font: inherit; + vertical-align: baseline; +} +/* HTML5 display-role reset for older browsers */ +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +menu, +nav, +section { + display: block; +} +body { + line-height: 1; +} +ol, +ul { + list-style: none; +} +blockquote, +q { + quotes: none; +} +blockquote:before, +blockquote:after, +q:before, +q:after { + content: ''; + content: none; +} +table { + border-collapse: collapse; + border-spacing: 0; +} +.swagger-ui-wrap { + line-height: 1; + font-family: "Droid Sans", sans-serif; + max-width: 960px; + margin-left: auto; + margin-right: auto; +} 
+.swagger-ui-wrap b, +.swagger-ui-wrap strong { + font-family: "Droid Sans", sans-serif; + font-weight: bold; +} +.swagger-ui-wrap q, +.swagger-ui-wrap blockquote { + quotes: none; +} +.swagger-ui-wrap p { + line-height: 1.4em; + padding: 0 0 10px; + color: #333333; +} +.swagger-ui-wrap q:before, +.swagger-ui-wrap q:after, +.swagger-ui-wrap blockquote:before, +.swagger-ui-wrap blockquote:after { + content: none; +} +.swagger-ui-wrap .heading_with_menu h1, +.swagger-ui-wrap .heading_with_menu h2, +.swagger-ui-wrap .heading_with_menu h3, +.swagger-ui-wrap .heading_with_menu h4, +.swagger-ui-wrap .heading_with_menu h5, +.swagger-ui-wrap .heading_with_menu h6 { + display: block; + clear: none; + float: left; + -moz-box-sizing: border-box; + -webkit-box-sizing: border-box; + -ms-box-sizing: border-box; + box-sizing: border-box; + width: 60%; +} +.swagger-ui-wrap table { + border-collapse: collapse; + border-spacing: 0; +} +.swagger-ui-wrap table thead tr th { + padding: 5px; + font-size: 0.9em; + color: #666666; + border-bottom: 1px solid #999999; +} +.swagger-ui-wrap table tbody tr:last-child td { + border-bottom: none; +} +.swagger-ui-wrap table tbody tr.offset { + background-color: #f0f0f0; +} +.swagger-ui-wrap table tbody tr td { + padding: 6px; + font-size: 0.9em; + border-bottom: 1px solid #cccccc; + vertical-align: top; + line-height: 1.3em; +} +.swagger-ui-wrap ol { + margin: 0px 0 10px; + padding: 0 0 0 18px; + list-style-type: decimal; +} +.swagger-ui-wrap ol li { + padding: 5px 0px; + font-size: 0.9em; + color: #333333; +} +.swagger-ui-wrap ol, +.swagger-ui-wrap ul { + list-style: none; +} +.swagger-ui-wrap h1 a, +.swagger-ui-wrap h2 a, +.swagger-ui-wrap h3 a, +.swagger-ui-wrap h4 a, +.swagger-ui-wrap h5 a, +.swagger-ui-wrap h6 a { + text-decoration: none; +} +.swagger-ui-wrap h1 a:hover, +.swagger-ui-wrap h2 a:hover, +.swagger-ui-wrap h3 a:hover, +.swagger-ui-wrap h4 a:hover, +.swagger-ui-wrap h5 a:hover, +.swagger-ui-wrap h6 a:hover { + text-decoration: underline; +} +.swagger-ui-wrap h1 span.divider, +.swagger-ui-wrap h2 span.divider, +.swagger-ui-wrap h3 span.divider, +.swagger-ui-wrap h4 span.divider, +.swagger-ui-wrap h5 span.divider, +.swagger-ui-wrap h6 span.divider { + color: #aaaaaa; +} +.swagger-ui-wrap a { + color: #547f00; +} +.swagger-ui-wrap a img { + border: none; +} +.swagger-ui-wrap article, +.swagger-ui-wrap aside, +.swagger-ui-wrap details, +.swagger-ui-wrap figcaption, +.swagger-ui-wrap figure, +.swagger-ui-wrap footer, +.swagger-ui-wrap header, +.swagger-ui-wrap hgroup, +.swagger-ui-wrap menu, +.swagger-ui-wrap nav, +.swagger-ui-wrap section, +.swagger-ui-wrap summary { + display: block; +} +.swagger-ui-wrap pre { + font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace; + background-color: #fcf6db; + border: 1px solid #e5e0c6; + padding: 10px; +} +.swagger-ui-wrap pre code { + line-height: 1.6em; + background: none; +} +.swagger-ui-wrap .content > .content-type > div > label { + clear: both; + display: block; + color: #0F6AB4; + font-size: 1.1em; + margin: 0; + padding: 15px 0 5px; +} +.swagger-ui-wrap .content pre { + font-size: 12px; + margin-top: 5px; + padding: 5px; +} +.swagger-ui-wrap .icon-btn { + cursor: pointer; +} +.swagger-ui-wrap .info_title { + padding-bottom: 10px; + font-weight: bold; + font-size: 25px; +} +.swagger-ui-wrap p.big, +.swagger-ui-wrap div.big p { + font-size: 1em; + margin-bottom: 10px; +} +.swagger-ui-wrap form.fullwidth ol li.string input, +.swagger-ui-wrap form.fullwidth ol li.url 
input,
+.swagger-ui-wrap form.fullwidth ol li.text textarea,
+.swagger-ui-wrap form.fullwidth ol li.numeric input {
+  width: 500px !important;
+}
+.swagger-ui-wrap .info_license {
+  padding-bottom: 5px;
+}
+.swagger-ui-wrap .info_tos {
+  padding-bottom: 5px;
+}
+.swagger-ui-wrap .message-fail {
+  color: #cc0000;
+}
+.swagger-ui-wrap .info_contact {
+  padding-bottom: 5px;
+}
+.swagger-ui-wrap .info_description {
+  padding-bottom: 10px;
+  font-size: 15px;
+}
+.swagger-ui-wrap .markdown ol li,
+.swagger-ui-wrap .markdown ul li {
+  padding: 3px 0px;
+  line-height: 1.4em;
+  color: #333333;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.string input,
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.url input,
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.numeric input {
+  display: block;
+  padding: 4px;
+  width: auto;
+  clear: both;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.string input.title,
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.url input.title,
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.numeric input.title {
+  font-size: 1.3em;
+}
+.swagger-ui-wrap table.fullwidth {
+  width: 100%;
+}
+.swagger-ui-wrap .model-signature {
+  font-family: "Droid Sans", sans-serif;
+  font-size: 1em;
+  line-height: 1.5em;
+}
+.swagger-ui-wrap .model-signature .signature-nav a {
+  text-decoration: none;
+  color: #AAA;
+}
+.swagger-ui-wrap .model-signature .signature-nav a:hover {
+  text-decoration: underline;
+  color: black;
+}
+.swagger-ui-wrap .model-signature .signature-nav .selected {
+  color: black;
+  text-decoration: none;
+}
+.swagger-ui-wrap .model-signature .propType {
+  color: #5555aa;
+}
+.swagger-ui-wrap .model-signature pre:hover {
+  background-color: #ffffdd;
+}
+.swagger-ui-wrap .model-signature pre {
+  font-size: .85em;
+  line-height: 1.2em;
+  overflow: auto;
+  max-height: 200px;
+  cursor: pointer;
+}
+.swagger-ui-wrap .model-signature ul.signature-nav {
+  display: block;
+  margin: 0;
+  padding: 0;
+}
+.swagger-ui-wrap .model-signature ul.signature-nav li:last-child {
+  padding-right: 0;
+  border-right: none;
+}
+.swagger-ui-wrap .model-signature ul.signature-nav li {
+  float: left;
+  margin: 0 5px 5px 0;
+  padding: 2px 5px 2px 0;
+  border-right: 1px solid #ddd;
+}
+.swagger-ui-wrap .model-signature .propOpt {
+  color: #555;
+}
+.swagger-ui-wrap .model-signature .snippet small {
+  font-size: 0.75em;
+}
+.swagger-ui-wrap .model-signature .propOptKey {
+  font-style: italic;
+}
+.swagger-ui-wrap .model-signature .description .strong {
+  font-weight: bold;
+  color: #000;
+  font-size: .9em;
+}
+.swagger-ui-wrap .model-signature .description div {
+  font-size: 0.9em;
+  line-height: 1.5em;
+  margin-left: 1em;
+}
+.swagger-ui-wrap .model-signature .description .stronger {
+  font-weight: bold;
+  color: #000;
+}
+.swagger-ui-wrap .model-signature .propName {
+  font-weight: bold;
+}
+.swagger-ui-wrap .model-signature .signature-container {
+  clear: both;
+}
+.swagger-ui-wrap .body-textarea {
+  width: 300px;
+  height: 100px;
+  border: 1px solid #aaa;
+}
+.swagger-ui-wrap .markdown p code,
+.swagger-ui-wrap .markdown li code {
+  font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace;
+  background-color: #f0f0f0;
+  color: black;
+  padding: 1px 3px;
+}
+.swagger-ui-wrap .required {
+  font-weight: bold;
+}
+.swagger-ui-wrap input.parameter {
+  width: 300px;
+  border: 1px solid #aaa;
+}
+.swagger-ui-wrap h1 {
+  color: black;
+  font-size: 1.5em;
+  line-height: 1.3em;
+  padding: 10px 0 10px 0;
+  font-family: "Droid Sans", sans-serif;
+  font-weight: bold;
+}
+.swagger-ui-wrap .heading_with_menu {
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+}
+.swagger-ui-wrap .heading_with_menu ul {
+  display: block;
+  clear: none;
+  float: right;
+  -moz-box-sizing: border-box;
+  -webkit-box-sizing: border-box;
+  -ms-box-sizing: border-box;
+  box-sizing: border-box;
+  margin-top: 10px;
+}
+.swagger-ui-wrap h2 {
+  color: black;
+  font-size: 1.3em;
+  padding: 10px 0 10px 0;
+}
+.swagger-ui-wrap h2 a {
+  color: black;
+}
+.swagger-ui-wrap h2 span.sub {
+  font-size: 0.7em;
+  color: #999999;
+  font-style: italic;
+}
+.swagger-ui-wrap h2 span.sub a {
+  color: #777777;
+}
+.swagger-ui-wrap span.weak {
+  color: #666666;
+}
+.swagger-ui-wrap .message-success {
+  color: #89BF04;
+}
+.swagger-ui-wrap caption,
+.swagger-ui-wrap th,
+.swagger-ui-wrap td {
+  text-align: left;
+  font-weight: normal;
+  vertical-align: middle;
+}
+.swagger-ui-wrap .code {
+  font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.text textarea {
+  font-family: "Droid Sans", sans-serif;
+  height: 250px;
+  padding: 4px;
+  display: block;
+  clear: both;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.select select {
+  display: block;
+  clear: both;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean {
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean label {
+  display: block;
+  float: left;
+  clear: none;
+  margin: 0;
+  padding: 0;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.boolean input {
+  display: block;
+  float: left;
+  clear: none;
+  margin: 0 5px 0 0;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li.required label {
+  color: black;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li label {
+  display: block;
+  clear: both;
+  width: auto;
+  padding: 0 0 3px;
+  color: #666666;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li label abbr {
+  padding-left: 3px;
+  color: #888888;
+}
+.swagger-ui-wrap form.formtastic fieldset.inputs ol li p.inline-hints {
+  margin-left: 0;
+  font-style: italic;
+  font-size: 0.9em;
+  margin: 0;
+}
+.swagger-ui-wrap form.formtastic fieldset.buttons {
+  margin: 0;
+  padding: 0;
+}
+.swagger-ui-wrap span.blank,
+.swagger-ui-wrap span.empty {
+  color: #888888;
+  font-style: italic;
+}
+.swagger-ui-wrap .markdown h3 {
+  color: #547f00;
+}
+.swagger-ui-wrap .markdown h4 {
+  color: #666666;
+}
+.swagger-ui-wrap .markdown pre {
+  font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace;
+  background-color: #fcf6db;
+  border: 1px solid #e5e0c6;
+  padding: 10px;
+  margin: 0 0 10px 0;
+}
+.swagger-ui-wrap .markdown pre code {
+  line-height: 1.6em;
+}
+.swagger-ui-wrap div.gist {
+  margin: 20px 0 25px 0 !important;
+}
+.swagger-ui-wrap ul#resources {
+  font-family: "Droid Sans", sans-serif;
+  font-size: 0.9em;
+}
+.swagger-ui-wrap ul#resources li.resource {
+  border-bottom: 1px solid #dddddd;
+}
+.swagger-ui-wrap ul#resources li.resource:hover div.heading h2 a,
+.swagger-ui-wrap ul#resources li.resource.active div.heading h2 a {
+  color: black;
+}
+.swagger-ui-wrap ul#resources li.resource:hover div.heading ul.options li a,
+.swagger-ui-wrap ul#resources li.resource.active div.heading ul.options li a {
+  color: #555555;
+}
+.swagger-ui-wrap ul#resources li.resource:last-child {
+  border-bottom: none;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading {
+  border: 1px solid transparent;
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options {
+  overflow: hidden;
+  padding: 0;
+  display: block;
+  clear: none;
+  float: right;
+  margin: 14px 10px 0 0;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li {
+  float: left;
+  clear: none;
+  margin: 0;
+  padding: 2px 10px;
+  border-right: 1px solid #dddddd;
+  color: #666666;
+  font-size: 0.9em;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li a {
+  color: #aaaaaa;
+  text-decoration: none;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:hover {
+  text-decoration: underline;
+  color: black;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:hover,
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li a:active,
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li a.active {
+  text-decoration: underline;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li:first-child,
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li.first {
+  padding-left: 0;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options li.last {
+  padding-right: 0;
+  border-right: none;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options:first-child,
+.swagger-ui-wrap ul#resources li.resource div.heading ul.options.first {
+  padding-left: 0;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading h2 {
+  color: #999999;
+  padding-left: 0;
+  display: block;
+  clear: none;
+  float: left;
+  font-family: "Droid Sans", sans-serif;
+  font-weight: bold;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading h2 a {
+  color: #999999;
+}
+.swagger-ui-wrap ul#resources li.resource div.heading h2 a:hover {
+  color: black;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation {
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+  margin: 0 0 10px;
+  padding: 0;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading {
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+  margin: 0;
+  padding: 0;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 {
+  display: block;
+  clear: none;
+  float: left;
+  width: auto;
+  margin: 0;
+  padding: 0;
+  line-height: 1.1em;
+  color: black;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path {
+  padding-left: 10px;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path a {
+  color: black;
+  text-decoration: none;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.path a:hover {
+  text-decoration: underline;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span.http_method a {
+  text-transform: uppercase;
+  text-decoration: none;
+  color: white;
+  display: inline-block;
+  width: 50px;
+  font-size: 0.7em;
+  text-align: center;
+  padding: 7px 0 4px;
+  -moz-border-radius: 2px;
+  -webkit-border-radius: 2px;
+  -o-border-radius: 2px;
+  -ms-border-radius: 2px;
+  -khtml-border-radius: 2px;
+  border-radius: 2px;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading h3 span {
+  margin: 0;
+  padding: 0;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options {
+  overflow: hidden;
+  padding: 0;
+  display: block;
+  clear: none;
+  float: right;
+  margin: 6px 10px 0 0;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li {
+  float: left;
+  clear: none;
+  margin: 0;
+  padding: 2px 10px;
+  font-size: 0.9em;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li a {
+  text-decoration: none;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.heading ul.options li.access {
+  color: black;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content {
+  border-top: none;
+  padding: 10px;
+  -moz-border-radius-bottomleft: 6px;
+  -webkit-border-bottom-left-radius: 6px;
+  -o-border-bottom-left-radius: 6px;
+  -ms-border-bottom-left-radius: 6px;
+  -khtml-border-bottom-left-radius: 6px;
+  border-bottom-left-radius: 6px;
+  -moz-border-radius-bottomright: 6px;
+  -webkit-border-bottom-right-radius: 6px;
+  -o-border-bottom-right-radius: 6px;
+  -ms-border-bottom-right-radius: 6px;
+  -khtml-border-bottom-right-radius: 6px;
+  border-bottom-right-radius: 6px;
+  margin: 0 0 20px;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content h4 {
+  font-size: 1.1em;
+  margin: 0;
+  padding: 15px 0 5px;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header {
+  float: none;
+  clear: both;
+  overflow: hidden;
+  display: block;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header a {
+  padding: 4px 0 0 10px;
+  display: inline-block;
+  font-size: 0.9em;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header img {
+  display: block;
+  clear: none;
+  float: right;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.sandbox_header input.submit {
+  display: block;
+  clear: none;
+  float: left;
+  padding: 6px 8px;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content form input[type='text'].error {
+  outline: 2px solid black;
+  outline-color: #cc0000;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation div.content div.response div.block pre {
+  font-family: "Anonymous Pro", "Menlo", "Consolas", "Bitstream Vera Sans Mono", "Courier New", monospace;
+  padding: 10px;
+  font-size: 0.9em;
+  max-height: 400px;
+  overflow-y: auto;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading {
+  background-color: #f9f2e9;
+  border: 1px solid #f0e0ca;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading h3 span.http_method a {
+  background-color: #c5862b;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #f0e0ca;
+  color: #c5862b;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li a {
+  color: #c5862b;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content {
+  background-color: #faf5ee;
+  border: 1px solid #f0e0ca;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content h4 {
+  color: #c5862b;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content div.sandbox_header a {
+  color: #dcb67f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading {
+  background-color: #fcffcd;
+  border: 1px solid black;
+  border-color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading h3 span.http_method a {
+  text-transform: uppercase;
+  background-color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #ffd20f;
+  color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li a {
+  color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content {
+  background-color: #fcffcd;
+  border: 1px solid black;
+  border-color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content h4 {
+  color: #ffd20f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content div.sandbox_header a {
+  color: #6fc992;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading {
+  background-color: #f5e8e8;
+  border: 1px solid #e8c6c7;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading h3 span.http_method a {
+  text-transform: uppercase;
+  background-color: #a41e22;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #e8c6c7;
+  color: #a41e22;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li a {
+  color: #a41e22;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content {
+  background-color: #f7eded;
+  border: 1px solid #e8c6c7;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content h4 {
+  color: #a41e22;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content div.sandbox_header a {
+  color: #c8787a;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading {
+  background-color: #e7f6ec;
+  border: 1px solid #c3e8d1;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading h3 span.http_method a {
+  background-color: #10a54a;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #c3e8d1;
+  color: #10a54a;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li a {
+  color: #10a54a;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content {
+  background-color: #ebf7f0;
+  border: 1px solid #c3e8d1;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content h4 {
+  color: #10a54a;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content div.sandbox_header a {
+  color: #6fc992;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading {
+  background-color: #FCE9E3;
+  border: 1px solid #F5D5C3;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading h3 span.http_method a {
+  background-color: #D38042;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #f0cecb;
+  color: #D38042;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li a {
+  color: #D38042;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content {
+  background-color: #faf0ef;
+  border: 1px solid #f0cecb;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content h4 {
+  color: #D38042;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content div.sandbox_header a {
+  color: #dcb67f;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading {
+  background-color: #e7f0f7;
+  border: 1px solid #c3d9ec;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading h3 span.http_method a {
+  background-color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #c3d9ec;
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li a {
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content {
+  background-color: #ebf3f9;
+  border: 1px solid #c3d9ec;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content h4 {
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content div.sandbox_header a {
+  color: #6fa5d2;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading {
+  background-color: #e7f0f7;
+  border: 1px solid #c3d9ec;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading h3 span.http_method a {
+  background-color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading ul.options li {
+  border-right: 1px solid #dddddd;
+  border-right-color: #c3d9ec;
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.heading ul.options li a {
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content {
+  background-color: #ebf3f9;
+  border: 1px solid #c3d9ec;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content h4 {
+  color: #0f6ab4;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.options div.content div.sandbox_header a {
+  color: #6fa5d2;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.content,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.content,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.content,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.content,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.content,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.content {
+  border-top: none;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li:last-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.get div.heading ul.options li.last,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.post div.heading ul.options li.last,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.head div.heading ul.options li.last,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.put div.heading ul.options li.last,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.patch div.heading ul.options li.last,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations li.operation.delete div.heading ul.options li.last {
+  padding-right: 0;
+  border-right: none;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a:hover,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a:active,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li a.active {
+  text-decoration: underline;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li:first-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations ul.options li.first {
+  padding-left: 0;
+}
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations:first-child,
+.swagger-ui-wrap ul#resources li.resource ul.endpoints li.endpoint ul.operations.first {
+  padding-left: 0;
+}
+.swagger-ui-wrap p#colophon {
+  margin: 0 15px 40px 15px;
+  padding: 10px 0;
+  font-size: 0.8em;
+  border-top: 1px solid #dddddd;
+  font-family: "Droid Sans", sans-serif;
+  color: #999999;
+  font-style: italic;
+}
+.swagger-ui-wrap p#colophon a {
+  text-decoration: none;
+  color: #547f00;
+}
+.swagger-ui-wrap h3 {
+  color: black;
+  font-size: 1.1em;
+  padding: 10px 0 10px 0;
+}
+.swagger-ui-wrap .markdown ol,
+.swagger-ui-wrap .markdown ul {
+  font-family: "Droid Sans", sans-serif;
+  margin: 5px 0 10px;
+  padding: 0 0 0 18px;
+  list-style-type: disc;
+}
+.swagger-ui-wrap form.form_box {
+  background-color: #ebf3f9;
+  border: 1px solid #c3d9ec;
+  padding: 10px;
+}
+.swagger-ui-wrap form.form_box label {
+  color: #0f6ab4 !important;
+}
+.swagger-ui-wrap form.form_box input[type=submit] {
+  display: block;
+  padding: 10px;
+}
+.swagger-ui-wrap form.form_box p.weak {
+  font-size: 0.8em;
+}
+.swagger-ui-wrap form.form_box p {
+  font-size: 0.9em;
+  padding: 0 0 15px;
+  color: #7e7b6d;
+}
+.swagger-ui-wrap form.form_box p a {
+  color: #646257;
+}
+.swagger-ui-wrap form.form_box p strong {
+  color: black;
+}
+.title {
+  font-style: bold;
+}
+.secondary_form {
+  display: none;
+}
+.main_image {
+  display: block;
+  margin-left: auto;
+  margin-right: auto;
+}
+.oauth_body {
+  margin-left: 100px;
+  margin-right: 100px;
+}
+.oauth_submit {
+  text-align: center;
+}
+.api-popup-dialog {
+  z-index: 10000;
+  position: absolute;
+  width: 500px;
+  background: #FFF;
+  padding: 20px;
+  border: 1px solid #ccc;
+  border-radius: 5px;
+  display: none;
+  font-size: 13px;
+  color: #777;
+}
+.api-popup-dialog .api-popup-title {
+  font-size: 24px;
+  padding: 10px 0;
+}
+.api-popup-dialog .api-popup-title {
+  font-size: 24px;
+  padding: 10px 0;
+}
+.api-popup-dialog p.error-msg {
+  padding-left: 5px;
+  padding-bottom: 5px;
+}
+.api-popup-dialog button.api-popup-authbtn {
+  height: 30px;
+}
+.api-popup-dialog button.api-popup-cancel {
+  height: 30px;
+}
+.api-popup-scopes {
+  padding: 10px 20px;
+}
+.api-popup-scopes li {
+  padding: 5px 0;
+  line-height: 20px;
+}
+.api-popup-scopes .api-scope-desc {
+  padding-left: 20px;
+  font-style: italic;
+}
+.api-popup-scopes li input {
+  position: relative;
+  top: 2px;
+}
+.api-popup-actions {
+  padding-top: 10px;
+}
+.access {
+  float: right;
+}
+.auth {
+  float: right;
+}
+#api_information_panel {
+  position: absolute;
+  background: #FFF;
+  border: 1px solid #ccc;
+  border-radius: 5px;
+  display: none;
+  font-size: 13px;
+  max-width: 300px;
+  line-height: 30px;
+  color: black;
+  padding: 5px;
+}
+#api_information_panel p .api-msg-enabled {
+  color: green;
+}
+#api_information_panel p .api-msg-disabled {
+  color: red;
+}
+.api-ic {
+  height: 18px;
+  vertical-align: middle;
+  display: inline-block;
+  background: url(../images/explorer_icons.png) no-repeat;
+}
+.ic-info {
+  background-position: 0 0;
+  width: 18px;
+  margin-top: -7px;
+  margin-left: 4px;
+}
+.ic-warning {
+  background-position: -60px 0;
+  width: 18px;
+  margin-top: -7px;
+  margin-left: 4px;
+}
+.ic-error {
+  background-position: -30px 0;
+  width: 18px;
+  margin-top: -7px;
+  margin-left: 4px;
+}
+.ic-off {
+  background-position: -90px 0;
+  width: 58px;
+  margin-top: -4px;
+  cursor: pointer;
+}
+.ic-on {
+  background-position: -160px 0;
+  width: 58px;
+  margin-top: -4px;
+  cursor: pointer;
+}
+#header {
+  background-color: #89bf04;
+  padding: 14px;
+}
+#header a#logo {
+  font-size: 1.5em;
+  font-weight: bold;
+  text-decoration: none;
+  background: transparent url(../images/logo_small.png) no-repeat left center;
+  padding: 20px 0 20px 40px;
+  color: white;
+}
+#header form#api_selector {
+  display: block;
+  clear: none;
+  float: right;
+}
+#header form#api_selector .input {
+  display: block;
+  clear: none;
+  float: left;
+  margin: 0 10px 0 0;
+}
+#header form#api_selector .input input#input_apiKey {
+  width: 200px;
+}
+#header form#api_selector .input input#input_baseUrl {
+  width: 400px;
+}
+#header form#api_selector .input a#explore {
+  display: block;
+  text-decoration: none;
+  font-weight: bold;
+  padding: 6px 8px;
+  font-size: 0.9em;
+  color: white;
+  background-color: #547f00;
+  -moz-border-radius: 4px;
+  -webkit-border-radius: 4px;
+  -o-border-radius: 4px;
+  -ms-border-radius: 4px;
+  -khtml-border-radius: 4px;
+  border-radius: 4px;
+}
+#header form#api_selector .input a#explore:hover {
+  background-color: #547f00;
+}
+#header form#api_selector .input input {
+  font-size: 0.9em;
+  padding: 3px;
+  margin: 0;
+}
+#content_message {
+  margin: 10px 15px;
+  font-style: italic;
+  color: #999999;
+}
+#message-bar {
+  min-height: 30px;
+  text-align: center;
+  padding-top: 10px;
+}
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/endpoint.html b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/endpoint.html
new file mode 100644
index 000000000..4ae3bde0c
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/endpoint.html
@@ -0,0 +1,77 @@
+
+
+
+
+
+ Api Docs for {{path}}
+
+
+
+
+
+
+
+
+

{{path}}

+

{{description if description != None}}

+
+
+ {% for operation in operations %}
+
+
+

{{operation.method}}

+

{{operation.summary if operation.summary != None}}

+
+
+ {% if operation.parameters %}
+

Parameters

+
+ {% for parameter in operation.parameters %}
+
+ {{parameter.name}}
+ {% if parameter.description %}
+ - {{parameter.description}}
+ {% endif %}
+
+
Type: {{parameter.dataType}}
+
Allow Multiple: {{parameter.allowMultiple}}
+
Required: {{parameter.required}}
+ {% endfor %}
+
+ {% endif %}
+ {% if operation.notes %}
+

Implementation notes: {{operation.notes}}

+
+ {% endif %}
+ {% if operation.responseClass %}
+

Response Class: {{operation.responseClass}}

+
+ {% endif %}
+
+
+
+ {% endfor %}
+
+
+
+
+
+
+
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/explorer_icons.png b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/explorer_icons.png
new file mode 100644
index 000000000..ed9d2fffb
Binary files /dev/null and b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/explorer_icons.png differ
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/logo_small.png b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/logo_small.png
new file mode 100644
index 000000000..5496a6557
Binary files /dev/null and b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/logo_small.png differ
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/pet_store_api.png b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/pet_store_api.png
new file mode 100644
index 000000000..f9f9cd4ae
Binary files /dev/null and b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/pet_store_api.png differ
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/throbber.gif b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/throbber.gif
new file mode 100644
index 000000000..063938892
Binary files /dev/null and b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/throbber.gif differ
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/wordnik_api.png b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/wordnik_api.png
new file mode 100644
index 000000000..dca4f1455
Binary files /dev/null and b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/images/wordnik_api.png differ
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/index.html b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/index.html
new file mode 100644
index 000000000..db209f4a8
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/index.html
@@ -0,0 +1,85 @@
+
+
+
+ Swagger UI
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 
+
+
+
diff --git a/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/backbone-min.js b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/backbone-min.js
new file mode 100644
index 000000000..c1c0d4fff
--- /dev/null
+++ b/utils/test/result_collection_api/opnfv_testapi/tornado_swagger_ui/tornado_swagger/static/lib/backbone-min.js
@@ -0,0 +1,38 @@
+// Backbone.js 0.9.2
+
+// (c) 2010-2012 Jeremy Ashkenas, DocumentCloud Inc.
+// Backbone may be freely distributed under the MIT license.
+// For all details and documentation:
+// http://backbonejs.org
+(function(){var l=this,y=l.Backbone,z=Array.prototype.slice,A=Array.prototype.splice,g;g="undefined"!==typeof exports?exports:l.Backbone={};g.VERSION="0.9.2";var f=l._;!f&&"undefined"!==typeof require&&(f=require("underscore"));var i=l.jQuery||l.Zepto||l.ender;g.setDomLibrary=function(a){i=a};g.noConflict=function(){l.Backbone=y;return this};g.emulateHTTP=!1;g.emulateJSON=!1;var p=/\s+/,k=g.Events={on:function(a,b,c){var d,e,f,g,j;if(!b)return this;a=a.split(p);for(d=this._callbacks||(this._callbacks=
+{});e=a.shift();)f=(j=d[e])?j.tail:{},f.next=g={},f.context=c,f.callback=b,d[e]={tail:g,next:j?j.next:f};return this},off:function(a,b,c){var d,e,h,g,j,q;if(e=this._callbacks){if(!a&&!b&&!c)return delete this._callbacks,this;for(a=a?a.split(p):f.keys(e);d=a.shift();)if(h=e[d],delete e[d],h&&(b||c))for(g=h.tail;(h=h.next)!==g;)if(j=h.callback,q=h.context,b&&j!==b||c&&q!==c)this.on(d,j,q);return this}},trigger:function(a){var b,c,d,e,f,g;if(!(d=this._callbacks))return this;f=d.all;a=a.split(p);for(g=
+z.call(arguments,1);b=a.shift();){if(c=d[b])for(e=c.tail;(c=c.next)!==e;)c.callback.apply(c.context||this,g);if(c=f){e=c.tail;for(b=[b].concat(g);(c=c.next)!==e;)c.callback.apply(c.context||this,b)}}return this}};k.bind=k.on;k.unbind=k.off;var o=g.Model=function(a,b){var c;a||(a={});b&&b.parse&&(a=this.parse(a));if(c=n(this,"defaults"))a=f.extend({},c,a);b&&b.collection&&(this.collection=b.collection);this.attributes={};this._escapedAttributes={};this.cid=f.uniqueId("c");this.changed={};this._silent=
+{};this._pending={};this.set(a,{silent:!0});this.changed={};this._silent={};this._pending={};this._previousAttributes=f.clone(this.attributes);this.initialize.apply(this,arguments)};f.extend(o.prototype,k,{changed:null,_silent:null,_pending:null,idAttribute:"id",initialize:function(){},toJSON:function(){return f.clone(this.attributes)},get:function(a){return this.attributes[a]},escape:function(a){var b;if(b=this._escapedAttributes[a])return b;b=this.get(a);return this._escapedAttributes[a]=f.escape(null==
+b?"":""+b)},has:function(a){return null!=this.get(a)},set:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c||(c={});if(!d)return this;d instanceof o&&(d=d.attributes);if(c.unset)for(e in d)d[e]=void 0;if(!this._validate(d,c))return!1;this.idAttribute in d&&(this.id=d[this.idAttribute]);var b=c.changes={},h=this.attributes,g=this._escapedAttributes,j=this._previousAttributes||{};for(e in d){a=d[e];if(!f.isEqual(h[e],a)||c.unset&&f.has(h,e))delete g[e],(c.silent?this._silent:
+b)[e]=!0;c.unset?delete h[e]:h[e]=a;!f.isEqual(j[e],a)||f.has(h,e)!=f.has(j,e)?(this.changed[e]=a,c.silent||(this._pending[e]=!0)):(delete this.changed[e],delete this._pending[e])}c.silent||this.change(c);return this},unset:function(a,b){(b||(b={})).unset=!0;return this.set(a,null,b)},clear:function(a){(a||(a={})).unset=!0;return this.set(f.clone(this.attributes),a)},fetch:function(a){var a=a?f.clone(a):{},b=this,c=a.success;a.success=function(d,e,f){if(!b.set(b.parse(d,f),a))return!1;c&&c(b,d)};
+a.error=g.wrapError(a.error,b,a);return(this.sync||g.sync).call(this,"read",this,a)},save:function(a,b,c){var d,e;f.isObject(a)||null==a?(d=a,c=b):(d={},d[a]=b);c=c?f.clone(c):{};if(c.wait){if(!this._validate(d,c))return!1;e=f.clone(this.attributes)}a=f.extend({},c,{silent:!0});if(d&&!this.set(d,c.wait?a:c))return!1;var h=this,i=c.success;c.success=function(a,b,e){b=h.parse(a,e);if(c.wait){delete c.wait;b=f.extend(d||{},b)}if(!h.set(b,c))return false;i?i(h,a):h.trigger("sync",h,a,c)};c.error=g.wrapError(c.error,
+h,c);b=this.isNew()?"create":"update";b=(this.sync||g.sync).call(this,b,this,c);c.wait&&this.set(e,a);return b},destroy:function(a){var a=a?f.clone(a):{},b=this,c=a.success,d=function(){b.trigger("destroy",b,b.collection,a)};if(this.isNew())return d(),!1;a.success=function(e){a.wait&&d();c?c(b,e):b.trigger("sync",b,e,a)};a.error=g.wrapError(a.error,b,a);var e=(this.sync||g.sync).call(this,"delete",this,a);a.wait||d();return e},url:function(){var a=n(this,"urlRoot")||n(this.collection,"url")||t();
+return this.isNew()?a:a+("/"==a.charAt(a.length-1)?"":"/")+encodeURIComponent(this.id)},parse:function(a){return a},clone:function(){return new this.constructor(this.attributes)},isNew:function(){return null==this.id},change:function(a){a||(a={});var b=this._changing;this._changing=!0;for(var c in this._silent)this._pending[c]=!0;var d=f.extend({},a.changes,this._silent);this._silent={};for(c in d)this.trigger("change:"+c,this,this.get(c),a);if(b)return this;for(;!f.isEmpty(this._pending);){this._pending=
+{};this.trigger("change",this,a);for(c in this.changed)!this._pending[c]&&!this._silent[c]&&delete this.changed[c];this._previousAttributes=f.clone(this.attributes)}this._changing=!1;return this},hasChanged:function(a){return!arguments.length?!f.isEmpty(this.changed):f.has(this.changed,a)},changedAttributes:function(a){if(!a)return this.hasChanged()?f.clone(this.changed):!1;var b,c=!1,d=this._previousAttributes,e;for(e in a)if(!f.isEqual(d[e],b=a[e]))(c||(c={}))[e]=b;return c},previous:function(a){return!arguments.length||
+!this._previousAttributes?null:this._previousAttributes[a]},previousAttributes:function(){return f.clone(this._previousAttributes)},isValid:function(){return!this.validate(this.attributes)},_validate:function(a,b){if(b.silent||!this.validate)return!0;var a=f.extend({},this.attributes,a),c=this.validate(a,b);if(!c)return!0;b&&b.error?b.error(this,c,b):this.trigger("error",this,c,b);return!1}});var r=g.Collection=function(a,b){b||(b={});b.model&&(this.model=b.model);b.comparator&&(this.comparator=b.comparator);
+this._reset();this.initialize.apply(this,arguments);a&&this.reset(a,{silent:!0,parse:b.parse})};f.extend(r.prototype,k,{model:o,initialize:function(){},toJSON:function(a){return this.map(function(b){return b.toJSON(a)})},add:function(a,b){var c,d,e,g,i,j={},k={},l=[];b||(b={});a=f.isArray(a)?a.slice():[a];c=0;for(d=a.length;c=b))this.iframe=i('