From 354afd448cefc348f185b9adc4b5e6d38f96a3b0 Mon Sep 17 00:00:00 2001
From: Morgan Richomme <morgan.richomme@orange.com>
Date: Tue, 4 Jul 2017 17:23:33 +0200
Subject: Initiate packetization of Testing reporting

Testing reporting provides
- static reporting pages [1] (jinja2)
- landing pages including test case catalogue [2] (angular)

It consumes the Test API to build web pages providing status for
the testing projects (so far functest, yardstick, storperf and
qtip).

Dockerization has been initiated [3]. The goal is to replace the
static page hosted on testresults.opnfv.org with a Docker container that
is regenerated and redeployed whenever the reporting code changes.

However, the Docker setup and, more generally, the testing reporting
directory must be refactored to
- manage dependencies properly
- leverage tox (py27, docs, pylint, pep8)

This patch
- sets up tox (py27, pep8, pylint, docs)
- integrates the requirement management
- fixes pep8 errors
- introduces 1 dummy unit test

This patch does not
- fix pylint errors
- create any documentation

[1]: http://testresults.opnfv.org/reporting2/display/index.html
[2]: http://testresults.opnfv.org/reporting2/reporting/index.html
[3]: https://gerrit.opnfv.org/gerrit/#/c/36735/

Change-Id: I4613de7ca7036d6c6bbb8f58ade492b1d673599b
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
---
 utils/test/reporting/api/__init__.py               |   0
 utils/test/reporting/api/api/__init__.py           |   0
 utils/test/reporting/api/api/conf.py               |   1 -
 utils/test/reporting/api/api/handlers/__init__.py  |  19 -
 utils/test/reporting/api/api/handlers/landing.py   | 169 --------
 utils/test/reporting/api/api/handlers/projects.py  |  27 --
 utils/test/reporting/api/api/handlers/testcases.py |  33 --
 utils/test/reporting/api/api/server.py             |  27 --
 utils/test/reporting/api/api/urls.py               |  20 -
 utils/test/reporting/api/conf.py                   |   1 +
 utils/test/reporting/api/handlers/__init__.py      |  19 +
 utils/test/reporting/api/handlers/landing.py       | 169 ++++++++
 utils/test/reporting/api/handlers/projects.py      |  27 ++
 utils/test/reporting/api/handlers/testcases.py     |  33 ++
 utils/test/reporting/api/requirements.txt          |   3 -
 utils/test/reporting/api/server.py                 |  27 ++
 utils/test/reporting/api/setup.cfg                 |  32 --
 utils/test/reporting/api/setup.py                  |   9 -
 utils/test/reporting/api/urls.py                   |  20 +
 utils/test/reporting/docker/Dockerfile             |  20 +-
 utils/test/reporting/docker/nginx.conf             |   4 +-
 utils/test/reporting/docker/reporting.sh           |  70 +---
 utils/test/reporting/docker/supervisor.conf        |   4 +-
 utils/test/reporting/docker/web_server.sh          |  16 +
 utils/test/reporting/docs/_build/.buildinfo        |   4 +
 .../docs/_build/.doctrees/environment.pickle       | Bin 0 -> 4207 bytes
 .../reporting/docs/_build/.doctrees/index.doctree  | Bin 0 -> 4892 bytes
 utils/test/reporting/docs/conf.py                  | 341 +++++++++++++++
 utils/test/reporting/docs/index.rst                |  16 +
 utils/test/reporting/functest/__init__.py          |   0
 utils/test/reporting/functest/img/gauge_0.png      | Bin 3644 -> 0 bytes
 utils/test/reporting/functest/img/gauge_100.png    | Bin 3191 -> 0 bytes
 utils/test/reporting/functest/img/gauge_16.7.png   | Bin 3170 -> 0 bytes
 utils/test/reporting/functest/img/gauge_25.png     | Bin 3108 -> 0 bytes
 utils/test/reporting/functest/img/gauge_33.3.png   | Bin 3081 -> 0 bytes
 utils/test/reporting/functest/img/gauge_41.7.png   | Bin 3169 -> 0 bytes
 utils/test/reporting/functest/img/gauge_50.png     | Bin 3123 -> 0 bytes
 utils/test/reporting/functest/img/gauge_58.3.png   | Bin 3161 -> 0 bytes
 utils/test/reporting/functest/img/gauge_66.7.png   | Bin 3069 -> 0 bytes
 utils/test/reporting/functest/img/gauge_75.png     | Bin 3030 -> 0 bytes
 utils/test/reporting/functest/img/gauge_8.3.png    | Bin 2993 -> 0 bytes
 utils/test/reporting/functest/img/gauge_83.3.png   | Bin 3122 -> 0 bytes
 utils/test/reporting/functest/img/gauge_91.7.png   | Bin 3008 -> 0 bytes
 utils/test/reporting/functest/img/icon-nok.png     | Bin 2317 -> 0 bytes
 utils/test/reporting/functest/img/icon-ok.png      | Bin 4063 -> 0 bytes
 .../test/reporting/functest/img/weather-clear.png  | Bin 1560 -> 0 bytes
 .../reporting/functest/img/weather-few-clouds.png  | Bin 1927 -> 0 bytes
 .../reporting/functest/img/weather-overcast.png    | Bin 1588 -> 0 bytes
 .../test/reporting/functest/img/weather-storm.png  | Bin 2137 -> 0 bytes
 utils/test/reporting/functest/index.html           |  53 ---
 utils/test/reporting/functest/reporting-status.py  | 306 --------------
 utils/test/reporting/functest/reporting-tempest.py | 155 -------
 utils/test/reporting/functest/reporting-vims.py    | 126 ------
 utils/test/reporting/functest/scenarioResult.py    |  29 --
 .../functest/template/index-status-tmpl.html       | 157 -------
 .../functest/template/index-tempest-tmpl.html      |  95 -----
 .../functest/template/index-vims-tmpl.html         |  92 ----
 utils/test/reporting/functest/testCase.py          | 125 ------
 utils/test/reporting/qtip/__init__.py              |   0
 utils/test/reporting/qtip/index.html               |  51 ---
 utils/test/reporting/qtip/reporting-status.py      | 110 -----
 .../reporting/qtip/template/index-status-tmpl.html |  86 ----
 utils/test/reporting/reporting.yaml                |  68 ---
 utils/test/reporting/reporting/__init__.py         |   0
 .../test/reporting/reporting/functest/__init__.py  |   0
 .../reporting/reporting/functest/img/gauge_0.png   | Bin 0 -> 3644 bytes
 .../reporting/reporting/functest/img/gauge_100.png | Bin 0 -> 3191 bytes
 .../reporting/functest/img/gauge_16.7.png          | Bin 0 -> 3170 bytes
 .../reporting/reporting/functest/img/gauge_25.png  | Bin 0 -> 3108 bytes
 .../reporting/functest/img/gauge_33.3.png          | Bin 0 -> 3081 bytes
 .../reporting/functest/img/gauge_41.7.png          | Bin 0 -> 3169 bytes
 .../reporting/reporting/functest/img/gauge_50.png  | Bin 0 -> 3123 bytes
 .../reporting/functest/img/gauge_58.3.png          | Bin 0 -> 3161 bytes
 .../reporting/functest/img/gauge_66.7.png          | Bin 0 -> 3069 bytes
 .../reporting/reporting/functest/img/gauge_75.png  | Bin 0 -> 3030 bytes
 .../reporting/reporting/functest/img/gauge_8.3.png | Bin 0 -> 2993 bytes
 .../reporting/functest/img/gauge_83.3.png          | Bin 0 -> 3122 bytes
 .../reporting/functest/img/gauge_91.7.png          | Bin 0 -> 3008 bytes
 .../reporting/reporting/functest/img/icon-nok.png  | Bin 0 -> 2317 bytes
 .../reporting/reporting/functest/img/icon-ok.png   | Bin 0 -> 4063 bytes
 .../reporting/functest/img/weather-clear.png       | Bin 0 -> 1560 bytes
 .../reporting/functest/img/weather-few-clouds.png  | Bin 0 -> 1927 bytes
 .../reporting/functest/img/weather-overcast.png    | Bin 0 -> 1588 bytes
 .../reporting/functest/img/weather-storm.png       | Bin 0 -> 2137 bytes
 utils/test/reporting/reporting/functest/index.html |  53 +++
 .../reporting/functest/reporting-status.py         | 309 ++++++++++++++
 .../reporting/functest/reporting-tempest.py        | 155 +++++++
 .../reporting/reporting/functest/reporting-vims.py | 126 ++++++
 .../reporting/reporting/functest/scenarioResult.py |  29 ++
 .../functest/template/index-status-tmpl.html       | 157 +++++++
 .../functest/template/index-tempest-tmpl.html      |  95 +++++
 .../functest/template/index-vims-tmpl.html         |  92 ++++
 .../test/reporting/reporting/functest/testCase.py  | 125 ++++++
 utils/test/reporting/reporting/qtip/__init__.py    |   0
 utils/test/reporting/reporting/qtip/index.html     |  51 +++
 .../reporting/reporting/qtip/reporting-status.py   | 112 +++++
 .../reporting/qtip/template/index-status-tmpl.html |  86 ++++
 utils/test/reporting/reporting/reporting.yaml      |  68 +++
 .../test/reporting/reporting/storperf/__init__.py  |   0
 .../reporting/storperf/reporting-status.py         | 145 +++++++
 .../storperf/template/index-status-tmpl.html       | 110 +++++
 utils/test/reporting/reporting/tests/__init__.py   |   0
 .../reporting/reporting/tests/unit/__init__.py     |   0
 .../reporting/tests/unit/utils/__init__.py         |   0
 .../reporting/tests/unit/utils/test_utils.py       |  28 ++
 utils/test/reporting/reporting/utils/__init__.py   |   0
 .../reporting/reporting/utils/reporting_utils.py   | 463 +++++++++++++++++++++
 .../reporting/reporting/utils/scenarioResult.py    |  33 ++
 .../test/reporting/reporting/yardstick/__init__.py |   0
 .../reporting/reporting/yardstick/img/gauge_0.png  | Bin 0 -> 3644 bytes
 .../reporting/yardstick/img/gauge_100.png          | Bin 0 -> 3191 bytes
 .../reporting/yardstick/img/gauge_16.7.png         | Bin 0 -> 3170 bytes
 .../reporting/reporting/yardstick/img/gauge_25.png | Bin 0 -> 3108 bytes
 .../reporting/yardstick/img/gauge_33.3.png         | Bin 0 -> 3081 bytes
 .../reporting/yardstick/img/gauge_41.7.png         | Bin 0 -> 3169 bytes
 .../reporting/reporting/yardstick/img/gauge_50.png | Bin 0 -> 3123 bytes
 .../reporting/yardstick/img/gauge_58.3.png         | Bin 0 -> 3161 bytes
 .../reporting/yardstick/img/gauge_66.7.png         | Bin 0 -> 3069 bytes
 .../reporting/reporting/yardstick/img/gauge_75.png | Bin 0 -> 3030 bytes
 .../reporting/yardstick/img/gauge_8.3.png          | Bin 0 -> 2993 bytes
 .../reporting/yardstick/img/gauge_83.3.png         | Bin 0 -> 3122 bytes
 .../reporting/yardstick/img/gauge_91.7.png         | Bin 0 -> 3008 bytes
 .../reporting/reporting/yardstick/img/icon-nok.png | Bin 0 -> 2317 bytes
 .../reporting/reporting/yardstick/img/icon-ok.png  | Bin 0 -> 4063 bytes
 .../reporting/yardstick/img/weather-clear.png      | Bin 0 -> 1560 bytes
 .../reporting/yardstick/img/weather-few-clouds.png | Bin 0 -> 1927 bytes
 .../reporting/yardstick/img/weather-overcast.png   | Bin 0 -> 1588 bytes
 .../reporting/yardstick/img/weather-storm.png      | Bin 0 -> 2137 bytes
 .../test/reporting/reporting/yardstick/index.html  |  51 +++
 .../reporting/yardstick/reporting-status.py        | 120 ++++++
 .../reporting/reporting/yardstick/scenarios.py     |  27 ++
 .../yardstick/template/index-status-tmpl.html      | 110 +++++
 utils/test/reporting/requirements.txt              |   7 +
 utils/test/reporting/run_test.sh                   |  45 +-
 utils/test/reporting/setup.cfg                     |  12 +
 utils/test/reporting/setup.py                      |  32 +-
 utils/test/reporting/storperf/reporting-status.py  | 145 -------
 .../storperf/template/index-status-tmpl.html       | 110 -----
 utils/test/reporting/test-requirements.txt         |   5 +
 utils/test/reporting/tests/__init__.py             |   0
 utils/test/reporting/tests/unit/__init__.py        |   0
 utils/test/reporting/tests/unit/utils/__init__.py  |   0
 .../test/reporting/tests/unit/utils/test_utils.py  |  29 --
 utils/test/reporting/tox.ini                       |  27 ++
 utils/test/reporting/utils/__init__.py             |   0
 utils/test/reporting/utils/reporting_utils.py      | 461 --------------------
 utils/test/reporting/utils/scenarioResult.py       |  33 --
 utils/test/reporting/yardstick/img/gauge_0.png     | Bin 3644 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_100.png   | Bin 3191 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_16.7.png  | Bin 3170 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_25.png    | Bin 3108 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_33.3.png  | Bin 3081 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_41.7.png  | Bin 3169 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_50.png    | Bin 3123 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_58.3.png  | Bin 3161 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_66.7.png  | Bin 3069 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_75.png    | Bin 3030 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_8.3.png   | Bin 2993 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_83.3.png  | Bin 3122 -> 0 bytes
 utils/test/reporting/yardstick/img/gauge_91.7.png  | Bin 3008 -> 0 bytes
 utils/test/reporting/yardstick/img/icon-nok.png    | Bin 2317 -> 0 bytes
 utils/test/reporting/yardstick/img/icon-ok.png     | Bin 4063 -> 0 bytes
 .../test/reporting/yardstick/img/weather-clear.png | Bin 1560 -> 0 bytes
 .../reporting/yardstick/img/weather-few-clouds.png | Bin 1927 -> 0 bytes
 .../reporting/yardstick/img/weather-overcast.png   | Bin 1588 -> 0 bytes
 .../test/reporting/yardstick/img/weather-storm.png | Bin 2137 -> 0 bytes
 utils/test/reporting/yardstick/index.html          |  51 ---
 utils/test/reporting/yardstick/reporting-status.py | 120 ------
 utils/test/reporting/yardstick/scenarios.py        |  27 --
 .../yardstick/template/index-status-tmpl.html      | 110 -----
 170 files changed, 3323 insertions(+), 3000 deletions(-)
 create mode 100644 utils/test/reporting/api/__init__.py
 delete mode 100644 utils/test/reporting/api/api/__init__.py
 delete mode 100644 utils/test/reporting/api/api/conf.py
 delete mode 100644 utils/test/reporting/api/api/handlers/__init__.py
 delete mode 100644 utils/test/reporting/api/api/handlers/landing.py
 delete mode 100644 utils/test/reporting/api/api/handlers/projects.py
 delete mode 100644 utils/test/reporting/api/api/handlers/testcases.py
 delete mode 100644 utils/test/reporting/api/api/server.py
 delete mode 100644 utils/test/reporting/api/api/urls.py
 create mode 100644 utils/test/reporting/api/conf.py
 create mode 100644 utils/test/reporting/api/handlers/__init__.py
 create mode 100644 utils/test/reporting/api/handlers/landing.py
 create mode 100644 utils/test/reporting/api/handlers/projects.py
 create mode 100644 utils/test/reporting/api/handlers/testcases.py
 delete mode 100644 utils/test/reporting/api/requirements.txt
 create mode 100644 utils/test/reporting/api/server.py
 delete mode 100644 utils/test/reporting/api/setup.cfg
 delete mode 100644 utils/test/reporting/api/setup.py
 create mode 100644 utils/test/reporting/api/urls.py
 create mode 100755 utils/test/reporting/docker/web_server.sh
 create mode 100644 utils/test/reporting/docs/_build/.buildinfo
 create mode 100644 utils/test/reporting/docs/_build/.doctrees/environment.pickle
 create mode 100644 utils/test/reporting/docs/_build/.doctrees/index.doctree
 create mode 100644 utils/test/reporting/docs/conf.py
 create mode 100644 utils/test/reporting/docs/index.rst
 delete mode 100644 utils/test/reporting/functest/__init__.py
 delete mode 100644 utils/test/reporting/functest/img/gauge_0.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_100.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_16.7.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_25.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_33.3.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_41.7.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_50.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_58.3.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_66.7.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_75.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_8.3.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_83.3.png
 delete mode 100644 utils/test/reporting/functest/img/gauge_91.7.png
 delete mode 100644 utils/test/reporting/functest/img/icon-nok.png
 delete mode 100644 utils/test/reporting/functest/img/icon-ok.png
 delete mode 100644 utils/test/reporting/functest/img/weather-clear.png
 delete mode 100644 utils/test/reporting/functest/img/weather-few-clouds.png
 delete mode 100644 utils/test/reporting/functest/img/weather-overcast.png
 delete mode 100644 utils/test/reporting/functest/img/weather-storm.png
 delete mode 100644 utils/test/reporting/functest/index.html
 delete mode 100755 utils/test/reporting/functest/reporting-status.py
 delete mode 100755 utils/test/reporting/functest/reporting-tempest.py
 delete mode 100755 utils/test/reporting/functest/reporting-vims.py
 delete mode 100644 utils/test/reporting/functest/scenarioResult.py
 delete mode 100644 utils/test/reporting/functest/template/index-status-tmpl.html
 delete mode 100644 utils/test/reporting/functest/template/index-tempest-tmpl.html
 delete mode 100644 utils/test/reporting/functest/template/index-vims-tmpl.html
 delete mode 100644 utils/test/reporting/functest/testCase.py
 delete mode 100644 utils/test/reporting/qtip/__init__.py
 delete mode 100644 utils/test/reporting/qtip/index.html
 delete mode 100644 utils/test/reporting/qtip/reporting-status.py
 delete mode 100644 utils/test/reporting/qtip/template/index-status-tmpl.html
 delete mode 100644 utils/test/reporting/reporting.yaml
 create mode 100644 utils/test/reporting/reporting/__init__.py
 create mode 100644 utils/test/reporting/reporting/functest/__init__.py
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_0.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_100.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_16.7.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_25.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_33.3.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_41.7.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_50.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_58.3.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_66.7.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_75.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_8.3.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_83.3.png
 create mode 100644 utils/test/reporting/reporting/functest/img/gauge_91.7.png
 create mode 100644 utils/test/reporting/reporting/functest/img/icon-nok.png
 create mode 100644 utils/test/reporting/reporting/functest/img/icon-ok.png
 create mode 100644 utils/test/reporting/reporting/functest/img/weather-clear.png
 create mode 100644 utils/test/reporting/reporting/functest/img/weather-few-clouds.png
 create mode 100644 utils/test/reporting/reporting/functest/img/weather-overcast.png
 create mode 100644 utils/test/reporting/reporting/functest/img/weather-storm.png
 create mode 100644 utils/test/reporting/reporting/functest/index.html
 create mode 100755 utils/test/reporting/reporting/functest/reporting-status.py
 create mode 100755 utils/test/reporting/reporting/functest/reporting-tempest.py
 create mode 100755 utils/test/reporting/reporting/functest/reporting-vims.py
 create mode 100644 utils/test/reporting/reporting/functest/scenarioResult.py
 create mode 100644 utils/test/reporting/reporting/functest/template/index-status-tmpl.html
 create mode 100644 utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
 create mode 100644 utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
 create mode 100644 utils/test/reporting/reporting/functest/testCase.py
 create mode 100644 utils/test/reporting/reporting/qtip/__init__.py
 create mode 100644 utils/test/reporting/reporting/qtip/index.html
 create mode 100644 utils/test/reporting/reporting/qtip/reporting-status.py
 create mode 100644 utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
 create mode 100644 utils/test/reporting/reporting/reporting.yaml
 create mode 100644 utils/test/reporting/reporting/storperf/__init__.py
 create mode 100644 utils/test/reporting/reporting/storperf/reporting-status.py
 create mode 100644 utils/test/reporting/reporting/storperf/template/index-status-tmpl.html
 create mode 100644 utils/test/reporting/reporting/tests/__init__.py
 create mode 100644 utils/test/reporting/reporting/tests/unit/__init__.py
 create mode 100644 utils/test/reporting/reporting/tests/unit/utils/__init__.py
 create mode 100644 utils/test/reporting/reporting/tests/unit/utils/test_utils.py
 create mode 100644 utils/test/reporting/reporting/utils/__init__.py
 create mode 100644 utils/test/reporting/reporting/utils/reporting_utils.py
 create mode 100644 utils/test/reporting/reporting/utils/scenarioResult.py
 create mode 100644 utils/test/reporting/reporting/yardstick/__init__.py
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_0.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_100.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_16.7.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_25.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_33.3.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_41.7.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_50.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_58.3.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_66.7.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_75.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_8.3.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_83.3.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/gauge_91.7.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/icon-nok.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/icon-ok.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/weather-clear.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/weather-overcast.png
 create mode 100644 utils/test/reporting/reporting/yardstick/img/weather-storm.png
 create mode 100644 utils/test/reporting/reporting/yardstick/index.html
 create mode 100644 utils/test/reporting/reporting/yardstick/reporting-status.py
 create mode 100644 utils/test/reporting/reporting/yardstick/scenarios.py
 create mode 100644 utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
 create mode 100644 utils/test/reporting/requirements.txt
 create mode 100644 utils/test/reporting/setup.cfg
 delete mode 100644 utils/test/reporting/storperf/reporting-status.py
 delete mode 100644 utils/test/reporting/storperf/template/index-status-tmpl.html
 create mode 100644 utils/test/reporting/test-requirements.txt
 delete mode 100644 utils/test/reporting/tests/__init__.py
 delete mode 100644 utils/test/reporting/tests/unit/__init__.py
 delete mode 100644 utils/test/reporting/tests/unit/utils/__init__.py
 delete mode 100644 utils/test/reporting/tests/unit/utils/test_utils.py
 create mode 100644 utils/test/reporting/tox.ini
 delete mode 100644 utils/test/reporting/utils/__init__.py
 delete mode 100644 utils/test/reporting/utils/reporting_utils.py
 delete mode 100644 utils/test/reporting/utils/scenarioResult.py
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_0.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_100.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_16.7.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_25.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_33.3.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_41.7.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_50.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_58.3.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_66.7.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_75.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_8.3.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_83.3.png
 delete mode 100644 utils/test/reporting/yardstick/img/gauge_91.7.png
 delete mode 100644 utils/test/reporting/yardstick/img/icon-nok.png
 delete mode 100644 utils/test/reporting/yardstick/img/icon-ok.png
 delete mode 100644 utils/test/reporting/yardstick/img/weather-clear.png
 delete mode 100644 utils/test/reporting/yardstick/img/weather-few-clouds.png
 delete mode 100644 utils/test/reporting/yardstick/img/weather-overcast.png
 delete mode 100644 utils/test/reporting/yardstick/img/weather-storm.png
 delete mode 100644 utils/test/reporting/yardstick/index.html
 delete mode 100644 utils/test/reporting/yardstick/reporting-status.py
 delete mode 100644 utils/test/reporting/yardstick/scenarios.py
 delete mode 100644 utils/test/reporting/yardstick/template/index-status-tmpl.html

(limited to 'utils/test/reporting')

diff --git a/utils/test/reporting/api/__init__.py b/utils/test/reporting/api/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/api/api/__init__.py b/utils/test/reporting/api/api/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/api/api/conf.py b/utils/test/reporting/api/api/conf.py
deleted file mode 100644
index 5897d4f97..000000000
--- a/utils/test/reporting/api/api/conf.py
+++ /dev/null
@@ -1 +0,0 @@
-base_url = 'http://testresults.opnfv.org/test/api/v1'
diff --git a/utils/test/reporting/api/api/handlers/__init__.py b/utils/test/reporting/api/api/handlers/__init__.py
deleted file mode 100644
index bcda66438..000000000
--- a/utils/test/reporting/api/api/handlers/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from tornado.web import RequestHandler
-
-
-class BaseHandler(RequestHandler):
-    def _set_header(self):
-        self.set_header('Access-Control-Allow-Origin', '*')
-        self.set_header('Access-Control-Allow-Headers',
-                        'Content-Type, Content-Length, Authorization, \
-                        Accept, X-Requested-With , PRIVATE-TOKEN')
-        self.set_header('Access-Control-Allow-Methods',
-                        'PUT, POST, GET, DELETE, OPTIONS')
diff --git a/utils/test/reporting/api/api/handlers/landing.py b/utils/test/reporting/api/api/handlers/landing.py
deleted file mode 100644
index 749916fb6..000000000
--- a/utils/test/reporting/api/api/handlers/landing.py
+++ /dev/null
@@ -1,169 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import requests
-
-from tornado.escape import json_encode
-from tornado.escape import json_decode
-
-from api.handlers import BaseHandler
-from api import conf
-
-
-class FiltersHandler(BaseHandler):
-    def get(self):
-        self._set_header()
-
-        filters = {
-            'filters': {
-                'status': ['success', 'warning', 'danger'],
-                'projects': ['functest', 'yardstick'],
-                'installers': ['apex', 'compass', 'fuel', 'joid'],
-                'version': ['colorado', 'master'],
-                'loops': ['daily', 'weekly', 'monthly'],
-                'time': ['10 days', '30 days']
-            }
-        }
-        return self.write(json_encode(filters))
-
-
-class ScenariosHandler(BaseHandler):
-    def post(self):
-        self._set_header()
-
-        body = json_decode(self.request.body)
-        args = self._get_args(body)
-
-        scenarios = self._get_result_data(self._get_scenarios(), args)
-
-        return self.write(json_encode(dict(scenarios=scenarios)))
-
-    def _get_result_data(self, data, args):
-        data = self._filter_status(data, args)
-        return {s: self._get_scenario_result(s, data[s], args) for s in data}
-
-    def _filter_status(self, data, args):
-        return {k: v for k, v in data.items() if v['status'] in args['status']}
-
-    def _get_scenario_result(self, scenario, data, args):
-        result = {
-            'status': data.get('status'),
-            'installers': self._get_installers_result(data['installers'], args)
-        }
-        return result
-
-    def _get_installers_result(self, data, args):
-        func = self._get_installer_result
-        return {k: func(k, data.get(k, {}), args) for k in args['installers']}
-
-    def _get_installer_result(self, installer, data, args):
-        projects = data.get(args['version'], [])
-        return [self._get_project_data(projects, p) for p in args['projects']]
-
-    def _get_project_data(self, projects, project):
-        atom = {
-            'project': project,
-            'score': None,
-            'status': None
-        }
-        for p in projects:
-            if p['project'] == project:
-                return p
-        return atom
-
-    def _get_scenarios(self):
-        url = '{}/scenarios'.format(conf.base_url)
-        resp = requests.get(url).json()
-        data = self._change_to_utf8(resp).get('scenarios', {})
-        return {a.get('name'): self._get_scenario(a.get('installers', [])
-                                                  ) for a in data}
-
-    def _get_scenario(self, data):
-        installers = {a.get('installer'): self._get_installer(a.get('versions',
-                                                                    [])
-                                                              ) for a in data}
-        scenario = {
-            'status': self._get_status(),
-            'installers': installers
-        }
-        return scenario
-
-    def _get_status(self):
-        return 'success'
-
-    def _get_installer(self, data):
-        return {a.get('version'): self._get_version(a) for a in data}
-
-    def _get_version(self, data):
-        try:
-            scores = data.get('score', {}).get('projects')[0]
-            trusts = data.get('trust_indicator', {}).get('projects')[0]
-        except (TypeError, IndexError):
-            return []
-        else:
-            scores = {key: [dict(date=a.get('date')[:10],
-                                 score=a.get('score')
-                                 ) for a in scores[key]] for key in scores}
-            trusts = {key: [dict(date=a.get('date')[:10],
-                                 status=a.get('status')
-                                 ) for a in trusts[key]] for key in trusts}
-            atom = self._get_atom(scores, trusts)
-            return [dict(project=k,
-                         score=sorted(atom[k], reverse=True)[0].get('score'),
-                         status=sorted(atom[k], reverse=True)[0].get('status')
-                         ) for k in atom if atom[k]]
-
-    def _get_atom(self, scores, trusts):
-        s = {k: {a['date']: a['score'] for a in scores[k]} for k in scores}
-        t = {k: {a['date']: a['status'] for a in trusts[k]} for k in trusts}
-        return {k: [dict(score=s[k][a], status=t[k][a], data=a
-                         ) for a in s[k] if a in t[k]] for k in s}
-
-    def _change_to_utf8(self, obj):
-        if isinstance(obj, dict):
-            return {str(k): self._change_to_utf8(v) for k, v in obj.items()}
-        elif isinstance(obj, list):
-            return [self._change_to_utf8(ele) for ele in obj]
-        else:
-            try:
-                new = eval(obj)
-                if isinstance(new, int):
-                    return obj
-                return self._change_to_utf8(new)
-            except (NameError, TypeError, SyntaxError):
-                return str(obj)
-
-    def _get_args(self, body):
-        status = self._get_status_args(body)
-        projects = self._get_projects_args(body)
-        installers = self._get_installers_args(body)
-
-        args = {
-            'status': status,
-            'projects': projects,
-            'installers': installers,
-            'version': body.get('version', 'master').lower(),
-            'loops': body.get('loops', 'daily').lower(),
-            'time': body.get('times', '10 days')[:2].lower()
-        }
-        return args
-
-    def _get_status_args(self, body):
-        status_all = ['success', 'warning', 'danger']
-        status = [a.lower() for a in body.get('status', ['all'])]
-        return status_all if 'all' in status else status
-
-    def _get_projects_args(self, body):
-        project_all = ['functest', 'yardstick']
-        projects = [a.lower() for a in body.get('projects', ['all'])]
-        return project_all if 'all' in projects else projects
-
-    def _get_installers_args(self, body):
-        installer_all = ['apex', 'compass', 'fuel', 'joid']
-        installers = [a.lower() for a in body.get('installers', ['all'])]
-        return installer_all if 'all' in installers else installers
diff --git a/utils/test/reporting/api/api/handlers/projects.py b/utils/test/reporting/api/api/handlers/projects.py
deleted file mode 100644
index 02412cd62..000000000
--- a/utils/test/reporting/api/api/handlers/projects.py
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apaiche.org/licenses/LICENSE-2.0
-##############################################################################
-import requests
-
-from tornado.escape import json_encode
-
-from api.handlers import BaseHandler
-from api import conf
-
-
-class Projects(BaseHandler):
-    def get(self):
-        self._set_header()
-
-        url = '{}/projects'.format(conf.base_url)
-        projects = requests.get(url).json().get('projects', {})
-
-        project_url = 'https://wiki.opnfv.org/display/{}'
-        data = {p['name']: project_url.format(p['name']) for p in projects}
-
-        return self.write(json_encode(data))
diff --git a/utils/test/reporting/api/api/handlers/testcases.py b/utils/test/reporting/api/api/handlers/testcases.py
deleted file mode 100644
index 2b9118623..000000000
--- a/utils/test/reporting/api/api/handlers/testcases.py
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import requests
-
-from tornado.escape import json_encode
-
-from api.handlers import BaseHandler
-from api import conf
-
-
-class TestCases(BaseHandler):
-    def get(self, project):
-        self._set_header()
-
-        url = '{}/projects/{}/cases'.format(conf.base_url, project)
-        cases = requests.get(url).json().get('testcases', [])
-        data = [{t['name']: t['catalog_description']} for t in cases]
-        self.write(json_encode(data))
-
-
-class TestCase(BaseHandler):
-    def get(self, project, name):
-        self._set_header()
-
-        url = '{}/projects/{}/cases/{}'.format(conf.base_url, project, name)
-        data = requests.get(url).json()
-        self.write(json_encode(data))
diff --git a/utils/test/reporting/api/api/server.py b/utils/test/reporting/api/api/server.py
deleted file mode 100644
index e340b0181..000000000
--- a/utils/test/reporting/api/api/server.py
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import tornado.ioloop
-import tornado.web
-from tornado.options import define
-from tornado.options import options
-
-from api.urls import mappings
-
-define("port", default=8000, help="run on the given port", type=int)
-
-
-def main():
-    tornado.options.parse_command_line()
-    application = tornado.web.Application(mappings)
-    application.listen(options.port)
-    tornado.ioloop.IOLoop.current().start()
-
-
-if __name__ == "__main__":
-    main()
diff --git a/utils/test/reporting/api/api/urls.py b/utils/test/reporting/api/api/urls.py
deleted file mode 100644
index a5228b2d4..000000000
--- a/utils/test/reporting/api/api/urls.py
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from api.handlers import landing
-from api.handlers import projects
-from api.handlers import testcases
-
-mappings = [
-    (r"/landing-page/filters", landing.FiltersHandler),
-    (r"/landing-page/scenarios", landing.ScenariosHandler),
-
-    (r"/projects-page/projects", projects.Projects),
-    (r"/projects/([^/]+)/cases", testcases.TestCases),
-    (r"/projects/([^/]+)/cases/([^/]+)", testcases.TestCase)
-]
diff --git a/utils/test/reporting/api/conf.py b/utils/test/reporting/api/conf.py
new file mode 100644
index 000000000..5897d4f97
--- /dev/null
+++ b/utils/test/reporting/api/conf.py
@@ -0,0 +1 @@
+base_url = 'http://testresults.opnfv.org/test/api/v1'
diff --git a/utils/test/reporting/api/handlers/__init__.py b/utils/test/reporting/api/handlers/__init__.py
new file mode 100644
index 000000000..bcda66438
--- /dev/null
+++ b/utils/test/reporting/api/handlers/__init__.py
@@ -0,0 +1,19 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from tornado.web import RequestHandler
+
+
+class BaseHandler(RequestHandler):
+    def _set_header(self):
+        self.set_header('Access-Control-Allow-Origin', '*')
+        self.set_header('Access-Control-Allow-Headers',
+                        'Content-Type, Content-Length, Authorization, \
+                        Accept, X-Requested-With , PRIVATE-TOKEN')
+        self.set_header('Access-Control-Allow-Methods',
+                        'PUT, POST, GET, DELETE, OPTIONS')
diff --git a/utils/test/reporting/api/handlers/landing.py b/utils/test/reporting/api/handlers/landing.py
new file mode 100644
index 000000000..749916fb6
--- /dev/null
+++ b/utils/test/reporting/api/handlers/landing.py
@@ -0,0 +1,169 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import requests
+
+from tornado.escape import json_encode
+from tornado.escape import json_decode
+
+from api.handlers import BaseHandler
+from api import conf
+
+
+class FiltersHandler(BaseHandler):
+    def get(self):
+        self._set_header()
+
+        filters = {
+            'filters': {
+                'status': ['success', 'warning', 'danger'],
+                'projects': ['functest', 'yardstick'],
+                'installers': ['apex', 'compass', 'fuel', 'joid'],
+                'version': ['colorado', 'master'],
+                'loops': ['daily', 'weekly', 'monthly'],
+                'time': ['10 days', '30 days']
+            }
+        }
+        return self.write(json_encode(filters))
+
+
+class ScenariosHandler(BaseHandler):
+    def post(self):
+        self._set_header()
+
+        body = json_decode(self.request.body)
+        args = self._get_args(body)
+
+        scenarios = self._get_result_data(self._get_scenarios(), args)
+
+        return self.write(json_encode(dict(scenarios=scenarios)))
+
+    def _get_result_data(self, data, args):
+        data = self._filter_status(data, args)
+        return {s: self._get_scenario_result(s, data[s], args) for s in data}
+
+    def _filter_status(self, data, args):
+        return {k: v for k, v in data.items() if v['status'] in args['status']}
+
+    def _get_scenario_result(self, scenario, data, args):
+        result = {
+            'status': data.get('status'),
+            'installers': self._get_installers_result(data['installers'], args)
+        }
+        return result
+
+    def _get_installers_result(self, data, args):
+        func = self._get_installer_result
+        return {k: func(k, data.get(k, {}), args) for k in args['installers']}
+
+    def _get_installer_result(self, installer, data, args):
+        projects = data.get(args['version'], [])
+        return [self._get_project_data(projects, p) for p in args['projects']]
+
+    def _get_project_data(self, projects, project):
+        atom = {
+            'project': project,
+            'score': None,
+            'status': None
+        }
+        for p in projects:
+            if p['project'] == project:
+                return p
+        return atom
+
+    def _get_scenarios(self):
+        url = '{}/scenarios'.format(conf.base_url)
+        resp = requests.get(url).json()
+        data = self._change_to_utf8(resp).get('scenarios', {})
+        return {a.get('name'): self._get_scenario(a.get('installers', [])
+                                                  ) for a in data}
+
+    def _get_scenario(self, data):
+        installers = {a.get('installer'): self._get_installer(a.get('versions',
+                                                                    [])
+                                                              ) for a in data}
+        scenario = {
+            'status': self._get_status(),
+            'installers': installers
+        }
+        return scenario
+
+    def _get_status(self):
+        return 'success'
+
+    def _get_installer(self, data):
+        return {a.get('version'): self._get_version(a) for a in data}
+
+    def _get_version(self, data):
+        try:
+            scores = data.get('score', {}).get('projects')[0]
+            trusts = data.get('trust_indicator', {}).get('projects')[0]
+        except (TypeError, IndexError):
+            return []
+        else:
+            scores = {key: [dict(date=a.get('date')[:10],
+                                 score=a.get('score')
+                                 ) for a in scores[key]] for key in scores}
+            trusts = {key: [dict(date=a.get('date')[:10],
+                                 status=a.get('status')
+                                 ) for a in trusts[key]] for key in trusts}
+            atom = self._get_atom(scores, trusts)
+            return [dict(project=k,
+                         score=sorted(atom[k], reverse=True)[0].get('score'),
+                         status=sorted(atom[k], reverse=True)[0].get('status')
+                         ) for k in atom if atom[k]]
+
+    def _get_atom(self, scores, trusts):
+        s = {k: {a['date']: a['score'] for a in scores[k]} for k in scores}
+        t = {k: {a['date']: a['status'] for a in trusts[k]} for k in trusts}
+        return {k: [dict(score=s[k][a], status=t[k][a], data=a
+                         ) for a in s[k] if a in t[k]] for k in s}
+
+    def _change_to_utf8(self, obj):
+        if isinstance(obj, dict):
+            return {str(k): self._change_to_utf8(v) for k, v in obj.items()}
+        elif isinstance(obj, list):
+            return [self._change_to_utf8(ele) for ele in obj]
+        else:
+            try:
+                new = eval(obj)
+                if isinstance(new, int):
+                    return obj
+                return self._change_to_utf8(new)
+            except (NameError, TypeError, SyntaxError):
+                return str(obj)
+
+    def _get_args(self, body):
+        status = self._get_status_args(body)
+        projects = self._get_projects_args(body)
+        installers = self._get_installers_args(body)
+
+        args = {
+            'status': status,
+            'projects': projects,
+            'installers': installers,
+            'version': body.get('version', 'master').lower(),
+            'loops': body.get('loops', 'daily').lower(),
+            'time': body.get('times', '10 days')[:2].lower()
+        }
+        return args
+
+    def _get_status_args(self, body):
+        status_all = ['success', 'warning', 'danger']
+        status = [a.lower() for a in body.get('status', ['all'])]
+        return status_all if 'all' in status else status
+
+    def _get_projects_args(self, body):
+        project_all = ['functest', 'yardstick']
+        projects = [a.lower() for a in body.get('projects', ['all'])]
+        return project_all if 'all' in projects else projects
+
+    def _get_installers_args(self, body):
+        installer_all = ['apex', 'compass', 'fuel', 'joid']
+        installers = [a.lower() for a in body.get('installers', ['all'])]
+        return installer_all if 'all' in installers else installers
diff --git a/utils/test/reporting/api/handlers/projects.py b/utils/test/reporting/api/handlers/projects.py
new file mode 100644
index 000000000..02412cd62
--- /dev/null
+++ b/utils/test/reporting/api/handlers/projects.py
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import requests
+
+from tornado.escape import json_encode
+
+from api.handlers import BaseHandler
+from api import conf
+
+
+class Projects(BaseHandler):
+    def get(self):
+        self._set_header()
+
+        url = '{}/projects'.format(conf.base_url)
+        projects = requests.get(url).json().get('projects', {})
+
+        project_url = 'https://wiki.opnfv.org/display/{}'
+        data = {p['name']: project_url.format(p['name']) for p in projects}
+
+        return self.write(json_encode(data))
diff --git a/utils/test/reporting/api/handlers/testcases.py b/utils/test/reporting/api/handlers/testcases.py
new file mode 100644
index 000000000..2b9118623
--- /dev/null
+++ b/utils/test/reporting/api/handlers/testcases.py
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import requests
+
+from tornado.escape import json_encode
+
+from api.handlers import BaseHandler
+from api import conf
+
+
+class TestCases(BaseHandler):
+    def get(self, project):
+        self._set_header()
+
+        url = '{}/projects/{}/cases'.format(conf.base_url, project)
+        cases = requests.get(url).json().get('testcases', [])
+        data = [{t['name']: t['catalog_description']} for t in cases]
+        self.write(json_encode(data))
+
+
+class TestCase(BaseHandler):
+    def get(self, project, name):
+        self._set_header()
+
+        url = '{}/projects/{}/cases/{}'.format(conf.base_url, project, name)
+        data = requests.get(url).json()
+        self.write(json_encode(data))
diff --git a/utils/test/reporting/api/requirements.txt b/utils/test/reporting/api/requirements.txt
deleted file mode 100644
index 12ad6881b..000000000
--- a/utils/test/reporting/api/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tornado==4.4.2
-requests==2.1.0
-
diff --git a/utils/test/reporting/api/server.py b/utils/test/reporting/api/server.py
new file mode 100644
index 000000000..e340b0181
--- /dev/null
+++ b/utils/test/reporting/api/server.py
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import tornado.ioloop
+import tornado.web
+from tornado.options import define
+from tornado.options import options
+
+from api.urls import mappings
+
+define("port", default=8000, help="run on the given port", type=int)
+
+
+def main():
+    tornado.options.parse_command_line()
+    application = tornado.web.Application(mappings)
+    application.listen(options.port)
+    tornado.ioloop.IOLoop.current().start()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/utils/test/reporting/api/setup.cfg b/utils/test/reporting/api/setup.cfg
deleted file mode 100644
index 53d1092b9..000000000
--- a/utils/test/reporting/api/setup.cfg
+++ /dev/null
@@ -1,32 +0,0 @@
-[metadata]
-name = reporting
-
-author = JackChan
-author-email = chenjiankun1@huawei.com
-
-classifier =
-    Environment :: opnfv
-    Intended Audience :: Information Technology
-    Intended Audience :: System Administrators
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
-    Programming Language :: Python
-    Programming Language :: Python :: 2
-    Programming Language :: Python :: 2.7
-
-[global]
-setup-hooks =
-    pbr.hooks.setup_hook
-
-[files]
-packages =
-    api
-
-[entry_points]
-console_scripts =
-    api = api.server:main
-
-[egg_info]
-tag_build =
-tag_date = 0
-tag_svn_revision = 0
diff --git a/utils/test/reporting/api/setup.py b/utils/test/reporting/api/setup.py
deleted file mode 100644
index d97481642..000000000
--- a/utils/test/reporting/api/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import setuptools
-
-
-__author__ = 'JackChan'
-
-
-setuptools.setup(
-    setup_requires=['pbr>=1.8'],
-    pbr=True)
diff --git a/utils/test/reporting/api/urls.py b/utils/test/reporting/api/urls.py
new file mode 100644
index 000000000..a5228b2d4
--- /dev/null
+++ b/utils/test/reporting/api/urls.py
@@ -0,0 +1,20 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from api.handlers import landing
+from api.handlers import projects
+from api.handlers import testcases
+
+mappings = [
+    (r"/landing-page/filters", landing.FiltersHandler),
+    (r"/landing-page/scenarios", landing.ScenariosHandler),
+
+    (r"/projects-page/projects", projects.Projects),
+    (r"/projects/([^/]+)/cases", testcases.TestCases),
+    (r"/projects/([^/]+)/cases/([^/]+)", testcases.TestCase)
+]
diff --git a/utils/test/reporting/docker/Dockerfile b/utils/test/reporting/docker/Dockerfile
index ad278ce1e..f5168d1ae 100644
--- a/utils/test/reporting/docker/Dockerfile
+++ b/utils/test/reporting/docker/Dockerfile
@@ -16,22 +16,20 @@
 FROM nginx:stable
 
 MAINTAINER Morgan Richomme <morgan.richomme@orange.com>
-LABEL version="danube.1.0" description="OPNFV Test Reporting Docker container"
+LABEL version="1.0" description="OPNFV Test Reporting Docker container"
 
 ARG BRANCH=master
 
 ENV HOME /home/opnfv
-ENV working_dir /home/opnfv/utils/test/reporting
-ENV TERM xterm
-ENV COLORTERM gnome-terminal
-ENV CONFIG_REPORTING_YAML /home/opnfv/utils/test/reporting/reporting.yaml
+ENV working_dir ${HOME}/releng/utils/test/reporting
+ENV CONFIG_REPORTING_YAML ${working_dir}/reporting.yaml
 
+WORKDIR ${HOME}
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 ssh \
 python-pip \
 git-core \
-wkhtmltopdf \
 nodejs \
 npm \
 supervisor \
@@ -39,15 +37,13 @@ supervisor \
 
 RUN pip install --upgrade pip
 
-RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng /home/opnfv
-RUN pip install -r ${working_dir}/docker/requirements.pip
-
-WORKDIR ${working_dir}/api
-RUN pip install -r requirements.txt
-RUN python setup.py install
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${HOME}/releng
+RUN pip install -r ${working_dir}/requirements.txt
 
 WORKDIR ${working_dir}
+RUN python setup.py install
 RUN docker/reporting.sh
+RUN docker/web_server.sh
 
 expose 8000
 
diff --git a/utils/test/reporting/docker/nginx.conf b/utils/test/reporting/docker/nginx.conf
index 9e2697248..ced8179c1 100644
--- a/utils/test/reporting/docker/nginx.conf
+++ b/utils/test/reporting/docker/nginx.conf
@@ -15,10 +15,10 @@ server {
     }
 
     location /reporting/ {
-        alias /home/opnfv/utils/test/reporting/pages/dist/;
+        alias /home/opnfv/releng/utils/test/reporting/pages/dist/;
     }
 
     location /display/ {
-        alias /home/opnfv/utils/test/reporting/display/;
+        alias /home/opnfv/releng/utils/test/reporting/display/;
     }
 }
diff --git a/utils/test/reporting/docker/reporting.sh b/utils/test/reporting/docker/reporting.sh
index 7fe97a88e..076dc4719 100755
--- a/utils/test/reporting/docker/reporting.sh
+++ b/utils/test/reporting/docker/reporting.sh
@@ -1,10 +1,10 @@
 #!/bin/bash
 
-export PYTHONPATH="${PYTHONPATH}:."
-export CONFIG_REPORTING_YAML=./reporting.yaml
+export PYTHONPATH="${PYTHONPATH}:./reporting"
+export CONFIG_REPORTING_YAML=./reporting/reporting.yaml
 
 declare -a versions=(danube master)
-declare -a projects=(functest storperf yardstick)
+declare -a projects=(functest storperf yardstick qtip)
 
 project=$1
 reporting_type=$2
@@ -29,8 +29,9 @@ cp -Rf js display
 #  projet   |        option
 #   $1      |          $2
 # functest  | status, vims, tempest
-# yardstick |
-# storperf  |
+# yardstick | status
+# storperf  | status
+# qtip      | status
 
 function report_project()
 {
@@ -40,7 +41,7 @@ function report_project()
   echo "********************************"
   echo " $project reporting "
   echo "********************************"
-  python ./$dir/reporting-$type.py
+  python ./reporting/$dir/reporting-$type.py
   if [ $? ]; then
     echo "$project reporting $type...OK"
   else
@@ -50,53 +51,28 @@ function report_project()
 
 if [ -z "$1" ]; then
   echo "********************************"
-  echo " Functest reporting "
+  echo " * Static status reporting     *"
   echo "********************************"
-  echo "reporting vIMS..."
-  python ./functest/reporting-vims.py
-  echo "reporting vIMS...OK"
-  sleep 10
-  echo "reporting Tempest..."
-  python ./functest/reporting-tempest.py
-  echo "reporting Tempest...OK"
-  sleep 10
-  echo "reporting status..."
-  python ./functest/reporting-status.py
-  echo "Functest reporting status...OK"
-
-  echo "********************************"
-  echo " Yardstick reporting "
-  echo "********************************"
-  python ./yardstick/reporting-status.py
-  echo "Yardstick reporting status...OK"
+  for i in "${projects[@]}"
+  do
+    report_project $i $i "status"
+    sleep 5
+  done
+  report_project "QTIP" "qtip" "status"
 
-  echo "********************************"
-  echo " Storperf reporting "
-  echo "********************************"
-  python ./storperf/reporting-status.py
-  echo "Storperf reporting status...OK"
 
-  report_project "QTIP" "qtip" "status"
+  echo "Functest reporting vIMS..."
+  report_project "functest" "functest" "vims"
+  echo "reporting vIMS...OK"
+  sleep 5
+  echo "Functest reporting Tempest..."
+  report_project "functest" "functest" "tempest"
+  echo "reporting Tempest...OK"
+  sleep 5
 
 else
   if [ -z "$2" ]; then
     reporting_type="status"
   fi
-  echo "********************************"
-  echo " $project/$reporting_type reporting "
-  echo "********************************"
-  python ./$project/reporting-$reporting_type.py
+  report_project $project $project $reporting_type
 fi
-cp -r display /usr/share/nginx/html
-
-
-# nginx config
-cp /home/opnfv/utils/test/reporting/docker/nginx.conf /etc/nginx/conf.d/
-echo "daemon off;" >> /etc/nginx/nginx.conf
-
-# supervisor config
-cp /home/opnfv/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
-
-ln -s /usr/bin/nodejs /usr/bin/node
-
-cd pages && /bin/bash angular.sh
diff --git a/utils/test/reporting/docker/supervisor.conf b/utils/test/reporting/docker/supervisor.conf
index b323dd029..49310d430 100644
--- a/utils/test/reporting/docker/supervisor.conf
+++ b/utils/test/reporting/docker/supervisor.conf
@@ -3,7 +3,7 @@ nodaemon = true
 
 [program:tornado]
 user = root
-directory = /home/opnfv/utils/test/reporting/api/api
+directory = /home/opnfv/releng/utils/test/reporting/api
 command = python server.py --port=800%(process_num)d
 process_name=%(program_name)s%(process_num)d
 numprocs=4
@@ -15,5 +15,5 @@ command = service nginx restart
 
 [program:configuration]
 user = root
-directory = /home/opnfv/utils/test/reporting/pages
+directory = /home/opnfv/releng/utils/test/reporting/pages
 command = bash config.sh
diff --git a/utils/test/reporting/docker/web_server.sh b/utils/test/reporting/docker/web_server.sh
new file mode 100755
index 000000000..a34c11dd7
--- /dev/null
+++ b/utils/test/reporting/docker/web_server.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+cp -r display /usr/share/nginx/html
+
+
+# nginx config
+cp /home/opnfv/releng/utils/test/reporting/docker/nginx.conf /etc/nginx/conf.d/
+echo "daemon off;" >> /etc/nginx/nginx.conf
+
+# supervisor config
+cp /home/opnfv/releng/utils/test/reporting/docker/supervisor.conf /etc/supervisor/conf.d/
+
+ln -s /usr/bin/nodejs /usr/bin/node
+
+# Manage Angular front end
+cd pages && /bin/bash angular.sh
+
diff --git a/utils/test/reporting/docs/_build/.buildinfo b/utils/test/reporting/docs/_build/.buildinfo
new file mode 100644
index 000000000..6bd6fd634
--- /dev/null
+++ b/utils/test/reporting/docs/_build/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 235ce07a48cec983846ad34dfd375b07
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/utils/test/reporting/docs/_build/.doctrees/environment.pickle b/utils/test/reporting/docs/_build/.doctrees/environment.pickle
new file mode 100644
index 000000000..23f59c377
Binary files /dev/null and b/utils/test/reporting/docs/_build/.doctrees/environment.pickle differ
diff --git a/utils/test/reporting/docs/_build/.doctrees/index.doctree b/utils/test/reporting/docs/_build/.doctrees/index.doctree
new file mode 100644
index 000000000..51e2d5ad3
Binary files /dev/null and b/utils/test/reporting/docs/_build/.doctrees/index.doctree differ
diff --git a/utils/test/reporting/docs/conf.py b/utils/test/reporting/docs/conf.py
new file mode 100644
index 000000000..2e70d2b63
--- /dev/null
+++ b/utils/test/reporting/docs/conf.py
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+#
+# OPNFV testing Reporting documentation build configuration file, created by
+# sphinx-quickstart on Mon July 4 10:03:43 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'OPNFV Reporting'
+copyright = u'2017, #opnfv-testperf (chat.freenode.net)'
+author = u'#opnfv-testperf (chat.freenode.net)'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'master'
+# The full version, including alpha/beta/rc tags.
+release = u'master'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'OPNFV Reporting vmaster'
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs.  This file should be a Windows icon file (.ico) being 16x16 or
+# 32x32 pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'OPNFVreportingdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+     # The paper size ('letterpaper' or 'a4paper').
+     #
+     # 'papersize': 'letterpaper',
+
+     # The font size ('10pt', '11pt' or '12pt').
+     #
+     # 'pointsize': '10pt',
+
+     # Additional stuff for the LaTeX preamble.
+     #
+     # 'preamble': '',
+
+     # Latex figure (float) alignment
+     #
+     # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'OPNFVReporting.tex',
+     u'OPNFV testing Reporting Documentation',
+     u'\\#opnfv-testperf (chat.freenode.net)', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, will not define \strong, \code, \titleref, \crossref ... but only
+# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
+# packages.
+#
+# latex_keep_old_macro_names = True
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'opnfvReporting', u'OPNFV Testing Reporting Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'OPNFVReporting', u'OPNFV Testing reporting Documentation',
+     author, 'OPNFVTesting', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
diff --git a/utils/test/reporting/docs/index.rst b/utils/test/reporting/docs/index.rst
new file mode 100644
index 000000000..af4187672
--- /dev/null
+++ b/utils/test/reporting/docs/index.rst
@@ -0,0 +1,16 @@
+Welcome to OPNFV Testing reporting documentation!
+=================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/utils/test/reporting/functest/__init__.py b/utils/test/reporting/functest/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/functest/img/gauge_0.png b/utils/test/reporting/functest/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
Binary files a/utils/test/reporting/functest/img/gauge_0.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_100.png b/utils/test/reporting/functest/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
Binary files a/utils/test/reporting/functest/img/gauge_100.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/functest/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
Binary files a/utils/test/reporting/functest/img/gauge_16.7.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_25.png b/utils/test/reporting/functest/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
Binary files a/utils/test/reporting/functest/img/gauge_25.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/functest/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
Binary files a/utils/test/reporting/functest/img/gauge_33.3.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/functest/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
Binary files a/utils/test/reporting/functest/img/gauge_41.7.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_50.png b/utils/test/reporting/functest/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
Binary files a/utils/test/reporting/functest/img/gauge_50.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/functest/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
Binary files a/utils/test/reporting/functest/img/gauge_58.3.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/functest/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
Binary files a/utils/test/reporting/functest/img/gauge_66.7.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_75.png b/utils/test/reporting/functest/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
Binary files a/utils/test/reporting/functest/img/gauge_75.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/functest/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
Binary files a/utils/test/reporting/functest/img/gauge_8.3.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/functest/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
Binary files a/utils/test/reporting/functest/img/gauge_83.3.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/functest/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
Binary files a/utils/test/reporting/functest/img/gauge_91.7.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/icon-nok.png b/utils/test/reporting/functest/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
Binary files a/utils/test/reporting/functest/img/icon-nok.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/icon-ok.png b/utils/test/reporting/functest/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
Binary files a/utils/test/reporting/functest/img/icon-ok.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/weather-clear.png b/utils/test/reporting/functest/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
Binary files a/utils/test/reporting/functest/img/weather-clear.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/functest/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
Binary files a/utils/test/reporting/functest/img/weather-few-clouds.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/weather-overcast.png b/utils/test/reporting/functest/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
Binary files a/utils/test/reporting/functest/img/weather-overcast.png and /dev/null differ
diff --git a/utils/test/reporting/functest/img/weather-storm.png b/utils/test/reporting/functest/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
Binary files a/utils/test/reporting/functest/img/weather-storm.png and /dev/null differ
diff --git a/utils/test/reporting/functest/index.html b/utils/test/reporting/functest/index.html
deleted file mode 100644
index bb1bce209..000000000
--- a/utils/test/reporting/functest/index.html
+++ /dev/null
@@ -1,53 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">Functest reporting page</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="index-status-apex.html">Apex</a></li>
-            <li><a href="index-status-compass.html">Compass</a></li>
-            <li><a href="index-status-fuel.html">Fuel</a></li>
-            <li><a href="index-status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-main">
-            <h2>Functest</h2>
-             This project develops test suites that cover functionaling test cases in OPNFV.
-             <br>The test suites are integrated in the continuation integration (CI) framework and used to evaluate/validate scenario.
-             <br> Weekly meeting: every Tuesday 8 AM UTC
-             <br> IRC chan #opnfv-testperf
-
-            <br>
-            <h2>Useful Links</h2>
-            <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
-            <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
-            <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
-            <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
-            <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
-
-        </div>
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
deleted file mode 100755
index 77ab7840f..000000000
--- a/utils/test/reporting/functest/reporting-status.py
+++ /dev/null
@@ -1,306 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import jinja2
-import os
-import sys
-import time
-
-import testCase as tc
-import scenarioResult as sr
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-# Logger
-logger = rp_utils.getLogger("Functest-Status")
-
-# Initialization
-testValid = []
-otherTestCases = []
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-# init just connection_check to get the list of scenarios
-# as all the scenarios run connection_check
-healthcheck = tc.TestCase("connection_check", "functest", -1)
-
-# Retrieve the Functest configuration to detect which tests are relevant
-# according to the installer, scenario
-cf = rp_utils.get_config('functest.test_conf')
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-installers = rp_utils.get_config('general.installers')
-blacklist = rp_utils.get_config('functest.blacklist')
-log_level = rp_utils.get_config('general.log.log_level')
-exclude_noha = rp_utils.get_config('functest.exclude_noha')
-exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
-
-functest_yaml_config = rp_utils.getFunctestConfig()
-
-logger.info("*******************************************")
-logger.info("*                                         *")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention: %s days               *" % period)
-logger.info("*   Log level: %s                         *" % log_level)
-logger.info("*                                         *")
-logger.info("*   Virtual PODs exluded: %s              *" % exclude_virtual)
-logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
-logger.info("*                                         *")
-logger.info("*******************************************")
-
-# Retrieve test cases of Tier 1 (smoke)
-config_tiers = functest_yaml_config.get("tiers")
-
-# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
-# to validate scenarios
-# Tier > 2 are not used to validate scenarios but we display the results anyway
-# tricky thing for the API as some tests are Functest tests
-# other tests are declared directly in the feature projects
-for tier in config_tiers:
-    if tier['order'] >= 0 and tier['order'] < 2:
-        for case in tier['testcases']:
-            if case['case_name'] not in blacklist:
-                testValid.append(tc.TestCase(case['case_name'],
-                                             "functest",
-                                             case['dependencies']))
-    elif tier['order'] == 2:
-        for case in tier['testcases']:
-            if case['case_name'] not in blacklist:
-                testValid.append(tc.TestCase(case['case_name'],
-                                             case['case_name'],
-                                             case['dependencies']))
-    elif tier['order'] > 2:
-        for case in tier['testcases']:
-            if case['case_name'] not in blacklist:
-                otherTestCases.append(tc.TestCase(case['case_name'],
-                                                  "functest",
-                                                  case['dependencies']))
-
-logger.debug("Functest reporting start")
-
-# For all the versions
-for version in versions:
-    # For all the installers
-    scenario_directory = "./display/" + version + "/functest/"
-    scenario_file_name = scenario_directory + "scenario_history.txt"
-
-    # check that the directory exists, if not create it
-    # (first run on new version)
-    if not os.path.exists(scenario_directory):
-        os.makedirs(scenario_directory)
-
-    # initiate scenario file if it does not exist
-    if not os.path.isfile(scenario_file_name):
-        with open(scenario_file_name, "a") as my_file:
-            logger.debug("Create scenario file: %s" % scenario_file_name)
-            my_file.write("date,scenario,installer,detail,score\n")
-
-    for installer in installers:
-
-        # get scenarios
-        scenario_results = rp_utils.getScenarios(healthcheck,
-                                                 installer,
-                                                 version)
-        # get nb of supported architecture (x86, aarch64)
-        architectures = rp_utils.getArchitectures(scenario_results)
-        logger.info("Supported architectures: {}".format(architectures))
-
-        for architecture in architectures:
-            logger.info("architecture: {}".format(architecture))
-            # Consider only the results for the selected architecture
-            # i.e drop x86 for aarch64 and vice versa
-            filter_results = rp_utils.filterArchitecture(scenario_results,
-                                                         architecture)
-            scenario_stats = rp_utils.getScenarioStats(filter_results)
-            items = {}
-            scenario_result_criteria = {}
-
-            # in case of more than 1 architecture supported
-            # precise the architecture
-            installer_display = installer
-            if (len(architectures) > 1):
-                installer_display = installer + "@" + architecture
-
-            # For all the scenarios get results
-            for s, s_result in filter_results.items():
-                logger.info("---------------------------------")
-                logger.info("installer %s, version %s, scenario %s:" %
-                            (installer, version, s))
-                logger.debug("Scenario results: %s" % s_result)
-
-                # Green or Red light for a given scenario
-                nb_test_runnable_for_this_scenario = 0
-                scenario_score = 0
-                # url of the last jenkins log corresponding to a given
-                # scenario
-                s_url = ""
-                if len(s_result) > 0:
-                    build_tag = s_result[len(s_result)-1]['build_tag']
-                    logger.debug("Build tag: %s" % build_tag)
-                    s_url = rp_utils.getJenkinsUrl(build_tag)
-                    if s_url is None:
-                        s_url = "http://testresultS.opnfv.org/reporting"
-                    logger.info("last jenkins url: %s" % s_url)
-                testCases2BeDisplayed = []
-                # Check if test case is runnable / installer, scenario
-                # for the test case used for Scenario validation
-                try:
-                    # 1) Manage the test cases for the scenario validation
-                    # concretely Tiers 0-3
-                    for test_case in testValid:
-                        test_case.checkRunnable(installer, s,
-                                                test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
-                        time.sleep(1)
-                        if test_case.isRunnable:
-                            name = test_case.getName()
-                            displayName = test_case.getDisplayName()
-                            project = test_case.getProject()
-                            nb_test_runnable_for_this_scenario += 1
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
-                            result = rp_utils.getResult(name, installer,
-                                                        s, version)
-                            # if no result set the value to 0
-                            if result < 0:
-                                result = 0
-                            logger.info(" >>>> Test score = " + str(result))
-                            test_case.setCriteria(result)
-                            test_case.setIsRunnable(True)
-                            testCases2BeDisplayed.append(tc.TestCase(name,
-                                                                     project,
-                                                                     "",
-                                                                     result,
-                                                                     True,
-                                                                     1))
-                            scenario_score = scenario_score + result
-
-                    # 2) Manage the test cases for the scenario qualification
-                    # concretely Tiers > 3
-                    for test_case in otherTestCases:
-                        test_case.checkRunnable(installer, s,
-                                                test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
-                        time.sleep(1)
-                        if test_case.isRunnable:
-                            name = test_case.getName()
-                            displayName = test_case.getDisplayName()
-                            project = test_case.getProject()
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
-                            result = rp_utils.getResult(name, installer,
-                                                        s, version)
-                            # at least 1 result for the test
-                            if result > -1:
-                                test_case.setCriteria(result)
-                                test_case.setIsRunnable(True)
-                                testCases2BeDisplayed.append(tc.TestCase(
-                                    name,
-                                    project,
-                                    "",
-                                    result,
-                                    True,
-                                    4))
-                            else:
-                                logger.debug("No results found")
-
-                        items[s] = testCases2BeDisplayed
-                except:
-                    logger.error("Error: installer %s, version %s, scenario %s"
-                                 % (installer, version, s))
-                    logger.error("No data available: %s" % (sys.exc_info()[0]))
-
-                # **********************************************
-                # Evaluate the results for scenario validation
-                # **********************************************
-                # the validation criteria = nb runnable tests x 3
-                # because each test case = 0,1,2 or 3
-                scenario_criteria = nb_test_runnable_for_this_scenario * 3
-                # if 0 runnable tests set criteria at a high value
-                if scenario_criteria < 1:
-                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
-
-                s_score = str(scenario_score) + "/" + str(scenario_criteria)
-                s_score_percent = rp_utils.getScenarioPercent(
-                    scenario_score,
-                    scenario_criteria)
-
-                s_status = "KO"
-                if scenario_score < scenario_criteria:
-                    logger.info(">>>> scenario not OK, score = %s/%s" %
-                                (scenario_score, scenario_criteria))
-                    s_status = "KO"
-                else:
-                    logger.info(">>>>> scenario OK, save the information")
-                    s_status = "OK"
-                    path_validation_file = ("./display/" + version +
-                                            "/functest/" +
-                                            "validated_scenario_history.txt")
-                    with open(path_validation_file, "a") as f:
-                        time_format = "%Y-%m-%d %H:%M"
-                        info = (datetime.datetime.now().strftime(time_format) +
-                                ";" + installer_display + ";" + s + "\n")
-                        f.write(info)
-
-                # Save daily results in a file
-                with open(scenario_file_name, "a") as f:
-                    info = (reportingDate + "," + s + "," + installer_display +
-                            "," + s_score + "," +
-                            str(round(s_score_percent)) + "\n")
-                    f.write(info)
-
-                scenario_result_criteria[s] = sr.ScenarioResult(
-                    s_status,
-                    s_score,
-                    s_score_percent,
-                    s_url)
-                logger.info("--------------------------")
-
-            templateLoader = jinja2.FileSystemLoader(".")
-            templateEnv = jinja2.Environment(
-                loader=templateLoader, autoescape=True)
-
-            TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
-            template = templateEnv.get_template(TEMPLATE_FILE)
-
-            outputText = template.render(
-                            scenario_stats=scenario_stats,
-                            scenario_results=scenario_result_criteria,
-                            items=items,
-                            installer=installer_display,
-                            period=period,
-                            version=version,
-                            date=reportingDate)
-
-            with open("./display/" + version +
-                      "/functest/status-" +
-                      installer_display + ".html", "wb") as fh:
-                fh.write(outputText)
-
-            logger.info("Manage export CSV & PDF")
-            rp_utils.export_csv(scenario_file_name, installer_display, version)
-            logger.error("CSV generated...")
-
-            # Generate outputs for export
-            # pdf
-            # TODO Change once web site updated...use the current one
-            # to test pdf production
-            url_pdf = rp_utils.get_config('general.url')
-            pdf_path = ("./display/" + version +
-                        "/functest/status-" + installer_display + ".html")
-            pdf_doc_name = ("./display/" + version +
-                            "/functest/status-" + installer_display + ".pdf")
-            rp_utils.export_pdf(pdf_path, pdf_doc_name)
-            logger.info("PDF generated...")
diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py
deleted file mode 100755
index 0304298b4..000000000
--- a/utils/test/reporting/functest/reporting-tempest.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-# SPDX-license-identifier: Apache-2.0
-
-from urllib2 import Request, urlopen, URLError
-from datetime import datetime
-import json
-import jinja2
-import os
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-installers = rp_utils.get_config('general.installers')
-items = ["tests", "Success rate", "duration"]
-
-CURRENT_DIR = os.getcwd()
-
-PERIOD = rp_utils.get_config('general.period')
-criteria_nb_test = 165
-criteria_duration = 1800
-criteria_success_rate = 90
-
-logger = rp_utils.getLogger("Tempest")
-logger.info("************************************************")
-logger.info("*   Generating reporting Tempest_smoke_serial  *")
-logger.info("*   Data retention = %s days                   *" % PERIOD)
-logger.info("*                                              *")
-logger.info("************************************************")
-
-logger.info("Success criteria:")
-logger.info("nb tests executed > %s s " % criteria_nb_test)
-logger.info("test duration < %s s " % criteria_duration)
-logger.info("success rate > %s " % criteria_success_rate)
-
-# For all the versions
-for version in rp_utils.get_config('general.versions'):
-    for installer in installers:
-        # we consider the Tempest results of the last PERIOD days
-        url = ("http://" + rp_utils.get_config('testapi.url') +
-               "?case=tempest_smoke_serial")
-        request = Request(url + '&period=' + str(PERIOD) +
-                          '&installer=' + installer +
-                          '&version=' + version)
-        logger.info("Search tempest_smoke_serial results for installer %s"
-                    " for version %s"
-                    % (installer, version))
-        try:
-            response = urlopen(request)
-            k = response.read()
-            results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
-        test_results = results['results']
-
-        scenario_results = {}
-        criteria = {}
-        errors = {}
-
-        for r in test_results:
-            # Retrieve all the scenarios per installer
-            # In Brahmaputra use version
-            # Since Colorado use scenario
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            scenario_results[r['scenario']].append(r)
-
-        for s, s_result in scenario_results.items():
-            scenario_results[s] = s_result[0:5]
-            # For each scenario, we build a result object to deal with
-            # results, criteria and error handling
-            for result in scenario_results[s]:
-                result["start_date"] = result["start_date"].split(".")[0]
-
-                # retrieve results
-                # ****************
-                nb_tests_run = result['details']['tests']
-                nb_tests_failed = result['details']['failures']
-                if nb_tests_run != 0:
-                    success_rate = 100 * ((int(nb_tests_run) -
-                                           int(nb_tests_failed)) /
-                                          int(nb_tests_run))
-                else:
-                    success_rate = 0
-
-                result['details']["tests"] = nb_tests_run
-                result['details']["Success rate"] = str(success_rate) + "%"
-
-                # Criteria management
-                # *******************
-                crit_tests = False
-                crit_rate = False
-                crit_time = False
-
-                # Expect that at least 165 tests are run
-                if nb_tests_run >= criteria_nb_test:
-                    crit_tests = True
-
-                # Expect that at least 90% of success
-                if success_rate >= criteria_success_rate:
-                    crit_rate = True
-
-                # Expect that the suite duration is inferior to 30m
-                stop_date = datetime.strptime(result['stop_date'],
-                                              '%Y-%m-%d %H:%M:%S')
-                start_date = datetime.strptime(result['start_date'],
-                                               '%Y-%m-%d %H:%M:%S')
-
-                delta = stop_date - start_date
-                if (delta.total_seconds() < criteria_duration):
-                    crit_time = True
-
-                result['criteria'] = {'tests': crit_tests,
-                                      'Success rate': crit_rate,
-                                      'duration': crit_time}
-                try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    logger.debug("Nb Test run: %s" % nb_tests_run)
-                    logger.debug("Test duration: %s"
-                                 % result['details']['duration'])
-                    logger.debug("Success rate: %s" % success_rate)
-                except:
-                    logger.error("Data format error")
-
-                # Error management
-                # ****************
-                try:
-                    errors = result['details']['errors']
-                    result['errors'] = errors.replace('{0}', '')
-                except:
-                    logger.error("Error field not present (Brahamputra runs?)")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(loader=templateLoader,
-                                         autoescape=True)
-
-        TEMPLATE_FILE = "./functest/template/index-tempest-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_results=scenario_results,
-                                     items=items,
-                                     installer=installer)
-
-        with open("./display/" + version +
-                  "/functest/tempest-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
-logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py
deleted file mode 100755
index b236b8963..000000000
--- a/utils/test/reporting/functest/reporting-vims.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from urllib2 import Request, urlopen, URLError
-import json
-import jinja2
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-logger = rp_utils.getLogger("vIMS")
-
-
-def sig_test_format(sig_test):
-    nbPassed = 0
-    nbFailures = 0
-    nbSkipped = 0
-    for data_test in sig_test:
-        if data_test['result'] == "Passed":
-            nbPassed += 1
-        elif data_test['result'] == "Failed":
-            nbFailures += 1
-        elif data_test['result'] == "Skipped":
-            nbSkipped += 1
-    total_sig_test_result = {}
-    total_sig_test_result['passed'] = nbPassed
-    total_sig_test_result['failures'] = nbFailures
-    total_sig_test_result['skipped'] = nbSkipped
-    return total_sig_test_result
-
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-url_base = rp_utils.get_config('testapi.url')
-
-logger.info("****************************************")
-logger.info("*   Generating reporting vIMS          *")
-logger.info("*   Data retention = %s days           *" % period)
-logger.info("*                                      *")
-logger.info("****************************************")
-
-installers = rp_utils.get_config('general.installers')
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
-logger.info("Start processing....")
-
-# For all the versions
-for version in versions:
-    for installer in installers:
-        logger.info("Search vIMS results for installer: %s, version: %s"
-                    % (installer, version))
-        request = Request("http://" + url_base + '?case=vims&installer=' +
-                          installer + '&version=' + version)
-
-        try:
-            response = urlopen(request)
-            k = response.read()
-            results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
-        test_results = results['results']
-
-        logger.debug("Results found: %s" % test_results)
-
-        scenario_results = {}
-        for r in test_results:
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            scenario_results[r['scenario']].append(r)
-
-        for s, s_result in scenario_results.items():
-            scenario_results[s] = s_result[0:5]
-            logger.debug("Search for success criteria")
-            for result in scenario_results[s]:
-                result["start_date"] = result["start_date"].split(".")[0]
-                sig_test = result['details']['sig_test']['result']
-                if not sig_test == "" and isinstance(sig_test, list):
-                    format_result = sig_test_format(sig_test)
-                    if format_result['failures'] > format_result['passed']:
-                        result['details']['sig_test']['duration'] = 0
-                    result['details']['sig_test']['result'] = format_result
-                nb_step_ok = 0
-                nb_step = len(result['details'])
-
-                for step_name, step_result in result['details'].items():
-                    if step_result['duration'] != 0:
-                        nb_step_ok += 1
-                    m, s = divmod(step_result['duration'], 60)
-                    m_display = ""
-                    if int(m) != 0:
-                        m_display += str(int(m)) + "m "
-
-                    step_result['duration_display'] = (m_display +
-                                                       str(int(s)) + "s")
-
-                result['pr_step_ok'] = 0
-                if nb_step != 0:
-                    result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
-                try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    res = result['details']['orchestrator']['duration']
-                    logger.debug("Orchestrator deployment: %s s"
-                                 % res)
-                    logger.debug("vIMS deployment: %s s"
-                                 % result['details']['vIMS']['duration'])
-                    logger.debug("Signaling testing: %s s"
-                                 % result['details']['sig_test']['duration'])
-                    logger.debug("Signaling testing results: %s"
-                                 % format_result)
-                except:
-                    logger.error("Data badly formatted")
-                logger.debug("----------------------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(loader=templateLoader,
-                                         autoescape=True)
-
-        TEMPLATE_FILE = "./functest/template/index-vims-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_results=scenario_results,
-                                     step_order=step_order,
-                                     installer=installer)
-
-        with open("./display/" + version + "/functest/vims-" +
-                  installer + ".html", "wb") as fh:
-            fh.write(outputText)
-
-logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/functest/scenarioResult.py b/utils/test/reporting/functest/scenarioResult.py
deleted file mode 100644
index 5a54eed96..000000000
--- a/utils/test/reporting/functest/scenarioResult.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
-
-    def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
-        self.status = status
-        self.score = score
-        self.score_percent = score_percent
-        self.url_lastrun = url_lastrun
-
-    def getStatus(self):
-        return self.status
-
-    def getScore(self):
-        return self.score
-
-    def getScorePercent(self):
-        return self.score_percent
-
-    def getUrlLastRun(self):
-        return self.url_lastrun
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
deleted file mode 100644
index cc4edaac5..000000000
--- a/utils/test/reporting/functest/template/index-status-tmpl.html
+++ /dev/null
@@ -1,157 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
-    <script type="text/javascript" src="../../js/gauge.js"></script>
-    <script type="text/javascript" src="../../js/trend.js"></script>
-    <script>
-    function onDocumentReady() {
-    	// Gauge management
-        {% for scenario in scenario_stats.iteritems() -%}
-    	    var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
-        {%- endfor %}
-
-    	// assign success rate to the gauge
-    	function updateReadings() {
-    	    {% for scenario,iteration in scenario_stats.iteritems() -%}
-    	     	gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
-    	     {%- endfor %}
-    	}
-    	updateReadings();
-        }
-
-        // trend line management
-        d3.csv("./scenario_history.txt", function(data) {
-       // ***************************************
-       // Create the trend line
-      {% for scenario,iteration in scenario_stats.iteritems() -%}
-       // for scenario {{scenario}}
-       // Filter results
-        var trend{{loop.index}} = data.filter(function(row) {
-    	     return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
-    	})
-       // Parse the date
-        trend{{loop.index}}.forEach(function(d) {
-    	    d.date = parseDate(d.date);
-    	    d.score = +d.score
-        });
-        // Draw the trend line
-        var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
-        // ****************************************
-        {%- endfor %}
-    });
-    if ( !window.isLoaded ) {
-        window.addEventListener("load", function() {
-    			onDocumentReady();
-        }, false);
-    } else {
-    	onDocumentReady();
-    }
-</script>
-<script type="text/javascript">
-$(document).ready(function (){
-    $(".btn-more").click(function() {
-    	$(this).hide();
-    	$(this).parent().find(".panel-default").show();
-    });
-})
-</script>
-
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="../../index.html">Home</a></li>
-            <li><a href="status-apex.html">Apex</a></li>
-            <li><a href="status-compass.html">Compass</a></li>
-            <li><a href="status-fuel@x86.html">fuel@x86</a></li>
-            <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
-            <li><a href="status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-
-        <div class="scenario-overview">
-            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
-                <table class="table">
-                    <tr>
-                        <th width="40%">Scenario</th>
-                        <th width="20%">Status</th>
-                        <th width="20%">Trend</th>
-                        <th width="10%">Score</th>
-                        <th width="10%">Iteration</th>
-                    </tr>
-                        {% for scenario,iteration in scenario_stats.iteritems() -%}
-                            <tr class="tr-ok">
-                                <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
-                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
-                                <td><div id="trend_svg{{loop.index}}"></div></td>
-                                <td>{{scenario_results[scenario].getScore()}}</td>
-                                <td>{{iteration}}</td>
-                            </tr>
-                            {%- endfor %}
-                        </table>
-        </div>
-
-
-        {% for scenario, iteration in scenario_stats.iteritems() -%}
-        <div class="scenario-part">
-            <div class="page-header">
-                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
-            </div>
-                    <div class="panel panel-default">
-                    <div class="panel-heading">
-                        <span class="panel-header-item">
-                        </span>
-                    </div>
-                    <table class="table">
-                        <tr>
-                            {% for test in items[scenario] -%}
-                            <th>
-                            {% if test.getCriteria() > -1 -%}
-                            {{test.getDisplayName() }}
-                            {%- endif %}
-                            {% if test.getTier() > 3 -%}
-                            *
-                            {%- endif %}
-                             </th>
-                            {%- endfor %}
-                        </tr>
-                        <tr class="tr-weather-weather">
-                            {% for test in items[scenario] -%}
-                            {% if test.getCriteria() > 2 -%}
-                                <td><img src="../../img/weather-clear.png"></td>
-                            {%- elif test.getCriteria() > 1 -%}
-                                <td><img src="../../img/weather-few-clouds.png"></td>
-                            {%- elif test.getCriteria() > 0 -%}
-                                <td><img src="../../img/weather-overcast.png"></td>
-                            {%- elif test.getCriteria() > -1 -%}
-                                <td><img src="../../img/weather-storm.png"></td>
-                            {%- endif %}
-                            {%- endfor %}
-                        </tr>
-                    </table>
-                </div>
-        </div>
-        {%- endfor %}
-    see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
-     <div> <br>
-    <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a>   <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
-    </div>
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/functest/template/index-tempest-tmpl.html
deleted file mode 100644
index 3a222276e..000000000
--- a/utils/test/reporting/functest/template/index-tempest-tmpl.html
+++ /dev/null
@@ -1,95 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">Tempest status page</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="../../index.html">Home</a></li>
-            <li><a href="tempest-apex.html">Apex</a></li>
-            <li><a href="tempest-compass.html">Compass</a></li>
-            <li><a href="tempest-daisy.html">Daisy</a></li>
-            <li><a href="tempest-fuel.html">Fuel</a></li>
-            <li><a href="tempest-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-        {% for scenario_name, results in scenario_results.iteritems() -%}
-        <div class="scenario-part">
-            <div class="page-header">
-                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
-            </div>
-            {% for result in results -%}
-                {% if loop.index > 2 -%}
-                    <div class="panel panel-default" hidden>
-                {%- else -%}
-                    <div class="panel panel-default">
-                {%- endif %}
-                        <div class="panel-heading">
-                            <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
-                            <span class="panel-header-item">
-                                <h4><b>{{result.start_date}}</b></h4>
-                            </span>
-                            <span class="badge panel-pod-name">{{result.pod_name}}</span>
-                        </div>
-                        <table class="table">
-                            <tr>
-                                <th width="20%">Item</th>
-                                <th width="10%">Result</th>
-                                <th width="10%">Status</th>
-                                <th width="60%">Errors</th>
-                            </tr>
-                            {% for item in items -%}
-                                {% if item in result.details.keys() -%}
-                                    {% if result.criteria[item] -%}
-                                        <tr class="tr-ok">
-                                            <td>{{item}}</td>
-                                            <td>{{result.details[item]}}</td>
-                                            <td><span class="glyphicon glyphicon-ok"></td>
-                                            {% if item is equalto "Success rate" %}
-                                            <td>{{result.errors}}</td>
-                                            {% endif %}
-                                        </tr>
-                                    {%- else -%}
-                                        <tr class="tr-danger">
-                                            <td>{{item}}</td>
-                                            <td>{{result.details[item]}}</td>
-                                            <td><span class="glyphicon glyphicon-remove"></td>
-                                            {% if item is equalto "Success rate" %}
-                                            <td>{{result.errors}}</td>
-                                            {% endif %}                                            
-                                        </tr>
-                                    {%- endif %}
-                                {%- endif %}
-                            {%- endfor %}
-                        </table>
-                    </div>
-            {%- endfor %}
-            <button type="button" class="btn btn-more">More than two</button>
-        </div>
-        {%- endfor %}
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/functest/template/index-vims-tmpl.html
deleted file mode 100644
index cd51607b7..000000000
--- a/utils/test/reporting/functest/template/index-vims-tmpl.html
+++ /dev/null
@@ -1,92 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">vIMS status page</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="../../index.html">Home</a></li>
-            <li><a href="vims-fuel.html">Fuel</a></li>
-            <li><a href="vims-compass.html">Compass</a></li>
-            <li><a href="vims-daisy.html">Daisy</a></li>
-            <li><a href="vims-joid.html">JOID</a></li>
-            <li><a href="vims-apex.html">APEX</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-        {% for scenario_name, results in scenario_results.iteritems() -%}
-        <div class="scenario-part">
-            <div class="page-header">
-                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
-            </div>
-            {% for result in results -%}
-                {% if loop.index > 2 -%}
-                    <div class="panel panel-default" hidden>
-                {%- else -%}
-                    <div class="panel panel-default">
-                {%- endif %}
-                        <div class="panel-heading">
-                            <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
-                            <span class="panel-header-item">
-                                <h4><b>{{result.start_date}}</b></h4>
-                            </span>
-                            <span class="badge panel-pod-name">{{result.pod_name}}</span>
-                        </div>
-                        <table class="table">
-                            <tr>
-                                <th width="20%">Step</th>
-                                <th width="10%">Status</th>
-                                <th width="10%">Duration</th>
-                                <th width="60%">Result</th>
-                            </tr>
-                            {% for step_od_name in step_order -%}
-                                {% if step_od_name in result.details.keys() -%}
-                                    {% set step_result = result.details[step_od_name] -%}
-                                    {% if step_result.duration != 0 -%}
-                                        <tr class="tr-ok">
-                                            <td>{{step_od_name}}</td>
-                                            <td><span class="glyphicon glyphicon-ok"></td>
-                                            <td><b>{{step_result.duration_display}}</b></td>
-                                            <td>{{step_result.result}}</td>
-                                        </tr>
-                                    {%- else -%}
-                                        <tr class="tr-danger">
-                                            <td>{{step_od_name}}</td>
-                                            <td><span class="glyphicon glyphicon-remove"></td>
-                                            <td><b>0s</b></td>
-                                            <td>{{step_result.result}}</td>
-                                        </tr>
-                                    {%- endif %}
-                                {%- endif %}
-                            {%- endfor %}
-                        </table>
-                    </div>
-            {%- endfor %}
-            <button type="button" class="btn btn-more">More than two</button>
-        </div>
-        {%- endfor %}
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
deleted file mode 100644
index 9834f0753..000000000
--- a/utils/test/reporting/functest/testCase.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import re
-
-
-class TestCase(object):
-
-    def __init__(self, name, project, constraints,
-                 criteria=-1, isRunnable=True, tier=-1):
-        self.name = name
-        self.project = project
-        self.constraints = constraints
-        self.criteria = criteria
-        self.isRunnable = isRunnable
-        self.tier = tier
-        display_name_matrix = {'healthcheck': 'healthcheck',
-                               'vping_ssh': 'vPing (ssh)',
-                               'vping_userdata': 'vPing (userdata)',
-                               'odl': 'ODL',
-                               'onos': 'ONOS',
-                               'ocl': 'OCL',
-                               'tempest_smoke_serial': 'Tempest (smoke)',
-                               'tempest_full_parallel': 'Tempest (full)',
-                               'tempest_defcore': 'Tempest (Defcore)',
-                               'refstack_defcore': 'Refstack',
-                               'rally_sanity': 'Rally (smoke)',
-                               'bgpvpn': 'bgpvpn',
-                               'rally_full': 'Rally (full)',
-                               'vims': 'vIMS',
-                               'doctor-notification': 'Doctor',
-                               'promise': 'Promise',
-                               'moon': 'Moon',
-                               'copper': 'Copper',
-                               'security_scan': 'Security',
-                               'multisite': 'Multisite',
-                               'domino-multinode': 'Domino',
-                               'functest-odl-sfc': 'SFC',
-                               'onos_sfc': 'SFC',
-                               'parser-basics': 'Parser',
-                               'connection_check': 'Health (connection)',
-                               'api_check': 'Health (api)',
-                               'snaps_smoke': 'SNAPS',
-                               'snaps_health_check': 'Health (dhcp)',
-                               'gluon_vping': 'Netready',
-                               'fds': 'FDS',
-                               'cloudify_ims': 'vIMS (Cloudify)',
-                               'orchestra_ims': 'OpenIMS (OpenBaton)',
-                               'opera_ims': 'vIMS (Open-O)',
-                               'vyos_vrouter': 'vyos',
-                               'barometercollectd': 'Barometer',
-                               'odl_netvirt': 'Netvirt',
-                               'security_scan': 'Security'}
-        try:
-            self.displayName = display_name_matrix[self.name]
-        except:
-            self.displayName = "unknown"
-
-    def getName(self):
-        return self.name
-
-    def getProject(self):
-        return self.project
-
-    def getConstraints(self):
-        return self.constraints
-
-    def getCriteria(self):
-        return self.criteria
-
-    def getTier(self):
-        return self.tier
-
-    def setCriteria(self, criteria):
-        self.criteria = criteria
-
-    def setIsRunnable(self, isRunnable):
-        self.isRunnable = isRunnable
-
-    def checkRunnable(self, installer, scenario, config):
-        # Re-use Functest declaration
-        # Retrieve Functest configuration file functest_config.yaml
-        is_runnable = True
-        config_test = config
-        # print " *********************** "
-        # print TEST_ENV
-        # print " ---------------------- "
-        # print "case = " + self.name
-        # print "installer = " + installer
-        # print "scenario = " + scenario
-        # print "project = " + self.project
-
-        # Retrieve test constraints
-        # Retrieve test execution param
-        test_execution_context = {"installer": installer,
-                                  "scenario": scenario}
-
-        # By default we assume that all the tests are always runnable...
-        # if test_env not empty => dependencies to be checked
-        if config_test is not None and len(config_test) > 0:
-            # possible criteria = ["installer", "scenario"]
-            # consider test criteria from config file
-            # compare towards CI env through CI en variable
-            for criteria in config_test:
-                if re.search(config_test[criteria],
-                             test_execution_context[criteria]) is None:
-                    # print "Test "+ test + " cannot be run on the environment"
-                    is_runnable = False
-        # print is_runnable
-        self.isRunnable = is_runnable
-
-    def toString(self):
-        testcase = ("Name=" + self.name + ";Criteria=" +
-                    str(self.criteria) + ";Project=" + self.project +
-                    ";Constraints=" + str(self.constraints) +
-                    ";IsRunnable" + str(self.isRunnable))
-        return testcase
-
-    def getDisplayName(self):
-        return self.displayName
diff --git a/utils/test/reporting/qtip/__init__.py b/utils/test/reporting/qtip/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/qtip/index.html b/utils/test/reporting/qtip/index.html
deleted file mode 100644
index 0f9df8564..000000000
--- a/utils/test/reporting/qtip/index.html
+++ /dev/null
@@ -1,51 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">QTIP reporting page</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="index-status-apex.html">Apex</a></li>
-            <li><a href="index-status-compass.html">Compass</a></li>
-            <li><a href="index-status-fuel.html">Fuel</a></li>
-            <li><a href="index-status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-main">
-            <h2>QTIP</h2>
-            QTIP is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
-            <br>The QTIP framework is deployed in several OPNFV community labs.
-            <br>It is installer, infrastructure and application independent.
-
-            <h2>Useful Links</h2>
-            <li><a href="https://wiki.opnfv.org/download/attachments/5734608/qtip%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">QTIP in Depth</a></li>
-            <li><a href="https://git.opnfv.org/cgit/qtip">QTIP Repo</a></li>
-            <li><a href="https://wiki.opnfv.org/display/qtip">QTIP Project</a></li>
-            <li><a href="https://build.opnfv.org/ci/view/qtip/">QTIP Jenkins page</a></li>
-            <li><a href="https://jira.opnfv.org/browse/QTIP-119?jql=project%20%3D%20QTIP">JIRA</a></li>
-
-        </div>
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/qtip/reporting-status.py b/utils/test/reporting/qtip/reporting-status.py
deleted file mode 100644
index 5967cf6b9..000000000
--- a/utils/test/reporting/qtip/reporting-status.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import os
-
-import jinja2
-import utils.reporting_utils as rp_utils
-import utils.scenarioResult as sr
-
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Qtip-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention = %s days              *" % PERIOD)
-logger.info("*                                         *")
-logger.info("*******************************************")
-
-
-def prepare_profile_file(version):
-    profile_dir = './display/{}/qtip'.format(version)
-    if not os.path.exists(profile_dir):
-        os.makedirs(profile_dir)
-
-    profile_file = '{}/scenario_history.txt'.format(profile_dir, version)
-    if not os.path.exists(profile_file):
-        with open(profile_file, 'w') as f:
-            info = 'date,scenario,installer,details,score\n'
-            f.write(info)
-            f.close()
-    return profile_file
-
-
-def profile_results(results, installer, profile_fd):
-    result_criterias = {}
-    for s_p, s_p_result in results.iteritems():
-        ten_criteria = len(s_p_result)
-        ten_score = sum(s_p_result)
-
-        LASTEST_TESTS = rp_utils.get_config(
-            'general.nb_iteration_tests_success_criteria')
-        four_result = s_p_result[:LASTEST_TESTS]
-        four_criteria = len(four_result)
-        four_score = sum(four_result)
-
-        s_four_score = str(four_score / four_criteria)
-        s_ten_score = str(ten_score / ten_criteria)
-
-        info = '{},{},{},{},{}\n'.format(reportingDate,
-                                         s_p,
-                                         installer,
-                                         s_ten_score,
-                                         s_four_score)
-        profile_fd.write(info)
-        result_criterias[s_p] = sr.ScenarioResult('OK',
-                                                  s_four_score,
-                                                  s_ten_score,
-                                                  '100')
-
-        logger.info("--------------------------")
-    return result_criterias
-
-
-def render_html(prof_results, installer, version):
-    template_loader = jinja2.FileSystemLoader(".")
-    template_env = jinja2.Environment(loader=template_loader,
-                                      autoescape=True)
-
-    template_file = "./qtip/template/index-status-tmpl.html"
-    template = template_env.get_template(template_file)
-
-    render_outcome = template.render(prof_results=prof_results,
-                                     installer=installer,
-                                     period=PERIOD,
-                                     version=version,
-                                     date=reportingDate)
-
-    with open('./display/{}/qtip/status-{}.html'.format(version, installer),
-              'wb') as fh:
-        fh.write(render_outcome)
-
-
-def render_reporter():
-    for version in versions:
-        profile_file = prepare_profile_file(version)
-        profile_fd = open(profile_file, 'a')
-        for installer in installers:
-            results = rp_utils.getQtipResults(version, installer)
-            prof_results = profile_results(results, installer, profile_fd)
-            render_html(prof_results=prof_results,
-                        installer=installer,
-                        version=version)
-        profile_fd.close()
-        logger.info("Manage export CSV")
-        rp_utils.generate_csv(profile_file)
-        logger.info("CSV generated...")
-
-if __name__ == '__main__':
-    render_reporter()
diff --git a/utils/test/reporting/qtip/template/index-status-tmpl.html b/utils/test/reporting/qtip/template/index-status-tmpl.html
deleted file mode 100644
index 26da36ceb..000000000
--- a/utils/test/reporting/qtip/template/index-status-tmpl.html
+++ /dev/null
@@ -1,86 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
-    <script type="text/javascript" src="../../js/trend-qtip.js"></script>
-    <script>
-        // trend line management
-        d3.csv("./scenario_history.csv", function(data) {
-            // ***************************************
-            // Create the trend line
-            {% for scenario in prof_results.keys() -%}
-            // for scenario {{scenario}}
-            // Filter results
-                var trend{{loop.index}} = data.filter(function(row) {
-                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
-                })
-            // Parse the date
-            trend{{loop.index}}.forEach(function(d) {
-                d.date = parseDate(d.date);
-                d.score = +d.score
-            });
-            // Draw the trend line
-            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
-            // ****************************************
-            {%- endfor %}
-        });
-    </script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-          <h3 class="text-muted">QTIP status page ({{version}}, {{date}})</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="index-status-apex.html">Apex</a></li>
-            <li><a href="index-status-compass.html">Compass</a></li>
-            <li><a href="index-status-fuel.html">Fuel</a></li>
-            <li><a href="index-status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-
-        <div class="scenario-overview">
-            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
-                <table class="table">
-                    <tr>
-                        <th width="25%">Pod/Scenario</th>
-                        <th width="25%">Trend</th>
-                        <th width="25%">Last 4 Iterations</th>
-                        <th width="25%">Last 10 Days</th>
-                    </tr>
-                        {% for scenario,result in prof_results.iteritems() -%}
-                            <tr class="tr-ok">
-                                <td>{{scenario}}</td>
-                                <td><div id="trend_svg{{loop.index}}"></div></td>
-                                <td>{{prof_results[scenario].getFourDaysScore()}}</td>
-                                <td>{{prof_results[scenario].getTenDaysScore()}}</td>
-                            </tr>
-                        {%- endfor %}
-                </table>
-        </div>
-
-
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml
deleted file mode 100644
index 1692f481d..000000000
--- a/utils/test/reporting/reporting.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-general:
-    installers:
-        - apex
-        - compass
-        - fuel
-        - joid
-
-    versions:
-        - master
-        - danube
-
-    log:
-        log_file: reporting.log
-        log_level: ERROR
-
-    period: 10
-
-    nb_iteration_tests_success_criteria: 4
-
-    directories:
-        # Relative to the path where the repo is cloned:
-        dir_reporting: utils/tests/reporting/
-        dir_log: utils/tests/reporting/log/
-        dir_conf: utils/tests/reporting/conf/
-        dir_utils: utils/tests/reporting/utils/
-        dir_templates: utils/tests/reporting/templates/
-        dir_display: utils/tests/reporting/display/
-
-    url: testresults.opnfv.org/reporting/
-
-testapi:
-    url: testresults.opnfv.org/test/api/v1/results
-
-functest:
-    blacklist:
-        - ovno
-        - security_scan
-        - healthcheck
-        - odl_netvirt
-        - aaa
-        - cloudify_ims
-        - orchestra_ims
-        - juju_epc
-        - orchestra
-    max_scenario_criteria: 50
-    test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
-    log_level: ERROR
-    jenkins_url: https://build.opnfv.org/ci/view/functest/job/
-    exclude_noha: False
-    exclude_virtual: False
-
-yardstick:
-    test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
-    log_level: ERROR
-
-storperf:
-    test_list:
-        - snia_steady_state
-    log_level: ERROR
-
-qtip:
-    log_level: ERROR
-    period: 1
-
-bottleneck:
-
-vsperf:
diff --git a/utils/test/reporting/reporting/__init__.py b/utils/test/reporting/reporting/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/functest/__init__.py b/utils/test/reporting/reporting/functest/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/functest/img/gauge_0.png b/utils/test/reporting/reporting/functest/img/gauge_0.png
new file mode 100644
index 000000000..ecefc0e66
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_0.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_100.png b/utils/test/reporting/reporting/functest/img/gauge_100.png
new file mode 100644
index 000000000..e199e1561
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_100.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/reporting/functest/img/gauge_16.7.png
new file mode 100644
index 000000000..3e3993c3b
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_16.7.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_25.png b/utils/test/reporting/reporting/functest/img/gauge_25.png
new file mode 100644
index 000000000..4923659b9
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_25.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/reporting/functest/img/gauge_33.3.png
new file mode 100644
index 000000000..364574b4a
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_33.3.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/reporting/functest/img/gauge_41.7.png
new file mode 100644
index 000000000..8c3e910fa
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_41.7.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_50.png b/utils/test/reporting/reporting/functest/img/gauge_50.png
new file mode 100644
index 000000000..2874b9fcf
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_50.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/reporting/functest/img/gauge_58.3.png
new file mode 100644
index 000000000..beedc8aa9
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_58.3.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/reporting/functest/img/gauge_66.7.png
new file mode 100644
index 000000000..93f44d133
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_66.7.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_75.png b/utils/test/reporting/reporting/functest/img/gauge_75.png
new file mode 100644
index 000000000..9fc261ff8
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_75.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/reporting/functest/img/gauge_8.3.png
new file mode 100644
index 000000000..59f86571e
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_8.3.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/reporting/functest/img/gauge_83.3.png
new file mode 100644
index 000000000..27ae4ec54
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_83.3.png differ
diff --git a/utils/test/reporting/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/reporting/functest/img/gauge_91.7.png
new file mode 100644
index 000000000..280865714
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/gauge_91.7.png differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-nok.png b/utils/test/reporting/reporting/functest/img/icon-nok.png
new file mode 100644
index 000000000..526b5294b
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/icon-nok.png differ
diff --git a/utils/test/reporting/reporting/functest/img/icon-ok.png b/utils/test/reporting/reporting/functest/img/icon-ok.png
new file mode 100644
index 000000000..3a9de2e89
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/icon-ok.png differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-clear.png b/utils/test/reporting/reporting/functest/img/weather-clear.png
new file mode 100644
index 000000000..a0d967750
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/weather-clear.png differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-few-clouds.png b/utils/test/reporting/reporting/functest/img/weather-few-clouds.png
new file mode 100644
index 000000000..acfa78398
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/weather-few-clouds.png differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-overcast.png b/utils/test/reporting/reporting/functest/img/weather-overcast.png
new file mode 100644
index 000000000..4296246d0
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/weather-overcast.png differ
diff --git a/utils/test/reporting/reporting/functest/img/weather-storm.png b/utils/test/reporting/reporting/functest/img/weather-storm.png
new file mode 100644
index 000000000..956f0e20f
Binary files /dev/null and b/utils/test/reporting/reporting/functest/img/weather-storm.png differ
diff --git a/utils/test/reporting/reporting/functest/index.html b/utils/test/reporting/reporting/functest/index.html
new file mode 100644
index 000000000..bb1bce209
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/index.html
@@ -0,0 +1,53 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">Functest reporting page</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-main">
+            <h2>Functest</h2>
+             This project develops test suites that cover functional test cases in OPNFV.
+             <br>The test suites are integrated in the continuous integration (CI) framework and used to evaluate/validate scenarios.
+             <br> Weekly meeting: every Tuesday 8 AM UTC
+             <br> IRC chan #opnfv-testperf
+
+            <br>
+            <h2>Useful Links</h2>
+            <li><a href="http://events.linuxfoundation.org/sites/events/files/slides/Functest%20in%20Depth_0.pdf">Functest in Depth</a></li>
+            <li><a href="https://git.opnfv.org/cgit/functest">Functest Repo</a></li>
+            <li><a href="https://wiki.opnfv.org/opnfv_functional_testing">Functest Project</a></li>
+            <li><a href="https://build.opnfv.org/ci/view/functest/">Functest Jenkins page</a></li>
+            <li><a href="https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=59&projectKey=FUNCTEST">JIRA</a></li>
+
+        </div>
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
new file mode 100755
index 000000000..48c4bb1c7
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/reporting-status.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+import sys
+import time
+
+import testCase as tc
+import scenarioResult as sr
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+"""Functest reporting status"""
+
+# Logger
+logger = rp_utils.getLogger("Functest-Status")
+
+# Initialization
+testValid = []
+otherTestCases = []
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+# init just connection_check to get the list of scenarios
+# as all the scenarios run connection_check
+healthcheck = tc.TestCase("connection_check", "functest", -1)
+
+# Retrieve the Functest configuration to detect which tests are relevant
+# according to the installer, scenario
+cf = rp_utils.get_config('functest.test_conf')
+period = rp_utils.get_config('general.period')
+versions = rp_utils.get_config('general.versions')
+installers = rp_utils.get_config('general.installers')
+blacklist = rp_utils.get_config('functest.blacklist')
+log_level = rp_utils.get_config('general.log.log_level')
+exclude_noha = rp_utils.get_config('functest.exclude_noha')
+exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
+
+functest_yaml_config = rp_utils.getFunctestConfig()
+
+logger.info("*******************************************")
+logger.info("*                                         *")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention: %s days               *" % period)
+logger.info("*   Log level: %s                         *" % log_level)
+logger.info("*                                         *")
+logger.info("*   Virtual PODs exluded: %s              *" % exclude_virtual)
+logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
+logger.info("*                                         *")
+logger.info("*******************************************")
+
+# Retrieve the test cases declared per tier in the Functest configuration
+config_tiers = functest_yaml_config.get("tiers")
+
+# Tier 0 (healthcheck) and Tier 1 (smoke) validate scenarios and are
+# declared under the "functest" project.
+# Tier 2 (features) also validates scenarios, but its cases are declared
+# directly in their own feature project, hence the case name doubles as
+# the project name below (tricky for the API).
+# Tiers > 2 do not validate scenarios; their results are only displayed.
+for tier in config_tiers:
+    # scenario validation: tiers 0 and 1, run under the functest project
+    if tier['order'] >= 0 and tier['order'] < 2:
+        for case in tier['testcases']:
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
+                                             "functest",
+                                             case['dependencies']))
+    # scenario validation: tier 2, feature projects
+    elif tier['order'] == 2:
+        for case in tier['testcases']:
+            if case['case_name'] not in blacklist:
+                testValid.append(tc.TestCase(case['case_name'],
+                                             case['case_name'],
+                                             case['dependencies']))
+    # tiers > 2: informational only, not used for scenario scoring
+    elif tier['order'] > 2:
+        for case in tier['testcases']:
+            if case['case_name'] not in blacklist:
+                otherTestCases.append(tc.TestCase(case['case_name'],
+                                                  "functest",
+                                                  case['dependencies']))
+
+logger.debug("Functest reporting start")
+
+# For all the versions
+for version in versions:
+    # For all the installers
+    scenario_directory = "./display/" + version + "/functest/"
+    scenario_file_name = scenario_directory + "scenario_history.txt"
+
+    # check that the directory exists, if not create it
+    # (first run on new version)
+    if not os.path.exists(scenario_directory):
+        os.makedirs(scenario_directory)
+
+    # initiate scenario file if it does not exist
+    if not os.path.isfile(scenario_file_name):
+        with open(scenario_file_name, "a") as my_file:
+            logger.debug("Create scenario file: %s" % scenario_file_name)
+            my_file.write("date,scenario,installer,detail,score\n")
+
+    for installer in installers:
+
+        # get scenarios
+        scenario_results = rp_utils.getScenarios(healthcheck,
+                                                 installer,
+                                                 version)
+        # get nb of supported architecture (x86, aarch64)
+        architectures = rp_utils.getArchitectures(scenario_results)
+        logger.info("Supported architectures: {}".format(architectures))
+
+        for architecture in architectures:
+            logger.info("architecture: {}".format(architecture))
+            # Consider only the results for the selected architecture
+            # i.e drop x86 for aarch64 and vice versa
+            filter_results = rp_utils.filterArchitecture(scenario_results,
+                                                         architecture)
+            scenario_stats = rp_utils.getScenarioStats(filter_results)
+            items = {}
+            scenario_result_criteria = {}
+
+            # in case of more than 1 architecture supported
+            # precise the architecture
+            installer_display = installer
+            if (len(architectures) > 1):
+                installer_display = installer + "@" + architecture
+
+            # For all the scenarios get results
+            for s, s_result in filter_results.items():
+                logger.info("---------------------------------")
+                logger.info("installer %s, version %s, scenario %s:" %
+                            (installer, version, s))
+                logger.debug("Scenario results: %s" % s_result)
+
+                # Green or Red light for a given scenario
+                nb_test_runnable_for_this_scenario = 0
+                scenario_score = 0
+                # url of the last jenkins log corresponding to a given
+                # scenario
+                s_url = ""
+                if len(s_result) > 0:
+                    build_tag = s_result[len(s_result)-1]['build_tag']
+                    logger.debug("Build tag: %s" % build_tag)
+                    s_url = rp_utils.getJenkinsUrl(build_tag)
+                    if s_url is None:
+                        s_url = "http://testresultS.opnfv.org/reporting"
+                    logger.info("last jenkins url: %s" % s_url)
+                testCases2BeDisplayed = []
+                # Check if test case is runnable / installer, scenario
+                # for the test case used for Scenario validation
+                try:
+                    # 1) Manage the test cases for the scenario validation
+                    # concretely Tiers 0-3
+                    for test_case in testValid:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            nb_test_runnable_for_this_scenario += 1
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(name, installer,
+                                                        s, version)
+                            # if no result set the value to 0
+                            if result < 0:
+                                result = 0
+                            logger.info(" >>>> Test score = " + str(result))
+                            test_case.setCriteria(result)
+                            test_case.setIsRunnable(True)
+                            testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                     project,
+                                                                     "",
+                                                                     result,
+                                                                     True,
+                                                                     1))
+                            scenario_score = scenario_score + result
+
+                    # 2) Manage the test cases for the scenario qualification
+                    # concretely Tiers > 3
+                    for test_case in otherTestCases:
+                        test_case.checkRunnable(installer, s,
+                                                test_case.getConstraints())
+                        logger.debug("testcase %s (%s) is %s" %
+                                     (test_case.getDisplayName(),
+                                      test_case.getName(),
+                                      test_case.isRunnable))
+                        time.sleep(1)
+                        if test_case.isRunnable:
+                            name = test_case.getName()
+                            displayName = test_case.getDisplayName()
+                            project = test_case.getProject()
+                            logger.info(" Searching results for case %s " %
+                                        (displayName))
+                            result = rp_utils.getResult(name, installer,
+                                                        s, version)
+                            # at least 1 result for the test
+                            if result > -1:
+                                test_case.setCriteria(result)
+                                test_case.setIsRunnable(True)
+                                testCases2BeDisplayed.append(tc.TestCase(
+                                    name,
+                                    project,
+                                    "",
+                                    result,
+                                    True,
+                                    4))
+                            else:
+                                logger.debug("No results found")
+
+                        items[s] = testCases2BeDisplayed
+                except:
+                    logger.error("Error: installer %s, version %s, scenario %s"
+                                 % (installer, version, s))
+                    logger.error("No data available: %s" % (sys.exc_info()[0]))
+
+                # **********************************************
+                # Evaluate the results for scenario validation
+                # **********************************************
+                # the validation criteria = nb runnable tests x 3
+                # because each test case = 0,1,2 or 3
+                scenario_criteria = nb_test_runnable_for_this_scenario * 3
+                # if 0 runnable tests set criteria at a high value
+                if scenario_criteria < 1:
+                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
+
+                s_score = str(scenario_score) + "/" + str(scenario_criteria)
+                s_score_percent = rp_utils.getScenarioPercent(
+                    scenario_score,
+                    scenario_criteria)
+
+                s_status = "KO"
+                if scenario_score < scenario_criteria:
+                    logger.info(">>>> scenario not OK, score = %s/%s" %
+                                (scenario_score, scenario_criteria))
+                    s_status = "KO"
+                else:
+                    logger.info(">>>>> scenario OK, save the information")
+                    s_status = "OK"
+                    path_validation_file = ("./display/" + version +
+                                            "/functest/" +
+                                            "validated_scenario_history.txt")
+                    with open(path_validation_file, "a") as f:
+                        time_format = "%Y-%m-%d %H:%M"
+                        info = (datetime.datetime.now().strftime(time_format) +
+                                ";" + installer_display + ";" + s + "\n")
+                        f.write(info)
+
+                # Save daily results in a file
+                with open(scenario_file_name, "a") as f:
+                    info = (reportingDate + "," + s + "," + installer_display +
+                            "," + s_score + "," +
+                            str(round(s_score_percent)) + "\n")
+                    f.write(info)
+
+                scenario_result_criteria[s] = sr.ScenarioResult(
+                    s_status,
+                    s_score,
+                    s_score_percent,
+                    s_url)
+                logger.info("--------------------------")
+
+            templateLoader = jinja2.FileSystemLoader(".")
+            templateEnv = jinja2.Environment(
+                loader=templateLoader, autoescape=True)
+
+            TEMPLATE_FILE = ("./reporting/functest/template"
+                             "/index-status-tmpl.html")
+            template = templateEnv.get_template(TEMPLATE_FILE)
+
+            outputText = template.render(
+                            scenario_stats=scenario_stats,
+                            scenario_results=scenario_result_criteria,
+                            items=items,
+                            installer=installer_display,
+                            period=period,
+                            version=version,
+                            date=reportingDate)
+
+            with open("./display/" + version +
+                      "/functest/status-" +
+                      installer_display + ".html", "wb") as fh:
+                fh.write(outputText)
+
+            logger.info("Manage export CSV & PDF")
+            rp_utils.export_csv(scenario_file_name, installer_display, version)
+            logger.error("CSV generated...")
+
+            # Generate outputs for export
+            # pdf
+            # TODO Change once web site updated...use the current one
+            # to test pdf production
+            url_pdf = rp_utils.get_config('general.url')
+            pdf_path = ("./display/" + version +
+                        "/functest/status-" + installer_display + ".html")
+            pdf_doc_name = ("./display/" + version +
+                            "/functest/status-" + installer_display + ".pdf")
+            rp_utils.export_pdf(pdf_path, pdf_doc_name)
+            logger.info("PDF generated...")
diff --git a/utils/test/reporting/reporting/functest/reporting-tempest.py b/utils/test/reporting/reporting/functest/reporting-tempest.py
new file mode 100755
index 000000000..bc2885639
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/reporting-tempest.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# SPDX-license-identifier: Apache-2.0
+
+from urllib2 import Request, urlopen, URLError
+from datetime import datetime
+import json
+import jinja2
+import os
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+installers = rp_utils.get_config('general.installers')
+items = ["tests", "Success rate", "duration"]
+
+CURRENT_DIR = os.getcwd()
+
+PERIOD = rp_utils.get_config('general.period')
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = rp_utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("*   Generating reporting Tempest_smoke_serial  *")
+logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*                                              *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
+# For all the versions
+for version in rp_utils.get_config('general.versions'):
+    for installer in installers:
+        # we consider the Tempest results of the last PERIOD days
+        url = ("http://" + rp_utils.get_config('testapi.url') +
+               "?case=tempest_smoke_serial")
+        request = Request(url + '&period=' + str(PERIOD) +
+                          '&installer=' + installer +
+                          '&version=' + version)
+        logger.info("Search tempest_smoke_serial results for installer %s"
+                    " for version %s"
+                    % (installer, version))
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError as e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        scenario_results = {}
+        criteria = {}
+        errors = {}
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            # In Brahmaputra use version
+            # Since Colorado use scenario
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            # For each scenario, we build a result object to deal with
+            # results, criteria and error handling
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+
+                # retrieve results
+                # ****************
+                nb_tests_run = result['details']['tests']
+                nb_tests_failed = result['details']['failures']
+                if nb_tests_run != 0:
+                    success_rate = 100 * ((int(nb_tests_run) -
+                                           int(nb_tests_failed)) /
+                                          int(nb_tests_run))
+                else:
+                    success_rate = 0
+
+                result['details']["tests"] = nb_tests_run
+                result['details']["Success rate"] = str(success_rate) + "%"
+
+                # Criteria management
+                # *******************
+                crit_tests = False
+                crit_rate = False
+                crit_time = False
+
+                # Expect that at least 165 tests are run
+                if nb_tests_run >= criteria_nb_test:
+                    crit_tests = True
+
+                # Expect that at least 90% of success
+                if success_rate >= criteria_success_rate:
+                    crit_rate = True
+
+                # Expect that the suite duration is inferior to 30m
+                stop_date = datetime.strptime(result['stop_date'],
+                                              '%Y-%m-%d %H:%M:%S')
+                start_date = datetime.strptime(result['start_date'],
+                                               '%Y-%m-%d %H:%M:%S')
+
+                delta = stop_date - start_date
+                if (delta.total_seconds() < criteria_duration):
+                    crit_time = True
+
+                result['criteria'] = {'tests': crit_tests,
+                                      'Success rate': crit_rate,
+                                      'duration': crit_time}
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Nb Test run: %s" % nb_tests_run)
+                    logger.debug("Test duration: %s"
+                                 % result['details']['duration'])
+                    logger.debug("Success rate: %s" % success_rate)
+                except:
+                    logger.error("Data format error")
+
+                # Error management
+                # ****************
+                try:
+                    errors = result['details']['errors']
+                    result['errors'] = errors.replace('{0}', '')
+                except:
+                    logger.error("Error field not present (Brahamputra runs?)")
+
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(loader=templateLoader,
+                                         autoescape=True)
+
+        TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     items=items,
+                                     installer=installer)
+
+        with open("./display/" + version +
+                  "/functest/tempest-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
+logger.info("Tempest automatic reporting succesfully generated.")
diff --git a/utils/test/reporting/reporting/functest/reporting-vims.py b/utils/test/reporting/reporting/functest/reporting-vims.py
new file mode 100755
index 000000000..14fddbe25
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/reporting-vims.py
@@ -0,0 +1,126 @@
+from urllib2 import Request, urlopen, URLError
+import json
+import jinja2
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+logger = rp_utils.getLogger("vIMS")
+
+
+def sig_test_format(sig_test):
+    nbPassed = 0
+    nbFailures = 0
+    nbSkipped = 0
+    for data_test in sig_test:
+        if data_test['result'] == "Passed":
+            nbPassed += 1
+        elif data_test['result'] == "Failed":
+            nbFailures += 1
+        elif data_test['result'] == "Skipped":
+            nbSkipped += 1
+    total_sig_test_result = {}
+    total_sig_test_result['passed'] = nbPassed
+    total_sig_test_result['failures'] = nbFailures
+    total_sig_test_result['skipped'] = nbSkipped
+    return total_sig_test_result
+
+period = rp_utils.get_config('general.period')
+versions = rp_utils.get_config('general.versions')
+url_base = rp_utils.get_config('testapi.url')
+
+logger.info("****************************************")
+logger.info("*   Generating reporting vIMS          *")
+logger.info("*   Data retention = %s days           *" % period)
+logger.info("*                                      *")
+logger.info("****************************************")
+
+installers = rp_utils.get_config('general.installers')
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
+
+# For all the versions
+for version in versions:
+    for installer in installers:
+        logger.info("Search vIMS results for installer: %s, version: %s"
+                    % (installer, version))
+        request = Request("http://" + url_base + '?case=vims&installer=' +
+                          installer + '&version=' + version)
+
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError as e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        logger.debug("Results found: %s" % test_results)
+
+        scenario_results = {}
+        for r in test_results:
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            logger.debug("Search for success criteria")
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+                sig_test = result['details']['sig_test']['result']
+                if not sig_test == "" and isinstance(sig_test, list):
+                    format_result = sig_test_format(sig_test)
+                    if format_result['failures'] > format_result['passed']:
+                        result['details']['sig_test']['duration'] = 0
+                    result['details']['sig_test']['result'] = format_result
+                nb_step_ok = 0
+                nb_step = len(result['details'])
+
+                for step_name, step_result in result['details'].items():
+                    if step_result['duration'] != 0:
+                        nb_step_ok += 1
+                    m, s = divmod(step_result['duration'], 60)
+                    m_display = ""
+                    if int(m) != 0:
+                        m_display += str(int(m)) + "m "
+
+                    step_result['duration_display'] = (m_display +
+                                                       str(int(s)) + "s")
+
+                result['pr_step_ok'] = 0
+                if nb_step != 0:
+                    result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    res = result['details']['orchestrator']['duration']
+                    logger.debug("Orchestrator deployment: %s s"
+                                 % res)
+                    logger.debug("vIMS deployment: %s s"
+                                 % result['details']['vIMS']['duration'])
+                    logger.debug("Signaling testing: %s s"
+                                 % result['details']['sig_test']['duration'])
+                    logger.debug("Signaling testing results: %s"
+                                 % format_result)
+                except Exception:
+                    logger.error("Data badly formatted")
+                logger.debug("----------------------------------------")
+
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(loader=templateLoader,
+                                         autoescape=True)
+
+        TEMPLATE_FILE = "./reporting/functest/template/index-vims-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     step_order=step_order,
+                                     installer=installer)
+
+        with open("./display/" + version + "/functest/vims-" +
+                  installer + ".html", "wb") as fh:
+            fh.write(outputText)
+
+logger.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/reporting/functest/scenarioResult.py b/utils/test/reporting/reporting/functest/scenarioResult.py
new file mode 100644
index 000000000..5a54eed96
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/scenarioResult.py
@@ -0,0 +1,29 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
+class ScenarioResult(object):
+    """Result holder for one scenario, consumed by the status templates.
+
+    Wraps the validation status ("OK"/"KO"), the score (a
+    "points/criteria" string built by reporting-status), the score as a
+    percentage, and the URL of the last Jenkins run for the scenario.
+    Getter names are part of the template contract
+    (index-status-tmpl.html) and must not change.
+    """
+
+    def __init__(self, status, score=0, score_percent=0, url_lastrun=''):
+        self.status = status
+        self.score = score
+        self.score_percent = score_percent
+        self.url_lastrun = url_lastrun
+
+    def getStatus(self):
+        """Return the scenario validation status ("OK" or "KO")."""
+        return self.status
+
+    def getScore(self):
+        """Return the score as a "points/criteria" string."""
+        return self.score
+
+    def getScorePercent(self):
+        """Return the score expressed as a percentage."""
+        return self.score_percent
+
+    def getUrlLastRun(self):
+        """Return the URL of the last Jenkins run for this scenario."""
+        return self.url_lastrun
diff --git a/utils/test/reporting/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
new file mode 100644
index 000000000..cc4edaac5
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/template/index-status-tmpl.html
@@ -0,0 +1,157 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+    <script type="text/javascript" src="../../js/gauge.js"></script>
+    <script type="text/javascript" src="../../js/trend.js"></script>
+    <script>
+    function onDocumentReady() {
+    	// Gauge management
+        {% for scenario in scenario_stats.iteritems() -%}
+    	    var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+        {%- endfor %}
+
+    	// assign success rate to the gauge
+    	function updateReadings() {
+    	    {% for scenario,iteration in scenario_stats.iteritems() -%}
+    	     	gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+    	     {%- endfor %}
+    	}
+    	updateReadings();
+        }
+
+        // trend line management
+        d3.csv("./scenario_history.txt", function(data) {
+       // ***************************************
+       // Create the trend line
+      {% for scenario,iteration in scenario_stats.iteritems() -%}
+       // for scenario {{scenario}}
+       // Filter results
+        var trend{{loop.index}} = data.filter(function(row) {
+    	     return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+    	})
+       // Parse the date
+        trend{{loop.index}}.forEach(function(d) {
+    	    d.date = parseDate(d.date);
+    	    d.score = +d.score
+        });
+        // Draw the trend line
+        var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+        // ****************************************
+        {%- endfor %}
+    });
+    if ( !window.isLoaded ) {
+        window.addEventListener("load", function() {
+    			onDocumentReady();
+        }, false);
+    } else {
+    	onDocumentReady();
+    }
+</script>
+<script type="text/javascript">
+$(document).ready(function (){
+    $(".btn-more").click(function() {
+    	$(this).hide();
+    	$(this).parent().find(".panel-default").show();
+    });
+})
+</script>
+
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">Functest status page ({{version}}, {{date}})</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="../../index.html">Home</a></li>
+            <li><a href="status-apex.html">Apex</a></li>
+            <li><a href="status-compass.html">Compass</a></li>
+            <li><a href="status-fuel@x86.html">fuel@x86</a></li>
+            <li><a href="status-fuel@aarch64.html">fuel@aarch64</a></li>
+            <li><a href="status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+
+        <div class="scenario-overview">
+            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+                <table class="table">
+                    <tr>
+                        <th width="40%">Scenario</th>
+                        <th width="20%">Status</th>
+                        <th width="20%">Trend</th>
+                        <th width="10%">Score</th>
+                        <th width="10%">Iteration</th>
+                    </tr>
+                        {% for scenario,iteration in scenario_stats.iteritems() -%}
+                            <tr class="tr-ok">
+                                <td><a href={{scenario_results[scenario].getUrlLastRun()}}>{{scenario}}</a></td>
+                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
+                                <td><div id="trend_svg{{loop.index}}"></div></td>
+                                <td>{{scenario_results[scenario].getScore()}}</td>
+                                <td>{{iteration}}</td>
+                            </tr>
+                            {%- endfor %}
+                        </table>
+        </div>
+
+
+        {% for scenario, iteration in scenario_stats.iteritems() -%}
+        <div class="scenario-part">
+            <div class="page-header">
+                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario}}</b></h3>
+            </div>
+                    <div class="panel panel-default">
+                    <div class="panel-heading">
+                        <span class="panel-header-item">
+                        </span>
+                    </div>
+                    <table class="table">
+                        <tr>
+                            {% for test in items[scenario] -%}
+                            <th>
+                            {% if test.getCriteria() > -1 -%}
+                            {{test.getDisplayName() }}
+                            {%- endif %}
+                            {% if test.getTier() > 3 -%}
+                            *
+                            {%- endif %}
+                             </th>
+                            {%- endfor %}
+                        </tr>
+                        <tr class="tr-weather-weather">
+                            {% for test in items[scenario] -%}
+                            {% if test.getCriteria() > 2 -%}
+                                <td><img src="../../img/weather-clear.png"></td>
+                            {%- elif test.getCriteria() > 1 -%}
+                                <td><img src="../../img/weather-few-clouds.png"></td>
+                            {%- elif test.getCriteria() > 0 -%}
+                                <td><img src="../../img/weather-overcast.png"></td>
+                            {%- elif test.getCriteria() > -1 -%}
+                                <td><img src="../../img/weather-storm.png"></td>
+                            {%- endif %}
+                            {%- endfor %}
+                        </tr>
+                    </table>
+                </div>
+        </div>
+        {%- endfor %}
+    see <a href="https://wiki.opnfv.org/pages/viewpage.action?pageId=6828617">Functest scoring wiki page</a> for details on scenario scoring
+     <div> <br>
+    <a href="./status-{{installer}}.pdf" class="myButtonPdf">Export to PDF</a>   <a href="./scenario_history_{{installer}}.txt" class="myButtonCSV">Export to CSV</a>
+    </div>
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html b/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
new file mode 100644
index 000000000..3a222276e
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/template/index-tempest-tmpl.html
@@ -0,0 +1,95 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">Tempest status page</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="../../index.html">Home</a></li>
+            <li><a href="tempest-apex.html">Apex</a></li>
+            <li><a href="tempest-compass.html">Compass</a></li>
+            <li><a href="tempest-daisy.html">Daisy</a></li>
+            <li><a href="tempest-fuel.html">Fuel</a></li>
+            <li><a href="tempest-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+        {% for scenario_name, results in scenario_results.iteritems() -%}
+        <div class="scenario-part">
+            <div class="page-header">
+                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+            </div>
+            {% for result in results -%}
+                {% if loop.index > 2 -%}
+                    <div class="panel panel-default" hidden>
+                {%- else -%}
+                    <div class="panel panel-default">
+                {%- endif %}
+                        <div class="panel-heading">
+                            <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+                            <span class="panel-header-item">
+                                <h4><b>{{result.start_date}}</b></h4>
+                            </span>
+                            <span class="badge panel-pod-name">{{result.pod_name}}</span>
+                        </div>
+                        <table class="table">
+                            <tr>
+                                <th width="20%">Item</th>
+                                <th width="10%">Result</th>
+                                <th width="10%">Status</th>
+                                <th width="60%">Errors</th>
+                            </tr>
+                            {% for item in items -%}
+                                {% if item in result.details.keys() -%}
+                                    {% if result.criteria[item] -%}
+                                        <tr class="tr-ok">
+                                            <td>{{item}}</td>
+                                            <td>{{result.details[item]}}</td>
+                                            <td><span class="glyphicon glyphicon-ok"></td>
+                                            {% if item is equalto "Success rate" %}
+                                            <td>{{result.errors}}</td>
+                                            {% endif %}
+                                        </tr>
+                                    {%- else -%}
+                                        <tr class="tr-danger">
+                                            <td>{{item}}</td>
+                                            <td>{{result.details[item]}}</td>
+                                            <td><span class="glyphicon glyphicon-remove"></td>
+                                            {% if item is equalto "Success rate" %}
+                                            <td>{{result.errors}}</td>
+                                            {% endif %}
+                                        </tr>
+                                    {%- endif %}
+                                {%- endif %}
+                            {%- endfor %}
+                        </table>
+                    </div>
+            {%- endfor %}
+            <button type="button" class="btn btn-more">More than two</button>
+        </div>
+        {%- endfor %}
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
new file mode 100644
index 000000000..cd51607b7
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
@@ -0,0 +1,92 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">vIMS status page</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="../../index.html">Home</a></li>
+            <li><a href="vims-fuel.html">Fuel</a></li>
+            <li><a href="vims-compass.html">Compass</a></li>
+            <li><a href="vims-daisy.html">Daisy</a></li>
+            <li><a href="vims-joid.html">JOID</a></li>
+            <li><a href="vims-apex.html">APEX</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+        {% for scenario_name, results in scenario_results.iteritems() -%}
+        <div class="scenario-part">
+            <div class="page-header">
+                <h3><span class="glyphicon glyphicon-chevron-right"> <b>{{scenario_name}}</b></h3>
+            </div>
+            {% for result in results -%}
+                {% if loop.index > 2 -%}
+                    <div class="panel panel-default" hidden>
+                {%- else -%}
+                    <div class="panel panel-default">
+                {%- endif %}
+                        <div class="panel-heading">
+                            <div class="progress-bar" role="progressbar" aria-valuenow="{{result.pr_step_ok}}" aria-valuemin="0" aria-valuemax="100" style="width: {{result.pr_step_ok}}%"></div>
+                            <span class="panel-header-item">
+                                <h4><b>{{result.start_date}}</b></h4>
+                            </span>
+                            <span class="badge panel-pod-name">{{result.pod_name}}</span>
+                        </div>
+                        <table class="table">
+                            <tr>
+                                <th width="20%">Step</th>
+                                <th width="10%">Status</th>
+                                <th width="10%">Duration</th>
+                                <th width="60%">Result</th>
+                            </tr>
+                            {% for step_od_name in step_order -%}
+                                {% if step_od_name in result.details.keys() -%}
+                                    {% set step_result = result.details[step_od_name] -%}
+                                    {% if step_result.duration != 0 -%}
+                                        <tr class="tr-ok">
+                                            <td>{{step_od_name}}</td>
+                                            <td><span class="glyphicon glyphicon-ok"></td>
+                                            <td><b>{{step_result.duration_display}}</b></td>
+                                            <td>{{step_result.result}}</td>
+                                        </tr>
+                                    {%- else -%}
+                                        <tr class="tr-danger">
+                                            <td>{{step_od_name}}</td>
+                                            <td><span class="glyphicon glyphicon-remove"></td>
+                                            <td><b>0s</b></td>
+                                            <td>{{step_result.result}}</td>
+                                        </tr>
+                                    {%- endif %}
+                                {%- endif %}
+                            {%- endfor %}
+                        </table>
+                    </div>
+            {%- endfor %}
+            <button type="button" class="btn btn-more">More than two</button>
+        </div>
+        {%- endfor %}
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/functest/testCase.py b/utils/test/reporting/reporting/functest/testCase.py
new file mode 100644
index 000000000..9834f0753
--- /dev/null
+++ b/utils/test/reporting/reporting/functest/testCase.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import re
+
+
+class TestCase(object):
+
+    def __init__(self, name, project, constraints,
+                 criteria=-1, isRunnable=True, tier=-1):
+        self.name = name
+        self.project = project
+        self.constraints = constraints
+        self.criteria = criteria
+        self.isRunnable = isRunnable
+        self.tier = tier
+        display_name_matrix = {'healthcheck': 'healthcheck',
+                               'vping_ssh': 'vPing (ssh)',
+                               'vping_userdata': 'vPing (userdata)',
+                               'odl': 'ODL',
+                               'onos': 'ONOS',
+                               'ocl': 'OCL',
+                               'tempest_smoke_serial': 'Tempest (smoke)',
+                               'tempest_full_parallel': 'Tempest (full)',
+                               'tempest_defcore': 'Tempest (Defcore)',
+                               'refstack_defcore': 'Refstack',
+                               'rally_sanity': 'Rally (smoke)',
+                               'bgpvpn': 'bgpvpn',
+                               'rally_full': 'Rally (full)',
+                               'vims': 'vIMS',
+                               'doctor-notification': 'Doctor',
+                               'promise': 'Promise',
+                               'moon': 'Moon',
+                               'copper': 'Copper',
+                               'security_scan': 'Security',
+                               'multisite': 'Multisite',
+                               'domino-multinode': 'Domino',
+                               'functest-odl-sfc': 'SFC',
+                               'onos_sfc': 'SFC',
+                               'parser-basics': 'Parser',
+                               'connection_check': 'Health (connection)',
+                               'api_check': 'Health (api)',
+                               'snaps_smoke': 'SNAPS',
+                               'snaps_health_check': 'Health (dhcp)',
+                               'gluon_vping': 'Netready',
+                               'fds': 'FDS',
+                               'cloudify_ims': 'vIMS (Cloudify)',
+                               'orchestra_ims': 'OpenIMS (OpenBaton)',
+                               'opera_ims': 'vIMS (Open-O)',
+                               'vyos_vrouter': 'vyos',
+                               'barometercollectd': 'Barometer',
+                               'odl_netvirt': 'Netvirt',
+                               'security_scan': 'Security'}
+        try:
+            self.displayName = display_name_matrix[self.name]
+        except:
+            self.displayName = "unknown"
+
+    def getName(self):
+        return self.name
+
+    def getProject(self):
+        return self.project
+
+    def getConstraints(self):
+        return self.constraints
+
+    def getCriteria(self):
+        return self.criteria
+
+    def getTier(self):
+        return self.tier
+
+    def setCriteria(self, criteria):
+        self.criteria = criteria
+
+    def setIsRunnable(self, isRunnable):
+        self.isRunnable = isRunnable
+
+    def checkRunnable(self, installer, scenario, config):
+        # Re-use Functest declaration
+        # Retrieve Functest configuration file functest_config.yaml
+        is_runnable = True
+        config_test = config
+        # print " *********************** "
+        # print TEST_ENV
+        # print " ---------------------- "
+        # print "case = " + self.name
+        # print "installer = " + installer
+        # print "scenario = " + scenario
+        # print "project = " + self.project
+
+        # Retrieve test constraints
+        # Retrieve test execution param
+        test_execution_context = {"installer": installer,
+                                  "scenario": scenario}
+
+        # By default we assume that all the tests are always runnable...
+        # if test_env not empty => dependencies to be checked
+        if config_test is not None and len(config_test) > 0:
+            # possible criteria = ["installer", "scenario"]
+            # consider test criteria from config file
+            # compare towards CI env through CI en variable
+            for criteria in config_test:
+                if re.search(config_test[criteria],
+                             test_execution_context[criteria]) is None:
+                    # print "Test "+ test + " cannot be run on the environment"
+                    is_runnable = False
+        # print is_runnable
+        self.isRunnable = is_runnable
+
+    def toString(self):
+        testcase = ("Name=" + self.name + ";Criteria=" +
+                    str(self.criteria) + ";Project=" + self.project +
+                    ";Constraints=" + str(self.constraints) +
+                    ";IsRunnable" + str(self.isRunnable))
+        return testcase
+
+    def getDisplayName(self):
+        return self.displayName
diff --git a/utils/test/reporting/reporting/qtip/__init__.py b/utils/test/reporting/reporting/qtip/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/qtip/index.html b/utils/test/reporting/reporting/qtip/index.html
new file mode 100644
index 000000000..0f9df8564
--- /dev/null
+++ b/utils/test/reporting/reporting/qtip/index.html
@@ -0,0 +1,51 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">QTIP reporting page</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-main">
+            <h2>QTIP</h2>
+            QTIP is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
+            <br>The QTIP framework is deployed in several OPNFV community labs.
+            <br>It is installer, infrastructure and application independent.
+
+            <h2>Useful Links</h2>
+            <ul>
+            <li><a href="https://wiki.opnfv.org/download/attachments/5734608/qtip%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">QTIP in Depth</a></li>
+            <li><a href="https://git.opnfv.org/cgit/qtip">QTIP Repo</a></li>
+            <li><a href="https://wiki.opnfv.org/display/qtip">QTIP Project</a></li>
+            <li><a href="https://build.opnfv.org/ci/view/qtip/">QTIP Jenkins page</a></li>
+            <li><a href="https://jira.opnfv.org/browse/QTIP-119?jql=project%20%3D%20QTIP">JIRA</a></li>
+            </ul>
+
+        </div>
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/qtip/reporting-status.py b/utils/test/reporting/reporting/qtip/reporting-status.py
new file mode 100644
index 000000000..f0127b50f
--- /dev/null
+++ b/utils/test/reporting/reporting/qtip/reporting-status.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import os
+
+import jinja2
+import utils.reporting_utils as rp_utils
+import utils.scenarioResult as sr
+
+# Reporting scope (installers/versions) and the data-retention window are
+# all driven by reporting.yaml via the shared reporting_utils helpers.
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Qtip-Status")
+# Timestamp reused both in the history file lines and on the rendered pages.
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention = {} days              *".format(PERIOD))
+logger.info("*                                         *")
+logger.info("*******************************************")
+
+
+def prepare_profile_file(version):
+    profile_dir = './display/{}/qtip'.format(version)
+    if not os.path.exists(profile_dir):
+        os.makedirs(profile_dir)
+
+    profile_file = "{}/{}/scenario_history.txt".format(profile_dir,
+                                                       version)
+    if not os.path.exists(profile_file):
+        with open(profile_file, 'w') as f:
+            info = 'date,scenario,installer,details,score\n'
+            f.write(info)
+            f.close()
+    return profile_file
+
+
+def profile_results(results, installer, profile_fd):
+    result_criterias = {}
+    for s_p, s_p_result in results.iteritems():
+        ten_criteria = len(s_p_result)
+        ten_score = sum(s_p_result)
+
+        LASTEST_TESTS = rp_utils.get_config(
+            'general.nb_iteration_tests_success_criteria')
+        four_result = s_p_result[:LASTEST_TESTS]
+        four_criteria = len(four_result)
+        four_score = sum(four_result)
+
+        s_four_score = str(four_score / four_criteria)
+        s_ten_score = str(ten_score / ten_criteria)
+
+        info = '{},{},{},{},{}\n'.format(reportingDate,
+                                         s_p,
+                                         installer,
+                                         s_ten_score,
+                                         s_four_score)
+        profile_fd.write(info)
+        result_criterias[s_p] = sr.ScenarioResult('OK',
+                                                  s_four_score,
+                                                  s_ten_score,
+                                                  '100')
+
+        logger.info("--------------------------")
+    return result_criterias
+
+
+def render_html(prof_results, installer, version):
+    template_loader = jinja2.FileSystemLoader(".")
+    template_env = jinja2.Environment(loader=template_loader,
+                                      autoescape=True)
+
+    template_file = "./reporting/qtip/template/index-status-tmpl.html"
+    template = template_env.get_template(template_file)
+
+    render_outcome = template.render(prof_results=prof_results,
+                                     installer=installer,
+                                     period=PERIOD,
+                                     version=version,
+                                     date=reportingDate)
+
+    with open('./display/{}/qtip/status-{}.html'.format(version, installer),
+              'wb') as fh:
+        fh.write(render_outcome)
+
+
+def render_reporter():
+    for version in versions:
+        profile_file = prepare_profile_file(version)
+        profile_fd = open(profile_file, 'a')
+        for installer in installers:
+            results = rp_utils.getQtipResults(version, installer)
+            prof_results = profile_results(results, installer, profile_fd)
+            render_html(prof_results=prof_results,
+                        installer=installer,
+                        version=version)
+        profile_fd.close()
+        logger.info("Manage export CSV")
+        rp_utils.generate_csv(profile_file)
+        logger.info("CSV generated...")
+
+
+if __name__ == '__main__':
+    render_reporter()
diff --git a/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html b/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
new file mode 100644
index 000000000..26da36ceb
--- /dev/null
+++ b/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
@@ -0,0 +1,86 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+    <script type="text/javascript" src="../../js/trend-qtip.js"></script>
+    <script>
+        // trend line management
+        d3.csv("./scenario_history.csv", function(data) {
+            // ***************************************
+            // Create the trend line
+            {% for scenario in prof_results.keys() -%}
+            // for scenario {{scenario}}
+            // Filter results
+                var trend{{loop.index}} = data.filter(function(row) {
+                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+                })
+            // Parse the date
+            trend{{loop.index}}.forEach(function(d) {
+                d.date = parseDate(d.date);
+                d.score = +d.score
+            });
+            // Draw the trend line
+            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+            // ****************************************
+            {%- endfor %}
+        });
+    </script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+          <h3 class="text-muted">QTIP status page ({{version}}, {{date}})</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+
+        <div class="scenario-overview">
+            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+                <table class="table">
+                    <tr>
+                        <th width="25%">Pod/Scenario</th>
+                        <th width="25%">Trend</th>
+                        <th width="25%">Last 4 Iterations</th>
+                        <th width="25%">Last 10 Days</th>
+                    </tr>
+                        {% for scenario,result in prof_results.iteritems() -%}
+                            <tr class="tr-ok">
+                                <td>{{scenario}}</td>
+                                <td><div id="trend_svg{{loop.index}}"></div></td>
+                                <td>{{prof_results[scenario].getFourDaysScore()}}</td>
+                                <td>{{prof_results[scenario].getTenDaysScore()}}</td>
+                            </tr>
+                        {%- endfor %}
+                </table>
+        </div>
+
+
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/reporting.yaml b/utils/test/reporting/reporting/reporting.yaml
new file mode 100644
index 000000000..1692f481d
--- /dev/null
+++ b/utils/test/reporting/reporting/reporting.yaml
@@ -0,0 +1,68 @@
+---
+general:
+    installers:
+        - apex
+        - compass
+        - fuel
+        - joid
+
+    versions:
+        - master
+        - danube
+
+    log:
+        log_file: reporting.log
+        log_level: ERROR
+
+    period: 10
+
+    nb_iteration_tests_success_criteria: 4
+
+    directories:
+        # Relative to the path where the repo is cloned:
+        dir_reporting: utils/test/reporting/
+        dir_log: utils/test/reporting/log/
+        dir_conf: utils/test/reporting/conf/
+        dir_utils: utils/test/reporting/utils/
+        dir_templates: utils/test/reporting/templates/
+        dir_display: utils/test/reporting/display/
+
+    url: testresults.opnfv.org/reporting/
+
+testapi:
+    url: testresults.opnfv.org/test/api/v1/results
+
+functest:
+    blacklist:
+        - ovno
+        - security_scan
+        - healthcheck
+        - odl_netvirt
+        - aaa
+        - cloudify_ims
+        - orchestra_ims
+        - juju_epc
+        - orchestra
+    max_scenario_criteria: 50
+    test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
+    log_level: ERROR
+    jenkins_url: https://build.opnfv.org/ci/view/functest/job/
+    exclude_noha: False
+    exclude_virtual: False
+
+yardstick:
+    test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
+    log_level: ERROR
+
+storperf:
+    test_list:
+        - snia_steady_state
+    log_level: ERROR
+
+qtip:
+    log_level: ERROR
+    period: 1
+
+bottleneck:
+
+vsperf:
diff --git a/utils/test/reporting/reporting/storperf/__init__.py b/utils/test/reporting/reporting/storperf/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/storperf/reporting-status.py b/utils/test/reporting/reporting/storperf/reporting-status.py
new file mode 100644
index 000000000..0c188a338
--- /dev/null
+++ b/utils/test/reporting/reporting/storperf/reporting-status.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+import utils.scenarioResult as sr
+
+# Reporting scope (installers/versions) and data-retention window are
+# driven by reporting.yaml via the shared reporting_utils helpers.
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Storperf-Status")
+# Timestamp reused in the history file lines and on the rendered pages.
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention = %s days              *" % PERIOD)
+logger.info("*                                         *")
+logger.info("*******************************************")
+
+# retrieve the list of storperf tests
+storperf_tests = rp_utils.get_config('storperf.test_list')
+logger.info("Storperf tests: %s" % storperf_tests)
+
+# Main loop: for every (version, installer) pair, score the storperf
+# scenario results, append them to the per-version history file and
+# render the HTML status page.
+# For all the versions
+for version in versions:
+    # For all the installers
+    for installer in installers:
+        # get scenarios results data
+        # for the moment we consider only 1 case snia_steady_state
+        scenario_results = rp_utils.getScenarios("snia_steady_state",
+                                                 installer,
+                                                 version)
+        # logger.info("scenario_results: %s" % scenario_results)
+
+        scenario_stats = rp_utils.getScenarioStats(scenario_results)
+        logger.info("scenario_stats: %s" % scenario_stats)
+        # NOTE(review): 'items' is assigned but never used below.
+        items = {}
+        scenario_result_criteria = {}
+
+        # From each scenarios get results list
+        for s, s_result in scenario_results.items():
+            logger.info("---------------------------------")
+            logger.info("installer %s, version %s, scenario %s", installer,
+                        version, s)
+            # "ten" metrics: all runs kept within the retention period.
+            ten_criteria = len(s_result)
+
+            ten_score = 0
+            for v in s_result:
+                if "PASS" in v['criteria']:
+                    ten_score += 1
+
+            logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
+
+            # "four" metrics: only the most recent LASTEST_TESTS runs.
+            four_score = 0
+            try:
+                LASTEST_TESTS = rp_utils.get_config(
+                    'general.nb_iteration_tests_success_criteria')
+                s_result.sort(key=lambda x: x['start_date'])
+                four_result = s_result[-LASTEST_TESTS:]
+                logger.debug("four_result: {}".format(four_result))
+                logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
+                # logger.debug("four_result: {}".format(four_result))
+                four_criteria = len(four_result)
+                for v in four_result:
+                    if "PASS" in v['criteria']:
+                        four_score += 1
+                logger.info("4 Score: %s / %s " % (four_score,
+                                                   four_criteria))
+            # NOTE(review): bare except hides real failures; it should
+            # catch the specific config/sort errors expected here.
+            except:
+                logger.error("Impossible to retrieve the four_score")
+
+            # Python 2 floor division; also relies on the except below to
+            # absorb a ZeroDivisionError when four_criteria is 0.
+            try:
+                s_status = (four_score * 100) / four_criteria
+            except:
+                s_status = 0
+            logger.info("Score percent = %s" % str(s_status))
+            s_four_score = str(four_score) + '/' + str(four_criteria)
+            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+            s_score_percent = str(s_status)
+
+            logger.debug(" s_status: {}".format(s_status))
+            if s_status == 100:
+                logger.info(">>>>> scenario OK, save the information")
+            else:
+                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+                             last 10 days = %s" % (s_four_score, s_ten_score))
+
+            # Jenkins URL of the most recent run, if any.
+            s_url = ""
+            if len(s_result) > 0:
+                build_tag = s_result[len(s_result)-1]['build_tag']
+                logger.debug("Build tag: %s" % build_tag)
+                # NOTE(review): duplicated assignment "s_url = s_url =" —
+                # harmless but should be a single assignment.
+                s_url = s_url = rp_utils.getJenkinsUrl(build_tag)
+                logger.info("last jenkins url: %s" % s_url)
+
+            # Save daily results in a file
+            path_validation_file = ("./display/" + version +
+                                    "/storperf/scenario_history.txt")
+
+            # Seed the CSV header on first use.
+            if not os.path.exists(path_validation_file):
+                with open(path_validation_file, 'w') as f:
+                    info = 'date,scenario,installer,details,score\n'
+                    f.write(info)
+
+            with open(path_validation_file, "a") as f:
+                info = (reportingDate + "," + s + "," + installer +
+                        "," + s_ten_score + "," +
+                        str(s_score_percent) + "\n")
+                f.write(info)
+
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+                                                            s_four_score,
+                                                            s_ten_score,
+                                                            s_score_percent,
+                                                            s_url)
+
+            logger.info("--------------------------")
+
+        # Render the per-installer status page from the Jinja2 template.
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(loader=templateLoader,
+                                         autoescape=True)
+
+        TEMPLATE_FILE = "./reporting/storperf/template/index-status-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_result_criteria,
+                                     installer=installer,
+                                     period=PERIOD,
+                                     version=version,
+                                     date=reportingDate)
+
+        with open("./display/" + version +
+                  "/storperf/status-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
diff --git a/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html b/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html
new file mode 100644
index 000000000..e872272c3
--- /dev/null
+++ b/utils/test/reporting/reporting/storperf/template/index-status-tmpl.html
@@ -0,0 +1,110 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+    <script type="text/javascript" src="../../js/gauge.js"></script>
+    <script type="text/javascript" src="../../js/trend.js"></script>
+    <script>
+        function onDocumentReady() {
+            // Gauge management
+            {% for scenario in scenario_results.keys() -%}
+            var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+            {%- endfor %}
+            // assign success rate to the gauge
+            function updateReadings() {
+                {% for scenario in scenario_results.keys() -%}
+                 gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+                 {%- endfor %}
+            }
+            updateReadings();
+        }
+
+        // trend line management
+        d3.csv("./scenario_history.txt", function(data) {
+            // ***************************************
+            // Create the trend line
+            {% for scenario in scenario_results.keys() -%}
+            // for scenario {{scenario}}
+            // Filter results
+                var trend{{loop.index}} = data.filter(function(row) {
+                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+                })
+            // Parse the date
+            trend{{loop.index}}.forEach(function(d) {
+                d.date = parseDate(d.date);
+                d.score = +d.score
+            });
+            // Draw the trend line
+            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+            // ****************************************
+            {%- endfor %}
+        });
+        if ( !window.isLoaded ) {
+            window.addEventListener("load", function() {
+            onDocumentReady();
+            }, false);
+        } else {
+            onDocumentReady();
+        }
+    </script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+          <h3 class="text-muted">Storperf status page ({{version}}, {{date}})</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="status-apex.html">Apex</a></li>
+            <li><a href="status-compass.html">Compass</a></li>
+            <li><a href="status-fuel.html">Fuel</a></li>
+            <li><a href="status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+
+        <div class="scenario-overview">
+            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+                <table class="table">
+                    <tr>
+                        <th width="40%">Scenario</th>
+                        <th width="20%">Status</th>
+                        <th width="20%">Trend</th>
+                        <th width="10%">Last 4 Iterations</th>
+                        <th width="10%">Last 10 Days</th>
+                    </tr>
+                        {% for scenario,result in scenario_results.iteritems() -%}
+                            <tr class="tr-ok">
+                                <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
+                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
+                                <td><div id="trend_svg{{loop.index}}"></div></td>
+                                <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+                                <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+                            </tr>
+                        {%- endfor %}
+                </table>
+        </div>
+
+
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/tests/__init__.py b/utils/test/reporting/reporting/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/tests/unit/__init__.py b/utils/test/reporting/reporting/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/tests/unit/utils/__init__.py b/utils/test/reporting/reporting/tests/unit/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/tests/unit/utils/test_utils.py b/utils/test/reporting/reporting/tests/unit/utils/test_utils.py
new file mode 100644
index 000000000..9614d74ff
--- /dev/null
+++ b/utils/test/reporting/reporting/tests/unit/utils/test_utils.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+import logging
+import unittest
+
+from reporting.utils import reporting_utils
+
+
class reportingUtilsTesting(unittest.TestCase):
    # Bootstrap test suite: its only purpose is to prove the tox/pytest
    # harness can import the reporting package and run a test.

    # Silence all log output while the tests run.
    logging.disable(logging.CRITICAL)

    def setUp(self):
        # Expose the module under test; real assertions are still TODO.
        self.test = reporting_utils

    def test_foo(self):
        # Dummy assertion so the suite has at least one passing test.
        self.assertTrue(0 < 1)
+
+
+if __name__ == "__main__":
+    unittest.main(verbosity=2)
diff --git a/utils/test/reporting/reporting/utils/__init__.py b/utils/test/reporting/reporting/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/utils/reporting_utils.py b/utils/test/reporting/reporting/utils/reporting_utils.py
new file mode 100644
index 000000000..62820914a
--- /dev/null
+++ b/utils/test/reporting/reporting/utils/reporting_utils.py
@@ -0,0 +1,463 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+from urllib2 import Request, urlopen, URLError
+import logging
+import json
+import os
+import requests
+import pdfkit
+import yaml
+
+
+# ----------------------------------------------------------
+#
+#               YAML UTILS
+#
+# -----------------------------------------------------------
def get_parameter_from_yaml(parameter, file):
    """Return the value of a dotted *parameter* read from a yaml *file*.

    parameter must be given in string format with dots,
    e.g. general.openstack.image_name

    Raises ValueError when any path element is missing.
    """
    # NOTE: the redundant f.close() after the with block was removed --
    # the context manager already closes the file.
    with open(file) as f:
        file_yaml = yaml.safe_load(f)
    value = file_yaml
    for element in parameter.split("."):
        value = value.get(element)
        if value is None:
            raise ValueError("The parameter %s is not defined in"
                             " reporting.yaml" % parameter)
    return value
+
+
def get_config(parameter):
    """Look up *parameter* in the yaml file named by $CONFIG_REPORTING_YAML."""
    config_file = os.environ["CONFIG_REPORTING_YAML"]
    return get_parameter_from_yaml(parameter, config_file)
+
+
+# ----------------------------------------------------------
+#
+#               LOGGER UTILS
+#
+# -----------------------------------------------------------
def getLogger(module):
    """Return the root logger configured with a file and a console handler.

    The log file path and level come from the reporting configuration.
    NOTE(review): handlers are added to the root logger on every call --
    calling this twice duplicates output; confirm single-call usage.
    """
    fmt = logging.Formatter("%(asctime)s [" +
                            module +
                            "] [%(levelname)-5.5s]  %(message)s")
    logger = logging.getLogger()
    log_file = get_config('general.log.log_file')
    log_level = get_config('general.log.log_level')

    file_handler = logging.FileHandler("{0}/{1}".format('.', log_file))
    file_handler.setFormatter(fmt)
    logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    logger.addHandler(console_handler)

    logger.setLevel(log_level)
    return logger
+
+
+# ----------------------------------------------------------
+#
+#               REPORTING UTILS
+#
+# -----------------------------------------------------------
def getApiResults(case, installer, scenario, version):
    """Fetch raw test API results for one case/installer/scenario/version.

    Returns the decoded JSON payload, or an empty JSON list when the
    HTTP request fails.
    """
    results = json.dumps([])
    period = get_config('general.period')
    url_base = get_config('testapi.url')
    nb_tests = get_config('general.nb_iteration_tests_success_criteria')

    url = ("http://" + url_base + "?case=" + case +
           "&period=" + str(period) + "&installer=" + installer +
           "&scenario=" + scenario + "&version=" + version +
           "&last=" + str(nb_tests))
    request = Request(url)

    try:
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
    except URLError as e:
        # fix: the original message had no {} placeholder, so the error
        # was silently dropped
        print('Got an error code: {}'.format(e))

    return results
+
+
def getScenarios(case, installer, version):
    """Return a dict {scenario: [results]} for a case/installer/version.

    *case* may be a test case object (exposing getName()) or a plain
    string. Results from virtual pods and noha scenarios can be excluded
    through the functest configuration.

    Raises ValueError when *case* is neither.
    """
    try:
        case = case.getName()
    except AttributeError:
        # case is not a test case object; accept a plain string
        if not isinstance(case, str):
            raise ValueError("Case cannot be evaluated")

    period = get_config('general.period')
    url_base = get_config('testapi.url')

    url = ("http://" + url_base + "?case=" + case +
           "&period=" + str(period) + "&installer=" + installer +
           "&version=" + version)

    # fix: initialised up-front so a URLError no longer leaves
    # test_results (and scenario_results) undefined -> NameError
    test_results = None
    try:
        request = Request(url)
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
        test_results = results['results']
        try:
            page = results['pagination']['total_pages']
            if page > 1:
                # fetch every page and concatenate the results
                test_results = []
                for i in range(1, page + 1):
                    url_page = url + "&page=" + str(i)
                    request = Request(url_page)
                    response = urlopen(request)
                    k = response.read()
                    results = json.loads(k)
                    test_results += results['results']
        except KeyError:
            print('No pagination detected')
    except URLError as err:
        print('Got an error code: {}'.format(err))

    scenario_results = {}
    if test_results is not None:
        test_results.reverse()
        # hoisted out of the loop: these config values are invariant
        exclude_virtual_pod = get_config('functest.exclude_virtual')
        exclude_noha = get_config('functest.exclude_noha')

        for r in test_results:
            # Retrieve all the scenarios per installer
            if r['scenario'] not in scenario_results:
                scenario_results[r['scenario']] = []
            # Do we consider results from virtual pods ...
            # Do we consider results for non HA scenarios...
            if ((exclude_virtual_pod and "virtual" in r['pod_name']) or
                    (exclude_noha and "noha" in r['scenario'])):
                print("exclude virtual pod results...")
            else:
                scenario_results[r['scenario']].append(r)

    return scenario_results
+
+
def getScenarioStats(scenario_results):
    """Return {scenario: number_of_results} for *scenario_results*.

    fix: items() instead of the Python-2-only iteritems().
    """
    scenario_stats = {}
    for scenario, results in scenario_results.items():
        scenario_stats[scenario] = len(results)

    return scenario_stats
+
+
def getScenarioStatus(installer, version):
    """Return {scenario: [1|0, ...]} success flags per scenario run.

    A run counts 1 when its criteria is 'SUCCESS', 0 otherwise. Runs
    without a stop_date or criteria are ignored.
    """
    period = get_config('general.period')
    url_base = get_config('testapi.url')

    url = ("http://" + url_base + "?case=scenario_status" +
           "&installer=" + installer +
           "&version=" + version + "&period=" + str(period))
    request = Request(url)

    # fix: initialised so a URLError no longer leaves test_results
    # undefined -> NameError below
    test_results = None
    try:
        response = urlopen(request)
        k = response.read()
        response.close()
        results = json.loads(k)
        test_results = results['results']
    except URLError as e:
        print('Got an error code: {}'.format(e))

    scenario_results = {}
    result_dict = {}
    if test_results is not None:
        for r in test_results:
            # keep only completed runs with a verdict
            if r['stop_date'] != 'None' and r['criteria'] is not None:
                if r['scenario'] not in scenario_results:
                    scenario_results[r['scenario']] = []
                scenario_results[r['scenario']].append(r)

        for scenario, runs in scenario_results.items():
            result_dict[scenario] = [1 if run['criteria'] == 'SUCCESS' else 0
                                     for run in runs]

    return result_dict
+
+
def getQtipResults(version, installer):
    """Return {pod/scenario: [scores]} from the qtip project results."""
    period = get_config('qtip.period')
    url_base = get_config('testapi.url')

    url = ("http://" + url_base + "?project=qtip" +
           "&installer=" + installer +
           "&version=" + version + "&period=" + str(period))
    request = Request(url)

    # fix: initialised so a URLError no longer leaves results
    # undefined -> NameError below
    results = None
    try:
        response = urlopen(request)
        k = response.read()
        response.close()
        results = json.loads(k)['results']
    except URLError as err:
        print('Got an error code: {}'.format(err))

    result_dict = {}
    if results:
        for r in results:
            key = '{}/{}'.format(r['pod_name'], r['scenario'])
            result_dict.setdefault(key, []).append(r['details']['score'])

    return result_dict
+
+
def getNbtestOk(results):
    """Count how many entries in *results* carry a 'PASS' criteria.

    *results* is a list of {date: criteria} dicts; non-string criteria
    (e.g. None) are counted as failures.
    """
    nb_test_ok = 0
    for result in results:
        for date, criteria in result.items():
            try:
                if "PASS" in criteria:
                    nb_test_ok += 1
            except TypeError:
                # criteria is not iterable (e.g. None)
                print("Cannot retrieve test status")
    return nb_test_ok
+
+
def getResult(testCase, installer, scenario, version):
    """Compute a 4-level indicator for a test case.

    3: 4+ consecutive runs passing the success criteria
    2: <4 successful consecutive runs but passing the criteria
    1: close to pass the success criteria
    0: 0% success, not passing
    -1: no run available
    """
    # retrieve raw results and keep only the test results
    results = getApiResults(testCase, installer, scenario, version)
    test_results = results['results']

    # fix: initialised up-front; returning when the API gives no results
    # previously raised NameError
    test_result_indicator = -1

    if test_results is not None:
        test_results.reverse()

        # one {start_date: criteria} dict per run, sorted by date
        scenario_results = [{r["start_date"]: r["criteria"]}
                            for r in test_results]
        scenario_results.sort()

        test_result_indicator = 0
        nbTestOk = getNbtestOk(scenario_results)

        if len(scenario_results) < 1:
            # No results available
            test_result_indicator = -1
        elif nbTestOk < 1:
            test_result_indicator = 0
        elif nbTestOk < 2:
            test_result_indicator = 1
        else:
            # Test the last 4 runs
            if len(scenario_results) > 3:
                last4runResults = scenario_results[-4:]
                nbTestOkLast4 = getNbtestOk(last4runResults)
                if nbTestOkLast4 > 3:
                    test_result_indicator = 3
                else:
                    test_result_indicator = 2
            else:
                test_result_indicator = 2
    return test_result_indicator
+
+
def getJenkinsUrl(build_tag):
    """Derive the jenkins console URL from a build tag.

    e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246 -> id 246
         jenkins-functest-compass-huawei-pod5-daily-master-136 -> id 136
    Note: tied to the jenkins tag format; adapt if that format changes.
    Returns None for tags not starting with the jenkins convention or
    that cannot be parsed.
    """
    url_base = get_config('functest.jenkins_url')
    # fix: initialised so a parse failure no longer leaves jenkins_url
    # undefined -> NameError on return
    jenkins_url = None
    try:
        build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
        url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
                  "/" + str(build_id[0]))
        jenkins_url = url_base + url_id + "/console"
    except (IndexError, TypeError, AttributeError):
        print('Impossible to get jenkins url:')

    if "jenkins-" not in build_tag:
        jenkins_url = None

    return jenkins_url
+
+
def getScenarioPercent(scenario_score, scenario_criteria):
    """Return scenario_score / scenario_criteria as a percentage.

    Returns 0.0 when the inputs cannot be converted or the criteria is 0.
    """
    score = 0.0
    try:
        score = float(scenario_score) / float(scenario_criteria) * 100
    except (TypeError, ValueError, ZeroDivisionError):
        # fix: bare except narrowed to the conversion/division errors
        print('Impossible to calculate the percentage score')
    return score
+
+
+# *********
+# Functest
+# *********
def getFunctestConfig(version=""):
    """Download and parse the functest test configuration (yaml).

    *version* is appended to the configured base URL.
    """
    url = get_config('functest.test_conf') + version
    return yaml.safe_load(requests.get(url).text)
+
+
def getArchitectures(scenario_results):
    """Return the architectures present in *scenario_results*.

    'x86' is always included; 'aarch64' is appended as soon as one
    result's build_tag mentions armband.
    """
    supported_arch = ['x86']
    for results in scenario_results.values():
        if any("armband" in result['build_tag'] for result in results):
            supported_arch.append('aarch64')
            break
    return supported_arch
+
+
def filterArchitecture(results, architecture):
    """Keep only the results matching *architecture* ('x86' or 'aarch64').

    armband build tags mark aarch64 runs; everything else is x86.
    Scenarios left with no result are dropped.
    fix: string comparison used 'is' (identity), which only worked
    through CPython string interning; also the loop variable shadowed
    the 'results' parameter.
    """
    filtered_results = {}
    for name, values in results.items():
        filtered_values = []
        for value in values:
            is_armband = "armband" in value['build_tag']
            if architecture == "x86" and not is_armband:
                # drop aarch64 results
                filtered_values.append(value)
            elif architecture == "aarch64" and is_armband:
                # drop x86 results
                filtered_values.append(value)
        if filtered_values:
            filtered_results[name] = filtered_values
    return filtered_results
+
+
+# *********
+# Yardstick
+# *********
def subfind(given_list, pattern_list):
    """Return True when *pattern_list* occurs contiguously in *given_list*.

    The comparison window length comes from the configured success
    criteria iteration count.
    """
    window = get_config('general.nb_iteration_tests_success_criteria')
    for start, item in enumerate(given_list):
        if item == pattern_list[0] and \
                given_list[start:start + window] == pattern_list:
            return True
    return False
+
+
+def _get_percent(status):
+
+    if status * 100 % 6:
+        return round(float(status) * 100 / 6, 1)
+    else:
+        return status * 100 / 6
+
+
def get_percent(four_list, ten_list):
    """Compute the success percentage from the 4-run and 10-run flag lists.

    Status levels: 6 when all of the last LASTEST_TESTS runs pass,
    5 when four consecutive passes appear in the ten-run window,
    0 when nothing passed, otherwise the 4-run score + 1.
    """
    four_score = sum(four_list)
    ten_score = sum(ten_list)

    nb_tests = get_config('general.nb_iteration_tests_success_criteria')
    if four_score == nb_tests:
        status = 6
    elif subfind(ten_list, [1, 1, 1, 1]):
        status = 5
    elif ten_score == 0:
        status = 0
    else:
        status = four_score + 1

    return _get_percent(status)
+
+
def _test():
    """Manual smoke test: dump the compass/master scenario status."""
    status = getScenarioStatus("compass", "master")
    print("status:++++++++++++++++++++++++")
    print(json.dumps(status, indent=4))
+
+
+# ----------------------------------------------------------
+#
+#               Export
+#
+# -----------------------------------------------------------
+
def export_csv(scenario_file_name, installer, version):
    """Generate a per-installer csv extract of scenario_history.txt.

    Appends a header plus every history line mentioning *installer* to
    ./display/<version>/functest/scenario_history_<installer>.csv
    fix: the original called file.close without parentheses, so the
    handle was never closed; both files now use context managers.
    """
    scenario_installer_file_name = ("./display/" + version +
                                    "/functest/scenario_history_" +
                                    installer + ".csv")
    with open(scenario_installer_file_name, "a") as scenario_installer_file:
        with open(scenario_file_name, "r") as scenario_file:
            scenario_installer_file.write(
                "date,scenario,installer,detail,score\n")
            for line in scenario_file:
                if installer in line:
                    scenario_installer_file.write(line)
+
+
def generate_csv(scenario_file):
    """Duplicate the scenario history file under a .csv name.

    The copy keeps metadata (shutil.copy2); the target name is the
    source name with its first 'txt' replaced by 'csv'.
    """
    import shutil

    target = scenario_file.replace('txt', 'csv')
    shutil.copy2(scenario_file, target)
+
+
def export_pdf(pdf_path, pdf_doc_name):
    """Render the html at *pdf_path* to *pdf_doc_name* using pdfkit.

    Failures are reported but never raised (best-effort export).
    """
    try:
        pdfkit.from_file(pdf_path, pdf_doc_name)
    except IOError:
        # pdfkit may raise IOError on asset warnings even though the
        # pdf was produced -- presumably harmless; TODO confirm
        print("Error but pdf generated anyway...")
    except Exception:
        # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        print("impossible to generate PDF")
diff --git a/utils/test/reporting/reporting/utils/scenarioResult.py b/utils/test/reporting/reporting/utils/scenarioResult.py
new file mode 100644
index 000000000..6029d7f42
--- /dev/null
+++ b/utils/test/reporting/reporting/utils/scenarioResult.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
class ScenarioResult(object):
    """Container for the reporting scores of one scenario.

    Accessors follow the getXxx() naming because the jinja2 status
    templates call them by that name (e.g. getScorePercent()).
    """

    def __init__(self, status, four_days_score='', ten_days_score='',
                 score_percent=0.0, last_url=''):
        # status label of the scenario
        self.status = status
        # score over the last four days (e.g. '3/4')
        self.four_days_score = four_days_score
        # score over the last ten days (e.g. '8/10')
        self.ten_days_score = ten_days_score
        # success rate as a number (0.0 - 100.0)
        self.score_percent = score_percent
        # link to the last run
        self.last_url = last_url

    def getStatus(self):
        """Return the scenario status."""
        return self.status

    def getFourDaysScore(self):
        """Return the four-day score."""
        return self.four_days_score

    def getTenDaysScore(self):
        """Return the ten-day score."""
        return self.ten_days_score

    def getScorePercent(self):
        """Return the success percentage."""
        return self.score_percent

    def getLastUrl(self):
        """Return the URL of the last run."""
        return self.last_url
diff --git a/utils/test/reporting/reporting/yardstick/__init__.py b/utils/test/reporting/reporting/yardstick/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_0.png b/utils/test/reporting/reporting/yardstick/img/gauge_0.png
new file mode 100644
index 000000000..ecefc0e66
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_0.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_100.png b/utils/test/reporting/reporting/yardstick/img/gauge_100.png
new file mode 100644
index 000000000..e199e1561
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_100.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png
new file mode 100644
index 000000000..3e3993c3b
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_16.7.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_25.png b/utils/test/reporting/reporting/yardstick/img/gauge_25.png
new file mode 100644
index 000000000..4923659b9
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_25.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png
new file mode 100644
index 000000000..364574b4a
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_33.3.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png
new file mode 100644
index 000000000..8c3e910fa
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_41.7.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_50.png b/utils/test/reporting/reporting/yardstick/img/gauge_50.png
new file mode 100644
index 000000000..2874b9fcf
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_50.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png
new file mode 100644
index 000000000..beedc8aa9
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_58.3.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png
new file mode 100644
index 000000000..93f44d133
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_66.7.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_75.png b/utils/test/reporting/reporting/yardstick/img/gauge_75.png
new file mode 100644
index 000000000..9fc261ff8
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_75.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png
new file mode 100644
index 000000000..59f86571e
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_8.3.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png b/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png
new file mode 100644
index 000000000..27ae4ec54
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_83.3.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png b/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png
new file mode 100644
index 000000000..280865714
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/gauge_91.7.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/icon-nok.png b/utils/test/reporting/reporting/yardstick/img/icon-nok.png
new file mode 100644
index 000000000..526b5294b
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/icon-nok.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/icon-ok.png b/utils/test/reporting/reporting/yardstick/img/icon-ok.png
new file mode 100644
index 000000000..3a9de2e89
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/icon-ok.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-clear.png b/utils/test/reporting/reporting/yardstick/img/weather-clear.png
new file mode 100644
index 000000000..a0d967750
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/weather-clear.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png b/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png
new file mode 100644
index 000000000..acfa78398
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/weather-few-clouds.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-overcast.png b/utils/test/reporting/reporting/yardstick/img/weather-overcast.png
new file mode 100644
index 000000000..4296246d0
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/weather-overcast.png differ
diff --git a/utils/test/reporting/reporting/yardstick/img/weather-storm.png b/utils/test/reporting/reporting/yardstick/img/weather-storm.png
new file mode 100644
index 000000000..956f0e20f
Binary files /dev/null and b/utils/test/reporting/reporting/yardstick/img/weather-storm.png differ
diff --git a/utils/test/reporting/reporting/yardstick/index.html b/utils/test/reporting/reporting/yardstick/index.html
new file mode 100644
index 000000000..488f1421d
--- /dev/null
+++ b/utils/test/reporting/reporting/yardstick/index.html
@@ -0,0 +1,51 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+        <h3 class="text-muted">Yardstick reporting page</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-main">
+            <h2>Yardstick</h2>
+            Yardstick is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
+            <br>The Yardstick framework is deployed in several OPNFV community labs.
+            <br>It is installer, infrastructure and application independent.
+
+            <h2>Useful Links</h2>
+            <li><a href="https://wiki.opnfv.org/download/attachments/5734608/yardstick%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">Yardstick in Depth</a></li>
+            <li><a href="https://git.opnfv.org/cgit/yardstick">Yardstick Repo</a></li>
+            <li><a href="https://wiki.opnfv.org/display/yardstick">Yardstick Project</a></li>
+            <li><a href="https://build.opnfv.org/ci/view/yardstick/">Yardstick Jenkins page</a></li>
+            <li><a href="https://jira.opnfv.org/browse/YARDSTICK-119?jql=project%20%3D%20YARDSTICK">JIRA</a></li>
+
+        </div>
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/reporting/yardstick/reporting-status.py b/utils/test/reporting/reporting/yardstick/reporting-status.py
new file mode 100644
index 000000000..85c386bf1
--- /dev/null
+++ b/utils/test/reporting/reporting/yardstick/reporting-status.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import datetime
+import jinja2
+import os
+
+import utils.scenarioResult as sr
+from scenarios import config as cf
+
+# manage conf
+import utils.reporting_utils as rp_utils
+
+installers = rp_utils.get_config('general.installers')
+versions = rp_utils.get_config('general.versions')
+PERIOD = rp_utils.get_config('general.period')
+
+# Logger
+logger = rp_utils.getLogger("Yardstick-Status")
+reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+logger.info("*******************************************")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention = %s days              *" % PERIOD)
+logger.info("*                                         *")
+logger.info("*******************************************")
+
+
+# For all the versions
+for version in versions:
+    # For all the installers
+    for installer in installers:
+        # get scenarios results data
+        scenario_results = rp_utils.getScenarioStatus(installer, version)
+        if 'colorado' == version:
+            stable_result = rp_utils.getScenarioStatus(installer,
+                                                       'stable/colorado')
+            for k, v in stable_result.items():
+                if k not in scenario_results.keys():
+                    scenario_results[k] = []
+                scenario_results[k] += stable_result[k]
+        scenario_result_criteria = {}
+
+        for s in scenario_results.keys():
+            if installer in cf.keys() and s in cf[installer].keys():
+                scenario_results.pop(s)
+
+        # From each scenarios get results list
+        for s, s_result in scenario_results.items():
+            logger.info("---------------------------------")
+            logger.info("installer %s, version %s, scenario %s", installer,
+                        version, s)
+
+            ten_criteria = len(s_result)
+            ten_score = 0
+            for v in s_result:
+                ten_score += v
+
+            LASTEST_TESTS = rp_utils.get_config(
+                'general.nb_iteration_tests_success_criteria')
+            four_result = s_result[:LASTEST_TESTS]
+            four_criteria = len(four_result)
+            four_score = 0
+            for v in four_result:
+                four_score += v
+
+            s_status = str(rp_utils.get_percent(four_result, s_result))
+            s_four_score = str(four_score) + '/' + str(four_criteria)
+            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
+            s_score_percent = rp_utils.get_percent(four_result, s_result)
+
+            if '100' == s_status:
+                logger.info(">>>>> scenario OK, save the information")
+            else:
+                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
+                            last 10 days = %s" % (s_four_score, s_ten_score))
+
+            # Save daily results in a file
+            path_validation_file = ("./display/" + version +
+                                    "/yardstick/scenario_history.txt")
+
+            if not os.path.exists(path_validation_file):
+                with open(path_validation_file, 'w') as f:
+                    info = 'date,scenario,installer,details,score\n'
+                    f.write(info)
+
+            with open(path_validation_file, "a") as f:
+                info = (reportingDate + "," + s + "," + installer +
+                        "," + s_ten_score + "," +
+                        str(s_score_percent) + "\n")
+                f.write(info)
+
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
+                                                            s_four_score,
+                                                            s_ten_score,
+                                                            s_score_percent)
+
+            logger.info("--------------------------")
+
+        templateLoader = jinja2.FileSystemLoader(".")
+        templateEnv = jinja2.Environment(loader=templateLoader,
+                                         autoescape=True)
+
+        TEMPLATE_FILE = "./reporting/yardstick/template/index-status-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_result_criteria,
+                                     installer=installer,
+                                     period=PERIOD,
+                                     version=version,
+                                     date=reportingDate)
+
+        with open("./display/" + version +
+                  "/yardstick/status-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
diff --git a/utils/test/reporting/reporting/yardstick/scenarios.py b/utils/test/reporting/reporting/yardstick/scenarios.py
new file mode 100644
index 000000000..26e8c8bb0
--- /dev/null
+++ b/utils/test/reporting/reporting/yardstick/scenarios.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import requests
+import yaml
+
+import utils.reporting_utils as rp_utils
+
+yardstick_conf = rp_utils.get_config('yardstick.test_conf')
+response = requests.get(yardstick_conf)
+yaml_file = yaml.safe_load(response.text)
+reporting = yaml_file.get('reporting')
+
+config = {}
+
+for element in reporting:
+    name = element['name']
+    scenarios = element['scenario']
+    for s in scenarios:
+        if name not in config:
+            config[name] = {}
+        config[name][s] = True
diff --git a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
new file mode 100644
index 000000000..77ba9502f
--- /dev/null
+++ b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
@@ -0,0 +1,110 @@
+ <html>
+  <head>
+    <meta charset="utf-8">
+    <!-- Bootstrap core CSS -->
+    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
+    <link href="../../css/default.css" rel="stylesheet">
+    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
+    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
+    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
+    <script type="text/javascript" src="../../js/gauge.js"></script>
+    <script type="text/javascript" src="../../js/trend.js"></script>
+    <script>
+        function onDocumentReady() {
+            // Gauge management
+            {% for scenario in scenario_results.keys() -%}
+            var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
+            {%- endfor %}
+            // assign success rate to the gauge
+            function updateReadings() {
+                {% for scenario in scenario_results.keys() -%}
+                 gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
+                 {%- endfor %}
+            }
+            updateReadings();
+        }
+
+        // trend line management
+        d3.csv("./scenario_history.txt", function(data) {
+            // ***************************************
+            // Create the trend line
+            {% for scenario in scenario_results.keys() -%}
+            // for scenario {{scenario}}
+            // Filter results
+                var trend{{loop.index}} = data.filter(function(row) {
+                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
+                })
+            // Parse the date
+            trend{{loop.index}}.forEach(function(d) {
+                d.date = parseDate(d.date);
+                d.score = +d.score
+            });
+            // Draw the trend line
+            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
+            // ****************************************
+            {%- endfor %}
+        });
+        if ( !window.isLoaded ) {
+            window.addEventListener("load", function() {
+            onDocumentReady();
+            }, false);
+        } else {
+            onDocumentReady();
+        }
+    </script>
+    <script type="text/javascript">
+    $(document).ready(function (){
+        $(".btn-more").click(function() {
+            $(this).hide();
+            $(this).parent().find(".panel-default").show();
+        });
+    })
+    </script>
+  </head>
+    <body>
+    <div class="container">
+      <div class="masthead">
+          <h3 class="text-muted">Yardstick status page ({{version}}, {{date}})</h3>
+        <nav>
+          <ul class="nav nav-justified">
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="status-apex.html">Apex</a></li>
+            <li><a href="status-compass.html">Compass</a></li>
+            <li><a href="status-fuel.html">Fuel</a></li>
+            <li><a href="status-joid.html">Joid</a></li>
+          </ul>
+        </nav>
+      </div>
+<div class="row">
+    <div class="col-md-1"></div>
+    <div class="col-md-10">
+        <div class="page-header">
+            <h2>{{installer}}</h2>
+        </div>
+
+        <div class="scenario-overview">
+            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
+                <table class="table">
+                    <tr>
+                        <th width="40%">Scenario</th>
+                        <th width="20%">Status</th>
+                        <th width="20%">Trend</th>
+                        <th width="10%">Last 4 Iterations</th>
+                        <th width="10%">Last 10 Days</th>
+                    </tr>
+                        {% for scenario,result in scenario_results.iteritems() -%}
+                            <tr class="tr-ok">
+                                <td>{{scenario}}</td>
+                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
+                                <td><div id="trend_svg{{loop.index}}"></div></td>
+                                <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
+                                <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
+                            </tr>
+                        {%- endfor %}
+                </table>
+        </div>
+
+
+    </div>
+    <div class="col-md-1"></div>
+</div>
diff --git a/utils/test/reporting/requirements.txt b/utils/test/reporting/requirements.txt
new file mode 100644
index 000000000..344064ddc
--- /dev/null
+++ b/utils/test/reporting/requirements.txt
@@ -0,0 +1,7 @@
+pdfkit>=0.6.1 # MIT
+wkhtmltopdf-pack>=0.12.3 # MIT
+PyYAML>=3.10.0 # MIT
+simplejson>=2.2.0 # MIT
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
+requests!=2.12.2,>=2.10.0 # Apache-2.0
+tornado>=4.4.2 # Apache-2.0
diff --git a/utils/test/reporting/run_test.sh b/utils/test/reporting/run_test.sh
index 8c674ce5f..b83b550b8 100755
--- a/utils/test/reporting/run_test.sh
+++ b/utils/test/reporting/run_test.sh
@@ -1,44 +1,3 @@
 #!/bin/bash
-set -o errexit
-set -o pipefail
-
-
-# Get script directory
-SCRIPTDIR=`dirname $0`
-
-# Creating virtual environment
-if [ ! -z $VIRTUAL_ENV ]; then
-    venv=$VIRTUAL_ENV
-else
-    venv=$SCRIPTDIR/.venv
-    virtualenv $venv
-fi
-
-source $venv/bin/activate
-
-export CONFIG_REPORTING_YAML=$SCRIPTDIR/reporting.yaml
-
-# ***************
-# Run unit tests
-# ***************
-echo "Running unit tests..."
-
-# install python packages
-easy_install -U setuptools
-easy_install -U pip
-pip install -r $SCRIPTDIR/docker/requirements.pip
-pip install -e $SCRIPTDIR
-
-python $SCRIPTDIR/setup.py develop
-
-# unit tests
-# TODO: remove cover-erase
-# To be deleted when all functest packages will be listed
-nosetests --with-xunit \
-         --cover-package=$SCRIPTDIR/utils \
-         --with-coverage \
-         --cover-xml \
-         $SCRIPTDIR/tests/unit
-rc=$?
-
-deactivate
+tox
+exit $?
diff --git a/utils/test/reporting/setup.cfg b/utils/test/reporting/setup.cfg
new file mode 100644
index 000000000..9543945c7
--- /dev/null
+++ b/utils/test/reporting/setup.cfg
@@ -0,0 +1,12 @@
+[metadata]
+name = reporting
+version = 1
+home-page = https://wiki.opnfv.org/display/testing
+
+[files]
+packages =
+    reporting
+    api
+scripts =
+    docker/reporting.sh
+    docker/web_server.sh
diff --git a/utils/test/reporting/setup.py b/utils/test/reporting/setup.py
index 627785eca..a52d90555 100644
--- a/utils/test/reporting/setup.py
+++ b/utils/test/reporting/setup.py
@@ -1,22 +1,24 @@
-##############################################################################
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
 
-from setuptools import setup, find_packages
+# pylint: disable=missing-docstring
+
+import setuptools
 
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
 
-setup(
-    name="reporting",
-    version="master",
-    packages=find_packages(),
-    include_package_data=True,
-    package_data={
-    },
-    url="https://www.opnfv.org",
-    install_requires=["coverage==4.1",
-                      "mock==1.3.0",
-                      "nose==1.3.7"],
-)
+setuptools.setup(
+    setup_requires=['pbr>=1.8'],
+    pbr=True)
diff --git a/utils/test/reporting/storperf/reporting-status.py b/utils/test/reporting/storperf/reporting-status.py
deleted file mode 100644
index 888e339f8..000000000
--- a/utils/test/reporting/storperf/reporting-status.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import jinja2
-import os
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-import utils.scenarioResult as sr
-
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Storperf-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention = %s days              *" % PERIOD)
-logger.info("*                                         *")
-logger.info("*******************************************")
-
-# retrieve the list of storperf tests
-storperf_tests = rp_utils.get_config('storperf.test_list')
-logger.info("Storperf tests: %s" % storperf_tests)
-
-# For all the versions
-for version in versions:
-    # For all the installers
-    for installer in installers:
-        # get scenarios results data
-        # for the moment we consider only 1 case snia_steady_state
-        scenario_results = rp_utils.getScenarios("snia_steady_state",
-                                                 installer,
-                                                 version)
-        # logger.info("scenario_results: %s" % scenario_results)
-
-        scenario_stats = rp_utils.getScenarioStats(scenario_results)
-        logger.info("scenario_stats: %s" % scenario_stats)
-        items = {}
-        scenario_result_criteria = {}
-
-        # From each scenarios get results list
-        for s, s_result in scenario_results.items():
-            logger.info("---------------------------------")
-            logger.info("installer %s, version %s, scenario %s", installer,
-                        version, s)
-            ten_criteria = len(s_result)
-
-            ten_score = 0
-            for v in s_result:
-                if "PASS" in v['criteria']:
-                    ten_score += 1
-
-            logger.info("ten_score: %s / %s" % (ten_score, ten_criteria))
-
-            four_score = 0
-            try:
-                LASTEST_TESTS = rp_utils.get_config(
-                    'general.nb_iteration_tests_success_criteria')
-                s_result.sort(key=lambda x: x['start_date'])
-                four_result = s_result[-LASTEST_TESTS:]
-                logger.debug("four_result: {}".format(four_result))
-                logger.debug("LASTEST_TESTS: {}".format(LASTEST_TESTS))
-                # logger.debug("four_result: {}".format(four_result))
-                four_criteria = len(four_result)
-                for v in four_result:
-                    if "PASS" in v['criteria']:
-                        four_score += 1
-                logger.info("4 Score: %s / %s " % (four_score,
-                                                   four_criteria))
-            except:
-                logger.error("Impossible to retrieve the four_score")
-
-            try:
-                s_status = (four_score * 100) / four_criteria
-            except:
-                s_status = 0
-            logger.info("Score percent = %s" % str(s_status))
-            s_four_score = str(four_score) + '/' + str(four_criteria)
-            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
-            s_score_percent = str(s_status)
-
-            logger.debug(" s_status: {}".format(s_status))
-            if s_status == 100:
-                logger.info(">>>>> scenario OK, save the information")
-            else:
-                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
-                             last 10 days = %s" % (s_four_score, s_ten_score))
-
-            s_url = ""
-            if len(s_result) > 0:
-                build_tag = s_result[len(s_result)-1]['build_tag']
-                logger.debug("Build tag: %s" % build_tag)
-                s_url = s_url = rp_utils.getJenkinsUrl(build_tag)
-                logger.info("last jenkins url: %s" % s_url)
-
-            # Save daily results in a file
-            path_validation_file = ("./display/" + version +
-                                    "/storperf/scenario_history.txt")
-
-            if not os.path.exists(path_validation_file):
-                with open(path_validation_file, 'w') as f:
-                    info = 'date,scenario,installer,details,score\n'
-                    f.write(info)
-
-            with open(path_validation_file, "a") as f:
-                info = (reportingDate + "," + s + "," + installer +
-                        "," + s_ten_score + "," +
-                        str(s_score_percent) + "\n")
-                f.write(info)
-
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
-                                                            s_four_score,
-                                                            s_ten_score,
-                                                            s_score_percent,
-                                                            s_url)
-
-            logger.info("--------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(loader=templateLoader,
-                                         autoescape=True)
-
-        TEMPLATE_FILE = "./storperf/template/index-status-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_results=scenario_result_criteria,
-                                     installer=installer,
-                                     period=PERIOD,
-                                     version=version,
-                                     date=reportingDate)
-
-        with open("./display/" + version +
-                  "/storperf/status-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
diff --git a/utils/test/reporting/storperf/template/index-status-tmpl.html b/utils/test/reporting/storperf/template/index-status-tmpl.html
deleted file mode 100644
index e872272c3..000000000
--- a/utils/test/reporting/storperf/template/index-status-tmpl.html
+++ /dev/null
@@ -1,110 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
-    <script type="text/javascript" src="../../js/gauge.js"></script>
-    <script type="text/javascript" src="../../js/trend.js"></script>
-    <script>
-        function onDocumentReady() {
-            // Gauge management
-            {% for scenario in scenario_results.keys() -%}
-            var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
-            {%- endfor %}
-            // assign success rate to the gauge
-            function updateReadings() {
-                {% for scenario in scenario_results.keys() -%}
-                 gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
-                 {%- endfor %}
-            }
-            updateReadings();
-        }
-
-        // trend line management
-        d3.csv("./scenario_history.txt", function(data) {
-            // ***************************************
-            // Create the trend line
-            {% for scenario in scenario_results.keys() -%}
-            // for scenario {{scenario}}
-            // Filter results
-                var trend{{loop.index}} = data.filter(function(row) {
-                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
-                })
-            // Parse the date
-            trend{{loop.index}}.forEach(function(d) {
-                d.date = parseDate(d.date);
-                d.score = +d.score
-            });
-            // Draw the trend line
-            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
-            // ****************************************
-            {%- endfor %}
-        });
-        if ( !window.isLoaded ) {
-            window.addEventListener("load", function() {
-            onDocumentReady();
-            }, false);
-        } else {
-            onDocumentReady();
-        }
-    </script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-          <h3 class="text-muted">Storperf status page ({{version}}, {{date}})</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="status-apex.html">Apex</a></li>
-            <li><a href="status-compass.html">Compass</a></li>
-            <li><a href="status-fuel.html">Fuel</a></li>
-            <li><a href="status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-
-        <div class="scenario-overview">
-            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
-                <table class="table">
-                    <tr>
-                        <th width="40%">Scenario</th>
-                        <th width="20%">Status</th>
-                        <th width="20%">Trend</th>
-                        <th width="10%">Last 4 Iterations</th>
-                        <th width="10%">Last 10 Days</th>
-                    </tr>
-                        {% for scenario,result in scenario_results.iteritems() -%}
-                            <tr class="tr-ok">
-                                <td><a href="{{scenario_results[scenario].getLastUrl()}}">{{scenario}}</a></td>
-                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
-                                <td><div id="trend_svg{{loop.index}}"></div></td>
-                                <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
-                                <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
-                            </tr>
-                        {%- endfor %}
-                </table>
-        </div>
-
-
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/test-requirements.txt b/utils/test/reporting/test-requirements.txt
new file mode 100644
index 000000000..738f50862
--- /dev/null
+++ b/utils/test/reporting/test-requirements.txt
@@ -0,0 +1,5 @@
+coverage>=4.0 # Apache-2.0
+mock>=2.0 # BSD
+nose # LGPL
+flake8<2.6.0,>=2.5.4 # MIT
+pylint==1.4.5 # GPLv2
diff --git a/utils/test/reporting/tests/__init__.py b/utils/test/reporting/tests/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/tests/unit/__init__.py b/utils/test/reporting/tests/unit/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/tests/unit/utils/__init__.py b/utils/test/reporting/tests/unit/utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/tests/unit/utils/test_utils.py b/utils/test/reporting/tests/unit/utils/test_utils.py
deleted file mode 100644
index b9c39806c..000000000
--- a/utils/test/reporting/tests/unit/utils/test_utils.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-import unittest
-
-from utils import reporting_utils
-
-
-class reportingUtilsTesting(unittest.TestCase):
-
-    logging.disable(logging.CRITICAL)
-
-    def setUp(self):
-        self.test = reporting_utils
-
-    def test_getConfig(self):
-        self.assertEqual(self.test.get_config("general.period"), 10)
-# TODO
-# ...
-
-if __name__ == "__main__":
-    unittest.main(verbosity=2)
diff --git a/utils/test/reporting/tox.ini b/utils/test/reporting/tox.ini
new file mode 100644
index 000000000..2df503050
--- /dev/null
+++ b/utils/test/reporting/tox.ini
@@ -0,0 +1,27 @@
+[tox]
+envlist = pep8,pylint,py27
+
+[testenv]
+usedevelop = True
+deps =
+  -r{toxinidir}/requirements.txt
+  -r{toxinidir}/test-requirements.txt
+commands = nosetests --with-xunit \
+  --with-coverage \
+  --cover-tests \
+  --cover-package=reporting \
+  --cover-xml \
+  --cover-html \
+  reporting/tests/unit
+
+[testenv:pep8]
+basepython = python2.7
+commands = flake8
+
+[testenv:pylint]
+basepython = python2.7
+whitelist_externals = bash
+commands =
+  bash -c "\
+  pylint --disable=locally-disabled reporting| \
+    tee pylint.out | sed -ne '/Raw metrics/,//p'"
diff --git a/utils/test/reporting/utils/__init__.py b/utils/test/reporting/utils/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
deleted file mode 100644
index 0a178ba1f..000000000
--- a/utils/test/reporting/utils/reporting_utils.py
+++ /dev/null
@@ -1,461 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-from urllib2 import Request, urlopen, URLError
-import logging
-import json
-import os
-import requests
-import pdfkit
-import yaml
-
-
-# ----------------------------------------------------------
-#
-#               YAML UTILS
-#
-# -----------------------------------------------------------
-def get_parameter_from_yaml(parameter, file):
-    """
-    Returns the value of a given parameter in file.yaml
-    parameter must be given in string format with dots
-    Example: general.openstack.image_name
-    """
-    with open(file) as f:
-        file_yaml = yaml.safe_load(f)
-    f.close()
-    value = file_yaml
-    for element in parameter.split("."):
-        value = value.get(element)
-        if value is None:
-            raise ValueError("The parameter %s is not defined in"
-                             " reporting.yaml" % parameter)
-    return value
-
-
-def get_config(parameter):
-    yaml_ = os.environ["CONFIG_REPORTING_YAML"]
-    return get_parameter_from_yaml(parameter, yaml_)
-
-
-# ----------------------------------------------------------
-#
-#               LOGGER UTILS
-#
-# -----------------------------------------------------------
-def getLogger(module):
-    logFormatter = logging.Formatter("%(asctime)s [" +
-                                     module +
-                                     "] [%(levelname)-5.5s]  %(message)s")
-    logger = logging.getLogger()
-    log_file = get_config('general.log.log_file')
-    log_level = get_config('general.log.log_level')
-
-    fileHandler = logging.FileHandler("{0}/{1}".format('.', log_file))
-    fileHandler.setFormatter(logFormatter)
-    logger.addHandler(fileHandler)
-
-    consoleHandler = logging.StreamHandler()
-    consoleHandler.setFormatter(logFormatter)
-    logger.addHandler(consoleHandler)
-    logger.setLevel(log_level)
-    return logger
-
-
-# ----------------------------------------------------------
-#
-#               REPORTING UTILS
-#
-# -----------------------------------------------------------
-def getApiResults(case, installer, scenario, version):
-    results = json.dumps([])
-    # to remove proxy (to be removed at the end for local test only)
-    # proxy_handler = urllib2.ProxyHandler({})
-    # opener = urllib2.build_opener(proxy_handler)
-    # urllib2.install_opener(opener)
-    # url = "http://127.0.0.1:8000/results?case=" + case + \
-    #       "&period=30&installer=" + installer
-    period = get_config('general.period')
-    url_base = get_config('testapi.url')
-    nb_tests = get_config('general.nb_iteration_tests_success_criteria')
-
-    url = ("http://" + url_base + "?case=" + case +
-           "&period=" + str(period) + "&installer=" + installer +
-           "&scenario=" + scenario + "&version=" + version +
-           "&last=" + str(nb_tests))
-    request = Request(url)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError as e:
-        print('No kittez. Got an error code:', e)
-
-    return results
-
-
-def getScenarios(case, installer, version):
-
-    try:
-        case = case.getName()
-    except:
-        # if case is not an object test case, try the string
-        if type(case) == str:
-            case = case
-        else:
-            raise ValueError("Case cannot be evaluated")
-
-    period = get_config('general.period')
-    url_base = get_config('testapi.url')
-
-    url = ("http://" + url_base + "?case=" + case +
-           "&period=" + str(period) + "&installer=" + installer +
-           "&version=" + version)
-
-    try:
-        request = Request(url)
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-        test_results = results['results']
-
-        page = results['pagination']['total_pages']
-        if page > 1:
-            test_results = []
-            for i in range(1, page + 1):
-                url_page = url + "&page=" + str(i)
-                request = Request(url_page)
-                response = urlopen(request)
-                k = response.read()
-                results = json.loads(k)
-                test_results += results['results']
-    except URLError as err:
-        print('Got an error code:', err)
-
-    if test_results is not None:
-        test_results.reverse()
-        scenario_results = {}
-
-        for r in test_results:
-            # Retrieve all the scenarios per installer
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            # Do we consider results from virtual pods ...
-            # Do we consider results for non HA scenarios...
-            exclude_virtual_pod = get_config('functest.exclude_virtual')
-            exclude_noha = get_config('functest.exclude_noha')
-            if ((exclude_virtual_pod and "virtual" in r['pod_name']) or
-                    (exclude_noha and "noha" in r['scenario'])):
-                print("exclude virtual pod results...")
-            else:
-                scenario_results[r['scenario']].append(r)
-
-    return scenario_results
-
-
-def getScenarioStats(scenario_results):
-    scenario_stats = {}
-    for k, v in scenario_results.iteritems():
-        scenario_stats[k] = len(v)
-
-    return scenario_stats
-
-
-def getScenarioStatus(installer, version):
-    period = get_config('general.period')
-    url_base = get_config('testapi.url')
-
-    url = ("http://" + url_base + "?case=scenario_status" +
-           "&installer=" + installer +
-           "&version=" + version + "&period=" + str(period))
-    request = Request(url)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        response.close()
-        results = json.loads(k)
-        test_results = results['results']
-    except URLError as e:
-        print('Got an error code:', e)
-
-    scenario_results = {}
-    result_dict = {}
-    if test_results is not None:
-        for r in test_results:
-            if r['stop_date'] != 'None' and r['criteria'] is not None:
-                if not r['scenario'] in scenario_results.keys():
-                    scenario_results[r['scenario']] = []
-                scenario_results[r['scenario']].append(r)
-
-        for k, v in scenario_results.items():
-            # scenario_results[k] = v[:LASTEST_TESTS]
-            s_list = []
-            for element in v:
-                if element['criteria'] == 'SUCCESS':
-                    s_list.append(1)
-                else:
-                    s_list.append(0)
-            result_dict[k] = s_list
-
-    # return scenario_results
-    return result_dict
-
-
-def getQtipResults(version, installer):
-    period = get_config('qtip.period')
-    url_base = get_config('testapi.url')
-
-    url = ("http://" + url_base + "?project=qtip" +
-           "&installer=" + installer +
-           "&version=" + version + "&period=" + str(period))
-    request = Request(url)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        response.close()
-        results = json.loads(k)['results']
-    except URLError as err:
-        print('Got an error code:', err)
-
-    result_dict = {}
-    if results:
-        for r in results:
-            key = '{}/{}'.format(r['pod_name'], r['scenario'])
-            if key not in result_dict.keys():
-                result_dict[key] = []
-            result_dict[key].append(r['details']['score'])
-
-    # return scenario_results
-    return result_dict
-
-
-def getNbtestOk(results):
-    nb_test_ok = 0
-    for r in results:
-        for k, v in r.iteritems():
-            try:
-                if "PASS" in v:
-                    nb_test_ok += 1
-            except:
-                print("Cannot retrieve test status")
-    return nb_test_ok
-
-
-def getResult(testCase, installer, scenario, version):
-
-    # retrieve raw results
-    results = getApiResults(testCase, installer, scenario, version)
-    # let's concentrate on test results only
-    test_results = results['results']
-
-    # if results found, analyze them
-    if test_results is not None:
-        test_results.reverse()
-
-        scenario_results = []
-
-        # print " ---------------- "
-        # print test_results
-        # print " ---------------- "
-        # print "nb of results:" + str(len(test_results))
-
-        for r in test_results:
-            # print r["start_date"]
-            # print r["criteria"]
-            scenario_results.append({r["start_date"]: r["criteria"]})
-        # sort results
-        scenario_results.sort()
-        # 4 levels for the results
-        # 3: 4+ consecutive runs passing the success criteria
-        # 2: <4 successful consecutive runs but passing the criteria
-        # 1: close to pass the success criteria
-        # 0: 0% success, not passing
-        # -1: no run available
-        test_result_indicator = 0
-        nbTestOk = getNbtestOk(scenario_results)
-
-        # print "Nb test OK (last 10 days):"+ str(nbTestOk)
-        # check that we have at least 4 runs
-        if len(scenario_results) < 1:
-            # No results available
-            test_result_indicator = -1
-        elif nbTestOk < 1:
-            test_result_indicator = 0
-        elif nbTestOk < 2:
-            test_result_indicator = 1
-        else:
-            # Test the last 4 run
-            if (len(scenario_results) > 3):
-                last4runResults = scenario_results[-4:]
-                nbTestOkLast4 = getNbtestOk(last4runResults)
-                # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
-                if nbTestOkLast4 > 3:
-                    test_result_indicator = 3
-                else:
-                    test_result_indicator = 2
-            else:
-                test_result_indicator = 2
-    return test_result_indicator
-
-
-def getJenkinsUrl(build_tag):
-    # e.g. jenkins-functest-apex-apex-daily-colorado-daily-colorado-246
-    # id = 246
-    # jenkins-functest-compass-huawei-pod5-daily-master-136
-    # id = 136
-    # note it is linked to jenkins format
-    # if this format changes...function to be adapted....
-    url_base = get_config('functest.jenkins_url')
-    try:
-        build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
-        url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
-                  "/" + str(build_id[0]))
-        jenkins_url = url_base + url_id + "/console"
-    except:
-        print('Impossible to get jenkins url:')
-
-    if "jenkins-" not in build_tag:
-        jenkins_url = None
-
-    return jenkins_url
-
-
-def getScenarioPercent(scenario_score, scenario_criteria):
-    score = 0.0
-    try:
-        score = float(scenario_score) / float(scenario_criteria) * 100
-    except:
-        print('Impossible to calculate the percentage score')
-    return score
-
-
-# *********
-# Functest
-# *********
-def getFunctestConfig(version=""):
-    config_file = get_config('functest.test_conf') + version
-    response = requests.get(config_file)
-    return yaml.safe_load(response.text)
-
-
-def getArchitectures(scenario_results):
-    supported_arch = ['x86']
-    if (len(scenario_results) > 0):
-        for scenario_result in scenario_results.values():
-            for value in scenario_result:
-                if ("armband" in value['build_tag']):
-                    supported_arch.append('aarch64')
-                    return supported_arch
-    return supported_arch
-
-
-def filterArchitecture(results, architecture):
-    filtered_results = {}
-    for name, results in results.items():
-        filtered_values = []
-        for value in results:
-            if (architecture is "x86"):
-                # drop aarch64 results
-                if ("armband" not in value['build_tag']):
-                    filtered_values.append(value)
-            elif(architecture is "aarch64"):
-                # drop x86 results
-                if ("armband" in value['build_tag']):
-                    filtered_values.append(value)
-        if (len(filtered_values) > 0):
-            filtered_results[name] = filtered_values
-    return filtered_results
-
-
-# *********
-# Yardstick
-# *********
-def subfind(given_list, pattern_list):
-    LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
-    for i in range(len(given_list)):
-        if given_list[i] == pattern_list[0] and \
-                given_list[i:i + LASTEST_TESTS] == pattern_list:
-            return True
-    return False
-
-
-def _get_percent(status):
-
-    if status * 100 % 6:
-        return round(float(status) * 100 / 6, 1)
-    else:
-        return status * 100 / 6
-
-
-def get_percent(four_list, ten_list):
-    four_score = 0
-    ten_score = 0
-
-    for v in four_list:
-        four_score += v
-    for v in ten_list:
-        ten_score += v
-
-    LASTEST_TESTS = get_config('general.nb_iteration_tests_success_criteria')
-    if four_score == LASTEST_TESTS:
-        status = 6
-    elif subfind(ten_list, [1, 1, 1, 1]):
-        status = 5
-    elif ten_score == 0:
-        status = 0
-    else:
-        status = four_score + 1
-
-    return _get_percent(status)
-
-
-def _test():
-    status = getScenarioStatus("compass", "master")
-    print("status:++++++++++++++++++++++++")
-    print(json.dumps(status, indent=4))
-
-
-# ----------------------------------------------------------
-#
-#               Export
-#
-# -----------------------------------------------------------
-
-def export_csv(scenario_file_name, installer, version):
-    # csv
-    # generate sub files based on scenario_history.txt
-    scenario_installer_file_name = ("./display/" + version +
-                                    "/functest/scenario_history_" +
-                                    installer + ".csv")
-    scenario_installer_file = open(scenario_installer_file_name, "a")
-    with open(scenario_file_name, "r") as scenario_file:
-        scenario_installer_file.write("date,scenario,installer,detail,score\n")
-        for line in scenario_file:
-            if installer in line:
-                scenario_installer_file.write(line)
-        scenario_installer_file.close
-
-
-def generate_csv(scenario_file):
-    import shutil
-    # csv
-    # generate sub files based on scenario_history.txt
-    csv_file = scenario_file.replace('txt', 'csv')
-    shutil.copy2(scenario_file, csv_file)
-
-
-def export_pdf(pdf_path, pdf_doc_name):
-    try:
-        pdfkit.from_file(pdf_path, pdf_doc_name)
-    except IOError:
-        print("Error but pdf generated anyway...")
-    except:
-        print("impossible to generate PDF")
diff --git a/utils/test/reporting/utils/scenarioResult.py b/utils/test/reporting/utils/scenarioResult.py
deleted file mode 100644
index 6029d7f42..000000000
--- a/utils/test/reporting/utils/scenarioResult.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-class ScenarioResult(object):
-    def __init__(self, status, four_days_score='', ten_days_score='',
-                 score_percent=0.0, last_url=''):
-        self.status = status
-        self.four_days_score = four_days_score
-        self.ten_days_score = ten_days_score
-        self.score_percent = score_percent
-        self.last_url = last_url
-
-    def getStatus(self):
-        return self.status
-
-    def getTenDaysScore(self):
-        return self.ten_days_score
-
-    def getFourDaysScore(self):
-        return self.four_days_score
-
-    def getScorePercent(self):
-        return self.score_percent
-
-    def getLastUrl(self):
-        return self.last_url
diff --git a/utils/test/reporting/yardstick/img/gauge_0.png b/utils/test/reporting/yardstick/img/gauge_0.png
deleted file mode 100644
index ecefc0e66..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_0.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_100.png b/utils/test/reporting/yardstick/img/gauge_100.png
deleted file mode 100644
index e199e1561..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_100.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_16.7.png b/utils/test/reporting/yardstick/img/gauge_16.7.png
deleted file mode 100644
index 3e3993c3b..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_16.7.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_25.png b/utils/test/reporting/yardstick/img/gauge_25.png
deleted file mode 100644
index 4923659b9..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_25.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_33.3.png b/utils/test/reporting/yardstick/img/gauge_33.3.png
deleted file mode 100644
index 364574b4a..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_33.3.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_41.7.png b/utils/test/reporting/yardstick/img/gauge_41.7.png
deleted file mode 100644
index 8c3e910fa..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_41.7.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_50.png b/utils/test/reporting/yardstick/img/gauge_50.png
deleted file mode 100644
index 2874b9fcf..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_50.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_58.3.png b/utils/test/reporting/yardstick/img/gauge_58.3.png
deleted file mode 100644
index beedc8aa9..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_58.3.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_66.7.png b/utils/test/reporting/yardstick/img/gauge_66.7.png
deleted file mode 100644
index 93f44d133..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_66.7.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_75.png b/utils/test/reporting/yardstick/img/gauge_75.png
deleted file mode 100644
index 9fc261ff8..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_75.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_8.3.png b/utils/test/reporting/yardstick/img/gauge_8.3.png
deleted file mode 100644
index 59f86571e..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_8.3.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_83.3.png b/utils/test/reporting/yardstick/img/gauge_83.3.png
deleted file mode 100644
index 27ae4ec54..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_83.3.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/gauge_91.7.png b/utils/test/reporting/yardstick/img/gauge_91.7.png
deleted file mode 100644
index 280865714..000000000
Binary files a/utils/test/reporting/yardstick/img/gauge_91.7.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/icon-nok.png b/utils/test/reporting/yardstick/img/icon-nok.png
deleted file mode 100644
index 526b5294b..000000000
Binary files a/utils/test/reporting/yardstick/img/icon-nok.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/icon-ok.png b/utils/test/reporting/yardstick/img/icon-ok.png
deleted file mode 100644
index 3a9de2e89..000000000
Binary files a/utils/test/reporting/yardstick/img/icon-ok.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/weather-clear.png b/utils/test/reporting/yardstick/img/weather-clear.png
deleted file mode 100644
index a0d967750..000000000
Binary files a/utils/test/reporting/yardstick/img/weather-clear.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/weather-few-clouds.png b/utils/test/reporting/yardstick/img/weather-few-clouds.png
deleted file mode 100644
index acfa78398..000000000
Binary files a/utils/test/reporting/yardstick/img/weather-few-clouds.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/weather-overcast.png b/utils/test/reporting/yardstick/img/weather-overcast.png
deleted file mode 100644
index 4296246d0..000000000
Binary files a/utils/test/reporting/yardstick/img/weather-overcast.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/img/weather-storm.png b/utils/test/reporting/yardstick/img/weather-storm.png
deleted file mode 100644
index 956f0e20f..000000000
Binary files a/utils/test/reporting/yardstick/img/weather-storm.png and /dev/null differ
diff --git a/utils/test/reporting/yardstick/index.html b/utils/test/reporting/yardstick/index.html
deleted file mode 100644
index 488f1421d..000000000
--- a/utils/test/reporting/yardstick/index.html
+++ /dev/null
@@ -1,51 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-        <h3 class="text-muted">Yardstick reporting page</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="index-status-apex.html">Apex</a></li>
-            <li><a href="index-status-compass.html">Compass</a></li>
-            <li><a href="index-status-fuel.html">Fuel</a></li>
-            <li><a href="index-status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-main">
-            <h2>Yardstick</h2>
-            Yardstick is used in OPNFV for verifying the OPNFV infrastructure and some of the OPNFV features.
-            <br>The Yardstick framework is deployed in several OPNFV community labs.
-            <br>It is installer, infrastructure and application independent.
-
-            <h2>Useful Links</h2>
-            <li><a href="https://wiki.opnfv.org/download/attachments/5734608/yardstick%20in%20depth.pdf?version=1&modificationDate=1463410431000&api=v2">Yardstick in Depth</a></li>
-            <li><a href="https://git.opnfv.org/cgit/yardstick">Yardstick Repo</a></li>
-            <li><a href="https://wiki.opnfv.org/display/yardstick">Yardstick Project</a></li>
-            <li><a href="https://build.opnfv.org/ci/view/yardstick/">Yardstick Jenkins page</a></li>
-            <li><a href="https://jira.opnfv.org/browse/YARDSTICK-119?jql=project%20%3D%20YARDSTICK">JIRA</a></li>
-
-        </div>
-    </div>
-    <div class="col-md-1"></div>
-</div>
diff --git a/utils/test/reporting/yardstick/reporting-status.py b/utils/test/reporting/yardstick/reporting-status.py
deleted file mode 100644
index 12f42ca31..000000000
--- a/utils/test/reporting/yardstick/reporting-status.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import datetime
-import jinja2
-import os
-
-import utils.scenarioResult as sr
-from scenarios import config as cf
-
-# manage conf
-import utils.reporting_utils as rp_utils
-
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
-
-# Logger
-logger = rp_utils.getLogger("Yardstick-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention = %s days              *" % PERIOD)
-logger.info("*                                         *")
-logger.info("*******************************************")
-
-
-# For all the versions
-for version in versions:
-    # For all the installers
-    for installer in installers:
-        # get scenarios results data
-        scenario_results = rp_utils.getScenarioStatus(installer, version)
-        if 'colorado' == version:
-            stable_result = rp_utils.getScenarioStatus(installer,
-                                                       'stable/colorado')
-            for k, v in stable_result.items():
-                if k not in scenario_results.keys():
-                    scenario_results[k] = []
-                scenario_results[k] += stable_result[k]
-        scenario_result_criteria = {}
-
-        for s in scenario_results.keys():
-            if installer in cf.keys() and s in cf[installer].keys():
-                scenario_results.pop(s)
-
-        # From each scenarios get results list
-        for s, s_result in scenario_results.items():
-            logger.info("---------------------------------")
-            logger.info("installer %s, version %s, scenario %s", installer,
-                        version, s)
-
-            ten_criteria = len(s_result)
-            ten_score = 0
-            for v in s_result:
-                ten_score += v
-
-            LASTEST_TESTS = rp_utils.get_config(
-                'general.nb_iteration_tests_success_criteria')
-            four_result = s_result[:LASTEST_TESTS]
-            four_criteria = len(four_result)
-            four_score = 0
-            for v in four_result:
-                four_score += v
-
-            s_status = str(rp_utils.get_percent(four_result, s_result))
-            s_four_score = str(four_score) + '/' + str(four_criteria)
-            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
-            s_score_percent = rp_utils.get_percent(four_result, s_result)
-
-            if '100' == s_status:
-                logger.info(">>>>> scenario OK, save the information")
-            else:
-                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
-                            last 10 days = %s" % (s_four_score, s_ten_score))
-
-            # Save daily results in a file
-            path_validation_file = ("./display/" + version +
-                                    "/yardstick/scenario_history.txt")
-
-            if not os.path.exists(path_validation_file):
-                with open(path_validation_file, 'w') as f:
-                    info = 'date,scenario,installer,details,score\n'
-                    f.write(info)
-
-            with open(path_validation_file, "a") as f:
-                info = (reportingDate + "," + s + "," + installer +
-                        "," + s_ten_score + "," +
-                        str(s_score_percent) + "\n")
-                f.write(info)
-
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
-                                                            s_four_score,
-                                                            s_ten_score,
-                                                            s_score_percent)
-
-            logger.info("--------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(loader=templateLoader,
-                                         autoescape=True)
-
-        TEMPLATE_FILE = "./yardstick/template/index-status-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_results=scenario_result_criteria,
-                                     installer=installer,
-                                     period=PERIOD,
-                                     version=version,
-                                     date=reportingDate)
-
-        with open("./display/" + version +
-                  "/yardstick/status-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
diff --git a/utils/test/reporting/yardstick/scenarios.py b/utils/test/reporting/yardstick/scenarios.py
deleted file mode 100644
index 26e8c8bb0..000000000
--- a/utils/test/reporting/yardstick/scenarios.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/python
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import requests
-import yaml
-
-import utils.reporting_utils as rp_utils
-
-yardstick_conf = rp_utils.get_config('yardstick.test_conf')
-response = requests.get(yardstick_conf)
-yaml_file = yaml.safe_load(response.text)
-reporting = yaml_file.get('reporting')
-
-config = {}
-
-for element in reporting:
-    name = element['name']
-    scenarios = element['scenario']
-    for s in scenarios:
-        if name not in config:
-            config[name] = {}
-        config[name][s] = True
diff --git a/utils/test/reporting/yardstick/template/index-status-tmpl.html b/utils/test/reporting/yardstick/template/index-status-tmpl.html
deleted file mode 100644
index 77ba9502f..000000000
--- a/utils/test/reporting/yardstick/template/index-status-tmpl.html
+++ /dev/null
@@ -1,110 +0,0 @@
- <html>
-  <head>
-    <meta charset="utf-8">
-    <!-- Bootstrap core CSS -->
-    <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet">
-    <link href="../../css/default.css" rel="stylesheet">
-    <script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1/jquery.min.js"></script>
-    <script type="text/javascript" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script>
-    <script type="text/javascript" src="http://d3js.org/d3.v2.min.js"></script>
-    <script type="text/javascript" src="../../js/gauge.js"></script>
-    <script type="text/javascript" src="../../js/trend.js"></script>
-    <script>
-        function onDocumentReady() {
-            // Gauge management
-            {% for scenario in scenario_results.keys() -%}
-            var gaugeScenario{{loop.index}} = gauge('#gaugeScenario{{loop.index}}');
-            {%- endfor %}
-            // assign success rate to the gauge
-            function updateReadings() {
-                {% for scenario in scenario_results.keys() -%}
-                 gaugeScenario{{loop.index}}.update({{scenario_results[scenario].getScorePercent()}});
-                 {%- endfor %}
-            }
-            updateReadings();
-        }
-
-        // trend line management
-        d3.csv("./scenario_history.csv", function(data) {
-            // ***************************************
-            // Create the trend line
-            {% for scenario in scenario_results.keys() -%}
-            // for scenario {{scenario}}
-            // Filter results
-                var trend{{loop.index}} = data.filter(function(row) {
-                    return row["scenario"]=="{{scenario}}" && row["installer"]=="{{installer}}";
-                })
-            // Parse the date
-            trend{{loop.index}}.forEach(function(d) {
-                d.date = parseDate(d.date);
-                d.score = +d.score
-            });
-            // Draw the trend line
-            var mytrend = trend("#trend_svg{{loop.index}}",trend{{loop.index}})
-            // ****************************************
-            {%- endfor %}
-        });
-        if ( !window.isLoaded ) {
-            window.addEventListener("load", function() {
-            onDocumentReady();
-            }, false);
-        } else {
-            onDocumentReady();
-        }
-    </script>
-    <script type="text/javascript">
-    $(document).ready(function (){
-        $(".btn-more").click(function() {
-            $(this).hide();
-            $(this).parent().find(".panel-default").show();
-        });
-    })
-    </script>
-  </head>
-    <body>
-    <div class="container">
-      <div class="masthead">
-          <h3 class="text-muted">Yardstick status page ({{version}}, {{date}})</h3>
-        <nav>
-          <ul class="nav nav-justified">
-            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
-            <li><a href="status-apex.html">Apex</a></li>
-            <li><a href="status-compass.html">Compass</a></li>
-            <li><a href="status-fuel.html">Fuel</a></li>
-            <li><a href="status-joid.html">Joid</a></li>
-          </ul>
-        </nav>
-      </div>
-<div class="row">
-    <div class="col-md-1"></div>
-    <div class="col-md-10">
-        <div class="page-header">
-            <h2>{{installer}}</h2>
-        </div>
-
-        <div class="scenario-overview">
-            <div class="panel-heading"><h4><b>List of last scenarios ({{version}}) run over the last {{period}} days </b></h4></div>
-                <table class="table">
-                    <tr>
-                        <th width="40%">Scenario</th>
-                        <th width="20%">Status</th>
-                        <th width="20%">Trend</th>
-                        <th width="10%">Last 4 Iterations</th>
-                        <th width="10%">Last 10 Days</th>
-                    </tr>
-                        {% for scenario,result in scenario_results.iteritems() -%}
-                            <tr class="tr-ok">
-                                <td>{{scenario}}</td>
-                                <td><div id="gaugeScenario{{loop.index}}"></div></td>
-                                <td><div id="trend_svg{{loop.index}}"></div></td>
-                                <td>{{scenario_results[scenario].getFourDaysScore()}}</td>
-                                <td>{{scenario_results[scenario].getTenDaysScore()}}</td>
-                            </tr>
-                        {%- endfor %}
-                </table>
-        </div>
-
-
-    </div>
-    <div class="col-md-1"></div>
-</div>
-- 
cgit