Diffstat (limited to 'utils')
44 files changed, 982 insertions, 636 deletions
diff --git a/utils/fetch_os_creds.sh b/utils/fetch_os_creds.sh
index 0873a6832..ac7595053 100755
--- a/utils/fetch_os_creds.sh
+++ b/utils/fetch_os_creds.sh
@@ -167,11 +167,7 @@ elif [ "$installer_type" == "apex" ]; then
     sudo scp $ssh_options root@$installer_ip:/home/stack/overcloudrc.v3 $dest_path
 elif [ "$installer_type" == "compass" ]; then
-    if [ "${BRANCH}" == "master" ]; then
-        sudo docker cp compass-tasks:/opt/openrc $dest_path &> /dev/null
-        sudo chown $(whoami):$(whoami) $dest_path
-        sudo docker cp compass-tasks:/opt/os_cacert $os_cacert
-    else
+    if [ "${BRANCH}" == "stable/danube" ]; then
         verify_connectivity $installer_ip
         controller_ip=$(sshpass -p'root' ssh 2>/dev/null $ssh_options root@${installer_ip} \
             'mysql -ucompass -pcompass -Dcompass -e"select * from cluster;"' \
@@ -201,6 +197,10 @@ elif [ "$installer_type" == "compass" ]; then
         fi
         info "public_ip: $public_ip"
         swap_to_public $public_ip
+    else
+        sudo docker cp compass-tasks:/opt/openrc $dest_path &> /dev/null
+        sudo chown $(whoami):$(whoami) $dest_path
+        sudo docker cp compass-tasks:/opt/os_cacert $os_cacert
     fi
 elif [ "$installer_type" == "joid" ]; then
diff --git a/utils/jenkins-jnlp-connect.sh b/utils/jenkins-jnlp-connect.sh
index 13cb025d6..cd81f29d3 100755
--- a/utils/jenkins-jnlp-connect.sh
+++ b/utils/jenkins-jnlp-connect.sh
@@ -52,7 +52,7 @@ main () {
     fi
     if [[ $(whoami) != "root" ]]; then
-        if sudo -l | grep "requiretty | grep -v "\!requiretty"; then
+        if sudo -l | grep "requiretty" | grep -v "\!requiretty"; then
             echo "please comment out Defaults requiretty from /etc/sudoers"
             exit 1
         fi
diff --git a/utils/test/dashboard/setup.cfg b/utils/test/dashboard/setup.cfg
index dd0135861..859dcc0fb 100644
--- a/utils/test/dashboard/setup.cfg
+++ b/utils/test/dashboard/setup.cfg
@@ -6,6 +6,7 @@ description-file =
 author = SerenaFeng
 author-email = feng.xiaowei@zte.com.cn
 #home-page = http://www.opnfv.org/
+license = Apache-2.0
 classifier =
     Environment :: opnfv
     Intended Audience :: Information Technology
diff --git a/utils/test/opts/one_click_deploy.py b/utils/test/opts/one_click_deploy.py
new file mode 100644
index 000000000..074827021
--- /dev/null
+++ b/utils/test/opts/one_click_deploy.py
@@ -0,0 +1,67 @@
+import argparse
+import os
+
+from jinja2 import Environment
+
+DOCKER_COMPOSE_FILE = './docker-compose.yml'
+DOCKER_COMPOSE_TEMPLATE = """
+version: '2'
+services:
+  mongo:
+    image: mongo:3.2.1
+    container_name: opnfv-mongo
+  testapi:
+    image: opnfv/testapi:latest
+    container_name: opnfv-testapi
+    environment:
+      - mongodb_url=mongodb://mongo:27017/
+      - base_url={{ vars.base_url }}
+    ports:
+      - "{{ vars.testapi_port }}:8000"
+    links:
+      - mongo
+  reporting:
+    image: opnfv/reporting:latest
+    container_name: opnfv-reporting
+    ports:
+      - "{{ vars.reporting_port }}:8000"
+"""
+
+
+def render_docker_compose(testapi_port, reporting_port, testapi_base_url):
+    vars = {
+        "testapi_port": testapi_port,
+        "reporting_port": reporting_port,
+        "base_url": testapi_base_url,
+    }
+    yml = Environment().from_string(DOCKER_COMPOSE_TEMPLATE).render(vars=vars)
+    with open(DOCKER_COMPOSE_FILE, 'w') as f:
+        f.write(yml)
+        f.close()
+
+
+def main(args):
+    render_docker_compose(args.testapi_port,
+                          args.reporting_port,
+                          args.testapi_base_url)
+    os.system('docker-compose -f {} up -d'.format(DOCKER_COMPOSE_FILE))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Backup MongoDBs')
+    parser.add_argument('-tp', '--testapi-port',
+                        type=int,
+                        required=False,
+                        default=8082,
+                        help='testapi exposed port')
+    parser.add_argument('-tl', '--testapi-base-url',
+                        type=str,
+                        required=True,
+                        help='testapi exposed base-url')
+    parser.add_argument('-rp', '--reporting-port',
+                        type=int,
+                        required=False,
+                        default=8084,
+                        help='reporting exposed port')
+
+    main(parser.parse_args())
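Note: the new one_click_deploy.py above renders its embedded Jinja2 template and then launches the stack with docker-compose. A minimal sketch of driving the same helper directly from Python instead of its CLI; the base URL here is only an example value, not a default of the script:

    # Hypothetical direct use of the helper added above (normally driven by argparse).
    from one_click_deploy import render_docker_compose

    # Writes ./docker-compose.yml with the mongo, testapi and reporting services.
    render_docker_compose(testapi_port=8082,
                          reporting_port=8084,
                          testapi_base_url='http://testresults.opnfv.org/test')
    # Roughly equivalent CLI call:
    #   python one_click_deploy.py -tl http://testresults.opnfv.org/test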
diff --git a/utils/test/testapi/tools/watchdog/docker_watch.sh b/utils/test/opts/watchdog.sh
index f1d8946b6..51868d709 100644
--- a/utils/test/testapi/tools/watchdog/docker_watch.sh
+++ b/utils/test/opts/watchdog.sh
@@ -27,7 +27,7 @@ declare -A urls=( ["testapi"]="http://testresults.opnfv.org/test/" \
 ### Functions related to checking.
 function is_deploying() {
-    xml=$(curl -m10 "https://build.opnfv.org/ci/job/${1}-automate-master/lastBuild/api/xml?depth=1")
+    xml=$(curl -m10 "https://build.opnfv.org/ci/job/${1}-automate-docker-deploy-master/lastBuild/api/xml?depth=1")
     building=$(grep -oPm1 "(?<=<building>)[^<]+" <<< "$xml")
     if [[ $building == "false" ]]
     then
@@ -64,12 +64,11 @@ function check_modules() {
     failed_modules=()
     for module in "${modules[@]}"
     do
-        if is_deploying $module; then
-            continue
-        fi
         if ! check_connectivity $module "${urls[$module]}"; then
-            echo -e "$module failed"
-            failed_modules+=($module)
+            if ! is_deploying $module; then
+                echo -e "$module failed"
+                failed_modules+=($module)
+            fi
         fi
     done
     if [ ! -z "$failed_modules" ]; then
@@ -114,13 +113,11 @@ function start_containers_fix() {
 function start_container_fix() {
     echo -e "Starting a container $module"
-    sudo docker stop $module
-    sudo docker start $module
+    sudo docker restart $module
     sleep 5
     if ! check_connectivity $module "${urls[$module]}"; then
         echo -e "Starting an old container $module_old"
-        sudo docker stop $module
-        sudo docker start $module"_old"
+        sudo docker restart $module"_old"
         sleep 5
     fi
 }
diff --git a/utils/test/reporting/docker/Dockerfile b/utils/test/reporting/docker/Dockerfile
index f2357909d..07440ad29 100644
--- a/utils/test/reporting/docker/Dockerfile
+++ b/utils/test/reporting/docker/Dockerfile
@@ -22,7 +22,7 @@ ARG BRANCH=master
 ENV HOME /home/opnfv
 ENV working_dir ${HOME}/releng/utils/test/reporting
-ENV CONFIG_REPORTING_YAML ${working_dir}/reporting.yaml
+ENV CONFIG_REPORTING_YAML ${working_dir}/reporting/reporting.yaml
 WORKDIR ${HOME}
 
 # Packaged dependencies
diff --git a/utils/test/reporting/docker/nginx.conf b/utils/test/reporting/docker/nginx.conf
index 95baf0e48..66bd7e497 100644
--- a/utils/test/reporting/docker/nginx.conf
+++ b/utils/test/reporting/docker/nginx.conf
@@ -15,7 +15,7 @@ server {
     }
 
     location /api/ {
-        http_pass http://backends/;
+        proxy_pass http://backends/;
     }
 
     location /display/ {
diff --git a/utils/test/reporting/docker/reporting.sh b/utils/test/reporting/docker/reporting.sh
index 6cc7a7c9e..2cb438d00 100755
--- a/utils/test/reporting/docker/reporting.sh
+++ b/utils/test/reporting/docker/reporting.sh
@@ -3,7 +3,7 @@
 export PYTHONPATH="${PYTHONPATH}:./reporting"
 export CONFIG_REPORTING_YAML=./reporting/reporting.yaml
 
-declare -a versions=(danube master)
+declare -a versions=(euphrates danube master)
 declare -a projects=(functest storperf yardstick qtip vsperf bottlenecks)
 
 project=$1
@@ -59,8 +59,6 @@ if [ -z "$1" ]; then
         report_project $i $i "status"
         sleep 5
     done
-    report_project "QTIP" "qtip" "status"
-
     echo "Functest reporting vIMS..."
     report_project "functest" "functest" "vims"
report_project "functest" "functest" "vims" diff --git a/utils/test/reporting/html/euphrates.html b/utils/test/reporting/html/euphrates.html new file mode 100644 index 000000000..ff7061b4a --- /dev/null +++ b/utils/test/reporting/html/euphrates.html @@ -0,0 +1,141 @@ +<!DOCTYPE HTML> +<!-- + Phantom by HTML5 UP + html5up.net | @ajlkn + Free for personal and commercial use under the CCA 3.0 license (html5up.net/license) +--> +<html> + <head> + <title>Phantom by HTML5 UP</title> + <meta charset="utf-8" /> + <meta name="viewport" content="width=device-width, initial-scale=1" /> + <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]--> + <link rel="stylesheet" href="3rd_party/css/main.css" /> + <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]--> + <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]--> + </head> + <body> + <!-- Wrapper --> + <div id="wrapper"> + + <!-- Header --> + <header id="header"> + <div class="inner"> + + <!-- Logo --> + <a href="index.html" class="logo"> + <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span> + </a> + + </div> + </header> + + <!-- Menu --> + <!-- Main --> + <div id="main"> + <div class="inner"> + <header> + <h1>Euphrates reporting</h1> + </header> + <section class="tiles"> + <article class="style3"> + <span class="image"> + <img src="img/projectIcon_functest_250x250.png" alt="" /> + </span> + <a href="functest-euphrates.html"> + <h2>Functest</h2> + <div class="content"> + <p>Functional testing</p> + </div> + </a> + </article> + <article class="style2"> + <span class="image"> + <img src="img/projectIcon_yardstick_250x250.png" alt="" /> + </span> + <a href="euphrates/yardstick/status-apex.html"> + <h2>Yardstick</h2> + <div class="content"> + <p>Qualification and performance testing</p> + </div> + </a> + </article> + <article class="style4"> + <span class="image"> + <img src="img/projectIcon_storperf_250x250.png" alt="" /> + </span> + <a href="euphrates/storperf/status-apex.html"> + <h2>Storperf</h2> + <div class="content"> + <p>Storage testing</p> + </div> + </a> + </article> + <article class="style5"> + <span class="image"> + <img src="img/projectIcon_vsperf_250x250.png" alt="" /> + </span> + <a href="euphrates/vsperf/status-apex.html"> + <h2>Vsperf</h2> + <div class="content"> + <p>Virtual switch testing</p> + </div> + </a> + </article> + <article class="style1"> + <span class="image"> + <img src="img/projectIcon_qtip_250x250.png" alt="" /> + </span> + <a href="euphrates/qtip/status-apex.html"> + <h2>Qtip</h2> + <div class="content"> + <p>Benchmark as a service</p> + </div> + </a> + </article> + <article class="style6"> + <span class="image"> + <img src="img/projectIcon_bottlenecks_250x250.png" alt="" /> + </span> + <a href="euphrates/bottlenecks/status-apex.html"> + <h2>Bottlenecks</h2> + <div class="content"> + <p>Bottleneck finder</p> + </div> + </a> + </article> + </section> + </div> + </div> + + <!-- Footer --> + <footer id="footer"> + <div class="inner"> + <section> + <h2>OPNFV Testing Working group</h2> + </section> + <section> + <h2>Follow</h2> + <ul class="icons"> + <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li> + <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li> + <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li> + </ul> + 
+                    </section>
+                    <ul class="copyright">
+                        <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+                    </ul>
+                </div>
+            </footer>
+
+        </div>
+
+        <!-- Scripts -->
+        <script src="3rd_party/js/jquery.min.js"></script>
+        <script src="3rd_party/js/skel.min.js"></script>
+        <script src="3rd_party/js/util.js"></script>
+        <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+        <script src="3rd_party/js/main.js"></script>
+
+    </body>
+</html>
diff --git a/utils/test/reporting/html/functest-euphrates.html b/utils/test/reporting/html/functest-euphrates.html
new file mode 100644
index 000000000..c203e6151
--- /dev/null
+++ b/utils/test/reporting/html/functest-euphrates.html
@@ -0,0 +1,109 @@
+<!DOCTYPE HTML>
+<!--
+    Phantom by HTML5 UP
+    html5up.net | @ajlkn
+    Free for personal and commercial use under the CCA 3.0 license (html5up.net/license)
+-->
+<html>
+    <head>
+        <title>Phantom by HTML5 UP</title>
+        <meta charset="utf-8" />
+        <meta name="viewport" content="width=device-width, initial-scale=1" />
+        <!--[if lte IE 8]><script src="3rd_party/js/ie/html5shiv.js"></script><![endif]-->
+        <link rel="stylesheet" href="3rd_party/css/main.css" />
+        <!--[if lte IE 9]><link rel="stylesheet" href="3rd_party/css/ie9.css" /><![endif]-->
+        <!--[if lte IE 8]><link rel="stylesheet" href="3rd_party/css/ie8.css" /><![endif]-->
+    </head>
+    <body>
+        <!-- Wrapper -->
+        <div id="wrapper">
+
+            <!-- Header -->
+            <header id="header">
+                <div class="inner">
+
+                    <!-- Logo -->
+                    <a href="index.html" class="logo">
+                        <span class="symbol"><img src="img/logo.svg" alt="" /></span><span class="title">Phantom</span>
+                    </a>
+
+                </div>
+            </header>
+
+            <!-- Menu -->
+
+            <!-- Main -->
+            <div id="main">
+                <div class="inner">
+                    <header>
+                        <h1>Functest reporting</h1>
+                    </header>
+                    <section class="tiles">
+                        <article class="style5">
+                            <span class="image">
+                                <img src="img/pic05.jpg" alt="" />
+                            </span>
+                            <a href="euphrates/functest/status-apex.html">
+                                <h2>Status</h2>
+                                <div class="content">
+                                    <p>Scenario status</p>
+                                </div>
+                            </a>
+                        </article>
+                        <article class="style2">
+                            <span class="image">
+                                <img src="img/pic02.jpg" alt="" />
+                            </span>
+                            <a href="euphrates/functest/vims-apex.html">
+                                <h2>vIMS</h2>
+                                <div class="content">
+                                    <p>Virtual IMS</p>
+                                </div>
+                            </a>
+                        </article>
+                        <article class="style3">
+                            <span class="image">
+                                <img src="img/pic03.jpg" alt="" />
+                            </span>
+                            <a href="euphrates/functest/tempest-apex.html">
+                                <h2>Tempest</h2>
+                                <div class="content">
+                                    <p>Tempest OpenStack suite</p>
+                                </div>
+                            </a>
+                        </article>
+                    </section>
+                </div>
+            </div>
+
+            <!-- Footer -->
+            <footer id="footer">
+                <div class="inner">
+                    <section>
+                        <h2>OPNFV Testing Working group</h2>
+                    </section>
+                    <section>
+                        <h2>Follow</h2>
+                        <ul class="icons">
+                            <li><a href="https://twitter.com/opnfv" class="icon style2 fa-twitter"><span class="label">Twitter</span></a></li>
+                            <li><a href="http://git.opnfv.org" class="icon style2 fa-github"><span class="label">GitHub</span></a></li>
+                            <li><a href="mailto:test-wg@list.opnfv.org" class="icon style2 fa-envelope-o"><span class="label">Email</span></a></li>
+                        </ul>
+                    </section>
+                    <ul class="copyright">
+                        <li>© Untitled. All rights reserved</li><li>Design: <a href="http://html5up.net">HTML5 UP</a></li>
+                    </ul>
+                </div>
+            </footer>
+
+        </div>
+
+        <!-- Scripts -->
+        <script src="3rd_party/js/jquery.min.js"></script>
+        <script src="3rd_party/js/skel.min.js"></script>
+        <script src="3rd_party/js/util.js"></script>
+        <!--[if lte IE 8]><script src="3rd_party/js/ie/respond.min.js"></script><![endif]-->
+        <script src="3rd_party/js/main.js"></script>
+
+    </body>
+</html>
diff --git a/utils/test/reporting/html/index.html b/utils/test/reporting/html/index.html
index c6627ffe5..27890451f 100644
--- a/utils/test/reporting/html/index.html
+++ b/utils/test/reporting/html/index.html
@@ -37,17 +37,6 @@
         </div>
     </header>
 
-    <!-- Menu -->
-    <!--- <nav id="menu">
-        <h2>Menu</h2>
-        <ul>
-            <li><a href="index.html">Home</a></li>
-            <li><a href="colorado.html">Colorado</a></li>
-            <li><a href="danube.html">Danube</a></li>
-        </ul>
-    </nav>
-    --->
-
     <!-- Main -->
     <div id="main">
         <div class="inner">
             <header>
@@ -61,7 +50,7 @@
             <a href="colorado.html">
                 <h2>Colorado</h2>
                 <div class="content">
-                    <p>Colorado 1.0 released on the 22nd of September</p>
+                    <p>Colorado 1.0 (22/09/2016)</p>
                 </div>
             </a>
         </article>
@@ -72,17 +61,28 @@
             <a href="danube.html">
                 <h2>Danube</h2>
                 <div class="content">
-                    <p>Danube 1.0 planned on the 22nd of March</p>
+                    <p>Danube 1.0 (22/03/2017)</p>
                 </div>
             </a>
         </article>
-        <article class="style6">
+        <article class="style4">
             <span class="image">
                 <img src="img/euphrates.jpg" alt="" />
             </span>
-            <a href="master.html">
+            <a href="euphrates.html">
                 <h2>Euphrates</h2>
                 <div class="content">
+                    <p>Euphreates (ETA 10/2017)</p>
+                </div>
+            </a>
+        </article>
+        <article class="style6">
+            <span class="image">
+                <img src="img/misc-npc-letterblock-m-800px.png" alt="" />
+            </span>
+            <a href="master.html">
+                <h2>Master</h2>
+                <div class="content">
                     <p>Master</p>
                 </div>
             </a>
diff --git a/utils/test/reporting/img/euphrates.jpg b/utils/test/reporting/img/euphrates.jpg
index 3625b50cb..3eb490d7c 100644
Binary files differ
diff --git a/utils/test/reporting/img/misc-npc-letterblock-m-800px.png b/utils/test/reporting/img/misc-npc-letterblock-m-800px.png
new file mode 100644
index 000000000..9d62e9e73
Binary files differ
diff --git a/utils/test/reporting/reporting/bottlenecks/reporting-status.py b/utils/test/reporting/reporting/bottlenecks/reporting-status.py
index 8966d0690..225227ac3 100644
--- a/utils/test/reporting/reporting/bottlenecks/reporting-status.py
+++ b/utils/test/reporting/reporting/bottlenecks/reporting-status.py
@@ -37,10 +37,14 @@ for version in VERSIONS:
     # For all the installers
     for installer in INSTALLERS:
         # get scenarios results data
+        if version != 'master':
+            new_version = "stable/{}".format(version)
+        else:
+            new_version = version
         scenario_results = rp_utils.getScenarios("bottlenecks",
                                                  "posca_factor_ping",
                                                  installer,
-                                                 version)
+                                                 new_version)
         LOGGER.info("scenario_results: %s", scenario_results)
         scenario_stats = rp_utils.getScenarioStats(scenario_results)
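Note: the Bottlenecks change above maps a release name onto the branch string stored in the test API ('master' stays as-is, anything else becomes 'stable/<version>'). A small sketch of that mapping; the function name is invented for illustration:

    # Hypothetical helper mirroring the version-to-branch logic added above.
    def to_branch(version):
        # master results are tagged 'master'; release results use 'stable/<version>'
        return version if version == 'master' else "stable/{}".format(version)

    assert to_branch('master') == 'master'
    assert to_branch('euphrates') == 'stable/euphrates'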
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
index 02bf67d0e..c71e00f3b 100755
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/reporting/functest/reporting-status.py
@@ -13,8 +13,8 @@ import time
 
 import jinja2
 
-import testCase as tc
-import scenarioResult as sr
+import reporting.functest.testCase as tc
+import reporting.functest.scenarioResult as sr
 import reporting.utils.reporting_utils as rp_utils
 
 """
@@ -22,7 +22,7 @@ Functest reporting status
 """
 
 # Logger
-logger = rp_utils.getLogger("Functest-Status")
+LOGGER = rp_utils.getLogger("Functest-Status")
 
 # Initialization
 testValid = []
@@ -46,16 +46,16 @@ exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
 
 functest_yaml_config = rp_utils.getFunctestConfig()
 
-logger.info("*******************************************")
-logger.info("* *")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention: %s days *" % period)
-logger.info("* Log level: %s *" % log_level)
-logger.info("* *")
-logger.info("* Virtual PODs exluded: %s *" % exclude_virtual)
-logger.info("* NOHA scenarios excluded: %s *" % exclude_noha)
-logger.info("* *")
-logger.info("*******************************************")
+LOGGER.info("*******************************************")
+LOGGER.info("* *")
+LOGGER.info("* Generating reporting scenario status *")
+LOGGER.info("* Data retention: %s days *", period)
+LOGGER.info("* Log level: %s *", log_level)
+LOGGER.info("* *")
+LOGGER.info("* Virtual PODs exluded: %s *", exclude_virtual)
+LOGGER.info("* NOHA scenarios excluded: %s *", exclude_noha)
+LOGGER.info("* *")
+LOGGER.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
@@ -75,9 +75,9 @@ for tier in config_tiers:
     elif tier['order'] == 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
-                testValid.append(tc.TestCase(case['case_name'],
-                                             case['case_name'],
-                                             case['dependencies']))
+                otherTestCases.append(tc.TestCase(case['case_name'],
+                                                  case['case_name'],
+                                                  case['dependencies']))
     elif tier['order'] > 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
@@ -85,7 +85,7 @@
                 "functest",
                 case['dependencies']))
 
-logger.debug("Functest reporting start")
+LOGGER.debug("Functest reporting start")
 
 # For all the versions
 for version in versions:
@@ -101,7 +101,7 @@ for version in versions:
         # initiate scenario file if it does not exist
         if not os.path.isfile(scenario_file_name):
             with open(scenario_file_name, "a") as my_file:
-                logger.debug("Create scenario file: %s" % scenario_file_name)
+                LOGGER.debug("Create scenario file: %s", scenario_file_name)
                 my_file.write("date,scenario,installer,detail,score\n")
 
     for installer in installers:
@@ -113,10 +113,10 @@ for version in versions:
                                                  version)
         # get nb of supported architecture (x86, aarch64)
         architectures = rp_utils.getArchitectures(scenario_results)
-        logger.info("Supported architectures: {}".format(architectures))
+        LOGGER.info("Supported architectures: %s", architectures)
 
         for architecture in architectures:
-            logger.info("architecture: {}".format(architecture))
+            LOGGER.info("Architecture: %s", architecture)
             # Consider only the results for the selected architecture
             # i.e drop x86 for aarch64 and vice versa
             filter_results = rp_utils.filterArchitecture(scenario_results,
@@ -133,10 +133,10 @@
             # For all the scenarios get results
             for s, s_result in filter_results.items():
-                logger.info("---------------------------------")
-                logger.info("installer %s, version %s, scenario %s:" %
-                            (installer, version, s))
-                logger.debug("Scenario results: %s" % s_result)
+                LOGGER.info("---------------------------------")
+                LOGGER.info("installer %s, version %s, scenario %s:",
+                            installer, version, s)
LOGGER.debug("Scenario results: %s", s_result) # Green or Red light for a given scenario nb_test_runnable_for_this_scenario = 0 @@ -146,11 +146,11 @@ for version in versions: s_url = "" if len(s_result) > 0: build_tag = s_result[len(s_result)-1]['build_tag'] - logger.debug("Build tag: %s" % build_tag) + LOGGER.debug("Build tag: %s", build_tag) s_url = rp_utils.getJenkinsUrl(build_tag) if s_url is None: s_url = "http://testresultS.opnfv.org/reporting" - logger.info("last jenkins url: %s" % s_url) + LOGGER.info("last jenkins url: %s", s_url) testCases2BeDisplayed = [] # Check if test case is runnable / installer, scenario # for the test case used for Scenario validation @@ -160,24 +160,24 @@ for version in versions: for test_case in testValid: test_case.checkRunnable(installer, s, test_case.getConstraints()) - logger.debug("testcase %s (%s) is %s" % - (test_case.getDisplayName(), - test_case.getName(), - test_case.isRunnable)) + LOGGER.debug("testcase %s (%s) is %s", + test_case.getDisplayName(), + test_case.getName(), + test_case.isRunnable) time.sleep(1) if test_case.isRunnable: name = test_case.getName() displayName = test_case.getDisplayName() project = test_case.getProject() nb_test_runnable_for_this_scenario += 1 - logger.info(" Searching results for case %s " % - (displayName)) + LOGGER.info(" Searching results for case %s ", + displayName) result = rp_utils.getResult(name, installer, s, version) # if no result set the value to 0 if result < 0: result = 0 - logger.info(" >>>> Test score = " + str(result)) + LOGGER.info(" >>>> Test score = " + str(result)) test_case.setCriteria(result) test_case.setIsRunnable(True) testCases2BeDisplayed.append(tc.TestCase(name, @@ -193,17 +193,17 @@ for version in versions: for test_case in otherTestCases: test_case.checkRunnable(installer, s, test_case.getConstraints()) - logger.debug("testcase %s (%s) is %s" % - (test_case.getDisplayName(), - test_case.getName(), - test_case.isRunnable)) + LOGGER.debug("testcase %s (%s) is %s", + test_case.getDisplayName(), + test_case.getName(), + test_case.isRunnable) time.sleep(1) if test_case.isRunnable: name = test_case.getName() displayName = test_case.getDisplayName() project = test_case.getProject() - logger.info(" Searching results for case %s " % - (displayName)) + LOGGER.info(" Searching results for case %s ", + displayName) result = rp_utils.getResult(name, installer, s, version) # at least 1 result for the test @@ -218,24 +218,38 @@ for version in versions: True, 4)) else: - logger.debug("No results found") + LOGGER.debug("No results found") items[s] = testCases2BeDisplayed - except Exception: - logger.error("Error: installer %s, version %s, scenario %s" - % (installer, version, s)) - logger.error("No data available: %s" % (sys.exc_info()[0])) + except Exception: # pylint: disable=broad-except + LOGGER.error("Error installer %s, version %s, scenario %s", + installer, version, s) + LOGGER.error("No data available: %s", sys.exc_info()[0]) # ********************************************** # Evaluate the results for scenario validation # ********************************************** # the validation criteria = nb runnable tests x 3 - # because each test case = 0,1,2 or 3 - scenario_criteria = nb_test_runnable_for_this_scenario * 3 - # if 0 runnable tests set criteria at a high value - if scenario_criteria < 1: - scenario_criteria = 50 # conf.MAX_SCENARIO_CRITERIA + # because each test case can get + # 0 point (never PASS) + # 1 point at least (PASS once over the time window) + # 2 points (PASS more than once 
+            # 3 points PASS on the last 4 iterations
+            # e.g. 1 scenario = 10 cases
+            # 1 iteration : max score = 10 (10x1)
+            # 2 iterations : max score = 20 (10x2)
+            # 3 iterations : max score = 20
+            # 4 or more iterations : max score = 30 (1x30)
+            if len(s_result) > 3:
+                k_score = 3
+            elif len(s_result) < 2:
+                k_score = 1
+            else:
+                k_score = 2
+
+            scenario_criteria = nb_test_runnable_for_this_scenario*k_score
+
             # score for reporting
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
             s_score_percent = rp_utils.getScenarioPercent(
                 scenario_score,
@@ -243,11 +257,11 @@
             s_status = "KO"
             if scenario_score < scenario_criteria:
-                logger.info(">>>> scenario not OK, score = %s/%s" %
-                            (scenario_score, scenario_criteria))
+                LOGGER.info(">>>> scenario not OK, score = %s/%s",
+                            scenario_score, scenario_criteria)
                 s_status = "KO"
             else:
-                logger.info(">>>>> scenario OK, save the information")
+                LOGGER.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
                 path_validation_file = ("./display/" + version +
                                         "/functest/" +
@@ -270,7 +284,7 @@
                 s_score,
                 s_score_percent,
                 s_url)
-            logger.info("--------------------------")
+            LOGGER.info("--------------------------")
 
         templateLoader = jinja2.FileSystemLoader(".")
         templateEnv = jinja2.Environment(
@@ -294,9 +308,9 @@
                   installer_display + ".html", "wb") as fh:
             fh.write(outputText)
 
-        logger.info("Manage export CSV & PDF")
+        LOGGER.info("Manage export CSV & PDF")
         rp_utils.export_csv(scenario_file_name, installer_display, version)
-        logger.error("CSV generated...")
+        LOGGER.error("CSV generated...")
 
         # Generate outputs for export
         # pdf
@@ -306,4 +320,4 @@
         pdf_doc_name = ("./display/" + version +
                         "/functest/status-" + installer_display + ".pdf")
         rp_utils.export_pdf(pdf_path, pdf_doc_name)
-        logger.info("PDF generated...")
+        LOGGER.info("PDF generated...")
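Note: the scoring comment introduced in the Functest status diff above caps the per-test multiplier by the number of available iterations. A compact restatement of that rule (the function name is illustrative, not part of the commit):

    # Hypothetical restatement of the k_score rule introduced above.
    def k_score(nb_iterations):
        if nb_iterations > 3:    # 4 or more runs: a test can earn up to 3 points
            return 3
        elif nb_iterations < 2:  # a single run: at most 1 point
            return 1
        return 2                 # 2 or 3 runs: at most 2 points

    # e.g. a scenario with 10 runnable tests and 2 iterations: max score = 10 * 2 = 20
    assert 10 * k_score(2) == 20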
-logger.info("****************************************") - -installers = rp_utils.get_config('general.installers') -step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"] -logger.info("Start processing....") +import reporting.utils.reporting_utils as rp_utils + +LOGGER = rp_utils.getLogger("vIMS") + +PERIOD = rp_utils.get_config('general.period') +VERSIONS = rp_utils.get_config('general.versions') +URL_BASE = rp_utils.get_config('testapi.url') + +LOGGER.info("****************************************") +LOGGER.info("* Generating reporting vIMS *") +LOGGER.info("* Data retention = %s days *", PERIOD) +LOGGER.info("* *") +LOGGER.info("****************************************") + +INSTALLERS = rp_utils.get_config('general.installers') +STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"] +LOGGER.info("Start vIMS reporting processing....") # For all the versions -for version in versions: - for installer in installers: - logger.info("Search vIMS results for installer: %s, version: %s" - % (installer, version)) - request = Request("http://" + url_base + '?case=vims&installer=' + - installer + '&version=' + version) - - try: - response = urlopen(request) - k = response.read() - results = json.loads(k) - except URLError as e: - logger.error("Error code: %s" % e) - - test_results = results['results'] - - logger.debug("Results found: %s" % test_results) - - scenario_results = {} - for r in test_results: - if not r['scenario'] in scenario_results.keys(): - scenario_results[r['scenario']] = [] - scenario_results[r['scenario']].append(r) - - for s, s_result in scenario_results.items(): - scenario_results[s] = s_result[0:5] - logger.debug("Search for success criteria") - for result in scenario_results[s]: - result["start_date"] = result["start_date"].split(".")[0] - sig_test = result['details']['sig_test']['result'] - if not sig_test == "" and isinstance(sig_test, list): - format_result = sig_test_format(sig_test) - if format_result['failures'] > format_result['passed']: - result['details']['sig_test']['duration'] = 0 - result['details']['sig_test']['result'] = format_result - nb_step_ok = 0 - nb_step = len(result['details']) - - for step_name, step_result in result['details'].items(): - if step_result['duration'] != 0: - nb_step_ok += 1 - m, s = divmod(step_result['duration'], 60) - m_display = "" - if int(m) != 0: - m_display += str(int(m)) + "m " - - step_result['duration_display'] = (m_display + - str(int(s)) + "s") - - result['pr_step_ok'] = 0 - if nb_step != 0: - result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100 - try: - logger.debug("Scenario %s, Installer %s" - % (s_result[1]['scenario'], installer)) - res = result['details']['orchestrator']['duration'] - logger.debug("Orchestrator deployment: %s s" - % res) - logger.debug("vIMS deployment: %s s" - % result['details']['vIMS']['duration']) - logger.debug("Signaling testing: %s s" - % result['details']['sig_test']['duration']) - logger.debug("Signaling testing results: %s" - % format_result) - except Exception: - logger.error("Data badly formatted") - logger.debug("----------------------------------------") +for version in VERSIONS: + for installer in INSTALLERS: + + # get nb of supported architecture (x86, aarch64) + # get scenarios + scenario_results = rp_utils.getScenarios("functest", + "cloudify_ims", + installer, + version) + + architectures = rp_utils.getArchitectures(scenario_results) + LOGGER.info("Supported architectures: %s", architectures) + + for architecture in architectures: + 
LOGGER.info("Architecture: %s", architecture) + # Consider only the results for the selected architecture + # i.e drop x86 for aarch64 and vice versa + filter_results = rp_utils.filterArchitecture(scenario_results, + architecture) + scenario_stats = rp_utils.getScenarioStats(filter_results) + items = {} + scenario_result_criteria = {} + + # in case of more than 1 architecture supported + # precise the architecture + installer_display = installer + if "fuel" in installer: + installer_display = installer + "@" + architecture + + LOGGER.info("Search vIMS results for installer: %s, version: %s", + installer, version) + request = Request("http://" + URL_BASE + '?case=cloudify_ims&' + 'installer=' + installer + '&version=' + version) + try: + response = urlopen(request) + k = response.read() + results = json.loads(k) + except URLError as err: + LOGGER.error("Error code: %s", err) + + test_results = results['results'] + + # LOGGER.debug("Results found: %s" % test_results) + + scenario_results = {} + for r in test_results: + if not r['scenario'] in scenario_results.keys(): + scenario_results[r['scenario']] = [] + scenario_results[r['scenario']].append(r) + + # LOGGER.debug("scenario result: %s" % scenario_results) + + for s, s_result in scenario_results.items(): + scenario_results[s] = s_result[0:5] + for result in scenario_results[s]: + try: + format_result = result['details']['test_vnf']['result'] + + # round durations of the different steps + result['details']['orchestrator']['duration'] = round( + result['details']['orchestrator']['duration'], 1) + result['details']['vnf']['duration'] = round( + result['details']['vnf']['duration'], 1) + result['details']['test_vnf']['duration'] = round( + result['details']['test_vnf']['duration'], 1) + + res_orch = \ + result['details']['orchestrator']['duration'] + res_vnf = result['details']['vnf']['duration'] + res_test_vnf = \ + result['details']['test_vnf']['duration'] + res_signaling = \ + result['details']['test_vnf']['result']['failures'] + + # Manage test result status + if res_signaling != 0: + LOGGER.debug("At least 1 signalig test FAIL") + result['details']['test_vnf']['status'] = "FAIL" + else: + LOGGER.debug("All signalig tests PASS") + result['details']['test_vnf']['status'] = "PASS" + + LOGGER.debug("Scenario %s, Installer %s", + s_result[1]['scenario'], installer) + LOGGER.debug("Orchestrator deployment: %ss", res_orch) + LOGGER.debug("vIMS deployment: %ss", res_vnf) + LOGGER.debug("VNF testing: %ss", res_test_vnf) + LOGGER.debug("VNF testing results: %s", format_result) + except Exception as err: # pylint: disable=broad-except + LOGGER.error("Uncomplete data %s", err) + LOGGER.debug("----------------------------------------") templateLoader = jinja2.FileSystemLoader(".") templateEnv = jinja2.Environment(loader=templateLoader, @@ -116,11 +132,11 @@ for version in versions: template = templateEnv.get_template(TEMPLATE_FILE) outputText = template.render(scenario_results=scenario_results, - step_order=step_order, - installer=installer) - + step_order=STEP_ORDER, + installer=installer_display) + LOGGER.debug("Generate html page for %s", installer_display) with open("./display/" + version + "/functest/vims-" + - installer + ".html", "wb") as fh: + installer_display + ".html", "wb") as fh: fh.write(outputText) -logger.info("vIMS report succesfully generated") +LOGGER.info("vIMS report succesfully generated") diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html 
diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
index cd51607b7..9bd2b2f66 100644
--- a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
+++ b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
@@ -22,11 +22,12 @@
     <nav>
         <ul class="nav nav-justified">
             <li class="active"><a href="../../index.html">Home</a></li>
-            <li><a href="vims-fuel.html">Fuel</a></li>
+            <li><a href="vims-apex.html">Apex</a></li>
             <li><a href="vims-compass.html">Compass</a></li>
             <li><a href="vims-daisy.html">Daisy</a></li>
-            <li><a href="vims-joid.html">JOID</a></li>
-            <li><a href="vims-apex.html">APEX</a></li>
+            <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
+            <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
+            <li><a href="vims-joid.html">Joid</a></li>
         </ul>
     </nav>
 </div>
@@ -58,17 +59,17 @@
     <tr>
         <th width="20%">Step</th>
         <th width="10%">Status</th>
-        <th width="10%">Duration</th>
+        <th width="10%">Duration(s)</th>
         <th width="60%">Result</th>
     </tr>
     {% for step_od_name in step_order -%}
         {% if step_od_name in result.details.keys() -%}
             {% set step_result = result.details[step_od_name] -%}
-            {% if step_result.duration != 0 -%}
+            {% if step_result.status == "PASS" -%}
                 <tr class="tr-ok">
                     <td>{{step_od_name}}</td>
                     <td><span class="glyphicon glyphicon-ok"></td>
-                    <td><b>{{step_result.duration_display}}</b></td>
+                    <td><b>{{step_result.duration}}</b></td>
                     <td>{{step_result.result}}</td>
                 </tr>
             {%- else -%}
diff --git a/utils/test/reporting/reporting/functest/testCase.py b/utils/test/reporting/reporting/functest/testCase.py
index 9834f0753..a182dd4cf 100644
--- a/utils/test/reporting/reporting/functest/testCase.py
+++ b/utils/test/reporting/reporting/functest/testCase.py
@@ -50,9 +50,10 @@ class TestCase(object):
                          'gluon_vping': 'Netready',
                          'fds': 'FDS',
                          'cloudify_ims': 'vIMS (Cloudify)',
-                         'orchestra_ims': 'OpenIMS (OpenBaton)',
+                         'orchestra_openims': 'OpenIMS (OpenBaton)',
+                         'orchestra_clearwaterims': 'vIMS (OpenBaton)',
                          'opera_ims': 'vIMS (Open-O)',
-                         'vyos_vrouter': 'vyos',
+                         'vyos_vrouter': 'vyos (Cloudify)',
                          'barometercollectd': 'Barometer',
                          'odl_netvirt': 'Netvirt',
                          'security_scan': 'Security'}
diff --git a/utils/test/reporting/reporting/qtip/reporting-status.py b/utils/test/reporting/reporting/qtip/reporting-status.py
index 56f9e0aee..524338ca0 100644
--- a/utils/test/reporting/reporting/qtip/reporting-status.py
+++ b/utils/test/reporting/reporting/qtip/reporting-status.py
@@ -45,27 +45,22 @@ def prepare_profile_file(version):
 def profile_results(results, installer, profile_fd):
     result_criterias = {}
     for s_p, s_p_result in results.iteritems():
-        ten_criteria = len(s_p_result)
-        ten_score = sum(s_p_result)
+        avg_last_ten = sum([int(s) for s in s_p_result]) / len(s_p_result)
         LASTEST_TESTS = rp_utils.get_config(
             'general.nb_iteration_tests_success_criteria')
-        four_result = s_p_result[:LASTEST_TESTS]
-        four_criteria = len(four_result)
-        four_score = sum(four_result)
-
-        s_four_score = str(four_score / four_criteria)
-        s_ten_score = str(ten_score / ten_criteria)
+        last_four = s_p_result[-LASTEST_TESTS:]
+        avg_last_four = sum([int(s) for s in last_four]) / len(last_four)
 
         info = '{},{},{},{},{}\n'.format(reportingDate,
                                          s_p,
                                          installer,
-                                         s_ten_score,
-                                         s_four_score)
+                                         '',
+                                         avg_last_four)
         profile_fd.write(info)
         result_criterias[s_p] = sr.ScenarioResult('OK',
-                                                  s_four_score,
-                                                  s_ten_score,
+                                                  avg_last_four,
+                                                  avg_last_ten,
                                                   '100')
 
     logger.info("--------------------------")
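Note: the QTIP change above replaces raw sums with averages, over the whole retention window and over the last N runs (N comes from general.nb_iteration_tests_success_criteria, assumed 4 here). A sketch with made-up score data, evaluated with Python 2 integer division as the script runs it:

    # Hypothetical illustration of the averaging introduced above (N assumed = 4).
    s_p_result = ['2000', '2100', '2050', '2200', '2150']
    avg_last_ten = sum([int(s) for s in s_p_result]) / len(s_p_result)
    last_four = s_p_result[-4:]
    avg_last_four = sum([int(s) for s in last_four]) / len(last_four)
    # 10500 / 5 = 2100 and 8500 / 4 = 2125
    print(avg_last_ten, avg_last_four)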
diff --git a/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html b/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
index 92f3395dc..f55f78144 100644
--- a/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
+++ b/utils/test/reporting/reporting/qtip/template/index-status-tmpl.html
@@ -47,10 +47,10 @@
     <ul class="nav nav-justified">
         <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
         <li><a href="status-apex.html">Apex</a></li>
-        <li><a href="status-compass.html">Compass</a></li>
-        <li><a href="status-daisy.html">Daisy</a></li>
-        <li><a href="status-fuel.html">Fuel</a></li>
-        <li><a href="status-joid.html">Joid</a></li>
+        <!--<li><a href="status-compass.html">Compass</a></li>-->
+        <!--<li><a href="status-daisy.html">Daisy</a></li>-->
+        <!--<li><a href="status-fuel.html">Fuel</a></li>-->
+        <!--<li><a href="status-joid.html">Joid</a></li>-->
     </ul>
 </nav>
 </div>
@@ -66,9 +66,9 @@
 <table class="table">
     <tr>
         <th width="25%">Pod/Scenario</th>
-        <th width="25%">Trend</th>
-        <th width="25%">Last 4 Iterations</th>
-        <th width="25%">Last 10 Days</th>
+        <th width="25%">Scoring Trend</th>
+        <th width="25%">Avg. in Last 4 Runs</th>
+        <th width="25%">Avg. in Last 10 Days</th>
     </tr>
     {% for scenario,result in prof_results.iteritems() -%}
     <tr class="tr-ok">
diff --git a/utils/test/reporting/reporting/reporting.yaml b/utils/test/reporting/reporting/reporting.yaml
index 9bb90b806..2a4aa492c 100644
--- a/utils/test/reporting/reporting/reporting.yaml
+++ b/utils/test/reporting/reporting/reporting.yaml
@@ -9,6 +9,7 @@ general:
     versions:
         - master
+        - euphrates
         - danube
 
     log:
@@ -35,15 +36,8 @@ testapi:
 
 functest:
     blacklist:
-        - ovno
-        - security_scan
-        - healthcheck
         - odl_netvirt
-        - aaa
-        - cloudify_ims
-        - orchestra_ims
         - juju_epc
-        - orchestra
     max_scenario_criteria: 50
     test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
     log_level: ERROR
@@ -62,7 +56,7 @@ storperf:
 
 qtip:
     log_level: ERROR
-    period: 1
+    period: 10
 
 bottlenecks:
     test_list:
diff --git a/utils/test/reporting/reporting/utils/reporting_utils.py b/utils/test/reporting/reporting/utils/reporting_utils.py
index 235bd6ef9..65267ca11 100644
--- a/utils/test/reporting/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/reporting/utils/reporting_utils.py
@@ -186,7 +186,6 @@ def getScenarioStats(scenario_results):
 def getScenarioStatus(installer, version):
     """
     Get the status of a scenario for Yardstick
-    they used criteria SUCCESS (default: PASS)
     """
     period = get_config('general.period')
     url_base = get_config('testapi.url')
@@ -205,25 +204,34 @@ def getScenarioStatus(installer, version):
     except URLError:
         print "GetScenarioStatus: error when calling the API"
 
-    scenario_results = {}
-    result_dict = {}
+    x86 = 'x86'
+    aarch64 = 'aarch64'
+    scenario_results = {x86: {}, aarch64: {}}
+    result_dict = {x86: {}, aarch64: {}}
     if test_results is not None:
         for test_r in test_results:
             if (test_r['stop_date'] != 'None' and
                     test_r['criteria'] is not None):
-                if not test_r['scenario'] in scenario_results.keys():
-                    scenario_results[test_r['scenario']] = []
-                scenario_results[test_r['scenario']].append(test_r)
-
-    for scen_k, scen_v in scenario_results.items():
-        # scenario_results[k] = v[:LASTEST_TESTS]
-        s_list = []
-        for element in scen_v:
-            if element['criteria'] == 'SUCCESS':
-                s_list.append(1)
+                scenario_name = test_r['scenario']
+                if 'arm' in test_r['pod_name']:
+                    if not test_r['scenario'] in scenario_results[aarch64]:
+                        scenario_results[aarch64][scenario_name] = []
+                    scenario_results[aarch64][scenario_name].append(test_r)
                 else:
-                    s_list.append(0)
-        result_dict[scen_k] = s_list
+                    if not test_r['scenario'] in scenario_results[x86]:
+                        scenario_results[x86][scenario_name] = []
+                    scenario_results[x86][scenario_name].append(test_r)
+
+    for key in scenario_results:
+        for scen_k, scen_v in scenario_results[key].items():
+            # scenario_results[k] = v[:LASTEST_TESTS]
+            s_list = []
+            for element in scen_v:
+                if element['criteria'] == 'PASS':
+                    s_list.append(1)
+                else:
+                    s_list.append(0)
+            result_dict[key][scen_k] = s_list
 
     # return scenario_results
     return result_dict
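Note: getScenarioStatus now buckets results per architecture before scoring, keyed on whether 'arm' appears in pod_name, and criteria are matched against 'PASS'. The returned structure looks roughly like this; scenario names and score lists are made up for illustration:

    # Hypothetical shape of the result_dict returned by getScenarioStatus above:
    result_dict = {
        'x86': {'os-nosdn-nofeature-ha': [1, 1, 0, 1]},   # 1 = PASS, 0 = otherwise
        'aarch64': {'os-odl-nofeature-ha': [1, 1, 1, 1]},
    }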
diff --git a/utils/test/reporting/reporting/yardstick/reporting-status.py b/utils/test/reporting/reporting/yardstick/reporting-status.py
index 6584f4e8d..10cacf006 100644
--- a/utils/test/reporting/reporting/yardstick/reporting-status.py
+++ b/utils/test/reporting/reporting/yardstick/reporting-status.py
@@ -11,109 +11,159 @@
 import os
 
 import jinja2
 
-import reporting.utils.scenarioResult as sr
-import reporting.utils.reporting_utils as rp_utils
-from scenarios import config as cf
+from reporting.utils.scenarioResult import ScenarioResult
+from reporting.utils import reporting_utils as utils
+from scenarios import config as blacklist
 
-installers = rp_utils.get_config('general.installers')
-versions = rp_utils.get_config('general.versions')
-PERIOD = rp_utils.get_config('general.period')
 
 # Logger
-logger = rp_utils.getLogger("Yardstick-Status")
-reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
-
-logger.info("*******************************************")
-logger.info("* Generating reporting scenario status *")
-logger.info("* Data retention = %s days *" % PERIOD)
-logger.info("* *")
-logger.info("*******************************************")
-
-
-# For all the versions
-for version in versions:
-    # For all the installers
-    for installer in installers:
-        # get scenarios results data
-        scenario_results = rp_utils.getScenarioStatus(installer, version)
-        if 'colorado' == version:
-            stable_result = rp_utils.getScenarioStatus(installer,
-                                                       'stable/colorado')
-            for k, v in stable_result.items():
-                if k not in scenario_results.keys():
-                    scenario_results[k] = []
-                scenario_results[k] += stable_result[k]
-        scenario_result_criteria = {}
-
-        for s in scenario_results.keys():
-            if installer in cf.keys() and s in cf[installer].keys():
-                scenario_results.pop(s)
-
-        # From each scenarios get results list
-        for s, s_result in scenario_results.items():
-            logger.info("---------------------------------")
-            logger.info("installer %s, version %s, scenario %s", installer,
-                        version, s)
-
-            ten_criteria = len(s_result)
-            ten_score = 0
-            for v in s_result:
-                ten_score += v
-
-            LASTEST_TESTS = rp_utils.get_config(
-                'general.nb_iteration_tests_success_criteria')
-            four_result = s_result[:LASTEST_TESTS]
-            four_criteria = len(four_result)
-            four_score = 0
-            for v in four_result:
-                four_score += v
-
-            s_status = str(rp_utils.get_percent(four_result, s_result))
-            s_four_score = str(four_score) + '/' + str(four_criteria)
-            s_ten_score = str(ten_score) + '/' + str(ten_criteria)
-            s_score_percent = rp_utils.get_percent(four_result, s_result)
-
-            if '100' == s_status:
-                logger.info(">>>>> scenario OK, save the information")
-            else:
-                logger.info(">>>> scenario not OK, last 4 iterations = %s, \
-                    last 10 days = %s" % (s_four_score, s_ten_score))
-
-            # Save daily results in a file
-            path_validation_file = ("./display/" + version +
-                                    "/yardstick/scenario_history.txt")
-
-            if not os.path.exists(path_validation_file):
-                with open(path_validation_file, 'w') as f:
-                    info = 'date,scenario,installer,details,score\n'
-                    f.write(info)
-
-            with open(path_validation_file, "a") as f:
-                info = (reportingDate + "," + s + "," + installer +
-                        "," + s_ten_score + "," +
-                        str(s_score_percent) + "\n")
-                f.write(info)
-
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
-                                                            s_four_score,
-                                                            s_ten_score,
-                                                            s_score_percent)
-
-            logger.info("--------------------------")
-
-        templateLoader = jinja2.FileSystemLoader(".")
-        templateEnv = jinja2.Environment(loader=templateLoader,
-                                         autoescape=True)
-
-        TEMPLATE_FILE = "./reporting/yardstick/template/index-status-tmpl.html"
-        template = templateEnv.get_template(TEMPLATE_FILE)
-
-        outputText = template.render(scenario_results=scenario_result_criteria,
-                                     installer=installer,
-                                     period=PERIOD,
-                                     version=version,
-                                     date=reportingDate)
-
-        with open("./display/" + version +
-                  "/yardstick/status-" + installer + ".html", "wb") as fh:
-            fh.write(outputText)
+LOG = utils.getLogger("Yardstick-Status")
+
+
+def get_scenario_data(version, installer):
+    scenarios = utils.getScenarioStatus(installer, version)
+
+    if 'colorado' == version:
+        data = utils.getScenarioStatus(installer, 'stable/colorado')
+        for archi, value in data.items():
+            for k, v in value.items():
+                if k not in scenarios[archi]:
+                    scenarios[archi][k] = []
+                scenarios[archi][k].extend(data[archi][k])
+
+    for archi, value in scenarios.items():
+        for scenario in value:
+            if installer in blacklist and scenario in blacklist[installer]:
+                scenarios[archi].pop(scenario)
+
+    return scenarios
+
+
+def write_history_data(version,
+                       scenario,
+                       installer,
+                       archi,
+                       ten_score,
+                       percent):
+    # Save daily results in a file
+    history_file = './display/{}/yardstick/scenario_history.txt'.format(
+        version)
+
+    if not os.path.exists(history_file):
+        with open(history_file, 'w') as f:
+            f.write('date,scenario,installer,details,score\n')
+
+    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+    if installer == 'fuel':
+        installer = '{}@{}'.format(installer, archi)
+    with open(history_file, "a") as f:
+        info = '{},{},{},{},{}\n'.format(date,
+                                         scenario,
+                                         installer,
+                                         ten_score,
+                                         percent)
+        f.write(info)
+
+
+def generate_page(scenario_data, installer, period, version, architecture):
+    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
+
+    templateLoader = jinja2.FileSystemLoader(".")
+    template_env = jinja2.Environment(loader=templateLoader,
+                                      autoescape=True)
+
+    template_file = "./reporting/yardstick/template/index-status-tmpl.html"
+    template = template_env.get_template(template_file)
+
+    if installer == 'fuel':
+        installer = '{}@{}'.format(installer, architecture)
+
+    output_text = template.render(scenario_results=scenario_data,
+                                  installer=installer,
+                                  period=period,
+                                  version=version,
+                                  date=date)
+
+    page_file = './display/{}/yardstick/status-{}.html'.format(version,
+                                                               installer)
+    with open(page_file, 'wb') as f:
+        f.write(output_text)
+
+
+def do_statistic(data):
+    ten_score = 0
+    for v in data:
+        ten_score += v
+
+    last_count = utils.get_config(
+        'general.nb_iteration_tests_success_criteria')
+    last_data = data[:last_count]
+    last_score = 0
+    for v in last_data:
+        last_score += v
+
+    percent = utils.get_percent(last_data, data)
+    status = str(percent)
+    last_score = '{}/{}'.format(last_score, len(last_data))
+    ten_score = '{}/{}'.format(ten_score, len(data))
+
+    if '100' == status:
+        LOG.info(">>>>> scenario OK, save the information")
+    else:
+        LOG.info(">>>> scenario not OK, last 4 iterations = %s, \
+            last 10 days = %s" % (last_score, ten_score))
+
+    return last_score, ten_score, percent, status
+
+
+def generate_reporting_page(version, installer, archi, scenarios, period):
+    scenario_data = {}
+
+    # From each scenarios get results list
+    for scenario, data in scenarios.items():
+        LOG.info("---------------------------------")
+        LOG.info("installer %s, version %s, scenario %s",
+                 installer,
+                 version,
+                 scenario)
+        last_score, ten_score, percent, status = do_statistic(data)
+        write_history_data(version,
+                           scenario,
+                           installer,
+                           archi,
+                           ten_score,
+                           percent)
+        scenario_data[scenario] = ScenarioResult(status,
+                                                 last_score,
+                                                 ten_score,
+                                                 percent)
+
+    LOG.info("--------------------------")
+    if scenario_data:
+        generate_page(scenario_data, installer, period, version, archi)
+
+
+def main():
+    installers = utils.get_config('general.installers')
+    versions = utils.get_config('general.versions')
+    period = utils.get_config('general.period')
+
+    LOG.info("*******************************************")
+    LOG.info("* Generating reporting scenario status *")
+    LOG.info("* Data retention = %s days *" % period)
+    LOG.info("* *")
+    LOG.info("*******************************************")
+
+    # For all the versions
+    for version in versions:
+        # For all the installers
+        for installer in installers:
+            # get scenarios results data
+            scenarios = get_scenario_data(version, installer)
+            for k, v in scenarios.items():
+                generate_reporting_page(version, installer, k, v, period)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/utils/test/reporting/reporting/yardstick/scenarios.py b/utils/test/reporting/reporting/yardstick/scenarios.py
index 26e8c8bb0..7504493b2 100644
--- a/utils/test/reporting/reporting/yardstick/scenarios.py
+++ b/utils/test/reporting/reporting/yardstick/scenarios.py
@@ -9,7 +9,7 @@
 import requests
 import yaml
 
-import utils.reporting_utils as rp_utils
+import reporting.utils.reporting_utils as rp_utils
 
 yardstick_conf = rp_utils.get_config('yardstick.test_conf')
 response = requests.get(yardstick_conf)
diff --git a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
index 77ba9502f..3db32e531 100644
--- a/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
+++ b/utils/test/reporting/reporting/yardstick/template/index-status-tmpl.html
@@ -25,7 +25,7 @@
     }
 
     // trend line management
-    d3.csv("./scenario_history.csv", function(data) {
+    d3.csv("./scenario_history.txt", function(data) {
         // ***************************************
         // Create the trend line
         {% for scenario in scenario_results.keys() -%}
@@ -70,7 +70,8 @@
         <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
         <li><a href="status-apex.html">Apex</a></li>
         <li><a href="status-compass.html">Compass</a></li>
-        <li><a href="status-fuel.html">Fuel</a></li>
+        <li><a href="status-fuel@x86.html">Fuel@x86</a></li>
+        <li><a href="status-fuel@aarch64.html">Fuel@aarch64</a></li>
         <li><a href="status-joid.html">Joid</a></li>
     </ul>
 </nav>
diff --git a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
index e366670a9..22f29347b 100644
--- a/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
+++ b/utils/test/testapi/3rd_party/static/testapi-ui/components/pods/pods.html
@@ -54,6 +54,7 @@
 <a href="#" ng-click="showPod = !showPod">{{pod.name}}</a>
 <div class="show-pod" ng-class="{ 'hidden': !showPod }" style="margin-left:24px;">
showPod }" style="margin-left:24px;"> <p> + owner: {{pod.owner}}<br> role: {{pod.role}}<br> mode: {{pod.mode}}<br> create_date: {{pod.creation_date}}<br> diff --git a/utils/test/testapi/deployment/deploy.py b/utils/test/testapi/deployment/deploy.py deleted file mode 100644 index 6433fa6b2..000000000 --- a/utils/test/testapi/deployment/deploy.py +++ /dev/null @@ -1,40 +0,0 @@ -import argparse -import os - -from jinja2 import Environment, FileSystemLoader - -env = Environment(loader=FileSystemLoader('./')) -docker_compose_yml = './docker-compose.yml' -docker_compose_template = './docker-compose.yml.template' - - -def render_docker_compose(port, base_url): - vars = { - "expose_port": port, - "base_url": base_url, - } - template = env.get_template(docker_compose_template) - yml = template.render(vars=vars) - - with open(docker_compose_yml, 'w') as f: - f.write(yml) - f.close() - - -def main(args): - render_docker_compose(args.expose_port, args.base_url) - os.system('docker-compose -f {} up -d'.format(docker_compose_yml)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Backup MongoDBs') - parser.add_argument('-p', '--expose-port', - type=int, - required=False, - default=8000, - help='testapi exposed port') - parser.add_argument('-l', '--base-url', - type=str, - required=True, - help='testapi exposed base-url') - main(parser.parse_args()) diff --git a/utils/test/testapi/deployment/docker-compose.yml.template b/utils/test/testapi/deployment/docker-compose.yml.template deleted file mode 100644 index cd684048e..000000000 --- a/utils/test/testapi/deployment/docker-compose.yml.template +++ /dev/null @@ -1,15 +0,0 @@ -version: '2' -services: - mongo: - image: mongo:3.2.1 - container_name: opnfv-mongo - testapi: - image: opnfv/testapi:latest - container_name: opnfv-testapi - environment: - - mongodb_url=mongodb://mongo:27017/ - - base_url={{ vars.base_url }} - ports: - - "{{ vars.expose_port }}:8000" - links: - - mongo diff --git a/utils/test/testapi/htmlize/doc-build.sh b/utils/test/testapi/htmlize/doc-build.sh deleted file mode 100644 index 33560ceea..000000000 --- a/utils/test/testapi/htmlize/doc-build.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -o errexit - -# Create virtual environment -virtualenv $WORKSPACE/testapi_venv -source $WORKSPACE/testapi_venv/bin/activate - -# Swgger Codegen Tool -url="http://repo1.maven.org/maven2/io/swagger/swagger-codegen-cli/2.2.1/swagger-codegen-cli-2.2.1.jar" - -# Check for jar file locally and in the repo -if [ ! -f swagger-codegen-cli.jar ]; -then - wget http://repo1.maven.org/maven2/io/swagger/swagger-codegen-cli/2.2.1/swagger-codegen-cli-2.2.1.jar -O swagger-codegen-cli.jar -fi - -# Install Pre-requistics -pip install requests - -python ./utils/test/testapi/htmlize/htmlize.py -o ${WORKSPACE}/ diff --git a/utils/test/testapi/htmlize/htmlize.py b/utils/test/testapi/htmlize/htmlize.py deleted file mode 100644 index da6a6cf91..000000000 --- a/utils/test/testapi/htmlize/htmlize.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python - -# All rights reserved. 
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import argparse
-import requests
-import json
-import os
-
-
-def main(args):
-
-    # Merging two specs
-    api_response = requests.get(args.api_declaration_url)
-    api_response = json.loads(api_response.content)
-    resource_response = requests.get(args.resource_listing_url)
-    resource_response = json.loads(resource_response.content)
-    resource_response['models'] = api_response['models']
-    resource_response['apis'] = api_response['apis']
-
-    # Storing the swagger specs
-    with open('specs.json', 'w') as outfile:
-        json.dump(resource_response, outfile)
-
-    # Generating html page
-    cmd = 'java -jar swagger-codegen-cli.jar generate \
-           -i specs.json -l html2 -o %s' % (args.output_directory)
-    if os.system(cmd) == 0:
-        exit(0)
-    else:
-        exit(1)
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Create \
-                                     Swagger Spec documentation')
-    parser.add_argument('-ru', '--resource-listing-url',
-                        type=str,
-                        required=False,
-                        default=('http://testresults.opnfv.org'
-                                 '/test/swagger/resources.json'),
-                        help='Resource Listing Spec File')
-    parser.add_argument('-au', '--api-declaration-url',
-                        type=str,
-                        required=False,
-                        default=('http://testresults.opnfv.org'
-                                 '/test/swagger/APIs'),
-                        help='API Declaration Spec File')
-    parser.add_argument('-o', '--output-directory',
-                        required=True,
-                        default='./',
-                        help='Output Directory where the \
-                              file should be stored')
-    main(parser.parse_args())
diff --git a/utils/test/testapi/htmlize/push-doc-artifact.sh b/utils/test/testapi/htmlize/push-doc-artifact.sh
deleted file mode 100644
index 4cf1988b0..000000000
--- a/utils/test/testapi/htmlize/push-doc-artifact.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o pipefail
-
-export PATH=$PATH:/usr/local/bin/
-
-project=$PROJECT
-workspace=$WORKSPACE
-artifact_dir="$project/docs"
-
-set +e
-gsutil&>/dev/null
-if [ $? != 0 ]; then
-    echo "Not possible to push results to artifact: gsutil not installed"
-    exit 1
-else
-    gsutil ls gs://artifacts.opnfv.org/"$project"/ &>/dev/null
-    if [ $? != 0 ]; then
-        echo "Not possible to push results to artifact: gsutil not installed."
- exit 1 - else - echo "Uploading document to artifact $artifact_dir" - gsutil cp "$workspace"/index.html gs://artifacts.opnfv.org/"$artifact_dir"/testapi.html >/dev/null 2>&1 - echo "Document can be found at http://artifacts.opnfv.org/releng/docs/testapi.html" - fi -fi diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py index acd331784..e80b1c6b7 100644 --- a/utils/test/testapi/opnfv_testapi/common/check.py +++ b/utils/test/testapi/opnfv_testapi/common/check.py @@ -10,19 +10,33 @@ import functools import re from tornado import gen -from tornado import web +from opnfv_testapi.common import constants from opnfv_testapi.common import message from opnfv_testapi.common import raises from opnfv_testapi.db import api as dbapi -def authenticate(method): - @web.asynchronous - @gen.coroutine +def is_authorized(method): @functools.wraps(method) def wrapper(self, *args, **kwargs): - if self.auth: + if self.table in ['pods']: + testapi_id = self.get_secure_cookie(constants.TESTAPI_ID) + if not testapi_id: + raises.Unauthorized(message.not_login()) + user_info = yield dbapi.db_find_one('users', {'user': testapi_id}) + if not user_info: + raises.Unauthorized(message.not_lfid()) + kwargs['owner'] = testapi_id + ret = yield gen.coroutine(method)(self, *args, **kwargs) + raise gen.Return(ret) + return wrapper + + +def valid_token(method): + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + if self.auth and self.table == 'results': try: token = self.request.headers['X-Auth-Token'] except KeyError: diff --git a/utils/test/testapi/opnfv_testapi/common/message.py b/utils/test/testapi/opnfv_testapi/common/message.py index 951cbaf9c..8b5c3fb7a 100644 --- a/utils/test/testapi/opnfv_testapi/common/message.py +++ b/utils/test/testapi/opnfv_testapi/common/message.py @@ -42,6 +42,14 @@ def invalid_token(): return 'Invalid Token' +def not_login(): + return 'TestAPI id is not provided' + + +def not_lfid(): + return 'Not a valid Linux Foundation Account' + + def no_update(): return 'Nothing to update' diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py index ed55c7028..8e5dab235 100644 --- a/utils/test/testapi/opnfv_testapi/resources/handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/handlers.py @@ -73,7 +73,10 @@ class GenericApiHandler(web.RequestHandler): cls_data = self.table_cls.from_dict(data) return cls_data.format_http() - @check.authenticate + @web.asynchronous + @gen.coroutine + @check.is_authorized + @check.valid_token @check.no_body @check.miss_fields @check.carriers_exist @@ -172,13 +175,15 @@ class GenericApiHandler(web.RequestHandler): def _get_one(self, data, query=None): self.finish_request(self.format_data(data)) - @check.authenticate + @web.asynchronous + @gen.coroutine @check.not_exist def _delete(self, data, query=None): yield dbapi.db_delete(self.table, query) self.finish_request() - @check.authenticate + @web.asynchronous + @gen.coroutine @check.no_body @check.not_exist @check.updated_one_not_exist @@ -189,7 +194,8 @@ class GenericApiHandler(web.RequestHandler): update_req['_id'] = str(data._id) self.finish_request(update_req) - @check.authenticate + @web.asynchronous + @gen.coroutine @check.no_body @check.not_exist @check.updated_one_not_exist diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_models.py b/utils/test/testapi/opnfv_testapi/resources/pod_models.py index 2c3ea978b..415d3d66b 100644 --- 
a/utils/test/testapi/opnfv_testapi/resources/pod_models.py +++ b/utils/test/testapi/opnfv_testapi/resources/pod_models.py @@ -29,13 +29,14 @@ class PodCreateRequest(models.ModelBase): class Pod(models.ModelBase): def __init__(self, name='', mode='', details='', - role="", _id='', create_date=''): + role="", _id='', create_date='', owner=''): self.name = name self.mode = mode self.details = details self.role = role self._id = _id self.creation_date = create_date + self.owner = owner @swagger.model() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py b/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py index 8cfc513be..ea2297275 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/common/test_config.py @@ -1,4 +1,5 @@ import argparse +import pytest def test_config_normal(mocker, config_normal): @@ -13,3 +14,11 @@ def test_config_normal(mocker, config_normal): assert CONF.api_debug is True assert CONF.api_authenticate is False assert CONF.ui_url == 'http://localhost:8000' + + +def test_config_file_not_exist(mocker): + mocker.patch('os.path.exists', return_value=False) + with pytest.raises(Exception) as m_exc: + from opnfv_testapi.common import config + config.Config() + assert 'not found' in str(m_exc.value) diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/executor.py b/utils/test/testapi/opnfv_testapi/tests/unit/executor.py index b8f696caf..aa99b9086 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/executor.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/executor.py @@ -9,6 +9,39 @@ import functools import httplib +from concurrent.futures import ThreadPoolExecutor +import mock + + +O_get_secure_cookie = ( + 'opnfv_testapi.resources.handlers.GenericApiHandler.get_secure_cookie') + + +def thread_execute(method, *args, **kwargs): + with ThreadPoolExecutor(max_workers=2) as executor: + result = executor.submit(method, *args, **kwargs) + return result + + +def mock_invalid_lfid(): + def _mock_invalid_lfid(xstep): + def wrap(self, *args, **kwargs): + with mock.patch(O_get_secure_cookie) as m_cookie: + m_cookie.return_value = 'InvalidUser' + return xstep(self, *args, **kwargs) + return wrap + return _mock_invalid_lfid + + +def mock_valid_lfid(): + def _mock_valid_lfid(xstep): + def wrap(self, *args, **kwargs): + with mock.patch(O_get_secure_cookie) as m_cookie: + m_cookie.return_value = 'ValidUser' + return xstep(self, *args, **kwargs) + return wrap + return _mock_valid_lfid + def upload(excepted_status, excepted_response): def _upload(create_request): diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py index 3320a866a..c44a92c11 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py @@ -288,3 +288,4 @@ testcases = MemDb('testcases') results = MemDb('results') scenarios = MemDb('scenarios') tokens = MemDb('tokens') +users = MemDb('users') diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py index 39633e5f5..89cd7e8ed 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_base.py @@ -6,13 +6,16 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 
############################################################################## +from datetime import datetime import json from os import path +from bson.objectid import ObjectId import mock from tornado import testing from opnfv_testapi.resources import models +from opnfv_testapi.resources import pod_models from opnfv_testapi.tests.unit import fake_pymongo @@ -26,10 +29,32 @@ class TestBase(testing.AsyncHTTPTestCase): self.get_res = None self.list_res = None self.update_res = None + self.pod_d = pod_models.Pod(name='zte-pod1', + mode='virtual', + details='zte pod 1', + role='community-ci', + _id=str(ObjectId()), + owner='ValidUser', + create_date=str(datetime.now())) + self.pod_e = pod_models.Pod(name='zte-pod2', + mode='metal', + details='zte pod 2', + role='production-ci', + _id=str(ObjectId()), + owner='ValidUser', + create_date=str(datetime.now())) self.req_d = None self.req_e = None self.addCleanup(self._clear) super(TestBase, self).setUp() + fake_pymongo.users.insert({"user": "ValidUser", + 'email': 'validuser@lf.com', + 'fullname': 'Valid User', + 'groups': [ + 'opnfv-testapi-users', + 'opnfv-gerrit-functest-submitters', + 'opnfv-gerrit-qtip-contributors'] + }) def tearDown(self): self.db_patcher.stop() diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_pod.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_pod.py index d1a19f7f0..5d9da3a86 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_pod.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_pod.py @@ -12,24 +12,29 @@ import unittest from opnfv_testapi.common import message from opnfv_testapi.resources import pod_models from opnfv_testapi.tests.unit import executor +from opnfv_testapi.tests.unit import fake_pymongo from opnfv_testapi.tests.unit.resources import test_base as base class TestPodBase(base.TestBase): def setUp(self): super(TestPodBase, self).setUp() - self.req_d = pod_models.PodCreateRequest('zte-1', 'virtual', - 'zte pod 1', 'ci-pod') - self.req_e = pod_models.PodCreateRequest('zte-2', 'metal', 'zte pod 2') - self.req_f = pod_models.PodCreateRequest('Zte-1', 'virtual', - 'zte pod 1', 'ci-pod') self.get_res = pod_models.Pod self.list_res = pod_models.Pods self.basePath = '/api/v1/pods' + self.req_d = pod_models.PodCreateRequest(name=self.pod_d.name, + mode=self.pod_d.mode, + details=self.pod_d.details, + role=self.pod_d.role) + self.req_e = pod_models.PodCreateRequest(name=self.pod_e.name, + mode=self.pod_e.mode, + details=self.pod_e.details, + role=self.pod_e.role) def assert_get_body(self, pod, req=None): if not req: req = self.req_d + self.assertEqual(pod.owner, 'ValidUser') self.assertEqual(pod.name, req.name) self.assertEqual(pod.mode, req.mode) self.assertEqual(pod.details, req.details) @@ -39,38 +44,54 @@ class TestPodBase(base.TestBase): class TestPodCreate(TestPodBase): + @executor.create(httplib.BAD_REQUEST, message.not_login()) + def test_notlogin(self): + return self.req_d + + @executor.mock_invalid_lfid() + @executor.create(httplib.BAD_REQUEST, message.not_lfid()) + def test_invalidLfid(self): + return self.req_d + + @executor.mock_valid_lfid() @executor.create(httplib.BAD_REQUEST, message.no_body()) def test_withoutBody(self): return None + @executor.mock_valid_lfid() @executor.create(httplib.BAD_REQUEST, message.missing('name')) def test_emptyName(self): return pod_models.PodCreateRequest('') + @executor.mock_valid_lfid() @executor.create(httplib.BAD_REQUEST, message.missing('name')) def test_noneName(self): return 
pod_models.PodCreateRequest(None) + @executor.mock_valid_lfid() @executor.create(httplib.OK, 'assert_create_body') def test_success(self): return self.req_d + @executor.mock_valid_lfid() @executor.create(httplib.FORBIDDEN, message.exist_base) def test_alreadyExist(self): - self.create_d() + fake_pymongo.pods.insert(self.pod_d.format()) return self.req_d + @executor.mock_valid_lfid() @executor.create(httplib.FORBIDDEN, message.exist_base) def test_alreadyExistCaseInsensitive(self): - self.create(self.req_f) + fake_pymongo.pods.insert(self.pod_d.format()) + self.req_d.name = self.req_d.name.upper() return self.req_d class TestPodGet(TestPodBase): def setUp(self): super(TestPodGet, self).setUp() - self.create_d() - self.create_e() + fake_pymongo.pods.insert(self.pod_d.format()) + fake_pymongo.pods.insert(self.pod_e.format()) @executor.get(httplib.NOT_FOUND, message.not_found_base) def test_notExist(self): @@ -78,7 +99,7 @@ class TestPodGet(TestPodBase): @executor.get(httplib.OK, 'assert_get_body') def test_getOne(self): - return self.req_d.name + return self.pod_d.name @executor.get(httplib.OK, '_assert_list') def test_list(self): @@ -87,10 +108,10 @@ class TestPodGet(TestPodBase): def _assert_list(self, body): self.assertEqual(len(body.pods), 2) for pod in body.pods: - if self.req_d.name == pod.name: + if self.pod_d.name == pod.name: self.assert_get_body(pod) else: - self.assert_get_body(pod, self.req_e) + self.assert_get_body(pod, self.pod_e) if __name__ == '__main__': diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py index 1e83ed308..1df31f36c 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_result.py @@ -7,17 +7,19 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import copy +from datetime import datetime +from datetime import timedelta import httplib -import unittest -from datetime import datetime, timedelta import json +import urllib +import unittest from opnfv_testapi.common import message -from opnfv_testapi.resources import pod_models from opnfv_testapi.resources import project_models from opnfv_testapi.resources import result_models from opnfv_testapi.resources import testcase_models from opnfv_testapi.tests.unit import executor +from opnfv_testapi.tests.unit import fake_pymongo from opnfv_testapi.tests.unit.resources import test_base as base @@ -52,7 +54,8 @@ class Details(object): class TestResultBase(base.TestBase): def setUp(self): - self.pod = 'zte-pod1' + super(TestResultBase, self).setUp() + self.pod = self.pod_d.name self.project = 'functest' self.case = 'vPing' self.installer = 'fuel' @@ -65,7 +68,6 @@ class TestResultBase(base.TestBase): self.stop_date = str(datetime.now() + timedelta(minutes=1)) self.update_date = str(datetime.now() + timedelta(days=1)) self.update_step = -0.05 - super(TestResultBase, self).setUp() self.details = Details(timestart='0', duration='9s', status='OK') self.req_d = result_models.ResultCreateRequest( pod_name=self.pod, @@ -84,10 +86,6 @@ class TestResultBase(base.TestBase): self.list_res = result_models.TestResults self.update_res = result_models.TestResult self.basePath = '/api/v1/results' - self.req_pod = pod_models.PodCreateRequest( - self.pod, - 'metal', - 'zte pod 1') self.req_project = project_models.ProjectCreateRequest( self.project, 'vping test') @@ -95,7 +93,7 
@@ class TestResultBase(base.TestBase): self.case, '/cases/vping', 'vping-ssh test') - self.create_help('/api/v1/pods', self.req_pod) + fake_pymongo.pods.insert(self.pod_d.format()) self.create_help('/api/v1/projects', self.req_project) self.create_help('/api/v1/projects/%s/cases', self.req_testcase, @@ -271,29 +269,29 @@ class TestResultGet(TestResultBase): @executor.query(httplib.BAD_REQUEST, message.must_int('period')) def test_queryPeriodNotInt(self): - return self._set_query('period=a') + return self._set_query(period='a') @executor.query(httplib.OK, '_query_period_one', 1) def test_queryPeriodSuccess(self): - return self._set_query('period=5') + return self._set_query(period=5) @executor.query(httplib.BAD_REQUEST, message.must_int('last')) def test_queryLastNotInt(self): - return self._set_query('last=a') + return self._set_query(last='a') @executor.query(httplib.OK, '_query_last_one', 1) def test_queryLast(self): - return self._set_query('last=1') + return self._set_query(last=1) @executor.query(httplib.OK, '_query_success', 4) def test_queryPublic(self): self._create_public_data() - return self._set_query('') + return self._set_query() @executor.query(httplib.OK, '_query_success', 1) def test_queryPrivate(self): self._create_private_data() - return self._set_query('public=false') + return self._set_query(public='false') @executor.query(httplib.OK, '_query_period_one', 1) def test_combination(self): @@ -306,12 +304,11 @@ class TestResultGet(TestResultBase): 'scenario', 'trust_indicator', 'criteria', - 'period=5') + period=5) @executor.query(httplib.OK, '_query_success', 0) def test_notFound(self): - return self._set_query('pod=notExistPod', - 'project', + return self._set_query('project', 'case', 'version', 'installer', @@ -319,7 +316,8 @@ class TestResultGet(TestResultBase): 'scenario', 'trust_indicator', 'criteria', - 'period=1') + pod='notExistPod', + period=1) @executor.query(httplib.OK, '_query_success', 1) def test_filterErrorStartdate(self): @@ -327,7 +325,7 @@ class TestResultGet(TestResultBase): self._create_error_start_date('None') self._create_error_start_date('null') self._create_error_start_date('') - return self._set_query('period=5') + return self._set_query(period=5) def _query_success(self, body, number): self.assertEqual(number, len(body.results)) @@ -366,18 +364,16 @@ class TestResultGet(TestResultBase): self.create(req) return req - def _set_query(self, *args): + def _set_query(self, *args, **kwargs): def get_value(arg): return self.__getattribute__(arg) \ if arg != 'trust_indicator' else self.trust_indicator.current - uri = '' + query = [] for arg in args: - if arg: - if '=' in arg: - uri += arg + '&' - else: - uri += '{}={}&'.format(arg, get_value(arg)) - return uri[0: -1] + query.append((arg, get_value(arg))) + for k, v in kwargs.iteritems(): + query.append((k, v)) + return urllib.urlencode(query) class TestResultUpdate(TestResultBase): diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py index 940e256c6..bd64723be 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/resources/test_token.py @@ -9,13 +9,12 @@ import unittest from tornado import web from opnfv_testapi.common import message -from opnfv_testapi.resources import project_models from opnfv_testapi.tests.unit import executor from opnfv_testapi.tests.unit import fake_pymongo -from opnfv_testapi.tests.unit.resources import 
test_base as base +from opnfv_testapi.tests.unit.resources import test_result -class TestToken(base.TestBase): +class TestTokenCreateResult(test_result.TestResultBase): def get_app(self): from opnfv_testapi.router import url_mappings return web.Application( @@ -25,27 +24,23 @@ class TestToken(base.TestBase): auth=True ) - -class TestTokenCreateProject(TestToken): def setUp(self): - super(TestTokenCreateProject, self).setUp() - self.req_d = project_models.ProjectCreateRequest('vping') + super(TestTokenCreateResult, self).setUp() fake_pymongo.tokens.insert({"access_token": "12345"}) - self.basePath = '/api/v1/projects' @executor.create(httplib.FORBIDDEN, message.invalid_token()) - def test_projectCreateTokenInvalid(self): + def test_resultCreateTokenInvalid(self): self.headers['X-Auth-Token'] = '1234' return self.req_d @executor.create(httplib.UNAUTHORIZED, message.unauthorized()) - def test_projectCreateTokenUnauthorized(self): + def test_resultCreateTokenUnauthorized(self): if 'X-Auth-Token' in self.headers: self.headers.pop('X-Auth-Token') return self.req_d @executor.create(httplib.OK, '_create_success') - def test_projectCreateTokenSuccess(self): + def test_resultCreateTokenSuccess(self): self.headers['X-Auth-Token'] = '12345' return self.req_d @@ -53,62 +48,5 @@ class TestTokenCreateProject(TestToken): self.assertIn('CreateResponse', str(type(body))) -class TestTokenDeleteProject(TestToken): - def setUp(self): - super(TestTokenDeleteProject, self).setUp() - self.req_d = project_models.ProjectCreateRequest('vping') - fake_pymongo.tokens.insert({"access_token": "12345"}) - self.basePath = '/api/v1/projects' - self.headers['X-Auth-Token'] = '12345' - self.create_d() - - @executor.delete(httplib.FORBIDDEN, message.invalid_token()) - def test_projectDeleteTokenIvalid(self): - self.headers['X-Auth-Token'] = '1234' - return self.req_d.name - - @executor.delete(httplib.UNAUTHORIZED, message.unauthorized()) - def test_projectDeleteTokenUnauthorized(self): - self.headers.pop('X-Auth-Token') - return self.req_d.name - - @executor.delete(httplib.OK, '_delete_success') - def test_projectDeleteTokenSuccess(self): - return self.req_d.name - - def _delete_success(self, body): - self.assertEqual('', body) - - -class TestTokenUpdateProject(TestToken): - def setUp(self): - super(TestTokenUpdateProject, self).setUp() - self.req_d = project_models.ProjectCreateRequest('vping') - fake_pymongo.tokens.insert({"access_token": "12345"}) - self.basePath = '/api/v1/projects' - self.headers['X-Auth-Token'] = '12345' - self.create_d() - - @executor.update(httplib.FORBIDDEN, message.invalid_token()) - def test_projectUpdateTokenIvalid(self): - self.headers['X-Auth-Token'] = '1234' - req = project_models.ProjectUpdateRequest('newName', 'new description') - return req, self.req_d.name - - @executor.update(httplib.UNAUTHORIZED, message.unauthorized()) - def test_projectUpdateTokenUnauthorized(self): - self.headers.pop('X-Auth-Token') - req = project_models.ProjectUpdateRequest('newName', 'new description') - return req, self.req_d.name - - @executor.update(httplib.OK, '_update_success') - def test_projectUpdateTokenSuccess(self): - req = project_models.ProjectUpdateRequest('newName', 'new description') - return req, self.req_d.name - - def _update_success(self, request, body): - self.assertIn(request.name, body) - - if __name__ == '__main__': unittest.main() diff --git a/utils/test/testapi/opts/deploy.py b/utils/test/testapi/opts/deploy.py new file mode 100644 index 000000000..f58690c5d --- /dev/null +++ 
b/utils/test/testapi/opts/deploy.py @@ -0,0 +1,55 @@ +import argparse +import os + +from jinja2 import Environment + +DOCKER_COMPOSE_FILE = './docker-compose.yml' +DOCKER_COMPOSE_TEMPLATE = """ +version: '2' +services: + mongo: + image: mongo:3.2.1 + container_name: opnfv-mongo + testapi: + image: opnfv/testapi:latest + container_name: opnfv-testapi + environment: + - mongodb_url=mongodb://mongo:27017/ + - base_url={{ vars.testapi_base_url }} + ports: + - "{{ vars.testapi_port }}:8000" + links: + - mongo +""" + + +def render_docker_compose(testapi_port, testapi_base_url): + vars = { + "testapi_port": testapi_port, + "testapi_base_url": testapi_base_url, + } + + yml = Environment().from_string(DOCKER_COMPOSE_TEMPLATE).render(vars=vars) + + with open(DOCKER_COMPOSE_FILE, 'w') as f: + f.write(yml) + f.close() + + +def main(args): + render_docker_compose(args.testapi_port, args.testapi_base_url) + os.system('docker-compose -f {} up -d'.format(DOCKER_COMPOSE_FILE)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Deploy TestAPI') + parser.add_argument('-tp', '--testapi-port', + type=int, + required=False, + default=8000, + help='testapi exposed port') + parser.add_argument('-tl', '--testapi-base-url', + type=str, + required=True, + help='testapi exposed base-url') + main(parser.parse_args()) diff --git a/utils/test/testapi/setup.cfg b/utils/test/testapi/setup.cfg index d9aa6762e..23341e4b4 100644 --- a/utils/test/testapi/setup.cfg +++ b/utils/test/testapi/setup.cfg @@ -6,6 +6,7 @@ description-file = author = SerenaFeng author-email = feng.xiaowei@zte.com.cn #home-page = http://www.opnfv.org/ +license = Apache-2.0 classifier = Environment :: opnfv Intended Audience :: Information Technology
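On the authentication split above: check.py now gates pod writes on a signed login cookie (constants.TESTAPI_ID) looked up in the users collection, while X-Auth-Token checking applies only to results. The cookie side rests on Tornado's stock secure-cookie mechanism; the sketch below shows only that mechanism and is not TestAPI code -- the 'TESTAPI_ID' string stands in for constants.TESTAPI_ID (whose value is not shown in this diff), and the user name and secret are illustrative.

import tornado.web

class FakeLoginHandler(tornado.web.RequestHandler):
    def get(self):
        # set_secure_cookie() signs the value with the application's
        # cookie_secret; get_secure_cookie() later returns None when the
        # signature is absent or invalid, which is what triggers the
        # not_login() branch in the new is_authorized() decorator.
        self.set_secure_cookie('TESTAPI_ID', 'ValidUser')
        self.finish('signed in')

application = tornado.web.Application([(r'/login', FakeLoginHandler)],
                                      cookie_secret='illustrative-secret')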
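The _set_query() rewrite in test_result.py above swaps hand-built 'key=value&' concatenation for urllib.urlencode, which also percent-escapes values. A quick illustration, in Python 2 to match the tests (they use httplib and iteritems):

import urllib

# A list of pairs keeps the parameter order stable; a dict also works.
query = [('pod', 'zte-pod1'), ('period', 5), ('case', 'vping ssh')]
print(urllib.urlencode(query))
# -> pod=zte-pod1&period=5&case=vping+ssh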
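The new opts/deploy.py folds the docker-compose template into the script as an inline string (the separate deployment/docker-compose.yml.template is deleted earlier in this change), so rendering goes through Environment().from_string() instead of a FileSystemLoader. A minimal, self-contained demo of that pattern, with the services trimmed down from the template above:

from jinja2 import Environment

TEMPLATE = """
version: '2'
services:
  testapi:
    image: opnfv/testapi:latest
    ports:
      - "{{ vars.testapi_port }}:8000"
"""

# Jinja2 resolves {{ vars.testapi_port }} against the dict passed in
# under the name 'vars', mirroring render_docker_compose() above.
print(Environment().from_string(TEMPLATE).render(vars={'testapi_port': 8000}))

Per the argparse block above, only --testapi-base-url is required when running the script itself, e.g. python utils/test/testapi/opts/deploy.py -tl http://localhost:8000 (URL illustrative); the exposed port falls back to the default 8000.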