Diffstat (limited to 'utils/test')
-rw-r--r-- | utils/test/opts/one_click_deploy.py                                                         |  67
-rw-r--r-- | utils/test/opts/watchdog.sh (renamed from utils/test/testapi/tools/watchdog/docker_watch.sh) |  17
-rw-r--r-- | utils/test/reporting/docker/nginx.conf                                                       |   2
-rwxr-xr-x | utils/test/reporting/reporting/functest/reporting-status.py                                  |  96
-rwxr-xr-x | utils/test/reporting/reporting/functest/reporting-vims.py                                    | 232
-rw-r--r-- | utils/test/reporting/reporting/functest/template/index-vims-tmpl.html                        |  13
-rw-r--r-- | utils/test/reporting/reporting/functest/testCase.py                                          |   5
-rw-r--r-- | utils/test/testapi/deployment/deploy.py                                                      |  40
-rw-r--r-- | utils/test/testapi/deployment/docker-compose.yml.template                                    |  15
-rw-r--r-- | utils/test/testapi/opts/deploy.py                                                            |  55
10 files changed, 312 insertions(+), 230 deletions(-)
diff --git a/utils/test/opts/one_click_deploy.py b/utils/test/opts/one_click_deploy.py
new file mode 100644
index 000000000..074827021
--- /dev/null
+++ b/utils/test/opts/one_click_deploy.py
@@ -0,0 +1,67 @@
+import argparse
+import os
+
+from jinja2 import Environment
+
+DOCKER_COMPOSE_FILE = './docker-compose.yml'
+DOCKER_COMPOSE_TEMPLATE = """
+version: '2'
+services:
+  mongo:
+    image: mongo:3.2.1
+    container_name: opnfv-mongo
+  testapi:
+    image: opnfv/testapi:latest
+    container_name: opnfv-testapi
+    environment:
+      - mongodb_url=mongodb://mongo:27017/
+      - base_url={{ vars.base_url }}
+    ports:
+      - "{{ vars.testapi_port }}:8000"
+    links:
+      - mongo
+  reporting:
+    image: opnfv/reporting:latest
+    container_name: opnfv-reporting
+    ports:
+      - "{{ vars.reporting_port }}:8000"
+"""
+
+
+def render_docker_compose(testapi_port, reporting_port, testapi_base_url):
+    vars = {
+        "testapi_port": testapi_port,
+        "reporting_port": reporting_port,
+        "base_url": testapi_base_url,
+    }
+    yml = Environment().from_string(DOCKER_COMPOSE_TEMPLATE).render(vars=vars)
+    with open(DOCKER_COMPOSE_FILE, 'w') as f:
+        f.write(yml)
+        f.close()
+
+
+def main(args):
+    render_docker_compose(args.testapi_port,
+                          args.reporting_port,
+                          args.testapi_base_url)
+    os.system('docker-compose -f {} up -d'.format(DOCKER_COMPOSE_FILE))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Backup MongoDBs')
+    parser.add_argument('-tp', '--testapi-port',
+                        type=int,
+                        required=False,
+                        default=8082,
+                        help='testapi exposed port')
+    parser.add_argument('-tl', '--testapi-base-url',
+                        type=str,
+                        required=True,
+                        help='testapi exposed base-url')
+    parser.add_argument('-rp', '--reporting-port',
+                        type=int,
+                        required=False,
+                        default=8084,
+                        help='reporting exposed port')
+
+    main(parser.parse_args())
diff --git a/utils/test/testapi/tools/watchdog/docker_watch.sh b/utils/test/opts/watchdog.sh
index f1d8946b6..51868d709 100644
--- a/utils/test/testapi/tools/watchdog/docker_watch.sh
+++ b/utils/test/opts/watchdog.sh
@@ -27,7 +27,7 @@ declare -A urls=( ["testapi"]="http://testresults.opnfv.org/test/" \
 
 ### Functions related to checking.
 function is_deploying() {
-    xml=$(curl -m10 "https://build.opnfv.org/ci/job/${1}-automate-master/lastBuild/api/xml?depth=1")
+    xml=$(curl -m10 "https://build.opnfv.org/ci/job/${1}-automate-docker-deploy-master/lastBuild/api/xml?depth=1")
     building=$(grep -oPm1 "(?<=<building>)[^<]+" <<< "$xml")
     if [[ $building == "false" ]]
     then
@@ -64,12 +64,11 @@ function check_modules() {
     failed_modules=()
     for module in "${modules[@]}"
     do
-        if is_deploying $module; then
-            continue
-        fi
         if ! check_connectivity $module "${urls[$module]}"; then
-            echo -e "$module failed"
-            failed_modules+=($module)
+            if ! is_deploying $module; then
+                echo -e "$module failed"
+                failed_modules+=($module)
+            fi
         fi
     done
     if [ ! -z "$failed_modules" ]; then
@@ -114,13 +113,11 @@ function start_containers_fix() {
 
 function start_container_fix() {
     echo -e "Starting a container $module"
-    sudo docker stop $module
-    sudo docker start $module
+    sudo docker restart $module
     sleep 5
     if ! check_connectivity $module "${urls[$module]}"; then
         echo -e "Starting an old container $module_old"
-        sudo docker stop $module
-        sudo docker start $module"_old"
+        sudo docker restart $module"_old"
         sleep 5
     fi
 }
diff --git a/utils/test/reporting/docker/nginx.conf b/utils/test/reporting/docker/nginx.conf
index 95baf0e48..66bd7e497 100644
--- a/utils/test/reporting/docker/nginx.conf
+++ b/utils/test/reporting/docker/nginx.conf
@@ -15,7 +15,7 @@ server {
     }
 
     location /api/ {
-        http_pass http://backends/;
+        proxy_pass http://backends/;
     }
 
     location /display/ {
diff --git a/utils/test/reporting/reporting/functest/reporting-status.py b/utils/test/reporting/reporting/functest/reporting-status.py
index 02bf67d0e..808c84144 100755
--- a/utils/test/reporting/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/reporting/functest/reporting-status.py
@@ -22,7 +22,7 @@ Functest reporting status
 """
 
 # Logger
-logger = rp_utils.getLogger("Functest-Status")
+LOGGER = rp_utils.getLogger("Functest-Status")
 
 # Initialization
 testValid = []
@@ -46,16 +46,16 @@ exclude_virtual = rp_utils.get_config('functest.exclude_virtual')
 
 functest_yaml_config = rp_utils.getFunctestConfig()
 
-logger.info("*******************************************")
-logger.info("*                                         *")
-logger.info("*   Generating reporting scenario status  *")
-logger.info("*   Data retention: %s days               *" % period)
-logger.info("*   Log level: %s                         *" % log_level)
-logger.info("*                                         *")
-logger.info("*   Virtual PODs exluded: %s              *" % exclude_virtual)
-logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
-logger.info("*                                         *")
-logger.info("*******************************************")
+LOGGER.info("*******************************************")
+LOGGER.info("*                                         *")
+LOGGER.info("*   Generating reporting scenario status  *")
+LOGGER.info("*   Data retention: %s days               *", period)
+LOGGER.info("*   Log level: %s                         *", log_level)
+LOGGER.info("*                                         *")
+LOGGER.info("*   Virtual PODs exluded: %s              *", exclude_virtual)
+LOGGER.info("*   NOHA scenarios excluded: %s           *", exclude_noha)
+LOGGER.info("*                                         *")
+LOGGER.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
@@ -75,9 +75,9 @@ for tier in config_tiers:
     elif tier['order'] == 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
-                testValid.append(tc.TestCase(case['case_name'],
-                                             case['case_name'],
-                                             case['dependencies']))
+                otherTestCases.append(tc.TestCase(case['case_name'],
+                                                  case['case_name'],
+                                                  case['dependencies']))
     elif tier['order'] > 2:
         for case in tier['testcases']:
             if case['case_name'] not in blacklist:
@@ -85,7 +85,7 @@ for tier in config_tiers:
                                                   "functest",
                                                   case['dependencies']))
 
-logger.debug("Functest reporting start")
+LOGGER.debug("Functest reporting start")
 
 # For all the versions
 for version in versions:
@@ -101,7 +101,7 @@ for version in versions:
     # initiate scenario file if it does not exist
     if not os.path.isfile(scenario_file_name):
         with open(scenario_file_name, "a") as my_file:
-            logger.debug("Create scenario file: %s" % scenario_file_name)
+            LOGGER.debug("Create scenario file: %s", scenario_file_name)
             my_file.write("date,scenario,installer,detail,score\n")
 
     for installer in installers:
@@ -113,10 +113,10 @@ for version in versions:
                                                  version)
         # get nb of supported architecture (x86, aarch64)
         architectures = rp_utils.getArchitectures(scenario_results)
-        logger.info("Supported architectures: {}".format(architectures))
+        LOGGER.info("Supported architectures: %s", architectures)
 
         for architecture in architectures:
-            logger.info("architecture: {}".format(architecture))
+            LOGGER.info("Architecture: %s", architecture)
             # Consider only the results for the selected architecture
             # i.e drop x86 for aarch64 and vice versa
             filter_results = rp_utils.filterArchitecture(scenario_results,
@@ -133,10 +133,10 @@ for version in versions:
 
             # For all the scenarios get results
            for s, s_result in filter_results.items():
-                logger.info("---------------------------------")
-                logger.info("installer %s, version %s, scenario %s:" %
-                            (installer, version, s))
-                logger.debug("Scenario results: %s" % s_result)
+                LOGGER.info("---------------------------------")
+                LOGGER.info("installer %s, version %s, scenario %s:",
+                            installer, version, s)
+                LOGGER.debug("Scenario results: %s", s_result)
 
                 # Green or Red light for a given scenario
                 nb_test_runnable_for_this_scenario = 0
@@ -146,11 +146,11 @@ for version in versions:
                 s_url = ""
                 if len(s_result) > 0:
                     build_tag = s_result[len(s_result)-1]['build_tag']
-                    logger.debug("Build tag: %s" % build_tag)
+                    LOGGER.debug("Build tag: %s", build_tag)
                     s_url = rp_utils.getJenkinsUrl(build_tag)
                     if s_url is None:
                         s_url = "http://testresultS.opnfv.org/reporting"
-                    logger.info("last jenkins url: %s" % s_url)
+                    LOGGER.info("last jenkins url: %s", s_url)
                 testCases2BeDisplayed = []
                 # Check if test case is runnable / installer, scenario
                 # for the test case used for Scenario validation
@@ -160,24 +160,24 @@ for version in versions:
                     for test_case in testValid:
                         test_case.checkRunnable(installer, s,
                                                 test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
+                        LOGGER.debug("testcase %s (%s) is %s",
+                                     test_case.getDisplayName(),
+                                     test_case.getName(),
+                                     test_case.isRunnable)
                         time.sleep(1)
                         if test_case.isRunnable:
                             name = test_case.getName()
                             displayName = test_case.getDisplayName()
                             project = test_case.getProject()
                             nb_test_runnable_for_this_scenario += 1
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
+                            LOGGER.info(" Searching results for case %s ",
+                                        displayName)
                             result = rp_utils.getResult(name, installer,
                                                         s, version)
                             # if no result set the value to 0
                             if result < 0:
                                 result = 0
-                            logger.info(" >>>> Test score = " + str(result))
+                            LOGGER.info(" >>>> Test score = " + str(result))
                             test_case.setCriteria(result)
                             test_case.setIsRunnable(True)
                             testCases2BeDisplayed.append(tc.TestCase(name,
@@ -193,17 +193,17 @@ for version in versions:
                     for test_case in otherTestCases:
                         test_case.checkRunnable(installer, s,
                                                 test_case.getConstraints())
-                        logger.debug("testcase %s (%s) is %s" %
-                                     (test_case.getDisplayName(),
-                                      test_case.getName(),
-                                      test_case.isRunnable))
+                        LOGGER.debug("testcase %s (%s) is %s",
+                                     test_case.getDisplayName(),
+                                     test_case.getName(),
+                                     test_case.isRunnable)
                         time.sleep(1)
                         if test_case.isRunnable:
                             name = test_case.getName()
                             displayName = test_case.getDisplayName()
                             project = test_case.getProject()
-                            logger.info(" Searching results for case %s " %
-                                        (displayName))
+                            LOGGER.info(" Searching results for case %s ",
+                                        displayName)
                             result = rp_utils.getResult(name, installer,
                                                         s, version)
                             # at least 1 result for the test
@@ -218,13 +218,13 @@ for version in versions:
                                                              True,
                                                              4))
                             else:
-                                logger.debug("No results found")
+                                LOGGER.debug("No results found")
 
                     items[s] = testCases2BeDisplayed
                 except Exception:
-                    logger.error("Error: installer %s, version %s, scenario %s"
-                                 % (installer, version, s))
-                    logger.error("No data available: %s" % (sys.exc_info()[0]))
+                    LOGGER.error("Error installer %s, version %s, scenario %s",
+                                 installer, version, s)
+                    LOGGER.error("No data available: %s", sys.exc_info()[0])
 
             # **********************************************
             # Evaluate the results for scenario validation
@@ -243,11 +243,11 @@ for version in versions:
 
             s_status = "KO"
             if scenario_score < scenario_criteria:
-                logger.info(">>>> scenario not OK, score = %s/%s" %
-                            (scenario_score, scenario_criteria))
+                LOGGER.info(">>>> scenario not OK, score = %s/%s",
+                            scenario_score, scenario_criteria)
                 s_status = "KO"
             else:
-                logger.info(">>>>> scenario OK, save the information")
+                LOGGER.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
                 path_validation_file = ("./display/" + version +
                                         "/functest/" +
@@ -270,7 +270,7 @@ for version in versions:
                                        s_score,
                                        s_score_percent,
                                        s_url)
-            logger.info("--------------------------")
+            LOGGER.info("--------------------------")
 
         templateLoader = jinja2.FileSystemLoader(".")
         templateEnv = jinja2.Environment(
@@ -294,9 +294,9 @@ for version in versions:
                   installer_display + ".html", "wb") as fh:
             fh.write(outputText)
 
-        logger.info("Manage export CSV & PDF")
+        LOGGER.info("Manage export CSV & PDF")
         rp_utils.export_csv(scenario_file_name, installer_display, version)
-        logger.error("CSV generated...")
+        LOGGER.error("CSV generated...")
 
         # Generate outputs for export
         # pdf
@@ -306,4 +306,4 @@ for version in versions:
         pdf_doc_name = ("./display/" + version + "/functest/status-" +
                         installer_display + ".pdf")
         rp_utils.export_pdf(pdf_path, pdf_doc_name)
-        logger.info("PDF generated...")
+        LOGGER.info("PDF generated...")
diff --git a/utils/test/reporting/reporting/functest/reporting-vims.py b/utils/test/reporting/reporting/functest/reporting-vims.py
index 14fddbe25..3b25e911d 100755
--- a/utils/test/reporting/reporting/functest/reporting-vims.py
+++ b/utils/test/reporting/reporting/functest/reporting-vims.py
@@ -1,112 +1,128 @@
+#!/usr/bin/python
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+"""
+vIMS reporting status
+"""
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
 
-# manage conf
-import utils.reporting_utils as rp_utils
-
-logger = rp_utils.getLogger("vIMS")
-
-
-def sig_test_format(sig_test):
-    nbPassed = 0
-    nbFailures = 0
-    nbSkipped = 0
-    for data_test in sig_test:
-        if data_test['result'] == "Passed":
-            nbPassed += 1
-        elif data_test['result'] == "Failed":
-            nbFailures += 1
-        elif data_test['result'] == "Skipped":
-            nbSkipped += 1
-    total_sig_test_result = {}
-    total_sig_test_result['passed'] = nbPassed
-    total_sig_test_result['failures'] = nbFailures
-    total_sig_test_result['skipped'] = nbSkipped
-    return total_sig_test_result
-
-period = rp_utils.get_config('general.period')
-versions = rp_utils.get_config('general.versions')
-url_base = rp_utils.get_config('testapi.url')
-
-logger.info("****************************************")
-logger.info("*   Generating reporting vIMS          *")
-logger.info("*   Data retention = %s days           *" % period)
-logger.info("*                                      *")
-logger.info("****************************************")
-
-installers = rp_utils.get_config('general.installers')
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
-logger.info("Start processing....")
+import reporting.utils.reporting_utils as rp_utils
+
+LOGGER = rp_utils.getLogger("vIMS")
+
+PERIOD = rp_utils.get_config('general.period')
+VERSIONS = rp_utils.get_config('general.versions')
+URL_BASE = rp_utils.get_config('testapi.url')
+
+LOGGER.info("****************************************")
+LOGGER.info("*   Generating reporting vIMS          *")
+LOGGER.info("*   Data retention = %s days           *", PERIOD)
+LOGGER.info("*                                      *")
+LOGGER.info("****************************************")
+
+INSTALLERS = rp_utils.get_config('general.installers')
+STEP_ORDER = ["initialisation", "orchestrator", "vnf", "test_vnf"]
+LOGGER.info("Start vIMS reporting processing....")
 
 # For all the versions
-for version in versions:
-    for installer in installers:
-        logger.info("Search vIMS results for installer: %s, version: %s"
-                    % (installer, version))
-        request = Request("http://" + url_base + '?case=vims&installer=' +
-                          installer + '&version=' + version)
-
-        try:
-            response = urlopen(request)
-            k = response.read()
-            results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
-        test_results = results['results']
-
-        logger.debug("Results found: %s" % test_results)
-
-        scenario_results = {}
-        for r in test_results:
-            if not r['scenario'] in scenario_results.keys():
-                scenario_results[r['scenario']] = []
-            scenario_results[r['scenario']].append(r)
-
-        for s, s_result in scenario_results.items():
-            scenario_results[s] = s_result[0:5]
-            logger.debug("Search for success criteria")
-            for result in scenario_results[s]:
-                result["start_date"] = result["start_date"].split(".")[0]
-                sig_test = result['details']['sig_test']['result']
-                if not sig_test == "" and isinstance(sig_test, list):
-                    format_result = sig_test_format(sig_test)
-                    if format_result['failures'] > format_result['passed']:
-                        result['details']['sig_test']['duration'] = 0
-                    result['details']['sig_test']['result'] = format_result
-                nb_step_ok = 0
-                nb_step = len(result['details'])
-
-                for step_name, step_result in result['details'].items():
-                    if step_result['duration'] != 0:
-                        nb_step_ok += 1
-                    m, s = divmod(step_result['duration'], 60)
-                    m_display = ""
-                    if int(m) != 0:
-                        m_display += str(int(m)) + "m "
-
-                    step_result['duration_display'] = (m_display +
-                                                       str(int(s)) + "s")
-
-                result['pr_step_ok'] = 0
-                if nb_step != 0:
-                    result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100
-                try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    res = result['details']['orchestrator']['duration']
-                    logger.debug("Orchestrator deployment: %s s"
-                                 % res)
-                    logger.debug("vIMS deployment: %s s"
-                                 % result['details']['vIMS']['duration'])
-                    logger.debug("Signaling testing: %s s"
-                                 % result['details']['sig_test']['duration'])
-                    logger.debug("Signaling testing results: %s"
-                                 % format_result)
-                except Exception:
-                    logger.error("Data badly formatted")
-                logger.debug("----------------------------------------")
+for version in VERSIONS:
+    for installer in INSTALLERS:
+
+        # get nb of supported architecture (x86, aarch64)
+        # get scenarios
+        scenario_results = rp_utils.getScenarios("functest",
+                                                 "cloudify_ims",
+                                                 installer,
+                                                 version)
+
+        architectures = rp_utils.getArchitectures(scenario_results)
+        LOGGER.info("Supported architectures: %s", architectures)
+
+        for architecture in architectures:
+            LOGGER.info("Architecture: %s", architecture)
+            # Consider only the results for the selected architecture
+            # i.e drop x86 for aarch64 and vice versa
+            filter_results = rp_utils.filterArchitecture(scenario_results,
+                                                         architecture)
+            scenario_stats = rp_utils.getScenarioStats(filter_results)
+            items = {}
+            scenario_result_criteria = {}
+
+            # in case of more than 1 architecture supported
+            # precise the architecture
+            installer_display = installer
+            if "fuel" in installer:
+                installer_display = installer + "@" + architecture
+
+            LOGGER.info("Search vIMS results for installer: %s, version: %s",
+                        installer, version)
+            request = Request("http://" + URL_BASE + '?case=cloudify_ims&'
+                              'installer=' + installer + '&version=' + version)
+            try:
+                response = urlopen(request)
+                k = response.read()
+                results = json.loads(k)
+            except URLError as err:
+                LOGGER.error("Error code: %s", err)
+
+            test_results = results['results']
+
+            # LOGGER.debug("Results found: %s" % test_results)
+
+            scenario_results = {}
+            for r in test_results:
+                if not r['scenario'] in scenario_results.keys():
+                    scenario_results[r['scenario']] = []
+                scenario_results[r['scenario']].append(r)
+
+            # LOGGER.debug("scenario result: %s" % scenario_results)
+
+            for s, s_result in scenario_results.items():
+                scenario_results[s] = s_result[0:5]
+                for result in scenario_results[s]:
+                    try:
+                        format_result = result['details']['test_vnf']['result']
+
+                        # round durations of the different steps
+                        result['details']['orchestrator']['duration'] = round(
+                            result['details']['orchestrator']['duration'], 1)
+                        result['details']['vnf']['duration'] = round(
+                            result['details']['vnf']['duration'], 1)
+                        result['details']['test_vnf']['duration'] = round(
+                            result['details']['test_vnf']['duration'], 1)
+
+                        res_orch = \
+                            result['details']['orchestrator']['duration']
+                        res_vnf = result['details']['vnf']['duration']
+                        res_test_vnf = \
+                            result['details']['test_vnf']['duration']
+                        res_signaling = \
+                            result['details']['test_vnf']['result']['failures']
+
+                        # Manage test result status
+                        if res_signaling != 0:
+                            LOGGER.debug("At least 1 signalig test FAIL")
+                            result['details']['test_vnf']['status'] = "FAIL"
+                        else:
+                            LOGGER.debug("All signalig tests PASS")
+                            result['details']['test_vnf']['status'] = "PASS"
+
+                        LOGGER.debug("Scenario %s, Installer %s",
+                                     s_result[1]['scenario'], installer)
+                        LOGGER.debug("Orchestrator deployment: %ss", res_orch)
+                        LOGGER.debug("vIMS deployment: %ss", res_vnf)
+                        LOGGER.debug("VNF testing: %ss", res_test_vnf)
+                        LOGGER.debug("VNF testing results: %s", format_result)
+                    except Exception as err:  # pylint: disable=broad-except
+                        LOGGER.error("Uncomplete data %s", err)
+                    LOGGER.debug("----------------------------------------")
 
         templateLoader = jinja2.FileSystemLoader(".")
         templateEnv = jinja2.Environment(loader=templateLoader,
@@ -116,11 +132,11 @@ for version in versions:
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_results,
-                                     step_order=step_order,
-                                     installer=installer)
-
+                                     step_order=STEP_ORDER,
+                                     installer=installer_display)
+        LOGGER.debug("Generate html page for %s", installer_display)
         with open("./display/" + version + "/functest/vims-" +
-                  installer + ".html", "wb") as fh:
+                  installer_display + ".html", "wb") as fh:
             fh.write(outputText)
 
-logger.info("vIMS report succesfully generated")
+LOGGER.info("vIMS report succesfully generated")
diff --git a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
index cd51607b7..9bd2b2f66 100644
--- a/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
+++ b/utils/test/reporting/reporting/functest/template/index-vims-tmpl.html
@@ -22,11 +22,12 @@
                     <nav>
                         <ul class="nav nav-justified">
                             <li class="active"><a href="../../index.html">Home</a></li>
-                            <li><a href="vims-fuel.html">Fuel</a></li>
+                            <li><a href="vims-apex.html">Apex</a></li>
                             <li><a href="vims-compass.html">Compass</a></li>
                             <li><a href="vims-daisy.html">Daisy</a></li>
-                            <li><a href="vims-joid.html">JOID</a></li>
-                            <li><a href="vims-apex.html">APEX</a></li>
+                            <li><a href="vims-fuel@x86.html">Fuel@x86</a></li>
+                            <li><a href="vims-fuel@aarch64.html">Fuel@aarch64</a></li>
+                            <li><a href="vims-joid.html">Joid</a></li>
                         </ul>
                     </nav>
                 </div>
@@ -58,17 +59,17 @@
                                     <tr>
                                         <th width="20%">Step</th>
                                         <th width="10%">Status</th>
-                                        <th width="10%">Duration</th>
+                                        <th width="10%">Duration(s)</th>
                                         <th width="60%">Result</th>
                                     </tr>
                                     {% for step_od_name in step_order -%}
                                     {% if step_od_name in result.details.keys() -%}
                                     {% set step_result = result.details[step_od_name] -%}
-                                    {% if step_result.duration != 0 -%}
+                                    {% if step_result.status == "PASS" -%}
                                     <tr class="tr-ok">
                                         <td>{{step_od_name}}</td>
                                         <td><span class="glyphicon glyphicon-ok"></td>
-                                        <td><b>{{step_result.duration_display}}</b></td>
+                                        <td><b>{{step_result.duration}}</b></td>
                                         <td>{{step_result.result}}</td>
                                     </tr>
                                     {%- else -%}
diff --git a/utils/test/reporting/reporting/functest/testCase.py b/utils/test/reporting/reporting/functest/testCase.py
index 9834f0753..a182dd4cf 100644
--- a/utils/test/reporting/reporting/functest/testCase.py
+++ b/utils/test/reporting/reporting/functest/testCase.py
@@ -50,9 +50,10 @@ class TestCase(object):
                          'gluon_vping': 'Netready',
                          'fds': 'FDS',
                          'cloudify_ims': 'vIMS (Cloudify)',
-                         'orchestra_ims': 'OpenIMS (OpenBaton)',
+                         'orchestra_openims': 'OpenIMS (OpenBaton)',
+                         'orchestra_clearwaterims': 'vIMS (OpenBaton)',
                          'opera_ims': 'vIMS (Open-O)',
-                         'vyos_vrouter': 'vyos',
+                         'vyos_vrouter': 'vyos (Cloudify)',
                          'barometercollectd': 'Barometer',
                          'odl_netvirt': 'Netvirt',
                          'security_scan': 'Security'}
diff --git a/utils/test/testapi/deployment/deploy.py b/utils/test/testapi/deployment/deploy.py
deleted file mode 100644
index 6433fa6b2..000000000
--- a/utils/test/testapi/deployment/deploy.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import argparse
-import os
-
-from jinja2 import Environment, FileSystemLoader
-
-env = Environment(loader=FileSystemLoader('./'))
-docker_compose_yml = './docker-compose.yml'
-docker_compose_template = './docker-compose.yml.template'
-
-
-def render_docker_compose(port, base_url):
-    vars = {
-        "expose_port": port,
-        "base_url": base_url,
-    }
-    template = env.get_template(docker_compose_template)
-    yml = template.render(vars=vars)
-
-    with open(docker_compose_yml, 'w') as f:
-        f.write(yml)
-        f.close()
-
-
-def main(args):
-    render_docker_compose(args.expose_port, args.base_url)
-    os.system('docker-compose -f {} up -d'.format(docker_compose_yml))
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Backup MongoDBs')
-    parser.add_argument('-p', '--expose-port',
-                        type=int,
-                        required=False,
-                        default=8000,
-                        help='testapi exposed port')
-    parser.add_argument('-l', '--base-url',
-                        type=str,
-                        required=True,
-                        help='testapi exposed base-url')
-    main(parser.parse_args())
diff --git a/utils/test/testapi/deployment/docker-compose.yml.template b/utils/test/testapi/deployment/docker-compose.yml.template
deleted file mode 100644
index cd684048e..000000000
--- a/utils/test/testapi/deployment/docker-compose.yml.template
+++ /dev/null
@@ -1,15 +0,0 @@
-version: '2'
-services:
-  mongo:
-    image: mongo:3.2.1
-    container_name: opnfv-mongo
-  testapi:
-    image: opnfv/testapi:latest
-    container_name: opnfv-testapi
-    environment:
-      - mongodb_url=mongodb://mongo:27017/
-      - base_url={{ vars.base_url }}
-    ports:
-      - "{{ vars.expose_port }}:8000"
-    links:
-      - mongo
diff --git a/utils/test/testapi/opts/deploy.py b/utils/test/testapi/opts/deploy.py
new file mode 100644
index 000000000..f58690c5d
--- /dev/null
+++ b/utils/test/testapi/opts/deploy.py
@@ -0,0 +1,55 @@
+import argparse
+import os
+
+from jinja2 import Environment
+
+DOCKER_COMPOSE_FILE = './docker-compose.yml'
+DOCKER_COMPOSE_TEMPLATE = """
+version: '2'
+services:
+  mongo:
+    image: mongo:3.2.1
+    container_name: opnfv-mongo
+  testapi:
+    image: opnfv/testapi:latest
+    container_name: opnfv-testapi
+    environment:
+      - mongodb_url=mongodb://mongo:27017/
+      - base_url={{ vars.testapi_base_url }}
+    ports:
+      - "{{ vars.testapi_port }}:8000"
+    links:
+      - mongo
+"""
+
+
+def render_docker_compose(testapi_port, testapi_base_url):
+    vars = {
+        "testapi_port": testapi_port,
+        "testapi_base_url": testapi_base_url,
+    }
+
+    yml = Environment().from_string(DOCKER_COMPOSE_TEMPLATE).render(vars=vars)
+
+    with open(DOCKER_COMPOSE_FILE, 'w') as f:
+        f.write(yml)
+        f.close()
+
+
+def main(args):
+    render_docker_compose(args.testapi_port, args.testapi_base_url)
+    os.system('docker-compose -f {} up -d'.format(DOCKER_COMPOSE_FILE))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Backup MongoDBs')
+    parser.add_argument('-tp', '--testapi-port',
+                        type=int,
+                        required=False,
+                        default=8000,
+                        help='testapi exposed port')
+    parser.add_argument('-tl', '--testapi-base-url',
+                        type=str,
+                        required=True,
+                        help='testapi exposed base-url')
+    main(parser.parse_args())
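
Usage note: with utils/test/opts/one_click_deploy.py added above, the MongoDB, TestAPI and reporting containers come up in one step. A minimal sketch of the invocation, assuming docker-compose is installed and using the flag names and defaults defined in the script's argparse parser; the base-url value below is only an illustrative example, not something mandated by the change:

    cd utils/test/opts
    # --testapi-base-url is the only required argument;
    # --testapi-port defaults to 8082 and --reporting-port to 8084
    python one_click_deploy.py \
        --testapi-base-url http://testresults.opnfv.org/test \
        --testapi-port 8082 \
        --reporting-port 8084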