Diffstat (limited to 'testcases')
-rw-r--r--  testcases/Controllers/ODL/CI/odlreport2db.py             |   1
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/onosfunctest.py     |   1
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py   | 189
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally.py        |  29
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py      |  20
-rw-r--r--  testcases/config_functest.yaml                           |  10
-rw-r--r--  testcases/features/doctor.py                             |   2
-rw-r--r--  testcases/functest_utils.py                              |  94
-rw-r--r--  testcases/tests/TestFunctestUtils.py                     |   7
-rw-r--r--  testcases/vIMS/CI/vIMS.py                                |   9
-rw-r--r--  testcases/vPing/CI/libraries/vPing_ssh.py (renamed from testcases/vPing/CI/libraries/vPing2.py)       |   9
-rw-r--r--  testcases/vPing/CI/libraries/vPing_userdata.py (renamed from testcases/vPing/CI/libraries/vPing.py)   |   9

12 files changed, 260 insertions, 120 deletions
diff --git a/testcases/Controllers/ODL/CI/odlreport2db.py b/testcases/Controllers/ODL/CI/odlreport2db.py
index 1538f79cf..47067963a 100644
--- a/testcases/Controllers/ODL/CI/odlreport2db.py
+++ b/testcases/Controllers/ODL/CI/odlreport2db.py
@@ -130,6 +130,7 @@ def main(argv):
     #   -p opnfv-jump-2
     #   -s os-odl_l2-ha
     functest_utils.push_results_to_db(database,
+                                      "functest",
                                       data['case_name'],
                                       None,
                                       data['pod_name'],
diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
index bf031cb47..dc45088b3 100644
--- a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
+++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
@@ -182,6 +182,7 @@ def main():
     pod_name = functest_utils.get_pod_name(logger)
     result = GetResult()
     functest_utils.push_results_to_db(TEST_DB,
+                                      "functest",
                                       "ONOS", logger, pod_name,
                                       scenario, payload=result)
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index 0fb6ce7d4..0d1992604 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -13,16 +13,17 @@
 # 0.3 (19/10/2015) remove Tempest from run_rally
 #     and push result into test DB
 #
-
-import re
-import json
-import os
 import argparse
+import json
 import logging
-import yaml
+import os
+import re
 import requests
 import subprocess
 import sys
+import time
+import yaml
+
 from novaclient import client as novaclient
 from glanceclient import client as glanceclient
 from keystoneclient.v2_0 import client as keystoneclient
@@ -53,6 +54,9 @@ parser.add_argument("-s", "--smoke",
 parser.add_argument("-v", "--verbose",
                     help="Print verbose info about the progress",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 
 args = parser.parse_args()
 
@@ -78,7 +82,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
 ch.setFormatter(formatter)
 logger.addHandler(ch)
 
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
 if not os.path.exists(REPO_PATH):
     logger.error("Functest repository directory not found '%s'" % REPO_PATH)
     exit(-1)
@@ -90,8 +94,8 @@ with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
     f.close()
 
 HOME = os.environ['HOME']+"/"
-####todo:
-#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+### todo:
+# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
 #    get("directories").get("dir_rally_scn")
 SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
 ###
@@ -108,8 +112,6 @@ CONCURRENCY = 4
 RESULTS_DIR = functest_yaml.get("general").get("directories"). \
     get("dir_rally_res")
 TEST_DB = functest_yaml.get("results").get("test_db_url")
-FLOATING_NETWORK = functest_yaml.get("general"). \
-    get("openstack").get("neutron_public_net_name")
 PRIVATE_NETWORK = functest_yaml.get("general"). \
     get("openstack").get("neutron_private_net_name")
 
@@ -125,14 +127,17 @@ GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
 CINDER_VOLUME_TYPE_NAME = "volume_test"
 
 
-def push_results_to_db(payload):
+SUMMARY = []
+
+
+def push_results_to_db(case, payload):
 
     url = TEST_DB + "/results"
     installer = functest_utils.get_installer_type(logger)
     scenario = functest_utils.get_scenario(logger)
     pod_name = functest_utils.get_pod_name(logger)
     # TODO pod_name hardcoded, info shall come from Jenkins
-    params = {"project_name": "functest", "case_name": "Rally",
+    params = {"project_name": "functest", "case_name": case,
               "pod_name": pod_name, "installer": installer,
               "version": scenario, "details": payload}
@@ -180,9 +185,6 @@ def build_task_args(test_file_name):
     task_args['image_name'] = GLANCE_IMAGE_NAME
     task_args['flavor_name'] = FLAVOR_NAME
     task_args['glance_image_location'] = GLANCE_IMAGE_PATH
-    task_args['floating_network'] = FLOATING_NETWORK
-    task_args['netid'] = functest_utils.get_network_id(client_dict['neutron'],
-                                  PRIVATE_NETWORK).encode('ascii', 'ignore')
     task_args['tmpl_dir'] = TEMPLATE_DIR
     task_args['sup_dir'] = SUPPORT_DIR
     task_args['users_amount'] = USERS_AMOUNT
@@ -190,19 +192,32 @@ def build_task_args(test_file_name):
     task_args['iterations'] = ITERATIONS_AMOUNT
     task_args['concurrency'] = CONCURRENCY
 
+    ext_net = functest_utils.get_external_net(client_dict['neutron'])
+    if ext_net:
+        task_args['floating_network'] = str(ext_net)
+    else:
+        task_args['floating_network'] = ''
+
+    net_id = functest_utils.get_network_id(client_dict['neutron'],
+                                           PRIVATE_NETWORK)
+    task_args['netid'] = str(net_id)
+
     return task_args
 
 
-def get_output(proc):
+def get_output(proc, test_name):
+    global SUMMARY
     result = ""
-    if args.verbose:
-        while proc.poll() is None:
-            line = proc.stdout.readline()
-            print line.replace('\n', '')
+    nb_tests = 0
+    overall_duration = 0.0
+    success = 0.0
+    nb_totals = 0
+
+    while proc.poll() is None:
+        line = proc.stdout.readline()
+        if args.verbose:
             result += line
-    else:
-        while proc.poll() is None:
-            line = proc.stdout.readline()
+        else:
             if "Load duration" in line or \
                "started" in line or \
               "finished" in line or \
@@ -214,7 +229,36 @@ def get_output(proc, test_name):
                     result += "\n" + line
                 elif "Full duration" in line:
                     result += line + "\n\n"
-    logger.info("\n" + result)
+
+            # parse output for summary report
+            if "| " in line and \
+               "| action" not in line and \
+               "| Starting" not in line and \
+               "| Completed" not in line and \
+               "| ITER" not in line and \
+               "|   " not in line and \
+               "| total" not in line:
+                nb_tests += 1
+            elif "| total" in line:
+                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+                success += float(percentage)
+                nb_totals += 1
+            elif "Full duration" in line:
+                overall_duration += float(line.split(': ')[1])
+
+    overall_duration="{:10.2f}".format(overall_duration)
+    if nb_totals == 0:
+        success_avg = 0
+    else:
+        success_avg = "{:0.2f}".format(success / nb_totals)
+
+    scenario_summary = {'test_name': test_name,
+                        'overall_duration': overall_duration,
+                        'nb_tests': nb_tests,
+                        'success': success_avg}
+    SUMMARY.append(scenario_summary)
+
+    logger.info("\n" + result)
 
     return result
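The reworked get_output() derives its per-scenario summary by scanning rally's console tables rather than the JSON results: the success figure is the 9th '|'-separated column of each "| total |" row. A minimal standalone sketch of that parsing, with an illustrative sample row (column layout assumed from rally's table format):

    def parse_total_line(line):
        # "| total |" rows end with "... | success | count |"; the success
        # percentage is field 8 once the line is split on '|'.
        return float(line.split('|')[8].strip().strip('%'))

    # illustrative rally-style row: action, min, median, 90%ile, 95%ile,
    # max, avg, success, count
    sample = "| total | 0.9 | 1.2 | 1.5 | 1.6 | 2.0 | 1.3 | 100.0% | 10 |"
    assert parse_total_line(sample) == 100.0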
@@ -225,7 +269,7 @@ def get_output(proc, test_name):
     # :param test_name: name for the rally test
     # :return: void
     #
-
+    global SUMMARY
     logger.info('Starting test scenario "{}" ...'.format(test_name))
 
     task_file = '{}task.yaml'.format(SCENARIOS_DIR)
@@ -233,7 +277,8 @@ def run_task(test_name):
         logger.error("Task file '%s' does not exist." % task_file)
         exit(-1)
 
-    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
+    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
+                                              test_name)
     if not os.path.exists(test_file_name):
         logger.error("The scenario '%s' does not exist." % test_file_name)
         exit(-1)
@@ -245,18 +290,19 @@ def run_task(test_name):
                "--task-args \"{}\" ".format(build_task_args(test_name))
     logger.debug('running command line : {}'.format(cmd_line))
 
-    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
-    output = get_output(p)
+    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+                         stderr=RALLY_STDERR, shell=True)
+    output = get_output(p, test_name)
     task_id = get_task_id(output)
     logger.debug('task_id : {}'.format(task_id))
 
     if task_id is None:
-        logger.error("failed to retrieve task_id")
+        logger.error("Failed to retrieve task_id.")
         exit(-1)
 
     # check for result directory and create it otherwise
     if not os.path.exists(RESULTS_DIR):
-        logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+        logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR))
         os.makedirs(RESULTS_DIR)
 
     # write html report file
@@ -283,7 +329,7 @@ def run_task(test_name):
     # Push results in payload of testcase
     if args.report:
         logger.debug("Push result into DB")
-        push_results_to_db(json_data)
+        push_results_to_db("Rally_details", json_data)
 
     """ parse JSON operation result """
     if task_succeed(json_results):
@@ -293,23 +339,25 @@ def run_task(test_name):
 
 
 def main():
+    global SUMMARY
     # configure script
     if not (args.test_name in tests):
         logger.error('argument not valid')
         exit(-1)
 
+    SUMMARY = []
     creds_nova = functest_utils.get_credentials("nova")
-    nova_client = novaclient.Client('2',**creds_nova)
+    nova_client = novaclient.Client('2', **creds_nova)
     creds_neutron = functest_utils.get_credentials("neutron")
     neutron_client = neutronclient.Client(**creds_neutron)
     creds_keystone = functest_utils.get_credentials("keystone")
     keystone_client = keystoneclient.Client(**creds_keystone)
     glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
-                                                       endpoint_type='publicURL')
+                                                              endpoint_type='publicURL')
     glance_client = glanceclient.Client(1, glance_endpoint,
                                         token=keystone_client.auth_token)
     creds_cinder = functest_utils.get_credentials("cinder")
-    cinder_client = cinderclient.Client('2',creds_cinder['username'],
+    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                         creds_cinder['api_key'],
                                         creds_cinder['project_id'],
                                         creds_cinder['auth_url'],
@@ -317,9 +365,10 @@ def main():
 
     client_dict['neutron'] = neutron_client
 
-    volume_types = functest_utils.list_volume_types(cinder_client, private=False)
+    volume_types = functest_utils.list_volume_types(cinder_client,
+                                                    private=False)
     if not volume_types:
-        volume_type = functest_utils.create_volume_type(cinder_client, \
+        volume_type = functest_utils.create_volume_type(cinder_client,
                                                         CINDER_VOLUME_TYPE_NAME)
         if not volume_type:
             logger.error("Failed to create volume type...")
@@ -333,10 +382,11 @@ def main():
 
     image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
     if image_id == '':
-        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                            GLANCE_IMAGE_PATH))
-        image_id = functest_utils.create_glance_image(glance_client,\
-                                        GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+        image_id = functest_utils.create_glance_image(glance_client,
+                                                      GLANCE_IMAGE_NAME,
+                                                      GLANCE_IMAGE_PATH)
         if not image_id:
             logger.error("Failed to create the Glance image...")
             exit(-1)
@@ -345,7 +395,7 @@ def main():
              % (GLANCE_IMAGE_NAME, image_id))
     else:
         logger.debug("Using existing image '%s' with ID '%s'..." \
-                     % (GLANCE_IMAGE_NAME,image_id))
+                     % (GLANCE_IMAGE_NAME, image_id))
 
     if args.test_name == "all":
         for test_name in tests:
@@ -353,9 +403,66 @@ def main():
                     test_name == 'vm'):
                 run_task(test_name)
     else:
-        print(args.test_name)
+        logger.debug("Test name: " + args.test_name)
         run_task(args.test_name)
 
+    report = "\n"\
+             "                                                              \n"\
+             "                     Rally Summary Report\n"\
+             "+===================+============+===============+===========+\n"\
+             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
+             "+===================+============+===============+===========+\n"
+    payload = []
+
+    #for each scenario we draw a row for the table
+    total_duration = 0.0
+    total_nb_tests = 0
+    total_success = 0.0
+    for s in SUMMARY:
+        name = "{0:<17}".format(s['test_name'])
+        duration = float(s['overall_duration'])
+        total_duration += duration
+        duration = time.strftime("%M:%S", time.gmtime(duration))
+        duration = "{0:<10}".format(duration)
+        nb_tests = "{0:<13}".format(s['nb_tests'])
+        total_nb_tests += int(s['nb_tests'])
+        success = "{0:<10}".format(str(s['success'])+'%')
+        total_success += float(s['success'])
+        report += ""\
+            "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
+            "+-------------------+------------+---------------+-----------+\n"
+        payload.append({'module': name,
+                        'details': {'duration': s['overall_duration'],
+                                    'nb tests': s['nb_tests'],
+                                    'success': s['success']}})
+
+    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
+    total_duration_str2 = "{0:<10}".format(total_duration_str)
+    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
+    total_success_str = "{0:<10}".format(str(total_success)+'%')
+    report += "+===================+============+===============+===========+\n"
+    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
+        total_nb_tests_str + " | " + total_success_str + "|\n"
+    report += "+===================+============+===============+===========+\n"
+
+    logger.info("\n"+report)
+    payload.append({'summary': {'duration': total_duration,
+                                'nb tests': total_nb_tests,
+                                'nb success': total_success}})
+
+    # Generate json results for DB
+    #json_results = {"timestart": time_start, "duration": total_duration,
+    #                "tests": int(total_nb_tests), "success": int(total_success)}
+    #logger.info("Results: "+str(json_results))
+
+    if args.report:
+        logger.debug("Pushing Rally summary into DB...")
+        push_results_to_db("Rally", payload)
+
+    if args.noclean:
+        exit(0)
+
     logger.debug("Deleting image '%s' with ID '%s'..." \
                  % (GLANCE_IMAGE_NAME, image_id))
     if not functest_utils.delete_glance_image(nova_client, image_id):
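For reference, the summary payload pushed under case "Rally" is a list of one dict per executed scenario followed by a totals entry; a hedged illustration with invented values:

    payload = [
        {'module': 'authenticate     ',          # name padded to 17 chars
         'details': {'duration': '     24.20',   # "{:10.2f}"-formatted string
                     'nb tests': 7,
                     'success': '100.00'}},
        # ... one entry per scenario ...
        {'summary': {'duration': 180.5,
                     'nb tests': 42,
                     'nb success': '97.50'}},
    ]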
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 3c70e3880..6b1aae2eb 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -47,6 +47,9 @@ parser.add_argument("-r", "--report",
 parser.add_argument("-v", "--verbose",
                     help="Print verbose info about the progress",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 
 args = parser.parse_args()
 
@@ -70,7 +73,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
 ch.setFormatter(formatter)
 logger.addHandler(ch)
 
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
 if not os.path.exists(REPO_PATH):
     logger.error("Functest repository directory not found '%s'" % REPO_PATH)
     exit(-1)
@@ -97,14 +100,14 @@ GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
     get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
 
 
-def push_results_to_db(payload):
+def push_results_to_db(case, payload):
 
     url = TEST_DB + "/results"
     installer = functest_utils.get_installer_type(logger)
     scenario = functest_utils.get_scenario(logger)
     pod_name = functest_utils.get_pod_name(logger)
     # TODO pod_name hardcoded, info shall come from Jenkins
-    params = {"project_name": "functest", "case_name": "Rally",
+    params = {"project_name": "functest", "case_name": case,
               "pod_name": pod_name, "installer": installer,
               "version": scenario, "details": payload}
 
@@ -213,7 +216,7 @@ def run_task(test_name):
     # Push results in payload of testcase
     if args.report:
         logger.debug("Push result into DB")
-        push_results_to_db(json_data)
+        push_results_to_db("Rally_details", json_data)
 
     """ parse JSON operation result """
     if task_succeed(json_results):
@@ -232,22 +235,22 @@ def main():
         exit(-1)
 
     creds_nova = functest_utils.get_credentials("nova")
-    nova_client = novaclient.Client('2',**creds_nova)
+    nova_client = novaclient.Client('2', **creds_nova)
     creds_keystone = functest_utils.get_credentials("keystone")
     keystone_client = keystoneclient.Client(**creds_keystone)
     glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
-                                                       endpoint_type='publicURL')
+                                                              endpoint_type='publicURL')
     glance_client = glanceclient.Client(1, glance_endpoint,
                                         token=keystone_client.auth_token)
 
-
     image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
     if image_id == '':
-        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                            GLANCE_IMAGE_PATH))
-        image_id = functest_utils.create_glance_image(glance_client,\
-                                        GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+        image_id = functest_utils.create_glance_image(glance_client,
+                                                      GLANCE_IMAGE_NAME,
+                                                      GLANCE_IMAGE_PATH)
         if not image_id:
             logger.error("Failed to create the Glance image...")
             exit(-1)
@@ -256,8 +259,7 @@ def main():
              % (GLANCE_IMAGE_NAME, image_id))
     else:
         logger.debug("Using existing image '%s' with ID '%s'..." \
-                     % (GLANCE_IMAGE_NAME,image_id))
-
+                     % (GLANCE_IMAGE_NAME, image_id))
 
     if args.test_name == "all":
         for test_name in tests:
@@ -272,6 +274,9 @@ def main():
         print(args.test_name)
         run_task(args.test_name)
 
+    if args.noclean:
+        exit(0)
+
     logger.debug("Deleting image '%s' with ID '%s'..." \
                  % (GLANCE_IMAGE_NAME, image_id))
     if not functest_utils.delete_glance_image(nova_client, image_id):
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
index b8ed2716e..294669182 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -33,12 +33,21 @@ modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
 
 """ tests configuration """
 parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-m", "--mode", help="Tempest test mode [smoke, all]",
+parser.add_argument("-d", "--debug",
+                    help="Debug mode",
+                    action="store_true")
+parser.add_argument("-s", "--serial",
+                    help="Run tests in one thread",
+                    action="store_true")
+parser.add_argument("-m", "--mode",
+                    help="Tempest test mode [smoke, all]",
                     default="smoke")
 parser.add_argument("-r", "--report",
                     help="Create json result file",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 args = parser.parse_args()
 
@@ -289,12 +298,19 @@ def main():
     else:
         MODE = "--set "+args.mode
 
+    if args.serial:
+        MODE = "--concur 1 "+MODE
+
     if not os.path.exists(TEMPEST_RESULTS_DIR):
         os.makedirs(TEMPEST_RESULTS_DIR)
 
     create_tempest_resources()
     configure_tempest()
     run_tempest(MODE)
+
+    if args.noclean:
+        exit(0)
+
     free_tempest_resources()
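The new --serial switch in run_tempest.py above simply prepends "--concur 1" to the mode string, forcing a single worker thread. A reduced sketch of the flag handling (the "--set" form is the else branch from the hunk; argument values illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--serial", action="store_true")
    parser.add_argument("-m", "--mode", default="smoke")
    args = parser.parse_args(["--serial"])  # simulate 'run_tempest.py -s'

    MODE = "--set " + args.mode
    if args.serial:
        MODE = "--concur 1 " + MODE
    assert MODE == "--concur 1 --set smoke"  # later handed to run_tempest()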
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index f32314ac6..7d5f21360 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -49,12 +49,6 @@ general:
         image_file_name: cirros-0.3.4-x86_64-disk.img
         image_disk_format: qcow2
 
-    #Public network. Optional
-    neutron_public_net_name: net04_ext
-    neutron_public_subnet_name: net04_ext__subnet
-    neutron_public_subnet_cidr: 172.16.9.0/24
-    neutron_public_subnet_start: 172.16.9.130
-    neutron_public_subnet_end: 172.16.9.254
     #Private network for functest. Will be created by config_functest.py
     neutron_private_net_name: functest-net
     neutron_private_subnet_name: functest-subnet
@@ -172,7 +166,7 @@ results:
 # the execution order is important as some tests may be more destructive than others
 # and if vPing is failing is usually not needed to continue...
 test_exec_priority:
-    1: vping
+    1: vping_ssh
     2: vping_userdata
     3: tempest
     4: odl
@@ -237,7 +231,7 @@ test-dependencies:
     functest:
         vims:
             scenario: '(ocl)|(odl)|(nosdn)'
-        vping:
+        vping_ssh:
         vping_userdata:
             scenario: '(ocl)|(odl)|(nosdn)'
         tempest:
diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py
index 8eb85a808..5669a9900 100644
--- a/testcases/features/doctor.py
+++ b/testcases/features/doctor.py
@@ -71,7 +71,7 @@ def main():
         'd': details,
     })
     functest_utils.push_results_to_db(TEST_DB_URL,
-                                      'doctor-notification',
+                                      'doctor', 'doctor-notification',
                                       logger, pod_name,
                                       scenario, details)
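The renamed vping_ssh/vping_userdata entries keep the existing gating mechanism: a test's scenario dependency is a regex matched against the deployment scenario string. A small sketch (scenario string illustrative):

    import re

    scenario = "os-odl_l2-ha"
    runnable = bool(re.search('(ocl)|(odl)|(nosdn)', scenario))
    # runnable is True here, since 'odl' matches the dependency regex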
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
index 57ec1863f..94a4fa8a5 100644
--- a/testcases/functest_utils.py
+++ b/testcases/functest_utils.py
@@ -18,7 +18,6 @@ import socket
 import subprocess
 import sys
 import urllib2
-import yaml
 
 from git import Repo
 
@@ -39,6 +38,7 @@ def check_credentials():
     env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME']
     return all(map(lambda v: v in os.environ and os.environ[v], env_vars))
 
+
 def get_credentials(service):
     """Returns a creds dictionary filled with the following keys:
     * username
@@ -70,7 +70,6 @@ def get_credentials(service):
     return creds
 
 
-
 #*********************************************
 #   NOVA
 #*********************************************
@@ -134,10 +133,10 @@ def get_floating_ips(nova_client):
 
 def create_flavor(nova_client, flavor_name, ram, disk, vcpus):
     try:
-        flavor = nova_client.flavors.create(flavor_name,ram,vcpus,disk)
+        flavor = nova_client.flavors.create(flavor_name, ram, vcpus, disk)
     except Exception, e:
         print "Error [create_flavor(nova_client, '%s', '%s', '%s', "\
-            "'%s')]:" %(flavor_name,ram, disk, vcpus), e
+            "'%s')]:" % (flavor_name, ram, disk, vcpus), e
         return None
     return flavor.id
 
@@ -156,7 +155,7 @@ def create_floating_ip(neutron_client):
 
 def add_floating_ip(nova_client, server_id, floatingip_id):
     try:
-        nova_client.servers.add_floating_ip(server_id,floatingip_id)
+        nova_client.servers.add_floating_ip(server_id, floatingip_id)
         return True
     except Exception, e:
         print "Error [add_floating_ip(nova_client, '%s', '%s')]:" % \
@@ -182,8 +181,6 @@ def delete_floating_ip(nova_client, floatingip_id):
         return False
 
 
-
-
 #*********************************************
 #   NEUTRON
 #*********************************************
@@ -274,7 +271,7 @@ def create_neutron_subnet(neutron_client, name, cidr, net_id):
         return subnet['subnets'][0]['id']
     except Exception, e:
         print "Error [create_neutron_subnet(neutron_client, '%s', '%s', "\
-            "'%s')]:" %(name,cidr, net_id), e
+            "'%s')]:" % (name, cidr, net_id), e
         return False
 
@@ -300,7 +297,7 @@ def create_neutron_port(neutron_client, name, network_id, ip):
         return port['port']['id']
     except Exception, e:
         print "Error [create_neutron_port(neutron_client, '%s', '%s', "\
-            "'%s')]:" %(name,network_id, ip), e
+            "'%s')]:" % (name, network_id, ip), e
         return False
 
@@ -311,7 +308,7 @@ def update_neutron_net(neutron_client, network_id, shared=False):
         return True
     except Exception, e:
         print "Error [update_neutron_net(neutron_client, '%s', '%s')]:" % \
-            (network_id,str(shared)), e
+            (network_id, str(shared)), e
         return False
 
@@ -325,7 +322,7 @@ def update_neutron_port(neutron_client, port_id, device_owner):
         return port['port']['id']
     except Exception, e:
         print "Error [update_neutron_port(neutron_client, '%s', '%s')]:" % \
-            (port_id,device_owner), e
+            (port_id, device_owner), e
         return False
 
@@ -336,14 +333,15 @@ def add_interface_router(neutron_client, router_id, subnet_id):
         return True
     except Exception, e:
         print "Error [add_interface_router(neutron_client, '%s', '%s')]:" % \
-            (router_id,subnet_id), e
+            (router_id, subnet_id), e
         return False
 
+
 def add_gateway_router(neutron_client, router_id):
     ext_net_id = get_external_net_id(neutron_client)
     router_dict = {'network_id': ext_net_id}
     try:
-        neutron_client.add_gateway_router(router_id,router_dict)
+        neutron_client.add_gateway_router(router_id, router_dict)
         return True
     except Exception, e:
         print "Error [add_gateway_router(neutron_client, '%s')]:" % router_id, e
@@ -396,7 +394,7 @@ def remove_interface_router(neutron_client, router_id, subnet_id):
         return True
     except Exception, e:
         print "Error [remove_interface_router(neutron_client, '%s', '%s')]:" % \
-            (router_id,subnet_id), e
+            (router_id, subnet_id), e
         return False
 
@@ -409,7 +407,6 @@ def remove_gateway_router(neutron_client, router_id):
         return False
 
 
-
#*********************************************
 #   SEC GROUPS
 #*********************************************
@@ -424,44 +421,43 @@ def get_security_groups(neutron_client):
 
 
 def create_security_group(neutron_client, sg_name, sg_description):
-    json_body= {'security_group' : { 'name' : sg_name, \
-                    'description' : sg_description }}
+    json_body = {'security_group': {'name': sg_name,
+                                    'description': sg_description}}
     try:
         secgroup = neutron_client.create_security_group(json_body)
         return secgroup['security_group']
     except Exception, e:
         print "Error [create_security_group(neutron_client, '%s', '%s')]:" % \
-            (sg_name,sg_description), e
+            (sg_name, sg_description), e
         return False
 
 
 def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
-                         port_range_min = None, port_range_max = None):
-    if port_range_min == None and port_range_max == None:
-        json_body = { 'security_group_rule' : \
-                    { 'direction' : direction, \
-                    'security_group_id' : sg_id, \
-                    'protocol' : protocol } }
-    elif port_range_min != None and port_range_max != None:
-        json_body = { 'security_group_rule' : \
-                    { 'direction' : direction, \
-                    'security_group_id' : sg_id, \
-                    'port_range_min': port_range_min, \
-                    'port_range_max' : port_range_max, \
-                    'protocol' : protocol } }
+                         port_range_min=None, port_range_max=None):
+    if port_range_min is None and port_range_max is None:
+        json_body = {'security_group_rule': {'direction': direction,
+                                             'security_group_id': sg_id,
+                                             'protocol': protocol}}
+    elif port_range_min is not None and port_range_max is not None:
+        json_body = {'security_group_rule': {'direction': direction,
+                                             'security_group_id': sg_id,
+                                             'port_range_min': port_range_min,
+                                             'port_range_max': port_range_max,
+                                             'protocol': protocol}}
     else:
         print "Error [create_secgroup_rule(neutron_client, '%s', '%s', "\
-            "'%s', '%s', '%s', '%s')]:" %(neutron_client, sg_id, direction, \
-            port_range_min, port_range_max, protocol),\
-            " Invalid values for port_range_min, port_range_max"
+            "'%s', '%s', '%s', '%s')]:" % (neutron_client, sg_id, direction, \
+                                           port_range_min, port_range_max, protocol),\
+            " Invalid values for port_range_min, port_range_max"
         return False
     try:
         neutron_client.create_security_group_rule(json_body)
         return True
     except Exception, e:
         print "Error [create_secgroup_rule(neutron_client, '%s', '%s', "\
-            "'%s', '%s', '%s', '%s')]:" %(neutron_client, sg_id, direction, \
-            port_range_min, port_range_max, protocol), e
+            "'%s', '%s', '%s', '%s')]:" % (neutron_client, sg_id, direction,
+                                           port_range_min, port_range_max,
+                                           protocol), e
         return False
 
@@ -487,7 +483,7 @@ def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota):
         return True
     except Exception, e:
         print "Error [update_sg_quota(neutron_client, '%s', '%s', "\
-            "'%s')]:" %(tenant_id, sg_quota, sg_rule_quota), e
+            "'%s')]:" % (tenant_id, sg_quota, sg_rule_quota), e
         return False
 
@@ -500,8 +496,6 @@ def delete_security_group(neutron_client, secgroup_id):
         return False
 
 
-
-
 #*********************************************
 #   GLANCE
 #*********************************************
@@ -538,7 +532,7 @@ def create_glance_image(glance_client, image_name, file_path, public=True):
         return image.id
     except Exception, e:
         print "Error [create_glance_image(glance_client, '%s', '%s', "\
-            "'%s')]:" %(image_name, file_path, str(public)), e
+            "'%s')]:" % (image_name, file_path, str(public)), e
         return False
 
@@ -551,7 +545,6 @@ def delete_glance_image(nova_client, image_id):
         return False
 
 
-
 #*********************************************
 #   CINDER
 #*********************************************
@@ -594,11 +587,11 @@ def update_cinder_quota(cinder_client, tenant_id, vols_quota,
 
     try:
         quotas_default = cinder_client.quotas.update(tenant_id,
-                                                      **quotas_values)
+                                                     **quotas_values)
         return True
     except Exception, e:
         print "Error [update_cinder_quota(cinder_client, '%s', '%s', '%s'" \
-            "'%s')]:" %(tenant_id, vols_quota, snapshots_quota, gigabytes_quota), e
+            "'%s')]:" % (tenant_id, vols_quota, snapshots_quota, gigabytes_quota), e
         return False
 
@@ -628,7 +621,6 @@ def delete_volume_type(cinder_client, volume_type):
         return False
 
 
-
 #*********************************************
 #   KEYSTONE
 #*********************************************
@@ -701,7 +693,7 @@ def create_user(keystone_client, user_name, user_password,
         return user.id
     except Exception, e:
         print "Error [create_user(keystone_client, '%s', '%s', '%s'" \
-            "'%s')]:" %(user_name, user_password, user_email, tenant_id), e
+            "'%s')]:" % (user_name, user_password, user_email, tenant_id), e
         return False
 
@@ -711,7 +703,7 @@ def add_role_user(keystone_client, user_id, role_id, tenant_id):
         return True
     except Exception, e:
         print "Error [add_role_user(keystone_client, '%s', '%s'" \
-            "'%s')]:" %(user_id, role_id, tenant_id), e
+            "'%s')]:" % (user_id, role_id, tenant_id), e
         return False
 
@@ -827,14 +819,14 @@ def get_pod_name(logger=None):
         return "unknown-pod"
 
 
-def push_results_to_db(db_url, case_name, logger, pod_name,
+def push_results_to_db(db_url, project, case_name, logger, pod_name,
                        version, payload):
     """
     POST results to the Result target DB
     """
     url = db_url + "/results"
     installer = get_installer_type(logger)
-    params = {"project_name": "functest", "case_name": case_name,
+    params = {"project_name": project, "case_name": case_name,
               "pod_name": pod_name, "installer": installer,
               "version": version, "details": payload}
 
@@ -845,8 +837,8 @@ def push_results_to_db(db_url, case_name, logger, pod_name,
             logger.debug(r)
         return True
     except Exception, e:
-        print "Error [push_results_to_db('%s', '%s', '%s', '%s', '%s')]:" \
-            % (db_url, case_name, pod_name, version, payload), e
+        print "Error [push_results_to_db('%s', '%s', '%s', '%s', '%s', '%s')]:" \
+            % (db_url, project, case_name, pod_name, version, payload), e
         return False
 
@@ -861,7 +853,7 @@ def get_resolvconf_ns():
         ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         if ip:
-            result = sock.connect_ex((ip.group(),53))
+            result = sock.connect_ex((ip.group(), 53))
             if result == 0:
                 nameservers.append(ip.group())
         line = rconf.readline()
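With the extra project argument, push_results_to_db callers now name the owning project explicitly instead of it being hardwired to "functest". A hedged call sketch matching the new signature; the DB URL, pod name and payload are illustrative:

    import functest_utils

    functest_utils.push_results_to_db("http://db.example.org/api",  # db_url
                                      "functest",        # project (new arg)
                                      "vPing",           # case_name
                                      None,              # logger
                                      "opnfv-jump-2",    # pod_name
                                      "os-odl_l2-ha",    # version/scenario
                                      {'duration': 15})  # payload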
100644
--- a/testcases/tests/TestFunctestUtils.py
+++ b/testcases/tests/TestFunctestUtils.py
@@ -65,7 +65,10 @@ class TestFunctestUtils(unittest.TestCase):
         test = isTestRunnable('functest/odl', functest_yaml)
         self.assertTrue(test)
 
-        test = isTestRunnable('functest/vping', functest_yaml)
+        test = isTestRunnable('functest/vping_ssh', functest_yaml)
+        self.assertTrue(test)
+
+        test = isTestRunnable('functest/vping_userdata', functest_yaml)
         self.assertTrue(test)
 
         test = isTestRunnable('functest/tempest', functest_yaml)
@@ -82,7 +85,7 @@ class TestFunctestUtils(unittest.TestCase):
 
         test = generateTestcaseList(functest_yaml)
 
-        expected_list = "vping tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
+        expected_list = "vping_ssh vping_userdata tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
         self.assertEqual(test, expected_list)
 
     def tearDown(self):
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
index a8ac97f5c..1746d38bb 100644
--- a/testcases/vIMS/CI/vIMS.py
+++ b/testcases/vIMS/CI/vIMS.py
@@ -40,6 +40,9 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
 parser.add_argument("-r", "--report",
                     help="Create json result file",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 args = parser.parse_args()
 
 """ logging configuration """
@@ -134,7 +137,9 @@ def push_results():
     scenario = functest_utils.get_scenario(logger)
     pod_name = functest_utils.get_pod_name(logger)
 
-    functest_utils.push_results_to_db(db_url=DB_URL, case_name="vIMS",
+    functest_utils.push_results_to_db(db_url=DB_URL,
+                                      project="functest",
+                                      case_name="vIMS",
                                       logger=logger, pod_name=pod_name,
                                       version=scenario, payload=RESULTS)
 
@@ -461,6 +466,8 @@ def main():
             cfy.undeploy_manager()
 
     ############### GENERAL CLEANUP ################
+    if args.noclean:
+        exit(0)
 
     ks_creds = functest_utils.get_credentials("keystone")
diff --git a/testcases/vPing/CI/libraries/vPing2.py b/testcases/vPing/CI/libraries/vPing_ssh.py
index 1ce6dc9e5..3050aad57 100644
--- a/testcases/vPing/CI/libraries/vPing2.py
+++ b/testcases/vPing/CI/libraries/vPing_ssh.py
@@ -37,12 +37,15 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
 parser.add_argument("-r", "--report",
                     help="Create json result file",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 args = parser.parse_args()
 
 """ logging configuration """
 
-logger = logging.getLogger('vPing')
+logger = logging.getLogger('vPing_ssh')
 logger.setLevel(logging.DEBUG)
 
 ch = logging.StreamHandler()
@@ -198,6 +201,9 @@ def create_private_neutron_net(neutron):
 
 def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2,
             secgroup_id):
+    if args.noclean:
+        logger.debug("The OpenStack resources are not deleted.")
+        return True
 
     # delete both VMs
     logger.info("Cleaning up...")
@@ -288,6 +294,7 @@ def push_results(start_time_ts, duration, test_status):
         scenario = functest_utils.get_scenario(logger)
         pod_name = functest_utils.get_pod_name(logger)
         functest_utils.push_results_to_db(TEST_DB,
+                                          "functest",
                                           "vPing",
                                           logger, pod_name, scenario,
                                           payload={'timestart': start_time_ts,
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") parser.add_argument("-r", "--report", help="Create json result file", action="store_true") +parser.add_argument("-n", "--noclean", + help="Don't clean the created resources for this test.", + action="store_true") args = parser.parse_args() """ logging configuration """ -logger = logging.getLogger('vPing') +logger = logging.getLogger('vPing_userdata') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() @@ -192,6 +195,9 @@ def create_private_neutron_net(neutron): def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2): + if args.noclean: + logger.debug("The OpenStack resources are not deleted.") + return True # delete both VMs logger.info("Cleaning up...") @@ -276,6 +282,7 @@ def push_results(start_time_ts, duration, test_status): scenario = functest_utils.get_scenario(logger) pod_name = functest_utils.get_pod_name(logger) functest_utils.push_results_to_db(TEST_DB, + "functest", "vPing_userdata", logger, pod_name, scenario, payload={'timestart': start_time_ts, |