-rw-r--r--   docker/requirements.pip                                          2
-rwxr-xr-x   docker/run_tests.sh                                             26
-rw-r--r--   docs/userguide/index.rst                                         7
-rw-r--r--   testcases/Controllers/ODL/CI/odlreport2db.py                   143
-rw-r--r--   testcases/Controllers/ONOS/Teston/CI/onosfunctest.py             3
-rwxr-xr-x   testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py          18
-rwxr-xr-x [-rw-r--r--]   testcases/VIM/OpenStack/CI/libraries/run_rally.py  14
-rw-r--r--   testcases/VIM/OpenStack/CI/libraries/run_tempest.py              4
-rw-r--r--   testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml   1
-rw-r--r--   testcases/config_functest.yaml                                   2
-rw-r--r--   testcases/functest_utils.py                                     20
-rw-r--r--   testcases/vIMS/CI/vIMS.py                                        5
-rw-r--r--   testcases/vPing/CI/libraries/vPing.py                            4
-rw-r--r--   testcases/vPing/CI/libraries/vPing2.py                           4
14 files changed, 216 insertions, 37 deletions
diff --git a/docker/requirements.pip b/docker/requirements.pip
index 40e56b9b5..2389d60ac 100644
--- a/docker/requirements.pip
+++ b/docker/requirements.pip
@@ -23,3 +23,5 @@ robotframework-requests==0.3.8
robotframework-sshlibrary==2.1.1
configObj==5.0.6
Flask==0.10.1
+xmltodict==0.9.2
+
diff --git a/docker/run_tests.sh b/docker/run_tests.sh
index bae75e961..48e3741f8 100755
--- a/docker/run_tests.sh
+++ b/docker/run_tests.sh
@@ -21,6 +21,7 @@ usage:
where:
-h|--help show this help text
-r|--report push results to database (false by default)
+ -n|--no-clean do not clean OpenStack resources after test run
-t|--test run specific set of tests
<test_name> one or more of the following: vping,odl,rally,tempest,vims,onos,promise,ovno. Separated by comma.
@@ -35,15 +36,18 @@ examples:
# NOTE: Still not 100% working when running the tests
offline=false
report=""
+clean=true
# Get the list of runnable tests
# Check if we are in CI mode
function clean_openstack(){
- echo -e "\n\nCleaning Openstack environment..."
- python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py \
- --debug
- echo -e "\n\n"
+ if [ $clean == true ]; then
+ echo -e "\n\nCleaning Openstack environment..."
+ python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py \
+ --debug
+ echo -e "\n\n"
+ fi
}
function run_test(){
@@ -86,10 +90,12 @@ function run_test(){
ODL_PORT=$odl_port ODL_IP=$odl_ip NEUTRON_IP=$neutron_ip USR_NAME=$usr_name PASS=$password \
${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/CI/start_tests.sh
- # save ODL results
- odl_logs="${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/CI/logs"
- if [ -d ${odl_logs} ]; then
- cp -Rf ${odl_logs} ${FUNCTEST_CONF_DIR}/ODL/
+ # push results to the DB in case of CI
+ if [[ -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" ]]; then
+ odl_logs="/home/opnfv/functest/results/odl/logs/2"
+ odl_path="${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/CI"
+ node_name=$(env | grep NODE_NAME | cut -f2 -d'=')
+ python ${odl_path}/odlreport2db.py -x ${odl_logs}/output.xml -i ${INSTALLER_TYPE} -p ${node_name} -s ${DEPLOY_SCENARIO}
fi
;;
"tempest")
@@ -168,6 +174,9 @@ while [[ $# > 0 ]]
-r|--report)
report="-r"
;;
+ -n|--no-clean)
+ clean=false
+ ;;
-t|--test|--tests)
TEST="$2"
shift
@@ -184,7 +193,6 @@ done
tests_file="/home/opnfv/functest/conf/testcase-list.txt"
if [[ -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" ]] &&\
[[ -f $tests_file ]]; then
- echo "testcase-list.txt content:";cat $test_file; echo ""
arr_test=($(cat $tests_file))
else
arr_test=(vping tempest vims rally)
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst
index 994d76c2a..df4e200df 100644
--- a/docs/userguide/index.rst
+++ b/docs/userguide/index.rst
@@ -287,23 +287,26 @@ The script run_tests.sh has several options::
where:
-h|--help show this help text
-r|--report push results to database (false by default)
+ -n|--no-clean do not clean up OpenStack resources after test run
-t|--test run specific set of tests
<test_name> one or more of the following: vping,odl,rally,tempest,vims. Separated by comma.
examples:
run_tests.sh
run_tests.sh --test vping,odl
- run_tests.sh -t tempest,rally
+ run_tests.sh -t tempest,rally --no-clean
The -o option can be used to run the container offline (for example at a summit with no Internet connection). It is an experimental option.
The -r option is used by Continuous Integration to push the test results into a test collection database; see the next section for details. Do not use it in manual mode: your attempt would most likely be rejected anyway, as your POD must be declared in the database for the data to be collected.
+The -n option preserves all existing OpenStack resources after test case execution.
+
The -t option can be used to specify the list of tests you want to launch; by default Functest launches all its test suites in the following order: vPing, odl, Tempest, vIMS, Rally. You may launch a single test by using -t <the test you want to launch>
Within the Tempest test suite you can define which test cases to execute in your environment by editing the test_list.txt file before executing the run_tests.sh script.
-Please note that Functest includes cleaning mechanism in order to remove everything except what was present after a fresh install. If you create your own VMs, tenants, networks etc. and then launch Functest, they all will be deleted after executing the tests. Be carefull or comment the cleaning phase in run_test.sh (comment call to clean_openstack.py). However, be aware that Tempest and Rally create of lot of resources (users, tenants, networks, volumes etc.) that are not always properly cleaned, so this cleaning function has been set to keep the system as clean as possible after a full Functest run.
+Please note that Functest includes a cleaning mechanism in order to remove everything except what was present after a fresh install. If you create your own VMs, tenants, networks etc. and then launch Functest, they will all be deleted after the tests execute. Use the --no-clean option with run_tests.sh in order to preserve all existing resources. However, be aware that Tempest and Rally create a lot of resources (users, tenants, networks, volumes etc.) that are not always properly cleaned, so this cleaning function has been set to keep the system as clean as possible after a full Functest run.
You may also add your own test by adding a section into the function run_test()
diff --git a/testcases/Controllers/ODL/CI/odlreport2db.py b/testcases/Controllers/ODL/CI/odlreport2db.py
new file mode 100644
index 000000000..1538f79cf
--- /dev/null
+++ b/testcases/Controllers/ODL/CI/odlreport2db.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+#
+# Authors:
+# - peter.bandzi@cisco.com
+# - morgan.richomme@orange.com
+#
+# src: Peter Bandzi
+# https://github.com/pbandzi/parse-robot/blob/master/convert_robot_to_json.py
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script parses the output.xml file produced by the ODL robot
+# test suite, converts it to json and pushes the results to the test
+# result database.
+#
+#
+import xmltodict
+import json
+import sys
+import getopt
+import yaml
+
+
+sys.path.append("/home/opnfv/repos/functest/testcases")
+import functest_utils
+
+
+def usage():
+ print """Usage:
+ get-json-from-robot.py --xml=<output.xml> --pod=<pod_name>
+ --installer=<installer> --database=<Database URL>
+ --scenaro=SCENARIO
+ -x, --xml xml file generated by robot test
+ -p, --pod POD name where the test come from
+ -i, --installer
+ -s, --scenario
+ -h, --help this message
+ """
+ sys.exit(2)
+
+
+def populate_detail(test):
+ detail = {}
+ detail['test_name'] = test['@name']
+ detail['test_status'] = test['status']
+ detail['test_doc'] = test['doc']
+ return detail
+
+
+def parse_test(tests, details):
+ try:
+ for test in tests:
+ details.append(populate_detail(test))
+ except TypeError:
+ # tests is not iterable
+ details.append(populate_detail(tests))
+ return details
+
+
+def parse_suites(suites):
+ data = {}
+ details = []
+ try:
+ for suite in suites:
+ data['details'] = parse_test(suite['test'], details)
+ except TypeError:
+ # suites is not iterable
+ data['details'] = parse_test(suites['test'], details)
+ return data
+
+
+def main(argv):
+ try:
+ opts, args = getopt.getopt(argv,
+ 'x:p:i:s:h',
+ ['xml=', 'pod=',
+ 'installer=',
+ 'scenario=',
+ 'help'])
+ except getopt.GetoptError:
+ usage()
+
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage()
+ elif opt in ('-x', '--xml'):
+ xml_file = arg
+ elif opt in ('-p', '--pod'):
+ pod = arg
+ elif opt in ('-i', '--installer'):
+ installer = arg
+ elif opt in ('-s', '--scenario'):
+ scenario = arg
+ else:
+ usage()
+
+ with open(xml_file, "r") as myfile:
+ xml_input = myfile.read().replace('\n', '')
+
+ # dictionary populated with data from xml file
+ all_data = xmltodict.parse(xml_input)['robot']
+
+ data = parse_suites(all_data['suite']['suite'])
+ data['description'] = all_data['suite']['@name']
+ data['version'] = all_data['@generator']
+ data['test_project'] = "functest"
+ data['case_name'] = "ODL"
+ data['pod_name'] = pod
+ data['installer'] = installer
+
+ json.dumps(data, indent=4, separators=(',', ': '))
+
+ # Only used from container, we can set up absolute path
+    with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+        functest_yaml = yaml.safe_load(f)
+
+ database = functest_yaml.get("results").get("test_db_url")
+
+ try:
+ # example:
+ # python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml
+ # -i fuel
+ # -p opnfv-jump-2
+ # -s os-odl_l2-ha
+ functest_utils.push_results_to_db(database,
+ data['case_name'],
+ None,
+ data['pod_name'],
+ scenario,
+ data)
+    except Exception:
+ print("Error pushing results into Database '%s'" % sys.exc_info()[0])
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
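
For reference, a minimal sketch of the dictionary shape odlreport2db.py expects from xmltodict; the sample xml below is illustrative, not taken from a real ODL run:

    import xmltodict

    sample = """<robot generator="Robot 2.8.7">
      <suite name="root">
        <suite name="Basic">
          <test name="Ping Controller">
            <doc>Verify the controller answers</doc>
            <status status="PASS"/>
          </test>
        </suite>
      </suite>
    </robot>"""

    all_data = xmltodict.parse(sample)['robot']
    # With a single child suite (as here), all_data['suite']['suite'] is
    # a dict rather than a list, which is exactly the case the TypeError
    # fallbacks in parse_suites()/parse_test() handle. With several
    # child suites it would be a list and the for loops would be taken.
    suites = all_data['suite']['suite']
    print(suites['test']['@name'])   # Ping Controller
    print(all_data['@generator'])    # Robot 2.8.7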
diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
index ccd49b75b..64b8ac1de 100644
--- a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
+++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
@@ -95,8 +95,7 @@ def GetResult():
return payload
def SetOnosIp():
- onoscreds = ONOS_CONF_DIR + "/openstack.creds"
- cmd = "cat " + onoscreds + " | grep OS_AUTH_URL"
+ cmd = "keystone catalog --service network | grep publicURL"
cmd_output = os.popen(cmd).read()
print cmd_output
OC1=re.search(r"\d+\.\d+\.\d+\.\d+",cmd_output).group()
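
The new SetOnosIp() extracts the first IPv4 address from the keystone catalog output with a plain regex; a short sketch, with an illustrative publicURL line:

    import re

    # Example line from `keystone catalog --service network` output
    # (the address is made up):
    cmd_output = "| publicURL | http://172.16.0.2:9696/ |"
    OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group()
    print(OC1)  # 172.16.0.2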
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
index 17a6596d3..d2128d9ac 100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -49,12 +49,19 @@ parser.add_argument("-r", "--report",
parser.add_argument("-s", "--smoke",
help="Smoke test mode",
action="store_true")
+parser.add_argument("-v", "--verbose",
+ help="Print verbose info about the progress",
+ action="store_true")
args = parser.parse_args()
client_dict = {}
-FNULL = open(os.devnull, 'w')
+if args.verbose:
+ RALLY_STDERR = subprocess.STDOUT
+else:
+ RALLY_STDERR = open(os.devnull, 'w')
+
""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)
@@ -100,8 +107,6 @@ RESULTS_DIR = functest_yaml.get("general").get("directories"). \
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
get("openstack").get("neutron_public_net_name")
-FLOATING_SUBNET_CIDR = functest_yaml.get("general"). \
- get("openstack").get("neutron_public_subnet_cidr")
PRIVATE_NETWORK = functest_yaml.get("general"). \
get("openstack").get("neutron_private_net_name")
@@ -119,12 +124,12 @@ def push_results_to_db(payload):
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
# TODO pod_name hardcoded, info shall come from Jenkins
params = {"project_name": "functest", "case_name": "Rally",
"pod_name": pod_name, "installer": installer,
- "version": git_version, "details": payload}
+ "version": scenario, "details": payload}
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(params), headers=headers)
@@ -173,7 +178,6 @@ def build_task_args(test_file_name):
task_args['flavor_name'] = FLAVOR_NAME
task_args['glance_image_location'] = GLANCE_IMAGE_PATH
task_args['floating_network'] = FLOATING_NETWORK
- task_args['floating_subnet_cidr'] = FLOATING_SUBNET_CIDR
task_args['netid'] = functest_utils.get_network_id(client_dict['neutron'],
PRIVATE_NETWORK).encode('ascii', 'ignore')
task_args['tmpl_dir'] = TEMPLATE_DIR
@@ -211,7 +215,7 @@ def run_task(test_name):
"--task-args \"{}\" ".format(build_task_args(test_name))
logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=FNULL, shell=True)
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
result = ""
while p.poll() is None:
l = p.stdout.readline()
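
A minimal sketch of the stderr routing the new --verbose flag introduces in both rally runners (the rally command line is illustrative):

    import os
    import subprocess

    verbose = False
    # With --verbose, rally's stderr is merged into the captured stdout
    # stream; otherwise it is silently discarded.
    stderr_dest = subprocess.STDOUT if verbose else open(os.devnull, 'w')
    p = subprocess.Popen("rally task list", stdout=subprocess.PIPE,
                         stderr=stderr_dest, shell=True)
    out, _ = p.communicate()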
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index a7f1db13a..3c70e3880 100644..100755
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -44,11 +44,17 @@ parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
+parser.add_argument("-v", "--verbose",
+ help="Print verbose info about the progress",
+ action="store_true")
args = parser.parse_args()
+if args.verbose:
+ RALLY_STDERR = subprocess.STDOUT
+else:
+ RALLY_STDERR = open(os.devnull, 'w')
-FNULL = open(os.devnull, 'w')
""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)
@@ -95,12 +101,12 @@ def push_results_to_db(payload):
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
# TODO pod_name hardcoded, info shall come from Jenkins
params = {"project_name": "functest", "case_name": "Rally",
"pod_name": pod_name, "installer": installer,
- "version": git_version, "details": payload}
+ "version": scenario, "details": payload}
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(params), headers=headers)
@@ -164,7 +170,7 @@ def run_task(test_name):
logger.debug('Scenario fetched from : {}'.format(test_file_name))
cmd_line = "rally task start --abort-on-sla-failure {}".format(test_file_name)
logger.debug('running command line : {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=FNULL, shell=True)
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
result = ""
while p.poll() is None:
l = p.stdout.readline()
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
index 1f5d34cf7..ee0a4bea8 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -104,12 +104,12 @@ def push_results_to_db(payload, module, pod_name):
# TODO move DB creds into config file
url = TEST_DB + "/results"
installer = functest_utils.get_installer_type(logger)
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
logger.info("Pushing results to DB: '%s'." % url)
params = {"project_name": "functest", "case_name": "Tempest",
"pod_name": str(pod_name), 'installer': installer,
- "version": git_version, 'details': payload}
+ "version": scenario, 'details': payload}
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(params), headers=headers)
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
index 6debb415a..dd8b282a6 100644
--- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
@@ -15,7 +15,6 @@
public_net: {{ floating_network }}
image: {{ image_name }}
flavor: {{ flavor_name }}
- cidr: {{ floating_subnet_cidr }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
runner:
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index ccf07976a..6f257d296 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -164,7 +164,7 @@ promise:
flavor_disk: 0
results:
- test_db_url: http://213.77.62.197
+ test_db_url: http://testresults.opnfv.org/testapi
# to be maintained...
# the execution order is important as some tests may be more destructive than others
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
index 9c618f75a..ed9e36c89 100644
--- a/testcases/functest_utils.py
+++ b/testcases/functest_utils.py
@@ -658,11 +658,25 @@ def get_installer_type(logger=None):
except KeyError:
if logger:
logger.error("Impossible to retrieve the installer type")
- installer = "Unkown"
+ installer = "Unknown_installer"
return installer
+def get_scenario(logger=None):
+ """
+ Get scenario
+ """
+ try:
+ scenario = os.environ['DEPLOY_SCENARIO']
+ except KeyError:
+ if logger:
+ logger.error("Impossible to retrieve the scenario")
+ scenario = "Unknown_scenario"
+
+ return scenario
+
+
def get_pod_name(logger=None):
"""
Get PoD Name from env variable NODE_NAME
@@ -677,12 +691,12 @@ def get_pod_name(logger=None):
def push_results_to_db(db_url, case_name, logger, pod_name,
- git_version, payload):
+ version, payload):
url = db_url + "/results"
installer = get_installer_type(logger)
params = {"project_name": "functest", "case_name": case_name,
"pod_name": pod_name, "installer": installer,
- "version": git_version, "details": payload}
+ "version": version, "details": payload}
headers = {'Content-Type': 'application/json'}
try:
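
The net effect of the functest_utils changes, sketched with illustrative values (the pod and installer names are taken from the example in odlreport2db.py above, not from a live setup):

    import os

    # get_scenario() mirrors the DEPLOY_SCENARIO environment variable,
    # falling back to a sentinel when it is unset:
    os.environ['DEPLOY_SCENARIO'] = 'os-odl_l2-ha'
    scenario = os.environ.get('DEPLOY_SCENARIO', 'Unknown_scenario')

    # Callers now pass the scenario where the git branch used to go, so
    # the "version" field of the pushed record carries the deploy
    # scenario:
    params = {"project_name": "functest", "case_name": "vPing",
              "pod_name": "opnfv-jump-2", "installer": "fuel",
              "version": scenario, "details": {"status": "OK"}}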
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
index eae821ad4..bc15676e5 100644
--- a/testcases/vIMS/CI/vIMS.py
+++ b/testcases/vIMS/CI/vIMS.py
@@ -177,9 +177,10 @@ def test_clearwater():
if args.report:
logger.debug("Push result into DB")
logger.debug("Pushing results to DB....")
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
functest_utils.push_results_to_db(db_url=TEST_DB, case_name="vIMS",
- logger=logger, pod_name=functest_utils.get_pod_name(logger), git_version=git_version,
+ logger=logger, pod_name=functest_utils.get_pod_name(logger),
+ version=scenario,
payload={'orchestrator': {'duration': CFY_DEPLOYMENT_DURATION,
'result': ""},
'vIMS': {'duration': CW_DEPLOYMENT_DURATION,
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing.py
index aa3e400ca..dc2d2abec 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing.py
@@ -479,11 +479,11 @@ def main():
# Don't report if userdata is not supported
logger.debug("Push result into DB")
# TODO check path result for the file
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
functest_utils.push_results_to_db(TEST_DB,
"vPing_userdata",
- logger, pod_name, git_version,
+ logger, pod_name, scenario,
payload={'timestart': start_time_ts,
'duration': duration,
'status': test_status})
diff --git a/testcases/vPing/CI/libraries/vPing2.py b/testcases/vPing/CI/libraries/vPing2.py
index 50000868c..b103febf1 100644
--- a/testcases/vPing/CI/libraries/vPing2.py
+++ b/testcases/vPing/CI/libraries/vPing2.py
@@ -508,11 +508,11 @@ def main():
if args.report:
logger.debug("Push result into DB")
# TODO check path result for the file
- git_version = functest_utils.get_git_branch(REPO_PATH)
+ scenario = functest_utils.get_scenario(logger)
pod_name = functest_utils.get_pod_name(logger)
functest_utils.push_results_to_db(TEST_DB,
"vPing",
- logger, pod_name, git_version,
+ logger, pod_name, scenario,
payload={'timestart': start_time_ts,
'duration': duration,
'status': test_status})