author    Morgan Richomme <morgan.richomme@orange.com>  2015-10-01 11:03:09 +0200
committer Morgan Richomme <morgan.richomme@orange.com>  2015-10-01 11:03:09 +0200
commit    19c7d0ecfd453d5e631466401e454d4da3314f9b (patch)
tree      c1dac5451af1760f963d709772b5062ba2741983 /testcases
parent    5901dd9a47b08c162acb631bf5acaeeb1b7ce745 (diff)
parent    96bf9abe9b1b26a79dcc86900e8eb33d8544e773 (diff)
Merge branch 'master' into stable/arno (arno.2015.2.0)
Diffstat (limited to 'testcases')
-rwxr-xr-x  testcases/Controllers/ODL/CI/start_tests.sh            |  21
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/Readme.txt        |   5
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/dependencies/onos |  23
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/onosfunctest.py   | 203
-rw-r--r--  testcases/Dashboard/dashboard_utils.py                 | 251
-rw-r--r--  testcases/Dashboard/functest2Dashboard.py              |  81
-rw-r--r--  testcases/Dashboard/odl2Dashboard.py                   |  52
-rw-r--r--  testcases/Dashboard/rally2Dashboard.py                 |  52
-rw-r--r--  testcases/Dashboard/tempest2Dashboard.py               |  52
-rw-r--r--  testcases/Dashboard/vPing2Dashboard.py                 |  94
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_rally.py      |   7
-rw-r--r--  testcases/config_functest.py                           |  50
-rw-r--r--  testcases/config_functest.yaml                         |   3
-rw-r--r--  testcases/functest_utils.py                            |  95
-rw-r--r--  testcases/vIMS/vIMS.md                                 |   3
-rw-r--r--  testcases/vPing/CI/libraries/vPing.py                  | 318
16 files changed, 1186 insertions(+), 124 deletions(-)
diff --git a/testcases/Controllers/ODL/CI/start_tests.sh b/testcases/Controllers/ODL/CI/start_tests.sh
index 7bc0b513b..56f4d564d 100755
--- a/testcases/Controllers/ODL/CI/start_tests.sh
+++ b/testcases/Controllers/ODL/CI/start_tests.sh
@@ -56,7 +56,11 @@ else
fi
# Change openstack password for admin tenant in neutron suite
-sed -i "s/\"password\": \"admin\"/\"password\": \"${PASS}\"/" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "s/\"password\": \".*\"/\"password\": \"${PASS}\"/" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+
+# Add Start Suite and Teardown Suite
+sed -i "/^Documentation.*/a Suite Teardown Stop Suite" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "/^Documentation.*/a Suite Setup Start Suite" ${BASEDIR}/integration/test/csit/suites/openstack/neutron/__init__.robot
if source $BASEDIR/venv/bin/activate; then
echo -e "${green}Python virtualenv activated.${nc}"
@@ -72,7 +76,7 @@ cp -vf $BASEDIR/custom_tests/neutron/* $BASEDIR/integration/test/csit/suites/ope
# The list of tests is specified in test_list.txt
# those are relative paths to test directories from the integration suite
echo -e "${green}Executing chosen tests.${nc}"
-test_num=1
+test_num=0
while read line
do
# skip comments
@@ -80,16 +84,23 @@ do
# skip empty lines
[[ -z "${line}" ]] && continue
+ ((test_num++))
echo -e "${light_green}Starting test: $line ${nc}"
pybot -v OPENSTACK:${NEUTRON_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${ODL_IP} ${BASEDIR}/$line
mkdir -p $BASEDIR/logs/${test_num}
mv log.html $BASEDIR/logs/${test_num}/
mv report.html $BASEDIR/logs/${test_num}/
mv output.xml $BASEDIR/logs/${test_num}/
- ((test_num++))
done < ${BASEDIR}/test_list.txt
+# create final report which includes all partial test reports
+for i in $(seq $test_num); do
+ rebot_params="$rebot_params $BASEDIR/logs/$i/output.xml"
+done
+
+echo -e "${green}Final report is located:${nc}"
+rebot $rebot_params
+
+# deactivate venv
echo -e "${green}Deactivate venv.${nc}"
deactivate
-
-# Now we can copy output.xml, log.html and report.xml files generated by robot.
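
The hunk above stores each suite's output under $BASEDIR/logs/<test_num> and then merges the partial output.xml files with rebot into one final report. For reference, a minimal sketch of the same merge using the Robot Framework Python API; the logs/*/output.xml layout mirrors what the script produces, and the paths here are illustrative only:

    # Merge per-suite Robot Framework outputs into a single log/report.
    import glob

    from robot import rebot  # same engine as the `rebot` CLI called in the script

    partial_outputs = sorted(glob.glob("logs/*/output.xml"))
    if partial_outputs:
        # writes logs/log.html and logs/report.html covering all executed suites
        rebot(*partial_outputs, outputdir="logs", log="log.html", report="report.html")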
diff --git a/testcases/Controllers/ONOS/Teston/CI/Readme.txt b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
new file mode 100644
index 000000000..7648b2a98
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
@@ -0,0 +1,5 @@
+1. This is a basic ONOS test run; we will keep improving it.
+2. This test includes two suites:
+(1) Northbound test (network/subnet/port create/update/delete)
+(2) OVSDB test: default configuration, OpenFlow connection, VMs coming online.
+3. Later we will build a framework for these tests
\ No newline at end of file
diff --git a/testcases/Controllers/ONOS/Teston/CI/dependencies/onos b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
new file mode 100644
index 000000000..d4d59e0f7
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
@@ -0,0 +1,23 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# ONOS remote command-line client.
+# -----------------------------------------------------------------------------
+
+[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
+. /root/.bashrc
+. $ONOS_ROOT/tools/build/envDefaults
+. $ONOS_ROOT/tools/test/bin/find-node.sh
+
+[ "$1" = "-w" ] && shift && onos-wait-for-start $1
+
+[ -n "$1" ] && OCI=$(find_node $1) && shift
+
+if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then
+ # Use Karaf client only if we can and are allowed to
+ unset KARAF_HOME
+ client -h $OCI -u karaf "$@" 2>/dev/null
+else
+ # Otherwise use raw ssh; strict checking is off for dev environments only
+ #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@"
+ sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@"
+fi
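
The wrapper above prefers the local Karaf client when it is available and ONOS_USE_SSH is unset, and otherwise falls back to sshpass/ssh on port 8101 as user karaf. A rough, hypothetical Python equivalent of that selection logic (host and arguments are placeholders):

    # Sketch only: mirror the client-vs-ssh fallback of the `onos` wrapper.
    import os
    import subprocess
    from distutils.spawn import find_executable

    def onos_cli(host, *args):
        if find_executable("client") and not os.environ.get("ONOS_USE_SSH"):
            # local Karaf client is present and allowed
            cmd = ["client", "-h", host, "-u", "karaf"] + list(args)
        else:
            # raw ssh fallback; password auth via sshpass, as in the script
            cmd = ["sshpass", "-p", "karaf", "ssh", "-l", "karaf",
                   "-p", "8101", host] + list(args)
        return subprocess.call(cmd)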
diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
new file mode 100644
index 000000000..72fa4ae1f
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
@@ -0,0 +1,203 @@
+"""
+Description: This test runs the ONOS TestON VTN scripts
+
+List of test cases:
+CASE1 - Northbound NBI test network/subnet/ports
+CASE2 - OVSDB test & default configuration & VM going online
+
+lanqinglong@huawei.com
+"""
+import os
+import os.path
+import time
+import pexpect
+import re
+import sys
+
+def SSHlogin(ipaddr,username,password):
+ login = pexpect.spawn('ssh %s@%s'%(username,ipaddr))
+ index = 0
+ while index != 2:
+ index = login.expect(['assword:','yes/no','#|$',pexpect.EOF])
+ if index == 0:
+ login.sendline(password)
+ login.interact()
+ if index == 1:
+ login.sendline('yes')
+ print "Login Success!"
+
+def AddKarafUser(ipaddr,username,password):
+ print '\033[1;31;40m'
+ print "Now Adding karaf user to OC1..."
+ print "\033[0m"
+ login = pexpect.spawn("ssh -l %s -p 8101 %s"%(username,ipaddr))
+ index = 0
+ while index != 2:
+ index = login.expect(['assword:','yes/no',pexpect.EOF])
+ if index == 0:
+ login.sendline(password)
+ login.sendline("logout")
+ index = login.expect(["closed",pexpect.EOF])
+ if index == 0:
+ print "Add SSH Known Host Success!"
+ else:
+ print "Add SSH Known Host Failed! Please Check!"
+ login.interact()
+ if index == 1:
+ login.sendline('yes')
+
+def DownLoadCode():
+ print '\033[1;31;40m'
+ print "Now loading test codes!Please wait in patient..."
+ print "\033[0m"
+ os.system("git clone https://github.com/sunyulin/OnosSystemTest.git")
+ time.sleep(1)
+ os.system("git clone https://gerrit.onosproject.org/onos")
+ time.sleep(1)
+ print "Done!"
+
+def CleanEnv():
+ print '\033[1;31;40m'
+ print "Now Cleaning test environment"
+ print "\033[0m"
+ os.system("sudo apt-get install -y mininet")
+ os.system("OnosSystemTest/TestON/bin/cleanup.sh")
+ time.sleep(5)
+ print "Done!"
+
+def OnosPushKeys(cmd,password):
+ print '\033[1;31;40m'
+ print "Now Pushing Onos Keys:"+cmd
+ print "\033[0m"
+ Pushkeys = pexpect.spawn(cmd)
+ Result = 0
+ while Result != 2:
+ Result = Pushkeys.expect(["yes","password",pexpect.EOF,pexpect.TIMEOUT])
+ if (Result == 0):
+ Pushkeys.sendline("yes")
+ if (Result == 1):
+ Pushkeys.sendline(password)
+ if (Result == 3):
+ print("Push keys Error!")
+ print "Done!"
+
+def AddEnvIntoBashrc(name):
+ print '\033[1;31;40m'
+ print "Now Adding bash environment"
+ print "\033[0m"
+ fileopen = open("/etc/profile",'r')
+ findContext = 1
+ while findContext:
+ findContext = fileopen.readline()
+ result = findContext.find('dev/bash_profile')
+ if result != -1:
+ break
+    fileopen.close()
+ if result == -1:
+ envAdd = open("/etc/profile",'a+')
+ envAdd.writelines("\nsource /root/onos/tools/dev/bash_profile")
+ envAdd.close()
+
+def SetEnvVar(masterpass,agentpass):
+ print '\033[1;31;40m'
+ print "Now Setting test environment"
+ print "\033[0m"
+ os.environ["OCT"] = "10.1.0.1"
+ os.environ["OC1"] = "10.1.0.50"
+ os.environ["OC2"] = "10.1.0.51"
+ os.environ["OC3"] = "10.1.0.52"
+ os.environ["OCN"] = "10.1.0.53"
+ os.environ["OCN2"] = "10.1.0.54"
+ os.environ["localhost"] = "10.1.0.1"
+ os.system("sudo pip install configobj")
+ os.system("sudo apt-get install -y sshpass")
+ OnosPushKeys("onos-push-keys 10.1.0.1",masterpass)
+ OnosPushKeys("onos-push-keys 10.1.0.50",agentpass)
+ OnosPushKeys("onos-push-keys 10.1.0.53",agentpass)
+ OnosPushKeys("onos-push-keys 10.1.0.54",agentpass)
+
+def Gensshkey():
+ print '\033[1;31;40m'
+ print "Now Generating SSH keys..."
+ print "\033[0m"
+ os.system("rm -rf ~/.ssh/*")
+ keysub = pexpect.spawn("ssh-keygen -t rsa")
+ Result = 0
+ while Result != 2:
+ Result = keysub.expect(["Overwrite","Enter",pexpect.EOF,pexpect.TIMEOUT])
+ if Result == 0:
+ keysub.sendline("y")
+ if Result == 1:
+ keysub.sendline("\n")
+ if Result == 3:
+            print("Generate SSH key failed.")
+ print "Done!"
+
+def ChangeOnosName(user,password):
+ print '\033[1;31;40m'
+ print "Now Changing ONOS name&password"
+ print "\033[0m"
+ line = open("onos/tools/build/envDefaults",'r').readlines()
+ lenall = len(line)-1
+ for i in range(lenall):
+ if "ONOS_USER=" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_GROUP" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_PWD" in line[i]:
+ line[i]=line[i].replace("rocks",password)
+ NewFile = open("onos/tools/build/envDefaults",'w')
+ NewFile.writelines(line)
+    NewFile.close()
+ print "Done!"
+
+def ChangeTestCasePara(testcase,user,password):
+ print '\033[1;31;40m'
+ print "Now Changing " + testcase + " name&password"
+ print "\033[0m"
+ filepath = "OnosSystemTest/TestON/tests/" + testcase + "/" + testcase + ".topo"
+ line = open(filepath,'r').readlines()
+ lenall = len(line)-1
+ for i in range(lenall-2):
+ if ("localhost" in line[i]) or ("OCT" in line[i]):
+ line[i+1]=re.sub(">\w+",">"+user,line[i+1])
+ line[i+2]=re.sub(">\w+",">"+password,line[i+2])
+ if "OC1" in line [i] \
+ or "OC2" in line [i] \
+ or "OC3" in line [i] \
+ or "OCN" in line [i] \
+ or "OCN2" in line[i]:
+ line[i+1]=re.sub(">\w+",">root",line[i+1])
+ line[i+2]=re.sub(">\w+",">root",line[i+2])
+ NewFile = open(filepath,'w')
+ NewFile.writelines(line)
+    NewFile.close()
+ print "Done!"
+
+def RunScript(testname,masterusername,masterpassword):
+ ChangeTestCasePara(testname,masterusername,masterpassword)
+ runtest = "OnosSystemTest/TestON/bin/cli.py run " + testname
+ os.system(runtest)
+ print "Done!"
+
+if __name__=="__main__":
+
+    # User & password of the machine running Compass; modify these as needed
+ masterusername = "root"
+ masterpassword = "root"
+
+    # The config below does not need to be changed
+ agentusername = "root"
+ agentpassword = "root"
+
+ print "Test Begin....."
+ Gensshkey()
+ AddKarafUser("10.1.0.50","karaf","karaf")
+ AddEnvIntoBashrc("source onos/tools/dev/bash_profile")
+ SSHlogin("10.1.0.1",masterusername,masterpassword)
+ ChangeOnosName(agentusername,agentpassword)
+ DownLoadCode()
+ CleanEnv()
+ SetEnvVar(masterpassword,agentpassword)
+ RunScript("FUNCvirNetNB",masterusername,masterpassword)
+ RunScript("FUNCovsdbtest",masterusername,masterpassword)
diff --git a/testcases/Dashboard/dashboard_utils.py b/testcases/Dashboard/dashboard_utils.py
new file mode 100644
index 000000000..90562855a
--- /dev/null
+++ b/testcases/Dashboard/dashboard_utils.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to get data from test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import json
+import requests
+from vPing2Dashboard import format_vPing_for_dashboard
+
+
+class TestCriteria:
+
+ """ describes the test criteria platform """
+ def __init__(self):
+ self.project = ''
+ self.testcase = ''
+ self.pod_id = -1
+ self.duration = 'all'
+ self.version = 'all'
+ self.installer = 'all'
+
+ def setCriteria(self, project, testcase, pod_id,
+ duration, version, installer):
+ self.project = project
+ self.testcase = testcase
+ self.pod_id = pod_id
+ self.duration = duration
+ self.version = version
+ self.installer = installer
+
+ def format_criteria(self, name):
+ if(name == 'all' or name == 0):
+ return ""
+ else:
+ if(type(name) == int):
+ return "-" + str(name)
+ else:
+ return "-" + name
+
+ def format(self):
+ pod_name = self.format_criteria(self.pod_id)
+ version_name = self.format_criteria(self.version)
+ installer_name = self.format_criteria(self.installer)
+ duration_name = self.format_criteria(self.duration)
+ try:
+ fileName = "result-" + self.project + "-" + self.testcase + \
+ pod_name + version_name + installer_name + \
+ duration_name + ".json"
+ except:
+ print "Impossible to format json file name"
+ return fileName
+
+
+def get_pods(db_url):
+ # retrieve the list of pods
+ url = db_url + "/pods"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ pods_data = json.loads(db_data.text)
+ # Get results
+ pods = pods_data['pods']
+ pods_table = []
+ for pod in pods:
+            # cast to int because otherwise the API returns 1.0
+ # TODO check format with API
+ pods_table.append(int(pod['_id']))
+
+ pods_table.append(0) # 0 means all the pods here
+ return pods_table
+ except:
+ print "Error retrieving the list of PODs"
+ return None
+
+
+def get_versions(db_url):
+ # retrieve the list of versions
+ # TODO not supported in API yet
+ url = db_url + "/versions"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ versions_data = json.loads(db_data.text)
+ # Get results
+ versions = versions_data['versions']
+
+ versions_table = []
+ for version in versions:
+ versions_table.append(version['version'])
+
+ versions_table.append('all')
+
+ return versions_table
+ except:
+ print "Error retrieving the list of OPNFV versions"
+ return None
+
+
+def get_installers(db_url):
+ # retrieve the list of installers
+ # TODO not supported in API yet
+ url = db_url + "/installers"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ installers_data = json.loads(db_data.text)
+ # Get results
+ installers = installers_data['installers']
+
+ installers_table = []
+ for installer in installers:
+ installers_table.append(installer['installer'])
+
+ installers_table.append('all')
+
+        return installers_table
+ except:
+ print "Error retrieving the list of OPNFV installers"
+ return None
+
+
+def get_testcases(db_url, project):
+    # retrieve the list of test cases
+ url = db_url + "/test_projects/" + project + "/cases"
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ try:
+ db_data = requests.get(url, headers=headers)
+ # Get result as a json object
+ testcases_data = json.loads(db_data.text)
+ # Get results
+ testcases = testcases_data['test_cases']
+ testcases_table = []
+ for testcase in testcases:
+ testcases_table.append(testcase['name'])
+
+ testcases_table.append('all')
+
+ return testcases_table
+ except:
+ print "Error retrieving the list of testcases"
+ return None
+
+
+def get_results(db_url, test_criteria):
+
+ # use param to filter request to result DB
+    # if not specified => no filter
+ # filter criteria:
+ # - POD
+ # - versions
+ # - installers
+ # - testcase
+ # - test projects
+ # - timeframe (last 30 days, 365 days, since beginning of the project)
+ # e.g.
+ # - vPing tests since 2 months
+ # - Tempest tests on LF POD2 fuel based / Arno stable since the beginning
+ # - yardstick tests on any POD since 30 days
+ # - Qtip tests on dell-test1 POD
+ #
+ # params = {"pod_id":pod_id, "testcase":testcase}
+ # filter_date = days # data from now - days
+
+ # test_project = test_criteria.project
+ testcase = test_criteria.testcase
+ # duration_frame = test_criteria.duration
+ # version = test_criteria.version
+ # installer_type = test_criteria.installer
+ pod_id = test_criteria.pod_id
+
+ pod_criteria = ""
+ if (pod_id > 0):
+ pod_criteria = "&pod=" + str(pod_id)
+
+ # TODO complete params (installer type, testcase, version )
+ # need API to be up to date
+ # we assume that criteria could be used at the API level
+ # no need to processing on date for instance
+ params = {"pod_id": pod_id}
+
+ # Build headers
+ headers = {'Content-Type': 'application/json'}
+
+ url = db_url + "/results?case=" + testcase + pod_criteria
+
+ # Send Request to Test DB
+ myData = requests.get(url, data=json.dumps(params), headers=headers)
+ # Get result as a json object
+ myNewData = json.loads(myData.text)
+
+ # Get results
+ myDataResults = myNewData['test_results']
+
+ return myDataResults
+
+
+def generateJson(test_name, test_case, db_url):
+ # pod_id = 1
+ # test_version = 'Arno master'
+ # test_installer = 'fuel'
+ # test_retention = 30
+
+ pods = get_pods(db_url)
+ versions = ['ArnoR1', 'ArnoSR1', 'all'] # not available in the API yet
+ installers = ['fuel', 'foreman', 'all'] # not available in the API yet
+ test_durations = [90, 365, 'all'] # not available through the API yet
+
+ # For all the PoDs
+ for pod in pods:
+ # all the versions
+ for version in versions:
+ # all the installers
+ for installer in installers:
+ # all the retention time
+ for test_duration in test_durations:
+
+ criteria = TestCriteria()
+ criteria.setCriteria(test_name, test_case, pod,
+ test_duration, version, installer)
+ format_data_for_dashboard(criteria)
+
+
+def format_data_for_dashboard(criteria):
+
+ # Depending on the use case, json for dashboarding is customized
+ # depending on the graph you want to show
+
+ if (criteria.testcase == "vPing"):
+ format_vPing_for_dashboard(criteria)
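
For context, this is how the pieces above fit together: TestCriteria.format() derives the output file name from the filter values, and get_results() queries "<test_db_url>/results?case=<testcase>[&pod=<id>]". An illustrative use, assuming the module is importable and the test DB from config_functest.yaml is reachable:

    # Sketch: build a criteria, preview its json file name, fetch matching results.
    from dashboard_utils import TestCriteria, get_results

    criteria = TestCriteria()
    criteria.setCriteria("functest", "vPing", 1, 90, "ArnoSR1", "fuel")
    print(criteria.format())  # result-functest-vPing-1-ArnoSR1-fuel-90.json

    results = get_results("http://213.77.62.197", criteria)  # needs a live test DB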
diff --git a/testcases/Dashboard/functest2Dashboard.py b/testcases/Dashboard/functest2Dashboard.py
new file mode 100644
index 000000000..c03ddbd14
--- /dev/null
+++ b/testcases/Dashboard/functest2Dashboard.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to get data from test DB
+# and format them into a json format adapted for a dashboard
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+
+formatter = logging.Formatter('%(asctime)s - %(name)s -\
+ %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def main():
+ try:
+ logger.info("Functest test result generation for dashboard")
+
+ # TODO create the loop to provide all the json files
+ logger.debug("Retrieve all the testcases from DB")
+ test_cases = dashboard_utils.get_testcases(TEST_DB, "functest")
+
+        # TODO: refactor once the graphs for Tempest, Rally and ODL are ready
+        # Do it only for vPing in the first stage
+ for case in test_cases:
+ logger.debug("Generate " + case + " json files")
+ dashboard_utils.generateJson('functest', case, TEST_DB)
+
+ logger.info("Functest json files for dashboard successfully generated")
+ except:
+ logger.error("Impossible to generate json files for dashboard")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testcases/Dashboard/odl2Dashboard.py b/testcases/Dashboard/odl2Dashboard.py
new file mode 100644
index 000000000..12247663e
--- /dev/null
+++ b/testcases/Dashboard/odl2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the ODL test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_odl_for_dashboard(criteria):
+ logger.debug("generate dashboard json files for ODL suite")
diff --git a/testcases/Dashboard/rally2Dashboard.py b/testcases/Dashboard/rally2Dashboard.py
new file mode 100644
index 000000000..20e597468
--- /dev/null
+++ b/testcases/Dashboard/rally2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the rally test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_rally_for_dashboard(criteria):
+ logger.debug("generate dashboard json files for rally")
diff --git a/testcases/Dashboard/tempest2Dashboard.py b/testcases/Dashboard/tempest2Dashboard.py
new file mode 100644
index 000000000..8cbecbbc8
--- /dev/null
+++ b/testcases/Dashboard/tempest2Dashboard.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the tempest test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+# import json
+# import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_tempest_for_dashboard(criteria):
+ logger.debug("generate dashboard json files for Tempest")
diff --git a/testcases/Dashboard/vPing2Dashboard.py b/testcases/Dashboard/vPing2Dashboard.py
new file mode 100644
index 000000000..f799e280f
--- /dev/null
+++ b/testcases/Dashboard/vPing2Dashboard.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 Orange
+# morgan.richomme@orange.com
+#
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# This script is used to build json files for the dashboard
+# for the vPing test case
+#
+# v0.1: basic example
+#
+import logging
+import argparse
+import pprint
+import json
+import dashboard_utils
+import os
+import yaml
+
+pp = pprint.PrettyPrinter(indent=4)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("repo_path", help="Path to the repository")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('config_functest')
+logger.setLevel(logging.DEBUG)
+
+if not os.path.exists(args.repo_path):
+ logger.error("Repo directory not found '%s'" % args.repo_path)
+ exit(-1)
+
+with open(args.repo_path+"testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+""" global variables """
+# Directories
+HOME = os.environ['HOME']+"/"
+REPO_PATH = args.repo_path
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+
+def format_vPing_for_dashboard(criteria):
+
+ # Get results
+ myDataResults = dashboard_utils.get_results(TEST_DB, criteria)
+
+ # Depending on the use case, json for dashboarding is customized
+ # depending on the graph you want to show
+
+ test_data = [{'description': 'vPing results for Dashboard'}]
+
+ # Graph 1: Duration = f(time)
+ # ***************************
+ new_element = []
+ for data in myDataResults:
+ new_element.append({'x': data['creation_date'],
+ 'y': data['details']['duration']})
+
+ test_data.append({'name': "vPing duration",
+ 'info': {'type': "graph",
+ 'xlabel': 'time',
+ 'ylabel': 'duration (s)'},
+ 'data_set': new_element})
+
+ # Graph 2: bar
+ # ************
+ nbTest = 0
+ nbTestOk = 0
+
+ for data in myDataResults:
+ nbTest += 1
+ if data['details']['status'] == "OK":
+ nbTestOk += 1
+
+ test_data.append({'name': "vPing status",
+ 'info': {"type": "bar"},
+ 'data_set': [{'Nb tests': nbTest,
+ 'Nb Success': nbTestOk}]})
+
+ # Generate json file
+ fileName = criteria.format()
+ logger.debug("Generate json file:" + fileName)
+
+ with open(fileName, "w") as outfile:
+ json.dump(test_data, outfile, indent=4)
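
The json file written by format_vPing_for_dashboard() therefore has one descriptive entry plus one entry per graph. Its shape, shown as a Python literal with made-up values:

    # Expected structure of the generated vPing dashboard file (example values).
    example_output = [
        {"description": "vPing results for Dashboard"},
        {"name": "vPing duration",
         "info": {"type": "graph", "xlabel": "time", "ylabel": "duration (s)"},
         "data_set": [{"x": "2015-09-30 12:00:00", "y": 17.4}]},
        {"name": "vPing status",
         "info": {"type": "bar"},
         "data_set": [{"Nb tests": 10, "Nb Success": 9}]},
    ]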
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 341281a2c..61bbaaeb7 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -12,11 +12,6 @@ import re, json, os, urllib2, argparse, logging, yaml
-""" get the date """
-cmd = os.popen("date '+%d%m%Y_%H%M'")
-test_date = cmd.read().rstrip()
-
-
""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'vm', 'tempest', 'all', 'smoke']
parser = argparse.ArgumentParser()
@@ -57,7 +52,7 @@ f.close()
HOME = os.environ['HOME']+"/"
REPO_PATH = args.repo_path
SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally_scn")
-RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res") + test_date + "/"
+RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res") + "/rally/"
diff --git a/testcases/config_functest.py b/testcases/config_functest.py
index e618d2dd4..7fbd06042 100644
--- a/testcases/config_functest.py
+++ b/testcases/config_functest.py
@@ -8,9 +8,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys
+import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys, getpass
import functest_utils
from git import Repo
+from os import stat
+from pwd import getpwuid
actions = ['start', 'check', 'clean']
parser = argparse.ArgumentParser()
@@ -53,6 +55,7 @@ REPO_PATH = args.repo_path
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally")
RALLY_REPO_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_repo")
RALLY_INSTALLATION_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_inst")
+RALLY_RESULT_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res")
VPING_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_vping")
ODL_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_odl")
@@ -70,6 +73,10 @@ def action_start():
"""
Start the functest environment installation
"""
+ if not check_permissions():
+ logger.error("Bad Python cache directory ownership.")
+ exit(-1)
+
if not functest_utils.check_internet_connectivity():
logger.error("There is no Internet connectivity. Please check the network configuration.")
exit(-1)
@@ -83,11 +90,10 @@ def action_start():
logger.debug("Cleaning possible functest environment leftovers.")
action_clean()
- logger.info("Starting installation of functest environment")
- logger.info("Installing Rally...")
- if not install_rally():
- logger.error("There has been a problem while installing Rally.")
- action_clean()
+ logger.info("Installing needed libraries on the host")
+ cmd = "sudo yum -y install gcc libffi-devel python-devel openssl-devel gmp-devel libxml2-devel libxslt-devel postgresql-devel git wget"
+ if not functest_utils.execute_command(cmd, logger):
+ logger.error("There has been a problem while installing software packages.")
exit(-1)
logger.info("Installing ODL environment...")
@@ -96,6 +102,16 @@ def action_start():
action_clean()
exit(-1)
+ logger.info("Starting installation of functest environment")
+ logger.info("Installing Rally...")
+ if not install_rally():
+ logger.error("There has been a problem while installing Rally.")
+ action_clean()
+ exit(-1)
+
+ # Create result folder under functest if necessary
+ if not os.path.exists(RALLY_RESULT_DIR):
+ os.makedirs(RALLY_RESULT_DIR)
logger.info("Downloading image...")
if not functest_utils.download_url(IMAGE_URL, IMAGE_DIR):
@@ -206,10 +222,27 @@ def action_clean():
cmd = "glance image-delete " + image_id
functest_utils.execute_command(cmd,logger)
+ if os.path.exists(RALLY_RESULT_DIR):
+ logger.debug("Removing Result directory")
+ shutil.rmtree(RALLY_RESULT_DIR,ignore_errors=True)
+
+
logger.info("Functest environment clean!")
+def check_permissions():
+ current_user = getpass.getuser()
+ cache_dir = HOME+".cache/pip"
+ logger.info("Checking permissions of '%s'..." %cache_dir)
+ logger.debug("Current user is '%s'" %current_user)
+ cache_user = getpwuid(stat(cache_dir).st_uid).pw_name
+ logger.debug("Cache directory owner is '%s'" %cache_user)
+ if cache_user != current_user:
+ logger.info("The owner of '%s' is '%s'. Please run 'sudo chown -R %s %s'." %(cache_dir, cache_user, current_user, cache_dir))
+ return False
+
+ return True
def install_rally():
@@ -221,8 +254,9 @@ def install_rally():
Repo.clone_from(url, RALLY_REPO_DIR)
logger.debug("Executing %s./install_rally.sh..." %RALLY_REPO_DIR)
- install_script = RALLY_REPO_DIR + "install_rally.sh"
- functest_utils.execute_command(install_script,logger)
+ install_script = RALLY_REPO_DIR + "install_rally.sh --yes"
+ cmd = 'sudo ' + install_script
+ functest_utils.execute_command(cmd,logger)
logger.debug("Creating Rally environment...")
cmd = "rally deployment create --fromenv --name=opnfv-arno-rally"
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
index 40eb024ad..c38b46066 100644
--- a/testcases/config_functest.yaml
+++ b/testcases/config_functest.yaml
@@ -37,3 +37,6 @@ vping:
vm_name_2: opnfv-vping-2
ip_1: 192.168.120.30
ip_2: 192.168.120.40
+
+results:
+ test_db_url: http://213.77.62.197
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
index 26c1f478f..6af55f7a7 100644
--- a/testcases/functest_utils.py
+++ b/testcases/functest_utils.py
@@ -31,10 +31,6 @@ def check_credentials():
os.environ['OS_TENANT_NAME']
except KeyError:
return False
- try:
- os.environ['OS_REGION_NAME']
- except KeyError:
- return False
return True
@@ -194,16 +190,103 @@ def check_neutron_net(neutron_client, net_name):
return True
return False
+def get_image_id(glance_client, image_name):
+ images = glance_client.images.list()
+ id = ''
+ for i in images:
+ if i.name == image_name:
+ id = i.id
+ break
+ return id
+
+def create_glance_image(glance_client, image_name, file_path):
+ try:
+ with open(file_path) as fimage:
+ image = glance_client.images.create(name=image_name, is_public=True, disk_format="qcow2",
+ container_format="bare", data=fimage)
+ return image.id
+ except:
+ return False
+
+def get_flavor_id(nova_client, flavor_name):
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ id = f.id
+ break
+ return id
+
+def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if min_ram <= f.ram and f.ram <= max_ram:
+ id = f.id
+ break
+ return id
+
+
+def get_tenant_id(keystone_client, tenant_name):
+ tenants = keystone_client.tenants.list()
+ id = ''
+ for t in tenants:
+ if t.name == tenant_name:
+ id = t.id
+ break
+ return id
+
+def get_role_id(keystone_client, role_name):
+ roles = keystone_client.roles.list()
+ id = ''
+ for r in roles:
+ if r.name == role_name:
+ id = r.id
+ break
+ return id
+
+def get_user_id(keystone_client, user_name):
+ users = keystone_client.users.list()
+ id = ''
+ for u in users:
+ if u.name == user_name:
+ id = u.id
+ break
+ return id
+
+def create_tenant(keystone_client, tenant_name, tenant_description):
+ try:
+ tenant = keystone_client.tenants.create(tenant_name, tenant_description, enabled=True)
+ return tenant.id
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+def delete_tenant(keystone_client, tenant_id):
+ try:
+ tenant = keystone_client.tenants.delete(tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+def add_role_user(keystone_client, user_id, role_id, tenant_id):
+ try:
+ keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
-def check_internet_connectivity(url='http://www.google.com/'):
+def check_internet_connectivity(url='http://www.opnfv.org/'):
"""
Check if there is access to the internet
"""
try:
urllib2.urlopen(url, timeout=5)
return True
- except urllib.request.URLError:
+    except urllib2.URLError:
return False
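
The new Keystone helpers above (get_user_id, get_role_id, create_tenant, add_role_user, delete_tenant) are meant to be driven from a python-keystoneclient v2.0 client. A hedged usage sketch, assuming admin credentials are exported as the usual OS_* variables; the tenant name is a placeholder:

    # Sketch: create a temporary tenant, grant admin a role in it, then remove it.
    import os

    from keystoneclient.v2_0 import client as keystoneclient

    import functest_utils

    keystone = keystoneclient.Client(username=os.environ['OS_USERNAME'],
                                     password=os.environ['OS_PASSWORD'],
                                     tenant_name=os.environ['OS_TENANT_NAME'],
                                     auth_url=os.environ['OS_AUTH_URL'])

    tenant_id = functest_utils.create_tenant(keystone, "functest-demo",
                                             "temporary functest tenant")
    if tenant_id:
        user_id = functest_utils.get_user_id(keystone, "admin")
        role_id = functest_utils.get_role_id(keystone, "admin")
        functest_utils.add_role_user(keystone, user_id, role_id, tenant_id)
        functest_utils.delete_tenant(keystone, tenant_id)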
diff --git a/testcases/vIMS/vIMS.md b/testcases/vIMS/vIMS.md
new file mode 100644
index 000000000..68f86d9fa
--- /dev/null
+++ b/testcases/vIMS/vIMS.md
@@ -0,0 +1,3 @@
+# vIMS README
+
+
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing.py
index 1cc73922c..5d68f2229 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing.py
@@ -1,49 +1,69 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved. This program and the accompanying materials
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# This script boots the VM1 and allocates IP address from Nova
+# 0.1: This script boots the VM1 and allocates IP address from Nova
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
+# 0.2: measure test duration and publish results under json format
#
-# Note: this is script works only with Ubuntu image, not with Cirros image
#
-import os, time, subprocess, logging, argparse, yaml, pprint, sys
+import os
+import time
+import argparse
+import pprint
+import sys
+import json
+import logging
+import yaml
+import datetime
+import requests
import novaclient.v2.client as novaclient
from neutronclient.v2_0 import client as neutronclient
pp = pprint.PrettyPrinter(indent=4)
-
parser = argparse.ArgumentParser()
+
parser.add_argument("repo_path", help="Path to the repository")
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+
args = parser.parse_args()
sys.path.append(args.repo_path + "testcases/")
+
import functest_utils
""" logging configuration """
+
logger = logging.getLogger('vPing')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
+
if args.debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+formatter = logging.Formatter('%(asctime)s - %(name)s'
+ '- %(levelname)s - %(message)s')
+
ch.setFormatter(formatter)
logger.addHandler(ch)
+HOME = os.environ['HOME'] + "/"
-HOME = os.environ['HOME']+"/"
-with open(args.repo_path+"testcases/config_functest.yaml") as f:
+with open(args.repo_path + "testcases/config_functest.yaml") as f:
functest_yaml = yaml.safe_load(f)
f.close()
@@ -51,151 +71,203 @@ f.close()
VM_BOOT_TIMEOUT = 180
VM_DELETE_TIMEOUT = 100
PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
IP_1 = functest_yaml.get("vping").get("ip_1")
IP_2 = functest_yaml.get("vping").get("ip_2")
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get("image_name")
+GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
+ get("openstack").get("image_name")
FLAVOR = functest_yaml.get("vping").get("vm_flavor")
# NEUTRON Private Network parameters
-NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("general").get("openstack").get("neutron_private_net_name")
-NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("general").get("openstack").get("neutron_private_subnet_name")
-NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("general").get("openstack").get("neutron_private_subnet_cidr")
-NEUTRON_ROUTER_NAME = functest_yaml.get("general").get("openstack").get("neutron_router_name")
+
+NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_net_name")
+
+NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_name")
+
+NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_cidr")
+
+NEUTRON_ROUTER_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_router_name")
def pMsg(value):
+
"""pretty printing"""
pp.pprint(value)
-def waitVmActive(nova,vm):
+def waitVmActive(nova, vm):
+
# sleep and wait for VM status change
sleep_time = 3
count = VM_BOOT_TIMEOUT / sleep_time
while True:
- status = functest_utils.get_instance_status(nova,vm)
+ status = functest_utils.get_instance_status(nova, vm)
logger.debug("Status: %s" % status)
if status == "ACTIVE":
return True
if status == "ERROR" or count == 0:
return False
- count-=1
+ count -= 1
time.sleep(sleep_time)
return False
-def waitVmDeleted(nova,vm):
+
+def waitVmDeleted(nova, vm):
+
# sleep and wait for VM status change
sleep_time = 3
count = VM_DELETE_TIMEOUT / sleep_time
while True:
- status = functest_utils.get_instance_status(nova,vm)
+ status = functest_utils.get_instance_status(nova, vm)
if not status:
return True
elif count == 0:
logger.debug("Timeout")
return False
else:
- #return False
- count-=1
+ # return False
+ count -= 1
time.sleep(sleep_time)
return False
def create_private_neutron_net(neutron):
+
neutron.format = 'json'
logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
- network_id = functest_utils.create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
+ network_id = functest_utils. \
+ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
+
if not network_id:
return False
logger.debug("Network '%s' created successfully" % network_id)
-
logger.debug('Creating Subnet....')
- subnet_id = functest_utils.create_neutron_subnet(neutron, NEUTRON_PRIVATE_SUBNET_NAME, NEUTRON_PRIVATE_SUBNET_CIDR, network_id)
+ subnet_id = functest_utils. \
+ create_neutron_subnet(neutron,
+ NEUTRON_PRIVATE_SUBNET_NAME,
+ NEUTRON_PRIVATE_SUBNET_CIDR,
+ network_id)
if not subnet_id:
return False
logger.debug("Subnet '%s' created successfully" % subnet_id)
-
logger.debug('Creating Router...')
- router_id = functest_utils.create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
+ router_id = functest_utils. \
+ create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
+
if not router_id:
return False
- logger.debug("Router '%s' created successfully" % router_id)
+ logger.debug("Router '%s' created successfully" % router_id)
logger.debug('Adding router to subnet...')
+
result = functest_utils.add_interface_router(neutron, router_id, subnet_id)
+
if not result:
return False
- logger.debug("Interface added successfully.")
- network_dic = {'net_id' : network_id,
- 'subnet_id' : subnet_id,
- 'router_id' : router_id}
+ logger.debug("Interface added successfully.")
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
return network_dic
-def cleanup(nova,neutron,network_dic):
+def cleanup(nova, neutron, network_dic):
+
# delete both VMs
logger.info("Cleaning up...")
vm1 = functest_utils.get_instance_by_name(nova, NAME_VM_1)
if vm1:
- logger.debug("Deleting '%s'..." %NAME_VM_1)
+ logger.debug("Deleting '%s'..." % NAME_VM_1)
nova.servers.delete(vm1)
- #wait until VMs are deleted
- if not waitVmDeleted(nova,vm1):
- logger.error("Instance '%s' with cannot be deleted. Status is '%s'" % (NAME_VM_1,functest_utils.get_instance_status(nova_client,vm1)))
+ # wait until VMs are deleted
+ if not waitVmDeleted(nova, vm1):
+ logger.error(
+ "Instance '%s' with cannot be deleted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova, vm1)))
else:
logger.debug("Instance %s terminated." % NAME_VM_1)
vm2 = functest_utils.get_instance_by_name(nova, NAME_VM_2)
+
if vm2:
- logger.debug("Deleting '%s'..." %NAME_VM_2)
+ logger.debug("Deleting '%s'..." % NAME_VM_2)
vm2 = nova.servers.find(name=NAME_VM_2)
nova.servers.delete(vm2)
- if not waitVmDeleted(nova,vm2):
- logger.error("Instance '%s' with cannot be deleted. Status is '%s'" % (NAME_VM_2,functest_utils.get_instance_status(nova_client,vm2)))
+
+ if not waitVmDeleted(nova, vm2):
+ logger.error(
+ "Instance '%s' with cannot be deleted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova, vm2)))
else:
logger.debug("Instance %s terminated." % NAME_VM_2)
# delete created network
logger.info("Deleting network '%s'..." % NEUTRON_PRIVATE_NET_NAME)
- net_id=network_dic["net_id"]
- subnet_id=network_dic["subnet_id"]
- router_id=network_dic["router_id"]
- if not functest_utils.remove_interface_router(neutron, router_id, subnet_id):
- logger.error("Unable to remove subnet '%s' from router '%s'" %(subnet_id,router_id))
- return False
+ net_id = network_dic["net_id"]
+ subnet_id = network_dic["subnet_id"]
+ router_id = network_dic["router_id"]
+
+ if not functest_utils.remove_interface_router(neutron, router_id,
+ subnet_id):
+ logger.error("Unable to remove subnet '%s' from router '%s'" % (
+ subnet_id, router_id))
+ return False
+
logger.debug("Interface removed successfully")
if not functest_utils.delete_neutron_router(neutron, router_id):
- logger.error("Unable to delete router '%s'" %router_id)
+ logger.error("Unable to delete router '%s'" % router_id)
return False
+
logger.debug("Router deleted successfully")
+
if not functest_utils.delete_neutron_subnet(neutron, subnet_id):
- logger.error("Unable to delete subnet '%s'" %subnet_id)
+ logger.error("Unable to delete subnet '%s'" % subnet_id)
return False
- logger.debug("Subnet '%s' deleted successfully" %NEUTRON_PRIVATE_SUBNET_NAME)
+
+ logger.debug(
+ "Subnet '%s' deleted successfully" % NEUTRON_PRIVATE_SUBNET_NAME)
+
if not functest_utils.delete_neutron_net(neutron, net_id):
- logger.error("Unable to delete network '%s'" %net_id)
+ logger.error("Unable to delete network '%s'" % net_id)
return False
- logger.debug("Network '%s' deleted successfully" %NEUTRON_PRIVATE_NET_NAME)
+
+ logger.debug(
+ "Network '%s' deleted successfully" % NEUTRON_PRIVATE_NET_NAME)
return True
+def push_results_to_db(payload):
+
+ # TODO move DB creds into config file
+ url = TEST_DB + "/results"
+ params = {"project_name": "functest", "case_name": "vPing", "pod_id": 1,
+ "details": payload}
+ headers = {'Content-Type': 'application/json'}
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ logger.debug(r)
+
def main():
+
creds_nova = functest_utils.get_credentials("nova")
nova_client = novaclient.Client(**creds_nova)
creds_neutron = functest_utils.get_credentials("neutron")
neutron_client = neutronclient.Client(**creds_neutron)
EXIT_CODE = -1
+
image = None
- network = None
flavor = None
# Check if the given image exists
try:
- image = nova_client.images.find(name = GLANCE_IMAGE_NAME)
+ image = nova_client.images.find(name=GLANCE_IMAGE_NAME)
logger.info("Glance image found '%s'" % GLANCE_IMAGE_NAME)
except:
logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
@@ -204,15 +276,18 @@ def main():
exit(-1)
network_dic = create_private_neutron_net(neutron_client)
+
if not network_dic:
- logger.error("There has been a problem when creating the neutron network")
+ logger.error(
+ "There has been a problem when creating the neutron network")
exit(-1)
network_id = network_dic["net_id"]
# Check if the given flavor exists
+
try:
- flavor = nova_client.flavors.find(name = FLAVOR)
+ flavor = nova_client.flavors.find(name=FLAVOR)
logger.info("Flavor found '%s'" % FLAVOR)
except:
logger.error("Flavor '%s' not found." % FLAVOR)
@@ -220,77 +295,102 @@ def main():
pMsg(nova_client.flavor.list())
exit(-1)
-
# Deleting instances if they exist
- servers=nova_client.servers.list()
+
+ servers = nova_client.servers.list()
for server in servers:
if server.name == NAME_VM_1 or server.name == NAME_VM_2:
- logger.info("Instance %s found. Deleting..." %server.name)
+ logger.info("Instance %s found. Deleting..." % server.name)
server.delete()
-
# boot VM 1
# basic boot
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
+ # tune (e.g. flavor, images, network) to your specific
+ # openstack configuration here
+ # we consider start time at VM1 booting
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vPing Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
# create VM
- logger.debug("Creating port 'vping-port-1' with IP %s..." %IP_1)
- port_id=functest_utils.create_neutron_port(neutron_client, "vping-port-1", network_id, IP_1)
+ logger.debug("Creating port 'vping-port-1' with IP %s..." % IP_1)
+ port_id = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-1", network_id,
+ IP_1)
if not port_id:
logger.error("Unable to create port.")
exit(-1)
- logger.info("Creating instance '%s' with IP %s..." %(NAME_VM_1,IP_1))
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n" %(NAME_VM_1,flavor,image,network_id))
+
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_1, IP_1))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
+ "network=%s \n" % (NAME_VM_1, flavor, image, network_id))
vm1 = nova_client.servers.create(
- name = NAME_VM_1,
- flavor = flavor,
- image = image,
- #nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
- nics = [{"port-id": port_id}]
+ name=NAME_VM_1,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
+ nics=[{"port-id": port_id}]
)
- #wait until VM status is active
- if not waitVmActive(nova_client,vm1):
- logger.error("Instance '%s' cannot be booted. Status is '%s'" % (NAME_VM_1,functest_utils.get_instance_status(nova_client,vm1)))
- cleanup(nova_client,neutron_client,network_dic)
+
+ # wait until VM status is active
+ if not waitVmActive(nova_client, vm1):
+
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova_client, vm1)))
+ cleanup(nova_client, neutron_client, network_dic)
return (EXIT_CODE)
else:
logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)
- #retrieve IP of first VM
- #logger.debug("Fetching IP...")
- #server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
- # theoretically there is only one IP address so we take the first element of the table
+ # Retrieve IP of first VM
+ # logger.debug("Fetching IP...")
+ # server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
+ # theoretically there is only one IP address so we take the
+ # first element of the table
# Dangerous! To be improved!
- #test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
- test_ip=IP_1
- logger.debug("Instance '%s' got %s" %(NAME_VM_1,test_ip))
+ # test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
+ test_ip = IP_1
+ logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))
# boot VM 2
# we will boot then execute a ping script with cloud-init
# the long chain corresponds to the ping procedure converted with base 64
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n"%test_ip
+ # tune (e.g. flavor, images, network) to your specific openstack
+ # configuration here
+ u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " \
+ "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " \
+ "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip
+
# create VM
+ logger.debug("Creating port 'vping-port-2' with IP %s..." % IP_2)
+ port_id = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-2", network_id,
+ IP_2)
- logger.debug("Creating port 'vping-port-2' with IP %s..." %IP_2)
- port_id=functest_utils.create_neutron_port(neutron_client, "vping-port-2", network_id, IP_2)
if not port_id:
logger.error("Unable to create port.")
exit(-1)
- logger.info("Creating instance '%s' with IP %s..." %(NAME_VM_2,IP_2))
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(NAME_VM_2,flavor,image,network_id,u))
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_2, IP_2))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
+ "\n userdata= \n%s" % (
+ NAME_VM_2, flavor, image, network_id, u))
vm2 = nova_client.servers.create(
- name = NAME_VM_2,
- flavor = flavor,
- image = image,
- #nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
- nics = [{"port-id": port_id}],
- userdata = u
+ name=NAME_VM_2,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
+ nics=[{"port-id": port_id}],
+ userdata=u
)
- if not waitVmActive(nova_client,vm2):
- logger.error("Instance '%s' cannot be booted. Status is '%s'" % (NAME_VM_2,functest_utils.get_instance_status(nova_client,vm2)))
- cleanup(nova_client,neutron_client,network_dic)
+ if not waitVmActive(nova_client, vm2):
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova_client, vm2)))
+ cleanup(nova_client, neutron_client, network_dic)
return (EXIT_CODE)
else:
logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)
@@ -298,13 +398,19 @@ def main():
logger.info("Waiting for ping...")
sec = 0
console_log = vm2.get_console_output()
+
while True:
time.sleep(1)
console_log = vm2.get_console_output()
- #print "--"+console_log
+ # print "--"+console_log
# report if the test is failed
if "vPing OK" in console_log:
logger.info("vPing detected!")
+
+            # stop the clock: duration is measured from VM1 boot to ping detection
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ logger.info("vPing duration:'%s'" % duration)
EXIT_CODE = 0
break
elif sec == PING_TIMEOUT:
@@ -312,17 +418,31 @@ def main():
break
else:
logger.debug("No vPing detected...")
- sec+=1
+ sec += 1
- cleanup(nova_client,neutron_client,network_dic)
+ cleanup(nova_client, neutron_client, network_dic)
+ test_status = "NOK"
if EXIT_CODE == 0:
logger.info("vPing OK")
+ test_status = "OK"
else:
logger.error("vPing FAILED")
- exit(EXIT_CODE)
+ try:
+ if args.report:
+ logger.debug("Push result into DB")
+ # TODO check path result for the file
+ push_results_to_db(
+ {'timestart': start_time_ts, 'duration': duration,
+ 'status': test_status})
+ # with open("vPing-result.json", "w") as outfile:
+ # json.dump({'timestart': start_time_ts, 'duration': duration,
+ # 'status': test_status}, outfile, indent=4)
+ except:
+ logger.error("Error pushing results into Database")
+ exit(EXIT_CODE)
if __name__ == '__main__':
main()
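
When vPing.py is run with -r/--report, push_results_to_db() POSTs the measured duration to <test_db_url>/results. The request it sends looks like the sketch below (timestamp and duration are example values; the URL is the test_db_url from config_functest.yaml):

    # Sketch of the POST issued by push_results_to_db() in vPing.py.
    import json

    import requests

    payload = {"project_name": "functest",
               "case_name": "vPing",
               "pod_id": 1,
               "details": {"timestart": 1443690000.0,
                           "duration": 17.4,
                           "status": "OK"}}

    r = requests.post("http://213.77.62.197/results",
                      data=json.dumps(payload),
                      headers={"Content-Type": "application/json"})
    print(r.status_code)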