From ef62824e0471d07a4a3a40c401fc433070d961c6 Mon Sep 17 00:00:00 2001
From: "jose.lausuch"
Date: Wed, 20 Apr 2016 16:03:44 +0200
Subject: Fix Flake8 Violations in the Functest scripts

JIRA: FUNCTEST-213

Change-Id: I66c02dd6ff12ffb9798ebe44a4cfe7bfc73e76c3
Signed-off-by: jose.lausuch
---
 .../Controllers/ONOS/Teston/CI/adapters/client.py | 47 +++--
 .../ONOS/Teston/CI/adapters/connection.py | 107 +++++----
 .../ONOS/Teston/CI/adapters/environment.py | 230 +++++++++++----------
 .../ONOS/Teston/CI/adapters/foundation.py | 72 ++++---
 .../Controllers/ONOS/Teston/CI/onosfunctest.py | 17 +-
 testcases/SECTests/OpenSCAP.py | 20 +-
 testcases/SECTests/connect.py | 5 +-
 testcases/SECTests/scripts/createfiles.py | 2 +-
 .../VIM/OpenStack/CI/libraries/clean_openstack.py | 164 ++++++++-------
 .../OpenStack/CI/libraries/generate_defaults.py | 45 ++--
 .../VIM/OpenStack/CI/libraries/run_rally-cert.py | 84 ++++----
 .../VIM/OpenStack/CI/libraries/run_tempest.py | 70 ++++---
 testcases/config_functest.py | 137 ++++++------
 testcases/config_functest.yaml | 4 +-
 testcases/features/doctor.py | 3 +-
 testcases/features/promise.py | 75 ++++---
 testcases/functest_utils.py | 10 +-
 testcases/openstack_utils.py | 57 ++---
 testcases/tests/TestFunctestUtils.py | 5 +-
 testcases/vIMS/CI/clearwater.py | 11 +-
 testcases/vIMS/CI/orchestrator.py | 45 ++--
 testcases/vIMS/CI/vIMS.py | 76 ++++---
 testcases/vPing/CI/libraries/vPing_ssh.py | 134 +++++++-----
 testcases/vPing/CI/libraries/vPing_userdata.py | 70 ++++---
 24 files changed, 828 insertions(+), 662 deletions(-)

(limited to 'testcases')

diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/client.py b/testcases/Controllers/ONOS/Teston/CI/adapters/client.py
index a61670ef8..15bb73c91 100644
--- a/testcases/Controllers/ONOS/Teston/CI/adapters/client.py
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/client.py
@@ -11,21 +11,20 @@ Description:
 #
 """
 from environment import environment
-import os
 import time
 import pexpect
-import re
 import requests
 import json
 
 
-class client( environment ):
-    def __init__( self ):
-        environment.__init__( self )
+class client(environment):
+
+    def __init__(self):
+        environment.__init__(self)
         self.loginfo = environment()
         self.testcase = ''
 
-    def RunScript( self, handle, testname, timeout=300 ):
+    def RunScript(self, handle, testname, timeout=300):
         """
         Run ONOS Test Script
         Parameters:
@@ -34,17 +33,21 @@ class client( environment ):
         masterpassword: The server password of running ONOS
         """
         self.testcase = testname
-        self.ChangeTestCasePara( testname, self.masterusername, self.masterpassword )
+        self.ChangeTestCasePara(testname, self.masterusername,
+                                self.masterpassword)
         runhandle = handle
-        runtest = self.home + "/OnosSystemTest/TestON/bin/cli.py run " + testname
+        runtest = self.home + "/OnosSystemTest/TestON/bin/cli.py run " + \
+            testname
         runhandle.sendline(runtest)
         circletime = 0
         lastshowscreeninfo = ''
         while True:
-            Result = runhandle.expect(["PEXPECT]#", pexpect.EOF, pexpect.TIMEOUT])
+            Result = runhandle.expect(["PEXPECT]#", pexpect.EOF,
+                                       pexpect.TIMEOUT])
            curshowscreeninfo = runhandle.before
-            if (len(lastshowscreeninfo) != len(curshowscreeninfo)):
-                self.loginfo.log(str(curshowscreeninfo)[len(lastshowscreeninfo)::])
+            if(len(lastshowscreeninfo) != len(curshowscreeninfo)):
+                self.loginfo.log(str(curshowscreeninfo)
+                                 [len(lastshowscreeninfo)::])
                lastshowscreeninfo = curshowscreeninfo
            if Result == 0:
                print "Done!"
@@ -53,28 +56,30 @@ class client( environment ): circletime += 1 if circletime > timeout: break - self.loginfo.log( "Timeout when running the test, please check!" ) + self.loginfo.log("Timeout when running the test, please check!") - def onosstart( self ): - #This is the compass run machine user&pass,you need to modify + def onosstart(self): + # This is the compass run machine user&pass,you need to modify print "Test Begin....." self.OnosConnectionSet() masterhandle = self.SSHlogin(self.localhost, self.masterusername, - self.masterpassword) - self.OnosEnvSetup( masterhandle ) + self.masterpassword) + self.OnosEnvSetup(masterhandle) return masterhandle - def onosclean( self, handle ): - self.SSHRelease( handle ) + def onosclean(self, handle): + self.SSHRelease(handle) self.loginfo.log('Release onos handle Successful') - def push_results_to_db( self, payload, pushornot = 1): + def push_results_to_db(self, payload, pushornot=1): if pushornot != 1: return 1 url = self.Result_DB + "/results" - params = {"project_name": "functest", "case_name": "ONOS-" + self.testcase, - "pod_name": 'huawei-build-2', "details": payload} + params = {"project_name": "functest", "case_name": "ONOS-" + + self.testcase, "pod_name": 'huawei-build-2', + "details": payload} + headers = {'Content-Type': 'application/json'} try: r = requests.post(url, data=json.dumps(params), headers=headers) diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py b/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py index e2788b412..16f2ef32c 100644 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py +++ b/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py @@ -14,19 +14,18 @@ Description: # """ import os -import time import pexpect import re -import sys from foundation import foundation -class connection( foundation ): - def __init__( self ): - foundation.__init__( self ) +class connection(foundation): + + def __init__(self): + foundation.__init__(self) self.loginfo = foundation() - def AddKnownHost( self, handle, ipaddr, username, password ): + def AddKnownHost(self, handle, ipaddr, username, password): """ Add an user to known host,so that onos can login in with onos $ipaddr. parameters: @@ -34,29 +33,30 @@ class connection( foundation ): username: login user name password: login password """ - print( "Now Adding an user to known hosts " + ipaddr ) + print("Now Adding an user to known hosts " + ipaddr) login = handle - login.sendline( "ssh -l %s -p 8101 %s"%( username, ipaddr ) ) + login.sendline("ssh -l %s -p 8101 %s" % (username, ipaddr)) index = 0 while index != 2: - index = login.expect( ['assword:', 'yes/no', pexpect.EOF, \ - pexpect.TIMEOUT] ) + index = login.expect(['assword:', 'yes/no', pexpect.EOF, + pexpect.TIMEOUT]) if index == 0: - login.sendline( password ) - login.sendline( "logout" ) - index = login.expect( ["closed", pexpect.EOF] ) + login.sendline(password) + login.sendline("logout") + index = login.expect(["closed", pexpect.EOF]) if index == 0: - self.loginfo.log( "Add SSH Known Host Success!" ) + self.loginfo.log("Add SSH Known Host Success!") break else: - self.loginfo.log( "Add SSH Known Host Failed! Please Check!" ) + self.loginfo.log("Add SSH Known Host Failed! 
" + "Please Check!") break - login.prompt( ) + login.prompt() if index == 1: login.sendline('yes') - def GetEnvValue( self, handle, envname): + def GetEnvValue(self, handle, envname): """ os.getenv only returns current user value GetEnvValue returns a environment value of @@ -64,26 +64,26 @@ class connection( foundation ): eg: GetEnvValue(handle,'HOME') """ envhandle = handle - envhandle.sendline( 'echo $' + envname ) - envhandle.prompt( ) + envhandle.sendline('echo $' + envname) + envhandle.prompt() reg = envname + '\r\n(.*)\r' - envaluereg = re.compile( reg ) - envalue = envaluereg.search( envhandle.before ) + envaluereg = re.compile(reg) + envalue = envaluereg.search(envhandle.before) if envalue: return envalue.groups()[0] else: return None - def Gensshkey( self, handle ): + def Gensshkey(self, handle): """ Generate ssh keys, used for some server have no sshkey. """ print "Now Generating SSH keys..." - #Here file name may be id_rsa or id_ecdsa or others - #So here will have a judgement + # Here file name may be id_rsa or id_ecdsa or others + # So here will have a judgement keysub = handle - filepath = self.GetEnvValue( keysub, 'HOME' ) + '/.ssh' - filelist = os.listdir( filepath ) + filepath = self.GetEnvValue(keysub, 'HOME') + '/.ssh' + filelist = os.listdir(filepath) for item in filelist: if 'id' in item: self.loginfo.log("SSH keys are exsit in ssh directory.") @@ -91,14 +91,14 @@ class connection( foundation ): keysub.sendline("ssh-keygen -t rsa") Result = 0 while Result != 2: - Result = keysub.expect( ["Overwrite", "Enter", pexpect.EOF, \ - 'PEXPECT]#', pexpect.TIMEOUT]) + Result = keysub.expect(["Overwrite", "Enter", pexpect.EOF, + 'PEXPECT]#', pexpect.TIMEOUT]) if Result == 0: keysub.sendline("y") if Result == 1 or Result == 2: keysub.sendline("\n") if Result == 3: - self.loginfo.log( "Generate SSH key success." ) + self.loginfo.log("Generate SSH key success.") keysub.prompt() break if Result == 4: @@ -106,33 +106,32 @@ class connection( foundation ): keysub.prompt() break - def GetRootAuth( self, password ): + def GetRootAuth(self, password): """ Get root user parameters: password: root login password """ - print( "Now changing to user root" ) - login = pexpect.spawn( "su - root" ) + print("Now changing to user root") + login = pexpect.spawn("su - root") index = 0 while index != 2: - index = login.expect( ['assword:', "failure", \ - pexpect.EOF, pexpect.TIMEOUT] ) + index = login.expect(['assword:', "failure", + pexpect.EOF, pexpect.TIMEOUT]) if index == 0: - login.sendline( password ) + login.sendline(password) if index == 1: self.loginfo.log("Change user to root failed.") login.interact() - def ReleaseRootAuth( self ): + def ReleaseRootAuth(self): """ Exit root user. """ - print( "Now Release user root" ) - login = pexpect.spawn( "exit" ) - index = login.expect( ['logout', \ - pexpect.EOF, pexpect.TIMEOUT] ) + print("Now Release user root") + login = pexpect.spawn("exit") + index = login.expect(['logout', pexpect.EOF, pexpect.TIMEOUT]) if index == 0: self.loginfo.log("Release root user success.") if index == 1: @@ -140,28 +139,28 @@ class connection( foundation ): login.interact() - def AddEnvIntoBashrc( self, envalue ): + def AddEnvIntoBashrc(self, envalue): """ Add Env var into /etc/profile. 
parameters: envalue: environment value to add """ print "Now Adding bash environment" - fileopen = open( "/etc/profile", 'r' ) + fileopen = open("/etc/profile", 'r') findContext = 1 while findContext: - findContext = fileopen.readline( ) - result = findContext.find( envalue ) + findContext = fileopen.readline() + result = findContext.find(envalue) if result != -1: break fileopen.close if result == -1: - envAdd = open( "/etc/profile", 'a+' ) - envAdd.writelines( "\n" + envalue ) - envAdd.close( ) - self.loginfo.log( "Add env to bashrc success!" ) + envAdd = open("/etc/profile", 'a+') + envAdd.writelines("\n" + envalue) + envAdd.close() + self.loginfo.log("Add env to bashrc success!") - def OnosRootPathChange( self, onospath ): + def OnosRootPathChange(self, onospath): """ Change ONOS root path in file:bash_profile onospath: path of onos root @@ -171,22 +170,22 @@ class connection( foundation ): line = open(filepath, 'r').readlines() lenall = len(line) - 1 for i in range(lenall): - if "export ONOS_ROOT" in line[i]: - line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n' + if "export ONOS_ROOT" in line[i]: + line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n' NewFile = open(filepath, 'w') NewFile.writelines(line) NewFile.close print "Done!" - def OnosConnectionSet (self): + def OnosConnectionSet(self): """ Intergrate for ONOS connection setup """ if self.masterusername == 'root': filepath = '/root/' - else : + else: filepath = '/home/' + self.masterusername + '/' - filepath = os.path.join( filepath, "onos/tools/dev/bash_profile" ) + filepath = os.path.join(filepath, "onos/tools/dev/bash_profile") self.AddEnvIntoBashrc("source " + filepath + "\n") self.AddEnvIntoBashrc("export OCT=" + self.OCT) self.AddEnvIntoBashrc("export OC1=" + self.OC1) diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py b/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py index 8b7ee13fc..4fc636aba 100644 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py +++ b/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py @@ -23,15 +23,16 @@ import sys import pxssh from connection import connection -class environment( connection ): - def __init__( self ): - connection.__init__( self ) - self.loginfo = connection( ) +class environment(connection): + + def __init__(self): + connection.__init__(self) + self.loginfo = connection() self.masterhandle = '' self.home = '' - def DownLoadCode( self, handle, codeurl ): + def DownLoadCode(self, handle, codeurl): """ Download Code use 'git clone' parameters: @@ -42,38 +43,40 @@ class environment( connection ): originalfolder = sys.path[0] print originalfolder gitclone = handle - gitclone.sendline( "git clone " + codeurl ) + gitclone.sendline("git clone " + codeurl) index = 0 - increment = 0 + # increment = 0 while index != 1 or index != 4: - index = gitclone.expect ( ['already exists', 'esolving deltas: 100%', \ - 'eceiving objects', 'Already up-to-date', \ - 'npacking objects: 100%', pexpect.EOF] ) + index = gitclone.expect(['already exists', + 'esolving deltas: 100%', + 'eceiving objects', + 'Already up-to-date', + 'npacking objects: 100%', pexpect.EOF]) filefolder = self.home + '/' + codeurl.split('/')[-1].split('.')[0] - if index == 0 : - os.chdir( filefolder ) - os.system( 'git pull' ) - os.chdir( originalfolder ) - self.loginfo.log( 'Download code success!' 
) + if index == 0: + os.chdir(filefolder) + os.system('git pull') + os.chdir(originalfolder) + self.loginfo.log('Download code success!') break elif index == 1 or index == 4: - self.loginfo.log( 'Download code success!' ) - gitclone.sendline( "mkdir onos" ) - gitclone.prompt( ) - gitclone.sendline( "cp -rf " + filefolder+ "/tools onos/" ) - gitclone.prompt( ) + self.loginfo.log('Download code success!') + gitclone.sendline("mkdir onos") + gitclone.prompt() + gitclone.sendline("cp -rf " + filefolder + "/tools onos/") + gitclone.prompt() break - elif index == 2 : + elif index == 2: os.write(1, gitclone.before) sys.stdout.flush() - else : - self.loginfo.log( 'Download code failed!' ) - self.loginfo.log( 'Information before' + gitclone.before ) + else: + self.loginfo.log('Download code failed!') + self.loginfo.log('Information before' + gitclone.before) break - gitclone.prompt( ) + gitclone.prompt() - def InstallDefaultSoftware( self, handle ): + def InstallDefaultSoftware(self, handle): """ Install default software parameters: @@ -81,15 +84,15 @@ class environment( connection ): """ print "Now Cleaning test environment" handle.sendline("sudo apt-get install -y mininet") - handle.prompt( ) + handle.prompt() handle.sendline("sudo pip install configobj") - handle.prompt( ) + handle.prompt() handle.sendline("sudo apt-get install -y sshpass") - handle.prompt( ) + handle.prompt() handle.sendline("OnosSystemTest/TestON/bin/cleanup.sh") - handle.prompt( ) + handle.prompt() time.sleep(5) - self.loginfo.log( 'Clean environment success!' ) + self.loginfo.log('Clean environment success!') def OnosPushKeys(self, handle, cmd, password): """ @@ -99,28 +102,28 @@ class environment( connection ): cmd(input): onos-push-keys xxx(xxx is device) password(input): login in password """ - print "Now Pushing Onos Keys:"+cmd + print "Now Pushing Onos Keys:" + cmd Pushkeys = handle - Pushkeys.sendline( cmd ) + Pushkeys.sendline(cmd) Result = 0 while Result != 2: - Result = Pushkeys.expect( ["(yes/no)", "assword:", "PEXPECT]#", \ - pexpect.EOF, pexpect.TIMEOUT]) - if ( Result == 0 ): - Pushkeys.sendline( "yes" ) - if ( Result == 1 ): - Pushkeys.sendline( password ) - if ( Result == 2 ): - self.loginfo.log( "ONOS Push keys Success!" ) + Result = Pushkeys.expect(["(yes/no)", "assword:", "PEXPECT]#", + pexpect.EOF, pexpect.TIMEOUT]) + if(Result == 0): + Pushkeys.sendline("yes") + if(Result == 1): + Pushkeys.sendline(password) + if(Result == 2): + self.loginfo.log("ONOS Push keys Success!") break - if ( Result == 3 ): - self.loginfo.log( "ONOS Push keys Error!" ) + if(Result == 3): + self.loginfo.log("ONOS Push keys Error!") break time.sleep(2) - Pushkeys.prompt( ) + Pushkeys.prompt() print "Done!" 
- def SetOnosEnvVar( self, handle, masterpass, agentpass): + def SetOnosEnvVar(self, handle, masterpass, agentpass): """ Setup onos pushkeys to all devices(3+2) parameters: @@ -133,34 +136,36 @@ class environment( connection ): print "try to connect " + str(host) result = self.CheckSshNoPasswd(host) if not result: - print "ssh lgin failed,try to copy master publickey to agent " + str(host) + print "ssh lgin failed,try to copy master publickey" + \ + "to agent " + str(host) self.CopyPublicKey(host) - self.OnosPushKeys( handle, "onos-push-keys " + self.OCT, masterpass) - self.OnosPushKeys( handle, "onos-push-keys " + self.OC1, agentpass) - self.OnosPushKeys( handle, "onos-push-keys " + self.OC2, agentpass) - self.OnosPushKeys( handle, "onos-push-keys " + self.OC3, agentpass) - self.OnosPushKeys( handle, "onos-push-keys " + self.OCN, agentpass) - self.OnosPushKeys( handle, "onos-push-keys " + self.OCN2, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCT, masterpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC1, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC2, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC3, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCN, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCN2, agentpass) - def CheckSshNoPasswd( self, host): + def CheckSshNoPasswd(self, host): """ Check master can connect agent with no password """ - login = pexpect.spawn( "ssh " + str(host)) + login = pexpect.spawn("ssh " + str(host)) index = 4 while index == 4: - index = login.expect(['(yes/no)','>|#|\$', \ - pexpect.EOF, pexpect.TIMEOUT] ) + index = login.expect(['(yes/no)', '>|#|\$', + pexpect.EOF, pexpect.TIMEOUT]) if index == 0: - login.sendline( "yes" ) + login.sendline("yes") index = 4 if index == 1: - self.loginfo.log("ssh connect to " + str(host) + " success,no need to copy ssh public key" ) + self.loginfo.log("ssh connect to " + str(host) + + " success,no need to copy ssh public key") return True login.interact() return False - def ChangeOnosName( self, user, password): + def ChangeOnosName(self, user, password): """ Change onos name in envDefault file Because some command depend on this @@ -173,12 +178,12 @@ class environment( connection ): line = open(filepath, 'r').readlines() lenall = len(line) - 1 for i in range(lenall): - if "ONOS_USER=" in line[i]: - line[i]=line[i].replace("sdn",user) - if "ONOS_GROUP" in line[i]: - line[i]=line[i].replace("sdn",user) - if "ONOS_PWD" in line[i]: - line[i]=line[i].replace("rocks",password) + if "ONOS_USER=" in line[i]: + line[i] = line[i].replace("sdn", user) + if "ONOS_GROUP" in line[i]: + line[i] = line[i].replace("sdn", user) + if "ONOS_PWD" in line[i]: + line[i] = line[i].replace("rocks", password) NewFile = open(filepath, 'w') NewFile.writelines(line) NewFile.close @@ -191,31 +196,28 @@ class environment( connection ): user: onos&compute node user password: onos&compute node password """ - print "Now Changing " + testcase + " name&password" + print "Now Changing " + testcase + " name&password" if self.masterusername == 'root': filepath = '/root/' - else : + else: filepath = '/home/' + self.masterusername + '/' - filepath = filepath +"OnosSystemTest/TestON/tests/" + testcase + "/" + \ - testcase + ".topo" - line = open(filepath,'r').readlines() - lenall = len(line)-1 - for i in range(lenall-2): - if ("localhost" in line[i]) or ("OCT" in line[i]): - line[i+1]=re.sub(">\w+",">"+user,line[i+1]) - 
line[i+2]=re.sub(">\w+",">"+password,line[i+2]) - if "OC1" in line [i] \ - or "OC2" in line [i] \ - or "OC3" in line [i] \ - or "OCN" in line [i] \ - or "OCN2" in line[i]: - line[i+1]=re.sub(">\w+",">root",line[i+1]) - line[i+2]=re.sub(">\w+",">root",line[i+2]) - NewFile = open(filepath,'w') + filepath = filepath + "OnosSystemTest/TestON/tests/" \ + + testcase + "/" + testcase + ".topo" + line = open(filepath, 'r').readlines() + lenall = len(line) - 1 + for i in range(lenall - 2): + if("localhost" in line[i]) or ("OCT" in line[i]): + line[i + 1] = re.sub(">\w+", ">" + user, line[i + 1]) + line[i + 2] = re.sub(">\w+", ">" + password, line[i + 2]) + if "OC1" in line[i] or "OC2" in line[i] or "OC3" in line[i] or \ + "OCN" in line[i] or "OCN2" in line[i]: + line[i + 1] = re.sub(">\w+", ">root", line[i + 1]) + line[i + 2] = re.sub(">\w+", ">root", line[i + 2]) + NewFile = open(filepath, 'w') NewFile.writelines(line) NewFile.close - def SSHlogin ( self, ipaddr, username, password ) : + def SSHlogin(self, ipaddr, username, password): """ SSH login provide a connection to destination. parameters: @@ -224,52 +226,56 @@ class environment( connection ): password: login password return: handle """ - login = pxssh.pxssh( ) - login.login ( ipaddr, username, password, original_prompt='[$#>]') - #send command ls -l - login.sendline ('ls -l') - #match prompt + login = pxssh.pxssh() + login.login(ipaddr, username, password, original_prompt='[$#>]') + # send command ls -l + login.sendline('ls -l') + # match prompt login.prompt() - print ("SSH login " + ipaddr + " success!") + print("SSH login " + ipaddr + " success!") return login - def SSHRelease( self, handle ): - #Release ssh + def SSHRelease(self, handle): + # Release ssh handle.logout() - def CopyOnostoTestbin( self ): + def CopyOnostoTestbin(self): sourcefile = self.cipath + '/dependencies/onos' destifile = self.home + '/onos/tools/test/bin/' - os.system( 'pwd' ) + os.system('pwd') runcommand = 'cp ' + sourcefile + ' ' + destifile - os.system( runcommand ) + os.system(runcommand) - def CopyPublicKey( self, host ): - output = os.popen( 'cat /root/.ssh/id_rsa.pub' ) + def CopyPublicKey(self, host): + output = os.popen('cat /root/.ssh/id_rsa.pub') publickey = output.read().strip('\n') - tmphandle = self.SSHlogin( self.installer_master, self.installer_master_username, self.installer_master_password ) - tmphandle.sendline("ssh "+ host + " -T \'echo " + str(publickey) + ">>/root/.ssh/authorized_keys\'" ) + tmphandle = self.SSHlogin(self.installer_master, + self.installer_master_username, + self.installer_master_password) + tmphandle.sendline("ssh " + host + " -T \'echo " + + str(publickey) + ">>/root/.ssh/authorized_keys\'") tmphandle.prompt() self.SSHRelease(tmphandle) print "Add OCT PublicKey to " + host + " success" - def OnosEnvSetup( self, handle ): + def OnosEnvSetup(self, handle): """ Onos Environment Setup function """ - self.Gensshkey( handle ) - self.home = self.GetEnvValue( handle, 'HOME' ) - self.AddKnownHost( handle, self.OC1, "karaf", "karaf" ) - self.AddKnownHost( handle, self.OC2, "karaf", "karaf" ) - self.AddKnownHost( handle, self.OC3, "karaf", "karaf" ) - self.DownLoadCode( handle, 'https://github.com/sunyulin/OnosSystemTest.git' ) - #self.DownLoadCode( handle, 'https://gerrit.onosproject.org/onos' ) + self.Gensshkey(handle) + self.home = self.GetEnvValue(handle, 'HOME') + self.AddKnownHost(handle, self.OC1, "karaf", "karaf") + self.AddKnownHost(handle, self.OC2, "karaf", "karaf") + self.AddKnownHost(handle, self.OC3, "karaf", "karaf") + 
self.DownLoadCode(handle, + 'https://github.com/sunyulin/OnosSystemTest.git') + # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos') if self.masterusername == 'root': filepath = '/root/' - else : + else: filepath = '/home/' + self.masterusername + '/' - self.OnosRootPathChange( filepath ) + self.OnosRootPathChange(filepath) self.CopyOnostoTestbin() - self.ChangeOnosName(self.agentusername,self.agentpassword) - self.InstallDefaultSoftware( handle ) - self.SetOnosEnvVar(handle, self.masterpassword,self.agentpassword) + self.ChangeOnosName(self.agentusername, self.agentpassword) + self.InstallDefaultSoftware(handle) + self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword) diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py b/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py index 486ecfa71..a4591ac72 100644 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py +++ b/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py @@ -18,38 +18,39 @@ import yaml import re import datetime + class foundation: def __init__(self): - #currentpath = os.getcwd() - REPO_PATH = os.environ['repos_dir']+'/functest/' + # currentpath = os.getcwd() + REPO_PATH = os.environ['repos_dir'] + '/functest/' currentpath = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI' self.cipath = currentpath - self.logdir = os.path.join( currentpath, 'log' ) - self.workhome = currentpath[0:currentpath.rfind('testcases')-1] + self.logdir = os.path.join(currentpath, 'log') + self.workhome = currentpath[0: currentpath.rfind('testcases') - 1] self.Result_DB = '' - filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log' - self.logfilepath = os.path.join( self.logdir, filename ) + filename = time.strftime('%Y-%m-%d-%H-%M-%S') + '.log' + self.logfilepath = os.path.join(self.logdir, filename) self.starttime = datetime.datetime.now() - def log (self, loginfo): + def log(self, loginfo): """ Record log in log directory for deploying test environment parameters: loginfo(input): record info """ - logging.basicConfig( level=logging.INFO, - format = '%(asctime)s %(filename)s:%(message)s', - datefmt = '%d %b %Y %H:%M:%S', - filename = self.logfilepath, - filemode = 'w') - filelog = logging.FileHandler( self.logfilepath ) - logging.getLogger( 'Functest' ).addHandler( filelog ) + logging.basicConfig(level=logging.INFO, + format='%(asctime)s %(filename)s:%(message)s', + datefmt='%d %b %Y %H:%M:%S', + filename=self.logfilepath, + filemode='w') + filelog = logging.FileHandler(self.logfilepath) + logging.getLogger('Functest').addHandler(filelog) print loginfo logging.info(loginfo) - def getdefaultpara( self ): + def getdefaultpara(self): """ Get Default Parameters value """ @@ -58,40 +59,47 @@ class foundation: f.close() self.Result_DB = str(functest_yaml.get("results").get("test_db_url")) - self.masterusername = str(functest_yaml.get("ONOS").get("general").\ - get('onosbench_username')) - self.masterpassword = str(functest_yaml.get("ONOS").get("general").\ - get("onosbench_password")) - self.agentusername = str(functest_yaml.get("ONOS").get("general").\ + self.masterusername = str(functest_yaml.get("ONOS").get("general"). + get('onosbench_username')) + self.masterpassword = str(functest_yaml.get("ONOS").get("general"). + get("onosbench_password")) + self.agentusername = str(functest_yaml.get("ONOS").get("general"). get("onoscli_username")) - self.agentpassword = str(functest_yaml.get("ONOS").get("general").\ + self.agentpassword = str(functest_yaml.get("ONOS").get("general"). 
get("onoscli_password")) - self.runtimeout = functest_yaml.get("ONOS").get("general").get("runtimeout") + self.runtimeout = functest_yaml.get("ONOS").\ + get("general").get("runtimeout") self.OCT = str(functest_yaml.get("ONOS").get("environment").get("OCT")) self.OC1 = str(functest_yaml.get("ONOS").get("environment").get("OC1")) self.OC2 = str(functest_yaml.get("ONOS").get("environment").get("OC2")) self.OC3 = str(functest_yaml.get("ONOS").get("environment").get("OC3")) self.OCN = str(functest_yaml.get("ONOS").get("environment").get("OCN")) - self.OCN2 = str(functest_yaml.get("ONOS").get("environment").get("OCN2")) - self.installer_master = str(functest_yaml.get("ONOS").get("environment").get("installer_master")) - self.installer_master_username = str(functest_yaml.get("ONOS").get("environment").get("installer_master_username")) - self.installer_master_password = str(functest_yaml.get("ONOS").get("environment").get("installer_master_password")) + self.OCN2 = str(functest_yaml.get("ONOS"). + get("environment").get("OCN2")) + self.installer_master = str(functest_yaml.get("ONOS"). + get("environment").get("installer_master")) + self.installer_master_username = str(functest_yaml.get("ONOS"). + get("environment"). + get("installer_master_username")) + self.installer_master_password = str(functest_yaml.get("ONOS"). + get("environment"). + get("installer_master_password")) self.hosts = [self.OC1, self.OCN, self.OCN2] self.localhost = self.OCT - - def GetResult( self ): + + def GetResult(self): cmd = "cat " + self.logfilepath + " | grep Fail" Resultbuffer = os.popen(cmd).read() duration = datetime.datetime.now() - self.starttime time.sleep(2) - + if re.search("[1-9]+", Resultbuffer): self.log("Testcase Fails\n" + Resultbuffer) Result = "POK" else: self.log("Testcases Pass") Result = "OK" - payload={'timestart': str(self.starttime), - 'duration': str(duration), - 'status': Result} + payload = {'timestart': str(self.starttime), + 'duration': str(duration), 'status': Result} + return payload diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py index a47198b96..73b4b63ba 100644 --- a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py +++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py @@ -46,8 +46,10 @@ f.close() # onos parameters TEST_DB = functest_yaml.get("results").get("test_db_url") -ONOS_REPO_PATH = functest_yaml.get("general").get("directories").get("dir_repos") -ONOS_CONF_DIR = functest_yaml.get("general").get("directories").get("dir_functest_conf") +ONOS_REPO_PATH = functest_yaml.get("general").\ + get("directories").get("dir_repos") +ONOS_CONF_DIR = functest_yaml.get("general").\ + get("directories").get("dir_functest_conf") REPO_PATH = ONOS_REPO_PATH + '/functest/' if not os.path.exists(REPO_PATH): logger.error("Functest repository directory not found '%s'" % REPO_PATH) @@ -87,15 +89,15 @@ def GetResult(): LOGPATH = ONOSCI_PATH + "OnosSystemTest/TestON/logs" cmd = "grep -rnh " + "Fail" + " " + LOGPATH Resultbuffer = os.popen(cmd).read() - duration = datetime.datetime.now() - starttime + # duration = datetime.datetime.now() - starttime time.sleep(2) if re.search("\s+[1-9]+\s+", Resultbuffer): logger.debug("Testcase Fails\n" + Resultbuffer) - Result = "Failed" + # Result = "Failed" else: logger.debug("Testcases Success") - Result = "Success" + # Result = "Success" # payload={'timestart': str(starttime), # 'duration': str(duration), # 'status': Result} @@ -195,8 +197,9 @@ def main(): # i.e. 
FUNCvirNet & FUNCvirNetL3 status = "failed" try: - if result['FUNCvirNet']['result'] == "Success" and result['FUNCvirNetL3']['result'] == "Success": - status = "passed" + if (result['FUNCvirNet']['result'] == "Success" and + result['FUNCvirNetL3']['result'] == "Success"): + status = "passed" except: logger.error("Unable to set ONOS criteria") diff --git a/testcases/SECTests/OpenSCAP.py b/testcases/SECTests/OpenSCAP.py index 6f7510aca..0ab0ce3d7 100644 --- a/testcases/SECTests/OpenSCAP.py +++ b/testcases/SECTests/OpenSCAP.py @@ -56,7 +56,7 @@ parser.add_argument('--port', action='store', dest='port"', help='port', - required=True) + required=True) parser.add_argument('--dist', action='store', dest='dist', @@ -147,11 +147,12 @@ def run_scanner(): parserout.password, com) elif args['which'] == 'oval': - com = '{0} oval eval --results {1}/{2} --report {1}/{3} {4}'.format(oscap, - tmpdir.rstrip(), - parserout.results, - parserout.report, - parserout.secpolicy) + com = '{0} oval eval --results {1}/{2}' + \ + ' --report {1}/{3} {4}'.format(oscap, + tmpdir.rstrip(), + parserout.results, + parserout.report, + parserout.secpolicy) connect = connect.connectionManager(parserout.host, parserout.user, parserout.password, @@ -164,11 +165,14 @@ def run_scanner(): parserout.password, com) run_tool = connect.remotecmd() + print run_tool def post_tasks(): import connect - dl_folder = os.path.join(os.getcwd(), parserout.host + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) + dl_folder = os.path.join(os.getcwd(), parserout.host + + datetime.datetime.now(). + strftime('%Y-%m-%d_%H-%M-%S')) os.mkdir(dl_folder, 0755) reportfile = '{0}/{1}'.format(tmpdir.rstrip(), parserout.report) connect = connect.connectionManager(parserout.host, @@ -179,6 +183,7 @@ def post_tasks(): parserout.report, parserout.results) run_tool = connect.download_reports() + print run_tool def removepkg(): @@ -200,6 +205,7 @@ def cleandir(): parserout.password, com) deldir = connect.remotecmd() + print deldir if __name__ == '__main__': diff --git a/testcases/SECTests/connect.py b/testcases/SECTests/connect.py index fe8fbacfd..f766eabf9 100644 --- a/testcases/SECTests/connect.py +++ b/testcases/SECTests/connect.py @@ -10,7 +10,6 @@ # # 0.1: OpenSCAP paramiko connection functions -import os import paramiko __version__ = 0.1 @@ -68,8 +67,8 @@ class connectionManager: print "There was no output for this command" def run_tool(self): - dist = self.args[0] - report = self.args[1] + # dist = self.args[0] + # report = self.args[1] com = self.args[2] client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) diff --git a/testcases/SECTests/scripts/createfiles.py b/testcases/SECTests/scripts/createfiles.py index f99e25c0d..b828901a5 100644 --- a/testcases/SECTests/scripts/createfiles.py +++ b/testcases/SECTests/scripts/createfiles.py @@ -21,6 +21,6 @@ files = ['results.xml', 'report.html', 'syschar.xml'] directory_name = tempfile.mkdtemp() for i in files: - os.system("touch %s/%s" % (directory_name,i)) + os.system("touch %s/%s" % (directory_name, i)) print directory_name diff --git a/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py b/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py index 8ea08b49d..bff71f4e0 100644 --- a/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py +++ b/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py @@ -23,7 +23,6 @@ import argparse import logging import os -import re import sys import time import yaml @@ -34,7 +33,7 @@ from keystoneclient.v2_0 import client as 
keystoneclient from cinderclient import client as cinderclient parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") args = parser.parse_args() @@ -48,14 +47,12 @@ if args.debug: else: ch.setLevel(logging.INFO) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('% (asctime)s - % (name)s - ' + '% (levelname)s - % (message)s') ch.setFormatter(formatter) logger.addHandler(ch) -REPO_PATH=os.environ['repos_dir']+'/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) +REPO_PATH = os.environ['repos_dir'] + '/functest/' sys.path.append(REPO_PATH + "testcases/") import openstack_utils @@ -80,9 +77,11 @@ default_floatingips = defaults_yaml.get('floatingips') default_users = defaults_yaml.get('users') default_tenants = defaults_yaml.get('tenants') + def separator(): logger.info("-------------------------------------------") + def remove_instances(nova_client): logger.info("Removing Nova instances...") instances = openstack_utils.get_instances(nova_client) @@ -93,12 +92,13 @@ def remove_instances(nova_client): for instance in instances: instance_name = getattr(instance, 'name') instance_id = getattr(instance, 'id') - logger.debug("Removing instance '%s', ID=%s ..." % (instance_name,instance_id)) + logger.debug("Removing instance '%s', ID=%s ..." + % (instance_name, instance_id)) if openstack_utils.delete_instance(nova_client, instance_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "instance %s..." % instance_id) + "instance %s..." % instance_id) timeout = 50 while timeout > 0: @@ -121,16 +121,18 @@ def remove_images(nova_client): for image in images: image_name = getattr(image, 'name') image_id = getattr(image, 'id') - logger.debug("'%s', ID=%s " %(image_name,image_id)) + logger.debug("'%s', ID=%s " % (image_name, image_id)) if image_id not in default_images: - logger.debug("Removing image '%s', ID=%s ..." % (image_name,image_id)) + logger.debug("Removing image '%s', ID=%s ..." + % (image_name, image_id)) if openstack_utils.delete_glance_image(nova_client, image_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the" - "image %s..." % image_id) + "image %s..." % image_id) else: - logger.debug(" > this is a default image and will NOT be deleted.") + logger.debug(" > this is a default image and will " + "NOT be deleted.") def remove_volumes(cinder_client): @@ -143,7 +145,7 @@ def remove_volumes(cinder_client): for volume in volumes: volume_id = getattr(volume, 'id') volume_name = getattr(volume, 'display_name') - logger.debug("'%s', ID=%s " %(volume_name,volume_id)) + logger.debug("'%s', ID=%s " % (volume_name, volume_id)) if volume_id not in default_volumes: logger.debug("Removing cinder volume %s ..." % volume_id) if openstack_utils.delete_volume(cinder_client, volume_id): @@ -151,14 +153,16 @@ def remove_volumes(cinder_client): else: logger.debug("Trying forced removal...") if openstack_utils.delete_volume(cinder_client, - volume_id, - forced=True): + volume_id, + forced=True): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "volume %s..." % volume_id) + "volume %s..." 
% volume_id) else: - logger.debug(" > this is a default volume and will NOT be deleted.") + logger.debug(" > this is a default volume and will " + "NOT be deleted.") + def remove_floatingips(nova_client): logger.info("Removing floating IPs...") @@ -172,7 +176,7 @@ def remove_floatingips(nova_client): for fip in floatingips: fip_id = getattr(fip, 'id') fip_ip = getattr(fip, 'ip') - logger.debug("'%s', ID=%s " %(fip_ip,fip_id)) + logger.debug("'%s', ID=%s " % (fip_ip, fip_id)) if fip_id not in default_floatingips: logger.debug("Removing floating IP %s ..." % fip_id) if openstack_utils.delete_floating_ip(nova_client, fip_id): @@ -180,10 +184,10 @@ def remove_floatingips(nova_client): deleted += 1 else: logger.error("There has been a problem removing the " - "floating IP %s..." % fip_id) + "floating IP %s..." % fip_id) else: - logger.debug(" > this is a default floating IP and will NOT be deleted.") - + logger.debug(" > this is a default floating IP and will " + "NOT be deleted.") timeout = 50 while timeout > 0: @@ -200,45 +204,47 @@ def remove_networks(neutron_client): logger.info("Removing Neutron objects") network_ids = [] networks = openstack_utils.get_network_list(neutron_client) - if networks == None: + if networks is None: logger.debug("There are no networks in the deployment. ") else: logger.debug("Existing networks:") for network in networks: net_id = network['id'] net_name = network['name'] - logger.debug(" '%s', ID=%s " %(net_name,net_id)) + logger.debug(" '%s', ID=%s " % (net_name, net_id)) if net_id in default_networks: - logger.debug(" > this is a default network and will NOT be deleted.") - elif network['router:external'] == True: - logger.debug(" > this is an external network and will NOT be deleted.") + logger.debug(" > this is a default network and will " + "NOT be deleted.") + elif network['router:external'] is True: + logger.debug(" > this is an external network and will " + "NOT be deleted.") else: logger.debug(" > this network will be deleted.") network_ids.append(net_id) - #delete ports + # delete ports ports = openstack_utils.get_port_list(neutron_client) if ports is None: logger.debug("There are no ports in the deployment. ") else: remove_ports(neutron_client, ports, network_ids) - #remove routers + # remove routers routers = openstack_utils.get_router_list(neutron_client) if routers is None: logger.debug("There are no routers in the deployment. ") else: remove_routers(neutron_client, routers) - #remove networks - if network_ids != None: + # remove networks + if network_ids is not None: for net_id in network_ids: logger.debug("Removing network %s ..." % net_id) if openstack_utils.delete_neutron_net(neutron_client, net_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "network %s..." % net_id) + "network %s..." % net_id) def remove_ports(neutron_client, ports, network_ids): @@ -248,28 +254,33 @@ def remove_ports(neutron_client, ports, network_ids): try: subnet_id = port['fixed_ips'][0]['subnet_id'] except: - logger.info(" > WARNING: Port %s does not contain 'fixed_ips'" % port_id) + logger.info(" > WARNING: Port %s does not contain 'fixed_ips'" + % port_id) print port router_id = port['device_id'] if len(port['fixed_ips']) == 0 and router_id == '': logger.debug("Removing port %s ..." % port_id) - if openstack_utils.delete_neutron_port(neutron_client, port_id): + if (openstack_utils.delete_neutron_port(neutron_client, + port_id)): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "port %s ..." 
%port_id) + "port %s ..." % port_id) force_remove_port(neutron_client, port_id) elif port['device_owner'] == 'network:router_interface': logger.debug("Detaching port %s (subnet %s) from router %s ..." - % (port_id,subnet_id,router_id)) - if openstack_utils.remove_interface_router(neutron_client, - router_id, subnet_id): - time.sleep(5) # leave 5 seconds to detach before doing anything else + % (port_id, subnet_id, router_id)) + if openstack_utils.\ + remove_interface_router(neutron_client, + router_id, + subnet_id): + time.sleep(5) # leave 5 seconds to detach logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "interface %s from router %s..." %(subnet_id,router_id)) + "interface %s from router %s..." + % (subnet_id, router_id)) force_remove_port(neutron_client, port_id) else: force_remove_port(neutron_client, port_id) @@ -278,14 +289,14 @@ def remove_ports(neutron_client, ports, network_ids): def force_remove_port(neutron_client, port_id): logger.debug("Clearing device_owner for port %s ..." % port_id) openstack_utils.update_neutron_port(neutron_client, - port_id, - device_owner='clear') + port_id, + device_owner='clear') logger.debug("Removing port %s ..." % port_id) if openstack_utils.delete_neutron_port(neutron_client, port_id): logger.debug(" > Done!") else: - logger.error("There has been a problem removing " - "the port %s..." % port_id) + logger.error("There has been a problem removing the port %s..." + % port_id) def remove_routers(neutron_client, routers): @@ -293,22 +304,28 @@ def remove_routers(neutron_client, routers): router_id = router['id'] router_name = router['name'] if router_id not in default_routers: - logger.debug("Checking '%s' with ID=(%s) ..." % (router_name,router_id)) - if router['external_gateway_info'] != None: - logger.debug("Router has gateway to external network. Removing link...") - if openstack_utils.remove_gateway_router(neutron_client, router_id): + logger.debug("Checking '%s' with ID=(%s) ..." % (router_name, + router_id)) + if router['external_gateway_info'] is not None: + logger.debug("Router has gateway to external network." + "Removing link...") + if openstack_utils.remove_gateway_router(neutron_client, + router_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing " - "the gateway...") + "the gateway...") else: - logger.debug("Router is not connected to anything. Ready to remove...") - logger.debug("Removing router %s(%s) ..." % (router_name, router_id)) - if openstack_utils.delete_neutron_router(neutron_client, router_id): + logger.debug("Router is not connected to anything." + "Ready to remove...") + logger.debug("Removing router %s(%s) ..." + % (router_name, router_id)) + if openstack_utils.delete_neutron_router(neutron_client, + router_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "router '%s'(%s)..." % (router_name, router_id)) + "router '%s'(%s)..." % (router_name, router_id)) def remove_security_groups(neutron_client): @@ -321,14 +338,15 @@ def remove_security_groups(neutron_client): for secgroup in secgroups: secgroup_name = secgroup['name'] secgroup_id = secgroup['id'] - logger.debug("'%s', ID=%s " %(secgroup_name,secgroup_id)) + logger.debug("'%s', ID=%s " % (secgroup_name, secgroup_id)) if secgroup_id not in default_security_groups: logger.debug(" Removing '%s'..." 
% secgroup_name) - if openstack_utils.delete_security_group(neutron_client, secgroup_id): + if openstack_utils.delete_security_group(neutron_client, + secgroup_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "security group %s..." % secgroup_id) + "security group %s..." % secgroup_id) else: logger.debug(" > this is a default security group and will NOT " "be deleted.") @@ -337,51 +355,52 @@ def remove_security_groups(neutron_client): def remove_users(keystone_client): logger.info("Removing Users...") users = openstack_utils.get_users(keystone_client) - if users == None: + if users is None: logger.debug("There are no users in the deployment. ") return for user in users: user_name = getattr(user, 'name') user_id = getattr(user, 'id') - logger.debug("'%s', ID=%s " %(user_name,user_id)) + logger.debug("'%s', ID=%s " % (user_name, user_id)) if user_id not in default_users: logger.debug(" Removing '%s'..." % user_name) - if openstack_utils.delete_user(keystone_client,user_id): + if openstack_utils.delete_user(keystone_client, user_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "user '%s'(%s)..." % (user_name,user_id)) + "user '%s'(%s)..." % (user_name, user_id)) else: - logger.debug(" > this is a default user and will NOT be deleted.") + logger.debug(" > this is a default user and will " + "NOT be deleted.") def remove_tenants(keystone_client): logger.info("Removing Tenants...") tenants = openstack_utils.get_tenants(keystone_client) - if tenants == None: + if tenants is None: logger.debug("There are no tenants in the deployment. ") return for tenant in tenants: - tenant_name=getattr(tenant, 'name') + tenant_name = getattr(tenant, 'name') tenant_id = getattr(tenant, 'id') - logger.debug("'%s', ID=%s " %(tenant_name,tenant_id)) + logger.debug("'%s', ID=%s " % (tenant_name, tenant_id)) if tenant_id not in default_tenants: logger.debug(" Removing '%s'..." % tenant_name) - if openstack_utils.delete_tenant(keystone_client,tenant_id): + if openstack_utils.delete_tenant(keystone_client, tenant_id): logger.debug(" > Done!") else: logger.error("There has been a problem removing the " - "tenant '%s'(%s)..." % (tenant_name,tenant_id)) + "tenant '%s'(%s)..." 
% (tenant_name, tenant_id)) else: - logger.debug(" > this is a default tenant and will NOT be deleted.") - + logger.debug(" > this is a default tenant and will " + "NOT be deleted.") def main(): creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2',**creds_nova) + nova_client = novaclient.Client('2', **creds_nova) creds_neutron = openstack_utils.get_credentials("neutron") neutron_client = neutronclient.Client(**creds_neutron) @@ -390,15 +409,16 @@ def main(): keystone_client = keystoneclient.Client(**creds_keystone) creds_cinder = openstack_utils.get_credentials("cinder") - #cinder_client = cinderclient.Client(**creds_cinder) - cinder_client = cinderclient.Client('1',creds_cinder['username'], + # cinder_client = cinderclient.Client(**creds_cinder) + cinder_client = cinderclient.Client('1', creds_cinder['username'], creds_cinder['api_key'], creds_cinder['project_id'], creds_cinder['auth_url'], service_type="volume") if not openstack_utils.check_credentials(): - logger.error("Please source the openrc credentials and run the script again.") + logger.error("Please source the openrc credentials and run " + "the script again.") exit(-1) remove_instances(nova_client) diff --git a/testcases/VIM/OpenStack/CI/libraries/generate_defaults.py b/testcases/VIM/OpenStack/CI/libraries/generate_defaults.py index 731ed9e4e..e67e4deec 100644 --- a/testcases/VIM/OpenStack/CI/libraries/generate_defaults.py +++ b/testcases/VIM/OpenStack/CI/libraries/generate_defaults.py @@ -23,9 +23,7 @@ import argparse import logging import os -import re import sys -import time import yaml from novaclient import client as novaclient @@ -34,7 +32,7 @@ from keystoneclient.v2_0 import client as keystoneclient from cinderclient import client as cinderclient parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") args = parser.parse_args() @@ -48,11 +46,12 @@ if args.debug: else: ch.setLevel(logging.INFO) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - ' + + '%(message)s') ch.setFormatter(formatter) logger.addHandler(ch) -REPO_PATH=os.environ['repos_dir']+'/functest/' +REPO_PATH = os.environ['repos_dir'] + '/functest/' if not os.path.exists(REPO_PATH): logger.error("Functest repository directory not found '%s'" % REPO_PATH) exit(-1) @@ -65,13 +64,15 @@ DEFAULTS_FILE = '/home/opnfv/functest/conf/os_defaults.yaml' def separator(): logger.info("-------------------------------------------") + def get_instances(nova_client): logger.debug("Getting instances...") dic_instances = {} instances = openstack_utils.get_instances(nova_client) if not (instances is None or len(instances) == 0): for instance in instances: - dic_instances.update({getattr(instance, 'id'):getattr(instance, 'name')}) + dic_instances.update({getattr(instance, 'id'): getattr(instance, + 'name')}) return {'instances': dic_instances} @@ -81,7 +82,7 @@ def get_images(nova_client): images = openstack_utils.get_images(nova_client) if not (images is None or len(images) == 0): for image in images: - dic_images.update({getattr(image, 'id'):getattr(image, 'name')}) + dic_images.update({getattr(image, 'id'): getattr(image, 'name')}) return {'images': dic_images} @@ -89,9 +90,9 @@ def get_volumes(cinder_client): logger.debug("Getting volumes...") dic_volumes = {} volumes = 
openstack_utils.get_volumes(cinder_client) - if volumes != None: + if volumes is not None: for volume in volumes: - dic_volumes.update({volume.id:volume.display_name}) + dic_volumes.update({volume.id: volume.display_name}) return {'volumes': dic_volumes} @@ -99,9 +100,9 @@ def get_networks(neutron_client): logger.debug("Getting networks") dic_networks = {} networks = openstack_utils.get_network_list(neutron_client) - if networks != None: + if networks is not None: for network in networks: - dic_networks.update({network['id']:network['name']}) + dic_networks.update({network['id']: network['name']}) return {'networks': dic_networks} @@ -109,9 +110,9 @@ def get_routers(neutron_client): logger.debug("Getting routers") dic_routers = {} routers = openstack_utils.get_router_list(neutron_client) - if routers != None: + if routers is not None: for router in routers: - dic_routers.update({router['id']:router['name']}) + dic_routers.update({router['id']: router['name']}) return {'routers': dic_routers} @@ -121,7 +122,7 @@ def get_security_groups(neutron_client): secgroups = openstack_utils.get_security_groups(neutron_client) if not (secgroups is None or len(secgroups) == 0): for secgroup in secgroups: - dic_secgroups.update({secgroup['id']:secgroup['name']}) + dic_secgroups.update({secgroup['id']: secgroup['name']}) return {'secgroups': dic_secgroups} @@ -131,7 +132,7 @@ def get_floatinips(nova_client): floatingips = openstack_utils.get_floating_ips(nova_client) if not (floatingips is None or len(floatingips) == 0): for floatingip in floatingips: - dic_floatingips.update({floatingip.id:floatingip.ip}) + dic_floatingips.update({floatingip.id: floatingip.ip}) return {'floatingips': dic_floatingips} @@ -141,7 +142,7 @@ def get_users(keystone_client): users = openstack_utils.get_users(keystone_client) if not (users is None or len(users) == 0): for user in users: - dic_users.update({getattr(user, 'id'):getattr(user, 'name')}) + dic_users.update({getattr(user, 'id'): getattr(user, 'name')}) return {'users': dic_users} @@ -151,13 +152,14 @@ def get_tenants(keystone_client): tenants = openstack_utils.get_tenants(keystone_client) if not (tenants is None or len(tenants) == 0): for tenant in tenants: - dic_tenants.update({getattr(tenant, 'id'):getattr(tenant, 'name')}) + dic_tenants.update({getattr(tenant, 'id'): + getattr(tenant, 'name')}) return {'tenants': dic_tenants} def main(): creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2',**creds_nova) + nova_client = novaclient.Client('2', **creds_nova) creds_neutron = openstack_utils.get_credentials("neutron") neutron_client = neutronclient.Client(**creds_neutron) @@ -166,14 +168,15 @@ def main(): keystone_client = keystoneclient.Client(**creds_keystone) creds_cinder = openstack_utils.get_credentials("cinder") - cinder_client = cinderclient.Client('1',creds_cinder['username'], + cinder_client = cinderclient.Client('1', creds_cinder['username'], creds_cinder['api_key'], creds_cinder['project_id'], creds_cinder['auth_url'], service_type="volume") if not openstack_utils.check_credentials(): - logger.error("Please source the openrc credentials and run the script again.") + logger.error("Please source the openrc credentials and run the" + + "script again.") exit(-1) defaults = {} @@ -192,7 +195,7 @@ def main(): yaml_file.seek(0) logger.info("Openstack Defaults found in the deployment:") print yaml_file.read() - logger.debug("NOTE: These objects will NOT be deleted after "+\ + logger.debug("NOTE: These objects will NOT be deleted 
after " + "running the tests.") exit(0) diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py index 0ef7b4ed3..3ac3d4490 100755 --- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py +++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py @@ -45,7 +45,7 @@ parser.add_argument("test_name", "performs all possible test scenarios" .format(d=tests)) -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") parser.add_argument("-r", "--report", help="Create json result file", action="store_true") @@ -83,7 +83,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - " ch.setFormatter(formatter) logger.addHandler(ch) -REPO_PATH = os.environ['repos_dir']+'/functest/' +REPO_PATH = os.environ['repos_dir'] + '/functest/' if not os.path.exists(REPO_PATH): logger.error("Functest repository directory not found '%s'" % REPO_PATH) exit(-1) @@ -95,12 +95,12 @@ with open("/home/opnfv/functest/conf/config_functest.yaml") as f: functest_yaml = yaml.safe_load(f) f.close() -HOME = os.environ['HOME']+"/" +HOME = os.environ['HOME'] + "/" SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \ get("directories").get("dir_rally_scn") TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates" SUPPORT_DIR = SCENARIOS_DIR + "scenario/support" -###todo: + FLAVOR_NAME = "m1.tiny" USERS_AMOUNT = 2 TENANTS_AMOUNT = 3 @@ -215,7 +215,7 @@ def build_task_args(test_file_name): task_args['floating_network'] = '' net_id = openstack_utils.get_network_id(client_dict['neutron'], - PRIVATE_NETWORK) + PRIVATE_NETWORK) task_args['netid'] = str(net_id) task_args['live_migration'] = live_migration_supported() @@ -270,7 +270,7 @@ def get_output(proc, test_name): except ValueError: logger.info('Duration error: %s, %s' % (duration, line)) - overall_duration="{:10.2f}".format(overall_duration) + overall_duration = "{:10.2f}".format(overall_duration) if nb_totals == 0: success_avg = 0 else: @@ -378,8 +378,9 @@ def main(): neutron_client = neutronclient.Client(**creds_neutron) creds_keystone = openstack_utils.get_credentials("keystone") keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for(service_type='image', - endpoint_type='publicURL') + glance_endpoint = keystone_client.\ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') glance_client = glanceclient.Client(1, glance_endpoint, token=keystone_client.auth_token) creds_cinder = openstack_utils.get_credentials("cinder") @@ -392,15 +393,15 @@ def main(): client_dict['neutron'] = neutron_client volume_types = openstack_utils.list_volume_types(cinder_client, - private=False) + private=False) if not volume_types: - volume_type = openstack_utils.create_volume_type(cinder_client, - CINDER_VOLUME_TYPE_NAME) + volume_type = openstack_utils.\ + create_volume_type(cinder_client, CINDER_VOLUME_TYPE_NAME) if not volume_type: logger.error("Failed to create volume type...") exit(-1) else: - logger.debug("Volume type '%s' created succesfully..." \ + logger.debug("Volume type '%s' created succesfully..." % CINDER_VOLUME_TYPE_NAME) else: logger.debug("Using existing volume type(s)...") @@ -412,16 +413,16 @@ def main(): logger.debug("Creating image '%s' from '%s'..." 
% (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH)) image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) + GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH) if not image_id: logger.error("Failed to create the Glance image...") exit(-1) else: - logger.debug("Image '%s' with ID '%s' created succesfully ." \ + logger.debug("Image '%s' with ID '%s' created succesfully ." % (GLANCE_IMAGE_NAME, image_id)) else: - logger.debug("Using existing image '%s' with ID '%s'..." \ + logger.debug("Using existing image '%s' with ID '%s'..." % (GLANCE_IMAGE_NAME, image_id)) image_exists = True @@ -435,14 +436,19 @@ def main(): run_task(args.test_name) report = "\n"\ - " \n"\ + " "\ + "\n"\ " Rally Summary Report\n"\ - "+===================+============+===============+===========+\n"\ - "| Module | Duration | nb. Test Run | Success |\n"\ - "+===================+============+===============+===========+\n" + "\n"\ + "+===================+============+===============+===========+"\ + "\n"\ + "| Module | Duration | nb. Test Run | Success |"\ + "\n"\ + "+===================+============+===============+===========+"\ + "\n" payload = [] - #for each scenario we draw a row for the table + # for each scenario we draw a row for the table total_duration = 0.0 total_nb_tests = 0 total_success = 0.0 @@ -454,11 +460,12 @@ def main(): duration = "{0:<10}".format(duration) nb_tests = "{0:<13}".format(s['nb_tests']) total_nb_tests += int(s['nb_tests']) - success = "{0:<10}".format(str(s['success'])+'%') + success = "{0:<10}".format(str(s['success']) + '%') total_success += float(s['success']) - report += ""\ - "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\ - "+-------------------+------------+---------------+-----------+\n" + report += "" + \ + "| " + name + " | " + duration + " | " + \ + nb_tests + " | " + success + "|\n" + \ + "+-------------------+------------+---------------+-----------+\n" payload.append({'module': name, 'details': {'duration': s['overall_duration'], 'nb tests': s['nb_tests'], @@ -468,21 +475,24 @@ def main(): total_duration_str2 = "{0:<10}".format(total_duration_str) total_nb_tests_str = "{0:<13}".format(total_nb_tests) total_success = "{:0.2f}".format(total_success / len(SUMMARY)) - total_success_str = "{0:<10}".format(str(total_success)+'%') - report += "+===================+============+===============+===========+\n" + total_success_str = "{0:<10}".format(str(total_success) + '%') + report += "+===================+============+===============+===========+" + report += "\n" report += "| TOTAL: | " + total_duration_str2 + " | " + \ - total_nb_tests_str + " | " + total_success_str + "|\n" - report += "+===================+============+===============+===========+\n" + total_nb_tests_str + " | " + total_success_str + "|\n" + report += "+===================+============+===============+===========+" + report += "\n" - logger.info("\n"+report) + logger.info("\n" + report) payload.append({'summary': {'duration': total_duration, - 'nb tests': total_nb_tests, - 'nb success': total_success}}) + 'nb tests': total_nb_tests, + 'nb success': total_success}}) # Generate json results for DB - #json_results = {"timestart": time_start, "duration": total_duration, - # "tests": int(total_nb_tests), "success": int(total_success)} - #logger.info("Results: "+str(json_results)) + # json_results = {"timestart": time_start, "duration": total_duration, + # "tests": int(total_nb_tests), + # "success": int(total_success)} + # logger.info("Results: 
"+str(json_results)) # Evaluation of the success criteria status = "failed" @@ -498,13 +508,13 @@ def main(): exit(0) if not image_exists: - logger.debug("Deleting image '%s' with ID '%s'..." \ + logger.debug("Deleting image '%s' with ID '%s'..." % (GLANCE_IMAGE_NAME, image_id)) if not openstack_utils.delete_glance_image(nova_client, image_id): logger.error("Error deleting the glance image") if not volume_types: - logger.debug("Deleting volume type '%s'..." \ + logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME) if not openstack_utils.delete_volume_type(cinder_client, volume_type): logger.error("Error in deleting volume type...") diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py index 4ff311767..951210ce9 100644 --- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py +++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py @@ -61,14 +61,12 @@ if args.debug: else: ch.setLevel(logging.INFO) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('%(asctime)s - %(name)s - ' + '%(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) -REPO_PATH = os.environ['repos_dir']+'/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) +REPO_PATH = os.environ['repos_dir'] + '/functest/' sys.path.append(REPO_PATH + "testcases/") import functest_utils import openstack_utils @@ -79,16 +77,25 @@ f.close() TEST_DB = functest_yaml.get("results").get("test_db_url") MODE = "smoke" -TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name") -TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get("tenant_description") -USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name") -USER_PASSWORD = functest_yaml.get("tempest").get("identity").get("user_password") -SSH_USER_REGEX = functest_yaml.get("tempest").get("input-scenario").get("ssh_user_regex") -DEPLOYMENT_MAME = functest_yaml.get("rally").get("deployment_name") -RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get("dir_rally_inst") -RESULTS_DIR = functest_yaml.get("general").get("directories").get("dir_results") +TENANT_NAME = functest_yaml.get("tempest").\ + get("identity").get("tenant_name") +TENANT_DESCRIPTION = functest_yaml.get("tempest").\ + get("identity").get("tenant_description") +USER_NAME = functest_yaml.get("tempest").\ + get("identity").get("user_name") +USER_PASSWORD = functest_yaml.get("tempest").\ + get("identity").get("user_password") +SSH_USER_REGEX = functest_yaml.get("tempest").\ + get("input-scenario").get("ssh_user_regex") +DEPLOYMENT_MAME = functest_yaml.get("rally").\ + get("deployment_name") +RALLY_INSTALLATION_DIR = functest_yaml.get("general").\ + get("directories").get("dir_rally_inst") +RESULTS_DIR = functest_yaml.get("general").\ + get("directories").get("dir_results") TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest' -TEST_LIST_DIR = functest_yaml.get("general").get("directories").get("dir_tempest_cases") +TEST_LIST_DIR = functest_yaml.get("general").\ + get("directories").get("dir_tempest_cases") TEMPEST_LIST_FILE = REPO_PATH + TEST_LIST_DIR + 'test_list.txt' TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt' @@ -111,10 +118,8 @@ def get_info(file_result): if (len(test_failed) < 1): test_failed = re.findall(regexp, line) - retval = p.wait() - - logger.debug("test_run:"+test_run) - 
logger.debug("duration:"+duration) + logger.debug("test_run:" + test_run) + logger.debug("duration:" + duration) def push_results_to_db(case, payload, criteria): @@ -183,7 +188,7 @@ def configure_tempest(mode): functest_utils.execute_command(cmd, logger) logger.debug("Resolving deployment UUID and directory...") - cmd = "rally deployment list | awk '/"+DEPLOYMENT_MAME+"/ {print $2}'" + cmd = "rally deployment list | awk '/" + DEPLOYMENT_MAME + "/ {print $2}'" p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) @@ -191,12 +196,14 @@ def configure_tempest(mode): if deployment_uuid == "": logger.debug(" Rally deployment NOT found") return False - deployment_dir = RALLY_INSTALLATION_DIR + "/tempest/for-deployment-" + deployment_uuid + deployment_dir = RALLY_INSTALLATION_DIR + "/tempest/for-deployment-" + \ + deployment_uuid logger.debug("Finding tempest.conf file...") tempest_conf_file = deployment_dir + "/tempest.conf" if not os.path.isfile(tempest_conf_file): - logger.error(" Tempest configuration file %s NOT found." % tempest_conf_file) + logger.error(" Tempest configuration file %s NOT found." + % tempest_conf_file) return False logger.debug("Generating test case list...") @@ -217,8 +224,9 @@ def configure_tempest(mode): logger.error("No shared private networks found.") else: private_net_name = private_net['name'] - cmd = "crudini --set "+tempest_conf_file+" compute fixed_network_name " \ - + private_net_name + cmd = "crudini --set " + tempest_conf_file + \ + " compute fixed_network_name " + \ + private_net_name functest_utils.execute_command(cmd, logger) logger.debug(" Updating non-admin credentials...") @@ -231,7 +239,8 @@ def configure_tempest(mode): cmd = "crudini --set " + tempest_conf_file + " identity password " \ + USER_PASSWORD functest_utils.execute_command(cmd, logger) - cmd = "sed -i 's/.*ssh_user_regex.*/ssh_user_regex = " + SSH_USER_REGEX + "/' " + tempest_conf_file + cmd = "sed -i 's/.*ssh_user_regex.*/ssh_user_regex = " + SSH_USER_REGEX + \ + "/' " + tempest_conf_file functest_utils.execute_command(cmd, logger) # Copy tempest.conf to /home/opnfv/functest/results/tempest/ @@ -260,9 +269,9 @@ def run_tempest(OPTION): os.getenv('NODE_NAME', 'Unknown'), time.strftime("%a %b %d %H:%M:%S %Z %Y")) - f_stdout = open(TEMPEST_RESULTS_DIR+"/tempest.log", 'w+') - f_stderr = open(TEMPEST_RESULTS_DIR+"/tempest-error.log", 'w+') - f_env = open(TEMPEST_RESULTS_DIR+"/environment.log", 'w+') + f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') + f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') + f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') f_env.write(header) subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) @@ -294,12 +303,12 @@ def run_tempest(OPTION): # Generate json results for DB json_results = {"timestart": time_start, "duration": dur_sec_int, "tests": int(num_tests), "failures": int(num_failures)} - logger.info("Results: "+str(json_results)) + logger.info("Results: " + str(json_results)) status = "failed" try: diff = (int(num_tests) - int(num_failures)) - success_rate = 100*diff/int(num_tests) + success_rate = 100 * diff / int(num_tests) except: success_rate = 0 @@ -317,7 +326,8 @@ def main(): global MODE if not (args.mode in modes): - logger.error("Tempest mode not valid. Possible values are:\n" + str(modes)) + logger.error("Tempest mode not valid. 
" + "Possible values are:\n" + str(modes)) exit(-1) if args.mode == 'custom' or args.mode == 'smoke' or args.mode == 'full': diff --git a/testcases/config_functest.py b/testcases/config_functest.py index 211f927c0..6db2e2b1e 100755 --- a/testcases/config_functest.py +++ b/testcases/config_functest.py @@ -7,20 +7,23 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 # - -import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys, getpass +import argparse +import logging +import os +import shutil +import subprocess +import sys +import yaml import functest_utils import openstack_utils -from git import Repo -from os import stat -from pwd import getpwuid from neutronclient.v2_0 import client as neutronclient actions = ['start', 'check', 'clean'] parser = argparse.ArgumentParser() -parser.add_argument("action", help="Possible actions are: '{d[0]}|{d[1]}|{d[2]}' ".format(d=actions)) -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-f", "--force", help="Force", action="store_true") +parser.add_argument("action", help="Possible actions are: \ + '{d[0]}|{d[1]}|{d[2]}' ".format(d=actions)) +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-f", "--force", help="Force", action="store_true") args = parser.parse_args() @@ -34,14 +37,16 @@ if args.debug: else: ch.setLevel(logging.INFO) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('%(asctime)s - %(name)s - ' + '%(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) -REPOS_DIR=os.environ['repos_dir'] -FUNCTEST_REPO=REPOS_DIR+'/functest/' +REPOS_DIR = os.environ['repos_dir'] +FUNCTEST_REPO = REPOS_DIR + '/functest/' if not os.path.exists(FUNCTEST_REPO): - logger.error("Functest repository directory not found '%s'" % FUNCTEST_REPO) + logger.error("Functest repository directory not found '%s'" + % FUNCTEST_REPO) exit(-1) sys.path.append(FUNCTEST_REPO + "testcases/") @@ -52,20 +57,29 @@ f.close() """ global variables """ # Directories -RALLY_DIR = FUNCTEST_REPO + functest_yaml.get("general").get("directories").get("dir_rally") -RALLY_REPO_DIR = functest_yaml.get("general").get("directories").get("dir_repo_rally") -RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get("dir_rally_inst") -RALLY_RESULT_DIR = functest_yaml.get("general").get("directories").get("dir_rally_res") -TEMPEST_REPO_DIR = functest_yaml.get("general").get("directories").get("dir_repo_tempest") -VPING_DIR = FUNCTEST_REPO + functest_yaml.get("general").get("directories").get("dir_vping") -ODL_DIR = FUNCTEST_REPO + functest_yaml.get("general").get("directories").get("dir_odl") -DATA_DIR = functest_yaml.get("general").get("directories").get("dir_functest_data") +RALLY_DIR = FUNCTEST_REPO + functest_yaml.get("general").\ + get("directories").get("dir_rally") +RALLY_REPO_DIR = functest_yaml.get("general").\ + get("directories").get("dir_repo_rally") +RALLY_INSTALLATION_DIR = functest_yaml.get("general").\ + get("directories").get("dir_rally_inst") +RALLY_RESULT_DIR = functest_yaml.get("general").\ + get("directories").get("dir_rally_res") +TEMPEST_REPO_DIR = functest_yaml.get("general").\ + get("directories").get("dir_repo_tempest") +VPING_DIR = FUNCTEST_REPO + functest_yaml.\ + get("general").get("directories").get("dir_vping") +ODL_DIR = FUNCTEST_REPO + functest_yaml.\ + 
get("general").get("directories").get("dir_odl") +DATA_DIR = functest_yaml.get("general").\ + get("directories").get("dir_functest_data") # Tempest/Rally configuration details DEPLOYMENT_MAME = functest_yaml.get("rally").get("deployment_name") -#Image (cirros) -IMAGE_FILE_NAME = functest_yaml.get("general").get("openstack").get("image_file_name") +# Image (cirros) +IMAGE_FILE_NAME = functest_yaml.get("general").\ + get("openstack").get("image_file_name") IMAGE_PATH = DATA_DIR + "/" + IMAGE_FILE_NAME # NEUTRON Private Network parameters @@ -81,12 +95,14 @@ NEUTRON_ROUTER_NAME = functest_yaml.get("general"). \ creds_neutron = openstack_utils.get_credentials("neutron") neutron_client = neutronclient.Client(**creds_neutron) + def action_start(): """ Start the functest environment installation """ if not functest_utils.check_internet_connectivity(): - logger.info("No Internet connectivity. This may affect some test case suites.") + logger.info("No Internet connectivity. " + "This may affect some test case suites.") if action_check(): logger.info("Functest environment already installed. Nothing to do.") @@ -102,12 +118,13 @@ def action_start(): if private_net is None: # If there is no private network in the deployment we create one if not create_private_neutron_net(neutron_client): - logger.error("There has been a problem while creating the functest network.") + logger.error("There has been a problem while " + "creating the functest network.") action_clean() exit(-1) else: - logger.info("Private network '%s' already existing in the deployment." - % private_net['name']) + logger.info("Private network '%s' already existing in " + "the deployment." % private_net['name']) logger.info("Installing Rally...") if not install_rally(): @@ -134,7 +151,6 @@ def action_check(): Check if the functest environment is properly installed """ errors_all = False - errors = False logger.info("Checking current functest configuration...") logger.debug("Checking script directories...") @@ -143,7 +159,6 @@ def action_check(): for dir in dirs: if not os.path.exists(dir): logger.debug(" %s NOT found" % dir) - errors = True errors_all = True else: logger.debug(" %s found" % dir) @@ -154,72 +169,72 @@ def action_check(): errors_all = True logger.debug("Checking Image...") - errors = False if not os.path.isfile(IMAGE_PATH): logger.debug(" Image file '%s' NOT found." 
% IMAGE_PATH) - errors = True errors_all = True else: logger.debug(" Image file found in %s" % IMAGE_PATH) - - #TODO: check OLD environment setup + # TODO: check OLD environment setup return not errors_all - def action_clean(): """ Clean the existing functest environment """ logger.info("Removing current functest environment...") if os.path.exists(RALLY_INSTALLATION_DIR): - logger.debug("Removing Rally installation directory %s" % RALLY_INSTALLATION_DIR) - shutil.rmtree(RALLY_INSTALLATION_DIR,ignore_errors=True) + logger.debug("Removing Rally installation directory %s" + % RALLY_INSTALLATION_DIR) + shutil.rmtree(RALLY_INSTALLATION_DIR, ignore_errors=True) if os.path.exists(RALLY_RESULT_DIR): logger.debug("Removing Result directory") - shutil.rmtree(RALLY_RESULT_DIR,ignore_errors=True) + shutil.rmtree(RALLY_RESULT_DIR, ignore_errors=True) logger.info("Functest environment clean!") - def install_rally(): if check_rally(): logger.info("Rally is already installed.") else: logger.debug("Creating Rally environment...") - cmd = "rally deployment create --fromenv --name="+DEPLOYMENT_MAME - functest_utils.execute_command(cmd,logger) + cmd = "rally deployment create --fromenv --name=" + DEPLOYMENT_MAME + functest_utils.execute_command(cmd, logger) logger.debug("Installing tempest from existing repo...") - cmd = "rally verify install --source " + TEMPEST_REPO_DIR + " --system-wide" - functest_utils.execute_command(cmd,logger) + cmd = "rally verify install --source " + TEMPEST_REPO_DIR + \ + " --system-wide" + functest_utils.execute_command(cmd, logger) cmd = "rally deployment check" - functest_utils.execute_command(cmd,logger) - #TODO: check that everything is 'Available' and warn if not + functest_utils.execute_command(cmd, logger) + # TODO: check that everything is 'Available' and warn if not cmd = "rally show images" - functest_utils.execute_command(cmd,logger) + functest_utils.execute_command(cmd, logger) cmd = "rally show flavors" - functest_utils.execute_command(cmd,logger) + functest_utils.execute_command(cmd, logger) return True + def check_rally(): """ Check if Rally is installed and properly configured """ if os.path.exists(RALLY_INSTALLATION_DIR): - logger.debug(" Rally installation directory found in %s" % RALLY_INSTALLATION_DIR) - FNULL = open(os.devnull, 'w'); - cmd="rally deployment list | grep "+DEPLOYMENT_MAME + logger.debug(" Rally installation directory found in %s" + % RALLY_INSTALLATION_DIR) + FNULL = open(os.devnull, 'w') + cmd = "rally deployment list | grep " + DEPLOYMENT_MAME logger.debug(' Executing command : {}'.format(cmd)) - p=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=FNULL); - #if the command does not exist or there is no deployment + p = subprocess.Popen(cmd, shell=True, + stdout=subprocess.PIPE, stderr=FNULL) + # if the command does not exist or there is no deployment line = p.stdout.readline() if line == "": logger.debug(" Rally deployment NOT found") @@ -240,7 +255,8 @@ def create_private_neutron_net(neutron): return False logger.debug("Network '%s' created successfully." % network_id) - logger.info("Updating network '%s' with shared=True..." % NEUTRON_PRIVATE_NET_NAME) + logger.info("Updating network '%s' with shared=True..." + % NEUTRON_PRIVATE_NET_NAME) if openstack_utils.update_neutron_net(neutron, network_id, shared=True): logger.debug("Network '%s' updated successfully." % network_id) else: @@ -265,15 +281,14 @@ def create_private_neutron_net(neutron): logger.debug("Router '%s' created successfully." 
% router_id) logger.info("Adding router to subnet...") - result = openstack_utils.add_interface_router(neutron, router_id, subnet_id) + result = openstack_utils.add_interface_router(neutron, + router_id, + subnet_id) if not result: return False logger.debug("Interface added successfully.") - network_dic = {'net_id': network_id, - 'subnet_id': subnet_id, - 'router_id': router_id} return True @@ -282,13 +297,12 @@ def main(): logger.error('argument not valid') exit(-1) - if not openstack_utils.check_credentials(): - logger.error("Please source the openrc credentials and run the script again.") - #TODO: source the credentials in this script + logger.error("Please source the openrc credentials and " + "run the script again.") + # TODO: source the credentials in this script exit(-1) - if args.action == "start": action_start() @@ -299,9 +313,9 @@ def main(): logger.info("Functest environment not found or faulty") if args.action == "clean": - if args.force : + if args.force: action_clean() - else : + else: while True: print("Are you sure? [y|n]") answer = raw_input("") @@ -317,4 +331,3 @@ def main(): if __name__ == '__main__': main() - diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml index d9432316c..9884523a1 100644 --- a/testcases/config_functest.yaml +++ b/testcases/config_functest.yaml @@ -34,7 +34,7 @@ general: image_file_name: cirros-0.3.4-x86_64-disk.img image_disk_format: qcow2 - #Private network for functest. Will be created by config_functest.py + # Private network for functest. Will be created by config_functest.py neutron_private_net_name: functest-net neutron_private_subnet_name: functest-subnet neutron_private_subnet_cidr: 192.168.120.0/24 @@ -45,7 +45,7 @@ general: vping: ping_timeout: 200 - vm_flavor: m1.small #adapt to your environment + vm_flavor: m1.small # adapt to your environment vm_name_1: opnfv-vping-1 vm_name_2: opnfv-vping-2 image_name: functest-vping diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py index a225d5a50..0c1f713e7 100644 --- a/testcases/features/doctor.py +++ b/testcases/features/doctor.py @@ -35,7 +35,8 @@ logger = logging.getLogger('doctor') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - ' + + '%(message)s') ch.setFormatter(formatter) logger.addHandler(ch) diff --git a/testcases/features/promise.py b/testcases/features/promise.py index 832083d04..d501d6764 100644 --- a/testcases/features/promise.py +++ b/testcases/features/promise.py @@ -80,11 +80,6 @@ ch.setFormatter(formatter) logger.addHandler(ch) -def create_image(glance_client, name): - - return image_id - - def main(): ks_creds = openstack_utils.get_credentials("keystone") nv_creds = openstack_utils.get_credentials("nova") @@ -117,7 +112,8 @@ def main(): exit(-1) logger.info("Adding role '%s' to tenant '%s'..." 
% (role_id, TENANT_NAME)) - if not openstack_utils.add_role_user(keystone, user_id, role_id, tenant_id): + if not openstack_utils.add_role_user(keystone, user_id, + role_id, tenant_id): logger.error("Error : Failed to add %s on tenant %s" % (ks_creds['username'], TENANT_NAME)) exit(-1) @@ -147,16 +143,17 @@ def main(): "project_id": TENANT_NAME, }) - glance_endpoint = keystone.service_catalog.url_for(service_type='image', - endpoint_type='publicURL') + glance_endpoint = keystone.\ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token) nova = nvclient.Client("2", **nv_creds) logger.info("Creating image '%s' from '%s'..." % (IMAGE_NAME, GLANCE_IMAGE_PATH)) image_id = openstack_utils.create_glance_image(glance, - IMAGE_NAME, - GLANCE_IMAGE_PATH) + IMAGE_NAME, + GLANCE_IMAGE_PATH) if not image_id: logger.error("Failed to create the Glance image...") exit(-1) @@ -166,23 +163,24 @@ def main(): if flavor_id == '': logger.info("Creating flavor '%s'..." % FLAVOR_NAME) flavor_id = openstack_utils.create_flavor(nova, - FLAVOR_NAME, - FLAVOR_RAM, - FLAVOR_DISK, - FLAVOR_VCPUS) + FLAVOR_NAME, + FLAVOR_RAM, + FLAVOR_DISK, + FLAVOR_VCPUS) if not flavor_id: logger.error("Failed to create the Flavor...") exit(-1) logger.debug("Flavor '%s' with ID '%s' created successfully." % (FLAVOR_NAME, flavor_id)) else: - logger.debug("Using existing flavor '%s' with ID '%s'..." % (FLAVOR_NAME, - flavor_id)) + logger.debug("Using existing flavor '%s' with ID '%s'..." + % (FLAVOR_NAME, flavor_id)) neutron = ntclient.Client(**nt_creds) private_net = openstack_utils.get_private_net(neutron) if private_net is None: - logger.error("There is no private network in the deployment. Aborting...") + logger.error("There is no private network in the deployment." + "Aborting...") exit(-1) logger.debug("Using private network '%s' (%s)." % (private_net['name'], private_net['id'])) @@ -202,16 +200,16 @@ def main(): cmd = 'npm run -s test -- --reporter json' logger.info("Running command: %s" % cmd) - ret = subprocess.call(cmd, shell=True, stdout=results_file, \ + ret = subprocess.call(cmd, shell=True, stdout=results_file, stderr=subprocess.STDOUT) results_file.close() if ret == 0: logger.info("The test succeeded.") - test_status = 'OK' + # test_status = 'OK' else: logger.info("The command '%s' failed." 
% cmd) - test_status = "Failed" + # test_status = "Failed" # Print output of file with open(results_file_name, 'r') as results_file: @@ -226,34 +224,35 @@ def main(): failures = json_data["stats"]["failures"] start_time = json_data["stats"]["start"] end_time = json_data["stats"]["end"] - duration = float(json_data["stats"]["duration"])/float(1000) - - logger.info("\n" \ - "****************************************\n"\ - " Promise test report\n\n"\ - "****************************************\n"\ - " Suites: \t%s\n"\ - " Tests: \t%s\n"\ - " Passes: \t%s\n"\ - " Pending: \t%s\n"\ - " Failures:\t%s\n"\ - " Start: \t%s\n"\ - " End: \t%s\n"\ - " Duration:\t%s\n"\ - "****************************************\n\n"\ - % (suites, tests, passes, pending, failures, start_time, end_time, duration)) + duration = float(json_data["stats"]["duration"]) / float(1000) + + logger.info("\n" + "****************************************\n" + " Promise test report\n\n" + "****************************************\n" + " Suites: \t%s\n" + " Tests: \t%s\n" + " Passes: \t%s\n" + " Pending: \t%s\n" + " Failures:\t%s\n" + " Start: \t%s\n" + " End: \t%s\n" + " Duration:\t%s\n" + "****************************************\n\n" + % (suites, tests, passes, pending, failures, + start_time, end_time, duration)) if args.report: pod_name = functest_utils.get_pod_name(logger) installer = functest_utils.get_installer_type(logger) scenario = functest_utils.get_scenario(logger) build_tag = functest_utils.get_build_tag(logger) - git_version = functest_utils.get_git_branch(PROMISE_REPO) + # git_version = functest_utils.get_git_branch(PROMISE_REPO) url = TEST_DB + "/results" json_results = {"timestart": start_time, "duration": duration, "tests": int(tests), "failures": int(failures)} - logger.debug("Results json: "+str(json_results)) + logger.debug("Results json: " + str(json_results)) # criteria for Promise in Release B was 100% of tests OK status = "failed" diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py index 8111959f5..1fccdca8d 100644 --- a/testcases/functest_utils.py +++ b/testcases/functest_utils.py @@ -16,7 +16,6 @@ import requests import shutil import socket import subprocess -import sys import urllib2 from git import Repo @@ -113,7 +112,8 @@ def get_pod_name(logger=None): except KeyError: if logger: logger.error( - "Unable to retrieve the POD name from environment.Using pod name 'unknown-pod'") + "Unable to retrieve the POD name from environment. 
" + + "Using pod name 'unknown-pod'") return "unknown-pod" @@ -150,8 +150,10 @@ def push_results_to_db(db_url, project, case_name, logger, pod_name, logger.debug(r) return True except Exception, e: - print "Error [push_results_to_db('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')]:" \ - % (db_url, project, case_name, pod_name, version, scenario, criteria, build_tag, payload), e + print "Error [push_results_to_db('%s', '%s', '%s', " + \ + "'%s', '%s', '%s', '%s', '%s', '%s')]:" \ + % (db_url, project, case_name, pod_name, version, + scenario, criteria, build_tag, payload), e return False diff --git a/testcases/openstack_utils.py b/testcases/openstack_utils.py index 6ef6d061a..32eadd8af 100644 --- a/testcases/openstack_utils.py +++ b/testcases/openstack_utils.py @@ -59,14 +59,17 @@ def get_credentials(service): tenant: os.environ.get("OS_TENANT_NAME", "admin"), }) cacert = os.environ.get("OS_CACERT") - if cacert != None: + if cacert is not None: # each openstack client uses differnt kwargs for this - creds.update({"cacert":cacert,"ca_cert":cacert,"https_ca_cert":cacert, \ - "https_cacert":cacert,"ca_file":cacert}) - creds.update({"insecure":"True","https_insecure":"True"}) + creds.update({"cacert": cacert, + "ca_cert": cacert, + "https_ca_cert": cacert, + "https_cacert": cacert, + "ca_file": cacert}) + creds.update({"insecure": "True", "https_insecure": "True"}) if not os.path.isfile(cacert): - print "WARNING: The 'OS_CACERT' environment variable is set to %s "\ - "but the file does not exist." % cacert + print "WARNING: The 'OS_CACERT' environment variable is " + \ + "set to %s but the file does not exist." % cacert return creds @@ -178,7 +181,8 @@ def delete_floating_ip(nova_client, floatingip_id): nova_client.floating_ips.delete(floatingip_id) return True except Exception, e: - print "Error [delete_floating_ip(nova_client, '%s')]:" % floatingip_id, e + print "Error [delete_floating_ip(nova_client, '%s')]:" \ + % floatingip_id, e return False @@ -365,7 +369,8 @@ def add_gateway_router(neutron_client, router_id): neutron_client.add_gateway_router(router_id, router_dict) return True except Exception, e: - print "Error [add_gateway_router(neutron_client, '%s')]:" % router_id, e + print "Error [add_gateway_router(neutron_client, '%s')]:" \ + % router_id, e return False @@ -374,7 +379,8 @@ def delete_neutron_net(neutron_client, network_id): neutron_client.delete_network(network_id) return True except Exception, e: - print "Error [delete_neutron_net(neutron_client, '%s')]:" % network_id, e + print "Error [delete_neutron_net(neutron_client, '%s')]:" \ + % network_id, e return False @@ -383,12 +389,12 @@ def delete_neutron_subnet(neutron_client, subnet_id): neutron_client.delete_subnet(subnet_id) return True except Exception, e: - print "Error [delete_neutron_subnet(neutron_client, '%s')]:" % subnet_id, e + print "Error [delete_neutron_subnet(neutron_client, '%s')]:" \ + % subnet_id, e return False def delete_neutron_router(neutron_client, router_id): - json_body = {'router': {'id': router_id}} try: neutron_client.delete_router(router=router_id) return True @@ -414,8 +420,8 @@ def remove_interface_router(neutron_client, router_id, subnet_id): body=json_body) return True except Exception, e: - print "Error [remove_interface_router(neutron_client, '%s', '%s')]:" % \ - (router_id, subnet_id), e + print "Error [remove_interface_router(neutron_client, '%s', '%s')]:" \ + % (router_id, subnet_id), e return False @@ -424,7 +430,8 @@ def remove_gateway_router(neutron_client, router_id): 
neutron_client.remove_gateway_router(router_id) return True except Exception, e: - print "Error [remove_gateway_router(neutron_client, '%s')]:" % router_id, e + print "Error [remove_gateway_router(neutron_client, '%s')]:" \ + % router_id, e return False @@ -478,7 +485,8 @@ def create_secgroup_rule(neutron_client, sg_id, direction, protocol, else: print "Error [create_secgroup_rule(neutron_client, '%s', '%s', "\ "'%s', '%s', '%s', '%s')]:" % (neutron_client, sg_id, direction, - port_range_min, port_range_max, protocol),\ + port_range_min, port_range_max, + protocol),\ " Invalid values for port_range_min, port_range_max" return False try: @@ -509,11 +517,11 @@ def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota): }} try: - quota = neutron_client.update_quota(tenant_id=tenant_id, - body=json_body) + neutron_client.update_quota(tenant_id=tenant_id, + body=json_body) return True except Exception, e: - print "Error [update_sg_quota(neutron_client, '%s', '%s', "\ + print "Error [update_sg_quota(neutron_client, '%s', '%s', " \ "'%s')]:" % (tenant_id, sg_quota, sg_rule_quota), e return False @@ -523,7 +531,8 @@ def delete_security_group(neutron_client, secgroup_id): neutron_client.delete_security_group(secgroup_id) return True except Exception, e: - print "Error [delete_security_group(neutron_client, '%s')]:" % secgroup_id, e + print "Error [delete_security_group(neutron_client, '%s')]:" \ + % secgroup_id, e return False @@ -617,8 +626,7 @@ def update_cinder_quota(cinder_client, tenant_id, vols_quota, "gigabytes": gigabytes_quota} try: - quotas_default = cinder_client.quotas.update(tenant_id, - **quotas_values) + cinder_client.quotas.update(tenant_id, **quotas_values) return True except Exception, e: print "Error [update_cinder_quota(cinder_client, '%s', '%s', '%s'" \ @@ -649,7 +657,8 @@ def delete_volume_type(cinder_client, volume_type): cinder_client.volume_types.delete(volume_type) return True except Exception, e: - print "Error [delete_volume_type(cinder_client, '%s')]:" % volume_type, e + print "Error [delete_volume_type(cinder_client, '%s')]:" \ + % volume_type, e return False @@ -741,7 +750,7 @@ def add_role_user(keystone_client, user_id, role_id, tenant_id): def delete_tenant(keystone_client, tenant_id): try: - tenant = keystone_client.tenants.delete(tenant_id) + keystone_client.tenants.delete(tenant_id) return True except Exception, e: print "Error [delete_tenant(keystone_client, '%s')]:" % tenant_id, e @@ -750,7 +759,7 @@ def delete_tenant(keystone_client, tenant_id): def delete_user(keystone_client, user_id): try: - tenant = keystone_client.users.delete(user_id) + keystone_client.users.delete(user_id) return True except Exception, e: print "Error [delete_user(keystone_client, '%s')]:" % user_id, e diff --git a/testcases/tests/TestFunctestUtils.py b/testcases/tests/TestFunctestUtils.py index fd83ed6f5..024088b13 100644 --- a/testcases/tests/TestFunctestUtils.py +++ b/testcases/tests/TestFunctestUtils.py @@ -80,12 +80,12 @@ class TestFunctestUtils(unittest.TestCase): test = isTestRunnable('functest/vims', functest_yaml) self.assertTrue(test) - def test_generateTestcaseList(self): test = generateTestcaseList(functest_yaml) - expected_list = "vping_ssh vping_userdata tempest odl doctor promise policy-test odl-vpn_service-tests vims rally " + expected_list = "vping_ssh vping_userdata tempest odl doctor " + \ + "promise policy-test odl-vpn_service-tests vims rally " self.assertEqual(test, expected_list) def tearDown(self): @@ -95,4 +95,3 @@ class 
TestFunctestUtils(unittest.TestCase): if __name__ == '__main__': unittest.main() - diff --git a/testcases/vIMS/CI/clearwater.py b/testcases/vIMS/CI/clearwater.py index 364de68f4..7236f4fba 100644 --- a/testcases/vIMS/CI/clearwater.py +++ b/testcases/vIMS/CI/clearwater.py @@ -10,8 +10,6 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ######################################################################## -import sys -import subprocess class clearwater: @@ -40,7 +38,8 @@ class clearwater: def set_public_domain(self, public_domain): self.config['public_domain'] = public_domain - def deploy_vnf(self, blueprint, bp_name='clearwater', dep_name='clearwater-opnfv'): + def deploy_vnf(self, blueprint, bp_name='clearwater', + dep_name='clearwater-opnfv'): if self.orchestrator: self.dep_name = dep_name error = self.orchestrator.download_upload_and_deploy_blueprint( @@ -52,7 +51,7 @@ class clearwater: else: if self.logger: - logger.error("Cloudify manager is down or not provide...") + self.logger.error("Cloudify manager is down or not provide...") def undeploy_vnf(self): if self.orchestrator: @@ -61,7 +60,7 @@ class clearwater: self.orchestrator.undeploy_deployment(self.dep_name) else: if self.logger: - logger.error("Clearwater isn't already deploy...") + self.logger.error("Clearwater isn't already deploy...") else: if self.logger: - logger.error("Cloudify manager is down or not provide...") + self.logger.error("Cloudify manager is down or not provide...") diff --git a/testcases/vIMS/CI/orchestrator.py b/testcases/vIMS/CI/orchestrator.py index 965c2646d..7dbbda716 100644 --- a/testcases/vIMS/CI/orchestrator.py +++ b/testcases/vIMS/CI/orchestrator.py @@ -10,7 +10,6 @@ # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ######################################################################## -import sys import subprocess import os import shutil @@ -56,17 +55,21 @@ class orchestrator: def set_logger(self, logger): self.logger = logger - def download_manager_blueprint(self, manager_blueprint_url, manager_blueprint_branch): + def download_manager_blueprint(self, manager_blueprint_url, + manager_blueprint_branch): if self.manager_blueprint: if self.logger: self.logger.info( - "cloudify manager server blueprint is already downloaded !") + "cloudify manager server blueprint is " + "already downloaded !") else: if self.logger: self.logger.info( "Downloading the cloudify manager server blueprint") download_result = download_blueprints( - manager_blueprint_url, manager_blueprint_branch, self.blueprint_dir) + manager_blueprint_url, + manager_blueprint_branch, + self.blueprint_dir) if not download_result: if self.logger: @@ -76,7 +79,7 @@ class orchestrator: self.manager_blueprint = True def manager_up(self): - return manager_up + return self.manager_up def deploy_manager(self): if self.manager_blueprint: @@ -98,13 +101,16 @@ class orchestrator: if self.logger: self.logger.info("Launching the cloudify-manager deployment") script = "set -e; " - script += "source " + self.testcase_dir + "venv_cloudify/bin/activate; " + script += "source " + self.testcase_dir + \ + "venv_cloudify/bin/activate; " script += "cd " + self.testcase_dir + "; " script += "cfy init -r; " script += "cd cloudify-manager-blueprint; " - script += "cfy local create-requirements -o requirements.txt -p openstack-manager-blueprint.yaml; " + script += "cfy local create-requirements -o requirements.txt " + \ + "-p 
openstack-manager-blueprint.yaml; " script += "pip install -r requirements.txt; " - script += "timeout 1800 cfy bootstrap --install-plugins -p openstack-manager-blueprint.yaml -i inputs.yaml; " + script += "timeout 1800 cfy bootstrap --install-plugins " + \ + "-p openstack-manager-blueprint.yaml -i inputs.yaml; " cmd = "/bin/bash -c '" + script + "'" error = execute_command(cmd, self.logger) if error: @@ -131,23 +137,30 @@ class orchestrator: self.logger.info( "Cloudify-manager server has been successfully removed!") - def download_upload_and_deploy_blueprint(self, blueprint, config, bp_name, dep_name): + def download_upload_and_deploy_blueprint(self, blueprint, config, + bp_name, dep_name): if self.logger: self.logger.info("Downloading the {0} blueprint".format( blueprint['file_name'])) - download_result = download_blueprints(blueprint['url'], blueprint['branch'], - self.testcase_dir + blueprint['destination_folder']) + download_result = download_blueprints(blueprint['url'], + blueprint['branch'], + self.testcase_dir + + blueprint['destination_folder']) if not download_result: if self.logger: self.logger.error( - "Failed to download blueprint {0}".format(blueprint['file_name'])) + "Failed to download blueprint {0}". + format(blueprint['file_name'])) exit(-1) if self.logger: self.logger.info("Writing the inputs file") - with open(self.testcase_dir + blueprint['destination_folder'] + "/inputs.yaml", "w") as f: + + with open(self.testcase_dir + blueprint['destination_folder'] + + "/inputs.yaml", "w") as f: f.write(yaml.dump(config, default_style='"')) + f.close() if self.logger: @@ -159,7 +172,8 @@ class orchestrator: bp_name + " -p openstack-blueprint.yaml; " script += "cfy deployments create -b " + bp_name + \ " -d " + dep_name + " --inputs inputs.yaml; " - script += "cfy executions start -w install -d " + dep_name + " --timeout 1800; " + script += "cfy executions start -w install -d " \ + + dep_name + " --timeout 1800; " cmd = "/bin/bash -c '" + script + "'" error = execute_command(cmd, self.logger) @@ -173,7 +187,8 @@ class orchestrator: self.logger.info("Launching the {0} undeployment".format(dep_name)) script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " script += "cd " + self.testcase_dir + "; " - script += "cfy executions start -w uninstall -d " + dep_name + " --timeout 1800 ; " + script += "cfy executions start -w uninstall -d " + dep_name \ + + " --timeout 1800 ; " script += "cfy deployments delete -d " + dep_name + "; " cmd = "/bin/bash -c '" + script + "'" diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py index 3eef5b381..2518855cd 100644 --- a/testcases/vIMS/CI/vIMS.py +++ b/testcases/vIMS/CI/vIMS.py @@ -1,4 +1,4 @@ - #!/usr/bin/python +#!/usr/bin/python # coding: utf8 ####################################################################### # @@ -27,14 +27,14 @@ import glanceclient.client as glclient import novaclient.client as nvclient from neutronclient.v2_0 import client as ntclient -from orchestrator import * -from clearwater import * +import orchestrator +import clearwater pp = pprint.PrettyPrinter(indent=4) parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") parser.add_argument("-r", "--report", help="Create json result file", action="store_true") @@ -127,7 +127,7 @@ def step_failure(step_name, error_msg): set_result(step_name, 0, error_msg) status = "failed" if step_name == "sig_test": - status = 
"passed" + status = "passed" push_results(status) exit(-1) @@ -167,10 +167,12 @@ def test_clearwater(): mgr_ip = os.popen(cmd).read() mgr_ip = mgr_ip.splitlines()[0] except: - step_failure("sig_test", "Unable to retrieve the IP of the cloudify manager server !") + step_failure("sig_test", "Unable to retrieve the IP of the " + "cloudify manager server !") api_url = "http://" + mgr_ip + "/api/v2" - dep_outputs = requests.get(api_url + "/deployments/" + CW_DEPLOYMENT_NAME + "/outputs") + dep_outputs = requests.get(api_url + "/deployments/" + + CW_DEPLOYMENT_NAME + "/outputs") dns_ip = dep_outputs.json()['outputs']['dns_ip'] ellis_ip = dep_outputs.json()['outputs']['ellis_ip'] @@ -186,7 +188,7 @@ def test_clearwater(): i = 20 while rq.status_code != 201 and i > 0: rq = requests.post(url, data=params) - i = i-1 + i = i - 1 time.sleep(10) if rq.status_code == 201: @@ -200,11 +202,12 @@ def test_clearwater(): i = 24 while rq.status_code != 200 and i > 0: rq = requests.post(url, cookies=cookies) - i = i-1 + i = i - 1 time.sleep(25) if rq.status_code != 200: - step_failure("sig_test", "Unable to create a number: %s" % rq.json()['reason']) + step_failure("sig_test", "Unable to create a number: %s" + % rq.json()['reason']) start_time_ts = time.time() end_time_ts = start_time_ts @@ -217,7 +220,8 @@ def test_clearwater(): resolvconf += "\nnameserver " + ns if dns_ip != "": - script = 'echo -e "nameserver ' + dns_ip + resolvconf + '" > /etc/resolv.conf; ' + script = 'echo -e "nameserver ' + dns_ip + resolvconf + \ + '" > /etc/resolv.conf; ' script += 'source /etc/profile.d/rvm.sh; ' script += 'cd ' + VIMS_TEST_DIR + '; ' script += 'rake test[' + \ @@ -226,8 +230,8 @@ def test_clearwater(): cmd = "/bin/bash -c '" + script + "'" output_file = "output.txt" f = open(output_file, 'w+') - p = subprocess.call(cmd, shell=True, stdout=f, - stderr=subprocess.STDOUT) + subprocess.call(cmd, shell=True, stdout=f, + stderr=subprocess.STDOUT) f.close() end_time_ts = time.time() duration = round(end_time_ts - start_time_ts, 1) @@ -253,7 +257,8 @@ def test_clearwater(): # - VNF deployed status = "failed" try: - if RESULTS['orchestrator']['duration'] > 0 and RESULTS['vIMS']['duration'] > 0: + if (RESULTS['orchestrator']['duration'] > 0 and + RESULTS['vIMS']['duration'] > 0): status = "passed" except: logger.error("Unable to set test status") @@ -299,7 +304,8 @@ def main(): if role_id == '': logger.error("Error : Failed to get id for %s role" % role_name) - if not openstack_utils.add_role_user(keystone, user_id, role_id, tenant_id): + if not openstack_utils.add_role_user(keystone, user_id, + role_id, tenant_id): logger.error("Error : Failed to add %s on tenant" % ks_creds['username']) @@ -324,8 +330,9 @@ def main(): }) logger.info("Upload some OS images if it doesn't exist") - glance_endpoint = keystone.service_catalog.url_for(service_type='image', - endpoint_type='publicURL') + glance_endpoint = keystone.\ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token) for img in IMAGES.keys(): @@ -335,14 +342,16 @@ def main(): image_id = openstack_utils.get_image_id(glance, image_name) if image_id == '': - logger.info("""%s image doesn't exist on glance repository. - Try downloading this image and upload on glance !""" % image_name) + logger.info("""%s image doesn't exist on glance repository. 
Try + downloading this image and upload on glance !""" % image_name) image_id = download_and_add_image_on_glance( glance, image_name, image_url) if image_id == '': step_failure( - "init", "Error : Failed to find or upload required OS image for this deployment") + "init", + "Error : Failed to find or upload required OS " + "image for this deployment") nova = nvclient.Client("2", **nv_creds) @@ -350,7 +359,8 @@ def main(): neutron = ntclient.Client(**nt_creds) if not openstack_utils.update_sg_quota(neutron, tenant_id, 50, 100): step_failure( - "init", "Failed to update security group quota for tenant " + TENANT_NAME) + "init", + "Failed to update security group quota for tenant " + TENANT_NAME) logger.info("Update cinder quota for this tenant") from cinderclient import client as cinderclient @@ -361,7 +371,8 @@ def main(): creds_cinder['project_id'], creds_cinder['auth_url'], service_type="volume") - if not openstack_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150): + if not openstack_utils.update_cinder_quota(cinder_client, tenant_id, + 20, 10, 150): step_failure( "init", "Failed to update cinder quota for tenant " + TENANT_NAME) @@ -370,7 +381,8 @@ def main(): cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger) cfy.set_credentials(username=ks_creds['username'], password=ks_creds[ - 'password'], tenant_name=ks_creds['tenant_name'], auth_url=ks_creds['auth_url']) + 'password'], tenant_name=ks_creds['tenant_name'], + auth_url=ks_creds['auth_url']) logger.info("Collect flavor id for cloudify manager server") nova = nvclient.Client("2", **nv_creds) @@ -384,8 +396,10 @@ def main(): if flavor_id == '': logger.error( - "Failed to find %s flavor. Try with ram range default requirement !" % flavor_name) - flavor_id = openstack_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) + "Failed to find %s flavor. " + "Try with ram range default requirement !" % flavor_name) + flavor_id = openstack_utils.\ + get_flavor_id_by_ram_range(nova, 4000, 8196) if flavor_id == '': step_failure("orchestrator", @@ -402,7 +416,8 @@ def main(): if image_id == '': step_failure( - "orchestrator", "Error : Failed to find required OS image for cloudify manager") + "orchestrator", + "Error : Failed to find required OS image for cloudify manager") cfy.set_image_id(image_id) @@ -458,8 +473,10 @@ def main(): if flavor_id == '': logger.error( - "Failed to find %s flavor. Try with ram range default requirement !" % flavor_name) - flavor_id = openstack_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) + "Failed to find %s flavor. Try with ram range " + "default requirement !" 
% flavor_name) + flavor_id = openstack_utils.\ + get_flavor_id_by_ram_range(nova, 4000, 8196) if flavor_id == '': step_failure( @@ -476,7 +493,8 @@ def main(): if image_id == '': step_failure( - "vIMS", "Error : Failed to find required OS image for cloudify manager") + "vIMS", + "Error : Failed to find required OS image for cloudify manager") cw.set_image_id(image_id) diff --git a/testcases/vPing/CI/libraries/vPing_ssh.py b/testcases/vPing/CI/libraries/vPing_ssh.py index 7adf8a23d..43ab8525e 100644 --- a/testcases/vPing/CI/libraries/vPing_ssh.py +++ b/testcases/vPing/CI/libraries/vPing_ssh.py @@ -63,7 +63,7 @@ ch.setFormatter(formatter) logger.addHandler(ch) paramiko.util.log_to_file("/var/log/paramiko.log") -REPO_PATH = os.environ['repos_dir']+'/functest/' +REPO_PATH = os.environ['repos_dir'] + '/functest/' if not os.path.exists(REPO_PATH): logger.error("Functest repository directory not found '%s'" % REPO_PATH) exit(-1) @@ -159,15 +159,20 @@ def waitVmDeleted(nova, vm): def create_private_neutron_net(neutron): # Check if the network already exists - network_id = openstack_utils.get_network_id(neutron, NEUTRON_PRIVATE_NET_NAME) - subnet_id = openstack_utils.get_subnet_id(neutron, NEUTRON_PRIVATE_SUBNET_NAME) - router_id = openstack_utils.get_router_id(neutron, NEUTRON_ROUTER_NAME) + network_id = openstack_utils.get_network_id(neutron, + NEUTRON_PRIVATE_NET_NAME) + subnet_id = openstack_utils.get_subnet_id(neutron, + NEUTRON_PRIVATE_SUBNET_NAME) + router_id = openstack_utils.get_router_id(neutron, + NEUTRON_ROUTER_NAME) if network_id != '' and subnet_id != '' and router_id != '': - logger.info("Using existing network '%s'..." % NEUTRON_PRIVATE_NET_NAME) + logger.info("Using existing network '%s'..." + % NEUTRON_PRIVATE_NET_NAME) else: neutron.format = 'json' - logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME) + logger.info('Creating neutron network %s...' + % NEUTRON_PRIVATE_NET_NAME) network_id = openstack_utils. \ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME) @@ -193,7 +198,9 @@ def create_private_neutron_net(neutron): logger.debug("Router '%s' created successfully" % router_id) logger.debug('Adding router to subnet...') - if not openstack_utils.add_interface_router(neutron, router_id, subnet_id): + if not openstack_utils.add_interface_router(neutron, + router_id, + subnet_id): return False logger.debug("Interface added successfully.") @@ -209,37 +216,42 @@ def create_private_neutron_net(neutron): def create_security_group(neutron_client): - sg_id = openstack_utils.get_security_group_id(neutron_client, SECGROUP_NAME) + sg_id = openstack_utils.get_security_group_id(neutron_client, + SECGROUP_NAME) if sg_id != '': logger.info("Using existing security group '%s'..." % SECGROUP_NAME) else: logger.info("Creating security group '%s'..." % SECGROUP_NAME) SECGROUP = openstack_utils.create_security_group(neutron_client, - SECGROUP_NAME, - SECGROUP_DESCR) + SECGROUP_NAME, + SECGROUP_DESCR) if not SECGROUP: logger.error("Failed to create the security group...") return False sg_id = SECGROUP['id'] - logger.debug("Security group '%s' with ID=%s created successfully." %\ - (SECGROUP['name'], sg_id)) + logger.debug("Security group '%s' with ID=%s created successfully." + % (SECGROUP['name'], sg_id)) - logger.debug("Adding ICMP rules in security group '%s'..." % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \ - 'ingress', 'icmp'): + logger.debug("Adding ICMP rules in security group '%s'..." 
+ % SECGROUP_NAME) + if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, + 'ingress', 'icmp'): logger.error("Failed to create the security group rule...") return False - logger.debug("Adding SSH rules in security group '%s'..." % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \ - 'ingress', 'tcp', '22', '22'): + logger.debug("Adding SSH rules in security group '%s'..." + % SECGROUP_NAME) + if not openstack_utils.\ + create_secgroup_rule(neutron_client, sg_id, + 'ingress', 'tcp', '22', '22'): logger.error("Failed to create the security group rule...") return False - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \ - 'egress', 'tcp', '22', '22'): + if not openstack_utils.\ + create_secgroup_rule(neutron_client, sg_id, + 'egress', 'tcp', '22', '22'): logger.error("Failed to create the security group rule...") return False return sg_id @@ -290,7 +302,7 @@ def cleanup(nova, neutron, image_id, network_dic, sg_id, floatingip): router_id = network_dic["router_id"] if not openstack_utils.remove_interface_router(neutron, router_id, - subnet_id): + subnet_id): logger.error("Unable to remove subnet '%s' from router '%s'" % ( subnet_id, router_id)) return False @@ -324,7 +336,8 @@ def cleanup(nova, neutron, image_id, network_dic, sg_id, floatingip): logger.debug("Releasing floating ip '%s'..." % floatingip['fip_addr']) if not openstack_utils.delete_floating_ip(nova, floatingip['fip_id']): - logger.error("Unable to delete floatingip '%s'" % floatingip['fip_addr']) + logger.error("Unable to delete floatingip '%s'" + % floatingip['fip_addr']) return False logger.debug( "Floating IP '%s' deleted successfully" % floatingip['fip_addr']) @@ -350,7 +363,8 @@ def push_results(start_time_ts, duration, test_status): 'duration': duration, 'status': test_status}) except: - logger.error("Error pushing results into Database '%s'" % sys.exc_info()[0]) + logger.error("Error pushing results into Database '%s'" + % sys.exc_info()[0]) def main(): @@ -361,8 +375,9 @@ def main(): neutron_client = neutronclient.Client(**creds_neutron) creds_keystone = openstack_utils.get_credentials("keystone") keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for(service_type='image', - endpoint_type='publicURL') + glance_endpoint = keystone_client.\ + service_catalog.url_for(service_type='image', + endpoint_type='publicURL') glance_client = glanceclient.Client(1, glance_endpoint, token=keystone_client.auth_token) EXIT_CODE = -1 @@ -380,13 +395,13 @@ def main(): logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH)) image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) + GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH) if not image_id: logger.error("Failed to create a Glance image...") return(EXIT_CODE) - logger.debug("Image '%s' with ID=%s created successfully." %\ - (GLANCE_IMAGE_NAME, image_id)) + logger.debug("Image '%s' with ID=%s created successfully." + % (GLANCE_IMAGE_NAME, image_id)) network_dic = create_private_neutron_net(neutron_client) if not network_dic: @@ -437,7 +452,8 @@ def main(): if not waitVmActive(nova_client, vm1): logger.error("Instance '%s' cannot be booted. 
Status is '%s'" % ( NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1))) - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatingip) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatingip) return (EXIT_CODE) else: logger.info("Instance '%s' is ACTIVE." % NAME_VM_1) @@ -446,7 +462,8 @@ def main(): test_ip = vm1.networks.get(NEUTRON_PRIVATE_NET_NAME)[0] logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip)) - logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_1, SECGROUP_NAME)) + logger.info("Adding '%s' to security group '%s'..." + % (NAME_VM_1, SECGROUP_NAME)) openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id) # boot VM 2 @@ -464,29 +481,34 @@ def main(): if not waitVmActive(nova_client, vm2): logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2))) - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatip_dic) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatip_dic) return (EXIT_CODE) else: logger.info("Instance '%s' is ACTIVE." % NAME_VM_2) - logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2, SECGROUP_NAME)) + logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2, + SECGROUP_NAME)) openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id) logger.info("Creating floating IP for VM '%s'..." % NAME_VM_2) floatip_dic = openstack_utils.create_floating_ip(neutron_client) floatip = floatip_dic['fip_addr'] - floatip_id = floatip_dic['fip_id'] + # floatip_id = floatip_dic['fip_id'] if floatip is None: logger.error("Cannot create floating IP.") - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatip_dic) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatip_dic) return (EXIT_CODE) logger.info("Floating IP created: '%s'" % floatip) - logger.info("Associating floating ip: '%s' to VM '%s' " % (floatip, NAME_VM_2)) + logger.info("Associating floating ip: '%s' to VM '%s' " + % (floatip, NAME_VM_2)) if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip): logger.error("Cannot associate floating IP to VM.") - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatip_dic) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatip_dic) return (EXIT_CODE) logger.info("Trying to establish SSH connection to %s..." % floatip) @@ -502,7 +524,8 @@ def main(): cidr_first_octet = NEUTRON_PRIVATE_SUBNET_CIDR.split('.')[0] while timeout > 0: try: - ssh.connect(floatip, username=username, password=password, timeout=2) + ssh.connect(floatip, username=username, + password=password, timeout=2) logger.debug("SSH connection established to %s." % floatip) break except: @@ -513,26 +536,33 @@ def main(): console_log = vm2.get_console_output() # print each "Sending discover" captured on the console log - if len(re.findall("Sending discover", console_log)) > discover_count and not got_ip: + if len(re.findall("Sending discover", console_log)) > discover_count \ + and not got_ip: discover_count += 1 - logger.debug("Console-log '%s': Sending discover..." % NAME_VM_2) + logger.debug("Console-log '%s': Sending discover..." + % NAME_VM_2) - # check if eth0 got an ip, the line looks like this: "inet addr:192.168.".... + # check if eth0 got an ip,the line looks like this: + # "inet addr:192.168.".... 
# if the dhcp agent fails to assing ip, this line will not appear - if "inet addr:"+cidr_first_octet in console_log and not got_ip: + if "inet addr:" + cidr_first_octet in console_log and not got_ip: got_ip = True - logger.debug("The instance '%s' succeeded to get the IP from the dhcp agent.") + logger.debug("The instance '%s' succeeded to get the IP " + "from the dhcp agent.") - # if dhcp doesn't work, it shows "No lease, failing". The test will fail... + # if dhcp doesnt work,it shows "No lease, failing".The test will fail if "No lease, failing" in console_log and not nolease and not got_ip: nolease = True - logger.debug("Console-log '%s': No lease, failing..." % NAME_VM_2) - logger.info("The instance failed to get an IP from "\ - "the DHCP agent. The test will probably timeout...") + logger.debug("Console-log '%s': No lease, failing..." + % NAME_VM_2) + logger.info("The instance failed to get an IP from the " + "DHCP agent. The test will probably timeout...") if timeout == 0: # 300 sec timeout (5 min) - logger.error("Cannot establish connection to IP '%s'. Aborting" % floatip) - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatip_dic) + logger.error("Cannot establish connection to IP '%s'. Aborting" + % floatip) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatip_dic) return (EXIT_CODE) scp = SCPClient(ssh.get_transport()) @@ -541,7 +571,8 @@ def main(): try: scp.put(ping_script, "~/") except: - logger.error("Cannot SCP the file '%s' to VM '%s'" % (ping_script, floatip)) + logger.error("Cannot SCP the file '%s' to VM '%s'" + % (ping_script, floatip)) cmd = 'chmod 755 ~/ping.sh' (stdin, stdout, stderr) = ssh.exec_command(cmd) @@ -579,7 +610,8 @@ def main(): logger.debug("Pinging %s. Waiting for response..." % test_ip) sec += 1 - cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, floatip_dic) + cleanup(nova_client, neutron_client, image_id, network_dic, sg_id, + floatip_dic) test_status = "NOK" if EXIT_CODE == 0: diff --git a/testcases/vPing/CI/libraries/vPing_userdata.py b/testcases/vPing/CI/libraries/vPing_userdata.py index 9ffb56be3..1fc9e1d37 100644 --- a/testcases/vPing/CI/libraries/vPing_userdata.py +++ b/testcases/vPing/CI/libraries/vPing_userdata.py @@ -60,7 +60,7 @@ formatter = logging.Formatter('%(asctime)s - %(name)s' ch.setFormatter(formatter) logger.addHandler(ch) -REPO_PATH = os.environ['repos_dir']+'/functest/' +REPO_PATH = os.environ['repos_dir'] + '/functest/' if not os.path.exists(REPO_PATH): logger.error("Functest repository directory not found '%s'" % REPO_PATH) exit(-1) @@ -156,9 +156,12 @@ def waitVmDeleted(nova, vm): def create_private_neutron_net(neutron): # Check if the network already exists - network_id = openstack_utils.get_network_id(neutron, NEUTRON_PRIVATE_NET_NAME) - subnet_id = openstack_utils.get_subnet_id(neutron, NEUTRON_PRIVATE_SUBNET_NAME) - router_id = openstack_utils.get_router_id(neutron, NEUTRON_ROUTER_NAME) + network_id = openstack_utils.get_network_id(neutron, + NEUTRON_PRIVATE_NET_NAME) + subnet_id = openstack_utils.get_subnet_id(neutron, + NEUTRON_PRIVATE_SUBNET_NAME) + router_id = openstack_utils.get_router_id(neutron, + NEUTRON_ROUTER_NAME) if network_id != '' and subnet_id != '' and router_id != '': logger.info("Using existing network '%s'.." 
                     % NEUTRON_PRIVATE_NET_NAME)
@@ -190,7 +193,8 @@ def create_private_neutron_net(neutron):
     logger.debug("Router '%s' created successfully" % router_id)

     logger.debug('Adding router to subnet...')
-    if not openstack_utils.add_interface_router(neutron, router_id, subnet_id):
+    if not openstack_utils.add_interface_router(neutron, router_id,
+                                                subnet_id):
         return False

     logger.debug("Interface added successfully.")
@@ -206,38 +210,42 @@ def create_private_neutron_net(neutron):


 def create_security_group(neutron_client):
-    sg_id = openstack_utils.get_security_group_id(neutron_client, SECGROUP_NAME)
+    sg_id = openstack_utils.get_security_group_id(neutron_client,
+                                                  SECGROUP_NAME)
     if sg_id != '':
         logger.info("Using existing security group '%s'..." % SECGROUP_NAME)
     else:
         logger.info("Creating security group '%s'..." % SECGROUP_NAME)
         SECGROUP = openstack_utils.create_security_group(neutron_client,
-                                                 SECGROUP_NAME,
-                                                 SECGROUP_DESCR)
+                                                         SECGROUP_NAME,
+                                                         SECGROUP_DESCR)
         if not SECGROUP:
             logger.error("Failed to create the security group...")
             return False

         sg_id = SECGROUP['id']

-        logger.debug("Security group '%s' with ID=%s created successfully." %\
-                     (SECGROUP['name'], sg_id))
+        logger.debug("Security group '%s' with ID=%s created successfully."
+                     % (SECGROUP['name'], sg_id))

-        logger.debug("Adding ICMP rules in security group '%s'..." % SECGROUP_NAME)
-        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \
-                                                    'ingress', 'icmp'):
+        logger.debug("Adding ICMP rules in security group '%s'..."
+                     % SECGROUP_NAME)
+        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id,
+                                                    'ingress', 'icmp'):
             logger.error("Failed to create the security group rule...")
             return False

-        logger.debug("Adding SSH rules in security group '%s'..." % SECGROUP_NAME)
-        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \
-                                                    'ingress', 'tcp',
-                                                    '22', '22'):
+        logger.debug("Adding SSH rules in security group '%s'..."
+                     % SECGROUP_NAME)
+        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id,
+                                                    'ingress', 'tcp',
+                                                    '22', '22'):
             logger.error("Failed to create the security group rule...")
             return False

-        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, \
-                                                    'egress', 'tcp', '22', '22'):
+        if not openstack_utils.create_secgroup_rule(neutron_client, sg_id,
+                                                    'egress', 'tcp',
+                                                    '22', '22'):
             logger.error("Failed to create the security group rule...")
             return False
     return sg_id
@@ -288,7 +296,7 @@ def cleanup(nova, neutron, image_id, network_dic):
         router_id = network_dic["router_id"]

         if not openstack_utils.remove_interface_router(neutron, router_id,
-                                                    subnet_id):
+                                                       subnet_id):
             logger.error("Unable to remove subnet '%s' from router '%s'" % (
                 subnet_id, router_id))
             return False
@@ -336,7 +344,8 @@ def push_results(start_time_ts, duration, test_status):
                           'duration': duration,
                           'status': test_status})
     except:
-        logger.error("Error pushing results into Database '%s'" % sys.exc_info()[0])
+        logger.error("Error pushing results into Database '%s'"
+                     % sys.exc_info()[0])


 def main():
@@ -347,8 +356,9 @@ def main():
     neutron_client = neutronclient.Client(**creds_neutron)
     creds_keystone = openstack_utils.get_credentials("keystone")
     keystone_client = keystoneclient.Client(**creds_keystone)
-    glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
-                                                              endpoint_type='publicURL')
+    glance_endpoint = keystone_client.\
+        service_catalog.url_for(service_type='image',
+                                endpoint_type='publicURL')
     glance_client = glanceclient.Client(1, glance_endpoint,
                                         token=keystone_client.auth_token)
     EXIT_CODE = -1
@@ -366,13 +376,13 @@ def main():
         logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
         image_id = openstack_utils.create_glance_image(glance_client,
-                                                GLANCE_IMAGE_NAME,
-                                                GLANCE_IMAGE_PATH)
+                                                       GLANCE_IMAGE_NAME,
+                                                       GLANCE_IMAGE_PATH)
         if not image_id:
             logger.error("Failed to create a Glance image...")
             return(EXIT_CODE)
-        logger.debug("Image '%s' with ID=%s created successfully." %\
-                     (GLANCE_IMAGE_NAME, image_id))
+        logger.debug("Image '%s' with ID=%s created successfully."
+                     % (GLANCE_IMAGE_NAME, image_id))

     network_dic = create_private_neutron_net(neutron_client)
     if not network_dic:
@@ -381,7 +391,7 @@ def main():
         return(EXIT_CODE)

     network_id = network_dic["net_id"]
-    sg_id = create_security_group(neutron_client)
+    create_security_group(neutron_client)

     # Check if the given flavor exists
     try:
@@ -500,8 +510,8 @@ def main():
             break
         elif sec % 10 == 0:
             if "request failed" in console_log:
-                logger.debug("It seems userdata is not supported in nova boot." + \
-                             " Waiting a bit...")
+                logger.debug("It seems userdata is not supported in "
+                             "nova boot. Waiting a bit...")
                 metadata_tries += 1
             else:
                 logger.debug("Pinging %s. Waiting for response..." % test_ip)
-- 
cgit 1.2.3-korg
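
The wrapping patterns applied throughout this change are the usual PEP 8 remedies for flake8's line-length and continuation warnings: long argument lists are continued with the arguments aligned under the opening parenthesis, long log messages are split via implicit string concatenation with the '%' operator moved to the continuation line, and trailing-backslash continuations are dropped in favour of parentheses. The snippet below is a minimal, self-contained sketch of those patterns only; the logger and the add_secgroup() helper are illustrative stand-ins and are not part of the Functest code.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("wrap_example")  # stand-in logger, not Functest's


def add_secgroup(client, instance_id, secgroup_id):
    # Hypothetical stand-in for openstack_utils.add_secgroup_to_instance().
    return True


def apply_secgroup(client, instance_id, secgroup_id,
                   instance_name, secgroup_name):
    # Long call wrapped with the continuation aligned under the opening
    # parenthesis, and the '%' operator kept on the continuation line,
    # mirroring the logger calls reformatted in this patch.
    logger.info("Adding '%s' to security group '%s'..."
                % (instance_name, secgroup_name))
    if not add_secgroup(client, instance_id, secgroup_id):
        # Long message split with implicit string concatenation instead of
        # a trailing backslash, so no source line exceeds 79 characters.
        logger.error("Failed to add the instance to the security group. "
                     "The connectivity test will probably fail...")
        return False
    return True


if __name__ == '__main__':
    apply_secgroup(None, "vm-id", "sg-id", "opnfv-vping-1", "vPing-sg")

Either wrapping form keeps every physical line within flake8's default 79-character limit without changing runtime behaviour, which is the intent of this change.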