Diffstat (limited to 'testcases')
-rw-r--r--  testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt | 37
-rw-r--r--  testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt | 37
-rw-r--r--  testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt | 37
-rw-r--r--  testcases/Controllers/ODL/CI/requirements.pip | 4
-rwxr-xr-x[-rw-r--r--]  testcases/Controllers/ODL/CI/start_tests.sh | 67
-rw-r--r--  testcases/Controllers/ODL/CI/test_list.txt | 4
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/Readme.txt | 5
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/__init__.py | 0
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py | 0
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/adapters/client.py | 76
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/adapters/connection.py | 190
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/adapters/environment.py | 268
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py | 90
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/dependencies/onos | 23
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/log/gitignore | 0
-rw-r--r--  testcases/Controllers/ONOS/Teston/CI/onosfunctest.py | 24
-rw-r--r--  testcases/VIM/OpenStack/CI/custom_tests/test_list.txt | 227
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/check_os.sh | 91
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/clean_openstack.py | 399
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml | 31
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py | 306
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_rally.py | 273
-rw-r--r--  testcases/VIM/OpenStack/CI/libraries/run_tempest.py | 260
-rwxr-xr-x  testcases/VIM/OpenStack/CI/libraries/test_openstack.sh | 111
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml | 97
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml | 63
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml | 266
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml | 49
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml | 142
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml | 92
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml | 240
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml | 369
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml | 54
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml | 28
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml | 268
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml | 42
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh | 13
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template | 17
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template | 1
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template | 13
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template | 13
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template | 64
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template | 43
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template | 23
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template | 19
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template | 11
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template | 19
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template | 16
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template | 16
-rw-r--r--  testcases/VIM/OpenStack/CI/rally_cert/task.yaml | 59
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-authenticate.json | 18
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-cinder.json | 48
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-glance.json | 54
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-heat.json | 39
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-keystone.json | 18
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-neutron.json | 36
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-nova.json | 87
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-quotas.json | 15
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-requests.json | 30
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-smoke-green.json | 3
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-smoke.json | 3
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-tempest.json | 3
-rw-r--r--  testcases/VIM/OpenStack/CI/suites/opnfv-vm.json | 22
-rw-r--r--  testcases/VIM/OpenStack/OpenStack.md | 218
-rw-r--r--  testcases/__init__.py | 0
-rwxr-xr-x[-rw-r--r--]  testcases/config_functest.py | 650
-rw-r--r--  testcases/config_functest.yaml | 243
-rw-r--r--  testcases/features/doctor.py | 64
-rw-r--r--  testcases/functest.yaml | 30
-rw-r--r--  testcases/functest_utils.py | 766
-rw-r--r--  testcases/tests/TestFunctestUtils.py | 111
-rw-r--r--  testcases/vIMS/CI/clearwater.py | 63
-rwxr-xr-x[-rw-r--r--]  testcases/vIMS/CI/create_venv.sh (renamed from testcases/Controllers/ODL/CI/create_venv.sh) | 17
-rw-r--r--  testcases/vIMS/CI/orchestrator.py | 213
-rw-r--r--  testcases/vIMS/CI/requirements.pip | 1
-rw-r--r--  testcases/vIMS/CI/vIMS.py | 480
-rw-r--r--  testcases/vIMS/vIMS.md | 3
-rw-r--r--  testcases/vPing/CI/libraries/vPing.py | 587
78 files changed, 7283 insertions, 1136 deletions
diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt
new file mode 100644
index 000000000..02c90c37c
--- /dev/null
+++ b/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt
@@ -0,0 +1,37 @@
+*** Settings ***
+Documentation Checking that ports deleted in OpenStack are also deleted in OpenDaylight
+Suite Setup Create Session OSSession http://${OPENSTACK}:9696 headers=${X-AUTH}
+Suite Teardown Delete All Sessions
+Library SSHLibrary
+Library Collections
+Library OperatingSystem
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${ODLREST} /controller/nb/v2/neutron/ports
+${OSREST} /v2.0/ports/${PORTID}
+${data} {"port":{"network_id":"${NETID}","admin_state_up": true}}
+
+*** Test Cases ***
+Delete New Port
+ [Documentation] Delete previously created port in OpenStack
+ [Tags] Delete port OpenStack Neutron
+ Log ${data}
+ ${resp} delete OSSession ${OSREST}
+ Should be Equal As Strings ${resp.status_code} 204
+ Log ${resp.content}
+ sleep 2
+
+Check Port Deleted
+ [Documentation] Check port deleted in OpenDaylight
+ [Tags] Check port deleted OpenDaylight
+ Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH}
+ ${resp} get ODLSession ${ODLREST}
+ Should be Equal As Strings ${resp.status_code} 200
+ ${ODLResult} To Json ${resp.content}
+ Set Suite Variable ${ODLResult}
+ Log ${ODLResult}
+ ${resp} get ODLSession ${ODLREST}/${PORTID}
+ Should be Equal As Strings ${resp.status_code} 404
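The two suites that follow (subnets, networks) apply the same pattern as this one: delete the resource through the OpenStack Neutron API, wait briefly, then expect a 404 from the OpenDaylight northbound API. A rough bash sketch of what the ports suite does is shown below; the token, the port id and the default admin/admin ODL credentials are assumptions used only for illustration.

    # Sketch only -- assumes $TOKEN, $PORTID, $OPENSTACK, $CONTROLLER and $PORT are set,
    # and that OpenDaylight accepts the default admin/admin credentials.
    curl -s -o /dev/null -w "%{http_code}\n" -X DELETE \
         -H "X-Auth-Token: $TOKEN" \
         "http://$OPENSTACK:9696/v2.0/ports/$PORTID"                            # expect 204
    sleep 2
    curl -s -o /dev/null -w "%{http_code}\n" -u admin:admin \
         "http://$CONTROLLER:$PORT/controller/nb/v2/neutron/ports/$PORTID"      # expect 404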
diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt
new file mode 100644
index 000000000..4121c98fe
--- /dev/null
+++ b/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt
@@ -0,0 +1,37 @@
+*** Settings ***
+Documentation Checking that subnets deleted in OpenStack are also deleted in OpenDaylight
+Suite Setup Create Session OSSession http://${OPENSTACK}:9696 headers=${X-AUTH}
+Suite Teardown Delete All Sessions
+Library SSHLibrary
+Library Collections
+Library OperatingSystem
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${ODLREST} /controller/nb/v2/neutron/subnets
+${OSREST} /v2.0/subnets/${SUBNETID}
+${data} {"subnet":{"network_id":"${NETID}","ip_version":4,"cidr":"172.16.64.0/24","allocation_pools":[{"start":"172.16.64.20","end":"172.16.64.120"}]}}
+
+*** Test Cases ***
+Delete New subnet
+ [Documentation] Delete previously created subnet in OpenStack
+ [Tags] Delete Subnet OpenStack Neutron
+ Log ${data}
+ ${resp} delete OSSession ${OSREST}
+ Should be Equal As Strings ${resp.status_code} 204
+ Log ${resp.content}
+ sleep 2
+
+Check New subnet deleted
+ [Documentation] Check subnet deleted in OpenDaylight
+ [Tags] Check subnet deleted OpenDaylight
+ Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH}
+ ${resp} get ODLSession ${ODLREST}
+ Should be Equal As Strings ${resp.status_code} 200
+ ${ODLResult} To Json ${resp.content}
+ Set Suite Variable ${ODLResult}
+ Log ${ODLResult}
+ ${resp} get ODLSession ${ODLREST}/${SUBNETID}
+ Should be Equal As Strings ${resp.status_code} 404
diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt
new file mode 100644
index 000000000..fc823fa4b
--- /dev/null
+++ b/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt
@@ -0,0 +1,37 @@
+*** Settings ***
+Documentation Checking that networks deleted in OpenStack are also deleted in OpenDaylight
+Suite Setup Create Session OSSession http://${OPENSTACK}:9696 headers=${X-AUTH}
+Suite Teardown Delete All Sessions
+Library SSHLibrary
+Library Collections
+Library OperatingSystem
+Library ../../../libraries/RequestsLibrary.py
+Library ../../../libraries/Common.py
+Variables ../../../variables/Variables.py
+
+*** Variables ***
+${ODLREST} /controller/nb/v2/neutron/networks
+${OSREST} /v2.0/networks/${NETID}
+${postNet} {"network":{"name":"odl_network","admin_state_up":true}}
+
+*** Test Cases ***
+Delete Network
+ [Documentation] Delete network in OpenStack
+ [Tags] Delete Network OpenStack Neutron
+ Log ${postNet}
+ ${resp} delete OSSession ${OSREST}
+ Should be Equal As Strings ${resp.status_code} 204
+ Log ${resp.content}
+ sleep 2
+
+Check Network deleted
+ [Documentation] Check Network deleted in OpenDaylight
+ [Tags] Check Network OpenDaylight
+ Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH}
+ ${resp} get ODLSession ${ODLREST}
+ Should be Equal As Strings ${resp.status_code} 200
+ ${ODLResult} To Json ${resp.content}
+ Set Suite Variable ${ODLResult}
+ Log ${ODLResult}
+ ${resp} get ODLSession ${ODLREST}/${NETID}
+ Should be Equal As Strings ${resp.status_code} 404
diff --git a/testcases/Controllers/ODL/CI/requirements.pip b/testcases/Controllers/ODL/CI/requirements.pip
deleted file mode 100644
index 43845e251..000000000
--- a/testcases/Controllers/ODL/CI/requirements.pip
+++ /dev/null
@@ -1,4 +0,0 @@
-requests
-robotframework
-robotframework-requests
-robotframework-sshlibrary
diff --git a/testcases/Controllers/ODL/CI/start_tests.sh b/testcases/Controllers/ODL/CI/start_tests.sh
index 367a075f7..5e87726b8 100644..100755
--- a/testcases/Controllers/ODL/CI/start_tests.sh
+++ b/testcases/Controllers/ODL/CI/start_tests.sh
@@ -1,9 +1,11 @@
#!/bin/bash
-# Script requires that test environment is created already
# it includes python2.7 virtual env with robot packages and git
-# use create_env.sh script for creating python virtualenv
BASEDIR=`dirname $0`
+RESULTS_DIR='/home/opnfv/functest/results/odl/'
+REPO_DIR='/home/opnfv/repos/odl_integration'
+#TODO: read this form config_functest.yaml
+
# Colors
green='\033[0;32m'
light_green='\033[1;32m'
@@ -17,11 +19,11 @@ usage:
where:
-h show this help text
- var one of the following: OSTACK_IP, ODL_PORT, USR_NAME, USR_PASSWORD
+ var one of the following: ODL_IP, ODL_PORT, USR_NAME, PASS, NEUTRON_IP
value new value for var
example:
- OSTACK_IP=oscontro1 ODL_PORT=8080 bash $(basename "$0")"
+ ODL_IP=oscontro1 ODL_PORT=8080 bash $(basename "$0")"
while getopts ':h' option; do
case "$option" in
@@ -38,36 +40,30 @@ done
echo -e "${green}Current environment parameters for ODL suite.${nc}"
# Following vars might be also specified as CLI params
set -x
-OSTACK_IP=${OSTACK_IP:-'oscontrol'}
+ODL_IP=${ODL_IP:-'192.168.1.5'}
ODL_PORT=${ODL_PORT:-8081}
-USR_NAME=${USR_NAME:-'admin'}
-USR_PASSWORD=${USR_PASSWORD:-'octopus'}
+USR_NAME=${USR_NAME:-'neutron'}
+PASS=${PASS:-'octopus'}
+NEUTRON_IP=${NEUTRON_IP:-192.168.0.68}
set +x
-echo -e "${green}Cloning ODL integration git repo.${nc}"
-if [ -d integration ]; then
- cd integration
- git checkout -- .
- git pull
- cd -
-else
- git clone https://github.com/opendaylight/integration.git
-fi
# Change openstack password for admin tenant in neutron suite
-sed -i "s/\"password\": \"admin\"/\"password\": \"${USR_PASSWORD}\"/" integration/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "s/\"password\": \".*\"/\"password\": \"${PASS}\"/" ${REPO_DIR}/test/csit/suites/openstack/neutron/__init__.robot
+
+# Add Start Suite and Teardown Suite
+sed -i "/^Documentation.*/a Suite Teardown Stop Suite" ${REPO_DIR}/test/csit/suites/openstack/neutron/__init__.robot
+sed -i "/^Documentation.*/a Suite Setup Start Suite" ${REPO_DIR}/test/csit/suites/openstack/neutron/__init__.robot
+
-if source $BASEDIR/venv/bin/activate; then
- echo -e "${green}Python virtualenv activated.${nc}"
-else
- echo -e "${red}ERROR${nc}"
- exit 1
-fi
+# add custom tests to suite, if there are more custom tests needed this will be reworked
+echo -e "${green}Copy custom tests to suite.${nc}"
+cp -vf ${BASEDIR}/custom_tests/neutron/* ${REPO_DIR}/test/csit/suites/openstack/neutron/
# The list of tests is specified in test_list.txt
# those are relative paths to test directories from the integration suite
echo -e "${green}Executing chosen tests.${nc}"
-test_num=1
+test_num=0
while read line
do
# skip comments
@@ -75,16 +71,19 @@ do
# skip empty lines
[[ -z "${line}" ]] && continue
- echo -e "${light_green}Starting test: $line ${nc}"
- pybot -v OPENSTACK:${OSTACK_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${OSTACK_IP} $line
- mkdir -p $BASEDIR/logs/${test_num}
- mv log.html $BASEDIR/logs/${test_num}/
- mv report.html $BASEDIR/logs/${test_num}/
- mv output.xml $BASEDIR/logs/${test_num}/
((test_num++))
-done < test_list.txt
+ echo -e "${light_green}Starting test: $line ${nc}"
+ pybot -v OPENSTACK:${NEUTRON_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${ODL_IP} ${REPO_DIR}/$line
+ mkdir -p $RESULTS_DIR/logs/${test_num}
+ mv log.html $RESULTS_DIR/logs/${test_num}/
+ mv report.html $RESULTS_DIR/logs/${test_num}/
+ mv output.xml $RESULTS_DIR/logs/${test_num}/
+done < ${BASEDIR}/test_list.txt
-echo -e "${green}Deactivate venv.${nc}"
-deactivate
+# create final report which includes all partial test reports
+for i in $(seq $test_num); do
+ rebot_params="$rebot_params $RESULTS_DIR/logs/$i/output.xml"
+done
-# Now we can copy output.xml, log.html and report.xml files generated by robot.
+echo -e "${green}Final report is located:${nc}"
+rebot $rebot_params
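With these changes the script runs every suite listed in test_list.txt with pybot, stores each run under ${RESULTS_DIR}/logs/<n>/ and finally merges the per-suite output.xml files into a single report with rebot. A minimal sketch of an equivalent run, assuming the defaults hard-coded above and two executed suites:

    # Sketch, using the defaults from start_tests.sh and assuming two executed suites.
    ODL_IP=192.168.1.5 ODL_PORT=8081 NEUTRON_IP=192.168.0.68 USR_NAME=neutron PASS=octopus \
        bash start_tests.sh
    # the final step of the script is then roughly:
    rebot /home/opnfv/functest/results/odl/logs/1/output.xml \
          /home/opnfv/functest/results/odl/logs/2/output.xml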
diff --git a/testcases/Controllers/ODL/CI/test_list.txt b/testcases/Controllers/ODL/CI/test_list.txt
index e5e52129b..ad791e553 100644
--- a/testcases/Controllers/ODL/CI/test_list.txt
+++ b/testcases/Controllers/ODL/CI/test_list.txt
@@ -1,5 +1,5 @@
# List of tests which will be executed by the script start_tests.sh
# You can specify a path to a specific robot test file or directory (in that case all tests from the directory will be executed)
-integration/test/csit/suites/integration/basic/
-integration/test/csit/suites/openstack/neutron/
+test/csit/suites/integration/basic/
+test/csit/suites/openstack/neutron/
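Each non-comment line in this file is now resolved against REPO_DIR by start_tests.sh, so with the defaults shown above the neutron entry expands to roughly:

    # Sketch -- values taken from the defaults in start_tests.sh.
    pybot -v OPENSTACK:192.168.0.68 -v PORT:8081 -v CONTROLLER:192.168.1.5 \
          /home/opnfv/repos/odl_integration/test/csit/suites/openstack/neutron/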
diff --git a/testcases/Controllers/ONOS/Teston/CI/Readme.txt b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
new file mode 100644
index 000000000..7393f59a1
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/Readme.txt
@@ -0,0 +1,5 @@
+1. This is a basic ONOS test run; we will keep improving it.
+2. This test includes two suites:
+(1) Northbound test (network/subnet/port create/update/delete)
+(2) Ovsdb test: default configuration, OpenFlow connection, VMs coming online.
+3. Later we will build a framework for these tests
\ No newline at end of file
diff --git a/testcases/Controllers/ONOS/Teston/CI/__init__.py b/testcases/Controllers/ONOS/Teston/CI/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/__init__.py
diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py b/testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py
diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/client.py b/testcases/Controllers/ONOS/Teston/CI/adapters/client.py
new file mode 100644
index 000000000..535b71f85
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/client.py
@@ -0,0 +1,76 @@
+"""
+Description:
+ This file is used to run testcase
+ lanqinglong@huawei.com
+"""
+from environment import environment
+import os
+import time
+import pexpect
+import re
+import requests
+import json
+
+class client( environment ):
+
+ def __init__( self ):
+ environment.__init__( self )
+ self.loginfo = environment()
+ self.testcase = ''
+
+ def RunScript( self, handle, testname, timeout=300 ):
+ """
+ Run ONOS Test Script
+ Parameters:
+ testname: ONOS Testcase Name
+ masterusername: The server username of running ONOS
+ masterpassword: The server password of running ONOS
+ """
+ self.testcase = testname
+ self.ChangeTestCasePara( testname, self.masterusername, self.masterpassword )
+ runhandle = handle
+ runtest = self.home + "/OnosSystemTest/TestON/bin/cli.py run " + testname
+ runhandle.sendline(runtest)
+ circletime = 0
+ lastshowscreeninfo = ''
+ while True:
+ Result = runhandle.expect(["PEXPECT]#", pexpect.EOF, pexpect.TIMEOUT])
+ curshowscreeninfo = runhandle.before
+ if (len(lastshowscreeninfo) != len(curshowscreeninfo)):
+ self.loginfo.log(str(curshowscreeninfo)[len(lastshowscreeninfo)::])
+ lastshowscreeninfo = curshowscreeninfo
+ if Result == 0:
+ print "Done!"
+ return
+ time.sleep(1)
+ circletime += 1
+ if circletime > timeout:
+ break
+ self.loginfo.log( "Timeout when running the test, please check!" )
+
+ def onosstart( self ):
+ #These are the user & password of the machine running Compass; modify them if needed
+
+ print "Test Begin....."
+ self.OnosConnectionSet()
+ masterhandle = self.SSHlogin(self.localhost, self.masterusername,
+ self.masterpassword)
+ self.OnosEnvSetup( masterhandle )
+ return masterhandle
+
+ def onosclean( self, handle ):
+ self.SSHRelease( handle )
+ self.loginfo.log('Release onos handle Successful')
+
+ def push_results_to_db( self, payload, pushornot = 1):
+ if pushornot != 1:
+ return 1
+ url = self.Result_DB + "/results"
+ params = {"project_name": "functest", "case_name": "ONOS-" + self.testcase,
+ "pod_name": 'huawei-build-2', "details": payload}
+ headers = {'Content-Type': 'application/json'}
+ try:
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ self.loginfo.log(r)
+ except:
+ self.loginfo.log('Error pushing results into Database')
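push_results_to_db posts the payload returned by foundation.GetResult to the results collector. A hand-rolled equivalent with curl, assuming the test_db_url value from config_functest.yaml is exported as $TEST_DB_URL (the details values below are placeholders):

    # Sketch only -- $TEST_DB_URL stands for results.test_db_url from config_functest.yaml.
    curl -X POST -H "Content-Type: application/json" \
         -d '{"project_name": "functest", "case_name": "ONOS-FUNCvirNetNB",
              "pod_name": "huawei-build-2",
              "details": {"timestart": "placeholder", "duration": "placeholder", "status": "OK"}}' \
         "$TEST_DB_URL/results"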
diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py b/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py
new file mode 100644
index 000000000..c0eb464af
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py
@@ -0,0 +1,190 @@
+"""
+Description:
+ This file is used to make connections
+ Include ssh & exchange public-key to each other so that
+ it can run without password
+
+ lanqinglong@huawei.com
+"""
+import os
+import time
+import pexpect
+import re
+import sys
+from foundation import foundation
+
+class connection( foundation ):
+
+ def __init__( self ):
+ foundation.__init__( self )
+ self.loginfo = foundation()
+
+ def AddKnownHost( self, handle, ipaddr, username, password ):
+ """
+ Add a user to known hosts, so that onos can log in with 'onos $ipaddr'.
+ parameters:
+ ipaddr: ip address
+ username: login user name
+ password: login password
+ """
+ print( "Now Adding an user to known hosts " + ipaddr )
+ login = handle
+ login.sendline( "ssh -l %s -p 8101 %s"%( username, ipaddr ) )
+ index = 0
+ while index != 2:
+ index = login.expect( ['assword:', 'yes/no', pexpect.EOF, \
+ pexpect.TIMEOUT] )
+ if index == 0:
+ login.sendline( password )
+ login.sendline( "logout" )
+ index = login.expect( ["closed", pexpect.EOF] )
+ if index == 0:
+ self.loginfo.log( "Add SSH Known Host Success!" )
+ break
+ else:
+ self.loginfo.log( "Add SSH Known Host Failed! Please Check!" )
+ break
+ login.prompt( )
+
+ if index == 1:
+ login.sendline('yes')
+
+ def GetEnvValue( self, handle, envname):
+ """
+ os.getenv only returns the current user's value;
+ GetEnvValue returns an environment value from
+ the current handle
+ eg: GetEnvValue(handle,'HOME')
+ """
+ envhandle = handle
+ envhandle.sendline( 'echo $' + envname )
+ envhandle.prompt( )
+ reg = envname + '\r\n(.*)\r'
+ envaluereg = re.compile( reg )
+ envalue = envaluereg.search( envhandle.before )
+ if envalue:
+ return envalue.groups()[0]
+ else:
+ return None
+
+ def Gensshkey( self, handle ):
+ """
+ Generate SSH keys, for servers that do not have a key yet.
+ """
+ print "Now Generating SSH keys..."
+ #The key file name may be id_rsa, id_ecdsa or something else,
+ #so check whether a key already exists first
+ keysub = handle
+ filepath = self.GetEnvValue( keysub, 'HOME' ) + '/.ssh'
+ filelist = os.listdir( filepath )
+ for item in filelist:
+ if 'id' in item:
+ self.loginfo.log("SSH keys are exsit in ssh directory.")
+ return True
+ keysub.sendline("ssh-keygen -t rsa")
+ Result = 0
+ while Result != 2:
+ Result = keysub.expect( ["Overwrite", "Enter", pexpect.EOF, \
+ 'PEXPECT]#', pexpect.TIMEOUT])
+ if Result == 0:
+ keysub.sendline("y")
+ if Result == 1 or Result == 2:
+ keysub.sendline("\n")
+ if Result == 3:
+ self.loginfo.log( "Generate SSH key success." )
+ keysub.prompt()
+ break
+ if Result == 4:
+ self.loginfo.log("Generate SSH key failed.")
+ keysub.prompt()
+ break
+
+ def GetRootAuth( self, password ):
+ """
+ Get root user
+ parameters:
+ password: root login password
+ """
+ print( "Now changing to user root" )
+ login = pexpect.spawn( "su - root" )
+ index = 0
+ while index != 2:
+ index = login.expect( ['assword:', "failure", \
+ pexpect.EOF, pexpect.TIMEOUT] )
+ if index == 0:
+ login.sendline( password )
+ if index == 1:
+ self.loginfo.log("Change user to root failed.")
+
+ login.interact()
+
+ def ReleaseRootAuth( self ):
+ """
+ Exit root user.
+ """
+ print( "Now Release user root" )
+ login = pexpect.spawn( "exit" )
+ index = login.expect( ['logout', \
+ pexpect.EOF, pexpect.TIMEOUT] )
+ if index == 0:
+ self.loginfo.log("Release root user success.")
+ if index == 1:
+ self.loginfo.log("Release root user failed.")
+
+ login.interact()
+
+ def AddEnvIntoBashrc( self, envalue ):
+ """
+ Add Env var into /etc/profile.
+ parameters:
+ envalue: environment value to add
+ """
+ print "Now Adding bash environment"
+ fileopen = open( "/etc/profile", 'r' )
+ findContext = 1
+ while findContext:
+ findContext = fileopen.readline( )
+ result = findContext.find( envalue )
+ if result != -1:
+ break
+ fileopen.close()
+ if result == -1:
+ envAdd = open( "/etc/profile", 'a+' )
+ envAdd.writelines( "\n" + envalue )
+ envAdd.close( )
+ self.loginfo.log( "Add env to bashrc success!" )
+
+ def OnosRootPathChange( self, onospath ):
+ """
+ Change ONOS root path in file:bash_profile
+ onospath: path of onos root
+ """
+ print "Now Changing ONOS Root Path"
+ filepath = onospath + 'onos/tools/dev/bash_profile'
+ line = open(filepath, 'r').readlines()
+ lenall = len(line) - 1
+ for i in range(lenall):
+ if "export ONOS_ROOT" in line[i]:
+ line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n'
+ NewFile = open(filepath, 'w')
+ NewFile.writelines(line)
+ NewFile.close()
+ print "Done!"
+
+ def OnosConnectionSet (self):
+ """
+ Integrated ONOS connection setup
+ """
+ if self.masterusername == 'root':
+ filepath = '/root/'
+ else :
+ filepath = '/home/' + self.masterusername + '/'
+ filepath = os.path.join( filepath, "onos/tools/dev/bash_profile" )
+ self.AddEnvIntoBashrc("source " + filepath + "\n")
+ self.AddEnvIntoBashrc("export OCT=" + self.OCT)
+ self.AddEnvIntoBashrc("export OC1=" + self.OC1)
+ self.AddEnvIntoBashrc("export OC2=" + self.OC2)
+ self.AddEnvIntoBashrc("export OC3=" + self.OC3)
+ self.AddEnvIntoBashrc("export OCN=" + self.OCN)
+ self.AddEnvIntoBashrc("export OCN2=" + self.OCN2)
+ self.AddEnvIntoBashrc("export localhost=" + self.localhost)
diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py b/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py
new file mode 100644
index 000000000..f0bafd760
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py
@@ -0,0 +1,268 @@
+"""
+Description:
+ This file is used to setup the running environment
+ Include Download code,setup environment variable
+ Set onos running config
+ Set user name/password
+ Onos-push-keys and so on
+ lanqinglong@huawei.com
+"""
+
+import os
+import time
+import pexpect
+import re
+import sys
+import pxssh
+from connection import connection
+
+class environment( connection ):
+
+ def __init__( self ):
+ connection.__init__( self )
+ self.loginfo = connection( )
+ self.masterhandle = ''
+ self.home = ''
+
+ def DownLoadCode( self, handle, codeurl ):
+ """
+ Download Code use 'git clone'
+ parameters:
+ handle: current working handle
+ codeurl: clone code url
+ """
+ print "Now loading test codes! Please wait in patient..."
+ originalfolder = sys.path[0]
+ print originalfolder
+ gitclone = handle
+ gitclone.sendline( "git clone " + codeurl )
+ index = 0
+ increment = 0
+ while index != 1 or index != 4:
+ index = gitclone.expect ( ['already exists', 'esolving deltas: 100%', \
+ 'eceiving objects', 'Already up-to-date', \
+ 'npacking objects: 100%', pexpect.EOF] )
+
+ filefolder = self.home + '/' + codeurl.split('/')[-1].split('.')[0]
+ if index == 0 :
+ os.chdir( filefolder )
+ os.system( 'git pull' )
+ os.chdir( originalfolder )
+ self.loginfo.log( 'Download code success!' )
+ break
+ elif index == 1 or index == 4:
+ self.loginfo.log( 'Download code success!' )
+ gitclone.sendline( "mkdir onos" )
+ gitclone.prompt( )
+ gitclone.sendline( "cp -rf " + filefolder+ "/tools onos/" )
+ gitclone.prompt( )
+ break
+ elif index == 2 :
+ os.write(1, gitclone.before)
+ sys.stdout.flush()
+ else :
+ self.loginfo.log( 'Download code failed!' )
+ self.loginfo.log( 'Information before' + gitclone.before )
+ break
+ gitclone.prompt( )
+
+ def InstallDefaultSoftware( self, handle ):
+ """
+ Install default software
+ parameters:
+ handle(input): current working handle
+ """
+ print "Now Cleaning test environment"
+ handle.sendline("sudo apt-get install -y mininet")
+ handle.prompt( )
+ handle.sendline("sudo pip install configobj")
+ handle.prompt( )
+ handle.sendline("sudo apt-get install -y sshpass")
+ handle.prompt( )
+ handle.sendline("OnosSystemTest/TestON/bin/cleanup.sh")
+ handle.prompt( )
+ time.sleep(5)
+ self.loginfo.log( 'Clean environment success!' )
+
+ def OnosPushKeys(self, handle, cmd, password):
+ """
+ Using onos-push-keys to make ssh device without password
+ parameters:
+ handle(input): working handle
+ cmd(input): onos-push-keys xxx(xxx is device)
+ password(input): login in password
+ """
+ print "Now Pushing Onos Keys:"+cmd
+ Pushkeys = handle
+ Pushkeys.sendline( cmd )
+ Result = 0
+ while Result != 2:
+ Result = Pushkeys.expect( ["(yes/no)", "assword:", "PEXPECT]#", \
+ pexpect.EOF, pexpect.TIMEOUT])
+ if ( Result == 0 ):
+ Pushkeys.sendline( "yes" )
+ if ( Result == 1 ):
+ Pushkeys.sendline( password )
+ if ( Result == 2 ):
+ self.loginfo.log( "ONOS Push keys Success!" )
+ break
+ if ( Result == 3 ):
+ self.loginfo.log( "ONOS Push keys Error!" )
+ break
+ time.sleep(2)
+ Pushkeys.prompt( )
+ print "Done!"
+
+ def SetOnosEnvVar( self, handle, masterpass, agentpass):
+ """
+ Setup onos pushkeys to all devices(3+2)
+ parameters:
+ handle(input): current working handle
+ masterpass: scripts running server's password
+ agentpass: onos cluster&compute node password
+ """
+ print "Now Setting test environment"
+ for host in self.hosts:
+ print "try to connect " + str(host)
+ result = self.CheckSshNoPasswd(host)
+ if not result:
+ print "ssh lgin failed,try to copy master publickey to agent " + str(host)
+ self.CopyPublicKey(host)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OCT, masterpass)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OC1, agentpass)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OC2, agentpass)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OC3, agentpass)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OCN, agentpass)
+ self.OnosPushKeys( handle, "onos-push-keys " + self.OCN2, agentpass)
+
+ def CheckSshNoPasswd( self, host):
+ """
+ Check master can connect agent with no password
+ """
+ login = pexpect.spawn( "ssh " + str(host))
+ index = 4
+ while index == 4:
+ index = login.expect(['(yes/no)','>|#|\$', \
+ pexpect.EOF, pexpect.TIMEOUT] )
+ if index == 0:
+ login.sendline( "yes" )
+ index = 4
+ if index == 1:
+ self.loginfo.log("ssh connect to " + str(host) + " success,no need to copy ssh public key" )
+ return True
+ login.interact()
+ return False
+
+ def ChangeOnosName( self, user, password):
+ """
+ Change onos name in envDefault file
+ Because some command depend on this
+ parameters:
+ user: onos&compute node user
+ password: onos&compute node password
+ """
+ print "Now Changing ONOS name&password"
+ filepath = self.home + '/onos/tools/build/envDefaults'
+ line = open(filepath, 'r').readlines()
+ lenall = len(line) - 1
+ for i in range(lenall):
+ if "ONOS_USER=" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_GROUP" in line[i]:
+ line[i]=line[i].replace("sdn",user)
+ if "ONOS_PWD" in line[i]:
+ line[i]=line[i].replace("rocks",password)
+ NewFile = open(filepath, 'w')
+ NewFile.writelines(line)
+ NewFile.close()
+ print "Done!"
+
+ def ChangeTestCasePara(self, testcase, user, password):
+ """
+ When running a test script, some parameters need
+ to be changed in every test folder's *.param & *.topo files
+ user: onos&compute node user
+ password: onos&compute node password
+ """
+ print "Now Changing " + testcase + " name&password"
+ if self.masterusername == 'root':
+ filepath = '/root/'
+ else :
+ filepath = '/home/' + self.masterusername + '/'
+ filepath = filepath +"OnosSystemTest/TestON/tests/" + testcase + "/" + \
+ testcase + ".topo"
+ line = open(filepath,'r').readlines()
+ lenall = len(line)-1
+ for i in range(lenall-2):
+ if ("localhost" in line[i]) or ("OCT" in line[i]):
+ line[i+1]=re.sub(">\w+",">"+user,line[i+1])
+ line[i+2]=re.sub(">\w+",">"+password,line[i+2])
+ if "OC1" in line [i] \
+ or "OC2" in line [i] \
+ or "OC3" in line [i] \
+ or "OCN" in line [i] \
+ or "OCN2" in line[i]:
+ line[i+1]=re.sub(">\w+",">root",line[i+1])
+ line[i+2]=re.sub(">\w+",">root",line[i+2])
+ NewFile = open(filepath,'w')
+ NewFile.writelines(line)
+ NewFile.close()
+
+ def SSHlogin ( self, ipaddr, username, password ) :
+ """
+ SSH login provide a connection to destination.
+ parameters:
+ ipaddr: ip address
+ username: login user name
+ password: login password
+ return: handle
+ """
+ login = pxssh.pxssh( )
+ login.login ( ipaddr, username, password, original_prompt='[$#>]')
+ #send command ls -l
+ login.sendline ('ls -l')
+ #match prompt
+ login.prompt()
+ print ("SSH login " + ipaddr + " success!")
+ return login
+
+ def SSHRelease( self, handle ):
+ #Release ssh
+ handle.logout()
+
+ def CopyOnostoTestbin( self ):
+ sourcefile = self.cipath + '/dependencies/onos'
+ destifile = self.home + '/onos/tools/test/bin/'
+ os.system( 'pwd' )
+ runcommand = 'cp ' + sourcefile + ' ' + destifile
+ os.system( runcommand )
+
+ def CopyPublicKey( self, host ):
+ output = os.popen( 'cat /root/.ssh/id_rsa.pub' )
+ publickey = output.read().strip('\n')
+ tmphandle = self.SSHlogin( self.installer_master, self.installer_master_username, self.installer_master_password )
+ tmphandle.sendline("ssh "+ host + " -T \'echo " + str(publickey) + ">>/root/.ssh/authorized_keys\'" )
+ tmphandle.prompt()
+ self.SSHRelease(tmphandle)
+ print "Add OCT PublicKey to " + host + " success"
+
+ def OnosEnvSetup( self, handle ):
+ """
+ Onos Environment Setup function
+ """
+ self.Gensshkey( handle )
+ self.home = self.GetEnvValue( handle, 'HOME' )
+ self.AddKnownHost( handle, self.OC1, "karaf", "karaf" )
+ self.AddKnownHost( handle, self.OC2, "karaf", "karaf" )
+ self.AddKnownHost( handle, self.OC3, "karaf", "karaf" )
+ self.DownLoadCode( handle, 'https://github.com/sunyulin/OnosSystemTest.git' )
+ #self.DownLoadCode( handle, 'https://gerrit.onosproject.org/onos' )
+ if self.masterusername == 'root':
+ filepath = '/root/'
+ else :
+ filepath = '/home/' + self.masterusername + '/'
+ self.OnosRootPathChange( filepath )
+ self.CopyOnostoTestbin()
+ self.ChangeOnosName(self.agentusername,self.agentpassword)
+ self.InstallDefaultSoftware( handle )
+ self.SetOnosEnvVar(handle, self.masterpassword,self.agentpassword)
diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py b/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py
new file mode 100644
index 000000000..83cbcb242
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py
@@ -0,0 +1,90 @@
+"""
+Description:
+ This file includes basic functions
+ lanqinglong@huawei.com
+"""
+
+import logging
+import os
+import time
+import yaml
+import re
+import datetime
+
+class foundation:
+
+ def __init__(self):
+
+ #currentpath = os.getcwd()
+ REPO_PATH = os.environ['repos_dir']+'/functest/'
+ currentpath = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI'
+ self.cipath = currentpath
+ self.logdir = os.path.join( currentpath, 'log' )
+ self.workhome = currentpath[0:currentpath.rfind('testcases')-1]
+ self.Result_DB = ''
+ filename = time.strftime( '%Y-%m-%d-%H-%M-%S' ) + '.log'
+ self.logfilepath = os.path.join( self.logdir, filename )
+ self.starttime = datetime.datetime.now()
+
+ def log (self, loginfo):
+ """
+ Record log in log directory for deploying test environment
+ parameters:
+ loginfo(input): record info
+ """
+ logging.basicConfig( level=logging.INFO,
+ format = '%(asctime)s %(filename)s:%(message)s',
+ datefmt = '%d %b %Y %H:%M:%S',
+ filename = self.logfilepath,
+ filemode = 'w')
+ filelog = logging.FileHandler( self.logfilepath )
+ logging.getLogger( 'Functest' ).addHandler( filelog )
+ print loginfo
+ logging.info(loginfo)
+
+ def getdefaultpara( self ):
+ """
+ Get Default Parameters value
+ """
+ with open(self.workhome + "/testcases/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+ f.close()
+
+ self.Result_DB = str(functest_yaml.get("results").get("test_db_url"))
+ self.masterusername = str(functest_yaml.get("ONOS").get("general").\
+ get('onosbench_username'))
+ self.masterpassword = str(functest_yaml.get("ONOS").get("general").\
+ get("onosbench_password"))
+ self.agentusername = str(functest_yaml.get("ONOS").get("general").\
+ get("onoscli_username"))
+ self.agentpassword = str(functest_yaml.get("ONOS").get("general").\
+ get("onoscli_password"))
+ self.runtimeout = functest_yaml.get("ONOS").get("general").get("runtimeout")
+ self.OCT = str(functest_yaml.get("ONOS").get("environment").get("OCT"))
+ self.OC1 = str(functest_yaml.get("ONOS").get("environment").get("OC1"))
+ self.OC2 = str(functest_yaml.get("ONOS").get("environment").get("OC2"))
+ self.OC3 = str(functest_yaml.get("ONOS").get("environment").get("OC3"))
+ self.OCN = str(functest_yaml.get("ONOS").get("environment").get("OCN"))
+ self.OCN2 = str(functest_yaml.get("ONOS").get("environment").get("OCN2"))
+ self.installer_master = str(functest_yaml.get("ONOS").get("environment").get("installer_master"))
+ self.installer_master_username = str(functest_yaml.get("ONOS").get("environment").get("installer_master_username"))
+ self.installer_master_password = str(functest_yaml.get("ONOS").get("environment").get("installer_master_password"))
+ self.hosts = [self.OC1, self.OCN, self.OCN2]
+ self.localhost = self.OCT
+
+ def GetResult( self ):
+ cmd = "cat " + self.logfilepath + " | grep Fail"
+ Resultbuffer = os.popen(cmd).read()
+ duration = datetime.datetime.now() - self.starttime
+ time.sleep(2)
+
+ if re.search("[1-9]+", Resultbuffer):
+ self.log("Testcase Fails\n" + Resultbuffer)
+ Result = "POK"
+ else:
+ self.log("Testcases Pass")
+ Result = "OK"
+ payload={'timestart': str(self.starttime),
+ 'duration': str(duration),
+ 'status': Result}
+ return payload
diff --git a/testcases/Controllers/ONOS/Teston/CI/dependencies/onos b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
new file mode 100644
index 000000000..d4d59e0f7
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos
@@ -0,0 +1,23 @@
+#!/bin/bash
+# -----------------------------------------------------------------------------
+# ONOS remote command-line client.
+# -----------------------------------------------------------------------------
+
+[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
+. /root/.bashrc
+. $ONOS_ROOT/tools/build/envDefaults
+. $ONOS_ROOT/tools/test/bin/find-node.sh
+
+[ "$1" = "-w" ] && shift && onos-wait-for-start $1
+
+[ -n "$1" ] && OCI=$(find_node $1) && shift
+
+if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then
+ # Use Karaf client only if we can and are allowed to
+ unset KARAF_HOME
+ client -h $OCI -u karaf "$@" 2>/dev/null
+else
+ # Otherwise use raw ssh; strict checking is off for dev environments only
+ #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@"
+ sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@"
+fi
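This wrapper is the file that CopyOnostoTestbin later copies over onos/tools/test/bin/onos, so the CLI falls back to sshpass when the Karaf client is not usable. Typical usage, assuming ONOS_ROOT and the $OC* variables are already exported (as OnosConnectionSet arranges):

    # Sketch -- assumes ONOS_ROOT and $OC1 are set in the environment.
    onos -w $OC1 summary     # wait for the instance on $OC1, then run the 'summary' command
    onos $OC1 devices        # list the devices known to that instance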
diff --git a/testcases/Controllers/ONOS/Teston/CI/log/gitignore b/testcases/Controllers/ONOS/Teston/CI/log/gitignore
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/log/gitignore
diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
new file mode 100644
index 000000000..675b3fc68
--- /dev/null
+++ b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py
@@ -0,0 +1,24 @@
+"""
+Description: This test is to run onos Teston VTN scripts
+
+List of test cases:
+CASE1 - Northbound NBI test network/subnet/ports
+CASE2 - Ovsdb test&Default configuration&Vm go online
+
+lanqinglong@huawei.com
+"""
+from adapters.client import client
+
+if __name__=="__main__":
+
+ main = client()
+ main.getdefaultpara()
+
+ #scripts to run
+ runhandle = main.onosstart()
+ main.RunScript(runhandle, "FUNCvirNetNB")
+# main.RunScript(runhandle, "FUNCovsdbtest")
+ main.RunScript(runhandle, "FUNCvirNetNBL3")
+# main.RunScript(runhandle, "FUNCovsdbtestL3")
+ main.onosclean( runhandle )
+ main.push_results_to_db(main.GetResult())
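foundation.py locates this CI directory through the repos_dir environment variable, so a run from a functest host would look roughly like this (the /home/opnfv/repos path is an assumption, consistent with the REPO_DIR used for ODL above):

    # Sketch -- repos_dir is read in foundation.__init__; the path below is an assumption.
    export repos_dir=/home/opnfv/repos
    cd $repos_dir/functest/testcases/Controllers/ONOS/Teston/CI
    python onosfunctest.py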
diff --git a/testcases/VIM/OpenStack/CI/custom_tests/test_list.txt b/testcases/VIM/OpenStack/CI/custom_tests/test_list.txt
new file mode 100644
index 000000000..ca6bf6be2
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/custom_tests/test_list.txt
@@ -0,0 +1,227 @@
+# This list contains tempest test cases chosen for Functest deployment.
+# It consists of two main parts:
+# 1. Set of tempest smoke test cases
+# 2. Set of test cases from DefCore list (https://wiki.openstack.org/wiki/Governance/DefCoreCommittee)
+#
+# Part 1 (smoke)
+#
+tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]
+tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors[id-e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59,smoke]
+tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create[id-850795d7-d4d3-4e55-b527-a774c0123d3a,network,smoke]
+tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list[id-a6154130-5a55-4850-8be4-5e9e796dbf17,network,smoke]
+tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete[id-eb2b087d-633d-4d0d-a7bd-9e6ba35b32de,network,smoke]
+tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip[id-c7e0e60b-ee45-43d0-abeb-8596fd42a2f9,network,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke]
+tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses[id-6eb718c0-02d9-4d5e-acd1-4e0c269cef39,network,smoke]
+tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network[id-87bbc374-5538-4f64-b673-2b0e4443cc30,network,smoke]
+tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services[id-34ea6489-012d-4a86-9038-1287cadd5eca,smoke]
+tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user[id-2d55a71e-da1d-4b43-9c03-d269fd93d905,smoke]
+tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete[id-7cd59bf9-bda4-4c72-9467-d21cab278355,smoke]
+tempest.api.identity.admin.v3.test_domains.DefaultDomainTestJSON.test_default_domain_exists[id-17a5de24-e6a0-4e4a-a9ee-d85b6e5612b5,smoke]
+tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain[id-f2f5b44a-82e8-4dad-8084-0661ea3b18cf,smoke]
+tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint[id-37e8f15e-ee7c-4657-a1e7-f6b61e375eff,smoke]
+tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete[id-1598521a-2f36-4606-8df9-30772bd51339,smoke]
+tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy[id-e544703a-2f03-4cf2-9b0f-350782fdb0d3,smoke]
+tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id[id-2c12c5b5-efcf-4aa5-90c5-bff1ab0cdbe2,smoke]
+tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list[id-18afc6c0-46cf-4911-824e-9989cc056c3a,smoke]
+tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service[id-5193aad5-bcb7-411d-85b0-b3b61b96ef06,smoke]
+tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all[id-4773ebd5-ecbf-4255-b8d8-b63e6f72b65d,smoke]
+tempest.api.identity.v2.test_api_discovery.TestApiDiscovery.test_api_media_types[id-007a0be0-78fe-4fdb-bbee-e9216cc17bb2,smoke]
+tempest.api.identity.v2.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-ea889a68-a15f-4166-bfb1-c12456eae853,smoke]
+tempest.api.identity.v2.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-77fd6be0-8801-48e6-b9bf-38cdd2f253ec,smoke]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265,smoke]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd,smoke]
+tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e,smoke]
+tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image[id-f848bb94-1c6e-45a4-8726-39e3a5b23535,smoke]
+tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_register_upload_get_image_file[id-139b765e-7f3d-4b3d-8b37-3ca3876ee318,smoke]
+tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image[id-f66891a7-a35c-41a8-b590-a065c2a1caa6,smoke]
+tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions[id-ef28c7e6-e646-4979-9d67-deb207bc5564,smoke]
+tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address[id-36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5,smoke]
+tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip[id-62595970-ab1c-4b7f-8fcc-fddfe55e8718,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
+tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
+tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
+tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
+tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
+tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
+tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
+tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
+tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
+tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
+tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
+tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
+tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
+tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
+tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
+tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
+tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
+tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
+tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
+tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list[id-7123d082-3577-4a30-8f00-f805327c4ffd,smoke]
+tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show[id-0e85a483-828b-4a28-a0e3-f0a21809192b,smoke]
+tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template[id-8401821d-65fe-4d43-9fa3-57d5ce3a35c7,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list[id-1275c835-c967-4a2c-8d5d-ad533447ed91,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata[id-fe7cd9f9-54b1-429c-a3b7-7df8451db913,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config[id-136162ed-9445-4b9c-b7fc-306af8b5da99,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate[id-f29d21f3-ed75-47cf-8cdc-ef1bdeb4c674,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change[id-2ac43ab3-34f2-415d-be2e-eabb4d14ee32,smoke]
+tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change[id-92c48944-d79d-4595-a840-8e1a581c1a72,smoke]
+tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources[id-10498bd5-a83e-4b62-a817-ce24afe938fe,smoke]
+tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds[id-d35d628c-07f6-4674-85a1-74db9919e986,smoke]
+tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications[id-04b10bfe-a5dc-47af-b22f-0460426bf498,image,smoke]
+tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications[id-c240457d-d943-439b-8aea-85e26d64fe8e,image,smoke]
+tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance[compute,id-fff42874-7db5-4487-a8e1-ddda5fb5288d,smoke,stress]
+tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance[compute,id-fff42874-7db5-4487-a8e1-ddda5fb5288d,smoke,stress]
+tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51,smoke]
+tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638,image,smoke]
+tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51,smoke]
+tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638,image,smoke]
+tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4,smoke]
+tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4,smoke]
+tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke]
+tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basicops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke]
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern[compute,id-557cd2c2-4eb8-4dce-98be-f86765ff311b,image,smoke,volume]
+#
+# Part 2 (DefCore)
+#
+tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
+tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
+tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b]
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb]
+tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789]
+tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f]
+tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
+tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
+tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
+tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a]
+tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative]
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_change_server_password[id-6158df09-4b82-4ab3-af6d-29cf36af858d]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_get_console_output[id-4b8867e6-fffa-4d54-b1d1-6fdda57be2f3]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft[id-4640e3ef-a5df-482e-95a1-ceeeb0faa84d]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68]
+tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
+tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_reboot_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative]
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_create_keypair_in_analt_user_tenant[id-f03d1ded-7fd4-4d29-bc13-e2391f29c625]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_create_server_fails_when_tenant_incorrect[id-acf8724b-142b-4044-82c3-78d31a533f24]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_create_server_with_unauthorized_image[id-95d445f6-babc-4f2e-aea3-aa24ec5e7f0d]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_get_keypair_of_alt_account_fails[id-85bcdd8f-56b4-4868-ae56-63fbf6f7e405]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_get_metadata_of_alt_account_server_fails[id-dea1936a-473d-49f2-92ad-97bb7aded22e]
+tempest.api.compute.test_authorization.AuthorizationTestJSON.test_set_metadata_of_alt_account_server_fails[id-c5f52351-53d9-4fc9-83e5-917f7f5e3d71]
+tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
+tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
+tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
+tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d]
+tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c]
+tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative]
+tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative]
+tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d]
diff --git a/testcases/VIM/OpenStack/CI/libraries/check_os.sh b/testcases/VIM/OpenStack/CI/libraries/check_os.sh
new file mode 100755
index 000000000..560205261
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/check_os.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# Simple script to check the basic OpenStack clients
+#
+# Author:
+# jose.lausuch@ericsson.com
+#
+
+verify_connectivity() {
+ for i in $(seq 0 10); do
+ if echo "test" | nc -v $1 $2 &>/dev/null; then
+ return 0
+ fi
+ sleep 1
+ done
+ return 1
+}
+
+
+if [ -z "$OS_AUTH_URL" ]; then
+ echo "ERROR: OS_AUTH_URL environment variable missing... Have you sourced the OpenStack credentials?"
+ exit 1
+fi
+
+
+echo "Checking OpenStack endpoints:"
+publicURL=$OS_AUTH_URL
+publicIP=$(echo $publicURL|sed 's/^.*http\:\/\///'|sed 's/.[^:]*$//')
+publicPort=$(echo $publicURL|sed 's/^.*://'|sed 's/.[^\/]*$//')
+echo ">>Verifying connectivity to the public endpoint $publicIP:$publicPort..."
+verify_connectivity $publicIP $publicPort
+RETVAL=$?
+if [ $RETVAL -ne 0 ]; then
+ echo "ERROR: Cannot talk to the public endpoint publicIP:$publicPort ."
+ echo "OS_AUTH_URL=$OS_AUTH_URL"
+ exit 1
+fi
+echo " ...OK"
+
+adminURL=$(keystone catalog --service identity 2>/dev/null|grep adminURL|awk '{print $4}')
+adminIP=$(echo $adminURL|sed 's/^.*http\:\/\///'|sed 's/.[^:]*$//')
+adminPort=$(echo $adminURL|sed 's/^.*://'|sed 's/.[^\/]*$//')
+echo ">>Verifying connectivity to the admin endpoint $adminIP:$adminPort..."
+verify_connectivity $adminIP $adminPort
+RETVAL=$?
+if [ $RETVAL -ne 0 ]; then
+ echo "ERROR: Cannot talk to the admin endpoint adminIP:$adminPort ."
+ echo "adminURL"
+ exit 1
+fi
+echo " ...OK"
+
+
+echo "Checking OpenStack basic services:"
+commands=('keystone endpoint-list' 'nova list' 'neutron net-list' \
+ 'glance image-list' 'cinder list')
+for cmd in "${commands[@]}"
+do
+ service=$(echo $cmd | awk '{print $1}')
+ echo ">>Checking $service service..."
+ $cmd &>/dev/null
+ result=$?
+    if [ $result -ne 0 ];
+    then
+        echo "ERROR: Failed execution of '$cmd'."
+        echo "The $service service does not seem to be working."
+ exit 1
+ else
+ echo " ...OK"
+ fi
+done
+
+echo "OpenStack services are OK."
+
+echo "Checking External network..."
+networks=($(neutron net-list | tail -n +4 | head -n -1 | awk '{print $2}'))
+is_external=False
+for net in "${networks[@]}"
+do
+ is_external=$(neutron net-show $net|grep "router:external"|awk '{print $4}')
+    if [ "$is_external" == "True" ]; then
+ echo "External network found: $net"
+ break
+ fi
+done
+if [ "$is_external" == "False" ]; then
+ echo "ERROR: There are no external networks in the deployment."
+ exit 1
+fi
+
+exit 0
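Note: check_os.sh derives the endpoint host and port from OS_AUTH_URL with sed. As a cross-check, the same extraction can be done with Python's urlparse; the snippet below is only an illustrative sketch (not part of the patch) and the sample URL is an assumption:

    import os
    from urlparse import urlparse  # Python 2, matching the other scripts in this patch

    # Example value; a real run relies on the sourced OpenStack credentials.
    auth_url = os.environ.get('OS_AUTH_URL', 'http://192.168.10.2:5000/v2.0')
    parsed = urlparse(auth_url)
    public_ip = parsed.hostname    # what the first sed expression extracts
    public_port = parsed.port      # what the second sed expression extracts
    print "public endpoint: %s:%s" % (public_ip, public_port)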
diff --git a/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py b/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py
new file mode 100644
index 000000000..96bd172b9
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py
@@ -0,0 +1,399 @@
+#!/usr/bin/env python
+#
+# Description:
+# Cleans possible leftovers after running functest tests:
+# - Nova instances
+# - Glance images
+# - Cinder volumes
+# - Floating IPs
+# - Neutron networks, subnets and ports
+# - Routers
+# - Users and tenants
+#
+# Author:
+# jose.lausuch@ericsson.com
+#
+
+import argparse
+import logging
+import os
+import re
+import sys
+import time
+import yaml
+
+from novaclient import client as novaclient
+from neutronclient.v2_0 import client as neutronclient
+from keystoneclient.v2_0 import client as keystoneclient
+from cinderclient import client as cinderclient
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+args = parser.parse_args()
+
+
+""" logging configuration """
+logger = logging.getLogger('clean_openstack')
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+REPO_PATH=os.environ['repos_dir']+'/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open(REPO_PATH+"testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml") as f:
+ defaults_yaml = yaml.safe_load(f)
+f.close()
+
+installer = os.environ["INSTALLER_TYPE"]
+
+default_images = defaults_yaml.get(installer).get("images")
+default_networks = defaults_yaml.get(installer).get("networks") +\
+ defaults_yaml.get("common").get("networks")
+default_routers = defaults_yaml.get(installer).get("routers") +\
+ defaults_yaml.get("common").get("routers")
+default_security_groups = defaults_yaml.get(installer).get("security_groups")
+default_users = defaults_yaml.get(installer).get("users")
+default_tenants = defaults_yaml.get(installer).get("tenants")
+
+def separator():
+ logger.info("-------------------------------------------")
+
+def remove_instances(nova_client):
+ logger.info("Removing Nova instances...")
+ instances = functest_utils.get_instances(nova_client)
+ if instances is None or len(instances) == 0:
+ logger.debug("No instances found.")
+ return
+
+ for instance in instances:
+ instance_name = getattr(instance, 'name')
+ instance_id = getattr(instance, 'id')
+ logger.debug("Removing instance '%s', ID=%s ..." % (instance_name,instance_id))
+ if functest_utils.delete_instance(nova_client, instance_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "instance %s..." % instance_id)
+
+ timeout = 50
+ while timeout > 0:
+ instances = functest_utils.get_instances(nova_client)
+ if instances is None or len(instances) == 0:
+ break
+ else:
+ logger.debug("Waiting for instances to be terminated...")
+ timeout -= 1
+ time.sleep(1)
+
+
+def remove_images(nova_client):
+ logger.info("Removing Glance images...")
+ images = functest_utils.get_images(nova_client)
+ if images is None or len(images) == 0:
+ logger.debug("No images found.")
+ return
+
+ for image in images:
+ image_name = getattr(image, 'name')
+ image_id = getattr(image, 'id')
+ logger.debug("'%s', ID=%s " %(image_name,image_id))
+ if image_name not in default_images:
+ logger.debug("Removing image '%s', ID=%s ..." % (image_name,image_id))
+ if functest_utils.delete_glance_image(nova_client, image_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the"
+ "image %s..." % image_id)
+ else:
+ logger.debug(" > this is a default image and will NOT be deleted.")
+
+
+def remove_volumes(cinder_client):
+ logger.info("Removing Cinder volumes...")
+ volumes = functest_utils.get_volumes(cinder_client)
+ if volumes is None or len(volumes) == 0:
+ logger.debug("No volumes found.")
+ return
+
+ for volume in volumes:
+ volume_id = getattr(volume, 'id')
+ logger.debug("Removing cinder volume %s ..." % volume_id)
+ if functest_utils.delete_volume(cinder_client, volume_id):
+ logger.debug(" > Done!")
+ else:
+ logger.debug("Trying forced removal...")
+ if functest_utils.delete_volume(cinder_client,
+ volume_id,
+ forced=True):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "volume %s..." % volume_id)
+
+
+def remove_floatingips(nova_client):
+ logger.info("Removing floating IPs...")
+ floatingips = functest_utils.get_floating_ips(nova_client)
+ if floatingips is None or len(floatingips) == 0:
+ logger.debug("No floating IPs found.")
+ return
+
+ for fip in floatingips:
+ fip_id = getattr(fip, 'id')
+ logger.debug("Removing floating IP %s ..." % fip_id)
+ if functest_utils.delete_floating_ip(nova_client, fip_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "floating IP %s..." % fip_id)
+
+ timeout = 50
+ while timeout > 0:
+ floatingips = functest_utils.get_floating_ips(nova_client)
+ if floatingips is None or len(floatingips) == 0:
+ break
+ else:
+ logger.debug("Waiting for floating ips to be released...")
+ timeout -= 1
+ time.sleep(1)
+
+
+def remove_networks(neutron_client):
+ logger.info("Removing Neutron objects")
+ network_ids = []
+ networks = functest_utils.get_network_list(neutron_client)
+    if networks is None:
+ logger.debug("There are no networks in the deployment. ")
+ else:
+ logger.debug("Existing networks:")
+ for network in networks:
+ net_id = network['id']
+ net_name = network['name']
+ logger.debug(" '%s', ID=%s " %(net_name,net_id))
+ if net_name in default_networks:
+ logger.debug(" > this is a default network and will NOT be deleted.")
+ elif network['router:external'] == True:
+ logger.debug(" > this is an external network and will NOT be deleted.")
+ else:
+ logger.debug(" > this network will be deleted.")
+ network_ids.append(net_id)
+
+ #delete ports
+ ports = functest_utils.get_port_list(neutron_client)
+ if ports is None:
+ logger.debug("There are no ports in the deployment. ")
+ else:
+ remove_ports(neutron_client, ports, network_ids)
+
+ #remove routers
+ routers = functest_utils.get_router_list(neutron_client)
+ if routers is None:
+ logger.debug("There are no routers in the deployment. ")
+ else:
+ remove_routers(neutron_client, routers)
+
+ #remove networks
+    if network_ids:
+ for net_id in network_ids:
+ logger.debug("Removing network %s ..." % net_id)
+ if functest_utils.delete_neutron_net(neutron_client, net_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "network %s..." % net_id)
+
+
+def remove_ports(neutron_client, ports, network_ids):
+ for port in ports:
+ if port['network_id'] in network_ids:
+ port_id = port['id']
+            try:
+                subnet_id = port['fixed_ips'][0]['subnet_id']
+            except (KeyError, IndexError):
+                subnet_id = None
+                logger.info(" > WARNING: Port %s does not contain 'fixed_ips': %s" % (port_id, port))
+ router_id = port['device_id']
+ if len(port['fixed_ips']) == 0 and router_id == '':
+ logger.debug("Removing port %s ..." % port_id)
+ if functest_utils.delete_neutron_port(neutron_client, port_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "port %s ..." %port_id)
+ force_remove_port(neutron_client, port_id)
+
+ elif port['device_owner'] == 'network:router_interface':
+ logger.debug("Detaching port %s (subnet %s) from router %s ..."
+ % (port_id,subnet_id,router_id))
+ if functest_utils.remove_interface_router(neutron_client,
+ router_id, subnet_id):
+ time.sleep(5) # leave 5 seconds to detach before doing anything else
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "interface %s from router %s..." %(subnet_id,router_id))
+ force_remove_port(neutron_client, port_id)
+ else:
+ force_remove_port(neutron_client, port_id)
+
+
+def force_remove_port(neutron_client, port_id):
+ logger.debug("Clearing device_owner for port %s ..." % port_id)
+ functest_utils.update_neutron_port(neutron_client,
+ port_id,
+ device_owner='clear')
+ logger.debug("Removing port %s ..." % port_id)
+ if functest_utils.delete_neutron_port(neutron_client, port_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: Deleting port %s failed" % port_id)
+
+
+def remove_routers(neutron_client, routers):
+ for router in routers:
+ router_id = router['id']
+ router_name = router['name']
+ if router_name not in default_routers:
+ logger.debug("Checking '%s' with ID=(%s) ..." % (router_name,router_id))
+            if router['external_gateway_info'] is not None:
+ logger.debug("Router has gateway to external network. Removing link...")
+ if functest_utils.remove_gateway_router(neutron_client, router_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing "
+ "the gateway...")
+ else:
+ logger.debug("Router is not connected to anything. Ready to remove...")
+ logger.debug("Removing router %s(%s) ..." % (router_name, router_id))
+ if functest_utils.delete_neutron_router(neutron_client, router_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "router '%s'(%s)..." % (router_name, router_id))
+
+
+def remove_security_groups(neutron_client):
+ logger.info("Removing Security groups...")
+ secgroups = functest_utils.get_security_groups(neutron_client)
+ if secgroups is None or len(secgroups) == 0:
+ logger.debug("No security groups found.")
+ return
+
+ for secgroup in secgroups:
+ secgroup_name = secgroup['name']
+ secgroup_id = secgroup['id']
+ logger.debug("'%s', ID=%s " %(secgroup_name,secgroup_id))
+ if secgroup_name not in default_security_groups:
+ logger.debug(" Removing '%s'..." % secgroup_name)
+ if functest_utils.delete_security_group(neutron_client, secgroup_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "security group %s..." % secgroup_id)
+ else:
+ logger.debug(" > this is a default security group and will NOT "
+ "be deleted.")
+
+
+def remove_users(keystone_client):
+ logger.info("Removing Users...")
+ users = functest_utils.get_users(keystone_client)
+    if users is None:
+ logger.debug("There are no users in the deployment. ")
+ return
+
+ for user in users:
+ user_name = getattr(user, 'name')
+ user_id = getattr(user, 'id')
+ logger.debug("'%s', ID=%s " %(user_name,user_id))
+ if user_name not in default_users:
+ logger.debug(" Removing '%s'..." % user_name)
+ if functest_utils.delete_user(keystone_client,user_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "user '%s'(%s)..." % (user_name,user_id))
+ else:
+ logger.debug(" > this is a default user and will NOT be deleted.")
+
+
+def remove_tenants(keystone_client):
+ logger.info("Removing Tenants...")
+ tenants = functest_utils.get_tenants(keystone_client)
+    if tenants is None:
+ logger.debug("There are no tenants in the deployment. ")
+ return
+
+ for tenant in tenants:
+ tenant_name=getattr(tenant, 'name')
+ tenant_id = getattr(tenant, 'id')
+ logger.debug("'%s', ID=%s " %(tenant_name,tenant_id))
+ if tenant_name not in default_tenants:
+ logger.debug(" Removing '%s'..." % tenant_name)
+ if functest_utils.delete_tenant(keystone_client,tenant_id):
+ logger.debug(" > Done!")
+ else:
+ logger.info(" > ERROR: There has been a problem removing the "
+ "tenant '%s'(%s)..." % (tenant_name,tenant_id))
+ else:
+ logger.debug(" > this is a default tenant and will NOT be deleted.")
+
+
+
+def main():
+ creds_nova = functest_utils.get_credentials("nova")
+ nova_client = novaclient.Client('2',**creds_nova)
+
+ creds_neutron = functest_utils.get_credentials("neutron")
+ neutron_client = neutronclient.Client(**creds_neutron)
+
+ creds_keystone = functest_utils.get_credentials("keystone")
+ keystone_client = keystoneclient.Client(**creds_keystone)
+
+ creds_cinder = functest_utils.get_credentials("cinder")
+ #cinder_client = cinderclient.Client(**creds_cinder)
+ cinder_client = cinderclient.Client('1',creds_cinder['username'],
+ creds_cinder['api_key'],
+ creds_cinder['project_id'],
+ creds_cinder['auth_url'],
+ service_type="volume")
+
+ if not functest_utils.check_credentials():
+ logger.error("Please source the openrc credentials and run the script again.")
+ exit(-1)
+
+ remove_instances(nova_client)
+ separator()
+ remove_images(nova_client)
+ separator()
+ remove_volumes(cinder_client)
+ separator()
+ remove_floatingips(nova_client)
+ separator()
+ remove_networks(neutron_client)
+ separator()
+ remove_security_groups(neutron_client)
+ separator()
+ remove_users(keystone_client)
+ separator()
+ remove_tenants(keystone_client)
+ separator()
+
+ exit(0)
+
+
+if __name__ == '__main__':
+ main()
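Note: remove_instances() and remove_floatingips() above share the same wait-until-gone pattern: poll the listing call once per second for up to 50 seconds. A minimal standalone sketch of that pattern (illustrative only; the helper name and the lambda are placeholders, not functions from this patch):

    import time

    def wait_until_empty(list_resources, timeout=50):
        # Poll until the listing callable returns nothing or the timeout expires.
        while timeout > 0:
            if not list_resources():
                return True
            time.sleep(1)
            timeout -= 1
        return False

    # e.g. wait_until_empty(lambda: functest_utils.get_instances(nova_client))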
diff --git a/testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml b/testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml
new file mode 100644
index 000000000..f792cda5d
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/os_defaults.yaml
@@ -0,0 +1,31 @@
+fuel:
+ images: ['TestVM']
+ networks: ['net04', 'net04_ext']
+ routers: ['router04']
+ security_groups: ['default']
+ tenants: ['admin', 'services']
+ users: ['heat', 'heat-cfn', 'cinder', 'nova', 'swift', 'glance', 'neutron', 'admin', 'fuel_stats_user']
+apex:
+ images: []
+ networks: ['internal', 'external']
+ routers: []
+ security_groups: ['default']
+ tenants: ['admin', 'services', 'service']
+ users: ['heat', 'heat-cfn', 'cinder', 'nova', 'swift', 'glance', 'neutron', 'admin', 'ceilometer', 'cinderv2']
+compass:
+ images: []
+ networks: ['ext-net']
+ routers: []
+ security_groups: ['default']
+ tenants: ['admin', 'service', 'demo']
+ users: ['heat', 'cinder', 'nova', 'glance', 'neutron', 'admin', 'ceilometer', 'demo', 'keystone']
+joid:
+ images: []
+ networks: ['ext-net']
+ routers: []
+ security_groups: ['default']
+ tenants: ['admin', 'services']
+ users: ['admin', 'glance', 'nova', 'quantum_nova', 'quantum', 'heat-cfn_heat', 'ceilometer', 'cinder_cinderv2', 'swift']
+common:
+ networks: ['functest-net']
+ routers: ['functest-router']
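Note: clean_openstack.py consumes this file by appending the "common" lists to the installer-specific ones before deciding which resources are protected from deletion. A small sketch of that lookup (illustrative; the installer value normally comes from $INSTALLER_TYPE and the file lives in the functest repo):

    import yaml

    with open("os_defaults.yaml") as f:
        defaults = yaml.safe_load(f)

    installer = "fuel"  # normally os.environ["INSTALLER_TYPE"]
    protected_networks = defaults[installer]["networks"] + defaults["common"]["networks"]
    print protected_networks  # ['net04', 'net04_ext', 'functest-net']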
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
new file mode 100755
index 000000000..b5fcd7218
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com
+# morgan.richomme@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# 0.1 (05/2015) initial commit
+# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
+# 0.3 (19/10/2015) remove Tempest from run_rally
+# and push result into test DB
+#
+
+import re
+import json
+import os
+import argparse
+import logging
+import yaml
+import requests
+import sys
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
+from keystoneclient.v2_0 import client as keystoneclient
+from neutronclient.v2_0 import client as neutronclient
+
+""" tests configuration """
+tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
+ 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
+parser = argparse.ArgumentParser()
+parser.add_argument("test_name",
+ help="Module name to be tested. "
+ "Possible values are : "
+ "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
+ "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
+ "{d[10]} ] "
+ "The 'all' value "
+ "performs all possible test scenarios"
+ .format(d=tests))
+
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+parser.add_argument("-s", "--smoke",
+ help="Smoke test mode",
+ action="store_true")
+
+args = parser.parse_args()
+
+client_dict = {}
+
+""" logging configuration """
+logger = logging.getLogger("run_rally")
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+
+formatter = logging.Formatter("%(asctime)s - %(name)s - "
+ "%(levelname)s - %(message)s")
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+REPO_PATH=os.environ['repos_dir']+'/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+HOME = os.environ['HOME']+"/"
+####todo:
+#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+# get("directories").get("dir_rally_scn")
+SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
+###
+TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
+SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
+###todo:
+FLAVOR_NAME = "m1.tiny"
+USERS_AMOUNT = 2
+TENANTS_AMOUNT = 3
+CONTROLLERS_AMOUNT = 2
+###
+RESULTS_DIR = functest_yaml.get("general").get("directories"). \
+ get("dir_rally_res")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+FLOATING_NETWORK = functest_yaml.get("general"). \
+ get("openstack").get("neutron_public_net_name")
+FLOATING_SUBNET_CIDR = functest_yaml.get("general"). \
+ get("openstack").get("neutron_public_subnet_cidr")
+PRIVATE_NETWORK = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_net_name")
+
+GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
+ get("openstack").get("image_name")
+GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
+ get("openstack").get("image_file_name")
+GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
+ get("openstack").get("image_disk_format")
+GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
+ get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+
+
+def push_results_to_db(payload):
+
+ url = TEST_DB + "/results"
+ installer = functest_utils.get_installer_type(logger)
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ pod_name = functest_utils.get_pod_name(logger)
+ # TODO pod_name hardcoded, info shall come from Jenkins
+ params = {"project_name": "functest", "case_name": "Rally",
+ "pod_name": pod_name, "installer": installer,
+ "version": git_version, "details": payload}
+
+ headers = {'Content-Type': 'application/json'}
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ logger.debug(r)
+
+
+def get_task_id(cmd_raw):
+ """
+ get task id from command rally result
+ :param cmd_raw:
+ :return: task_id as string
+ """
+ taskid_re = re.compile('^Task +(.*): started$')
+ for line in cmd_raw.splitlines(True):
+ line = line.strip()
+ match = taskid_re.match(line)
+ if match:
+ return match.group(1)
+ return None
+
+
+def task_succeed(json_raw):
+ """
+ Parse JSON from rally JSON results
+ :param json_raw:
+ :return: Bool
+ """
+ rally_report = json.loads(json_raw)
+ rally_report = rally_report[0]
+ if rally_report is None:
+ return False
+ if rally_report.get('result') is None:
+ return False
+
+ for result in rally_report.get('result'):
+ if len(result.get('error')) > 0:
+ return False
+
+ return True
+
+
+def build_task_args(test_file_name):
+ task_args = {'service_list': [test_file_name]}
+ task_args['smoke'] = args.smoke
+ task_args['image_name'] = GLANCE_IMAGE_NAME
+ task_args['flavor_name'] = FLAVOR_NAME
+ task_args['glance_image_location'] = GLANCE_IMAGE_PATH
+ task_args['floating_network'] = FLOATING_NETWORK
+ task_args['floating_subnet_cidr'] = FLOATING_SUBNET_CIDR
+ task_args['netid'] = functest_utils.get_network_id(client_dict['neutron'],
+ PRIVATE_NETWORK).encode('ascii', 'ignore')
+ task_args['tmpl_dir'] = TEMPLATE_DIR
+ task_args['sup_dir'] = SUPPORT_DIR
+ task_args['users_amount'] = USERS_AMOUNT
+ task_args['tenants_amount'] = TENANTS_AMOUNT
+ task_args['controllers_amount'] = CONTROLLERS_AMOUNT
+
+ return task_args
+
+
+def run_task(test_name):
+ #
+ # the "main" function of the script who launch rally for a task
+ # :param test_name: name for the rally test
+ # :return: void
+ #
+
+ logger.info('starting {} test ...'.format(test_name))
+
+ task_file = '{}task.yaml'.format(SCENARIOS_DIR)
+ if not os.path.exists(task_file):
+ logger.error("Task file '%s' does not exist." % task_file)
+ exit(-1)
+
+ test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
+ if not os.path.exists(test_file_name):
+ logger.error("The scenario '%s' does not exist." % test_file_name)
+ exit(-1)
+
+ logger.debug('Scenario fetched from : {}'.format(test_file_name))
+
+ cmd_line = "rally task start --abort-on-sla-failure " + \
+ "--task {} ".format(task_file) + \
+ "--task-args \"{}\" ".format(build_task_args(test_name))
+ logger.debug('running command line : {}'.format(cmd_line))
+ cmd = os.popen(cmd_line)
+ task_id = get_task_id(cmd.read())
+ logger.debug('task_id : {}'.format(task_id))
+
+ if task_id is None:
+ logger.error("failed to retrieve task_id")
+ exit(-1)
+
+ # check for result directory and create it otherwise
+ if not os.path.exists(RESULTS_DIR):
+        logger.debug('{} does not exist, creating it...'.format(RESULTS_DIR))
+ os.makedirs(RESULTS_DIR)
+
+ # write html report file
+ report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
+ cmd_line = "rally task report {} --out {}".format(task_id,
+ report_file_name)
+
+ logger.debug('running command line : {}'.format(cmd_line))
+ os.popen(cmd_line)
+
+ # get and save rally operation JSON result
+ cmd_line = "rally task results %s" % task_id
+ logger.debug('running command line : {}'.format(cmd_line))
+ cmd = os.popen(cmd_line)
+ json_results = cmd.read()
+ with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
+ logger.debug('saving json file')
+ f.write(json_results)
+
+ with open('{}opnfv-{}.json'
+ .format(RESULTS_DIR, test_name)) as json_file:
+ json_data = json.load(json_file)
+
+ # Push results in payload of testcase
+ if args.report:
+ logger.debug("Push result into DB")
+ push_results_to_db(json_data)
+
+ """ parse JSON operation result """
+ if task_succeed(json_results):
+ print 'Test OK'
+ else:
+ print 'Test KO'
+
+
+def main():
+ # configure script
+ if not (args.test_name in tests):
+ logger.error('argument not valid')
+ exit(-1)
+
+ creds_nova = functest_utils.get_credentials("nova")
+ nova_client = novaclient.Client('2',**creds_nova)
+ creds_neutron = functest_utils.get_credentials("neutron")
+ neutron_client = neutronclient.Client(**creds_neutron)
+ creds_keystone = functest_utils.get_credentials("keystone")
+ keystone_client = keystoneclient.Client(**creds_keystone)
+ glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
+ glance_client = glanceclient.Client(1, glance_endpoint,
+ token=keystone_client.auth_token)
+
+ client_dict['neutron'] = neutron_client
+
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ if not image_id:
+ logger.error("Failed to create a Glance image...")
+ exit(-1)
+ # Check if the given image exists
+ try:
+ nova_client.images.find(name=GLANCE_IMAGE_NAME)
+ logger.info("Glance image found '%s'" % GLANCE_IMAGE_NAME)
+    except Exception:
+ logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
+ logger.info("Available images are: ")
+ exit(-1)
+
+ if args.test_name == "all":
+ for test_name in tests:
+ if not (test_name == 'all' or
+ test_name == 'vm'):
+ print(test_name)
+ run_task(test_name)
+ else:
+ print(args.test_name)
+ run_task(args.test_name)
+
+ logger.debug("Deleting image...")
+ if not functest_utils.delete_glance_image(nova_client, image_id):
+ logger.error("Error deleting the glance image")
+
+if __name__ == '__main__':
+ main()
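Note: get_task_id() above depends on the "Task <uuid>: started" banner printed by the rally CLI when a task starts. A quick illustration of the regex on a sample line (the UUID is invented):

    import re

    sample = "Task 2fa4565b-0a99-4cd9-a258-6c1cbf4668db: started"
    match = re.compile(r'^Task +(.*): started$').match(sample)
    if match:
        print match.group(1)  # 2fa4565b-0a99-4cd9-a258-6c1cbf4668db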
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally.py b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
index 2bfb8127f..d5796c1b4 100644
--- a/testcases/VIM/OpenStack/CI/libraries/run_rally.py
+++ b/testcases/VIM/OpenStack/CI/libraries/run_rally.py
@@ -8,36 +8,48 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
-import re, json, os, urllib2, argparse, logging, yaml
-
-with open('../functest.yaml') as f:
- functest_yaml = yaml.safe_load(f)
-f.close()
+# 0.1 (05/2015) initial commit
+# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
+# 0.3 (19/10/2015) remove Tempest from run_rally
+# and push result into test DB
+#
-HOME = os.environ['HOME']+"/"
-SCENARIOS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_scn")
-RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res")
+import re
+import json
+import os
+import argparse
+import logging
+import yaml
+import requests
+import sys
+from novaclient import client as novaclient
+from keystoneclient.v2_0 import client as keystoneclient
+from glanceclient import client as glanceclient
""" tests configuration """
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'tempest', 'vm', 'all', 'smoke']
+tests = ['authenticate', 'glance', 'cinder', 'ceilometer', 'heat', 'keystone',
+ 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
-parser.add_argument("test_name", help="The name of the test you want to perform with rally. "
- "Possible values are : "
- "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | {d[5]} | {d[6]} "
- "| {d[7]} | {d[8]} | {d[9]} | {d[10]} | {d[11]} | {d[12]}]. The 'all' value performs all the tests scenarios "
- "except 'tempest'".format(d=tests))
+parser.add_argument("test_name",
+ help="Module name to be tested"
+ "Possible values are : "
+ "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
+ "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
+ "{d[10]} | {d[11]}]. The 'all' value "
+ "performs all the possible tests scenarios"
+ .format(d=tests))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("test_mode", help="Tempest test mode", nargs='?', default="smoke")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+
args = parser.parse_args()
-test_mode=args.test_mode
-if not args.test_name == "tempest":
- if not args.test_mode == "smoke":
- parser.error("test_mode is only used with tempest")
+
""" logging configuration """
-logger = logging.getLogger('run_rally')
+logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
@@ -46,25 +58,53 @@ if args.debug:
else:
ch.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+formatter = logging.Formatter("%(asctime)s - %(name)s - "
+ "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
+REPO_PATH=os.environ['repos_dir']+'/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
-def get_tempest_id(cmd_raw):
- """
- get task id from command rally result
- :param cmd_raw:
- :return: task_id as string
- """
- taskid_re = re.compile('^Verification UUID: (.*)$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+HOME = os.environ['HOME']+"/"
+SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+ get("directories").get("dir_rally_scn")
+RESULTS_DIR = functest_yaml.get("general").get("directories"). \
+ get("dir_rally_res")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+GLANCE_IMAGE_NAME = "functest-img-rally"
+GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
+ get("openstack").get("image_file_name")
+GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
+ get("openstack").get("image_disk_format")
+GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
+ get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+
+
+def push_results_to_db(payload):
+
+ url = TEST_DB + "/results"
+ installer = functest_utils.get_installer_type(logger)
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ pod_name = functest_utils.get_pod_name(logger)
+ # TODO pod_name hardcoded, info shall come from Jenkins
+ params = {"project_name": "functest", "case_name": "Rally",
+ "pod_name": pod_name, "installer": installer,
+ "version": git_version, "details": payload}
+
+ headers = {'Content-Type': 'application/json'}
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ logger.debug(r)
- if match:
- return match.group(1)
- return None
def get_task_id(cmd_raw):
"""
@@ -81,6 +121,16 @@ def get_task_id(cmd_raw):
return None
+def create_glance_image(path, name, disk_format):
+ """
+ Create a glance image given the absolute path of the image, its name and the disk format
+ """
+ cmd = ("glance image-create --name " + name + " --visibility public "
+ "--disk-format " + disk_format + " --container-format bare --file " + path)
+ functest_utils.execute_command(cmd, logger)
+ return True
+
+
def task_succeed(json_raw):
"""
Parse JSON from rally JSON results
@@ -95,68 +145,33 @@ def task_succeed(json_raw):
return False
for result in rally_report.get('result'):
- if len(result.get('errors')) > 0:
+ if len(result.get('error')) > 0:
return False
return True
-def run_tempest():
- """
- the function dedicated to Tempest (functional tests for OpenStack)
- :param test_mode: Tempest mode smoke (default), full, ..
- :return: void
- """
- logger.info('starting {} Tempest ...'.format(test_mode))
-
- """ get the date """
- cmd = os.popen("date '+%d%m%Y_%H%M'")
- test_date = cmd.read().rstrip()
-
- cmd_line = "rally verify start {}".format(test_mode)
- logger.debug('running command line : {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- task_id = get_tempest_id(cmd.read())
- logger.debug('task_id : {}'.format(task_id))
-
- if task_id is None:
- logger.error("failed to retrieve task_id")
- exit(-1)
-
- """ check for result directory and create it otherwise """
- if not os.path.exists(RESULTS_DIR):
- logger.debug('does not exists, we create it'.format(RESULTS_DIR))
- os.makedirs(RESULTS_DIR)
-
- """ write log report file """
- report_file_name = '{}opnfv-tempest-{}.log'.format(RESULTS_DIR, test_date)
- cmd_line = "rally verify detailed {} > {} ".format(task_id, report_file_name)
- logger.debug('running command line : {}'.format(cmd_line))
- os.popen(cmd_line)
-
def run_task(test_name):
- """
- the "main" function of the script who lunch rally for a task
- :param test_name: name for the rally test
- :return: void
- """
- logger.info('starting {} test ...'.format(test_name))
+ #
+ # the "main" function of the script who lunch rally for a task
+ # :param test_name: name for the rally test
+ # :return: void
+ #
- """ get the date """
- cmd = os.popen("date '+%d%m%Y_%H%M'")
- test_date = cmd.read().rstrip()
+ logger.info('starting {} test ...'.format(test_name))
- """ check directory for scenarios test files or retrieve from git otherwise"""
+ # check directory for scenarios test files or retrieve from git otherwise
proceed_test = True
test_file_name = '{}opnfv-{}.json'.format(SCENARIOS_DIR, test_name)
+
if not os.path.exists(test_file_name):
- logger.debug('{} does not exists'.format(test_file_name))
- proceed_test = retrieve_test_cases_file(test_name, SCENARIOS_DIR)
+ logger.error("The scenario '%s' does not exist." % test_file_name)
+ exit(-1)
- """ we do the test only if we have a scenario test file """
+ # we do the test only if we have a scenario test file
if proceed_test:
logger.debug('Scenario fetched from : {}'.format(test_file_name))
- cmd_line = "rally task start --abort-on-sla-failure %s" % test_file_name
+ cmd_line = "rally task start --abort-on-sla-failure {}".format(test_file_name)
logger.debug('running command line : {}'.format(cmd_line))
cmd = os.popen(cmd_line)
task_id = get_task_id(cmd.read())
@@ -166,26 +181,36 @@ def run_task(test_name):
logger.error("failed to retrieve task_id")
exit(-1)
- """ check for result directory and create it otherwise """
+ # check for result directory and create it otherwise
if not os.path.exists(RESULTS_DIR):
logger.debug('does not exists, we create it'.format(RESULTS_DIR))
os.makedirs(RESULTS_DIR)
- """ write html report file """
- report_file_name = '{}opnfv-{}-{}.html'.format(RESULTS_DIR, test_name, test_date)
- cmd_line = "rally task report %s --out %s" % (task_id, report_file_name)
+ # write html report file
+ report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
+ cmd_line = "rally task report {} --out {}".format(task_id,
+ report_file_name)
+
logger.debug('running command line : {}'.format(cmd_line))
os.popen(cmd_line)
- """ get and save rally operation JSON result """
+ # get and save rally operation JSON result
cmd_line = "rally task results %s" % task_id
logger.debug('running command line : {}'.format(cmd_line))
cmd = os.popen(cmd_line)
json_results = cmd.read()
- with open('{}opnfv-{}-{}.json'.format(RESULTS_DIR, test_name, test_date), 'w') as f:
+ with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
logger.debug('saving json file')
f.write(json_results)
- logger.debug('saving json file2')
+
+ with open('{}opnfv-{}.json'
+ .format(RESULTS_DIR, test_name)) as json_file:
+ json_data = json.load(json_file)
+
+ # Push results in payload of testcase
+ if args.report:
+ logger.debug("Push result into DB")
+ push_results_to_db(json_data)
""" parse JSON operation result """
if task_succeed(json_results):
@@ -193,53 +218,55 @@ def run_task(test_name):
else:
print 'Test KO'
else:
- logger.error('{} test failed, unable to fetch a scenario test file'.format(test_name))
-
-
-def retrieve_test_cases_file(test_name, tests_path):
- """
- Retrieve from github the sample test files
- :return: Boolean that indicates the retrieval status
- """
-
- """ do not add the "/" at the end """
- url_base = "https://git.opnfv.org/cgit/functest/plain/testcases/VIM/OpenStack/CI/suites"
-
- test_file_name = 'opnfv-{}.json'.format(test_name)
- logger.info('fetching {}/{} ...'.format(url_base, test_file_name))
-
- try:
- response = urllib2.urlopen('{}/{}'.format(url_base, test_file_name))
- except (urllib2.HTTPError, urllib2.URLError):
- return False
- file_raw = response.read()
-
- """ check if the test path exist otherwise we create it """
- if not os.path.exists(tests_path):
- os.makedirs(tests_path)
-
- with open('{}/{}'.format(tests_path, test_file_name), 'w') as f:
- f.write(file_raw)
- return True
+ logger.error('{} test failed, unable to fetch a scenario test file'
+ .format(test_name))
def main():
- """ configure script """
+ # configure script
if not (args.test_name in tests):
logger.error('argument not valid')
exit(-1)
+ creds_nova = functest_utils.get_credentials("nova")
+ nova_client = novaclient.Client('2',**creds_nova)
+ creds_keystone = functest_utils.get_credentials("keystone")
+ keystone_client = keystoneclient.Client(**creds_keystone)
+ glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
+ glance_client = glanceclient.Client(1, glance_endpoint,
+ token=keystone_client.auth_token)
+
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ if not image_id:
+ logger.error("Failed to create a Glance image...")
+ exit(-1)
+ # Check if the given image exists
+ try:
+ nova_client.images.find(name=GLANCE_IMAGE_NAME)
+ logger.info("Glance image found '%s'" % GLANCE_IMAGE_NAME)
+    except Exception:
+ logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
+ logger.info("Available images are: ")
+ exit(-1)
+
if args.test_name == "all":
for test_name in tests:
- if not (test_name == 'all' or test_name == 'tempest'):
+ if not (test_name == 'all' or
+ test_name == 'heat' or
+ test_name == 'ceilometer' or
+ test_name == 'smoke' or
+ test_name == 'vm'):
print(test_name)
run_task(test_name)
else:
print(args.test_name)
- if args.test_name == 'tempest':
- run_tempest()
- else:
- run_task(args.test_name)
+ run_task(args.test_name)
+
+ if not functest_utils.delete_glance_image(nova_client, image_id):
+ logger.error("Error deleting the glance image")
if __name__ == '__main__':
main()
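Note: task_succeed() in both rally wrappers expects the JSON produced by "rally task results": a list whose first element carries a "result" array, each entry holding an "error" list. A tiny sketch of the success check on a hand-made payload (field names taken from the code above, values invented):

    import json

    sample = json.dumps([{"result": [{"error": []}, {"error": []}]}])
    report = json.loads(sample)[0]
    succeeded = all(len(r.get('error', [])) == 0 for r in report.get('result', []))
    print succeeded  # True only when no iteration reported an error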
diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
new file mode 100644
index 000000000..e24697c76
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py
@@ -0,0 +1,260 @@
+#!/usr/bin/env python
+#
+# Description:
+# Runs tempest and pushes the results to the DB
+#
+# Authors:
+# morgan.richomme@orange.com
+# jose.lausuch@ericsson.com
+#
+
+import argparse
+import json
+import logging
+import os
+import re
+import requests
+import subprocess
+import sys
+import yaml
+import keystoneclient.v2_0.client as ksclient
+from neutronclient.v2_0 import client as neutronclient
+
+modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing',
+ 'identity', 'image', 'network', 'object_storage', 'orchestration',
+ 'telemetry', 'volume', 'custom']
+
+""" tests configuration """
+parser = argparse.ArgumentParser()
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-m", "--mode", help="Tempest test mode [smoke, all]",
+ default="smoke")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('run_tempest')
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+REPO_PATH=os.environ['repos_dir']+'/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+MODE = "smoke"
+TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name")
+TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get("tenant_description")
+USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name")
+USER_PASSWORD = functest_yaml.get("tempest").get("identity").get("user_password")
+DEPLOYMENT_NAME = functest_yaml.get("rally").get("deployment_name")
+RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get("dir_rally_inst")
+
+
+def get_info(file_result):
+ test_run = ""
+ duration = ""
+ test_failed = ""
+
+ p = subprocess.Popen('cat tempest.log',
+ shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ for line in p.stdout.readlines():
+ # print line,
+ if (len(test_run) < 1):
+ test_run = re.findall("[0-9]*\.[0-9]*s", line)
+ if (len(duration) < 1):
+ duration = re.findall("[0-9]*\ tests", line)
+ regexp = r"(failures=[0-9]+)"
+ if (len(test_failed) < 1):
+ test_failed = re.findall(regexp, line)
+
+ retval = p.wait()
+
+ logger.debug("test_run:"+test_run)
+ logger.debug("duration:"+duration)
+
+
+def push_results_to_db(payload, module, pod_name):
+
+ # TODO move DB creds into config file
+ url = TEST_DB + "/results"
+ installer = functest_utils.get_installer_type(logger)
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ logger.info("Pushing results to DB: '%s'." % url)
+
+ params = {"project_name": "functest", "case_name": "Tempest",
+ "pod_name": str(pod_name), 'installer': installer,
+ "version": git_version, 'details': payload}
+ headers = {'Content-Type': 'application/json'}
+
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ logger.debug(r)
+
+
+def create_tempest_resources():
+ ks_creds = functest_utils.get_credentials("keystone")
+ logger.info("Creating tenant and user for Tempest suite")
+ keystone = ksclient.Client(**ks_creds)
+ tenant_id = functest_utils.create_tenant(keystone, TENANT_NAME, TENANT_DESCRIPTION)
+ if tenant_id == '':
+ logger.error("Error : Failed to create %s tenant" %TENANT_NAME)
+
+ user_id = functest_utils.create_user(keystone, USER_NAME, USER_PASSWORD, None, tenant_id)
+ if user_id == '':
+ logger.error("Error : Failed to create %s user" %USER_NAME)
+
+
+def free_tempest_resources():
+ ks_creds = functest_utils.get_credentials("keystone")
+ logger.info("Deleting tenant and user for Tempest suite)")
+ keystone = ksclient.Client(**ks_creds)
+
+ user_id = functest_utils.get_user_id(keystone, USER_NAME)
+ if user_id == '':
+ logger.error("Error : Failed to get id of %s user" % USER_NAME)
+ else:
+ if not functest_utils.delete_user(keystone, user_id):
+ logger.error("Error : Failed to delete %s user" % USER_NAME)
+
+ tenant_id = functest_utils.get_tenant_id(keystone, TENANT_NAME)
+ if tenant_id == '':
+ logger.error("Error : Failed to get id of %s tenant" % TENANT_NAME)
+ else:
+ if not functest_utils.delete_tenant(keystone, tenant_id):
+ logger.error("Error : Failed to delete %s tenant" % TENANT_NAME)
+
+
+def configure_tempest():
+ """
+ Add/update needed parameters into tempest.conf file generated by Rally
+ """
+
+ logger.debug("Generating tempest.conf file...")
+ cmd = "rally verify genconfig"
+ functest_utils.execute_command(cmd,logger)
+
+ logger.debug("Resolving deployment UUID...")
+ cmd = "rally deployment list | awk '/"+DEPLOYMENT_MAME+"/ {print $2}'"
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT);
+ deployment_uuid = p.stdout.readline().rstrip()
+ if deployment_uuid == "":
+ logger.debug(" Rally deployment NOT found")
+ return False
+
+ logger.debug("Finding tempest.conf file...")
+ tempest_conf_file = RALLY_INSTALLATION_DIR+"/tempest/for-deployment-" \
+ +deployment_uuid+"/tempest.conf"
+ if not os.path.isfile(tempest_conf_file):
+ logger.debug(" Tempest configuration file NOT found")
+ return False
+
+ logger.debug(" Updating fixed_network_name...")
+ private_net_name = ""
+ creds_neutron = functest_utils.get_credentials("neutron")
+ neutron_client = neutronclient.Client(**creds_neutron)
+ private_net = functest_utils.get_private_net(neutron_client)
+ if private_net is None:
+ logger.error("No shared private networks found.")
+ else:
+ private_net_name = private_net['name']
+ cmd = "crudini --set "+tempest_conf_file+" compute fixed_network_name " \
+ +private_net_name
+ functest_utils.execute_command(cmd,logger)
+
+ logger.debug(" Updating non-admin credentials...")
+ cmd = "crudini --set "+tempest_conf_file+" identity tenant_name " \
+ +TENANT_NAME
+ functest_utils.execute_command(cmd,logger)
+ cmd = "crudini --set "+tempest_conf_file+" identity username " \
+ +USER_NAME
+ functest_utils.execute_command(cmd,logger)
+ cmd = "crudini --set "+tempest_conf_file+" identity password " \
+ +USER_PASSWORD
+ functest_utils.execute_command(cmd,logger)
+
+ return True
+
+
+def run_tempest(OPTION):
+ """
+ Main function of the script: launches Rally to run the Tempest suite
+ :param OPTION: tempest option (smoke, ..)
+ :return: void
+ """
+ logger.info("Starting Tempest test suite: '%s'." % OPTION)
+ cmd_line = "rally verify start "+OPTION
+ logger.debug('Executing command : {}'.format(cmd_line))
+ subprocess.call(cmd_line, shell=True, stderr=subprocess.STDOUT)
+
+ cmd_line = "rally verify list"
+ logger.debug('Executing command : {}'.format(cmd_line))
+ cmd = os.popen(cmd_line)
+ output = (((cmd.read()).splitlines()[3]).replace(" ", "")).split("|")
+ # Format:
+ # | UUID | Deployment UUID | smoke | tests | failures | Created at |
+ # Duration | Status |
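+ # The row starts with '|', so after split("|") index 0 is empty and the
+ # useful fields start at index 1.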
+ num_tests = output[4]
+ num_failures = output[5]
+ time_start = output[6]
+ duration = output[7]
+ # Compute duration (let's assume it does not take more than 60 min)
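+ # e.g. an assumed duration string "0:00:47.32" gives dur_min = 0 and
+ # dur_sec_int = 47, i.e. 47 seconds in total.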
+ dur_min=int(duration.split(':')[1])
+ dur_sec_float=float(duration.split(':')[2])
+ dur_sec_int=int(round(dur_sec_float,0))
+ dur_sec_int = dur_sec_int + 60 * dur_min
+
+ # Generate json results for DB
+ json_results = {"timestart": time_start, "duration": dur_sec_int,
+ "tests": int(num_tests), "failures": int(num_failures)}
+ logger.info("Results: "+str(json_results))
+ pod_name = functest_utils.get_pod_name(logger)
+
+ # Push results in payload of testcase
+ if args.report:
+ logger.debug("Push result into DB")
+ push_results_to_db(json_results, MODE, pod_name)
+
+
+def main():
+ global MODE
+ if not (args.mode):
+ MODE = "smoke"
+ elif not (args.mode in modes):
+ logger.error("Tempest mode not valid. Possible values are:\n"
+ + str(modes))
+ exit(-1)
+ elif (args.mode == 'custom'):
+ MODE = "--tests-file "+REPO_PATH+"testcases/VIM/OpenStack/CI/custom_tests/test_list.txt"
+ else:
+ MODE = "--set "+args.mode
+
+ create_tempest_resources()
+ configure_tempest()
+ run_tempest(MODE)
+ free_tempest_resources()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testcases/VIM/OpenStack/CI/libraries/test_openstack.sh b/testcases/VIM/OpenStack/CI/libraries/test_openstack.sh
new file mode 100755
index 000000000..7225796c5
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/libraries/test_openstack.sh
@@ -0,0 +1,111 @@
+#
+# Script to test clean_openstack.py
+#
+# Author:
+# jose.lausuch@ericsson.com
+#
+
+if [ -z "$OS_AUTH_URL" ]; then
+ echo "Source credentials first"
+ exit 1
+fi
+
+echo "Using following credentials:"
+env | grep OS
+
+#################################
+echo "Creating keystone stuff.."
+#################################
+keystone tenant-create --name tenant_test1
+keystone tenant-create --name tenant_test2
+tenant1_id=$(keystone tenant-list | grep tenant_test1 | awk '{print $2}')
+tenant2_id=$(keystone tenant-list | grep tenant_test2 | awk '{print $2}')
+keystone user-create --name user_test11 --tenant $tenant1_id
+keystone user-create --name user_test12 --tenant $tenant1_id
+keystone user-create --name user_test13 --tenant $tenant1_id
+keystone user-create --name user_test21 --tenant $tenant2_id
+keystone user-create --name user_test22 --tenant $tenant2_id
+
+
+#################################
+echo "Creating glance stuff.."
+#################################
+wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
+glance image-create --name image_test1 --disk-format qcow2 --container-format bare < cirros-0.3.4-x86_64-disk.img
+glance image-create --name image_test2 --disk-format qcow2 --container-format bare < cirros-0.3.4-x86_64-disk.img
+#glance image-create --name test --visibility public --disk-format qcow2 --container-format bare --file cirros-0.3.4-x86_64-disk.img
+
+
+#################################
+echo "Creating cinder stuff.."
+#################################
+cinder create --display_name volume-test1 1
+cinder create --display_name volume-test2 2
+
+
+#################################
+echo "Creating NEUTRON stuff.."
+#################################
+echo "1. Create Networks."
+neutron net-create net-test1
+neutron net-create net-test2
+
+echo "2. Create subnets."
+neutron subnet-create --name subnet-test11 --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 net-test1 10.7.0.0/24
+neutron subnet-create --name subnet-test21 --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 net-test2 10.6.0.0/24
+
+echo "3. Create Ports."
+neutron port-create --name port-test11 --fixed-ip ip_address=10.7.0.10 net-test1
+neutron port-create --name port-test21 --fixed-ip ip_address=10.6.0.60 net-test2
+
+
+echo "4. Create Routers."
+neutron router-create router-test1
+neutron router-create router-test2
+router1_id=$(neutron router-list | grep router-test1 | awk '{print $2}')
+router2_id=$(neutron router-list | grep router-test2 | awk '{print $2}')
+
+neutron router-gateway-set router-test1 net04_ext
+neutron router-gateway-set router-test2 net04_ext
+
+neutron router-interface-add router-test1 subnet-test11
+neutron router-interface-add router-test2 subnet-test21
+
+echo "5. Floating IPs."
+neutron floatingip-create net04_ext
+neutron floatingip-create net04_ext
+neutron floatingip-create net04_ext
+neutron floatingip-create net04_ext
+
+floating_ip1_id=$(neutron floatingip-list | awk 'FNR == 4 {print}' | awk '{print $2}')
+floating_ip2_id=$(neutron floatingip-list | awk 'FNR == 5 {print}' | awk '{print $2}')
+floating_ip3_id=$(neutron floatingip-list | awk 'FNR == 6 {print}' | awk '{print $2}')
+floating_ip4_id=$(neutron floatingip-list | awk 'FNR == 7 {print}' | awk '{print $2}')
+
+floating_ip1=$(neutron floatingip-list | awk 'FNR == 4 {print}' | awk '{print $5}')
+floating_ip2=$(neutron floatingip-list | awk 'FNR == 5 {print}' | awk '{print $5}')
+floating_ip3=$(neutron floatingip-list | awk 'FNR == 6 {print}' | awk '{print $5}')
+floating_ip4=$(neutron floatingip-list | awk 'FNR == 7 {print}' | awk '{print $5}')
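+# Note: the FNR offsets 4-7 assume the default table layout of
+# "neutron floatingip-list", where the data rows start on the 4th line.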
+
+#################################
+echo "Creating NOVA stuff.."
+#################################
+net1_id=$(neutron net-list | grep net-test1 | awk '{print $2}')
+net2_id=$(neutron net-list | grep net-test2 | awk '{print $2}')
+
+nova boot --flavor 2 --image image_test1 --nic net-id=$net1_id nova-test11
+nova boot --flavor 2 --image image_test1 --nic net-id=$net1_id nova-test12
+nova boot --flavor 2 --image image_test2 --nic net-id=$net2_id nova-test21
+nova boot --flavor 2 --image image_test2 --nic net-id=$net2_id nova-test22
+
+vm1_id=$(nova list | grep nova-test11 | awk '{print $2}')
+vm2_id=$(nova list | grep nova-test12 | awk '{print $2}')
+vm3_id=$(nova list | grep nova-test21 | awk '{print $2}')
+vm4_id=$(nova list | grep nova-test22 | awk '{print $2}')
+
+nova floating-ip-associate $vm1_id $floating_ip1
+nova floating-ip-associate $vm2_id $floating_ip2
+nova floating-ip-associate $vm3_id $floating_ip3
+nova floating-ip-associate $vm4_id $floating_ip4
+
+#neutron floatingip-associate --fixed-ip-address $floating_ip2 <PORT>
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml b/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml
new file mode 100644
index 000000000..48c0333e9
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml
@@ -0,0 +1,97 @@
+{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%}
+{%- if use_existing_users and caller is not defined -%} {}
+{%- else %}
+ {%- if not use_existing_users %}
+ users:
+ tenants: {{ tenants }}
+ users_per_tenant: {{ users_per_tenant }}
+ {%- endif %}
+ {%- if caller is defined %}
+ {{ caller() }}
+ {%- endif %}
+{%- endif %}
+{%- endmacro %}
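+{#- Example (illustrative): user_context(2, 2, false) expands to a "users"
+    context with tenants: 2 and users_per_tenant: 2; with
+    use_existing_users=true and no caller block it expands to {}. -#}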
+
+{%- macro vm_params(image=none, flavor=none, size=none) %}
+{%- if flavor is not none %}
+ flavor:
+ name: {{ flavor }}
+{%- endif %}
+{%- if image is not none %}
+ image:
+ name: {{ image }}
+{%- endif %}
+{%- if size is not none %}
+ size: {{ size }}
+{%- endif %}
+{%- endmacro %}
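+{#- Example (illustrative): vm_params("cirros-0.3.4", "m1.tiny", 1) emits the
+    flavor and image name blocks plus "size: 1"; any argument left as none
+    is omitted. -#}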
+
+{%- macro unlimited_volumes() %}
+ cinder:
+ gigabytes: -1
+ snapshots: -1
+ volumes: -1
+{%- endmacro %}
+
+{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %}
+ type: "constant"
+ {%- if is_smoke %}
+ concurrency: 1
+ times: 1
+ {%- else %}
+ concurrency: {{ concurrency }}
+ times: {{ times }}
+ {%- endif %}
+{%- endmacro %}
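+{#- Example (illustrative): constant_runner(4, 20, false) renders a constant
+    runner with concurrency: 4 and times: 20; with is_smoke=true both values
+    are forced back to 1 to keep smoke runs minimal. -#}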
+
+{%- macro rps_runner(rps=1, times=1, is_smoke=True) %}
+ type: rps
+ {%- if is_smoke %}
+ rps: 1
+ times: 1
+ {%- else %}
+ rps: {{ rps }}
+ times: {{ times }}
+ {%- endif %}
+{%- endmacro %}
+
+{%- macro no_failures_sla() %}
+ failure_rate:
+ max: 0
+{%- endmacro %}
+
+{%- macro volumes(size=1, volumes_per_tenant=1) %}
+ volumes:
+ size: {{ size }}
+ volumes_per_tenant: {{ volumes_per_tenant }}
+{%- endmacro %}
+
+{%- macro unlimited_nova(keypairs=false) %}
+ nova:
+ cores: -1
+ floating_ips: -1
+ instances: -1
+ {%- if keypairs %}
+ key_pairs: -1
+ {%- endif %}
+ ram: -1
+ security_group_rules: -1
+ security_groups: -1
+{%- endmacro %}
+
+{%- macro unlimited_neutron(secgroups=false) %}
+ neutron:
+ network: -1
+ port: -1
+ subnet: -1
+ {%- if secgroups %}
+ security_group: -1
+ security_group_rule: -1
+ {%- endif %}
+{%- endmacro %}
+
+{%- macro glance_args(location, container="bare", type="qcow2") %}
+ container_format: {{ container }}
+ disk_format: {{ type }}
+ image_location: {{ location }}
+{%- endmacro %}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml
new file mode 100644
index 000000000..8d7f0e7cb
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml
@@ -0,0 +1,63 @@
+ Authenticate.keystone:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Authenticate.validate_cinder:
+ -
+ args:
+ repetitions: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Authenticate.validate_glance:
+ -
+ args:
+ repetitions: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Authenticate.validate_heat:
+ -
+ args:
+ repetitions: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Authenticate.validate_neutron:
+ -
+ args:
+ repetitions: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Authenticate.validate_nova:
+ -
+ args:
+ repetitions: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml
new file mode 100644
index 000000000..723db65ff
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml
@@ -0,0 +1,266 @@
+ CinderVolumes.create_and_attach_volume:
+ -
+ args:
+ {{ vm_params(image_name,flavor_name,1) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_delete_snapshot:
+ -
+ args:
+ force: false
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {{ volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_delete_volume:
+ -
+ args:
+ size:
+ max: 1
+ min: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ {{ vm_params(image_name,none,1) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ size: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_extend_volume:
+ -
+ args:
+ new_size: 2
+ size: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_list_snapshots:
+ -
+ args:
+ detailed: true
+ force: false
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {{ volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_list_volume:
+ -
+ args:
+ detailed: true
+ {{ vm_params(image_name,none,1) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ detailed: true
+ size: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_and_upload_volume_to_image:
+ -
+ args:
+ container_format: "bare"
+ disk_format: "raw"
+ do_delete: true
+ force: false
+ size: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_from_volume_and_delete_volume:
+ -
+ args:
+ size: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {{ volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_nested_snapshots_and_attach_volume:
+ -
+ args:
+ nested_level: 1
+ size:
+ max: 1
+ min: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ servers:
+ {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+ servers_per_tenant: 1
+ auto_assign_nic: true
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_snapshot_and_attach_volume:
+ -
+ args:
+ volume_type: false
+ size:
+ min: 1
+ max: 5
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ servers:
+ {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+ servers_per_tenant: 2
+ auto_assign_nic: true
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ volume_type: true
+ size:
+ min: 1
+ max: 5
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ servers:
+ {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+ servers_per_tenant: 2
+ auto_assign_nic: true
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.create_volume:
+ -
+ args:
+ size: 1
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ -
+ args:
+ size:
+ min: 1
+ max: 5
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CinderVolumes.list_volumes:
+ -
+ args:
+ detailed: True
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_volumes() }}
+ volumes:
+ size: 1
+ volumes_per_tenant: 4
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml
new file mode 100644
index 000000000..b5eb7f3f5
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml
@@ -0,0 +1,49 @@
+ GlanceImages.create_and_delete_image:
+ -
+ args:
+ {{ glance_args(location=glance_image_location) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GlanceImages.create_and_list_image:
+ -
+ args:
+ {{ glance_args(location=glance_image_location) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GlanceImages.list_images:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GlanceImages.create_image_and_boot_instances:
+ -
+ args:
+ {{ glance_args(location=glance_image_location) }}
+ flavor:
+ name: {{ flavor_name }}
+ number_instances: 2
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
new file mode 100644
index 000000000..6debb415a
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
@@ -0,0 +1,142 @@
+ HeatStacks.create_and_delete_stack:
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/default.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template"
+ parameters:
+ public_net: {{ floating_network }}
+ image: {{ image_name }}
+ flavor: {{ flavor_name }}
+ cidr: {{ floating_subnet_cidr }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template"
+ parameters:
+ image: {{ image_name }}
+ flavor: {{ flavor_name }}
+ network_id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HeatStacks.create_and_list_stack:
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/default.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HeatStacks.create_check_delete_stack:
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HeatStacks.create_suspend_resume_delete_stack:
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HeatStacks.create_update_delete_stack:
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+ -
+ args:
+ template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+ updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HeatStacks.list_stacks_and_resources:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml
new file mode 100644
index 000000000..50cbecff2
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml
@@ -0,0 +1,92 @@
+ KeystoneBasic.add_and_remove_user_role:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_add_and_list_user_roles:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_and_list_tenants:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_and_delete_role:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_and_delete_service:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.get_entities:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_update_and_delete_tenant:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_user:
+ -
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_tenant:
+ -
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_and_list_users:
+ -
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ KeystoneBasic.create_tenant_with_users:
+ -
+ args:
+ users_per_tenant: 10
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml
new file mode 100644
index 000000000..152c748b3
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml
@@ -0,0 +1,240 @@
+ NeutronNetworks.create_and_delete_networks:
+ -
+ args:
+ network_create_args: {}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ neutron:
+ network: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_delete_ports:
+ -
+ args:
+ network_create_args: {}
+ port_create_args: {}
+ ports_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ port: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_delete_routers:
+ -
+ args:
+ network_create_args: {}
+ router_create_args: {}
+ subnet_cidr_start: "1.1.0.0/30"
+ subnet_create_args: {}
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ port: -1
+ router: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_delete_subnets:
+ -
+ args:
+ network_create_args: {}
+ subnet_cidr_start: "1.1.0.0/30"
+ subnet_create_args: {}
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_list_networks:
+ -
+ args:
+ network_create_args: {}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ neutron:
+ network: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_list_ports:
+ -
+ args:
+ network_create_args: {}
+ port_create_args: {}
+ ports_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ port: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_list_routers:
+ -
+ args:
+ network_create_args: {}
+ router_create_args: {}
+ subnet_cidr_start: "1.1.0.0/30"
+ subnet_create_args: {}
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ router: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_list_subnets:
+ -
+ args:
+ network_create_args: {}
+ subnet_cidr_start: "1.1.0.0/30"
+ subnet_create_args: {}
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_update_networks:
+ -
+ args:
+ network_create_args: {}
+ network_update_args:
+ admin_state_up: false
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ neutron:
+ network: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_update_ports:
+ -
+ args:
+ network_create_args: {}
+ port_create_args: {}
+ port_update_args:
+ admin_state_up: false
+ device_id: "dummy_id"
+ device_owner: "dummy_owner"
+ ports_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ port: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_update_routers:
+ -
+ args:
+ network_create_args: {}
+ router_create_args: {}
+ router_update_args:
+ admin_state_up: false
+ subnet_cidr_start: "1.1.0.0/30"
+ subnet_create_args: {}
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ port: -1
+ router: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NeutronNetworks.create_and_update_subnets:
+ -
+ args:
+ network_create_args: {}
+ subnet_cidr_start: "1.4.0.0/16"
+ subnet_create_args: {}
+ subnet_update_args:
+ enable_dhcp: false
+ subnets_per_network: 1
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ quotas:
+ neutron:
+ network: -1
+ subnet: -1
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml
new file mode 100644
index 000000000..d4bddbd89
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml
@@ -0,0 +1,369 @@
+ NovaKeypair.boot_and_delete_server_with_keypair:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ server_kwargs:
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova(keypairs=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaKeypair.create_and_delete_keypair:
+ -
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_nova(keypairs=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaKeypair.create_and_list_keypairs:
+ -
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_nova(keypairs=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_bounce_server:
+ -
+ args:
+ actions:
+ -
+ hard_reboot: 1
+ -
+ soft_reboot: 1
+ -
+ stop_start: 1
+ -
+ rescue_unrescue: 1
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_delete_server:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_list_server:
+ -
+ args:
+ detailed: true
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_rebuild_server:
+ -
+ args:
+ {{ vm_params(flavor=flavor_name) }}
+ from_image:
+ name: {{ image_name }}
+ to_image:
+ name: {{ image_name }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_from_volume_and_delete:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ volume_size: 5
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_volumes() }}
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.pause_and_unpause_server:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ force_delete: false
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.snapshot_server:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ networks_per_tenant: 1
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_neutron() }}
+ {{ unlimited_nova() }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaSecGroup.boot_and_delete_server_with_secgroups:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ security_group_count: 10
+ rules_per_security_group: 10
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network:
+ start_cidr: "100.1.0.0/25"
+ quotas:
+ {{ unlimited_nova() }}
+ {{ unlimited_neutron(secgroups=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_live_migrate_server:
+ - args:
+ {{ vm_params(image_name, flavor_name) }}
+ block_migration: false
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_migrate_server:
+ - args:
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_from_volume:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ volume_size: 10
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_attach_created_volume_and_live_migrate:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ size: 10
+ block_migration: false
+ boot_server_kwargs:
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_from_volume_and_live_migrate:
+ - args:
+ {{ vm_params(image_name, flavor_name) }}
+ block_migration: false
+ volume_size: 10
+ force_delete: false
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaSecGroup.create_and_delete_secgroups:
+ -
+ args:
+ security_group_count: 10
+ rules_per_security_group: 10
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_neutron(secgroups=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaSecGroup.create_and_list_secgroups:
+ -
+ args:
+ security_group_count: 10
+ rules_per_security_group: 10
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ quotas:
+ {{ unlimited_neutron(secgroups=true) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.list_servers:
+ -
+ args:
+ detailed: True
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ servers:
+ {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+ servers_per_tenant: 2
+ auto_assign_nic: true
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.resize_server:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ to_flavor:
+ name: "m1.small"
+ confirm: true
+ force_delete: false
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml
new file mode 100644
index 000000000..66fd2039d
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml
@@ -0,0 +1,54 @@
+ Quotas.cinder_update_and_delete:
+ -
+ args:
+ max_quota: 1024
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Quotas.cinder_update:
+ -
+ args:
+ max_quota: 1024
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Quotas.neutron_update:
+ -
+ args:
+ max_quota: 1024
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Quotas.nova_update_and_delete:
+ -
+ args:
+ max_quota: 1024
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Quotas.nova_update:
+ -
+ args:
+ max_quota: 1024
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml
new file mode 100644
index 000000000..b7d2033f2
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml
@@ -0,0 +1,28 @@
+ HttpRequests.check_random_request:
+ -
+ args:
+ requests:
+ -
+ url: "http://www.example.com"
+ method: "GET"
+ status_code: 200
+ -
+ url: "http://www.openstack.org"
+ method: "GET"
+ status_code: 200
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ HttpRequests.check_request:
+ -
+ args:
+ url: "http://www.example.com"
+ method: "GET"
+ status_code: 200
+ allow_redirects: False
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml
new file mode 100644
index 000000000..f102edb2b
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml
@@ -0,0 +1,268 @@
+ TempestScenario.list_of_tests:
+ -
+ args:
+ tempest_conf: /etc/tempest/tempest.conf
+ test_names:
+ - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor
+ - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors
+ - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail
+ - tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image
+ - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image
+ - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images
+ - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail
+ - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create
+ - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr
+ - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id
+ - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted
+ - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list
+ - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete
+ - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete
+ - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups
+ - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups
+ - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip
+ - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces
+ - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
+ - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail
+ - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
+ - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
+ - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail
+ - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm
+ - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped
+ - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses
+ - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network
+ - tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance
+ - tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas
+ - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas
+ - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas
+ - tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete
+ - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_create
+ - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_delete
+ - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_get
+ - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_list
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_create
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_delete
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_get
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_list
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_create
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_delete
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_get
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_list
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_create
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_delete
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_get
+ - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_list
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_create
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_delete
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_get
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_list
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_job_binary_get_data
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_create
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_delete
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_get
+ - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_list
+ - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_create
+ - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_delete
+ - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get
+ - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get_data
+ - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_list
+ - tempest.api.data_processing.test_jobs.JobTest.test_job_create
+ - tempest.api.data_processing.test_jobs.JobTest.test_job_delete
+ - tempest.api.data_processing.test_jobs.JobTest.test_job_get
+ - tempest.api.data_processing.test_jobs.JobTest.test_job_list
+ - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_create
+ - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_delete
+ - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_get
+ - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_list
+ - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_get
+ - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_list
+ - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_compare_db_flavors_with_os
+ - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_get_db_flavor
+ - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_list_db_flavors
+ - tempest.api.database.limits.test_limits.DatabaseLimitsTest.test_absolute_limits
+ - tempest.api.database.versions.test_versions.DatabaseVersionsTest.test_list_db_versions
+ - tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services
+ - tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user
+ - tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete
+ - tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain
+ - tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint
+ - tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete
+ - tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy
+ - tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id
+ - tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list
+ - tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service
+ - tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all
+ - tempest.api.messaging.test_claims.TestClaims.test_post_claim
+ - tempest.api.messaging.test_claims.TestClaims.test_query_claim
+ - tempest.api.messaging.test_claims.TestClaims.test_release_claim
+ - tempest.api.messaging.test_claims.TestClaims.test_update_claim
+ - tempest.api.messaging.test_messages.TestMessages.test_delete_multiple_messages
+ - tempest.api.messaging.test_messages.TestMessages.test_delete_single_message
+ - tempest.api.messaging.test_messages.TestMessages.test_get_message
+ - tempest.api.messaging.test_messages.TestMessages.test_get_multiple_messages
+ - tempest.api.messaging.test_messages.TestMessages.test_list_messages
+ - tempest.api.messaging.test_messages.TestMessages.test_post_messages
+ - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_existence
+ - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_head
+ - tempest.api.messaging.test_queues.TestManageQueue.test_get_queue_stats
+ - tempest.api.messaging.test_queues.TestManageQueue.test_list_queues
+ - tempest.api.messaging.test_queues.TestManageQueue.test_set_and_get_queue_metadata
+ - tempest.api.messaging.test_queues.TestQueues.test_create_delete_queue
+ - tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions
+ - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address
+ - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port
+ - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet
+ - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network
+ - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port
+ - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network
+ - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network
+ - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet
+ - tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet
+ - tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility
+ - tempest.api.network.test_networks.NetworksTestJSON.test_list_networks
+ - tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets
+ - tempest.api.network.test_networks.NetworksTestJSON.test_show_network
+ - tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet
+ - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
+ - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
+ - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
+ - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
+ - tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
+ - tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools
+ - tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups
+ - tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port
+ - tempest.api.network.test_ports.PortsTestJSON.test_list_ports
+ - tempest.api.network.test_ports.PortsTestJSON.test_show_port
+ - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
+ - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
+ - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
+ - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
+ - tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces
+ - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id
+ - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id
+ - tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router
+ - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
+ - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
+ - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
+ - tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group
+ - tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule
+ - tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups
+ - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota
+ - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_account_metadata
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_end_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_json
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_xml
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_end_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker_and_end_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker_and_end_marker
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_extensions
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_account_metadata
+ - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_containers
+ - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_and_delete_metadata
+ - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_matadata_key
+ - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_metadata
+ - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata
+ - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata_key
+ - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_read_object_with_rights
+ - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_write_object_with_rights
+ - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_large_object
+ - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects
+ - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_overwrite
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_key
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_key
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_value
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_delete_container
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_delimiter
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_end_marker
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_json
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_xml
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_limit
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_marker
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_no_object
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_path
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_prefix
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_metadata
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_list_no_container_metadata
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_and_delete_matadata
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_matadata_key
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_metadata
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata
+ - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata_key
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_fresh_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_meta
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_metakey
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_match
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_modified_since
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_unmodified_since
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_range
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_newest
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_object_manifest
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_list_no_object_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata_with_x_object_manifest
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_create_and_remove_metadata
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_object_manifest
+ - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_remove_object_metakey
+ - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds
+ - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_object_with_another_user_creds
+ - tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+ - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list
+ - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show
+ - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change
+ - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change
+ - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources
+ - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds
+ - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications
+ - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications
+ - tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance
+ - tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance
+ - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete
+ - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image
+ - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete
+ - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image
+ - tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list
+ - tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list
+ runner:
+ concurrency: 1
+ times: 1
+ type: serial
+ sla:
+ failure_rate:
+ max: 0
+
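The serial runner above executes the whole Tempest selection once (times: 1, concurrency: 1), and the failure_rate SLA of max: 0 marks the scenario as failed if any single test errors. A minimal sketch of checking that SLA from the command line, assuming a standard Rally installation and that this file is pulled in through the rendered task:

```bash
# Run the rendered task and then evaluate its SLAs; rally task sla_check is expected to
# return a non-zero exit status when the failure_rate limit above (max: 0) is exceeded.
rally task start task.yaml
rally task sla_check
```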
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml
new file mode 100644
index 000000000..17ce20ce1
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml
@@ -0,0 +1,42 @@
+ VMTasks.boot_runcommand_delete:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ floating_network: {{ floating_network }}
+ force_delete: false
+ command:
+ interpreter: /bin/sh
+ script_file: {{ sup_dir }}/instance_dd_test.sh
+ username: cirros
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ fixed_network: private
+ floating_network: {{ floating_network }}
+ force_delete: false
+ command:
+ interpreter: /bin/sh
+ script_file: {{ sup_dir }}/instance_dd_test.sh
+ use_floatingip: true
+ username: cirros
+ nics:
+ - net-id: {{ netid }}
+ volume_args:
+ size: 2
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
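The boot_runcommand_delete scenario above assumes a reachable external network (the floating_network variable) and the functest image referenced elsewhere in the task. A small pre-flight sketch, assuming the unified openstack CLI is installed and admin credentials are sourced:

```bash
# Check that the external network and the test image referenced by the task exist.
openstack network list --external          # the name here must match the floating_network variable
openstack image list | grep functest-img   # image_name defaults to functest-img-rally in task.yaml
```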
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh b/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh
new file mode 100644
index 000000000..e3bf23405
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
+file=/tmp/test.img
+c=${1:-$SIZE}
+c=${c:-1000} #default is 1GB
+write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
+read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
+[ -f $file ] && rm $file
+
+echo "{
+ \"write_seq_${c}m\": $write_seq,
+ \"read_seq_${c}m\": $read_seq
+ }"
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template
new file mode 100644
index 000000000..a22487e33
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template
@@ -0,0 +1,17 @@
+heat_template_version: 2013-05-23
+
+resources:
+ test_group:
+ type: OS::Heat::AutoScalingGroup
+ properties:
+ desired_capacity: 0
+ max_size: 0
+ min_size: 0
+ resource:
+ type: OS::Heat::RandomString
+ test_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: { get_resource: test_group }
+ scaling_adjustment: 1 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template
new file mode 100644
index 000000000..eb4f2f2dd
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template
@@ -0,0 +1 @@
+heat_template_version: 2014-10-16 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template
new file mode 100644
index 000000000..2dd676c11
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template
@@ -0,0 +1,13 @@
+heat_template_version: 2014-10-16
+
+description: Test template for rally create-update-delete scenario
+
+resources:
+ test_string_one:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ test_string_two:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template
new file mode 100644
index 000000000..b3f505fa6
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template
@@ -0,0 +1,13 @@
+heat_template_version: 2014-10-16
+
+description: Test template for rally create-update-delete scenario
+
+resources:
+ test_group:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 2
+ resource_def:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template
new file mode 100644
index 000000000..909f45d21
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template
@@ -0,0 +1,64 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  # set all correct defaults for parameters before launching the test
+ public_net:
+ type: string
+ default: public
+ image:
+ type: string
+ default: cirros-0.3.4-x86_64-uec
+ flavor:
+ type: string
+ default: m1.tiny
+ cidr:
+ type: string
+ default: 11.11.11.0/24
+
+resources:
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ networks:
+ - port: { get_resource: server_port }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: {get_param: public_net}
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: private_subnet }
+
+ private_net:
+ type: OS::Neutron::Net
+
+ private_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: private_net }
+ cidr: {get_param: cidr}
+
+ port_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: default_port_security_group
+ description: >
+ Default security group assigned to port. The neutron default group is not
+ used because neutron creates several groups with the same name=default and
+        nova cannot choose which one it should use.
+
+ server_port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_resource: private_net}
+ fixed_ips:
+ - subnet: { get_resource: private_subnet }
+ security_groups:
+ - { get_resource: port_security_group }
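The template above is plain HOT despite the .template suffix, so it can also be exercised outside Rally. A hedged sketch, assuming the heat plugin for the openstack CLI and an external network actually named "public"; the stack name is made up for the example:

```bash
# Create a stack from the template with its documented parameters; "ports-demo" is a made-up name.
openstack stack create \
  --template server_with_ports.yaml.template \
  --parameter public_net=public \
  --parameter image=cirros-0.3.4-x86_64-uec \
  --parameter flavor=m1.tiny \
  --parameter cidr=11.11.11.0/24 \
  ports-demo
```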
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template
new file mode 100644
index 000000000..826ca9dae
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template
@@ -0,0 +1,43 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  # set all correct defaults for parameters before launching the test
+ image:
+ type: string
+ default: cirros-0.3.4-x86_64-uec
+ flavor:
+ type: string
+ default: m1.tiny
+ availability_zone:
+ type: string
+    description: The Availability Zone in which to launch the instance.
+ default: nova
+ volume_size:
+ type: number
+ description: Size of the volume to be created.
+ default: 1
+ constraints:
+ - range: { min: 1, max: 1024 }
+        description: must be between 1 and 1024 GB.
+ network_id:
+ type: string
+
+resources:
+ server:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ networks:
+ - network: { get_param: network_id }
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: { get_param: volume_size }
+ availability_zone: { get_param: availability_zone }
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: cinder_volume }
+ instance_uuid: { get_resource: server}
+ mountpoint: /dev/vdc
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
new file mode 100644
index 000000000..cf34879ca
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
@@ -0,0 +1,23 @@
+heat_template_version: 2013-05-23
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+  The template updates resource parameters without resource re-creation (replacement)
+  in the stack defined by autoscaling_policy.yaml.template. It allows measuring the
+  performance of the "pure" resource update operation only.
+
+resources:
+ test_group:
+ type: OS::Heat::AutoScalingGroup
+ properties:
+ desired_capacity: 0
+ max_size: 0
+ min_size: 0
+ resource:
+ type: OS::Heat::RandomString
+ test_policy:
+ type: OS::Heat::ScalingPolicy
+ properties:
+ adjustment_type: change_in_capacity
+ auto_scaling_group_id: { get_resource: test_group }
+ scaling_adjustment: -1 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template
new file mode 100644
index 000000000..e06d42e01
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template
@@ -0,0 +1,19 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+  The template updates the stack defined by random_strings.yaml.template with an additional resource.
+
+resources:
+ test_string_one:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ test_string_two:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ test_string_three:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template
new file mode 100644
index 000000000..d02593e3b
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template
@@ -0,0 +1,11 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+ The template deletes one resource from the stack defined by random_strings.yaml.template.
+
+resources:
+ test_string_one:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template
new file mode 100644
index 000000000..46d8bff4c
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template
@@ -0,0 +1,19 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+ The template deletes one resource from the stack defined by
+ random_strings.yaml.template and re-creates it with the updated parameters
+ (so-called update-replace). That happens because some parameters cannot be
+  changed without resource re-creation. The template allows measuring the
+  performance of the update-replace operation.
+
+resources:
+ test_string_one:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20
+ test_string_two:
+ type: OS::Heat::RandomString
+ properties:
+ length: 40 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template
new file mode 100644
index 000000000..891074ebc
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template
@@ -0,0 +1,16 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+ The template updates one resource from the stack defined by resource_group.yaml.template
+  and adds child resources to that resource.
+
+resources:
+ test_group:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 3
+ resource_def:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template
new file mode 100644
index 000000000..b4d1d1730
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template
@@ -0,0 +1,16 @@
+heat_template_version: 2014-10-16
+
+description: >
+ Test template for create-update-delete-stack scenario in rally.
+ The template updates one resource from the stack defined by resource_group.yaml.template
+  and deletes child resources from that resource.
+
+resources:
+ test_group:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 1
+ resource_def:
+ type: OS::Heat::RandomString
+ properties:
+ length: 20 \ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml b/testcases/VIM/OpenStack/CI/rally_cert/task.yaml
new file mode 100644
index 000000000..299421ac5
--- /dev/null
+++ b/testcases/VIM/OpenStack/CI/rally_cert/task.yaml
@@ -0,0 +1,59 @@
+{%- set glance_image_location = glance_image_location|default("http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img") %}
+{%- set image_name = image_name|default("functest-img-rally") %}
+{%- set flavor_name = flavor_name|default("m1.tiny") %}
+{%- set use_existing_users = use_existing_users|default(false) %}
+{%- set service_list = service_list|default(["authenticate", "cinder", "keystone", "nova", "glance", "neutron", "quotas", "requests", "heat", "vm"]) %}
+{%- set smoke = smoke|default(true) %}
+{%- set floating_network = floating_network|default("net04_ext") %}
+{%- set controllers_amount = controllers_amount|default(1) %}
+{%- if smoke %}
+{%- set users_amount = 1 %}
+{%- set tenants_amount = 1 %}
+{%- else %}
+{%- set users_amount = users_amount|default(1) %}
+{%- set tenants_amount = tenants_amount|default(1) %}
+{%- endif %}
+
+{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%}
+{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%}
+
+---
+{% if "authenticate" in service_list %}
+{%- include "scenario/opnfv-authenticate.yaml"-%}
+{% endif %}
+
+{% if "cinder" in service_list %}
+{%- include "scenario/opnfv-cinder.yaml"-%}
+{% endif %}
+
+{% if "keystone" in service_list %}
+{%- include "scenario/opnfv-keystone.yaml"-%}
+{% endif %}
+
+{% if "nova" in service_list %}
+{%- include "scenario/opnfv-nova.yaml"-%}
+{% endif %}
+
+{% if "glance" in service_list %}
+{%- include "scenario/opnfv-glance.yaml"-%}
+{% endif %}
+
+{% if "neutron" in service_list %}
+{%- include "scenario/opnfv-neutron.yaml"-%}
+{% endif %}
+
+{% if "quotas" in service_list %}
+{%- include "scenario/opnfv-quotas.yaml"-%}
+{% endif %}
+
+{% if "requests" in service_list %}
+{%- include "scenario/opnfv-requests.yaml"-%}
+{% endif %}
+
+{% if "heat" in service_list %}
+{%- include "scenario/opnfv-heat.yaml"-%}
+{% endif %}
+
+{% if "vm" in service_list %}
+{%- include "scenario/opnfv-vm.yaml"-%}
+{% endif %}
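All of the Jinja2 defaults declared at the top of task.yaml (image_name, flavor_name, floating_network, smoke, service_list, and so on) can be overridden at launch time. A minimal sketch, assuming a Rally deployment has already been registered for the target cloud:

```bash
# Run only two of the included scenario groups and override the image and floating network.
rally task start task.yaml \
  --task-args '{"service_list": ["authenticate", "glance"],
                "image_name": "functest-img-rally",
                "floating_network": "net04_ext",
                "smoke": true}'
```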
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-authenticate.json b/testcases/VIM/OpenStack/CI/suites/opnfv-authenticate.json
index 3ded19963..8bb589524 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-authenticate.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-authenticate.json
@@ -11,6 +11,9 @@
"tenants": 3,
"users_per_tenant": 50
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -29,6 +32,9 @@
"tenants": 3,
"users_per_tenant": 5
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -47,6 +53,9 @@
"tenants": 3,
"users_per_tenant": 5
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -65,6 +74,9 @@
"tenants": 3,
"users_per_tenant": 5
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -83,6 +95,9 @@
"tenants": 3,
"users_per_tenant": 5
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -101,6 +116,9 @@
"tenants": 3,
"users_per_tenant": 5
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-cinder.json b/testcases/VIM/OpenStack/CI/suites/opnfv-cinder.json
index 958055186..bada44733 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-cinder.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-cinder.json
@@ -4,7 +4,7 @@
"args": {
"size": 10,
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"flavor": {
"name": "m1.tiny"
@@ -20,6 +20,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -41,6 +44,9 @@
"volumes": {
"size": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -59,6 +65,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -78,6 +87,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -100,6 +112,9 @@
"volumes": {
"size": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -119,6 +134,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -141,6 +159,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -149,7 +170,7 @@
"args": {
"size": 1,
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
}
},
"runner": {
@@ -162,6 +183,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -183,6 +207,9 @@
"volumes": {
"size": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -210,13 +237,16 @@
},
"servers": {
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"flavor": {
"name": "m1.tiny"
},
"servers_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -241,13 +271,16 @@
},
"servers": {
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"flavor": {
"name": "m1.tiny"
},
"servers_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -266,6 +299,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -288,8 +324,10 @@
"size": 1,
"volumes_per_tenant": 4
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
}
-
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-glance.json b/testcases/VIM/OpenStack/CI/suites/opnfv-glance.json
index 1359fff56..e905ccaba 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-glance.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-glance.json
@@ -1,4 +1,28 @@
{
+ "GlanceImages.list_images": [
+ {
+ "runner": {
+ "type": "constant",
+ "times": 10,
+ "concurrency": 1
+ },
+ "context": {
+ "users": {
+ "tenants": 2,
+ "users_per_tenant": 2
+ },
+ "images": {
+ "image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
+ "image_type": "qcow2",
+ "image_container": "bare",
+ "images_per_tenant": 4
+ }
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
+ }
+ }
+ ],
"GlanceImages.create_and_delete_image": [
{
"args": {
@@ -16,6 +40,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -36,6 +63,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -46,7 +76,7 @@
"container_format": "bare",
"disk_format": "qcow2",
"flavor": {
- "name": "m1.nano"
+ "name": "m1.small"
},
"number_instances": 2
},
@@ -60,27 +90,9 @@
"tenants": 3,
"users_per_tenant": 5
}
- }
- }
- ],
- "GlanceImages.list_images": [
- {
- "runner": {
- "type": "constant",
- "times": 10,
- "concurrency": 1
},
- "context": {
- "users": {
- "tenants": 2,
- "users_per_tenant": 2
- },
- "images": {
- "image_url": "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img",
- "image_type": "qcow2",
- "image_container": "bare",
- "images_per_tenant": 4
- }
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-heat.json b/testcases/VIM/OpenStack/CI/suites/opnfv-heat.json
index 2fbb70fba..a712afefe 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-heat.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-heat.json
@@ -11,6 +11,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -29,6 +32,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -47,6 +53,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -62,6 +71,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -80,6 +92,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -98,6 +113,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -117,6 +135,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -136,6 +157,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -155,6 +179,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -174,6 +201,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -193,6 +223,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -212,6 +245,9 @@
"tenants": 2,
"users_per_tenant": 3
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -231,6 +267,9 @@
"stacks_per_tenant": 2,
"resources_per_stack": 10
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-keystone.json b/testcases/VIM/OpenStack/CI/suites/opnfv-keystone.json
index 390a1ae13..f7291ed59 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-keystone.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-keystone.json
@@ -8,6 +8,9 @@
"type": "constant",
"times": 100,
"concurrency": 10
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -20,6 +23,9 @@
"type": "constant",
"times": 10,
"concurrency": 1
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -32,6 +38,9 @@
"type": "constant",
"times": 100,
"concurrency": 10
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -44,6 +53,9 @@
"type": "constant",
"times": 100,
"concurrency": 10
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -56,6 +68,9 @@
"type": "constant",
"times": 100,
"concurrency": 10
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -69,6 +84,9 @@
"type": "constant",
"times": 10,
"concurrency": 10
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-neutron.json b/testcases/VIM/OpenStack/CI/suites/opnfv-neutron.json
index 5d176ca0d..9fcdf583d 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-neutron.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-neutron.json
@@ -19,6 +19,9 @@
"network": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -45,6 +48,9 @@
"port": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -74,6 +80,9 @@
"router": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -101,6 +110,9 @@
"subnet": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -124,6 +136,9 @@
"network": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -150,6 +165,9 @@
"port": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -179,6 +197,9 @@
"router": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -206,6 +227,9 @@
"subnet": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -233,6 +257,9 @@
"network": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -265,6 +292,9 @@
"port": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -298,6 +328,9 @@
"router": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -329,6 +362,9 @@
"subnet": -1
}
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-nova.json b/testcases/VIM/OpenStack/CI/suites/opnfv-nova.json
index 2c56777dc..e32fd57ec 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-nova.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-nova.json
@@ -6,7 +6,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"force_delete": false
},
@@ -20,6 +20,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -30,7 +33,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
}
},
"runner": {
@@ -46,6 +49,9 @@
"network": {
"start_cidr": "100.1.0.0/26"
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -56,7 +62,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"security_group_count": 10,
"rules_per_security_group": 10
@@ -74,6 +80,9 @@
"network": {
"start_cidr": "100.1.0.0/26"
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -84,7 +93,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"detailed": true
},
@@ -98,6 +107,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -108,7 +120,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"block_migration": false
},
@@ -122,6 +134,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -132,7 +147,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
}
},
"runner": {
@@ -145,6 +160,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -155,7 +173,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"force_delete": false,
"actions": [
@@ -175,6 +193,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -185,7 +206,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"volume_size": 10,
"force_delete": false
@@ -200,6 +221,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -210,7 +234,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"volume_size": 10
},
@@ -224,6 +248,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -234,7 +261,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
}
},
"runner": {
@@ -247,6 +274,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -256,7 +286,7 @@
"size": 10,
"block_migration": false,
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"flavor": {
"name": "m1.small"
@@ -272,6 +302,9 @@
"tenants": 2,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -282,7 +315,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"block_migration": false,
"volume_size": 10,
@@ -298,6 +331,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -308,7 +344,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"force_delete": false
},
@@ -322,6 +358,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -337,6 +376,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -356,6 +398,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -371,6 +416,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -390,6 +438,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -413,10 +464,13 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"servers_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -427,7 +481,7 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"to_flavor": {
"name": "m1.small"
@@ -445,6 +499,9 @@
"tenants": 1,
"users_per_tenant": 1
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-quotas.json b/testcases/VIM/OpenStack/CI/suites/opnfv-quotas.json
index 1778a8dd0..1cc1855e7 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-quotas.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-quotas.json
@@ -14,6 +14,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -32,6 +35,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -50,6 +56,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -68,6 +77,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -86,6 +98,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-requests.json b/testcases/VIM/OpenStack/CI/suites/opnfv-requests.json
index 9936e2fd5..4468d60a3 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-requests.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-requests.json
@@ -1,16 +1,38 @@
{
- "Requests.check_response": [
+ "HttpRequests.check_random_request": [
{
"args": {
- "url": "http://www.google.com",
- "response": 302
+ "requests": [{"url": "http://www.example.com", "method": "GET",
+ "status_code": 200},
+ {"url": "http://www.openstack.org", "method": "GET"}],
+ "status_code": 200
},
"runner": {
"type": "constant",
"times": 20,
"concurrency": 5
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
+ }
+ }
+ ],
+ "HttpRequests.check_request": [
+ {
+ "args": {
+ "url": "http://www.example.com",
+ "method": "GET",
+ "status_code": 200,
+ "allow_redirects": false
+ },
+ "runner": {
+ "type": "constant",
+ "times": 20,
+ "concurrency": 5
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
}
-
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-smoke-green.json b/testcases/VIM/OpenStack/CI/suites/opnfv-smoke-green.json
index a7eb345b7..b327b53e5 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-smoke-green.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-smoke-green.json
@@ -230,6 +230,9 @@
"type": "serial",
"times": 1,
"concurrency": 1
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-smoke.json b/testcases/VIM/OpenStack/CI/suites/opnfv-smoke.json
index 8c40fab1d..31514d274 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-smoke.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-smoke.json
@@ -265,6 +265,9 @@
"type": "serial",
"times": 1,
"concurrency": 1
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-tempest.json b/testcases/VIM/OpenStack/CI/suites/opnfv-tempest.json
index 0b63070a0..b94de47e4 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-tempest.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-tempest.json
@@ -6,6 +6,9 @@
"type": "constant",
"times": 1,
"concurrency": 1
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/CI/suites/opnfv-vm.json b/testcases/VIM/OpenStack/CI/suites/opnfv-vm.json
index f197ef6ba..382f40251 100644
--- a/testcases/VIM/OpenStack/CI/suites/opnfv-vm.json
+++ b/testcases/VIM/OpenStack/CI/suites/opnfv-vm.json
@@ -6,13 +6,13 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
- "floating_network": "public",
+ "floating_network": "net04_ext",
"force_delete": false,
- "script": "samples/tasks/support/instance_dd_test.sh",
+ "script": "../Rally_repo/samples/tasks/support/instance_dd_test.sh",
"interpreter": "/bin/sh",
- "username": "cirros"
+ "username": "admin"
},
"runner": {
"type": "constant",
@@ -26,6 +26,9 @@
},
"network": {
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
],
@@ -36,18 +39,18 @@
"name": "m1.small"
},
"image": {
- "name": "^TestVM*"
+ "name": "^functest-img*"
},
"volume_args": {
"size": 2
},
"fixed_network": "private",
- "floating_network": "public",
+ "floating_network": "net04_ext",
"use_floatingip": true,
"force_delete": false,
- "script": "samples/tasks/support/instance_dd_test.sh",
+ "script": "../Rally_repo/samples/tasks/support/instance_dd_test.sh",
"interpreter": "/bin/sh",
- "username": "cirros"
+ "username": "admin"
},
"runner": {
"type": "constant",
@@ -59,6 +62,9 @@
"tenants": 3,
"users_per_tenant": 2
}
+ },
+ "sla": {
+ "failure_rate": {"max": 0}
}
}
]
diff --git a/testcases/VIM/OpenStack/OpenStack.md b/testcases/VIM/OpenStack/OpenStack.md
deleted file mode 100644
index e1a85c305..000000000
--- a/testcases/VIM/OpenStack/OpenStack.md
+++ /dev/null
@@ -1,218 +0,0 @@
-# Rally tests for OpenStack
-
-Original Rally testsuites can be found here: https://github.com/stackforge/rally
-
----
-## Intro
-In order to perform functional and performance testing, we use Rally (see https://wiki.openstack.org/wiki/Rally for details).
-Rally must be installed as jenkins user on the jumphost machine of the OPNFV solution.
-
-## Installation & Configuration
-
-### Rally
-
-* Log on jumphost machine as jenkins user
-* Create the file existing.json, adapt it to your OpenStack (until agreement on default passwords)
-```bash
-{
- "type": "ExistingCloud",
- "auth_url": "http://example.net:5000/v2.0/",
- "region_name": "RegionOne",
- "endpoint_type": "public",
- "admin": {
- "username": "admin",
- "password": "myadminpass",
- "tenant_name": "demo"
- },
- "https_insecure": False,
- "https_cacert": "",
-}
-```
-* Install Rally (ref https://rally.readthedocs.org/en/latest/tutorial/step_0_installation.html)
-
-```bash
-git clone https://git.openstack.org/stackforge/rally
-./rally/install_rally.sh -v
-rally deployment create --file=existing.json --name=existing
-```
-* you can check the available OpenStack services
-```bash
-# rally deployment check
-keystone endpoints are valid and following service are available:
-+-------------+-----------+------------+
-| Services | Type | Status |
-+-----------+-------------+------------+
-| cinder | volume | Available |
-| cinderv2 | volumev2 | Available |
-| glance | image | Available |
-| keystone | identity | Available |
-| neutron | network | Available |
-| nova | compute | Available |
-| nova_ec2 | compute_ec2 | Available |
-| novav3 | computev3 | Available |
-+-----------+-------------+------------+
-```
-* You can start Rally scenario manually, follow https://rally.readthedocs.org/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
-```bash
-# rally task start ./samples/tasks/scenarios/nova/my-boot-and-delete.json
---------------------------------------------------------------------------------
- Preparing input task
---------------------------------------------------------------------------------
-
-Input task is:
-{
- "NovaServers.boot_and_delete_server": [
- {
- "args": {
- "flavor": {
- "name": "m1.small"
- },
- "image": {
- "name": "^ubuntu-14.10-64b"
- },
- "force_delete": false
- },
- "runner": {
- "type": "constant",
- "times": 10,
- "concurrency": 2
- },
- "context": {
- "users": {
- "tenants": 3,
- "users_per_tenant": 2
- }
- }
- }
- ]
-}
-
---------------------------------------------------------------------------------
- Task f42c8aed-00a6-4715-9951-945b4fb97c32: started
---------------------------------------------------------------------------------
-
-Benchmarking... This can take a while...
-
-To track task status use:
-
- rally task status
- or
- rally task detailed
-
---------------------------------------------------------------------------------
-Task f42c8aed-00a6-4715-9951-945b4fb97c32: finished
---------------------------------------------------------------------------------
-
-test scenario NovaServers.boot_and_delete_server
-args position 0
-args values:
-OrderedDict([(u'runner', OrderedDict([(u'type', u'constant'), (u'concurrency', 2), (u'times', 10)])), (u'args', OrderedDict([(u'force_delete', False), (u'flavor', OrderedDict([(u'name', u'm1.small')])), (u'image', OrderedDict([(u'name', u'^ubuntu-14.10-64b')]))])), (u'context', OrderedDict([(u'users', OrderedDict([(u'project_domain', u'default'), (u'users_per_tenant', 2), (u'tenants', 3), (u'resource_management_workers', 30), (u'user_domain', u'default')]))]))])
-+--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| action | min (sec) | avg (sec) | max (sec) | 90 percentile | 95 percentile | success | count |
-+--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-| nova.boot_server | 4.675 | 5.554 | 6.357 | 6.289 | 6.323 | 100.0% | 10 |
-| nova.delete_server | 2.365 | 3.301 | 4.728 | 4.553 | 4.64 | 100.0% | 10 |
-| total | 7.303 | 8.857 | 10.789 | 10.543 | 10.666 | 100.0% | 10 |
-+--------------------+-----------+-----------+-----------+---------------+---------------+---------+-------+
-Load duration: 45.7972288132
-Full duration: 58.912060976
-
-HINTS:
-* To plot HTML graphics with this data, run:
- rally task report f42c8aed-00a6-4715-9951-945b4fb97c32 --out output.html
-
-* To get raw JSON output of task results, run:
- rally task results f42c8aed-00a6-4715-9951-945b4fb97c32
-
-Using task: f42c8aed-00a6-4715-9951-945b4fb97c32
-
-```
-* For Tempest, you can run the test manually by following the instructions https://www.mirantis.com/blog/rally-openstack-tempest-testing-made-simpler
-
-```bash
-root@rally:~/rally# rally verify start
-[...]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest
- test_attach_volumes_with_nonexistent_volume_id[compute,gate,id-f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6,negative]FAIL
- test_create_volume_with_invalid_size[gate,id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049,negative]OK 0.02
- test_create_volume_with_nonexistent_snapshot_id[gate,id-0c36f6ae-4604-4017-b0a9-34fdc63096f9,negative]OK 0.04
- test_create_volume_with_nonexistent_source_volid[gate,id-47c73e08-4be8-45bb-bfdf-0c4e79b88344,negative]OK 0.05
- test_create_volume_with_nonexistent_volume_type[gate,id-10254ed8-3849-454e-862e-3ab8e6aa01d2,negative]OK 0.02
- test_create_volume_with_out_passing_size[gate,id-9387686f-334f-4d31-a439-33494b9e2683,negative]OK 0.02
- test_create_volume_with_size_negative[gate,id-8b472729-9eba-446e-a83b-916bdb34bef7,negative]OK 0.02
-[...]
-Ran 933 tests in 1020.200s
-
-FAILED (failures=186)
-Test set 'full' has been finished with error. Check log for details
-
-```
-
-It is possible to get a better view of the results:
-```bash
-# rally verify list
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
-| UUID | Deployment UUID | Set name | Tests | Failures | Created at | Duration | Status |
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
-| b1de3608-dbee-40e7-84c4-1c756ca0347c | e7d70ddf-9be0-4681-9456-aa8dce515e0e | None | 0 | 0 | 2015-03-11 08:48:04.416793 | 0:00:00.102275 | running |
-| ff0d9285-184f-47d5-9474-7475135ae8cf | e7d70ddf-9be0-4681-9456-aa8dce515e0e | full | 933 | 186 | 2015-03-11 09:57:01.836611 | 0:18:08.360204 | finished |
-| fec2fd0a-a4ef-4064-a292-95e9da68025c | e7d70ddf-9be0-4681-9456-aa8dce515e0e | full | 933 | 186 | 2015-03-12 09:46:40.818691 | 0:17:02.316443 | finished |
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------------+----------+
-
-rally verify show fec2fd0a-a4ef-4064-a292-95e9da68025c
-Total results of verification:
-
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
-| UUID | Deployment UUID | Set name | Tests | Failures | Created at | Status |
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
-| fec2fd0a-a4ef-4064-a292-95e9da68025c | e7d70ddf-9be0-4681-9456-aa8dce515e0e | full | 933 | 186 | 2015-03-12 09:46:40.818691 | finished |
-+--------------------------------------+--------------------------------------+----------+-------+----------+----------------------------+----------+
-
-Tests:
-
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| name | time | status |
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| tearDownClass (tempest.api.image.v1.test_images.CreateRegisterImagesTest) | 0.0 | FAIL |
-| tearDownClass (tempest.api.image.v1.test_images.UpdateImageMetaTest) | 0.0 | FAIL |
-[...]
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_quota_show[id-18166673-ffa8-4df3-b60c-6375532288bc] | 1.309555 | OK |
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_rate_limits[id-b2c66ed9-ca96-4dc4-94cc-8083e664e516] | 1.277704 | OK |
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_region_list[id-95a2850c-35b4-4159-bb93-51647a5ad232] | 1.105877 | FAIL |
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_retries_list[id-6d97fcd2-5dd1-429d-af70-030c949d86cd] | 1.306407 | OK |
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_service_list[id-301b5ae1-9591-4e9f-999c-d525a9bdf822] | 1.24909 | OK |
-| tempest.cli.simple_read_only.volume.test_cinder.SimpleReadOnlyCinderClientTest.test_cinder_snapshot_list[id-7a19955b-807c-481a-a2ee-9d76733eac28] | 1.270242 | OK |
-[...]
-| tempest.thirdparty.boto.test_s3_ec2_images.S3ImagesTest | 0.0 | SKIP |
-| tempest.thirdparty.boto.test_s3_objects.S3BucketsTest.test_create_get_delete_object[id-4eea567a-b46a-405b-a475-6097e1faebde] | 0.239222 | FAIL |
-+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-
-```
-Rally also includes a reporting tool, documented at
-https://rally.readthedocs.org/en/latest/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html
-
-## Test description
-
-### Rally
-
-By default, the available Rally scenarios are:
-```bash
-
-ls samples/tasks/scenarios/
-authenticate cinder dummy heat mistral nova README.rst sahara vm
-ceilometer designate glance keystone neutron quotas requests tempest-do-not-run-against-production zaqar
-
-```
-
-Tempest tests can be retrieved from https://github.com/openstack/tempest
-
-The tests have been grouped and are available at https://git.opnfv.org/cgit/functest/tree/testcases/VIM/OpenStack/CI/suites
-
-
-## Automation
-
-For automation, two job templates have been created in https://git.opnfv.org/cgit/releng/tree/jjb/functest/functest.yml
-
-* functest-vim_bench-test: this template automatically runs a Python script that executes the different Rally scenarios (except Tempest)
-* functest-vim_tempest-test: this template runs the Rally command `rally verify start`
-
diff --git a/testcases/__init__.py b/testcases/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testcases/__init__.py
diff --git a/testcases/config_functest.py b/testcases/config_functest.py
index 05376be29..199fadfde 100644..100755
--- a/testcases/config_functest.py
+++ b/testcases/config_functest.py
@@ -8,18 +8,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml
+import re, json, os, urllib2, argparse, logging, shutil, subprocess, yaml, sys, getpass
+import functest_utils
from git import Repo
-
-from neutronclient.v2_0 import client
+from os import stat
+from pwd import getpwuid
+from neutronclient.v2_0 import client as neutronclient
actions = ['start', 'check', 'clean']
-
-
-
parser = argparse.ArgumentParser()
parser.add_argument("action", help="Possible actions are: '{d[0]}|{d[1]}|{d[2]}' ".format(d=actions))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-f", "--force", help="Force", action="store_true")
args = parser.parse_args()
@@ -37,142 +37,124 @@ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(messag
ch.setFormatter(formatter)
logger.addHandler(ch)
+REPO_PATH=os.environ['repos_dir']+'/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
-
-yaml_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/functest.yaml'
-name = yaml_url.rsplit('/')[-1]
-dest = "./" + name
-if not os.path.exists(dest):
- logger.info("Downloading functest.yaml...")
- try:
- response = urllib2.urlopen(yaml_url)
- except (urllib2.HTTPError, urllib2.URLError):
- logger.error("Error in fetching %s" %yaml_url)
- exit(-1)
- with open(dest, 'wb') as f:
- f.write(response.read())
- logger.info("functest.yaml stored in %s" % dest)
-else:
- logger.info("functest.yaml found in %s" % dest)
-
-
-with open('./functest.yaml') as f:
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
functest_yaml = yaml.safe_load(f)
f.close()
""" global variables """
-HOME = os.environ['HOME']+"/"
-FUNCTEST_BASE_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_functest")
-RALLY_REPO_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_repo")
-RALLY_TEST_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally")
-RALLY_INSTALLATION_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_inst")
-BENCH_TESTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_scn")
-VPING_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_vping")
-ODL_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_odl")
-NEUTRON_PUBLIC_NAME = functest_yaml.get("general").get("openstack").get("neutron_public_net_name")
-NEUTRON_NET_NAME = functest_yaml.get("general").get("openstack").get("neutron_net_name")
-NEUTRON_SUBNET_NAME = functest_yaml.get("general").get("openstack").get("neutron_subnet_name")
-NEUTRON_RANGE = functest_yaml.get("general").get("openstack").get("neutron_subnet_range")
-ROUTER_NAME = functest_yaml.get("general").get("openstack").get("neutron_router_name")
-
-IMAGE_URL = functest_yaml.get("general").get("openstack").get("image_url")
-IMAGE_DISK_FORMAT = functest_yaml.get("general").get("openstack").get("image_disk_format")
-IMAGE_NAME = functest_yaml.get("general").get("openstack").get("image_name")
-IMAGE_FILE_NAME = IMAGE_URL.rsplit('/')[-1]
-IMAGE_DOWNLOAD_PATH = FUNCTEST_BASE_DIR + IMAGE_FILE_NAME
-
-
-def config_functest_start():
+# Directories
+RALLY_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_rally")
+RALLY_REPO_DIR = functest_yaml.get("general").get("directories").get("dir_repo_rally")
+RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get("dir_rally_inst")
+RALLY_RESULT_DIR = functest_yaml.get("general").get("directories").get("dir_rally_res")
+VPING_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_vping")
+VIMS_TEST_DIR = functest_yaml.get("general").get("directories").get("dir_repo_vims_test")
+ODL_DIR = REPO_PATH + functest_yaml.get("general").get("directories").get("dir_odl")
+DATA_DIR = functest_yaml.get("general").get("directories").get("dir_functest_data")
+
+# Tempest/Rally configuration details
+DEPLOYMENT_NAME = functest_yaml.get("rally").get("deployment_name")
+RALLY_COMMIT = functest_yaml.get("general").get("repositories").get("rally_commit")
+
+#Image (cirros)
+IMAGE_FILE_NAME = functest_yaml.get("general").get("openstack").get("image_file_name")
+IMAGE_PATH = DATA_DIR + "/" + IMAGE_FILE_NAME
+
+# NEUTRON Private Network parameters
+NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_net_name")
+NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_name")
+NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("general"). \
+ get("openstack").get("neutron_private_subnet_cidr")
+NEUTRON_ROUTER_NAME = functest_yaml.get("general"). \
+ get("openstack").get("neutron_router_name")
+
+creds_neutron = functest_utils.get_credentials("neutron")
+neutron_client = neutronclient.Client(**creds_neutron)
+
+def action_start():
"""
Start the functest environment installation
"""
- #if config_functest_check():
- # logger.info("Functest environment already installed in %s. Nothing to do." %FUNCTEST_BASE_DIR)
- # exit(0)
- if not check_internet_connectivity():
+ if not functest_utils.check_internet_connectivity():
logger.error("There is no Internet connectivity. Please check the network configuration.")
exit(-1)
- elif not check_credentials():
- logger.error("Please source the openrc credentials and run the script again.")
- #TODO: source the credentials in this script
- exit(-1)
- elif not check_neutron_net(NEUTRON_PUBLIC_NAME):
- #The public network is normally created by default, no need to create a new one
- logger.debug("Public network '%s' not found." % NEUTRON_PUBLIC_NAME)
- logger.error("A public Neutron network is needed for the environment. Please create one.")
- #TODO: source the credentials in this script
- exit(-1)
- else:
- # Clean in case there are left overs
- config_functest_clean()
- logger.info("Starting installationg of functest environment in %s" % FUNCTEST_BASE_DIR)
- os.makedirs(FUNCTEST_BASE_DIR)
- if not os.path.exists(FUNCTEST_BASE_DIR):
- logger.error("There has been a problem while creating the environment directory.")
- exit(-1)
+ if action_check():
+ logger.info("Functest environment already installed. Nothing to do.")
+ exit(0)
- logger.info("Donwloading test scripts and scenarios...")
- if not download_tests():
- logger.error("There has been a problem while downloading the test scripts and scenarios.")
- config_functest_clean()
- exit(-1)
+ else:
+ # Clean in case there are left overs
+ logger.debug("Cleaning possible functest environment leftovers.")
+ action_clean()
+ logger.info("Starting installation of functest environment")
+
+ private_net = functest_utils.get_private_net(neutron_client)
+ if private_net is None:
+ # If there is no private network in the deployment we create one
+ if not create_private_neutron_net(neutron_client):
+ logger.error("There has been a problem while creating the functest network.")
+ action_clean()
+ exit(-1)
+ else:
+            logger.info("Private network '%s' already exists in the deployment."
+ % private_net['name'])
logger.info("Installing Rally...")
if not install_rally():
logger.error("There has been a problem while installing Rally.")
- config_functest_clean()
- exit(-1)
-
- logger.info("Installing ODL environment...")
- if not install_odl():
- logger.error("There has been a problem while installing Robot.")
- config_functest_clean()
+ action_clean()
exit(-1)
- logger.info("Creating a private Neutron network...")
- logger.debug("Checking if private network '%s' exists..." % NEUTRON_NET_NAME)
- #Now: if exists we don't create it again (the clean command does not clean the neutron network)
- #TODO: this check will not be needed when cleaning the neutron is implemented
- if check_neutron_net(NEUTRON_NET_NAME):
- logger.info("Private network '%s' found. No need to create another one." % NEUTRON_NET_NAME)
+ logger.info("Installing Ruby libraries for vIMS testcase...")
+ # Install ruby libraries for vims test-case
+ script = 'source /etc/profile.d/rvm.sh; '
+ script += 'cd ' + VIMS_TEST_DIR + '; '
+ script += 'rvm autolibs enable ;'
+ script += 'rvm install 1.9.3; '
+ script += 'rvm use 1.9.3;'
+ script += 'bundle install'
+
+ cmd = "/bin/bash -c '" + script + "'"
+ if os.environ.get("CI_DEBUG") == "false":
+ functest_utils.execute_command(cmd)
else:
- logger.info("Private network '%s' not found. Creating..." % NEUTRON_NET_NAME)
- if not create_neutron_net():
- logger.error("There has been a problem while creating the Neutron network.")
- #config_functest_clean()
- exit(-1)
-
+ functest_utils.execute_command(cmd,logger)
- logger.info("Donwloading image...")
- if not download_url_with_progress(IMAGE_URL, FUNCTEST_BASE_DIR):
- logger.error("There has been a problem while downloading the image.")
- config_functest_clean()
- exit(-1)
+ # Create result folder under functest if necessary
+ if not os.path.exists(RALLY_RESULT_DIR):
+ os.makedirs(RALLY_RESULT_DIR)
- logger.info("Creating Glance image: %s ..." %IMAGE_NAME)
- if not create_glance_image(IMAGE_DOWNLOAD_PATH,IMAGE_NAME,IMAGE_DISK_FORMAT):
- logger.error("There has been a problem while creating the Glance image.")
- config_functest_clean()
- exit(-1)
+ try:
+ logger.info("CI: Generate the list of executable tests.")
+ runnable_test = functest_utils.generateTestcaseList(functest_yaml)
+ logger.info("List of runnable tests generated: %s" % runnable_test)
+ except:
+ logger.error("Impossible to generate the list of runnable tests")
exit(0)
-
-def config_functest_check():
+def action_check():
"""
Check if the functest environment is properly installed
"""
errors_all = False
-
+ errors = False
logger.info("Checking current functest configuration...")
+ logger.debug("Checking script directories...")
- logger.debug("Checking directories...")
- errors = False
- dirs = [FUNCTEST_BASE_DIR, RALLY_INSTALLATION_DIR, RALLY_REPO_DIR, RALLY_TEST_DIR, BENCH_TESTS_DIR, VPING_DIR, ODL_DIR]
+ dirs = [RALLY_DIR, RALLY_INSTALLATION_DIR, VPING_DIR, ODL_DIR]
for dir in dirs:
if not os.path.exists(dir):
logger.debug("The directory '%s' does NOT exist." % dir)
@@ -188,56 +170,21 @@ def config_functest_check():
logger.debug("Checking Rally deployment...")
if not check_rally():
- logger.debug("Rally deployment NOT found.")
+ logger.debug(" Rally deployment NOT installed.")
errors_all = True
logger.debug("...FAIL")
else:
logger.debug("...OK")
-
- logger.debug("Checking Neutron...")
- errors = False
- if not check_neutron_net(NEUTRON_NET_NAME):
- logger.debug(" Private network '%s' NOT found." % NEUTRON_NET_NAME)
- errors = True
- errors_all = True
- else:
- logger.debug(" Private network '%s' found." % NEUTRON_NET_NAME)
-
- if not check_neutron_net(NEUTRON_PUBLIC_NAME):
- logger.debug(" Public network '%s' NOT found." % NEUTRON_PUBLIC_NAME)
- errors = True
- errors_all = True
- else:
- logger.debug(" Public network '%s' found." % NEUTRON_PUBLIC_NAME)
-
- if not errors:
- logger.debug("...OK")
- else:
- logger.debug("...FAIL")
-
-
logger.debug("Checking Image...")
errors = False
- if not os.path.isfile(IMAGE_DOWNLOAD_PATH):
- logger.debug(" Image file '%s' NOT found." % IMAGE_DOWNLOAD_PATH)
+ if not os.path.isfile(IMAGE_PATH):
+ logger.debug(" Image file '%s' NOT found." % IMAGE_PATH)
errors = True
errors_all = True
else:
- logger.debug(" Image file found in %s" % IMAGE_DOWNLOAD_PATH)
-
- cmd="glance image-list | grep " + IMAGE_NAME
- FNULL = open(os.devnull, 'w');
- logger.debug(' Executing command : {}'.format(cmd))
- p=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=FNULL);
- #if the command does not exist or there is no glance image
- line = p.stdout.readline()
- if line == "":
- logger.debug(" Glance image NOT found.")
- errors = True
- errors_all = True
- else:
- logger.debug(" Glance image found.")
+ logger.debug(" Image file found in %s" % IMAGE_PATH)
+
if not errors:
logger.debug("...OK")
@@ -245,44 +192,28 @@ def config_functest_check():
logger.debug("...FAIL")
#TODO: check OLD environment setup
- if errors_all:
- return False
- else:
- return True
+ return not errors_all
-
-def config_functest_clean():
+def action_clean():
"""
Clean the existing functest environment
"""
logger.info("Removing current functest environment...")
if os.path.exists(RALLY_INSTALLATION_DIR):
- logger.debug("Removing rally installation directory %s" % RALLY_INSTALLATION_DIR)
+ logger.debug("Removing Rally installation directory %s" % RALLY_INSTALLATION_DIR)
shutil.rmtree(RALLY_INSTALLATION_DIR,ignore_errors=True)
- if os.path.exists(FUNCTEST_BASE_DIR):
- logger.debug("Removing functest directory %s" % FUNCTEST_BASE_DIR)
- cmd = "sudo rm -rf " + FUNCTEST_BASE_DIR #need to be sudo, not possible with rmtree
- execute_command(cmd)
-
- #logger.debug("Deleting Neutron network %s" % NEUTRON_NET_NAME)
- #if not delete_neutron_net() :
- # logger.error("Error deleting the network. Remove it manually.")
-
- logger.debug("Deleting glance images")
- cmd = "glance image-list | grep "+IMAGE_NAME+" | cut -c3-38"
- p = os.popen(cmd,"r")
-
- #while image_id = p.readline()
- for image_id in p.readlines():
- cmd = "glance image-delete " + image_id
- execute_command(cmd)
-
- return True
-
+ if os.path.exists(RALLY_RESULT_DIR):
+ logger.debug("Removing Result directory")
+ shutil.rmtree(RALLY_RESULT_DIR,ignore_errors=True)
+ logger.debug("Cleaning up the OpenStack deployment...")
+ cmd='python ' + REPO_PATH + \
+ '/testcases/VIM/OpenStack/CI/libraries/clean_openstack.py'
+ functest_utils.execute_command(cmd,logger)
+ logger.info("Functest environment clean!")
@@ -290,38 +221,35 @@ def install_rally():
if check_rally():
logger.info("Rally is already installed.")
else:
- logger.debug("Cloning repository...")
- url = "https://git.openstack.org/openstack/rally"
- Repo.clone_from(url, RALLY_REPO_DIR)
-
- logger.debug("Executing %s./install_rally.sh..." %RALLY_REPO_DIR)
- install_script = RALLY_REPO_DIR + "install_rally.sh"
+ logger.debug("Executing %s/install_rally.sh..." %RALLY_REPO_DIR)
+ install_script = RALLY_REPO_DIR + "/install_rally.sh --yes"
cmd = 'sudo ' + install_script
- execute_command(cmd)
- #subprocess.call(['sudo', install_script])
+ if os.environ.get("CI_DEBUG") == "false":
+ functest_utils.execute_command(cmd)
+ else:
+ functest_utils.execute_command(cmd,logger)
logger.debug("Creating Rally environment...")
- cmd = "rally deployment create --fromenv --name=opnfv-arno-rally"
- execute_command(cmd)
+    cmd = "rally deployment create --fromenv --name="+DEPLOYMENT_NAME
+ functest_utils.execute_command(cmd,logger)
logger.debug("Installing tempest...")
- cmd = "rally-manage tempest install"
- execute_command(cmd)
+ cmd = "rally verify install"
+ functest_utils.execute_command(cmd,logger)
cmd = "rally deployment check"
- execute_command(cmd)
+ functest_utils.execute_command(cmd,logger)
#TODO: check that everything is 'Available' and warn if not
cmd = "rally show images"
- execute_command(cmd)
+ functest_utils.execute_command(cmd,logger)
cmd = "rally show flavors"
- execute_command(cmd)
+ functest_utils.execute_command(cmd,logger)
return True
-
def check_rally():
"""
Check if Rally is installed and properly configured
@@ -329,326 +257,102 @@ def check_rally():
if os.path.exists(RALLY_INSTALLATION_DIR):
logger.debug(" Rally installation directory found in %s" % RALLY_INSTALLATION_DIR)
FNULL = open(os.devnull, 'w');
- cmd="rally deployment list | grep opnfv";
+        cmd="rally deployment list | grep "+DEPLOYMENT_NAME
logger.debug(' Executing command : {}'.format(cmd))
p=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=FNULL);
#if the command does not exist or there is no deployment
line = p.stdout.readline()
if line == "":
- logger.debug(" Rally deployment not found")
+ logger.debug(" Rally deployment NOT found")
return False
logger.debug(" Rally deployment found")
return True
else:
- logger.debug(" Rally installation directory not found")
return False
-def install_odl():
- cmd = "chmod +x " + ODL_DIR + "create_venv.sh"
- execute_command(cmd)
- cmd = ODL_DIR + "create_venv.sh"
- execute_command(cmd)
- return True
-
+def create_private_neutron_net(neutron):
+ neutron.format = 'json'
+ logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
+ network_id = functest_utils. \
+ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
-def check_credentials():
- """
- Check if the OpenStack credentials (openrc) are sourced
- """
- #TODO: there must be a short way to do this, doing if os.environ["something"] == "" throws an error
- try:
- os.environ['OS_AUTH_URL']
- except KeyError:
- return False
- try:
- os.environ['OS_USERNAME']
- except KeyError:
+ if not network_id:
return False
- try:
- os.environ['OS_PASSWORD']
- except KeyError:
- return False
- try:
- os.environ['OS_TENANT_NAME']
- except KeyError:
- return False
- try:
- os.environ['OS_REGION_NAME']
- except KeyError:
- return False
- return True
+ logger.debug("Network '%s' created successfully" % network_id)
-
-def get_credentials():
- d = {}
- d['username'] = os.environ['OS_USERNAME']
- d['password'] = os.environ['OS_PASSWORD']
- d['auth_url'] = os.environ['OS_AUTH_URL']
- d['tenant_name'] = os.environ['OS_TENANT_NAME']
- return d
-
-def get_nova_credentials():
- d = {}
- d['username'] = os.environ['OS_USERNAME']
- d['api_key'] = os.environ['OS_PASSWORD']
- d['auth_url'] = os.environ['OS_AUTH_URL']
- d['project_id'] = os.environ['OS_TENANT_NAME']
- return d
-
-
-def download_tests():
- os.makedirs(VPING_DIR)
- os.makedirs(ODL_DIR)
- os.makedirs(BENCH_TESTS_DIR)
-
- logger.info("Downloading functest.yaml...")
- yaml_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/functest.yaml'
- if not download_url(yaml_url,FUNCTEST_BASE_DIR):
- logger.error("Unable to download the configuration file functest.yaml")
- return False
-
- logger.info("Downloading vPing test...")
- vPing_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/vPing/CI/libraries/vPing.py'
- if not download_url(vPing_url,VPING_DIR):
+ logger.info('Updating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
+ if functest_utils.update_neutron_net(neutron, network_id, shared=True):
+ logger.debug("Network '%s' updated successfully" % network_id)
+ else:
+ logger.info('Updating neutron network %s failed' % network_id)
+
+ logger.debug('Creating Subnet....')
+ subnet_id = functest_utils. \
+ create_neutron_subnet(neutron,
+ NEUTRON_PRIVATE_SUBNET_NAME,
+ NEUTRON_PRIVATE_SUBNET_CIDR,
+ network_id)
+ if not subnet_id:
return False
+ logger.debug("Subnet '%s' created successfully" % subnet_id)
+ logger.debug('Creating Router...')
+ router_id = functest_utils. \
+ create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
-
- logger.info("Downloading Rally bench tests...")
- run_rally_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/VIM/OpenStack/CI/libraries/run_rally.py'
- if not download_url(run_rally_url,RALLY_TEST_DIR):
+ if not router_id:
return False
- rally_bench_base_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/VIM/OpenStack/CI/suites/'
- bench_tests = ['authenticate', 'cinder', 'glance', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'tempest', 'vm']
- for i in bench_tests:
- rally_bench_url = rally_bench_base_url + "opnfv-" + i + ".json"
- logger.debug("Downloading %s" %rally_bench_url)
- if not download_url(rally_bench_url,BENCH_TESTS_DIR):
- return False
+ logger.debug("Router '%s' created successfully" % router_id)
+ logger.debug('Adding router to subnet...')
- logger.info("Downloading OLD tests...")
- odl_base_url = 'https://git.opnfv.org/cgit/functest/plain/testcases/Controllers/ODL/CI/'
- odl_tests = ['create_venv.sh', 'requirements.pip', 'start_tests.sh', 'test_list.txt']
- for i in odl_tests:
- odl_url = odl_base_url + i
- logger.debug("Downloading %s" %odl_url)
- if not download_url(odl_url,ODL_DIR):
- return False
+ result = functest_utils.add_interface_router(neutron, router_id, subnet_id)
- return True
-
-
-def create_neutron_net():
- credentials = get_credentials()
- neutron = client.Client(**credentials)
- try:
- neutron.format = 'json'
- logger.debug('Creating Neutron network %s...' % NEUTRON_NET_NAME)
- json_body = {'network': {'name': NEUTRON_NET_NAME,
- 'admin_state_up': True}}
- netw = neutron.create_network(body=json_body)
- net_dict = netw['network']
- network_id = net_dict['id']
- logger.debug("Network '%s' created successfully" % network_id)
-
- logger.debug('Creating Subnet....')
- json_body = {'subnets': [{'cidr': NEUTRON_RANGE,
- 'ip_version': 4, 'network_id': network_id}]}
- subnet = neutron.create_subnet(body=json_body)
- logger.debug("Subnet '%s' created successfully" % subnet)
-
- logger.debug('Creating Router...')
- json_body = {'router': {'name': ROUTER_NAME, 'admin_state_up': True}}
- router = neutron.create_router(json_body)
- logger.debug("Router '%s' created successfully" % router)
- router_id = router['router']['id']
-
- logger.debug('Creating Port')
- json_body = {'port': {
- 'admin_state_up': True,
- 'device_id': router_id,
- 'name': 'port1',
- 'network_id': network_id,
- }}
- response = neutron.create_port(body=json_body)
- logger.debug("Port created successfully.")
-
- logger.debug('Setting up gateway...')
- public_network_id = get_network_id(neutron,NEUTRON_PUBLIC_NAME)
- json_body = {'network_id': public_network_id, 'enable_snat' : True}
- gateway = neutron.add_gateway_router(router_id,body=json_body)
- logger.debug("Gateway '%s' added successfully" % gateway)
- except:
- logger.error("There has been a problem when creating the Neutron network.")
+ if not result:
return False
- finally:
- logger.info("Neutron network created successfully.")
- return True
-
- return False
-
-def get_network_id(neutron_client, network_name):
- networks = neutron_client.list_networks()['networks']
- id = ''
- for n in networks:
- if n['name'] == network_name:
- id = n['id']
- break
- return id
-
-def check_neutron_net(net_name):
- credentials = get_credentials()
- neutron = client.Client(**credentials)
- for network in neutron.list_networks()['networks']:
- if network['name'] == net_name :
- for subnet in network['subnets']:
- return True
- return False
-
-def delete_neutron_net():
- #TODO: remove router, ports
- credentials = get_credentials()
- neutron = client.Client(**credentials)
- try:
- #https://github.com/isginf/openstack_tools/blob/master/openstack_remove_tenant.py
- for network in neutron.list_networks()['networks']:
- if network['name'] == NEUTRON_NET_NAME :
- for subnet in network['subnets']:
- print "Deleting subnet " + subnet
- neutron.delete_subnet(subnet)
- print "Deleting network " + network['name']
- neutron.delete_neutron_net(network['id'])
- finally:
- return True
- return False
-
-
-
-
-def create_glance_image(path,name,disk_format):
- """
- Create a glance image given the absolute path of the image, its name and the disk format
- """
- cmd = "glance image-create --name "+name+" --is-public true --disk-format "+disk_format+" --container-format bare --file "+path
- execute_command(cmd)
+ logger.debug("Interface added successfully.")
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
return True
-
-
-
-def download_url(url, dest_path):
- """
- Download a file to a destination path given a URL
- """
- name = url.rsplit('/')[-1]
- dest = dest_path + name
- try:
- response = urllib2.urlopen(url)
- except (urllib2.HTTPError, urllib2.URLError):
- logger.error("Error in fetching %s" %url)
- return False
-
- with open(dest, 'wb') as f:
- f.write(response.read())
- return True
-
-
-def download_url_with_progress(url, dest_path):
- """
- Download a file to a destination path given a URL showing the progress
- """
- name = url.rsplit('/')[-1]
- dest = dest_path + name
- try:
- response = urllib2.urlopen(url)
- except (urllib2.HTTPError, urllib2.URLError):
- logger.error("Error in fetching %s" %url)
- return False
-
- f = open(dest, 'wb')
- meta = response.info()
- file_size = int(meta.getheaders("Content-Length")[0])
- logger.info("Downloading: %s Bytes: %s" %(dest, file_size))
-
- file_size_dl = 0
- block_sz = 8192
- while True:
- buffer = response.read(block_sz)
- if not buffer:
- break
-
- file_size_dl += len(buffer)
- f.write(buffer)
- status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
- status = status + chr(8)*(len(status)+1)
- print status,
-
- f.close()
- print("\n")
- return True
-
-
-def check_internet_connectivity(url='http://www.google.com/'):
- """
- Check if there is access to the internet
- """
- try:
- urllib2.urlopen(url, timeout=5)
- return True
- except urllib.request.URLError:
- return False
-
-def execute_command(cmd):
- """
- Execute Linux command
- """
- logger.debug('Executing command : {}'.format(cmd))
- #p = os.popen(cmd,"r")
- #logger.debug(p.read())
- output_file = "/tmp/output.txt"
- f = open(output_file, 'w+')
- p = subprocess.call(cmd,shell=True, stdout=f, stderr=subprocess.STDOUT)
- f.close()
- f = open(output_file, 'r')
- logger.debug(f.read())
- #p = subprocess.call(cmd,shell=True);
- if p == 0 :
- return True
- else:
- logger.error("Error when executing command %s" %cmd)
- exit(-1)
-
-
-
-
def main():
if not (args.action in actions):
logger.error('argument not valid')
exit(-1)
+
+ if not functest_utils.check_credentials():
+ logger.error("Please source the openrc credentials and run the script again.")
+ #TODO: source the credentials in this script
+ exit(-1)
+
+
if args.action == "start":
- config_functest_start()
+ action_start()
if args.action == "check":
- if config_functest_check():
+ if action_check():
logger.info("Functest environment correctly installed")
else:
logger.info("Functest environment not found or faulty")
if args.action == "clean":
- while True:
- print("Are you sure? [y|n]")
- answer = raw_input("")
- if answer == "y":
- config_functest_clean()
- break
- elif answer == "n":
- break
- else:
- print("Invalid option.")
+ if args.force :
+ action_clean()
+ else :
+ while True:
+ print("Are you sure? [y|n]")
+ answer = raw_input("")
+ if answer == "y":
+ action_clean()
+ break
+ elif answer == "n":
+ break
+ else:
+ print("Invalid option.")
exit(0)
diff --git a/testcases/config_functest.yaml b/testcases/config_functest.yaml
new file mode 100644
index 000000000..7c1e79d36
--- /dev/null
+++ b/testcases/config_functest.yaml
@@ -0,0 +1,243 @@
+general:
+ directories:
+ # Relative to the path where the repo is cloned:
+ dir_vping: testcases/vPing/CI/libraries/
+ dir_odl: testcases/Controllers/ODL/CI/
+ dir_rally: testcases/VIM/OpenStack/CI/libraries/
+ dir_rally_scn: testcases/VIM/OpenStack/CI/suites/
+ dir_vIMS: testcases/vIMS/CI/
+ dir_onos: testcases/Controllers/ONOS/Teston/CI/
+
+ # Absolute path
+ dir_repos: /home/opnfv/repos
+ dir_repo_functest: /home/opnfv/repos/functest
+ dir_repo_rally: /home/opnfv/repos/rally
+ dir_repo_releng: /home/opnfv/repos/releng
+ dir_repo_vims_test: /home/opnfv/repos/vims-test
+ dir_repo_bgpvpn: /home/opnfv/repos/bgpvpn
+ dir_repo_onos: /home/opnfv/repos/onos
+ dir_repo_promise: /home/opnfv/repos/promise
+ dir_repo_doctor: /home/opnfv/repos/doctor
+ dir_functest: /home/opnfv/functest
+ dir_results: /home/opnfv/functest/results
+ dir_functest_conf: /home/opnfv/functest/conf
+ dir_rally_res: /home/opnfv/functest/results/rally/
+ dir_functest_data: /home/opnfv/functest/data
+ dir_vIMS_data: /home/opnfv/functest/data/vIMS
+ dir_rally_inst: ~/.rally
+
+ repositories:
+ # branch and commit ID to which the repos will be reset (HEAD)
+ releng_branch: master
+ releng_commit: latest
+ rally_branch: master
+ rally_commit: 57efc5327530a34d139b5a1fd1f480195de0aadb
+ vims_test_branch: stable
+ vims_test_commit: latest
+ bgpvpn_branch: master
+ bgpvpn_commit: latest
+ onos_branch: master
+ onos_commit: latest
+ promise_branch: master
+ promise_commit: latest
+
+ openstack:
+ image_name: functest-img
+ image_file_name: cirros-0.3.4-x86_64-disk.img
+ image_disk_format: qcow2
+
+ #Public network. Optional
+ neutron_public_net_name: net04_ext
+ neutron_public_subnet_name: net04_ext__subnet
+ neutron_public_subnet_cidr: 172.16.9.0/24
+ neutron_public_subnet_start: 172.16.9.130
+ neutron_public_subnet_end: 172.16.9.254
+ #Private network for functest. Will be created by config_functest.py
+ neutron_private_net_name: functest-net
+ neutron_private_subnet_name: functest-subnet
+ neutron_private_subnet_cidr: 192.168.120.0/24
+ neutron_private_subnet_start: 192.168.120.2
+ neutron_private_subnet_end: 192.168.120.254
+ neutron_private_subnet_gateway: 192.168.120.254
+ neutron_router_name: functest-router
+
+vping:
+ ping_timeout: 200
+ vm_flavor: m1.small #adapt to your environment
+ vm_name_1: opnfv-vping-1
+ vm_name_2: opnfv-vping-2
+ vping_private_net_name: vping-net
+ vping_private_subnet_name: vping-subnet
+ vping_private_subnet_cidr: 192.168.130.0/24
+ vping_router_name: vping-router
+ ip_1: 192.168.130.30
+ ip_2: 192.168.130.40
+
+tempest:
+ identity:
+ tenant_name: tempest
+ tenant_description: Tenant for Tempest test suite
+ user_name: tempest
+ user_password: tempest
+
+rally:
+ deployment_name: opnfv-rally
+
+vIMS:
+ general:
+ tenant_name: vIMS
+ tenant_description: vIMS Functionality Testing
+ images:
+ ubuntu:
+ image_url: 'http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img'
+ image_name: ubuntu_14.04
+ centos:
+ image_url: 'http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1510.qcow2'
+ image_name: centos_7
+ cloudify:
+ blueprint:
+ url: https://github.com/boucherv-orange/cloudify-manager-blueprints.git
+ branch: "3.3-build"
+ requierments:
+ ram_min: 3000
+ os_image: centos_7
+ inputs:
+ keystone_username: ""
+ keystone_password: ""
+ keystone_tenant_name: ""
+ keystone_url: ""
+ manager_public_key_name: 'manager-kp'
+ agent_public_key_name: 'agent-kp'
+ image_id: ""
+ flavor_id: "3"
+ external_network_name: ""
+ ssh_user: centos
+ agents_user: ubuntu
+ clearwater:
+ blueprint:
+ file_name: 'openstack-blueprint.yaml'
+ name: "clearwater-opnfv"
+ destination_folder: "opnfv-cloudify-clearwater"
+ url: 'https://github.com/Orange-OpenSource/opnfv-cloudify-clearwater.git'
+ branch: "stable"
+ deployment-name: 'clearwater-opnfv'
+ requierments:
+ ram_min: 1700
+ os_image: ubuntu_14.04
+ inputs:
+ image_id: ''
+ flavor_id: ''
+ agent_user: 'ubuntu'
+ external_network_name: ''
+ public_domain: clearwater.opnfv
+ONOS:
+ general:
+ onosbench_username: 'root'
+ onosbench_password: 'root'
+ onoscli_username: 'root'
+ onoscli_password: 'root'
+ runtimeout: 300
+ environment:
+ OCT: '10.20.0.1'
+ OC1: '10.20.0.7'
+ OC2: '10.20.0.7'
+ OC3: '10.20.0.7'
+ OCN: '10.20.0.4'
+ OCN2: '10.20.0.5'
+ installer_master: '10.20.0.2'
+ installer_master_username: 'root'
+ installer_master_password: 'r00tme'
+results:
+ test_db_url: http://213.77.62.197
+
+# to be maintained...
+# the execution order is important as some tests may be more destructive than others
+# and if vPing fails there is usually no need to continue...
+test_exec_priority:
+ 1: vping
+ 2: tempest
+ 3: odl
+ 4: onos
+ #5: ovno
+ #6: doctor
+ 7: promise
+ #8: policy-test
+ #9: odl-vpn_service-tests
+ #10: opnfv-yardstick-tc026-sdnvpn
+ #11: openstack-neutron-bgpvpn-api-extension-tests
+ 12: vims
+ 13: rally
+
+
+########################################################################
+# This part lists the dependencies of the tests
+#
+# it is used to manage the complexity of the possible combinations
+#
+# 17 projects have been declared for Brahmaputra (D Milestone)
+# 89 testcases are associated with these 17 projects
+#
+# in B-Release, Functest deals with 6 companion projects:
+# - congress
+# - doctor
+# - ovno
+# - policyTest
+# - promise
+# - sdnvpn
+#
+# constraints may be declared at the testcase or the test project level
+#
+# By default we consider that all the tests can be run on any configuration
+#
+# we defined 2 constraints
+# - installer (e.g. my test can be run only with installer Compass)
+# possible values: apex, compass, fuel, joid
+#
+# - the scenario: it describes a specific installation
+# os-<controller>-<nfvfeature>-<mode>[-<extrastuff>]
+# With parameters:
+# controller=(nosdn|odl_l3|odl_l2|onos|ocl)
+# No odl_l3 today
+# nfvfeature=(kvm|ovs|dpdk|nofeature)
+#            (multiple features are '_'-separated)
+# mode=(ha|noha)
+# extrastuff=(none)
+# Optional field - Not used today#
+#            Optional field - not used today
+# ref:https://gerrit.opnfv.org/gerrit/#/c/6323/7/jjb/joid/joid-deploy.sh (L72-82)
+# e.g. my test is only possible with OVS 2.3.2 on odl
+#
+# in functest, we indicate the regex pattern to be checked against the scenario
+# e.g. odl-vpn_service-tests can be run if and only if
+# - the installer is fuel
+# - the scenario name contains both ovs and odl
+#
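+# illustrative example (scenario value assumed): with INSTALLER_TYPE=fuel and
+# DEPLOY_SCENARIO=os-odl_l2-nofeature-ha, the 'odl' and 'policy-test' cases
+# below are runnable (their 'odl' scenario regex matches), while 'onos' and
+# 'ovno' are filtered out
+#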
+#######################################################################
+test-dependencies:
+ doctor:
+ installer: '(apex)|(fuel)'
+ functest:
+ vims:
+ vping:
+ tempest:
+ rally:
+ odl:
+ scenario: 'odl'
+ onos:
+ scenario: 'onos'
+ promise:
+ installer: '(fuel)|(joid)'
+ ovno:
+ scenario: 'ocl'
+ policy-test:
+ scenario: 'odl'
+ sdnvpn:
+ opnfv-yardstick-tc026-sdnvpn:
+ installer: 'fuel'
+ scenario: '(ovs)*(nosdn)'
+ odl-vpn_service-tests:
+ installer: 'fuel'
+ scenario: '(ovs)*(odl)'
+ openstack-neutron-bgpvpn-api-extension-tests:
+ installer: 'fuel'
+ scenario: '(ovs)*(nosdn)'
diff --git a/testcases/features/doctor.py b/testcases/features/doctor.py
new file mode 100644
index 000000000..a68c31cd0
--- /dev/null
+++ b/testcases/features/doctor.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# 0.1: This script executes the Doctor test suite (tests/run.sh in the doctor repository)
+# 0.2: measure the test duration and publish the results in JSON format
+#
+#
+
+import os
+import time
+import sys
+import yaml
+
+
+with open('/home/opnfv/functest/conf/config_functest.yaml') as f:
+ functest_yaml = yaml.safe_load(f)
+
+dirs = functest_yaml.get('general').get('directories')
+FUNCTEST_REPO = dirs.get('dir_repo_functest')
+DOCTOR_REPO = dirs.get('dir_repo_doctor')
+TEST_DB_URL = functest_yaml.get('results').get('test_db_url')
+
+sys.path.append('%s/testcases' % FUNCTEST_REPO)
+import functest_utils
+
+
+def main():
+ cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO
+ start_time_ts = time.time()
+
+ ret = functest_utils.execute_command(cmd, exit_on_error=False)
+
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ if ret:
+ test_status = 'OK'
+ else:
+ test_status = 'NOK'
+
+ details = {
+ 'timestart': start_time_ts,
+ 'duration': duration,
+ 'status': test_status,
+ }
+ pod_name = functest_utils.get_pod_name()
+ git_version = functest_utils.get_git_branch(DOCTOR_REPO)
+ functest_utils.push_results_to_db(TEST_DB_URL,
+ 'doctor-notification',
+ None,
+ pod_name,
+ git_version,
+ details)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testcases/functest.yaml b/testcases/functest.yaml
deleted file mode 100644
index 2f3494997..000000000
--- a/testcases/functest.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-general:
- directories:
- dir_functest: .functest/ #DONT CHANGE
- dir_rally_repo: .functest/Rally_repo/
- dir_rally: .functest/Rally_test/
- dir_rally_scn: .functest/Rally_test/scenarios/
- dir_rally_res: .functest/Rally_test/results/
- dir_vping: .functest/vPing/
- dir_odl: .functest/ODL/
- dir_rally_inst: .rally/
- openstack:
- image_name: Ubuntu14.04
- image_url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
- image_disk_format: raw
- #Public network. Must exist already.
- neutron_public_net_name: net04_ext
- #Private network for functest. Will be created by config_functest.py
- neutron_net_name: functest-net
- neutron_subnet_name: functest-subnet
- neutron_subnet_range: 192.168.120.0/24
- neutron_subnet_start: 192.168.120.2
- neutron_subnet_end: 192.168.120.254
- neutron_subnet_gateway: 192.168.120.254
- neutron_router_name: functest-router
-vping:
- ping_timeout: 200
- vm_flavor: m1.small #adapt to your environment
- vm_name_1: opnfv-vping-1
- vm_name_2: opnfv-vping-2
diff --git a/testcases/functest_utils.py b/testcases/functest_utils.py
new file mode 100644
index 000000000..baab415d9
--- /dev/null
+++ b/testcases/functest_utils.py
@@ -0,0 +1,766 @@
+#!/usr/bin/env python
+#
+# jose.lausuch@ericsson.com
+# valentin.boucher@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+
+import os
+import os.path
+import urllib2
+import subprocess
+import sys
+import requests
+import json
+import shutil
+import re
+import yaml
+from git import Repo
+
+
+# ############ CREDENTIALS OPENSTACK #############
+def check_credentials():
+ """
+ Check if the OpenStack credentials (openrc) are sourced
+ """
+ env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME']
+ return all(map(lambda v: v in os.environ and os.environ[v], env_vars))
+
+
+def get_credentials(service):
+ """Returns a creds dictionary filled with the following keys:
+ * username
+ * password/api_key (depending on the service)
+ * tenant_name/project_id (depending on the service)
+ * auth_url
+ :param service: a string indicating the name of the service
+ requesting the credentials.
+ """
+ creds = {}
+    # Unfortunately, each of the OpenStack clients requests slightly
+    # different entries in its credentials dict.
+ if service.lower() in ("nova", "cinder"):
+ password = "api_key"
+ tenant = "project_id"
+ else:
+ password = "password"
+ tenant = "tenant_name"
+
+    # The most common way to pass this information to the script is through
+    # environment variables.
+ creds.update({
+ "username": os.environ.get('OS_USERNAME', "admin"),
+ password: os.environ.get("OS_PASSWORD", 'admin'),
+ "auth_url": os.environ.get("OS_AUTH_URL",
+ "http://192.168.20.71:5000/v2.0"),
+ tenant: os.environ.get("OS_TENANT_NAME", "admin"),
+ })
+
+ return creds
+
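+# Usage sketch (illustrative, not executed here): the returned dict is meant to
+# be unpacked straight into the corresponding python client, e.g.:
+#   from novaclient import client as novaclient
+#   from neutronclient.v2_0 import client as neutronclient
+#   nova = novaclient.Client('2', **get_credentials("nova"))
+#   neutron = neutronclient.Client(**get_credentials("neutron"))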
+
+# ################ NOVA #################
+def get_instances(nova_client):
+ try:
+ instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+ return instances
+ except:
+ return None
+
+
+def get_instance_status(nova_client, instance):
+ try:
+ instance = nova_client.servers.get(instance.id)
+ return instance.status
+ except:
+ return None
+
+
+def get_instance_by_name(nova_client, instance_name):
+ try:
+ instance = nova_client.servers.find(name=instance_name)
+ return instance
+ except:
+ return None
+
+
+def get_flavor_id(nova_client, flavor_name):
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ id = f.id
+ break
+ return id
+
+
+def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
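+    # return the id of the first flavor whose RAM size falls within the
+    # [min_ram, max_ram] range (or '' if no flavor matches)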
+ flavors = nova_client.flavors.list(detailed=True)
+ id = ''
+ for f in flavors:
+ if min_ram <= f.ram and f.ram <= max_ram:
+ id = f.id
+ break
+ return id
+
+
+def delete_instance(nova_client, instance_id):
+ try:
+ nova_client.servers.force_delete(instance_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def get_floating_ips(nova_client):
+ try:
+ floating_ips = nova_client.floating_ips.list()
+ return floating_ips
+ except:
+ return None
+
+
+def delete_floating_ip(nova_client, floatingip_id):
+ try:
+ nova_client.floating_ips.delete(floatingip_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return None
+
+
+# ################ NEUTRON #################
+def create_neutron_net(neutron_client, name):
+ json_body = {'network': {'name': name,
+ 'admin_state_up': True}}
+ try:
+ network = neutron_client.create_network(body=json_body)
+ network_dict = network['network']
+ return network_dict['id']
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def update_neutron_net(neutron_client, network_id, shared=False):
+ json_body = {'network': {'shared': shared}}
+ try:
+ neutron_client.update_network(network_id, body=json_body)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_neutron_net(neutron_client, network_id):
+ try:
+ neutron_client.delete_network(network_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def create_neutron_subnet(neutron_client, name, cidr, net_id):
+ json_body = {'subnets': [{'name': name, 'cidr': cidr,
+ 'ip_version': 4, 'network_id': net_id}]}
+ try:
+ subnet = neutron_client.create_subnet(body=json_body)
+ return subnet['subnets'][0]['id']
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_neutron_subnet(neutron_client, subnet_id):
+ try:
+ neutron_client.delete_subnet(subnet_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def create_neutron_router(neutron_client, name):
+ json_body = {'router': {'name': name, 'admin_state_up': True}}
+ try:
+ router = neutron_client.create_router(json_body)
+ return router['router']['id']
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_neutron_router(neutron_client, router_id):
+ json_body = {'router': {'id': router_id}}
+ try:
+ neutron_client.delete_router(router=router_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def add_interface_router(neutron_client, router_id, subnet_id):
+ json_body = {"subnet_id": subnet_id}
+ try:
+ neutron_client.add_interface_router(router=router_id, body=json_body)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def remove_interface_router(neutron_client, router_id, subnet_id):
+ json_body = {"subnet_id": subnet_id}
+ try:
+ neutron_client.remove_interface_router(router=router_id,
+ body=json_body)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def remove_gateway_router(neutron_client, router_id):
+ try:
+ neutron_client.remove_gateway_router(router_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def create_neutron_port(neutron_client, name, network_id, ip):
+ json_body = {'port': {
+ 'admin_state_up': True,
+ 'name': name,
+ 'network_id': network_id,
+ 'fixed_ips': [{"ip_address": ip}]
+ }}
+ try:
+ port = neutron_client.create_port(body=json_body)
+ return port['port']['id']
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def update_neutron_port(neutron_client, port_id, device_owner):
+ json_body = {'port': {
+ 'device_owner': device_owner,
+ }}
+ try:
+ port = neutron_client.update_port(port=port_id,
+ body=json_body)
+ return port['port']['id']
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_neutron_port(neutron_client, port_id):
+ try:
+ neutron_client.delete_port(port_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def get_network_id(neutron_client, network_name):
+ networks = neutron_client.list_networks()['networks']
+ id = ''
+ for n in networks:
+ if n['name'] == network_name:
+ id = n['id']
+ break
+ return id
+
+
+def check_neutron_net(neutron_client, net_name):
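+    # return True only if a network with that name exists and has at least one subnet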
+ for network in neutron_client.list_networks()['networks']:
+ if network['name'] == net_name:
+ for subnet in network['subnets']:
+ return True
+ return False
+
+
+def get_network_list(neutron_client):
+ network_list = neutron_client.list_networks()['networks']
+ if len(network_list) == 0:
+ return None
+ else:
+ return network_list
+
+
+def get_router_list(neutron_client):
+ router_list = neutron_client.list_routers()['routers']
+ if len(router_list) == 0:
+ return None
+ else:
+ return router_list
+
+
+def get_port_list(neutron_client):
+ port_list = neutron_client.list_ports()['ports']
+ if len(port_list) == 0:
+ return None
+ else:
+ return port_list
+
+
+def get_external_net(neutron_client):
+ for network in neutron_client.list_networks()['networks']:
+ if network['router:external']:
+ return network['name']
+ return False
+
+
+def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota):
+ json_body = {"quota": {
+ "security_group": sg_quota,
+ "security_group_rule": sg_rule_quota
+ }}
+
+ try:
+ quota = neutron_client.update_quota(tenant_id=tenant_id,
+ body=json_body)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def update_cinder_quota(cinder_client, tenant_id, vols_quota,
+ snapshots_quota, gigabytes_quota):
+ quotas_values = {"volumes": vols_quota,
+ "snapshots": snapshots_quota,
+ "gigabytes": gigabytes_quota}
+
+ try:
+ quotas_default = cinder_client.quotas.update(tenant_id,
+ **quotas_values)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def get_private_net(neutron_client):
+ # Checks if there is an existing shared private network
+ networks = neutron_client.list_networks()['networks']
+ if len(networks) == 0:
+ return None
+ for net in networks:
+ if (net['router:external'] is False) and (net['shared'] is True):
+ return net
+ return None
+
+
+# ################ GLANCE #################
+def get_images(nova_client):
+ try:
+ images = nova_client.images.list()
+ return images
+ except:
+ return None
+
+
+def get_image_id(glance_client, image_name):
+ images = glance_client.images.list()
+ id = ''
+ for i in images:
+ if i.name == image_name:
+ id = i.id
+ break
+ return id
+
+
+def create_glance_image(glance_client, image_name, file_path, public=True):
+ if not os.path.isfile(file_path):
+ print "Error: file " + file_path + " does not exist."
+ return False
+ try:
+ with open(file_path) as fimage:
+ image = glance_client.images.create(name=image_name,
+ is_public=public,
+ disk_format="qcow2",
+ container_format="bare",
+ data=fimage)
+ return image.id
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_glance_image(nova_client, image_id):
+ try:
+ nova_client.images.delete(image_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+# ################ CINDER #################
+def get_volumes(cinder_client):
+ try:
+ volumes = cinder_client.volumes.list(search_opts={'all_tenants': 1})
+ return volumes
+ except:
+ return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False):
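+    # when forced=True, the volume is detached first and then force-deleted,
+    # which allows removing volumes that are still attached to an instance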
+ try:
+ if forced:
+ try:
+ cinder_client.volumes.detach(volume_id)
+ except:
+ print "Error:", sys.exc_info()[0]
+ cinder_client.volumes.force_delete(volume_id)
+ else:
+ cinder_client.volumes.delete(volume_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+# ################ SECURITY GROUPS #################
+def get_security_groups(neutron_client):
+ try:
+ security_groups = neutron_client.list_security_groups()[
+ 'security_groups']
+ return security_groups
+ except:
+ return None
+
+
+def delete_security_group(neutron_client, secgroup_id):
+ try:
+ neutron_client.delete_security_group(secgroup_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+# ################ KEYSTONE #################
+def get_tenants(keystone_client):
+ try:
+ tenants = keystone_client.tenants.list()
+ return tenants
+ except:
+ return None
+
+
+def get_tenant_id(keystone_client, tenant_name):
+ tenants = keystone_client.tenants.list()
+ id = ''
+ for t in tenants:
+ if t.name == tenant_name:
+ id = t.id
+ break
+ return id
+
+
+def get_users(keystone_client):
+ try:
+ users = keystone_client.users.list()
+ return users
+ except:
+ return None
+
+
+def get_role_id(keystone_client, role_name):
+ roles = keystone_client.roles.list()
+ id = ''
+ for r in roles:
+ if r.name == role_name:
+ id = r.id
+ break
+ return id
+
+
+def get_user_id(keystone_client, user_name):
+ users = keystone_client.users.list()
+ id = ''
+ for u in users:
+ if u.name == user_name:
+ id = u.id
+ break
+ return id
+
+
+def create_tenant(keystone_client, tenant_name, tenant_description):
+ try:
+ tenant = keystone_client.tenants.create(tenant_name,
+ tenant_description,
+ enabled=True)
+ return tenant.id
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_tenant(keystone_client, tenant_id):
+ try:
+ tenant = keystone_client.tenants.delete(tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def create_user(keystone_client, user_name, user_password,
+ user_email, tenant_id):
+ try:
+ user = keystone_client.users.create(user_name, user_password,
+ user_email, tenant_id,
+ enabled=True)
+ return user.id
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def delete_user(keystone_client, user_id):
+ try:
+ tenant = keystone_client.users.delete(user_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def add_role_user(keystone_client, user_id, role_id, tenant_id):
+ try:
+ keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+# ################ UTILS #################
+def check_internet_connectivity(url='http://www.opnfv.org/'):
+ """
+ Check if there is access to the internet
+ """
+ try:
+ urllib2.urlopen(url, timeout=5)
+ return True
+ except urllib2.URLError:
+ return False
+
+
+def download_url(url, dest_path):
+ """
+ Download a file to a destination path given a URL
+ """
+ name = url.rsplit('/')[-1]
+ dest = dest_path + "/" + name
+ try:
+ response = urllib2.urlopen(url)
+ except (urllib2.HTTPError, urllib2.URLError):
+ return False
+
+ with open(dest, 'wb') as f:
+ shutil.copyfileobj(response, f)
+ return True
+
+
+def execute_command(cmd, logger=None, exit_on_error=True):
+ """
+ Execute Linux command
+ """
+ if logger:
+ logger.debug('Executing command : {}'.format(cmd))
+ output_file = "output.txt"
+ f = open(output_file, 'w+')
+ p = subprocess.call(cmd, shell=True, stdout=f, stderr=subprocess.STDOUT)
+ f.close()
+ f = open(output_file, 'r')
+ result = f.read()
+ if result != "" and logger:
+ logger.debug(result)
+ if p == 0:
+ return True
+ else:
+ if logger:
+ logger.error("Error when executing command %s" % cmd)
+ if exit_on_error:
+ exit(-1)
+ return False
+
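+# Usage sketch (illustrative): run a shell command, send its output to an
+# existing logger and keep going on failure instead of exiting:
+#   ok = execute_command("rally deployment check", logger, exit_on_error=False)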
+
+def get_git_branch(repo_path):
+ """
+ Get git branch name
+ """
+ repo = Repo(repo_path)
+ branch = repo.active_branch
+ return branch.name
+
+
+def get_installer_type(logger=None):
+ """
+ Get installer type (fuel, apex, joid, compass)
+ """
+ try:
+ installer = os.environ['INSTALLER_TYPE']
+ except KeyError:
+ if logger:
+ logger.error("Impossible to retrieve the installer type")
+ installer = "Unknown"
+
+ return installer
+
+
+def get_pod_name(logger=None):
+ """
+ Get PoD Name from env variable NODE_NAME
+ """
+ try:
+ return os.environ['NODE_NAME']
+ except KeyError:
+ if logger:
+ logger.error(
+ "Unable to retrieve the POD name from the environment. "
+ "Using pod name 'unknown-pod'")
+ return "unknown-pod"
+
+
+def push_results_to_db(db_url, case_name, logger, pod_name,
+ git_version, payload):
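+ """
+ POST the given payload to the result database (db_url/results) as JSON
+ """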
+ url = db_url + "/results"
+ installer = get_installer_type(logger)
+ params = {"project_name": "functest", "case_name": case_name,
+ "pod_name": pod_name, "installer": installer,
+ "version": git_version, "details": payload}
+
+ headers = {'Content-Type': 'application/json'}
+ try:
+ r = requests.post(url, data=json.dumps(params), headers=headers)
+ if logger:
+ logger.debug(r)
+ return True
+ except:
+ print "Error:", sys.exc_info()[0]
+ return False
+
+
+def get_resolvconf_ns():
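+ """
+ Get the list of nameserver IP addresses declared in /etc/resolv.conf
+ """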
+ nameservers = []
+ rconf = open("/etc/resolv.conf", "r")
+ line = rconf.readline()
+ while line:
+ ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
+ if ip:
+ nameservers.append(ip.group())
+ line = rconf.readline()
+ return nameservers
+
+def getTestEnv(test, functest_yaml):
+ # get the config of the testcase based on functest_config.yaml
+ # 2 options:
+ # - test = test project, e.g. ovno
+ # - test = testcase, e.g. functest/odl
+ # look for the / to see if it is a test project or a testcase
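+ # e.g. (illustrative) with "test-dependencies: {doctor: {installer: fuel},
+ # functest: {odl: {scenario: odl}}}" in the yaml file,
+ # getTestEnv("doctor", ...) returns {'installer': 'fuel'} and
+ # getTestEnv("functest/odl", ...) returns {'scenario': 'odl'}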
+ try:
+ TEST_ENV = functest_yaml.get("test-dependencies")
+
+ if test.find("/") < 0:
+ config_test = TEST_ENV[test]
+ else:
+ test_split = test.split("/")
+ testproject = test_split[0]
+ testcase = test_split[1]
+ config_test = TEST_ENV[testproject][testcase]
+ except KeyError:
+ # if not defined in dependencies => no dependencies
+ config_test = ""
+ except:
+ print "Error getTestEnv:", sys.exc_info()[0]
+ # make sure the function always returns a value
+ config_test = ""
+
+ return config_test
+
+
+def get_ci_envvars():
+ """
+ Get the CI env variables
+ """
+ ci_env_var = {
+ "installer": os.environ.get('INSTALLER_TYPE'),
+ "scenario": os.environ.get('DEPLOY_SCENARIO')}
+ return ci_env_var
+
+
+def isTestRunnable(test, functest_yaml):
+ # By default we assume that all the tests are always runnable...
+ is_runnable = True
+ # Retrieve CI environment
+ ci_env = get_ci_envvars()
+ # Retrieve the test environment from the config file
+ test_env = getTestEnv(test, functest_yaml)
+
+ # if test_env not empty => dependencies to be checked
+ if test_env is not None and len(test_env) > 0:
+ # possible criteria = ["installer", "scenario"]
+ # consider the test criteria from the config file
+ # and compare them against the CI environment variables
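+ # e.g. (illustrative) with DEPLOY_SCENARIO=os-odl_l3-ovs-ha, a test
+ # declaring "scenario: odl" is runnable because re.search finds "odl"
+ # in that scenario string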
+ for criteria in test_env:
+ if re.search(test_env[criteria], ci_env[criteria]) is None:
+ # print "Test "+ test + " cannot be run on the environment"
+ is_runnable = False
+ return is_runnable
+
+
+def generateTestcaseList(functest_yaml):
+ test_list = ""
+ # get testcases
+ testcase_list = functest_yaml.get("test-dependencies")
+ projects = testcase_list.keys()
+
+ for project in projects:
+ testcases = testcase_list[project]
+ # 1 or 2 levels for testcases project[/case]
+ # if only project name without controller or scenario
+ # => shall be runnable on any controller/scenario
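+ # e.g. (illustrative) a 1-level entry like "promise: {installer: ...}"
+ # adds "promise" to the list, while a 2-level entry like
+ # "functest: {odl: {scenario: ...}}" adds "odl"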
+ if testcases is None:
+ test_list += project + " "
+ else:
+ for testcase in testcases:
+ if testcase == "installer" or testcase == "scenario":
+ # project (1 level)
+ if isTestRunnable(project, functest_yaml):
+ test_list += project + " "
+ else:
+ # project/testcase (2 levels)
+ thetest = project + "/" + testcase
+ if isTestRunnable(thetest, functest_yaml):
+ test_list += testcase + " "
+
+ # sort the list to execute the test in the right order
+ test_order_list = functest_yaml.get("test_exec_priority")
+ test_sorted_list = ""
+ for test in test_order_list:
+ if test_order_list[test] in test_list:
+ test_sorted_list += test_order_list[test] + " "
+
+ # create a file that could be consumed by run-test.sh
+ # this method is used only for CI
+ # so it can be run only in container
+ # reuse default conf directory to store the list of runnable tests
+ file = open("/home/opnfv/functest/conf/testcase-list.txt", 'w')
+ file.write(test_sorted_list)
+ file.close()
+
+ return test_sorted_list
+
diff --git a/testcases/tests/TestFunctestUtils.py b/testcases/tests/TestFunctestUtils.py
new file mode 100644
index 000000000..6f12e603d
--- /dev/null
+++ b/testcases/tests/TestFunctestUtils.py
@@ -0,0 +1,111 @@
+import unittest
+import os
+import sys
+import yaml
+
+sys.path.append("../")
+from functest_utils import getTestEnv, isTestRunnable, generateTestcaseList
+
+
+class TestFunctestUtils(unittest.TestCase):
+
+ def setUp(self):
+ os.environ["INSTALLER_TYPE"] = "fuel"
+ os.environ["DEPLOY_SCENARIO"] = "os-odl_l3-ovs-ha"
+
+ global functest_yaml
+
+ with open("../config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+ f.close()
+
+ def test_getTestEnv(self):
+
+ env_test = getTestEnv('ovno', functest_yaml)
+ self.assertEqual(env_test, {'scenario': 'ocl'})
+
+ env_test = getTestEnv('doctor', functest_yaml)
+ self.assertEqual(env_test, {'installer': 'fuel'})
+
+ env_test = getTestEnv('promise', functest_yaml)
+ self.assertEqual(env_test, {'installer': '(fuel)|(joid)'})
+
+ env_test = getTestEnv('functest/tempest', functest_yaml)
+ self.assertEqual(env_test, None)
+
+ env_test = getTestEnv('functest/vims', functest_yaml)
+ self.assertEqual(env_test, None)
+
+ env_test = getTestEnv('functest/odl', functest_yaml)
+ self.assertEqual(env_test, {'scenario': 'odl'})
+
+ env_test = getTestEnv('functest/onos', functest_yaml)
+ self.assertEqual(env_test, {'scenario': 'onos'})
+
+ env_test = getTestEnv('policy-test', functest_yaml)
+ self.assertEqual(env_test, {'scenario': 'odl'})
+
+ env_test = getTestEnv('sdnvpn/odl-vpn_service-tests', functest_yaml)
+ self.assertEqual(env_test,
+ {'installer': 'fuel', 'scenario': '(ovs)*(odl)'})
+
+ env_test = getTestEnv('sdnvpn/opnfv-yardstick-tc026-sdnvpn',
+ functest_yaml)
+ self.assertEqual(env_test,
+ {'installer': 'fuel', 'scenario': '(ovs)*(nosdn)'})
+
+ env_test = getTestEnv('foo', functest_yaml)
+ self.assertEqual(env_test, '')
+
+ def test_isTestRunnable(self):
+
+ test = isTestRunnable('ovno', functest_yaml)
+ self.assertFalse(test)
+
+ test = isTestRunnable('doctor', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('promise', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/onos', functest_yaml)
+ self.assertFalse(test)
+
+ test = isTestRunnable('functest/odl', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/vping', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/tempest', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/rally', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('functest/vims', functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('sdnvpn/odl-vpn_service-tests',
+ functest_yaml)
+ self.assertTrue(test)
+
+ test = isTestRunnable('sdnvpn/opnfv-yardstick-tc026-sdnvpn',
+ functest_yaml)
+ self.assertFalse(test)
+
+ def test_generateTestcaseList(self):
+
+ test = generateTestcaseList(functest_yaml)
+
+ expected_list = "vping tempest odl doctor promise policy-test odl-vpn_service-tests vims rally "
+ self.assertEqual(test, expected_list)
+
+ def tearDown(self):
+ os.environ["INSTALLER_TYPE"] = ""
+ os.environ["DEPLOY_SCENARIO"] = ""
+
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/testcases/vIMS/CI/clearwater.py b/testcases/vIMS/CI/clearwater.py
new file mode 100644
index 000000000..d2d189f60
--- /dev/null
+++ b/testcases/vIMS/CI/clearwater.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+# coding: utf8
+#######################################################################
+#
+# Copyright (c) 2015 Orange
+# valentin.boucher@orange.com
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+import sys
+import subprocess
+
+
+class clearwater:
+
+ def __init__(self, inputs={}, orchestrator=None, logger=None):
+ self.config = inputs
+ self.orchestrator = orchestrator
+ self.logger = logger
+ self.deploy = False
+
+ def set_orchestrator(self, orchestrator):
+ self.orchestrator = orchestrator
+
+ def set_flavor_id(self, flavor_id):
+ self.config['flavor_id'] = flavor_id
+
+ def set_image_id(self, image_id):
+ self.config['image_id'] = image_id
+
+ def set_agent_user(self, agent_user):
+ self.config['agent_user'] = agent_user
+
+ def set_external_network_name(self, external_network_name):
+ self.config['external_network_name'] = external_network_name
+
+ def set_public_domain(self, public_domain):
+ self.config['public_domain'] = public_domain
+
+ def deploy_vnf(self, blueprint, bp_name='clearwater', dep_name='clearwater-opnfv'):
+ if self.orchestrator:
+ self.dep_name = dep_name
+ self.orchestrator.download_upload_and_deploy_blueprint(
+ blueprint, self.config, bp_name, dep_name)
+ self.deploy = True
+ else:
+ if self.logger:
+ self.logger.error("Cloudify manager is down or not provided...")
+
+ def undeploy_vnf(self):
+ if self.orchestrator:
+ if self.deploy:
+ self.deploy = False
+ self.orchestrator.undeploy_deployment(self.dep_name)
+ else:
+ if self.logger:
+ self.logger.error("Clearwater is not deployed yet...")
+ else:
+ if self.logger:
+ self.logger.error("Cloudify manager is down or not provided...")
diff --git a/testcases/Controllers/ODL/CI/create_venv.sh b/testcases/vIMS/CI/create_venv.sh
index 5d0733a5a..15294f77b 100644..100755
--- a/testcases/Controllers/ODL/CI/create_venv.sh
+++ b/testcases/vIMS/CI/create_venv.sh
@@ -4,7 +4,8 @@
# It requires python2.7 and virtualenv packages installed
BASEDIR=`dirname $0`
-
+VENV_PATH=$1
+VENV_NAME="venv_cloudify"
function venv_install() {
if command -v virtualenv-2.7; then
virtualenv-2.7 $1
@@ -13,22 +14,22 @@ function venv_install() {
elif command -v virtualenv; then
virtualenv $1
else
- echo Please make sure virtualenv package is installed.
+ echo Cannot find virtualenv command.
return 1
fi
}
# exit when something goes wrong during venv install
set -e
-if [ ! -d "$BASEDIR/venv" ]; then
- venv_install $BASEDIR/venv
- echo "Virtualenv created."
+if [ ! -d "$VENV_PATH/$VENV_NAME" ]; then
+ venv_install $VENV_PATH/$VENV_NAME
+ echo "Virtualenv $VENV_NAME created."
fi
-if [ ! -f "$BASEDIR/venv/updated" -o $BASEDIR/requirements.pip -nt $BASEDIR/venv/updated ]; then
- source $BASEDIR/venv/bin/activate
+if [ ! -f "$VENV_PATH/$VENV_NAME/updated" -o $BASEDIR/requirements.pip -nt $VENV_PATH/$VENV_NAME/updated ]; then
+ source $VENV_PATH/$VENV_NAME/bin/activate
pip install -r $BASEDIR/requirements.pip
- touch $BASEDIR/venv/updated
+ touch $VENV_PATH/$VENV_NAME/updated
echo "Requirements installed."
deactivate
fi
diff --git a/testcases/vIMS/CI/orchestrator.py b/testcases/vIMS/CI/orchestrator.py
new file mode 100644
index 000000000..382fbd139
--- /dev/null
+++ b/testcases/vIMS/CI/orchestrator.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# coding: utf8
+#######################################################################
+#
+# Copyright (c) 2015 Orange
+# valentin.boucher@orange.com
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+import sys
+import subprocess
+import os
+import shutil
+import yaml
+from git import Repo
+
+
+class orchestrator:
+
+ def __init__(self, testcase_dir, inputs={}, logger=None):
+ self.testcase_dir = testcase_dir
+ self.blueprint_dir = testcase_dir + 'cloudify-manager-blueprint/'
+ self.input_file = 'inputs.yaml'
+ self.manager_blueprint = False
+ self.config = inputs
+ self.logger = logger
+ self.manager_up = False
+
+ def set_credentials(self, username, password, tenant_name, auth_url):
+ self.config['keystone_username'] = username
+ self.config['keystone_password'] = password
+ self.config['keystone_url'] = auth_url
+ self.config['keystone_tenant_name'] = tenant_name
+
+ def set_flavor_id(self, flavor_id):
+ self.config['flavor_id'] = flavor_id
+
+ def set_image_id(self, image_id):
+ self.config['image_id'] = image_id
+
+ def set_external_network_name(self, external_network_name):
+ self.config['external_network_name'] = external_network_name
+
+ def set_ssh_user(self, ssh_user):
+ self.config['ssh_user'] = ssh_user
+
+ def set_nameservers(self, nameservers):
+ if 0 < len(nameservers):
+ self.config['dns_subnet_1'] = nameservers[0]
+ if 1 < len(nameservers):
+ self.config['dns_subnet_2'] = nameservers[1]
+
+ def set_logger(self, logger):
+ self.logger = logger
+
+ def download_manager_blueprint(self, manager_blueprint_url, manager_blueprint_branch):
+ if self.manager_blueprint:
+ if self.logger:
+ self.logger.info(
+ "cloudify manager server blueprint is already downloaded !")
+ else:
+ if self.logger:
+ self.logger.info(
+ "Downloading the cloudify manager server blueprint")
+ download_result = download_blueprints(
+ manager_blueprint_url, manager_blueprint_branch, self.blueprint_dir)
+
+ if not download_result:
+ if self.logger:
+ self.logger.error("Failed to download manager blueprint")
+ exit(-1)
+ else:
+ self.manager_blueprint = True
+
+ def is_manager_up(self):
+ # renamed so it does not clash with the self.manager_up flag
+ return self.manager_up
+
+ def deploy_manager(self):
+ if self.manager_blueprint:
+ if self.logger:
+ self.logger.info("Writing the inputs file")
+ with open(self.blueprint_dir + "inputs.yaml", "w") as f:
+ f.write(yaml.dump(self.config, default_style='"'))
+ f.close()
+
+ # Ensure no ssh key file already exists
+ key_files = ["/.ssh/cloudify-manager-kp.pem",
+ "/.ssh/cloudify-agent-kp.pem"]
+ home = os.path.expanduser("~")
+
+ for key_file in key_files:
+ if os.path.isfile(home + key_file):
+ os.remove(home + key_file)
+
+ if self.logger:
+ self.logger.info("Launching the cloudify-manager deployment")
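+ # bootstrap sequence: activate the cloudify-cli virtualenv, initialise
+ # a local working directory, install the blueprint requirements and
+ # bootstrap the manager with a 30 minute timeout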
+ script = "set -e; "
+ script += "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
+ script += "cd " + self.testcase_dir + "; "
+ script += "cfy init -r; "
+ script += "cd cloudify-manager-blueprint; "
+ script += "cfy local create-requirements -o requirements.txt -p openstack-manager-blueprint.yaml; "
+ script += "pip install -r requirements.txt; "
+ script += "timeout 1800 cfy bootstrap --install-plugins -p openstack-manager-blueprint.yaml -i inputs.yaml; "
+ cmd = "/bin/bash -c '" + script + "'"
+ execute_command(cmd, self.logger)
+
+ if self.logger:
+ self.logger.info("Cloudify-manager server is UP !")
+
+ self.manager_up = True
+
+ def undeploy_manager(self):
+ if self.logger:
+ self.logger.info("Launching the cloudify-manager undeployment")
+
+ self.manager_up = False
+
+ script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
+ script += "cd " + self.testcase_dir + "; "
+ script += "cfy teardown -f --ignore-deployments; "
+ cmd = "/bin/bash -c '" + script + "'"
+ execute_command(cmd, self.logger)
+
+ if self.logger:
+ self.logger.info(
+ "Cloudify-manager server has been successfully removed!")
+
+ def download_upload_and_deploy_blueprint(self, blueprint, config, bp_name, dep_name):
+ if self.logger:
+ self.logger.info("Downloading the {0} blueprint".format(
+ blueprint['file_name']))
+ download_result = download_blueprints(blueprint['url'], blueprint['branch'],
+ self.testcase_dir + blueprint['destination_folder'])
+
+ if not download_result:
+ if self.logger:
+ self.logger.error(
+ "Failed to download blueprint {0}".format(blueprint['file_name']))
+ exit(-1)
+
+ if self.logger:
+ self.logger.info("Writing the inputs file")
+ with open(self.testcase_dir + blueprint['destination_folder'] + "/inputs.yaml", "w") as f:
+ f.write(yaml.dump(config, default_style='"'))
+ f.close()
+
+ if self.logger:
+ self.logger.info("Launching the {0} deployment".format(bp_name))
+ script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
+ script += "cd " + self.testcase_dir + \
+ blueprint['destination_folder'] + "; "
+ script += "cfy blueprints upload -b " + \
+ bp_name + " -p openstack-blueprint.yaml; "
+ script += "cfy deployments create -b " + bp_name + \
+ " -d " + dep_name + " --inputs inputs.yaml; "
+ script += "cfy executions start -w install -d " + dep_name + " --timeout 1800; "
+
+ cmd = "/bin/bash -c '" + script + "'"
+ execute_command(cmd, self.logger)
+
+ if self.logger:
+ self.logger.info("The deployment of {0} is ended".format(dep_name))
+
+ def undeploy_deployment(self, dep_name):
+ if self.logger:
+ self.logger.info("Launching the {0} undeployment".format(dep_name))
+ script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
+ script += "cd " + self.testcase_dir + "; "
+ script += "cfy executions start -w uninstall -d " + dep_name + " --timeout 1800 ; "
+ script += "cfy deployments delete -d " + dep_name + "; "
+
+ cmd = "/bin/bash -c '" + script + "'"
+ try:
+ execute_command(cmd, self.logger)
+ except:
+ if self.logger:
+ self.logger.error("Clearwater undeployment failed")
+
+
+def execute_command(cmd, logger):
+ """
+ Execute Linux command
+ """
+ if logger:
+ logger.debug('Executing command : {}'.format(cmd))
+ output_file = "output.txt"
+ f = open(output_file, 'w+')
+ p = subprocess.call(cmd, shell=True, stdout=f, stderr=subprocess.STDOUT)
+ f.close()
+ f = open(output_file, 'r')
+ result = f.read()
+ if result != "" and logger:
+ logger.debug(result)
+ if p == 0:
+ return True
+ else:
+ if logger:
+ logger.error("Error when executing command %s" % cmd)
+ exit(-1)
+
+
+def download_blueprints(blueprint_url, branch, dest_path):
+ if os.path.exists(dest_path):
+ shutil.rmtree(dest_path)
+ try:
+ Repo.clone_from(blueprint_url, dest_path, branch=branch)
+ return True
+ except:
+ return False
diff --git a/testcases/vIMS/CI/requirements.pip b/testcases/vIMS/CI/requirements.pip
new file mode 100644
index 000000000..9b9d0ba53
--- /dev/null
+++ b/testcases/vIMS/CI/requirements.pip
@@ -0,0 +1 @@
+cloudify==3.3
\ No newline at end of file
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
new file mode 100644
index 000000000..eae821ad4
--- /dev/null
+++ b/testcases/vIMS/CI/vIMS.py
@@ -0,0 +1,480 @@
+#!/usr/bin/python
+# coding: utf8
+#######################################################################
+#
+# Copyright (c) 2015 Orange
+# valentin.boucher@orange.com
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+########################################################################
+
+import os
+import time
+import subprocess
+import logging
+import argparse
+import yaml
+import pprint
+import sys
+import shutil
+import json
+import datetime
+from git import Repo
+import keystoneclient.v2_0.client as ksclient
+import glanceclient.client as glclient
+import novaclient.client as nvclient
+from neutronclient.v2_0 import client as ntclient
+
+from orchestrator import *
+from clearwater import *
+
+import urllib
+pp = pprint.PrettyPrinter(indent=4)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+args = parser.parse_args()
+
+""" logging configuration """
+logger = logging.getLogger('vIMS')
+logger.setLevel(logging.DEBUG)
+
+ch = logging.StreamHandler()
+if args.debug:
+ ch.setLevel(logging.DEBUG)
+else:
+ ch.setLevel(logging.INFO)
+formatter = logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+logger.addHandler(ch)
+
+REPO_PATH = os.environ['repos_dir'] + '/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+# Cloudify parameters
+VIMS_DIR = REPO_PATH + \
+ functest_yaml.get("general").get("directories").get("dir_vIMS")
+VIMS_DATA_DIR = functest_yaml.get("general").get(
+ "directories").get("dir_vIMS_data") + "/"
+VIMS_TEST_DIR = functest_yaml.get("general").get(
+ "directories").get("dir_repo_vims_test") + "/"
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+
+TENANT_NAME = functest_yaml.get("vIMS").get("general").get("tenant_name")
+TENANT_DESCRIPTION = functest_yaml.get("vIMS").get(
+ "general").get("tenant_description")
+IMAGES = functest_yaml.get("vIMS").get("general").get("images")
+
+CFY_MANAGER_BLUEPRINT = functest_yaml.get(
+ "vIMS").get("cloudify").get("blueprint")
+CFY_MANAGER_REQUIERMENTS = functest_yaml.get(
+ "vIMS").get("cloudify").get("requierments")
+CFY_INPUTS = functest_yaml.get("vIMS").get("cloudify").get("inputs")
+
+CW_BLUEPRINT = functest_yaml.get("vIMS").get("clearwater").get("blueprint")
+CW_DEPLOYMENT_NAME = functest_yaml.get("vIMS").get(
+ "clearwater").get("deployment-name")
+CW_INPUTS = functest_yaml.get("vIMS").get("clearwater").get("inputs")
+CW_REQUIERMENTS = functest_yaml.get("vIMS").get(
+ "clearwater").get("requierments")
+
+CFY_DEPLOYMENT_DURATION = 0
+CW_DEPLOYMENT_DURATION = 0
+
+
+def download_and_add_image_on_glance(glance, image_name, image_url):
+ dest_path = VIMS_DATA_DIR + "tmp/"
+ if not os.path.exists(dest_path):
+ os.makedirs(dest_path)
+ file_name = image_url.rsplit('/')[-1]
+ if not functest_utils.download_url(image_url, dest_path):
+ logger.error("Failed to download image %s" % file_name)
+ return False
+
+ image = functest_utils.create_glance_image(
+ glance, image_name, dest_path + file_name)
+ if not image:
+ logger.error("Failed to upload image on glance")
+ return False
+
+ return image
+
+
+def test_clearwater():
+
+ time.sleep(180)
+
+ script = "source " + VIMS_DATA_DIR + "venv_cloudify/bin/activate; "
+ script += "cd " + VIMS_DATA_DIR + "; "
+ script += "cfy deployments outputs -d " + CW_DEPLOYMENT_NAME + \
+ " | grep Value: | sed \"s/ *Value: //g\";"
+ cmd = "/bin/bash -c '" + script + "'"
+
+ try:
+ logger.debug("Trying to get clearwater nameserver IP ... ")
+ dns_ip = os.popen(cmd).read()
+ dns_ip = dns_ip.splitlines()[0]
+ except:
+ logger.error("Unable to retrieve the IP of the DNS server !")
+
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vIMS functional test Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
+ nameservers = functest_utils.get_resolvconf_ns()
+ resolvconf = ""
+ for ns in nameservers:
+ resolvconf += "\nnameserver " + ns
+
+ if dns_ip != "":
+ script = 'echo -e "nameserver ' + dns_ip + resolvconf + '" > /etc/resolv.conf; '
+ script += 'source /etc/profile.d/rvm.sh; '
+ script += 'cd ' + VIMS_TEST_DIR + '; '
+ script += 'rake test[' + \
+ CW_INPUTS["public_domain"] + '] SIGNUP_CODE="secret"'
+
+ cmd = "/bin/bash -c '" + script + "'"
+ output_file = "output.txt"
+ f = open(output_file, 'w+')
+ p = subprocess.call(cmd, shell=True, stdout=f,
+ stderr=subprocess.STDOUT)
+ f.close()
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ logger.info("vIMS functional test duration:'%s'" % duration)
+ f = open(output_file, 'r')
+ result = f.read()
+ if result != "" and logger:
+ logger.debug(result)
+
+ vims_test_result = ""
+ try:
+ logger.debug("Trying to load test results")
+ with open(VIMS_TEST_DIR + "temp.json") as f:
+ vims_test_result = json.load(f)
+ f.close()
+ except:
+ logger.error("Unable to retrieve test results")
+
+ if vims_test_result != "":
+ if args.report:
+ logger.debug("Pushing results into the DB...")
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ functest_utils.push_results_to_db(db_url=TEST_DB, case_name="vIMS",
+ logger=logger, pod_name=functest_utils.get_pod_name(logger), git_version=git_version,
+ payload={'orchestrator': {'duration': CFY_DEPLOYMENT_DURATION,
+ 'result': ""},
+ 'vIMS': {'duration': CW_DEPLOYMENT_DURATION,
+ 'result': ""},
+ 'sig_test': {'duration': duration,
+ 'result': vims_test_result}})
+ try:
+ os.remove(VIMS_TEST_DIR + "temp.json")
+ except:
+ logger.error("Deleting file failed")
+
+
+def main():
+
+ ################ GENERAL INITIALISATION ################
+
+ if not os.path.exists(VIMS_DATA_DIR):
+ os.makedirs(VIMS_DATA_DIR)
+
+ ks_creds = functest_utils.get_credentials("keystone")
+ nv_creds = functest_utils.get_credentials("nova")
+ nt_creds = functest_utils.get_credentials("neutron")
+
+ logger.info("Prepare OpenStack platform (create tenant and user)")
+ keystone = ksclient.Client(**ks_creds)
+
+ user_id = functest_utils.get_user_id(keystone, ks_creds['username'])
+ if user_id == '':
+ logger.error("Error : Failed to get id of %s user" %
+ ks_creds['username'])
+ exit(-1)
+
+ tenant_id = functest_utils.create_tenant(
+ keystone, TENANT_NAME, TENANT_DESCRIPTION)
+ if tenant_id == '':
+ logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
+ exit(-1)
+
+ roles_name = ["admin", "Admin"]
+ role_id = ''
+ for role_name in roles_name:
+ if role_id == '':
+ role_id = functest_utils.get_role_id(keystone, role_name)
+
+ if role_id == '':
+ logger.error("Error : Failed to get id for %s role" % role_name)
+
+ if not functest_utils.add_role_user(keystone, user_id, role_id, tenant_id):
+ logger.error("Error : Failed to add %s on tenant" %
+ ks_creds['username'])
+
+ user_id = functest_utils.create_user(
+ keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
+ if user_id == '':
+ logger.error("Error : Failed to create %s user" % TENANT_NAME)
+
+ logger.info("Update OpenStack credentials")
+ ks_creds.update({
+ "username": TENANT_NAME,
+ "password": TENANT_NAME,
+ "tenant_name": TENANT_NAME,
+ })
+
+ nt_creds.update({
+ "tenant_name": TENANT_NAME,
+ })
+
+ nv_creds.update({
+ "project_id": TENANT_NAME,
+ })
+
+ logger.info("Upload the required OS images if they do not exist")
+ glance_endpoint = keystone.service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
+ glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)
+
+ for img in IMAGES.keys():
+ image_name = IMAGES[img]['image_name']
+ image_url = IMAGES[img]['image_url']
+
+ image_id = functest_utils.get_image_id(glance, image_name)
+
+ if image_id == '':
+ logger.info("""%s image does not exist in the Glance repository.
+ Downloading it and uploading it to Glance...""" % image_name)
+ image_id = download_and_add_image_on_glance(
+ glance, image_name, image_url)
+
+ if image_id == '':
+ logger.error(
+ "Error : Failed to find or upload required OS image for this deployment")
+ exit(-1)
+
+ nova = nvclient.Client("2", **nv_creds)
+
+ logger.info("Update security group quota for this tenant")
+ neutron = ntclient.Client(**nt_creds)
+ if not functest_utils.update_sg_quota(neutron, tenant_id, 50, 100):
+ logger.error(
+ "Failed to update security group quota for tenant %s" % TENANT_NAME)
+ exit(-1)
+
+ logger.info("Update cinder quota for this tenant")
+ from cinderclient import client as cinderclient
+
+ creds_cinder = functest_utils.get_credentials("cinder")
+ cinder_client = cinderclient.Client('1', creds_cinder['username'],
+ creds_cinder['api_key'],
+ creds_cinder['project_id'],
+ creds_cinder['auth_url'],
+ service_type="volume")
+ if not functest_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150):
+ logger.error("Failed to update cinder quota for tenant %s" %
+ TENANT_NAME)
+ exit(-1)
+
+ ################ CLOUDIFY INITIALISATION ################
+
+ cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger)
+
+ cfy.set_credentials(username=ks_creds['username'], password=ks_creds[
+ 'password'], tenant_name=ks_creds['tenant_name'], auth_url=ks_creds['auth_url'])
+
+ logger.info("Collect flavor id for cloudify manager server")
+ nova = nvclient.Client("2", **nv_creds)
+
+ flavor_name = "m1.medium"
+ flavor_id = functest_utils.get_flavor_id(nova, flavor_name)
+ for requirement in CFY_MANAGER_REQUIERMENTS:
+ if requirement == 'ram_min':
+ flavor_id = functest_utils.get_flavor_id_by_ram_range(
+ nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196)
+
+ if flavor_id == '':
+ logger.error(
+ "Failed to find flavor %s. Trying with the default RAM range requirement..." % flavor_name)
+ flavor_id = functest_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
+
+ if flavor_id == '':
+ logger.error(
+ "Failed to find a suitable flavor for this deployment")
+ exit(-1)
+
+ cfy.set_flavor_id(flavor_id)
+
+ image_name = "centos_7"
+ image_id = functest_utils.get_image_id(glance, image_name)
+ for requirement in CFY_MANAGER_REQUIERMENTS:
+ if requirement == 'os_image':
+ image_id = functest_utils.get_image_id(
+ glance, CFY_MANAGER_REQUIERMENTS['os_image'])
+
+ if image_id == '':
+ logger.error(
+ "Error : Failed to find required OS image for cloudify manager")
+ exit(-1)
+
+ cfy.set_image_id(image_id)
+
+ ext_net = functest_utils.get_external_net(neutron)
+ if not ext_net:
+ logger.error("Failed to get external network")
+ exit(-1)
+
+ cfy.set_external_network_name(ext_net)
+
+ ns = functest_utils.get_resolvconf_ns()
+ if ns:
+ cfy.set_nameservers(ns)
+
+ logger.info("Prepare virtualenv for cloudify-cli")
+ cmd = "chmod +x " + VIMS_DIR + "create_venv.sh"
+ functest_utils.execute_command(cmd, logger)
+ cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR
+ functest_utils.execute_command(cmd, logger)
+
+ cfy.download_manager_blueprint(
+ CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])
+
+ ################ CLOUDIFY DEPLOYMENT ################
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("Cloudify deployment Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
+
+ cfy.deploy_manager()
+
+ global CFY_DEPLOYMENT_DURATION
+ end_time_ts = time.time()
+ CFY_DEPLOYMENT_DURATION = round(end_time_ts - start_time_ts, 1)
+ logger.info("Cloudify deployment duration:'%s'" % CFY_DEPLOYMENT_DURATION)
+
+ ################ CLEARWATER INITIALISATION ################
+
+ cw = clearwater(CW_INPUTS, cfy, logger)
+
+ logger.info("Collect flavor id for all Clearwater VMs")
+ nova = nvclient.Client("2", **nv_creds)
+
+ flavor_name = "m1.small"
+ flavor_id = functest_utils.get_flavor_id(nova, flavor_name)
+ for requirement in CW_REQUIERMENTS:
+ if requirement == 'ram_min':
+ flavor_id = functest_utils.get_flavor_id_by_ram_range(
+ nova, CW_REQUIERMENTS['ram_min'], 8196)
+
+ if flavor_id == '':
+ logger.error(
+ "Failed to find flavor %s. Trying with the default RAM range requirement..." % flavor_name)
+ flavor_id = functest_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
+
+ if flavor_id == '':
+ logger.error(
+ "Failed to find a suitable flavor for this deployment")
+ exit(-1)
+
+ cw.set_flavor_id(flavor_id)
+
+ image_name = "ubuntu_14.04"
+ image_id = functest_utils.get_image_id(glance, image_name)
+ for requirement in CW_REQUIERMENTS:
+ if requirement == 'os_image':
+ image_id = functest_utils.get_image_id(
+ glance, CW_REQUIERMENTS['os_image'])
+
+ if image_id == '':
+ logger.error(
+ "Error : Failed to find the required OS image for the Clearwater VMs")
+ exit(-1)
+
+ cw.set_image_id(image_id)
+
+ ext_net = functest_utils.get_external_net(neutron)
+ if not ext_net:
+ logger.error("Failed to get external network")
+ exit(-1)
+
+ cw.set_external_network_name(ext_net)
+
+ ################ CLEARWATER DEPLOYMENT ################
+
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vIMS VNF deployment Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
+
+ cw.deploy_vnf(CW_BLUEPRINT)
+
+ global CW_DEPLOYMENT_DURATION
+ end_time_ts = time.time()
+ CW_DEPLOYMENT_DURATION = round(end_time_ts - start_time_ts, 1)
+ logger.info("vIMS VNF deployment duration:'%s'" % CW_DEPLOYMENT_DURATION)
+
+ ################ CLEARWATER TEST ################
+
+ test_clearwater()
+
+ ########### CLEARWATER UNDEPLOYMENT ############
+
+ cw.undeploy_vnf()
+
+ ############ CLOUDIFY UNDEPLOYMENT #############
+
+ cfy.undeploy_manager()
+
+ ############### GENERAL CLEANUP ################
+
+ ks_creds = functest_utils.get_credentials("keystone")
+
+ keystone = ksclient.Client(**ks_creds)
+
+ logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name'])
+ tenant_id = functest_utils.get_tenant_id(
+ keystone, CFY_INPUTS['keystone_tenant_name'])
+ if tenant_id == '':
+ logger.error("Error : Failed to get id of %s tenant" %
+ CFY_INPUTS['keystone_tenant_name'])
+ else:
+ if not functest_utils.delete_tenant(keystone, tenant_id):
+ logger.error("Error : Failed to remove %s tenant" %
+ CFY_INPUTS['keystone_tenant_name'])
+
+ logger.info("Removing %s user .." % CFY_INPUTS['keystone_username'])
+ user_id = functest_utils.get_user_id(
+ keystone, CFY_INPUTS['keystone_username'])
+ if user_id == '':
+ logger.error("Error : Failed to get id of %s user" %
+ CFY_INPUTS['keystone_username'])
+ else:
+ if not functest_utils.delete_user(keystone, user_id):
+ logger.error("Error : Failed to remove %s user" %
+ CFY_INPUTS['keystone_username'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testcases/vIMS/vIMS.md b/testcases/vIMS/vIMS.md
new file mode 100644
index 000000000..68f86d9fa
--- /dev/null
+++ b/testcases/vIMS/vIMS.md
@@ -0,0 +1,3 @@
+# vIMS README
+
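+The vIMS testcase (vIMS.py) deploys a Cloudify manager on the target
+OpenStack platform, deploys the Clearwater vIMS blueprint on top of it,
+runs the Clearwater signaling tests and, when the -r/--report option is
+given, pushes the results to the functest result database.
+orchestrator.py and clearwater.py contain the Cloudify and Clearwater
+helpers used by vIMS.py.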
+
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing.py
index b81ebb881..be0d2341a 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing.py
@@ -1,277 +1,482 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved. This program and the accompanying materials
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# This script boots the VM1 and allocates IP address from Nova
+# 0.1: This script boots the VM1 and allocates IP address from Nova
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
+# 0.2: measure test duration and publish results under json format
#
-# Note: this is script works only with Ubuntu image, not with Cirros image
#
-import os, time, subprocess, logging, argparse, yaml
+import os
+import time
+import argparse
import pprint
-import novaclient.v2.client as novaclient
-import neutronclient.client as neutronclient
-#import novaclient.v1_1.client as novaclient
-import cinderclient.v1.client as cinderclient
-pp = pprint.PrettyPrinter(indent=4)
+import sys
+import logging
+import yaml
+import datetime
+from novaclient import client as novaclient
+from neutronclient.v2_0 import client as neutronclient
+from keystoneclient.v2_0 import client as keystoneclient
+from glanceclient import client as glanceclient
-EXIT_CODE = -1
-HOME = os.environ['HOME']+"/"
-with open(HOME+'.functest/functest.yaml') as f:
- functest_yaml = yaml.safe_load(f)
-f.close()
+pp = pprint.PrettyPrinter(indent=4)
-PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
-NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
-NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get("image_name")
-NEUTRON_NET_NAME = functest_yaml.get("general").get("openstack").get("neutron_net_name")
-FLAVOR = functest_yaml.get("vping").get("vm_flavor")
+parser = argparse.ArgumentParser()
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
args = parser.parse_args()
""" logging configuration """
+
logger = logging.getLogger('vPing')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
+
if args.debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+formatter = logging.Formatter('%(asctime)s - %(name)s'
+ '- %(levelname)s - %(message)s')
+
ch.setFormatter(formatter)
logger.addHandler(ch)
+REPO_PATH = os.environ['repos_dir'] + '/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+f.close()
+
+HOME = os.environ['HOME'] + "/"
+# vPing parameters
+VM_BOOT_TIMEOUT = 180
+VM_DELETE_TIMEOUT = 100
+PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
+NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
+IP_1 = functest_yaml.get("vping").get("ip_1")
+IP_2 = functest_yaml.get("vping").get("ip_2")
+# GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
+# get("openstack").get("image_name")
+GLANCE_IMAGE_NAME = "functest-vping"
+GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
+ get("openstack").get("image_file_name")
+GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
+ get("openstack").get("image_disk_format")
+GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
+ get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+
+
+FLAVOR = functest_yaml.get("vping").get("vm_flavor")
+
+# NEUTRON Private Network parameters
+
+NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("vping"). \
+ get("vping_private_net_name")
+
+NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("vping"). \
+ get("vping_private_subnet_name")
+
+NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("vping"). \
+ get("vping_private_subnet_cidr")
+
+NEUTRON_ROUTER_NAME = functest_yaml.get("vping"). \
+ get("vping_router_name")
def pMsg(value):
+
"""pretty printing"""
pp.pprint(value)
-def print_title(title):
- """Print titles"""
- print "\n"+"#"*40+"\n# "+title+"\n"+"#"*40+"\n"
-
-def get_credentials(service):
- """Returns a creds dictionary filled with the following keys:
- * username
- * password/api_key (depending on the service)
- * tenant_name/project_id (depending on the service)
- * auth_url
- :param service: a string indicating the name of the service
- requesting the credentials.
- """
- #TODO: get credentials from the openrc file
- creds = {}
- # Unfortunately, each of the OpenStack client will request slightly
- # different entries in their credentials dict.
- if service.lower() in ("nova", "cinder"):
- password = "api_key"
- tenant = "project_id"
- else:
- password = "password"
- tenant = "tenant_name"
- # The most common way to pass these info to the script is to do it through
- # environment variables.
- creds.update({
- "username": os.environ.get('OS_USERNAME', "admin"), # add your cloud username details
- password: os.environ.get("OS_PASSWORD", 'admin'), # add password
- "auth_url": os.environ.get("OS_AUTH_URL","http://192.168.20.71:5000/v2.0"), # Auth URL
- tenant: os.environ.get("OS_TENANT_NAME", "admin"),
- })
+def waitVmActive(nova, vm):
- return creds
+ # sleep and wait for VM status change
+ sleep_time = 3
+ count = VM_BOOT_TIMEOUT / sleep_time
+ while True:
+ status = functest_utils.get_instance_status(nova, vm)
+ logger.debug("Status: %s" % status)
+ if status == "ACTIVE":
+ return True
+ if status == "ERROR" or status == "error":
+ return False
+ if count == 0:
+ logger.debug("Booting a VM timed out...")
+ return False
+ count -= 1
+ time.sleep(sleep_time)
+ return False
+
+
+def waitVmDeleted(nova, vm):
+
+ # sleep and wait for VM status change
+ sleep_time = 3
+ count = VM_DELETE_TIMEOUT / sleep_time
+ while True:
+ status = functest_utils.get_instance_status(nova, vm)
+ if not status:
+ return True
+ elif count == 0:
+ logger.debug("Timeout")
+ return False
+ else:
+ # return False
+ count -= 1
+ time.sleep(sleep_time)
+ return False
-def get_server(creds, servername):
- nova = novaclient.Client(**creds)
- return nova.servers.find(name=servername)
+def create_private_neutron_net(neutron):
+ neutron.format = 'json'
+ logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
+ network_id = functest_utils. \
+ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
-def waitVmActive(nova,vm):
- # sleep and wait for VM status change
- while get_status(nova,vm) != "ACTIVE":
- time.sleep(3)
- logger.debug("Status: %s" % vm.status)
- logger.debug("Status: %s" % vm.status)
+ if not network_id:
+ return False
+ logger.debug("Network '%s' created successfully" % network_id)
+ logger.debug('Creating Subnet....')
+ subnet_id = functest_utils. \
+ create_neutron_subnet(neutron,
+ NEUTRON_PRIVATE_SUBNET_NAME,
+ NEUTRON_PRIVATE_SUBNET_CIDR,
+ network_id)
+ if not subnet_id:
+ return False
+ logger.debug("Subnet '%s' created successfully" % subnet_id)
+ logger.debug('Creating Router...')
+ router_id = functest_utils. \
+ create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
-def get_status(nova,vm):
- vm = nova.servers.get(vm.id)
- return vm.status
+ if not router_id:
+ return False
+
+ logger.debug("Router '%s' created successfully" % router_id)
+ logger.debug('Adding router to subnet...')
+
+ result = functest_utils.add_interface_router(neutron, router_id, subnet_id)
+
+ if not result:
+ return False
+
+ logger.debug("Interface added successfully.")
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
+ return network_dic
+
+
+def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2=None):
+
+ # delete both VMs
+ logger.info("Cleaning up...")
+ logger.debug("Deleting image...")
+ if not functest_utils.delete_glance_image(nova, image_id):
+ logger.error("Error deleting the glance image")
+
+ vm1 = functest_utils.get_instance_by_name(nova, NAME_VM_1)
+ if vm1:
+ logger.debug("Deleting '%s'..." % NAME_VM_1)
+ nova.servers.delete(vm1)
+ # wait until VMs are deleted
+ if not waitVmDeleted(nova, vm1):
+ logger.error(
+ "Instance '%s' cannot be deleted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova, vm1)))
+ else:
+ logger.debug("Instance %s terminated." % NAME_VM_1)
+
+ vm2 = functest_utils.get_instance_by_name(nova, NAME_VM_2)
+
+ if vm2:
+ logger.debug("Deleting '%s'..." % NAME_VM_2)
+ vm2 = nova.servers.find(name=NAME_VM_2)
+ nova.servers.delete(vm2)
+
+ if not waitVmDeleted(nova, vm2):
+ logger.error(
+ "Instance '%s' cannot be deleted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova, vm2)))
+ else:
+ logger.debug("Instance %s terminated." % NAME_VM_2)
+
+ # delete created network
+ logger.info("Deleting network '%s'..." % NEUTRON_PRIVATE_NET_NAME)
+ net_id = network_dic["net_id"]
+ subnet_id = network_dic["subnet_id"]
+ router_id = network_dic["router_id"]
+
+ if not functest_utils.delete_neutron_port(neutron, port_id1):
+ logger.error("Unable to remove port '%s'" % port_id1)
+ return False
+ logger.debug("Port '%s' removed successfully" % port_id1)
+
+ # port_id2 is None when the second port was never created
+ if port_id2 is not None:
+ if not functest_utils.delete_neutron_port(neutron, port_id2):
+ logger.error("Unable to remove port '%s'" % port_id2)
+ return False
+ logger.debug("Port '%s' removed successfully" % port_id2)
+
+ if not functest_utils.remove_interface_router(neutron, router_id,
+ subnet_id):
+ logger.error("Unable to remove subnet '%s' from router '%s'" % (
+ subnet_id, router_id))
+ return False
+
+ logger.debug("Interface removed successfully")
+ if not functest_utils.delete_neutron_router(neutron, router_id):
+ logger.error("Unable to delete router '%s'" % router_id)
+ return False
+
+ logger.debug("Router deleted successfully")
+
+ if not functest_utils.delete_neutron_subnet(neutron, subnet_id):
+ logger.error("Unable to delete subnet '%s'" % subnet_id)
+ return False
+
+ logger.debug(
+ "Subnet '%s' deleted successfully" % NEUTRON_PRIVATE_SUBNET_NAME)
+
+ if not functest_utils.delete_neutron_net(neutron, net_id):
+ logger.error("Unable to delete network '%s'" % net_id)
+ return False
+
+ logger.debug(
+ "Network '%s' deleted successfully" % NEUTRON_PRIVATE_NET_NAME)
+
+ return True
def main():
- creds = get_credentials("nova")
- nova = novaclient.Client(**creds)
- cinder = cinderclient.Client(**creds)
-
- """
- # print images and server resources
- # print nova_images
- print_title("images list")
- pMsg(nova.images.list())
- print_title("servers list")
- pMsg(nova.servers.list())
- """
- # Check if the given image is created
- images=nova.images.list()
- image_found = False
- for image in images:
- if image.name == GLANCE_IMAGE_NAME:
- logger.info("Glance image found '%s'" %image.name)
- image_found = True
- if not image_found:
- logger.error("ERROR: Glance image %s not found." % GLANCE_IMAGE_NAME)
+
+ creds_nova = functest_utils.get_credentials("nova")
+ nova_client = novaclient.Client('2',**creds_nova)
+ creds_neutron = functest_utils.get_credentials("neutron")
+ neutron_client = neutronclient.Client(**creds_neutron)
+ creds_keystone = functest_utils.get_credentials("keystone")
+ keystone_client = keystoneclient.Client(**creds_keystone)
+ glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
+ glance_client = glanceclient.Client(1, glance_endpoint,
+ token=keystone_client.auth_token)
+ EXIT_CODE = -1
+
+ image = None
+ flavor = None
+
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
+ GLANCE_IMAGE_PATH))
+ image_id = functest_utils.create_glance_image(glance_client,
+ GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+ if not image_id:
+ logger.error("Failed to create a Glance image...")
+ exit(-1)
+
+ # Check if the given image exists
+ image = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
+ if image == '':
+ logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
logger.info("Available images are: ")
- pMsg(nova.images.list())
+ pMsg(nova_client.images.list())
+ exit(-1)
+
+ network_dic = create_private_neutron_net(neutron_client)
+
+ if not network_dic:
+ logger.error(
+ "There has been a problem when creating the neutron network")
exit(-1)
- # Check if the given neutron network is created
- networks=nova.networks.list()
- network_found = False
- for net in networks:
- if net.human_id == NEUTRON_NET_NAME:
- logger.info("Network found '%s'" %net.human_id)
- network_found = True
- if not network_found:
- logger.error("Neutron network %s not found." % NEUTRON_NET_NAME)
- logger.info("Available networks are: ")
- pMsg(nova.networks.list())
+ network_id = network_dic["net_id"]
+
+ # Check if the given flavor exists
+
+ try:
+ flavor = nova_client.flavors.find(name=FLAVOR)
+ logger.info("Flavor found '%s'" % FLAVOR)
+ except:
+ logger.error("Flavor '%s' not found." % FLAVOR)
+ logger.info("Available flavors are: ")
+ pMsg(nova_client.flavors.list())
exit(-1)
- servers=nova.servers.list()
+ # Deleting instances if they exist
+
+ servers = nova_client.servers.list()
for server in servers:
if server.name == NAME_VM_1 or server.name == NAME_VM_2:
- logger.info("Instance %s found. Deleting..." %server.name)
+ logger.info("Instance %s found. Deleting..." % server.name)
server.delete()
-
-
# boot VM 1
# basic boot
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- m = NAME_VM_1
- f = nova.flavors.find(name = FLAVOR)
- i = nova.images.find(name = GLANCE_IMAGE_NAME)
- n = nova.networks.find(label = NEUTRON_NET_NAME)
- u = "#cloud-config\npassword: opnfv\nchpasswd: { expire: False }\nssh_pwauth: True"
- #k = "demo-key"
+ # tune (e.g. flavor, images, network) to your specific
+ # openstack configuration here
+ # we consider start time at VM1 booting
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vPing Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
# create VM
- logger.info("Creating instance '%s'..." %m)
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(m,f,i,n,u))
- vm1 = nova.servers.create(
- name = m,
- flavor = f,
- image = i,
- nics = [{"net-id": n.id}],
- #key_name = k,
- userdata = u,
- )
+ logger.debug("Creating port 'vping-port-1' with IP %s..." % IP_1)
+ port_id1 = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-1", network_id,
+ IP_1)
+ if not port_id1:
+ logger.error("Unable to create port.")
+ exit(-1)
- #pMsg(vm1)
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_1, IP_1))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
+ "network=%s \n" % (NAME_VM_1, flavor, image, network_id))
+ vm1 = nova_client.servers.create(
+ name=NAME_VM_1,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
+ nics=[{"port-id": port_id1}]
+ )
+ # wait until VM status is active
+ if not waitVmActive(nova_client, vm1):
- #wait until VM status is active
- waitVmActive(nova,vm1)
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova_client, vm1)))
+ cleanup(nova_client, neutron_client, image_id, network_dic, port_id1)
+ return (EXIT_CODE)
+ else:
+ logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)
- #retrieve IP of first VM
- logger.debug("Fetching IP...")
- server = get_server(creds, m)
- #pMsg(server.networks)
- # theoretically there is only one IP address so we take the first element of the table
+ # Retrieve IP of first VM
+ # logger.debug("Fetching IP...")
+ # server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
+ # theoretically there is only one IP address so we take the
+ # first element of the table
# Dangerous! To be improved!
- test_ip = server.networks.get(NEUTRON_NET_NAME)[0]
- logger.debug("Instance '%s' got %s" %(m,test_ip))
- test_cmd = '/tmp/vping.sh %s'%test_ip
-
+ # test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
+ test_ip = IP_1
+ logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))
# boot VM 2
# we will boot then execute a ping script with cloud-init
# the long chain corresponds to the ping procedure converted with base 64
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- m = NAME_VM_2
- f = nova.flavors.find(name = FLAVOR)
- i = nova.images.find(name = GLANCE_IMAGE_NAME)
- n = nova.networks.find(label = NEUTRON_NET_NAME)
- # use base 64 format becaus bad surprises with sh script with cloud-init but script is just pinging
- #k = "demo-key"
- u = "#cloud-config\npassword: opnfv\nchpasswd: { expire: False }\nssh_pwauth: True\nwrite_files:\n- encoding: b64\n path: /tmp/vping.sh\n permissions: '0777'\n owner: root:root\n content: IyEvYmluL2Jhc2gKCndoaWxlIHRydWU7IGRvCiBwaW5nIC1jIDEgJDEgMj4mMSA+L2Rldi9udWxsCiBSRVM9JD8KIGlmIFsgIlokUkVTIiA9ICJaMCIgXSA7IHRoZW4KICBlY2hvICJ2UGluZyBPSyIKICBzbGVlcCAxMAogIHN1ZG8gc2h1dGRvd24gLWggbm93CiAgYnJlYWsKIGVsc2UKICBlY2hvICJ2UGluZyBLTyIKIGZpCiBzbGVlcCAxCmRvbmUK\nruncmd:\n - [ sh, -c, %s]"%test_cmd
+ # tune (e.g. flavor, images, network) to your specific openstack
+ # configuration here
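+ # cloud-init userdata: VM2 pings VM1's fixed IP in a loop and writes
+ # "vPing OK" to its console once the ping succeeds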
+ u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " \
+ "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " \
+ "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip
+
# create VM
- logger.info("Creating instance '%s'..." %m)
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(m,f,i,n,u))
- vm2 = nova.servers.create(
- name = m,
- flavor = f,
- image = i,
- nics = [{"net-id": n.id}],
- #key_name = k,
- userdata = u,
- #security_groups = s,
- #config_drive = v.id
- )
- # The injected script will shutdown the VM2 when the ping works
- # The console-log method is more consistent but doesn't work yet
+ logger.debug("Creating port 'vping-port-2' with IP %s..." % IP_2)
+ port_id2 = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-2", network_id,
+ IP_2)
- waitVmActive(nova,vm2)
+ if not port_id2:
+ logger.error("Unable to create port.")
+ exit(-1)
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_2, IP_2))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
+ "\n userdata= \n%s" % (
+ NAME_VM_2, flavor, image, network_id, u))
+ vm2 = nova_client.servers.create(
+ name=NAME_VM_2,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
+ nics=[{"port-id": port_id2}],
+ userdata=u
+ )
- logger.info("Waiting for ping, timeout is %d sec..." % PING_TIMEOUT)
- sec = 0
- while True:
- status = get_status(nova, vm2)
- #print status
- if status == "SHUTOFF" :
- EXIT_CODE = 0
- logger.info("vPing SUCCESSFUL after %d sec" % sec)
- break
- if sec == PING_TIMEOUT:
- logger.info("Timeout. vPing UNSUCCESSFUL.")
- break
- time.sleep(1)
- sec+=1
+ if not waitVmActive(nova_client, vm2):
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova_client, vm2)))
+ cleanup(nova_client, neutron_client, image_id, network_dic, port_id1, port_id2)
+ return (EXIT_CODE)
+ else:
+ logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)
- """
- # I leave this here until we fix the console-log output
+ logger.info("Waiting for ping...")
sec = 0
console_log = vm2.get_console_output()
- while not ("vPing" in console_log):
+
+ while True:
time.sleep(1)
console_log = vm2.get_console_output()
- print "--"+console_log
-
+ # print "--"+console_log
# report if the test is failed
- if "vPing" in console_log:
- pMsg("vPing is OK")
+ if "vPing OK" in console_log:
+ logger.info("vPing detected!")
+
+ # we consider start time at VM1 booting
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ logger.info("vPing duration:'%s'" % duration)
+ EXIT_CODE = 0
break
- else:
- pMsg("no vPing detected....")
- sec+=1
- if sec == PING_TIMEOUT:
+ elif sec == PING_TIMEOUT:
+ logger.info("Timeout reached.")
break
- """
+ else:
+ logger.debug("No vPing detected...")
+ sec += 1
- # delete both VMs
- logger.debug("Deleting Instances...")
- nova.servers.delete(vm1)
- logger.debug("Instance %s terminated." % NAME_VM_1)
- nova.servers.delete(vm2)
- logger.debug("Instance %s terminated." % NAME_VM_2)
+ cleanup(nova_client, neutron_client, image_id, network_dic, port_id1, port_id2)
+ test_status = "NOK"
+ if EXIT_CODE == 0:
+ logger.info("vPing OK")
+ test_status = "OK"
+ else:
+ logger.error("vPing FAILED")
+
+ try:
+ if args.report:
+ logger.debug("Push result into DB")
+ # TODO check path result for the file
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ pod_name = functest_utils.get_pod_name(logger)
+ functest_utils.push_results_to_db(TEST_DB,
+ "vPing",
+ logger, pod_name, git_version,
+ payload={'timestart': start_time_ts,
+ 'duration': duration,
+ 'status': test_status})
+ # with open("vPing-result.json", "w") as outfile:
+ # json.dump({'timestart': start_time_ts, 'duration': duration,
+ # 'status': test_status}, outfile, indent=4)
+ except:
+ logger.error("Error pushing results into Database")
- logger.debug("EXIT_CODE=%s" % EXIT_CODE)
exit(EXIT_CODE)
-
if __name__ == '__main__':
main()