From 68d29b6f6e71df6b4f177cd417f98d9e977f8893 Mon Sep 17 00:00:00 2001
From: "jose.lausuch"
Date: Tue, 17 May 2016 21:36:56 +0200
Subject: Change 'testcases' directory structure

JIRA: FUNCTEST-226

- Remove all 'CI' subdirs
- Remove VIM dir and have OpenStack dir within /testcases/
- Split rally and tempest in 2 different dirs

Change-Id: Icfc76d18a84f7a18d93ae1a5ec7dc7a560bb7ce9
Signed-off-by: jose.lausuch
---
 ci/config_functest.yaml | 13 +-
 ci/exec_test.sh | 36 +-
 ci/tier_handler.py | 4 +-
 .../CI/custom_tests/neutron/010__networks.robot | 56 ---
 .../ODL/CI/custom_tests/neutron/020__subnets.robot | 56 ---
 .../ODL/CI/custom_tests/neutron/030__ports.robot | 56 ---
 .../CI/custom_tests/neutron/040__delete_ports.txt | 37 --
 .../custom_tests/neutron/050__delete_subnets.txt | 37 --
 .../custom_tests/neutron/060__delete_networks.txt | 37 --
 testcases/Controllers/ODL/CI/odlreport2db.py | 165 ------
 testcases/Controllers/ODL/CI/start_tests.sh | 97 ----
 testcases/Controllers/ODL/CI/test_list.txt | 5 -
 .../ODL/custom_tests/neutron/010__networks.robot | 56 +++
 .../ODL/custom_tests/neutron/020__subnets.robot | 56 +++
 .../ODL/custom_tests/neutron/030__ports.robot | 56 +++
 .../ODL/custom_tests/neutron/040__delete_ports.txt | 37 ++
 .../custom_tests/neutron/050__delete_subnets.txt | 37 ++
 .../custom_tests/neutron/060__delete_networks.txt | 37 ++
 testcases/Controllers/ODL/odlreport2db.py | 165 ++++
 testcases/Controllers/ODL/start_tests.sh | 97 ++++
 testcases/Controllers/ODL/test_list.txt | 5 +
 testcases/Controllers/ONOS/Teston/CI/Readme.txt | 5 -
 testcases/Controllers/ONOS/Teston/CI/__init__.py | 0
 .../ONOS/Teston/CI/adapters/__init__.py | 0
 .../Controllers/ONOS/Teston/CI/adapters/client.py | 88 ----
 .../ONOS/Teston/CI/adapters/connection.py | 196 --------
 .../ONOS/Teston/CI/adapters/environment.py | 281 -----------
 .../ONOS/Teston/CI/adapters/foundation.py | 104 ----
 .../Controllers/ONOS/Teston/CI/dependencies/onos | 29 --
 testcases/Controllers/ONOS/Teston/CI/log/gitignore | 0
 .../Controllers/ONOS/Teston/CI/onosfunctest.py | 208 --------
 testcases/Controllers/ONOS/Teston/Readme.txt | 5 +
 testcases/Controllers/ONOS/Teston/__init__.py | 0
 .../Controllers/ONOS/Teston/adapters/__init__.py | 0
 .../Controllers/ONOS/Teston/adapters/client.py | 88 ++++
 .../Controllers/ONOS/Teston/adapters/connection.py | 196 ++++++++
 .../ONOS/Teston/adapters/environment.py | 281 +++++++++++
 .../Controllers/ONOS/Teston/adapters/foundation.py | 104 ++++
 .../Controllers/ONOS/Teston/dependencies/onos | 29 ++
 testcases/Controllers/ONOS/Teston/log/gitignore | 0
 testcases/Controllers/ONOS/Teston/onosfunctest.py | 208 ++++++++
 testcases/OpenStack/healthcheck/healthcheck.sh | 208 ++++++++
 testcases/OpenStack/rally/macro/macro.yaml | 97 ++++
 testcases/OpenStack/rally/run_rally-cert.py | 560 +++++++++++++++++++++
 .../rally/scenario/opnfv-authenticate.yaml | 63 +++
 .../OpenStack/rally/scenario/opnfv-cinder.yaml | 272 ++++++++++
 .../OpenStack/rally/scenario/opnfv-glance.yaml | 49 ++
 testcases/OpenStack/rally/scenario/opnfv-heat.yaml | 160 ++++++
 .../OpenStack/rally/scenario/opnfv-keystone.yaml | 92 ++++
 .../OpenStack/rally/scenario/opnfv-neutron.yaml | 245 +++++++++
 testcases/OpenStack/rally/scenario/opnfv-nova.yaml | 378 ++++++++++++++
 .../OpenStack/rally/scenario/opnfv-quotas.yaml | 54 ++
 .../OpenStack/rally/scenario/opnfv-requests.yaml | 28 ++
 .../OpenStack/rally/scenario/opnfv-smoke.yaml | 268 ++++++++++
 testcases/OpenStack/rally/scenario/opnfv-vm.yaml | 42 ++
 .../rally/scenario/support/instance_dd_test.sh | 13 +
.../templates/autoscaling_policy.yaml.template | 17 + .../rally/scenario/templates/default.yaml.template | 1 + .../templates/random_strings.yaml.template | 13 + .../templates/resource_group.yaml.template | 13 + .../templates/server_with_ports.yaml.template | 64 +++ .../templates/server_with_volume.yaml.template | 43 ++ ...pdated_autoscaling_policy_inplace.yaml.template | 23 + .../updated_random_strings_add.yaml.template | 19 + .../updated_random_strings_delete.yaml.template | 11 + .../updated_random_strings_replace.yaml.template | 19 + .../updated_resource_group_increase.yaml.template | 16 + .../updated_resource_group_reduce.yaml.template | 16 + testcases/OpenStack/rally/task.yaml | 60 +++ .../OpenStack/tempest/custom_tests/defcore_req.txt | 122 +++++ testcases/OpenStack/tempest/run_tempest.py | 347 +++++++++++++ testcases/OpenStack/vPing/ping.sh | 13 + testcases/OpenStack/vPing/vPing_ssh.py | 453 +++++++++++++++++ testcases/OpenStack/vPing/vPing_userdata.py | 387 ++++++++++++++ .../VIM/OpenStack/CI/custom_tests/defcore_req.txt | 122 ----- .../VIM/OpenStack/CI/libraries/healthcheck.sh | 208 -------- .../VIM/OpenStack/CI/libraries/run_rally-cert.py | 560 --------------------- .../VIM/OpenStack/CI/libraries/run_tempest.py | 347 ------------- .../VIM/OpenStack/CI/rally_cert/macro/macro.yaml | 97 ---- .../CI/rally_cert/scenario/opnfv-authenticate.yaml | 63 --- .../CI/rally_cert/scenario/opnfv-cinder.yaml | 272 ---------- .../CI/rally_cert/scenario/opnfv-glance.yaml | 49 -- .../CI/rally_cert/scenario/opnfv-heat.yaml | 160 ------ .../CI/rally_cert/scenario/opnfv-keystone.yaml | 92 ---- .../CI/rally_cert/scenario/opnfv-neutron.yaml | 245 --------- .../CI/rally_cert/scenario/opnfv-nova.yaml | 378 -------------- .../CI/rally_cert/scenario/opnfv-quotas.yaml | 54 -- .../CI/rally_cert/scenario/opnfv-requests.yaml | 28 -- .../CI/rally_cert/scenario/opnfv-smoke.yaml | 268 ---------- .../OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml | 42 -- .../scenario/support/instance_dd_test.sh | 13 - .../templates/autoscaling_policy.yaml.template | 17 - .../scenario/templates/default.yaml.template | 1 - .../templates/random_strings.yaml.template | 13 - .../templates/resource_group.yaml.template | 13 - .../templates/server_with_ports.yaml.template | 64 --- .../templates/server_with_volume.yaml.template | 43 -- ...pdated_autoscaling_policy_inplace.yaml.template | 23 - .../updated_random_strings_add.yaml.template | 19 - .../updated_random_strings_delete.yaml.template | 11 - .../updated_random_strings_replace.yaml.template | 19 - .../updated_resource_group_increase.yaml.template | 16 - .../updated_resource_group_reduce.yaml.template | 16 - testcases/VIM/OpenStack/CI/rally_cert/task.yaml | 60 --- testcases/vIMS/CI/clearwater.py | 66 --- testcases/vIMS/CI/create_venv.sh | 44 -- testcases/vIMS/CI/orchestrator.py | 236 --------- testcases/vIMS/CI/requirements.pip | 1 - testcases/vIMS/CI/vIMS.py | 553 -------------------- testcases/vIMS/clearwater.py | 66 +++ testcases/vIMS/create_venv.sh | 44 ++ testcases/vIMS/orchestrator.py | 236 +++++++++ testcases/vIMS/requirements.pip | 1 + testcases/vIMS/vIMS.py | 553 ++++++++++++++++++++ testcases/vPing/CI/libraries/ping.sh | 13 - testcases/vPing/CI/libraries/vPing_ssh.py | 453 ----------------- testcases/vPing/CI/libraries/vPing_userdata.py | 387 -------------- 117 files changed, 6547 insertions(+), 6552 deletions(-) delete mode 100644 testcases/Controllers/ODL/CI/custom_tests/neutron/010__networks.robot delete mode 100644 
testcases/Controllers/ODL/CI/custom_tests/neutron/020__subnets.robot delete mode 100644 testcases/Controllers/ODL/CI/custom_tests/neutron/030__ports.robot delete mode 100644 testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt delete mode 100644 testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt delete mode 100644 testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt delete mode 100644 testcases/Controllers/ODL/CI/odlreport2db.py delete mode 100755 testcases/Controllers/ODL/CI/start_tests.sh delete mode 100644 testcases/Controllers/ODL/CI/test_list.txt create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/010__networks.robot create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/020__subnets.robot create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/030__ports.robot create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/040__delete_ports.txt create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/050__delete_subnets.txt create mode 100644 testcases/Controllers/ODL/custom_tests/neutron/060__delete_networks.txt create mode 100644 testcases/Controllers/ODL/odlreport2db.py create mode 100755 testcases/Controllers/ODL/start_tests.sh create mode 100644 testcases/Controllers/ODL/test_list.txt delete mode 100644 testcases/Controllers/ONOS/Teston/CI/Readme.txt delete mode 100644 testcases/Controllers/ONOS/Teston/CI/__init__.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/adapters/client.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/adapters/connection.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/adapters/environment.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py delete mode 100644 testcases/Controllers/ONOS/Teston/CI/dependencies/onos delete mode 100644 testcases/Controllers/ONOS/Teston/CI/log/gitignore delete mode 100644 testcases/Controllers/ONOS/Teston/CI/onosfunctest.py create mode 100644 testcases/Controllers/ONOS/Teston/Readme.txt create mode 100644 testcases/Controllers/ONOS/Teston/__init__.py create mode 100644 testcases/Controllers/ONOS/Teston/adapters/__init__.py create mode 100644 testcases/Controllers/ONOS/Teston/adapters/client.py create mode 100644 testcases/Controllers/ONOS/Teston/adapters/connection.py create mode 100644 testcases/Controllers/ONOS/Teston/adapters/environment.py create mode 100644 testcases/Controllers/ONOS/Teston/adapters/foundation.py create mode 100644 testcases/Controllers/ONOS/Teston/dependencies/onos create mode 100644 testcases/Controllers/ONOS/Teston/log/gitignore create mode 100644 testcases/Controllers/ONOS/Teston/onosfunctest.py create mode 100755 testcases/OpenStack/healthcheck/healthcheck.sh create mode 100644 testcases/OpenStack/rally/macro/macro.yaml create mode 100755 testcases/OpenStack/rally/run_rally-cert.py create mode 100644 testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-cinder.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-glance.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-heat.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-keystone.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-neutron.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-nova.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-quotas.yaml 
create mode 100644 testcases/OpenStack/rally/scenario/opnfv-requests.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-smoke.yaml create mode 100644 testcases/OpenStack/rally/scenario/opnfv-vm.yaml create mode 100644 testcases/OpenStack/rally/scenario/support/instance_dd_test.sh create mode 100644 testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/default.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template create mode 100644 testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template create mode 100644 testcases/OpenStack/rally/task.yaml create mode 100644 testcases/OpenStack/tempest/custom_tests/defcore_req.txt create mode 100644 testcases/OpenStack/tempest/run_tempest.py create mode 100644 testcases/OpenStack/vPing/ping.sh create mode 100644 testcases/OpenStack/vPing/vPing_ssh.py create mode 100644 testcases/OpenStack/vPing/vPing_userdata.py delete mode 100644 testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt delete mode 100755 testcases/VIM/OpenStack/CI/libraries/healthcheck.sh delete mode 100755 testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py delete mode 100644 testcases/VIM/OpenStack/CI/libraries/run_tempest.py delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template delete mode 100644 
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/task.yaml delete mode 100644 testcases/vIMS/CI/clearwater.py delete mode 100755 testcases/vIMS/CI/create_venv.sh delete mode 100644 testcases/vIMS/CI/orchestrator.py delete mode 100644 testcases/vIMS/CI/requirements.pip delete mode 100644 testcases/vIMS/CI/vIMS.py create mode 100644 testcases/vIMS/clearwater.py create mode 100755 testcases/vIMS/create_venv.sh create mode 100644 testcases/vIMS/orchestrator.py create mode 100644 testcases/vIMS/requirements.pip create mode 100644 testcases/vIMS/vIMS.py delete mode 100644 testcases/vPing/CI/libraries/ping.sh delete mode 100644 testcases/vPing/CI/libraries/vPing_ssh.py delete mode 100644 testcases/vPing/CI/libraries/vPing_userdata.py diff --git a/ci/config_functest.yaml b/ci/config_functest.yaml index 29a74afd5..ab620f023 100644 --- a/ci/config_functest.yaml +++ b/ci/config_functest.yaml @@ -1,13 +1,12 @@ general: directories: # Relative to the path where the repo is cloned: - dir_vping: testcases/vPing/CI/libraries/ - dir_odl: testcases/Controllers/ODL/CI/ - dir_rally: testcases/VIM/OpenStack/CI/libraries/ - dir_rally_scn: testcases/VIM/OpenStack/CI/rally_cert/ - dir_tempest_cases: testcases/VIM/OpenStack/CI/custom_tests/ - dir_vIMS: testcases/vIMS/CI/ - dir_onos: testcases/Controllers/ONOS/Teston/CI/ + dir_vping: testcases/OpenStack/vPing/ + dir_odl: testcases/Controllers/ODL/ + dir_rally: testcases/OpenStack/rally/ + dir_tempest_cases: testcases/OpenStack/tempest/custom_tests/ + dir_vIMS: testcases/vIMS/ + dir_onos: testcases/Controllers/ONOS/Teston/ # Absolute path dir_repos: /home/opnfv/repos diff --git a/ci/exec_test.sh b/ci/exec_test.sh index 013b98c2e..61d00c744 100755 --- a/ci/exec_test.sh +++ b/ci/exec_test.sh @@ -69,61 +69,57 @@ function run_test(){ case $test_name in "healthcheck") - ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh + ${FUNCTEST_REPO_DIR}/testcases/OpenStack/healthcheck/healthcheck.sh ;; "vping_ssh") - python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing_ssh.py \ - $debug $report + python ${FUNCTEST_REPO_DIR}/testcases/vPing/vPing_ssh.py $report ;; "vping_userdata") - python ${FUNCTEST_REPO_DIR}/testcases/vPing/CI/libraries/vPing_userdata.py \ - $debug $report + python ${FUNCTEST_REPO_DIR}/testcases/vPing/vPing_userdata.py $report ;; "odl") odl_tests ODL_PORT=$odl_port ODL_IP=$odl_ip KEYSTONE_IP=$keystone_ip 
NEUTRON_IP=$neutron_ip USR_NAME=${OS_USERNAME} PASS=${OS_PASSWORD} \ - ${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/CI/start_tests.sh + ${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/start_tests.sh # push results to the DB in case of CI if [[ -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" ]]; then odl_logs="/home/opnfv/functest/results/odl/logs/2" - odl_path="${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/CI" + odl_path="${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/" node_name=$(env | grep NODE_NAME | cut -f2 -d'=') python ${odl_path}/odlreport2db.py -x ${odl_logs}/output.xml -i ${INSTALLER_TYPE} -p ${node_name} -s ${DEPLOY_SCENARIO} fi ;; "tempest_smoke_serial") - python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_tempest.py \ - $debug $clean_flag -s -m smoke $report + python ${FUNCTEST_REPO_DIR}/testcases/OpenStack/tempest/run_tempest.py \ + $clean_flag -s -m smoke $report ;; "tempest_full_parallel") - python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_tempest.py \ - $debug $serial_flag $clean_flag -m full $report + python ${FUNCTEST_REPO_DIR}/testcases/OpenStack/tempest/run_tempest.py \ + $serial_flag $clean_flag -m full $report ;; "vims") - python ${FUNCTEST_REPO_DIR}/testcases/vIMS/CI/vIMS.py \ - $debug $clean_flag $report + python ${FUNCTEST_REPO_DIR}/testcases/vIMS/vIMS.py $clean_flag $report ;; "rally_full") - python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py \ - $debug $clean_flag all $report + python ${FUNCTEST_REPO_DIR}/testcases/OpenStack/rally/run_rally-cert.py $clean_flag all $report ;; "rally_sanity") - python ${FUNCTEST_REPO_DIR}/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py \ - $debug $clean_flag --sanity all $report + python ${FUNCTEST_REPO_DIR}/testcases/OpenStack/rally/run_rally-cert.py \ + $clean_flag --sanity all $report ;; "bgpvpn") python ${FUNCTEST_REPO_DIR}/testcases/features/bgpvpn.py ;; "onos") if [ "$INSTALLER_TYPE" == "joid" ]; then - python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py -i joid + python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/onosfunctest.py -i joid else - python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py + python ${FUNCTEST_REPO_DIR}/testcases/Controllers/ONOS/Teston/onosfunctest.py fi ;; "promise") - python ${FUNCTEST_REPO_DIR}/testcases/features/promise.py $debug $report + python ${FUNCTEST_REPO_DIR}/testcases/features/promise.py $report sleep 10 # to let the instances terminate ;; "doctor") diff --git a/ci/tier_handler.py b/ci/tier_handler.py index 2fc7a9c4d..03db4a91d 100644 --- a/ci/tier_handler.py +++ b/ci/tier_handler.py @@ -74,7 +74,7 @@ class Tier: return self.ci_loop def __str__(self): - lines = split_text(self.description, LINE_LENGTH-6) + lines = split_text(self.description, LINE_LENGTH - 6) out = "" out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2))) @@ -120,7 +120,7 @@ class TestCase: return self.name def __str__(self): - lines = split_text(self.description, LINE_LENGTH-6) + lines = split_text(self.description, LINE_LENGTH - 6) out = "" out += ("+%s+\n" % ("=" * (LINE_LENGTH - 2))) diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/010__networks.robot b/testcases/Controllers/ODL/CI/custom_tests/neutron/010__networks.robot deleted file mode 100644 index 583a6a153..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/010__networks.robot +++ /dev/null @@ -1,56 +0,0 @@ -*** Settings *** -Documentation Checking Network created in OpenStack are pushed to 
OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library RequestsLibrary -Library ../../../libraries/Common.py -Variables ../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/networks -${OSREST} /v2.0/networks -${postNet} {"network":{"name":"odl_network","admin_state_up":true}} - -*** Test Cases *** -Check OpenStack Networks - [Documentation] Checking OpenStack Neutron for known networks - [Tags] Network Neutron OpenStack - Log ${X-AUTH} - ${resp} get OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 200 - ${OSResult} To Json ${resp.content} - Set Suite Variable ${OSResult} - Log ${OSResult} - -Check OpenDaylight Networks - [Documentation] Checking OpenDaylight Neutron API for Known Networks - [Tags] Network Neutron OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - -Create Network - [Documentation] Create new network in OpenStack - [Tags] Create Network OpenStack Neutron - Log ${postNet} - ${resp} post OSSession ${OSREST} data=${postNet} - Should be Equal As Strings ${resp.status_code} 201 - ${result} To JSON ${resp.content} - ${result} Get From Dictionary ${result} network - ${NETID} Get From Dictionary ${result} id - Log ${result} - Log ${NETID} - Set Global Variable ${NETID} - sleep 2 - -Check Network - [Documentation] Check Network created in OpenDaylight - [Tags] Check Network OpenDaylight - ${resp} get ODLSession ${ODLREST}/${NetID} - Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/020__subnets.robot b/testcases/Controllers/ODL/CI/custom_tests/neutron/020__subnets.robot deleted file mode 100644 index 5f5b82440..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/020__subnets.robot +++ /dev/null @@ -1,56 +0,0 @@ -*** Settings *** -Documentation Checking Subnets created in OpenStack are pushed to OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library RequestsLibrary -Library ../../../libraries/Common.py -Variables ../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/subnets -${OSREST} /v2.0/subnets -${data} {"subnet":{"network_id":"${NETID}","ip_version":4,"cidr":"172.16.64.0/24","allocation_pools":[{"start":"172.16.64.20","end":"172.16.64.120"}]}} - -*** Test Cases *** -Check OpenStack Subnets - [Documentation] Checking OpenStack Neutron for known Subnets - [Tags] Subnets Neutron OpenStack - Log ${X-AUTH} - ${resp} get OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 200 - ${OSResult} To Json ${resp.content} - Set Suite Variable ${OSResult} - Log ${OSResult} - -Check OpenDaylight subnets - [Documentation] Checking OpenDaylight Neutron API for Known Subnets - [Tags] Subnets Neutron OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - -Create New 
subnet - [Documentation] Create new subnet in OpenStack - [Tags] Create Subnet OpenStack Neutron - Log ${data} - ${resp} post OSSession ${OSREST} data=${data} - Should be Equal As Strings ${resp.status_code} 201 - ${result} To JSON ${resp.content} - ${result} Get From Dictionary ${result} subnet - ${SUBNETID} Get From Dictionary ${result} id - Log ${result} - Log ${SUBNETID} - Set Global Variable ${SUBNETID} - sleep 2 - -Check New subnet - [Documentation] Check new subnet created in OpenDaylight - [Tags] Check subnet OpenDaylight - ${resp} get ODLSession ${ODLREST}/${SUBNETID} - Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/030__ports.robot b/testcases/Controllers/ODL/CI/custom_tests/neutron/030__ports.robot deleted file mode 100644 index 8b75733b7..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/030__ports.robot +++ /dev/null @@ -1,56 +0,0 @@ -*** Settings *** -Documentation Checking Port created in OpenStack are pushed to OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library RequestsLibrary -Library ../../../libraries/Common.py -Variables ../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/ports -${OSREST} /v2.0/ports -${data} {"port":{"network_id":"${NETID}","admin_state_up": true}} - -*** Test Cases *** -Check OpenStack ports - [Documentation] Checking OpenStack Neutron for known ports - [Tags] Ports Neutron OpenStack - Log ${X-AUTH} - ${resp} get OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 200 - ${OSResult} To Json ${resp.content} - Set Suite Variable ${OSResult} - Log ${OSResult} - -Check OpenDaylight ports - [Documentation] Checking OpenDaylight Neutron API for Known Ports - [Tags] Ports Neutron OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - -Create New Port - [Documentation] Create new port in OpenStack - [Tags] Create port OpenStack Neutron - Log ${data} - ${resp} post OSSession ${OSREST} data=${data} - Should be Equal As Strings ${resp.status_code} 201 - ${result} To JSON ${resp.content} - ${result} Get From Dictionary ${result} port - ${PORTID} Get From Dictionary ${result} id - Log ${result} - Log ${PORTID} - Set Global Variable ${PORTID} - sleep 2 - -Check New Port - [Documentation] Check new subnet created in OpenDaylight - [Tags] Check subnet OpenDaylight - ${resp} get ODLSession ${ODLREST}/${PORTID} - Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt deleted file mode 100644 index f10298839..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/040__delete_ports.txt +++ /dev/null @@ -1,37 +0,0 @@ -*** Settings *** -Documentation Checking Port deleted in OpenStack are deleted also in OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library ../../../libraries/RequestsLibrary.py -Library ../../../libraries/Common.py -Variables 
../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/ports -${OSREST} /v2.0/ports/${PORTID} -${data} {"port":{"network_id":"${NETID}","admin_state_up": true}} - -*** Test Cases *** -Delete New Port - [Documentation] Delete previously created port in OpenStack - [Tags] Delete port OpenStack Neutron - Log ${data} - ${resp} delete OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 204 - Log ${resp.content} - sleep 2 - -Check Port Deleted - [Documentation] Check port deleted in OpenDaylight - [Tags] Check port deleted OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - ${resp} get ODLSession ${ODLREST}/${PORTID} - Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt deleted file mode 100644 index fb619825b..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/050__delete_subnets.txt +++ /dev/null @@ -1,37 +0,0 @@ -*** Settings *** -Documentation Checking Subnets deleted in OpenStack are deleted also in OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library ../../../libraries/RequestsLibrary.py -Library ../../../libraries/Common.py -Variables ../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/subnets -${OSREST} /v2.0/subnets/${SUBNETID} -${data} {"subnet":{"network_id":"${NETID}","ip_version":4,"cidr":"172.16.64.0/24","allocation_pools":[{"start":"172.16.64.20","end":"172.16.64.120"}]}} - -*** Test Cases *** -Delete New subnet - [Documentation] Delete previously created subnet in OpenStack - [Tags] Delete Subnet OpenStack Neutron - Log ${data} - ${resp} delete OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 204 - Log ${resp.content} - sleep 2 - -Check New subnet deleted - [Documentation] Check subnet deleted in OpenDaylight - [Tags] Check subnet deleted OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - ${resp} get ODLSession ${ODLREST}/${SUBNETID} - Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt b/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt deleted file mode 100644 index 528fbfca2..000000000 --- a/testcases/Controllers/ODL/CI/custom_tests/neutron/060__delete_networks.txt +++ /dev/null @@ -1,37 +0,0 @@ -*** Settings *** -Documentation Checking Network deleted in OpenStack are deleted also in OpenDaylight -Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} -Suite Teardown Delete All Sessions -Library SSHLibrary -Library Collections -Library OperatingSystem -Library ../../../libraries/RequestsLibrary.py -Library ../../../libraries/Common.py -Variables ../../../variables/Variables.py - -*** Variables *** -${ODLREST} /controller/nb/v2/neutron/networks -${OSREST} 
/v2.0/networks/${NETID} -${postNet} {"network":{"name":"odl_network","admin_state_up":true}} - -*** Test Cases *** -Delete Network - [Documentation] Delete network in OpenStack - [Tags] Delete Network OpenStack Neutron - Log ${postNet} - ${resp} delete OSSession ${OSREST} - Should be Equal As Strings ${resp.status_code} 204 - Log ${resp.content} - sleep 2 - -Check Network deleted - [Documentation] Check Network deleted in OpenDaylight - [Tags] Check Network OpenDaylight - Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} - ${resp} get ODLSession ${ODLREST} - Should be Equal As Strings ${resp.status_code} 200 - ${ODLResult} To Json ${resp.content} - Set Suite Variable ${ODLResult} - Log ${ODLResult} - ${resp} get ODLSession ${ODLREST}/${NetID} - Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/CI/odlreport2db.py b/testcases/Controllers/ODL/CI/odlreport2db.py deleted file mode 100644 index 50c8b096e..000000000 --- a/testcases/Controllers/ODL/CI/odlreport2db.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/python -# -# Authors: -# - peter.bandzi@cisco.com -# - morgan.richomme@orange.com -# -# src: Peter Bandzi -# https://github.com/pbandzi/parse-robot/blob/master/convert_robot_to_json.py -# -# Copyright (c) 2015 All rights reserved -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1: This script boots the VM1 and allocates IP address from Nova -# Later, the VM2 boots then execute cloud-init to ping VM1. -# After successful ping, both the VMs are deleted. -# 0.2: measure test duration and publish results under json format -# -# - -import getopt -import json -import os -import sys -import xmltodict -import yaml - -import functest.utils.functest_utils as functest_utils - - -def usage(): - print """Usage: - get-json-from-robot.py --xml= --pod= - --installer= --database= - --scenaro=SCENARIO - -x, --xml xml file generated by robot test - -p, --pod POD name where the test come from - -i, --installer - -s, --scenario - -h, --help this message - """ - sys.exit(2) - - -def populate_detail(test): - detail = {} - detail['test_name'] = test['@name'] - detail['test_status'] = test['status'] - detail['test_doc'] = test['doc'] - return detail - - -def parse_test(tests, details): - try: - for test in tests: - details.append(populate_detail(test)) - except TypeError: - # tests is not iterable - details.append(populate_detail(tests)) - return details - - -def parse_suites(suites): - data = {} - details = [] - try: - for suite in suites: - data['details'] = parse_test(suite['test'], details) - except TypeError: - # suites is not iterable - data['details'] = parse_test(suites['test'], details) - return data - - -def main(argv): - try: - opts, args = getopt.getopt(argv, - 'x:p:i:s:h', - ['xml=', 'pod=', - 'installer=', - 'scenario=', - 'help']) - except getopt.GetoptError: - usage() - - for opt, arg in opts: - if opt in ('-h', '--help'): - usage() - elif opt in ('-x', '--xml'): - xml_file = arg - elif opt in ('-p', '--pod'): - pod = arg - elif opt in ('-i', '--installer'): - installer = arg - elif opt in ('-s', '--scenario'): - scenario = arg - else: - usage() - - with open(xml_file, "r") as myfile: - xml_input = myfile.read().replace('\n', '') - - # dictionary populated with data from xml file - all_data = xmltodict.parse(xml_input)['robot'] - - data = 
parse_suites(all_data['suite']['suite']) - data['description'] = all_data['suite']['@name'] - data['version'] = all_data['@generator'] - data['test_project'] = "functest" - data['case_name'] = "ODL" - data['pod_name'] = pod - data['installer'] = installer - - json.dumps(data, indent=4, separators=(',', ': ')) - - # Only used from container, we can set up absolute path - with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) - f.close() - - database = functest_yaml.get("results").get("test_db_url") - build_tag = functest_utils.get_build_tag() - - try: - # example: - # python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml - # -i fuel - # -p opnfv-jump-2 - # -s os-odl_l2-ha - version = functest_utils.get_version() - - # success criteria for ODL = 100% of tests OK - status = "failed" - try: - tests_passed = 0 - tests_failed = 0 - for v in data['details']: - if v['test_status']['@status'] == "PASS": - tests_passed += 1 - else: - tests_failed += 1 - - if (tests_failed < 1): - status = "passed" - except: - print("Unable to set criteria" % sys.exc_info()[0]) - functest_utils.push_results_to_db(database, - "functest", - data['case_name'], - None, - data['pod_name'], - version, - scenario, - status, - build_tag, - data) - except: - print("Error pushing results into Database '%s'" % sys.exc_info()[0]) - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/testcases/Controllers/ODL/CI/start_tests.sh b/testcases/Controllers/ODL/CI/start_tests.sh deleted file mode 100755 index 3800b6307..000000000 --- a/testcases/Controllers/ODL/CI/start_tests.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -# it includes python2.7 virtual env with robot packages and git -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# - -BASEDIR=`dirname $0` -RESULTS_DIR='/home/opnfv/functest/results/odl/' -REPO_DIR='/home/opnfv/repos/odl_integration' -#TODO: read this form config_functest.yaml - -# Colors -green='\033[0;32m' -light_green='\033[1;32m' -red='\033[1;31m' -nc='\033[0m' # No Color - -usage="Script for starting ODL tests. Tests to be executed are specified in test_list.txt file. - -usage: -[var=value] bash $(basename "$0") [-h] - -where: - -h show this help text - var one of the following: ODL_IP, ODL_PORT, USR_NAME, PASS, NEUTRON_IP - value new value for var - -example: - ODL_IP=oscontro1 ODL_PORT=8080 bash $(basename "$0")" - -while getopts ':h' option; do - case "$option" in - h) echo "$usage" - exit - ;; - \?) printf "illegal option: -%s\n" "$OPTARG" >&2 - echo "$usage" >&2 - exit 1 - ;; - esac -done - -echo -e "${green}Current environment parameters for ODL suite.${nc}" -# Following vars might be also specified as CLI params -set -x -ODL_IP=${ODL_IP:-'192.168.1.5'} -ODL_PORT=${ODL_PORT:-8081} -USR_NAME=${USR_NAME:-'neutron'} -PASS=${PASS:-'octopus'} -NEUTRON_IP=${NEUTRON_IP:-192.168.0.68} -KEYSTONE_IP=${KEYSTONE_IP:-192.168.0.69} -set +x - -init_file=${REPO_DIR}/test/csit/suites/openstack/neutron/__init__.robot -# Change openstack password for admin tenant in neutron suite -sed -i "s/\"password\": \".*\"/\"password\": \"${PASS}\"/" $init_file - -# Add Start Suite and Teardown Suite -if [[ ! 
`grep 'Suite Teardown' ${init_file}` ]]; then - sed -i "/^Documentation.*/a Suite Teardown Stop Suite" $init_file - sed -i "/^Documentation.*/a Suite Setup Start Suite" $init_file -fi - -# add custom tests to suite, if there are more custom tests needed this will be reworked -echo -e "${green}Copy custom tests to suite.${nc}" -cp -vf ${BASEDIR}/custom_tests/neutron/* ${REPO_DIR}/test/csit/suites/openstack/neutron/ - -# List of tests are specified in test_list.txt -# those are relative paths to test directories from integartion suite -echo -e "${green}Executing chosen tests.${nc}" -test_num=0 -while read line -do - # skip comments - [[ ${line:0:1} == "#" ]] && continue - # skip empty lines - [[ -z "${line}" ]] && continue - - ((test_num++)) - echo -e "${light_green}Starting test: $line ${nc}" - pybot -v OPENSTACK:${KEYSTONE_IP} -v NEUTRON:${NEUTRON_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${ODL_IP} ${REPO_DIR}/$line - mkdir -p $RESULTS_DIR/logs/${test_num} - mv log.html $RESULTS_DIR/logs/${test_num}/ - mv report.html $RESULTS_DIR/logs/${test_num}/ - mv output.xml $RESULTS_DIR/logs/${test_num}/ -done < ${BASEDIR}/test_list.txt - -# create final report which includes all partial test reports -for i in $(seq $test_num); do - rebot_params="$rebot_params $RESULTS_DIR/logs/$i/output.xml" -done - -echo -e "${green}Final report is located:${nc}" -rebot $rebot_params diff --git a/testcases/Controllers/ODL/CI/test_list.txt b/testcases/Controllers/ODL/CI/test_list.txt deleted file mode 100644 index ad791e553..000000000 --- a/testcases/Controllers/ODL/CI/test_list.txt +++ /dev/null @@ -1,5 +0,0 @@ -# List of tests` which will be executed by script start_test.sh -# You can specify path to specific robot test file or directory (in that case all tests from directory will be executed) - -test/csit/suites/integration/basic/ -test/csit/suites/openstack/neutron/ diff --git a/testcases/Controllers/ODL/custom_tests/neutron/010__networks.robot b/testcases/Controllers/ODL/custom_tests/neutron/010__networks.robot new file mode 100644 index 000000000..583a6a153 --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/010__networks.robot @@ -0,0 +1,56 @@ +*** Settings *** +Documentation Checking Network created in OpenStack are pushed to OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library RequestsLibrary +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py + +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/networks +${OSREST} /v2.0/networks +${postNet} {"network":{"name":"odl_network","admin_state_up":true}} + +*** Test Cases *** +Check OpenStack Networks + [Documentation] Checking OpenStack Neutron for known networks + [Tags] Network Neutron OpenStack + Log ${X-AUTH} + ${resp} get OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 200 + ${OSResult} To Json ${resp.content} + Set Suite Variable ${OSResult} + Log ${OSResult} + +Check OpenDaylight Networks + [Documentation] Checking OpenDaylight Neutron API for Known Networks + [Tags] Network Neutron OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + +Create Network + [Documentation] Create new network in OpenStack + [Tags] Create 
Network OpenStack Neutron + Log ${postNet} + ${resp} post OSSession ${OSREST} data=${postNet} + Should be Equal As Strings ${resp.status_code} 201 + ${result} To JSON ${resp.content} + ${result} Get From Dictionary ${result} network + ${NETID} Get From Dictionary ${result} id + Log ${result} + Log ${NETID} + Set Global Variable ${NETID} + sleep 2 + +Check Network + [Documentation] Check Network created in OpenDaylight + [Tags] Check Network OpenDaylight + ${resp} get ODLSession ${ODLREST}/${NetID} + Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/custom_tests/neutron/020__subnets.robot b/testcases/Controllers/ODL/custom_tests/neutron/020__subnets.robot new file mode 100644 index 000000000..5f5b82440 --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/020__subnets.robot @@ -0,0 +1,56 @@ +*** Settings *** +Documentation Checking Subnets created in OpenStack are pushed to OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library RequestsLibrary +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py + +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/subnets +${OSREST} /v2.0/subnets +${data} {"subnet":{"network_id":"${NETID}","ip_version":4,"cidr":"172.16.64.0/24","allocation_pools":[{"start":"172.16.64.20","end":"172.16.64.120"}]}} + +*** Test Cases *** +Check OpenStack Subnets + [Documentation] Checking OpenStack Neutron for known Subnets + [Tags] Subnets Neutron OpenStack + Log ${X-AUTH} + ${resp} get OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 200 + ${OSResult} To Json ${resp.content} + Set Suite Variable ${OSResult} + Log ${OSResult} + +Check OpenDaylight subnets + [Documentation] Checking OpenDaylight Neutron API for Known Subnets + [Tags] Subnets Neutron OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + +Create New subnet + [Documentation] Create new subnet in OpenStack + [Tags] Create Subnet OpenStack Neutron + Log ${data} + ${resp} post OSSession ${OSREST} data=${data} + Should be Equal As Strings ${resp.status_code} 201 + ${result} To JSON ${resp.content} + ${result} Get From Dictionary ${result} subnet + ${SUBNETID} Get From Dictionary ${result} id + Log ${result} + Log ${SUBNETID} + Set Global Variable ${SUBNETID} + sleep 2 + +Check New subnet + [Documentation] Check new subnet created in OpenDaylight + [Tags] Check subnet OpenDaylight + ${resp} get ODLSession ${ODLREST}/${SUBNETID} + Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/custom_tests/neutron/030__ports.robot b/testcases/Controllers/ODL/custom_tests/neutron/030__ports.robot new file mode 100644 index 000000000..8b75733b7 --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/030__ports.robot @@ -0,0 +1,56 @@ +*** Settings *** +Documentation Checking Port created in OpenStack are pushed to OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library RequestsLibrary +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py 
+ +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/ports +${OSREST} /v2.0/ports +${data} {"port":{"network_id":"${NETID}","admin_state_up": true}} + +*** Test Cases *** +Check OpenStack ports + [Documentation] Checking OpenStack Neutron for known ports + [Tags] Ports Neutron OpenStack + Log ${X-AUTH} + ${resp} get OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 200 + ${OSResult} To Json ${resp.content} + Set Suite Variable ${OSResult} + Log ${OSResult} + +Check OpenDaylight ports + [Documentation] Checking OpenDaylight Neutron API for Known Ports + [Tags] Ports Neutron OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + +Create New Port + [Documentation] Create new port in OpenStack + [Tags] Create port OpenStack Neutron + Log ${data} + ${resp} post OSSession ${OSREST} data=${data} + Should be Equal As Strings ${resp.status_code} 201 + ${result} To JSON ${resp.content} + ${result} Get From Dictionary ${result} port + ${PORTID} Get From Dictionary ${result} id + Log ${result} + Log ${PORTID} + Set Global Variable ${PORTID} + sleep 2 + +Check New Port + [Documentation] Check new subnet created in OpenDaylight + [Tags] Check subnet OpenDaylight + ${resp} get ODLSession ${ODLREST}/${PORTID} + Should be Equal As Strings ${resp.status_code} 200 diff --git a/testcases/Controllers/ODL/custom_tests/neutron/040__delete_ports.txt b/testcases/Controllers/ODL/custom_tests/neutron/040__delete_ports.txt new file mode 100644 index 000000000..f10298839 --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/040__delete_ports.txt @@ -0,0 +1,37 @@ +*** Settings *** +Documentation Checking Port deleted in OpenStack are deleted also in OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library ../../../libraries/RequestsLibrary.py +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py + +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/ports +${OSREST} /v2.0/ports/${PORTID} +${data} {"port":{"network_id":"${NETID}","admin_state_up": true}} + +*** Test Cases *** +Delete New Port + [Documentation] Delete previously created port in OpenStack + [Tags] Delete port OpenStack Neutron + Log ${data} + ${resp} delete OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 204 + Log ${resp.content} + sleep 2 + +Check Port Deleted + [Documentation] Check port deleted in OpenDaylight + [Tags] Check port deleted OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + ${resp} get ODLSession ${ODLREST}/${PORTID} + Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/custom_tests/neutron/050__delete_subnets.txt b/testcases/Controllers/ODL/custom_tests/neutron/050__delete_subnets.txt new file mode 100644 index 000000000..fb619825b --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/050__delete_subnets.txt @@ -0,0 +1,37 @@ +*** Settings *** +Documentation Checking Subnets deleted in OpenStack are 
deleted also in OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library ../../../libraries/RequestsLibrary.py +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py + +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/subnets +${OSREST} /v2.0/subnets/${SUBNETID} +${data} {"subnet":{"network_id":"${NETID}","ip_version":4,"cidr":"172.16.64.0/24","allocation_pools":[{"start":"172.16.64.20","end":"172.16.64.120"}]}} + +*** Test Cases *** +Delete New subnet + [Documentation] Delete previously created subnet in OpenStack + [Tags] Delete Subnet OpenStack Neutron + Log ${data} + ${resp} delete OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 204 + Log ${resp.content} + sleep 2 + +Check New subnet deleted + [Documentation] Check subnet deleted in OpenDaylight + [Tags] Check subnet deleted OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + ${resp} get ODLSession ${ODLREST}/${SUBNETID} + Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/custom_tests/neutron/060__delete_networks.txt b/testcases/Controllers/ODL/custom_tests/neutron/060__delete_networks.txt new file mode 100644 index 000000000..528fbfca2 --- /dev/null +++ b/testcases/Controllers/ODL/custom_tests/neutron/060__delete_networks.txt @@ -0,0 +1,37 @@ +*** Settings *** +Documentation Checking Network deleted in OpenStack are deleted also in OpenDaylight +Suite Setup Create Session OSSession http://${NEUTRON}:9696 headers=${X-AUTH} +Suite Teardown Delete All Sessions +Library SSHLibrary +Library Collections +Library OperatingSystem +Library ../../../libraries/RequestsLibrary.py +Library ../../../libraries/Common.py +Variables ../../../variables/Variables.py + +*** Variables *** +${ODLREST} /controller/nb/v2/neutron/networks +${OSREST} /v2.0/networks/${NETID} +${postNet} {"network":{"name":"odl_network","admin_state_up":true}} + +*** Test Cases *** +Delete Network + [Documentation] Delete network in OpenStack + [Tags] Delete Network OpenStack Neutron + Log ${postNet} + ${resp} delete OSSession ${OSREST} + Should be Equal As Strings ${resp.status_code} 204 + Log ${resp.content} + sleep 2 + +Check Network deleted + [Documentation] Check Network deleted in OpenDaylight + [Tags] Check Network OpenDaylight + Create Session ODLSession http://${CONTROLLER}:${PORT} headers=${HEADERS} auth=${AUTH} + ${resp} get ODLSession ${ODLREST} + Should be Equal As Strings ${resp.status_code} 200 + ${ODLResult} To Json ${resp.content} + Set Suite Variable ${ODLResult} + Log ${ODLResult} + ${resp} get ODLSession ${ODLREST}/${NetID} + Should be Equal As Strings ${resp.status_code} 404 diff --git a/testcases/Controllers/ODL/odlreport2db.py b/testcases/Controllers/ODL/odlreport2db.py new file mode 100644 index 000000000..50c8b096e --- /dev/null +++ b/testcases/Controllers/ODL/odlreport2db.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# +# Authors: +# - peter.bandzi@cisco.com +# - morgan.richomme@orange.com +# +# src: Peter Bandzi +# https://github.com/pbandzi/parse-robot/blob/master/convert_robot_to_json.py +# +# Copyright (c) 2015 All rights reserved +# This program and the accompanying materials +# are made 
available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1: This script boots the VM1 and allocates IP address from Nova +# Later, the VM2 boots then execute cloud-init to ping VM1. +# After successful ping, both the VMs are deleted. +# 0.2: measure test duration and publish results under json format +# +# + +import getopt +import json +import os +import sys +import xmltodict +import yaml + +import functest.utils.functest_utils as functest_utils + + +def usage(): + print """Usage: + get-json-from-robot.py --xml= --pod= + --installer= --database= + --scenaro=SCENARIO + -x, --xml xml file generated by robot test + -p, --pod POD name where the test come from + -i, --installer + -s, --scenario + -h, --help this message + """ + sys.exit(2) + + +def populate_detail(test): + detail = {} + detail['test_name'] = test['@name'] + detail['test_status'] = test['status'] + detail['test_doc'] = test['doc'] + return detail + + +def parse_test(tests, details): + try: + for test in tests: + details.append(populate_detail(test)) + except TypeError: + # tests is not iterable + details.append(populate_detail(tests)) + return details + + +def parse_suites(suites): + data = {} + details = [] + try: + for suite in suites: + data['details'] = parse_test(suite['test'], details) + except TypeError: + # suites is not iterable + data['details'] = parse_test(suites['test'], details) + return data + + +def main(argv): + try: + opts, args = getopt.getopt(argv, + 'x:p:i:s:h', + ['xml=', 'pod=', + 'installer=', + 'scenario=', + 'help']) + except getopt.GetoptError: + usage() + + for opt, arg in opts: + if opt in ('-h', '--help'): + usage() + elif opt in ('-x', '--xml'): + xml_file = arg + elif opt in ('-p', '--pod'): + pod = arg + elif opt in ('-i', '--installer'): + installer = arg + elif opt in ('-s', '--scenario'): + scenario = arg + else: + usage() + + with open(xml_file, "r") as myfile: + xml_input = myfile.read().replace('\n', '') + + # dictionary populated with data from xml file + all_data = xmltodict.parse(xml_input)['robot'] + + data = parse_suites(all_data['suite']['suite']) + data['description'] = all_data['suite']['@name'] + data['version'] = all_data['@generator'] + data['test_project'] = "functest" + data['case_name'] = "ODL" + data['pod_name'] = pod + data['installer'] = installer + + json.dumps(data, indent=4, separators=(',', ': ')) + + # Only used from container, we can set up absolute path + with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) + f.close() + + database = functest_yaml.get("results").get("test_db_url") + build_tag = functest_utils.get_build_tag() + + try: + # example: + # python odlreport2db.py -x ~/Pictures/Perso/odl/output3.xml + # -i fuel + # -p opnfv-jump-2 + # -s os-odl_l2-ha + version = functest_utils.get_version() + + # success criteria for ODL = 100% of tests OK + status = "failed" + try: + tests_passed = 0 + tests_failed = 0 + for v in data['details']: + if v['test_status']['@status'] == "PASS": + tests_passed += 1 + else: + tests_failed += 1 + + if (tests_failed < 1): + status = "passed" + except: + print("Unable to set criteria" % sys.exc_info()[0]) + functest_utils.push_results_to_db(database, + "functest", + data['case_name'], + None, + data['pod_name'], + version, + scenario, + status, + build_tag, + data) + except: + print("Error pushing results into Database '%s'" % sys.exc_info()[0]) + + +if __name__ == "__main__": + 
main(sys.argv[1:]) diff --git a/testcases/Controllers/ODL/start_tests.sh b/testcases/Controllers/ODL/start_tests.sh new file mode 100755 index 000000000..3800b6307 --- /dev/null +++ b/testcases/Controllers/ODL/start_tests.sh @@ -0,0 +1,97 @@ +#!/bin/bash +# it includes python2.7 virtual env with robot packages and git +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# + +BASEDIR=`dirname $0` +RESULTS_DIR='/home/opnfv/functest/results/odl/' +REPO_DIR='/home/opnfv/repos/odl_integration' +#TODO: read this form config_functest.yaml + +# Colors +green='\033[0;32m' +light_green='\033[1;32m' +red='\033[1;31m' +nc='\033[0m' # No Color + +usage="Script for starting ODL tests. Tests to be executed are specified in test_list.txt file. + +usage: +[var=value] bash $(basename "$0") [-h] + +where: + -h show this help text + var one of the following: ODL_IP, ODL_PORT, USR_NAME, PASS, NEUTRON_IP + value new value for var + +example: + ODL_IP=oscontro1 ODL_PORT=8080 bash $(basename "$0")" + +while getopts ':h' option; do + case "$option" in + h) echo "$usage" + exit + ;; + \?) printf "illegal option: -%s\n" "$OPTARG" >&2 + echo "$usage" >&2 + exit 1 + ;; + esac +done + +echo -e "${green}Current environment parameters for ODL suite.${nc}" +# Following vars might be also specified as CLI params +set -x +ODL_IP=${ODL_IP:-'192.168.1.5'} +ODL_PORT=${ODL_PORT:-8081} +USR_NAME=${USR_NAME:-'neutron'} +PASS=${PASS:-'octopus'} +NEUTRON_IP=${NEUTRON_IP:-192.168.0.68} +KEYSTONE_IP=${KEYSTONE_IP:-192.168.0.69} +set +x + +init_file=${REPO_DIR}/test/csit/suites/openstack/neutron/__init__.robot +# Change openstack password for admin tenant in neutron suite +sed -i "s/\"password\": \".*\"/\"password\": \"${PASS}\"/" $init_file + +# Add Start Suite and Teardown Suite +if [[ ! 
`grep 'Suite Teardown' ${init_file}` ]]; then + sed -i "/^Documentation.*/a Suite Teardown Stop Suite" $init_file + sed -i "/^Documentation.*/a Suite Setup Start Suite" $init_file +fi + +# add custom tests to suite, if there are more custom tests needed this will be reworked +echo -e "${green}Copy custom tests to suite.${nc}" +cp -vf ${BASEDIR}/custom_tests/neutron/* ${REPO_DIR}/test/csit/suites/openstack/neutron/ + +# List of tests are specified in test_list.txt +# those are relative paths to test directories from integartion suite +echo -e "${green}Executing chosen tests.${nc}" +test_num=0 +while read line +do + # skip comments + [[ ${line:0:1} == "#" ]] && continue + # skip empty lines + [[ -z "${line}" ]] && continue + + ((test_num++)) + echo -e "${light_green}Starting test: $line ${nc}" + pybot -v OPENSTACK:${KEYSTONE_IP} -v NEUTRON:${NEUTRON_IP} -v PORT:${ODL_PORT} -v CONTROLLER:${ODL_IP} ${REPO_DIR}/$line + mkdir -p $RESULTS_DIR/logs/${test_num} + mv log.html $RESULTS_DIR/logs/${test_num}/ + mv report.html $RESULTS_DIR/logs/${test_num}/ + mv output.xml $RESULTS_DIR/logs/${test_num}/ +done < ${BASEDIR}/test_list.txt + +# create final report which includes all partial test reports +for i in $(seq $test_num); do + rebot_params="$rebot_params $RESULTS_DIR/logs/$i/output.xml" +done + +echo -e "${green}Final report is located:${nc}" +rebot $rebot_params diff --git a/testcases/Controllers/ODL/test_list.txt b/testcases/Controllers/ODL/test_list.txt new file mode 100644 index 000000000..ad791e553 --- /dev/null +++ b/testcases/Controllers/ODL/test_list.txt @@ -0,0 +1,5 @@ +# List of tests` which will be executed by script start_test.sh +# You can specify path to specific robot test file or directory (in that case all tests from directory will be executed) + +test/csit/suites/integration/basic/ +test/csit/suites/openstack/neutron/ diff --git a/testcases/Controllers/ONOS/Teston/CI/Readme.txt b/testcases/Controllers/ONOS/Teston/CI/Readme.txt deleted file mode 100644 index 7393f59a1..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/Readme.txt +++ /dev/null @@ -1,5 +0,0 @@ -1.This is a basic test run about onos,we will make them better and better -2.This test include two suites: -(1)Test northbound(network/subnet/ports create/update/delete) -(2)Ovsdb test,default configuration,openflow connection,vm go onlines. -3.Later we will make a framework to do this test \ No newline at end of file diff --git a/testcases/Controllers/ONOS/Teston/CI/__init__.py b/testcases/Controllers/ONOS/Teston/CI/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py b/testcases/Controllers/ONOS/Teston/CI/adapters/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/client.py b/testcases/Controllers/ONOS/Teston/CI/adapters/client.py deleted file mode 100644 index 77de092e4..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/client.py +++ /dev/null @@ -1,88 +0,0 @@ -""" -Description: - This file is used to run testcase - lanqinglong@huawei.com - -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -""" -from environment import environment -import time -import pexpect -import requests -import json - - -class client(environment): - - def __init__(self): - environment.__init__(self) - self.loginfo = environment() - self.testcase = '' - - def RunScript(self, handle, testname, timeout=300): - """ - Run ONOS Test Script - Parameters: - testname: ONOS Testcase Name - masterusername: The server username of running ONOS - masterpassword: The server password of running ONOS - """ - self.testcase = testname - self.ChangeTestCasePara(testname, self.masterusername, - self.masterpassword) - runhandle = handle - runtest = (self.home + "/OnosSystemTest/TestON/bin/cli.py run " + - testname) - runhandle.sendline(runtest) - circletime = 0 - lastshowscreeninfo = '' - while True: - Result = runhandle.expect(["PEXPECT]#", pexpect.EOF, - pexpect.TIMEOUT]) - curshowscreeninfo = runhandle.before - if(len(lastshowscreeninfo) != len(curshowscreeninfo)): - self.loginfo.log(str(curshowscreeninfo) - [len(lastshowscreeninfo)::]) - lastshowscreeninfo = curshowscreeninfo - if Result == 0: - print "Done!" - return - time.sleep(1) - circletime += 1 - if circletime > timeout: - break - self.loginfo.log("Timeout when running the test, please check!") - - def onosstart(self): - # This is the compass run machine user&pass,you need to modify - - print "Test Begin....." - self.OnosConnectionSet() - masterhandle = self.SSHlogin(self.localhost, self.masterusername, - self.masterpassword) - self.OnosEnvSetup(masterhandle) - return masterhandle - - def onosclean(self, handle): - self.SSHRelease(handle) - self.loginfo.log('Release onos handle Successful') - - def push_results_to_db(self, payload, pushornot=1): - if pushornot != 1: - return 1 - url = self.Result_DB + "/results" - params = {"project_name": "functest", "case_name": "ONOS-" + - self.testcase, "pod_name": 'huawei-build-2', - "details": payload} - - headers = {'Content-Type': 'application/json'} - try: - r = requests.post(url, data=json.dumps(params), headers=headers) - self.loginfo.log(r) - except: - self.loginfo.log('Error pushing results into Database') diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py b/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py deleted file mode 100644 index 16f2ef32c..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/connection.py +++ /dev/null @@ -1,196 +0,0 @@ -""" -Description: - This file is used to make connections - Include ssh & exchange public-key to each other so that - it can run without password - - lanqinglong@huawei.com - -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -""" -import os -import pexpect -import re -from foundation import foundation - - -class connection(foundation): - - def __init__(self): - foundation.__init__(self) - self.loginfo = foundation() - - def AddKnownHost(self, handle, ipaddr, username, password): - """ - Add an user to known host,so that onos can login in with onos $ipaddr. 
- parameters: - ipaddr: ip address - username: login user name - password: login password - """ - print("Now Adding an user to known hosts " + ipaddr) - login = handle - login.sendline("ssh -l %s -p 8101 %s" % (username, ipaddr)) - index = 0 - while index != 2: - index = login.expect(['assword:', 'yes/no', pexpect.EOF, - pexpect.TIMEOUT]) - if index == 0: - login.sendline(password) - login.sendline("logout") - index = login.expect(["closed", pexpect.EOF]) - if index == 0: - self.loginfo.log("Add SSH Known Host Success!") - break - else: - self.loginfo.log("Add SSH Known Host Failed! " - "Please Check!") - break - login.prompt() - - if index == 1: - login.sendline('yes') - - def GetEnvValue(self, handle, envname): - """ - os.getenv only returns current user value - GetEnvValue returns a environment value of - current handle - eg: GetEnvValue(handle,'HOME') - """ - envhandle = handle - envhandle.sendline('echo $' + envname) - envhandle.prompt() - reg = envname + '\r\n(.*)\r' - envaluereg = re.compile(reg) - envalue = envaluereg.search(envhandle.before) - if envalue: - return envalue.groups()[0] - else: - return None - - def Gensshkey(self, handle): - """ - Generate ssh keys, used for some server have no sshkey. - """ - print "Now Generating SSH keys..." - # Here file name may be id_rsa or id_ecdsa or others - # So here will have a judgement - keysub = handle - filepath = self.GetEnvValue(keysub, 'HOME') + '/.ssh' - filelist = os.listdir(filepath) - for item in filelist: - if 'id' in item: - self.loginfo.log("SSH keys are exsit in ssh directory.") - return True - keysub.sendline("ssh-keygen -t rsa") - Result = 0 - while Result != 2: - Result = keysub.expect(["Overwrite", "Enter", pexpect.EOF, - 'PEXPECT]#', pexpect.TIMEOUT]) - if Result == 0: - keysub.sendline("y") - if Result == 1 or Result == 2: - keysub.sendline("\n") - if Result == 3: - self.loginfo.log("Generate SSH key success.") - keysub.prompt() - break - if Result == 4: - self.loginfo.log("Generate SSH key failed.") - keysub.prompt() - break - - def GetRootAuth(self, password): - """ - Get root user - parameters: - password: root login password - """ - print("Now changing to user root") - login = pexpect.spawn("su - root") - index = 0 - while index != 2: - index = login.expect(['assword:', "failure", - pexpect.EOF, pexpect.TIMEOUT]) - if index == 0: - login.sendline(password) - if index == 1: - self.loginfo.log("Change user to root failed.") - - login.interact() - - def ReleaseRootAuth(self): - """ - Exit root user. - """ - print("Now Release user root") - login = pexpect.spawn("exit") - index = login.expect(['logout', pexpect.EOF, pexpect.TIMEOUT]) - if index == 0: - self.loginfo.log("Release root user success.") - if index == 1: - self.loginfo.log("Release root user failed.") - - login.interact() - - def AddEnvIntoBashrc(self, envalue): - """ - Add Env var into /etc/profile. 
- parameters: - envalue: environment value to add - """ - print "Now Adding bash environment" - fileopen = open("/etc/profile", 'r') - findContext = 1 - while findContext: - findContext = fileopen.readline() - result = findContext.find(envalue) - if result != -1: - break - fileopen.close - if result == -1: - envAdd = open("/etc/profile", 'a+') - envAdd.writelines("\n" + envalue) - envAdd.close() - self.loginfo.log("Add env to bashrc success!") - - def OnosRootPathChange(self, onospath): - """ - Change ONOS root path in file:bash_profile - onospath: path of onos root - """ - print "Now Changing ONOS Root Path" - filepath = onospath + 'onos/tools/dev/bash_profile' - line = open(filepath, 'r').readlines() - lenall = len(line) - 1 - for i in range(lenall): - if "export ONOS_ROOT" in line[i]: - line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n' - NewFile = open(filepath, 'w') - NewFile.writelines(line) - NewFile.close - print "Done!" - - def OnosConnectionSet(self): - """ - Intergrate for ONOS connection setup - """ - if self.masterusername == 'root': - filepath = '/root/' - else: - filepath = '/home/' + self.masterusername + '/' - filepath = os.path.join(filepath, "onos/tools/dev/bash_profile") - self.AddEnvIntoBashrc("source " + filepath + "\n") - self.AddEnvIntoBashrc("export OCT=" + self.OCT) - self.AddEnvIntoBashrc("export OC1=" + self.OC1) - self.AddEnvIntoBashrc("export OC2=" + self.OC2) - self.AddEnvIntoBashrc("export OC3=" + self.OC3) - self.AddEnvIntoBashrc("export OCN=" + self.OCN) - self.AddEnvIntoBashrc("export OCN2=" + self.OCN2) - self.AddEnvIntoBashrc("export localhost=" + self.localhost) diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py b/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py deleted file mode 100644 index 49f7f9632..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/environment.py +++ /dev/null @@ -1,281 +0,0 @@ -""" -Description: - This file is used to setup the running environment - Include Download code,setup environment variable - Set onos running config - Set user name/password - Onos-push-keys and so on - lanqinglong@huawei.com - -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -""" - -import os -import time -import pexpect -import re -import sys -import pxssh -from connection import connection - - -class environment(connection): - - def __init__(self): - connection.__init__(self) - self.loginfo = connection() - self.masterhandle = '' - self.home = '' - - def DownLoadCode(self, handle, codeurl): - """ - Download Code use 'git clone' - parameters: - handle: current working handle - codeurl: clone code url - """ - print "Now loading test codes! Please wait in patient..." 
- originalfolder = sys.path[0] - print originalfolder - gitclone = handle - gitclone.sendline("git clone " + codeurl) - index = 0 - # increment = 0 - while index != 1 or index != 4: - index = gitclone.expect(['already exists', - 'esolving deltas: 100%', - 'eceiving objects', - 'Already up-to-date', - 'npacking objects: 100%', pexpect.EOF]) - - filefolder = self.home + '/' + codeurl.split('/')[-1].split('.')[0] - if index == 0: - os.chdir(filefolder) - os.system('git pull') - os.chdir(originalfolder) - self.loginfo.log('Download code success!') - break - elif index == 1 or index == 4: - self.loginfo.log('Download code success!') - gitclone.sendline("mkdir onos") - gitclone.prompt() - gitclone.sendline("cp -rf " + filefolder + "/tools onos/") - gitclone.prompt() - break - elif index == 2: - os.write(1, gitclone.before) - sys.stdout.flush() - else: - self.loginfo.log('Download code failed!') - self.loginfo.log('Information before' + gitclone.before) - break - gitclone.prompt() - - def InstallDefaultSoftware(self, handle): - """ - Install default software - parameters: - handle(input): current working handle - """ - print "Now Cleaning test environment" - handle.sendline("sudo apt-get install -y mininet") - handle.prompt() - handle.sendline("sudo pip install configobj") - handle.prompt() - handle.sendline("sudo apt-get install -y sshpass") - handle.prompt() - handle.sendline("OnosSystemTest/TestON/bin/cleanup.sh") - handle.prompt() - time.sleep(5) - self.loginfo.log('Clean environment success!') - - def OnosPushKeys(self, handle, cmd, password): - """ - Using onos-push-keys to make ssh device without password - parameters: - handle(input): working handle - cmd(input): onos-push-keys xxx(xxx is device) - password(input): login in password - """ - print "Now Pushing Onos Keys:" + cmd - Pushkeys = handle - Pushkeys.sendline(cmd) - Result = 0 - while Result != 2: - Result = Pushkeys.expect(["(yes/no)", "assword:", "PEXPECT]#", - pexpect.EOF, pexpect.TIMEOUT]) - if(Result == 0): - Pushkeys.sendline("yes") - if(Result == 1): - Pushkeys.sendline(password) - if(Result == 2): - self.loginfo.log("ONOS Push keys Success!") - break - if(Result == 3): - self.loginfo.log("ONOS Push keys Error!") - break - time.sleep(2) - Pushkeys.prompt() - print "Done!" 
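OnosPushKeys above, like GetRootAuth and AddKnownHost in connection.py, is built on one pexpect idiom: send a command, then loop on expect() and answer whichever prompt appears until a terminal pattern matches. The following is a minimal, self-contained sketch of that idiom; the ssh-copy-id command and the host/user/password values are illustrative placeholders, not values taken from this code.

import pexpect


def push_key(host, user, password, timeout=60):
    # Drive an interactive command by answering its prompts one by one.
    child = pexpect.spawn("ssh-copy-id %s@%s" % (user, host), timeout=timeout)
    while True:
        index = child.expect(["(yes/no)", "assword:",
                              pexpect.EOF, pexpect.TIMEOUT])
        if index == 0:
            # First connection: accept the remote host key
            child.sendline("yes")
        elif index == 1:
            # Password prompt
            child.sendline(password)
        elif index == 2:
            # Command finished (EOF)
            return True
        else:
            # No recognisable prompt seen before the timeout
            return False

# Example with placeholder values: push_key("192.0.2.10", "karaf", "karaf")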
- - def SetOnosEnvVar(self, handle, masterpass, agentpass): - """ - Setup onos pushkeys to all devices(3+2) - parameters: - handle(input): current working handle - masterpass: scripts running server's password - agentpass: onos cluster&compute node password - """ - print "Now Setting test environment" - for host in self.hosts: - print "try to connect " + str(host) - result = self.CheckSshNoPasswd(host) - if not result: - print ("ssh lgin failed,try to copy master publickey" + - "to agent " + str(host)) - self.CopyPublicKey(host) - self.OnosPushKeys(handle, "onos-push-keys " + self.OCT, masterpass) - self.OnosPushKeys(handle, "onos-push-keys " + self.OC1, agentpass) - self.OnosPushKeys(handle, "onos-push-keys " + self.OC2, agentpass) - self.OnosPushKeys(handle, "onos-push-keys " + self.OC3, agentpass) - self.OnosPushKeys(handle, "onos-push-keys " + self.OCN, agentpass) - self.OnosPushKeys(handle, "onos-push-keys " + self.OCN2, agentpass) - - def CheckSshNoPasswd(self, host): - """ - Check master can connect agent with no password - """ - login = pexpect.spawn("ssh " + str(host)) - index = 4 - while index == 4: - index = login.expect(['(yes/no)', '>|#|\$', - pexpect.EOF, pexpect.TIMEOUT]) - if index == 0: - login.sendline("yes") - index = 4 - if index == 1: - self.loginfo.log("ssh connect to " + str(host) + - " success,no need to copy ssh public key") - return True - login.interact() - return False - - def ChangeOnosName(self, user, password): - """ - Change onos name in envDefault file - Because some command depend on this - parameters: - user: onos&compute node user - password: onos&compute node password - """ - print "Now Changing ONOS name&password" - filepath = self.home + '/onos/tools/build/envDefaults' - line = open(filepath, 'r').readlines() - lenall = len(line) - 1 - for i in range(lenall): - if "ONOS_USER=" in line[i]: - line[i] = line[i].replace("sdn", user) - if "ONOS_GROUP" in line[i]: - line[i] = line[i].replace("sdn", user) - if "ONOS_PWD" in line[i]: - line[i] = line[i].replace("rocks", password) - NewFile = open(filepath, 'w') - NewFile.writelines(line) - NewFile.close - print "Done!" - - def ChangeTestCasePara(self, testcase, user, password): - """ - When running test script, there's something need \ - to change in every test folder's *.param & *.topo files - user: onos&compute node user - password: onos&compute node password - """ - print "Now Changing " + testcase + " name&password" - if self.masterusername == 'root': - filepath = '/root/' - else: - filepath = '/home/' + self.masterusername + '/' - filepath = (filepath + "OnosSystemTest/TestON/tests/" + - testcase + "/" + testcase + ".topo") - line = open(filepath, 'r').readlines() - lenall = len(line) - 1 - for i in range(lenall - 2): - if("localhost" in line[i]) or ("OCT" in line[i]): - line[i + 1] = re.sub(">\w+", ">" + user, line[i + 1]) - line[i + 2] = re.sub(">\w+", ">" + password, line[i + 2]) - if ("OC1" in line[i] or "OC2" in line[i] or "OC3" in line[i] or - "OCN" in line[i] or "OCN2" in line[i]): - line[i + 1] = re.sub(">\w+", ">root", line[i + 1]) - line[i + 2] = re.sub(">\w+", ">root", line[i + 2]) - NewFile = open(filepath, 'w') - NewFile.writelines(line) - NewFile.close - - def SSHlogin(self, ipaddr, username, password): - """ - SSH login provide a connection to destination. 
- parameters: - ipaddr: ip address - username: login user name - password: login password - return: handle - """ - login = pxssh.pxssh() - login.login(ipaddr, username, password, original_prompt='[$#>]') - # send command ls -l - login.sendline('ls -l') - # match prompt - login.prompt() - print("SSH login " + ipaddr + " success!") - return login - - def SSHRelease(self, handle): - # Release ssh - handle.logout() - - def CopyOnostoTestbin(self): - sourcefile = self.cipath + '/dependencies/onos' - destifile = self.home + '/onos/tools/test/bin/' - os.system('pwd') - runcommand = 'cp ' + sourcefile + ' ' + destifile - os.system(runcommand) - - def CopyPublicKey(self, host): - output = os.popen('cat /root/.ssh/id_rsa.pub') - publickey = output.read().strip('\n') - tmphandle = self.SSHlogin(self.installer_master, - self.installer_master_username, - self.installer_master_password) - tmphandle.sendline("ssh " + host + " -T \'echo " + - str(publickey) + ">>/root/.ssh/authorized_keys\'") - tmphandle.prompt() - self.SSHRelease(tmphandle) - print "Add OCT PublicKey to " + host + " success" - - def OnosEnvSetup(self, handle): - """ - Onos Environment Setup function - """ - self.Gensshkey(handle) - self.home = self.GetEnvValue(handle, 'HOME') - self.AddKnownHost(handle, self.OC1, "karaf", "karaf") - self.AddKnownHost(handle, self.OC2, "karaf", "karaf") - self.AddKnownHost(handle, self.OC3, "karaf", "karaf") - self.DownLoadCode(handle, - 'https://github.com/sunyulin/OnosSystemTest.git') - # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos') - if self.masterusername == 'root': - filepath = '/root/' - else: - filepath = '/home/' + self.masterusername + '/' - self.OnosRootPathChange(filepath) - self.CopyOnostoTestbin() - self.ChangeOnosName(self.agentusername, self.agentpassword) - self.InstallDefaultSoftware(handle) - self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword) diff --git a/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py b/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py deleted file mode 100644 index 47605eb74..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/adapters/foundation.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Description: - This file include basis functions - lanqinglong@huawei.com - -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -""" - -import logging -import os -import time -import yaml -import re -import datetime - - -class foundation: - - def __init__(self): - - # currentpath = os.getcwd() - REPO_PATH = os.environ['repos_dir'] + '/functest/' - currentpath = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI' - self.cipath = currentpath - self.logdir = os.path.join(currentpath, 'log') - self.workhome = currentpath[0: currentpath.rfind('testcases') - 1] - self.Result_DB = '' - filename = time.strftime('%Y-%m-%d-%H-%M-%S') + '.log' - self.logfilepath = os.path.join(self.logdir, filename) - self.starttime = datetime.datetime.now() - - def log(self, loginfo): - """ - Record log in log directory for deploying test environment - parameters: - loginfo(input): record info - """ - logging.basicConfig(level=logging.INFO, - format='%(asctime)s %(filename)s:%(message)s', - datefmt='%d %b %Y %H:%M:%S', - filename=self.logfilepath, - filemode='w') - filelog = logging.FileHandler(self.logfilepath) - logging.getLogger('Functest').addHandler(filelog) - print loginfo - logging.info(loginfo) - - def getdefaultpara(self): - """ - Get Default Parameters value - """ - with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) - - self.Result_DB = str(functest_yaml.get("results").get("test_db_url")) - self.masterusername = str(functest_yaml.get("ONOS").get("general"). - get('onosbench_username')) - self.masterpassword = str(functest_yaml.get("ONOS").get("general"). - get("onosbench_password")) - self.agentusername = str(functest_yaml.get("ONOS").get("general"). - get("onoscli_username")) - self.agentpassword = str(functest_yaml.get("ONOS").get("general"). - get("onoscli_password")) - self.runtimeout = functest_yaml.get("ONOS").get("general").get( - "runtimeout") - self.OCT = str(functest_yaml.get("ONOS").get("environment").get("OCT")) - self.OC1 = str(functest_yaml.get("ONOS").get("environment").get("OC1")) - self.OC2 = str(functest_yaml.get("ONOS").get("environment").get("OC2")) - self.OC3 = str(functest_yaml.get("ONOS").get("environment").get("OC3")) - self.OCN = str(functest_yaml.get("ONOS").get("environment").get("OCN")) - self.OCN2 = str(functest_yaml.get("ONOS"). - get("environment").get("OCN2")) - self.installer_master = str(functest_yaml.get("ONOS"). - get("environment").get("installer_master")) - self.installer_master_username = str(functest_yaml.get("ONOS"). - get("environment"). - get("installer_master_username")) - self.installer_master_password = str(functest_yaml.get("ONOS"). - get("environment"). 
- get("installer_master_password")) - self.hosts = [self.OC1, self.OCN, self.OCN2] - self.localhost = self.OCT - - def GetResult(self): - cmd = "cat " + self.logfilepath + " | grep Fail" - Resultbuffer = os.popen(cmd).read() - duration = datetime.datetime.now() - self.starttime - time.sleep(2) - - if re.search("[1-9]+", Resultbuffer): - self.log("Testcase Fails\n" + Resultbuffer) - Result = "POK" - else: - self.log("Testcases Pass") - Result = "OK" - payload = {'timestart': str(self.starttime), - 'duration': str(duration), 'status': Result} - - return payload diff --git a/testcases/Controllers/ONOS/Teston/CI/dependencies/onos b/testcases/Controllers/ONOS/Teston/CI/dependencies/onos deleted file mode 100644 index bb02fa899..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/dependencies/onos +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# ----------------------------------------------------------------------------- -# ONOS remote command-line client. -# ----------------------------------------------------------------------------- -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# - -[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1 -. /root/.bashrc -. $ONOS_ROOT/tools/build/envDefaults -. $ONOS_ROOT/tools/test/bin/find-node.sh - -[ "$1" = "-w" ] && shift && onos-wait-for-start $1 - -[ -n "$1" ] && OCI=$(find_node $1) && shift - -if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then - # Use Karaf client only if we can and are allowed to - unset KARAF_HOME - client -h $OCI -u karaf "$@" 2>/dev/null -else - # Otherwise use raw ssh; strict checking is off for dev environments only - #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@" - sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@" -fi diff --git a/testcases/Controllers/ONOS/Teston/CI/log/gitignore b/testcases/Controllers/ONOS/Teston/CI/log/gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py b/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py deleted file mode 100644 index 1e278e6a1..000000000 --- a/testcases/Controllers/ONOS/Teston/CI/onosfunctest.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -Description: This test is to run onos Teston VTN scripts - -List of test cases: -CASE1 - Northbound NBI test network/subnet/ports -CASE2 - Ovsdb test&Default configuration&Vm go online - -lanqinglong@huawei.com -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -""" - -import argparse -import datetime -import os -import re -import time -import yaml - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils - -parser = argparse.ArgumentParser() -parser.add_argument("-i", "--installer", help="Installer type") -args = parser.parse_args() -""" logging configuration """ -logger = ft_logger.Logger("onos").getLogger() - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -# onos parameters -TEST_DB = functest_yaml.get("results").get("test_db_url") -ONOS_REPO_PATH = functest_yaml.get("general").get("directories").get( - "dir_repos") -ONOS_CONF_DIR = functest_yaml.get("general").get("directories").get( - "dir_functest_conf") -REPO_PATH = ONOS_REPO_PATH + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - -ONOSCI_PATH = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI/' -starttime = datetime.datetime.now() - -HOME = os.environ['HOME'] + "/" - - -def RunScript(testname): - """ - Run ONOS Test Script - Parameters: - testname: ONOS Testcase Name - """ - runtest = ONOSCI_PATH + "OnosSystemTest/TestON/bin/cli.py run " + testname - logger.debug("Run script " + testname) - os.system(runtest) - - -def DownloadCodes(url="https://github.com/sunyulin/OnosSystemTest.git"): - """ - Download Onos Teston codes - Parameters: - url: github url - """ - downloadcode = "git clone " + url + " " + ONOSCI_PATH + "OnosSystemTest" - logger.debug("Download Onos Teston codes " + url) - os.system(downloadcode) - - -def GetResult(): - LOGPATH = ONOSCI_PATH + "OnosSystemTest/TestON/logs" - cmd = "grep -rnh " + "Fail" + " " + LOGPATH - Resultbuffer = os.popen(cmd).read() - # duration = datetime.datetime.now() - starttime - time.sleep(2) - - if re.search("\s+[1-9]+\s+", Resultbuffer): - logger.debug("Testcase Fails\n" + Resultbuffer) - # Result = "Failed" - else: - logger.debug("Testcases Success") - # Result = "Success" - # payload={'timestart': str(starttime), - # 'duration': str(duration), - # 'status': Result} - cmd = "grep -rnh 'Execution Time' " + LOGPATH - Resultbuffer = os.popen(cmd).read() - time1 = Resultbuffer[114:128] - time2 = Resultbuffer[28:42] - cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNB_*" - Resultbuffer = os.popen(cmd).read() - if Resultbuffer.find('100%') >= 0: - result1 = 'Success' - else: - result1 = 'Failed' - cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNBL3*" - Resultbuffer = os.popen(cmd).read() - if Resultbuffer.find('100%') >= 0: - result2 = 'Success' - else: - result2 = 'Failed' - status1 = [] - status2 = [] - cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNB_*" - Resultbuffer = os.popen(cmd).read() - pattern = re.compile("

<h3>([^-]+) - ([^-]+) - (\S*)</h3>
") - # res = pattern.search(Resultbuffer).groups() - res = pattern.findall(Resultbuffer) - i = 0 - for index in range(len(res)): - status1.append({'Case name:': res[i][0] + res[i][1], - 'Case result': res[i][2]}) - i = i + 1 - cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNBL3*" - Resultbuffer = os.popen(cmd).read() - pattern = re.compile("

<h3>([^-]+) - ([^-]+) - (\S*)</h3>
") - # res = pattern.search(Resultbuffer).groups() - res = pattern.findall(Resultbuffer) - i = 0 - for index in range(len(res)): - status2.append({'Case name:': res[i][0] + res[i][1], - 'Case result': res[i][2]}) - i = i + 1 - payload = {'timestart': str(starttime), - 'FUNCvirNet': {'duration': time1, - 'result': result1, - 'status': status1}, - 'FUNCvirNetL3': {'duration': time2, - 'result': result2, - 'status': status2}} - return payload - - -def SetOnosIp(): - cmd = "openstack catalog show network | grep publicURL" - cmd_output = os.popen(cmd).read() - OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group() - os.environ['OC1'] = OC1 - time.sleep(2) - logger.debug("ONOS IP is " + OC1) - - -def SetOnosIpForJoid(): - cmd = "env | grep SDN_CONTROLLER" - cmd_output = os.popen(cmd).read() - OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group() - os.environ['OC1'] = OC1 - time.sleep(2) - logger.debug("ONOS IP is " + OC1) - - -def CleanOnosTest(): - TESTONPATH = ONOSCI_PATH + "OnosSystemTest/" - cmd = "rm -rf " + TESTONPATH - os.system(cmd) - time.sleep(2) - logger.debug("Clean ONOS Teston") - - -def main(): - - DownloadCodes() - if args.installer == "joid": - logger.debug("Installer is Joid") - SetOnosIpForJoid() - else: - SetOnosIp() - RunScript("FUNCvirNetNB") - RunScript("FUNCvirNetNBL3") - - try: - logger.debug("Push result into DB") - # TODO check path result for the file - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - result = GetResult() - - # ONOS success criteria = all tests OK - # i.e. FUNCvirNet & FUNCvirNetL3 - status = "failed" - try: - if (result['FUNCvirNet']['result'] == "Success" and - result['FUNCvirNetL3']['result'] == "Success"): - status = "passed" - except: - logger.error("Unable to set ONOS criteria") - - pod_name = functest_utils.get_pod_name(logger) - build_tag = functest_utils.get_build_tag(logger) - functest_utils.push_results_to_db(TEST_DB, - "functest", - "ONOS", - logger, pod_name, version, scenario, - status, build_tag, payload=result) - except: - logger.error("Error pushing results into Database") - - CleanOnosTest() - - -if __name__ == '__main__': - main() diff --git a/testcases/Controllers/ONOS/Teston/Readme.txt b/testcases/Controllers/ONOS/Teston/Readme.txt new file mode 100644 index 000000000..7393f59a1 --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/Readme.txt @@ -0,0 +1,5 @@ +1.This is a basic test run about onos,we will make them better and better +2.This test include two suites: +(1)Test northbound(network/subnet/ports create/update/delete) +(2)Ovsdb test,default configuration,openflow connection,vm go onlines. +3.Later we will make a framework to do this test \ No newline at end of file diff --git a/testcases/Controllers/ONOS/Teston/__init__.py b/testcases/Controllers/ONOS/Teston/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/testcases/Controllers/ONOS/Teston/adapters/__init__.py b/testcases/Controllers/ONOS/Teston/adapters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/testcases/Controllers/ONOS/Teston/adapters/client.py b/testcases/Controllers/ONOS/Teston/adapters/client.py new file mode 100644 index 000000000..77de092e4 --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/adapters/client.py @@ -0,0 +1,88 @@ +""" +Description: + This file is used to run testcase + lanqinglong@huawei.com + +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" +from environment import environment +import time +import pexpect +import requests +import json + + +class client(environment): + + def __init__(self): + environment.__init__(self) + self.loginfo = environment() + self.testcase = '' + + def RunScript(self, handle, testname, timeout=300): + """ + Run ONOS Test Script + Parameters: + testname: ONOS Testcase Name + masterusername: The server username of running ONOS + masterpassword: The server password of running ONOS + """ + self.testcase = testname + self.ChangeTestCasePara(testname, self.masterusername, + self.masterpassword) + runhandle = handle + runtest = (self.home + "/OnosSystemTest/TestON/bin/cli.py run " + + testname) + runhandle.sendline(runtest) + circletime = 0 + lastshowscreeninfo = '' + while True: + Result = runhandle.expect(["PEXPECT]#", pexpect.EOF, + pexpect.TIMEOUT]) + curshowscreeninfo = runhandle.before + if(len(lastshowscreeninfo) != len(curshowscreeninfo)): + self.loginfo.log(str(curshowscreeninfo) + [len(lastshowscreeninfo)::]) + lastshowscreeninfo = curshowscreeninfo + if Result == 0: + print "Done!" + return + time.sleep(1) + circletime += 1 + if circletime > timeout: + break + self.loginfo.log("Timeout when running the test, please check!") + + def onosstart(self): + # This is the compass run machine user&pass,you need to modify + + print "Test Begin....." + self.OnosConnectionSet() + masterhandle = self.SSHlogin(self.localhost, self.masterusername, + self.masterpassword) + self.OnosEnvSetup(masterhandle) + return masterhandle + + def onosclean(self, handle): + self.SSHRelease(handle) + self.loginfo.log('Release onos handle Successful') + + def push_results_to_db(self, payload, pushornot=1): + if pushornot != 1: + return 1 + url = self.Result_DB + "/results" + params = {"project_name": "functest", "case_name": "ONOS-" + + self.testcase, "pod_name": 'huawei-build-2', + "details": payload} + + headers = {'Content-Type': 'application/json'} + try: + r = requests.post(url, data=json.dumps(params), headers=headers) + self.loginfo.log(r) + except: + self.loginfo.log('Error pushing results into Database') diff --git a/testcases/Controllers/ONOS/Teston/adapters/connection.py b/testcases/Controllers/ONOS/Teston/adapters/connection.py new file mode 100644 index 000000000..16f2ef32c --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/adapters/connection.py @@ -0,0 +1,196 @@ +""" +Description: + This file is used to make connections + Include ssh & exchange public-key to each other so that + it can run without password + + lanqinglong@huawei.com + +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" +import os +import pexpect +import re +from foundation import foundation + + +class connection(foundation): + + def __init__(self): + foundation.__init__(self) + self.loginfo = foundation() + + def AddKnownHost(self, handle, ipaddr, username, password): + """ + Add an user to known host,so that onos can login in with onos $ipaddr. 
+ parameters: + ipaddr: ip address + username: login user name + password: login password + """ + print("Now Adding an user to known hosts " + ipaddr) + login = handle + login.sendline("ssh -l %s -p 8101 %s" % (username, ipaddr)) + index = 0 + while index != 2: + index = login.expect(['assword:', 'yes/no', pexpect.EOF, + pexpect.TIMEOUT]) + if index == 0: + login.sendline(password) + login.sendline("logout") + index = login.expect(["closed", pexpect.EOF]) + if index == 0: + self.loginfo.log("Add SSH Known Host Success!") + break + else: + self.loginfo.log("Add SSH Known Host Failed! " + "Please Check!") + break + login.prompt() + + if index == 1: + login.sendline('yes') + + def GetEnvValue(self, handle, envname): + """ + os.getenv only returns current user value + GetEnvValue returns a environment value of + current handle + eg: GetEnvValue(handle,'HOME') + """ + envhandle = handle + envhandle.sendline('echo $' + envname) + envhandle.prompt() + reg = envname + '\r\n(.*)\r' + envaluereg = re.compile(reg) + envalue = envaluereg.search(envhandle.before) + if envalue: + return envalue.groups()[0] + else: + return None + + def Gensshkey(self, handle): + """ + Generate ssh keys, used for some server have no sshkey. + """ + print "Now Generating SSH keys..." + # Here file name may be id_rsa or id_ecdsa or others + # So here will have a judgement + keysub = handle + filepath = self.GetEnvValue(keysub, 'HOME') + '/.ssh' + filelist = os.listdir(filepath) + for item in filelist: + if 'id' in item: + self.loginfo.log("SSH keys are exsit in ssh directory.") + return True + keysub.sendline("ssh-keygen -t rsa") + Result = 0 + while Result != 2: + Result = keysub.expect(["Overwrite", "Enter", pexpect.EOF, + 'PEXPECT]#', pexpect.TIMEOUT]) + if Result == 0: + keysub.sendline("y") + if Result == 1 or Result == 2: + keysub.sendline("\n") + if Result == 3: + self.loginfo.log("Generate SSH key success.") + keysub.prompt() + break + if Result == 4: + self.loginfo.log("Generate SSH key failed.") + keysub.prompt() + break + + def GetRootAuth(self, password): + """ + Get root user + parameters: + password: root login password + """ + print("Now changing to user root") + login = pexpect.spawn("su - root") + index = 0 + while index != 2: + index = login.expect(['assword:', "failure", + pexpect.EOF, pexpect.TIMEOUT]) + if index == 0: + login.sendline(password) + if index == 1: + self.loginfo.log("Change user to root failed.") + + login.interact() + + def ReleaseRootAuth(self): + """ + Exit root user. + """ + print("Now Release user root") + login = pexpect.spawn("exit") + index = login.expect(['logout', pexpect.EOF, pexpect.TIMEOUT]) + if index == 0: + self.loginfo.log("Release root user success.") + if index == 1: + self.loginfo.log("Release root user failed.") + + login.interact() + + def AddEnvIntoBashrc(self, envalue): + """ + Add Env var into /etc/profile. 
+ parameters: + envalue: environment value to add + """ + print "Now Adding bash environment" + fileopen = open("/etc/profile", 'r') + findContext = 1 + while findContext: + findContext = fileopen.readline() + result = findContext.find(envalue) + if result != -1: + break + fileopen.close + if result == -1: + envAdd = open("/etc/profile", 'a+') + envAdd.writelines("\n" + envalue) + envAdd.close() + self.loginfo.log("Add env to bashrc success!") + + def OnosRootPathChange(self, onospath): + """ + Change ONOS root path in file:bash_profile + onospath: path of onos root + """ + print "Now Changing ONOS Root Path" + filepath = onospath + 'onos/tools/dev/bash_profile' + line = open(filepath, 'r').readlines() + lenall = len(line) - 1 + for i in range(lenall): + if "export ONOS_ROOT" in line[i]: + line[i] = 'export ONOS_ROOT=' + onospath + 'onos\n' + NewFile = open(filepath, 'w') + NewFile.writelines(line) + NewFile.close + print "Done!" + + def OnosConnectionSet(self): + """ + Intergrate for ONOS connection setup + """ + if self.masterusername == 'root': + filepath = '/root/' + else: + filepath = '/home/' + self.masterusername + '/' + filepath = os.path.join(filepath, "onos/tools/dev/bash_profile") + self.AddEnvIntoBashrc("source " + filepath + "\n") + self.AddEnvIntoBashrc("export OCT=" + self.OCT) + self.AddEnvIntoBashrc("export OC1=" + self.OC1) + self.AddEnvIntoBashrc("export OC2=" + self.OC2) + self.AddEnvIntoBashrc("export OC3=" + self.OC3) + self.AddEnvIntoBashrc("export OCN=" + self.OCN) + self.AddEnvIntoBashrc("export OCN2=" + self.OCN2) + self.AddEnvIntoBashrc("export localhost=" + self.localhost) diff --git a/testcases/Controllers/ONOS/Teston/adapters/environment.py b/testcases/Controllers/ONOS/Teston/adapters/environment.py new file mode 100644 index 000000000..49f7f9632 --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/adapters/environment.py @@ -0,0 +1,281 @@ +""" +Description: + This file is used to setup the running environment + Include Download code,setup environment variable + Set onos running config + Set user name/password + Onos-push-keys and so on + lanqinglong@huawei.com + +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" + +import os +import time +import pexpect +import re +import sys +import pxssh +from connection import connection + + +class environment(connection): + + def __init__(self): + connection.__init__(self) + self.loginfo = connection() + self.masterhandle = '' + self.home = '' + + def DownLoadCode(self, handle, codeurl): + """ + Download Code use 'git clone' + parameters: + handle: current working handle + codeurl: clone code url + """ + print "Now loading test codes! Please wait in patient..." 
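        # Flow of the clone loop below: expect() returns the index of the first
        # pattern matched in the git output. Index 0 means the target folder
        # already exists, so the code falls back to 'git pull'; 1 or 4 mean the
        # clone completed and the repo's tools/ directory is copied into ./onos;
        # 2 only streams clone progress to stdout; any other match is logged as
        # a failure. The 'while index != 1 or index != 4' condition is always
        # true, so the loop exits only through the break statements.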
+ originalfolder = sys.path[0] + print originalfolder + gitclone = handle + gitclone.sendline("git clone " + codeurl) + index = 0 + # increment = 0 + while index != 1 or index != 4: + index = gitclone.expect(['already exists', + 'esolving deltas: 100%', + 'eceiving objects', + 'Already up-to-date', + 'npacking objects: 100%', pexpect.EOF]) + + filefolder = self.home + '/' + codeurl.split('/')[-1].split('.')[0] + if index == 0: + os.chdir(filefolder) + os.system('git pull') + os.chdir(originalfolder) + self.loginfo.log('Download code success!') + break + elif index == 1 or index == 4: + self.loginfo.log('Download code success!') + gitclone.sendline("mkdir onos") + gitclone.prompt() + gitclone.sendline("cp -rf " + filefolder + "/tools onos/") + gitclone.prompt() + break + elif index == 2: + os.write(1, gitclone.before) + sys.stdout.flush() + else: + self.loginfo.log('Download code failed!') + self.loginfo.log('Information before' + gitclone.before) + break + gitclone.prompt() + + def InstallDefaultSoftware(self, handle): + """ + Install default software + parameters: + handle(input): current working handle + """ + print "Now Cleaning test environment" + handle.sendline("sudo apt-get install -y mininet") + handle.prompt() + handle.sendline("sudo pip install configobj") + handle.prompt() + handle.sendline("sudo apt-get install -y sshpass") + handle.prompt() + handle.sendline("OnosSystemTest/TestON/bin/cleanup.sh") + handle.prompt() + time.sleep(5) + self.loginfo.log('Clean environment success!') + + def OnosPushKeys(self, handle, cmd, password): + """ + Using onos-push-keys to make ssh device without password + parameters: + handle(input): working handle + cmd(input): onos-push-keys xxx(xxx is device) + password(input): login in password + """ + print "Now Pushing Onos Keys:" + cmd + Pushkeys = handle + Pushkeys.sendline(cmd) + Result = 0 + while Result != 2: + Result = Pushkeys.expect(["(yes/no)", "assword:", "PEXPECT]#", + pexpect.EOF, pexpect.TIMEOUT]) + if(Result == 0): + Pushkeys.sendline("yes") + if(Result == 1): + Pushkeys.sendline(password) + if(Result == 2): + self.loginfo.log("ONOS Push keys Success!") + break + if(Result == 3): + self.loginfo.log("ONOS Push keys Error!") + break + time.sleep(2) + Pushkeys.prompt() + print "Done!" 
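SSHlogin further below wraps pxssh to obtain a prompt-synchronised shell on each node. Here is a small self-contained sketch of that login/command/logout cycle; the address and credentials are placeholders, not values from this code.

import pxssh


def run_remote(host, user, password, command='ls -l'):
    session = pxssh.pxssh()
    # Widen the prompt regex so root (#) and normal user ($, >) shells both match
    session.login(host, user, password, original_prompt='[$#>]')
    session.sendline(command)
    # prompt() blocks until the shell prompt is seen again
    session.prompt()
    output = session.before  # everything received before the prompt matched
    session.logout()
    return output

# Example with placeholder values: run_remote("192.0.2.20", "karaf", "karaf")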
+ + def SetOnosEnvVar(self, handle, masterpass, agentpass): + """ + Setup onos pushkeys to all devices(3+2) + parameters: + handle(input): current working handle + masterpass: scripts running server's password + agentpass: onos cluster&compute node password + """ + print "Now Setting test environment" + for host in self.hosts: + print "try to connect " + str(host) + result = self.CheckSshNoPasswd(host) + if not result: + print ("ssh lgin failed,try to copy master publickey" + + "to agent " + str(host)) + self.CopyPublicKey(host) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCT, masterpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC1, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC2, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OC3, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCN, agentpass) + self.OnosPushKeys(handle, "onos-push-keys " + self.OCN2, agentpass) + + def CheckSshNoPasswd(self, host): + """ + Check master can connect agent with no password + """ + login = pexpect.spawn("ssh " + str(host)) + index = 4 + while index == 4: + index = login.expect(['(yes/no)', '>|#|\$', + pexpect.EOF, pexpect.TIMEOUT]) + if index == 0: + login.sendline("yes") + index = 4 + if index == 1: + self.loginfo.log("ssh connect to " + str(host) + + " success,no need to copy ssh public key") + return True + login.interact() + return False + + def ChangeOnosName(self, user, password): + """ + Change onos name in envDefault file + Because some command depend on this + parameters: + user: onos&compute node user + password: onos&compute node password + """ + print "Now Changing ONOS name&password" + filepath = self.home + '/onos/tools/build/envDefaults' + line = open(filepath, 'r').readlines() + lenall = len(line) - 1 + for i in range(lenall): + if "ONOS_USER=" in line[i]: + line[i] = line[i].replace("sdn", user) + if "ONOS_GROUP" in line[i]: + line[i] = line[i].replace("sdn", user) + if "ONOS_PWD" in line[i]: + line[i] = line[i].replace("rocks", password) + NewFile = open(filepath, 'w') + NewFile.writelines(line) + NewFile.close + print "Done!" + + def ChangeTestCasePara(self, testcase, user, password): + """ + When running test script, there's something need \ + to change in every test folder's *.param & *.topo files + user: onos&compute node user + password: onos&compute node password + """ + print "Now Changing " + testcase + " name&password" + if self.masterusername == 'root': + filepath = '/root/' + else: + filepath = '/home/' + self.masterusername + '/' + filepath = (filepath + "OnosSystemTest/TestON/tests/" + + testcase + "/" + testcase + ".topo") + line = open(filepath, 'r').readlines() + lenall = len(line) - 1 + for i in range(lenall - 2): + if("localhost" in line[i]) or ("OCT" in line[i]): + line[i + 1] = re.sub(">\w+", ">" + user, line[i + 1]) + line[i + 2] = re.sub(">\w+", ">" + password, line[i + 2]) + if ("OC1" in line[i] or "OC2" in line[i] or "OC3" in line[i] or + "OCN" in line[i] or "OCN2" in line[i]): + line[i + 1] = re.sub(">\w+", ">root", line[i + 1]) + line[i + 2] = re.sub(">\w+", ">root", line[i + 2]) + NewFile = open(filepath, 'w') + NewFile.writelines(line) + NewFile.close + + def SSHlogin(self, ipaddr, username, password): + """ + SSH login provide a connection to destination. 
+ parameters: + ipaddr: ip address + username: login user name + password: login password + return: handle + """ + login = pxssh.pxssh() + login.login(ipaddr, username, password, original_prompt='[$#>]') + # send command ls -l + login.sendline('ls -l') + # match prompt + login.prompt() + print("SSH login " + ipaddr + " success!") + return login + + def SSHRelease(self, handle): + # Release ssh + handle.logout() + + def CopyOnostoTestbin(self): + sourcefile = self.cipath + '/dependencies/onos' + destifile = self.home + '/onos/tools/test/bin/' + os.system('pwd') + runcommand = 'cp ' + sourcefile + ' ' + destifile + os.system(runcommand) + + def CopyPublicKey(self, host): + output = os.popen('cat /root/.ssh/id_rsa.pub') + publickey = output.read().strip('\n') + tmphandle = self.SSHlogin(self.installer_master, + self.installer_master_username, + self.installer_master_password) + tmphandle.sendline("ssh " + host + " -T \'echo " + + str(publickey) + ">>/root/.ssh/authorized_keys\'") + tmphandle.prompt() + self.SSHRelease(tmphandle) + print "Add OCT PublicKey to " + host + " success" + + def OnosEnvSetup(self, handle): + """ + Onos Environment Setup function + """ + self.Gensshkey(handle) + self.home = self.GetEnvValue(handle, 'HOME') + self.AddKnownHost(handle, self.OC1, "karaf", "karaf") + self.AddKnownHost(handle, self.OC2, "karaf", "karaf") + self.AddKnownHost(handle, self.OC3, "karaf", "karaf") + self.DownLoadCode(handle, + 'https://github.com/sunyulin/OnosSystemTest.git') + # self.DownLoadCode(handle, 'https://gerrit.onosproject.org/onos') + if self.masterusername == 'root': + filepath = '/root/' + else: + filepath = '/home/' + self.masterusername + '/' + self.OnosRootPathChange(filepath) + self.CopyOnostoTestbin() + self.ChangeOnosName(self.agentusername, self.agentpassword) + self.InstallDefaultSoftware(handle) + self.SetOnosEnvVar(handle, self.masterpassword, self.agentpassword) diff --git a/testcases/Controllers/ONOS/Teston/adapters/foundation.py b/testcases/Controllers/ONOS/Teston/adapters/foundation.py new file mode 100644 index 000000000..47605eb74 --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/adapters/foundation.py @@ -0,0 +1,104 @@ +""" +Description: + This file include basis functions + lanqinglong@huawei.com + +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" + +import logging +import os +import time +import yaml +import re +import datetime + + +class foundation: + + def __init__(self): + + # currentpath = os.getcwd() + REPO_PATH = os.environ['repos_dir'] + '/functest/' + currentpath = REPO_PATH + 'testcases/Controllers/ONOS/Teston/CI' + self.cipath = currentpath + self.logdir = os.path.join(currentpath, 'log') + self.workhome = currentpath[0: currentpath.rfind('testcases') - 1] + self.Result_DB = '' + filename = time.strftime('%Y-%m-%d-%H-%M-%S') + '.log' + self.logfilepath = os.path.join(self.logdir, filename) + self.starttime = datetime.datetime.now() + + def log(self, loginfo): + """ + Record log in log directory for deploying test environment + parameters: + loginfo(input): record info + """ + logging.basicConfig(level=logging.INFO, + format='%(asctime)s %(filename)s:%(message)s', + datefmt='%d %b %Y %H:%M:%S', + filename=self.logfilepath, + filemode='w') + filelog = logging.FileHandler(self.logfilepath) + logging.getLogger('Functest').addHandler(filelog) + print loginfo + logging.info(loginfo) + + def getdefaultpara(self): + """ + Get Default Parameters value + """ + with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) + + self.Result_DB = str(functest_yaml.get("results").get("test_db_url")) + self.masterusername = str(functest_yaml.get("ONOS").get("general"). + get('onosbench_username')) + self.masterpassword = str(functest_yaml.get("ONOS").get("general"). + get("onosbench_password")) + self.agentusername = str(functest_yaml.get("ONOS").get("general"). + get("onoscli_username")) + self.agentpassword = str(functest_yaml.get("ONOS").get("general"). + get("onoscli_password")) + self.runtimeout = functest_yaml.get("ONOS").get("general").get( + "runtimeout") + self.OCT = str(functest_yaml.get("ONOS").get("environment").get("OCT")) + self.OC1 = str(functest_yaml.get("ONOS").get("environment").get("OC1")) + self.OC2 = str(functest_yaml.get("ONOS").get("environment").get("OC2")) + self.OC3 = str(functest_yaml.get("ONOS").get("environment").get("OC3")) + self.OCN = str(functest_yaml.get("ONOS").get("environment").get("OCN")) + self.OCN2 = str(functest_yaml.get("ONOS"). + get("environment").get("OCN2")) + self.installer_master = str(functest_yaml.get("ONOS"). + get("environment").get("installer_master")) + self.installer_master_username = str(functest_yaml.get("ONOS"). + get("environment"). + get("installer_master_username")) + self.installer_master_password = str(functest_yaml.get("ONOS"). + get("environment"). 
+ get("installer_master_password")) + self.hosts = [self.OC1, self.OCN, self.OCN2] + self.localhost = self.OCT + + def GetResult(self): + cmd = "cat " + self.logfilepath + " | grep Fail" + Resultbuffer = os.popen(cmd).read() + duration = datetime.datetime.now() - self.starttime + time.sleep(2) + + if re.search("[1-9]+", Resultbuffer): + self.log("Testcase Fails\n" + Resultbuffer) + Result = "POK" + else: + self.log("Testcases Pass") + Result = "OK" + payload = {'timestart': str(self.starttime), + 'duration': str(duration), 'status': Result} + + return payload diff --git a/testcases/Controllers/ONOS/Teston/dependencies/onos b/testcases/Controllers/ONOS/Teston/dependencies/onos new file mode 100644 index 000000000..bb02fa899 --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/dependencies/onos @@ -0,0 +1,29 @@ +#!/bin/bash +# ----------------------------------------------------------------------------- +# ONOS remote command-line client. +# ----------------------------------------------------------------------------- +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# + +[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1 +. /root/.bashrc +. $ONOS_ROOT/tools/build/envDefaults +. $ONOS_ROOT/tools/test/bin/find-node.sh + +[ "$1" = "-w" ] && shift && onos-wait-for-start $1 + +[ -n "$1" ] && OCI=$(find_node $1) && shift + +if which client 1>/dev/null 2>&1 && [ -z "$ONOS_USE_SSH" ]; then + # Use Karaf client only if we can and are allowed to + unset KARAF_HOME + client -h $OCI -u karaf "$@" 2>/dev/null +else + # Otherwise use raw ssh; strict checking is off for dev environments only + #ssh -p 8101 -o StrictHostKeyChecking=no $OCI "$@" + sshpass -p karaf ssh -l karaf -p 8101 $OCI "$@" +fi diff --git a/testcases/Controllers/ONOS/Teston/log/gitignore b/testcases/Controllers/ONOS/Teston/log/gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/testcases/Controllers/ONOS/Teston/onosfunctest.py b/testcases/Controllers/ONOS/Teston/onosfunctest.py new file mode 100644 index 000000000..07ecacc5d --- /dev/null +++ b/testcases/Controllers/ONOS/Teston/onosfunctest.py @@ -0,0 +1,208 @@ +""" +Description: This test is to run onos Teston VTN scripts + +List of test cases: +CASE1 - Northbound NBI test network/subnet/ports +CASE2 - Ovsdb test&Default configuration&Vm go online + +lanqinglong@huawei.com +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" + +import argparse +import datetime +import os +import re +import time +import yaml + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as functest_utils + +parser = argparse.ArgumentParser() +parser.add_argument("-i", "--installer", help="Installer type") +args = parser.parse_args() +""" logging configuration """ +logger = ft_logger.Logger("onos").getLogger() + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() + +# onos parameters +TEST_DB = functest_yaml.get("results").get("test_db_url") +ONOS_REPO_PATH = functest_yaml.get("general").get("directories").get( + "dir_repos") +ONOS_CONF_DIR = functest_yaml.get("general").get("directories").get( + "dir_functest_conf") +REPO_PATH = ONOS_REPO_PATH + '/functest/' +if not os.path.exists(REPO_PATH): + logger.error("Functest repository directory not found '%s'" % REPO_PATH) + exit(-1) + +ONOSCI_PATH = REPO_PATH + 'testcases/Controllers/ONOS/Teston/' +starttime = datetime.datetime.now() + +HOME = os.environ['HOME'] + "/" + + +def RunScript(testname): + """ + Run ONOS Test Script + Parameters: + testname: ONOS Testcase Name + """ + runtest = ONOSCI_PATH + "OnosSystemTest/TestON/bin/cli.py run " + testname + logger.debug("Run script " + testname) + os.system(runtest) + + +def DownloadCodes(url="https://github.com/sunyulin/OnosSystemTest.git"): + """ + Download Onos Teston codes + Parameters: + url: github url + """ + downloadcode = "git clone " + url + " " + ONOSCI_PATH + "OnosSystemTest" + logger.debug("Download Onos Teston codes " + url) + os.system(downloadcode) + + +def GetResult(): + LOGPATH = ONOSCI_PATH + "OnosSystemTest/TestON/logs" + cmd = "grep -rnh " + "Fail" + " " + LOGPATH + Resultbuffer = os.popen(cmd).read() + # duration = datetime.datetime.now() - starttime + time.sleep(2) + + if re.search("\s+[1-9]+\s+", Resultbuffer): + logger.debug("Testcase Fails\n" + Resultbuffer) + # Result = "Failed" + else: + logger.debug("Testcases Success") + # Result = "Success" + # payload={'timestart': str(starttime), + # 'duration': str(duration), + # 'status': Result} + cmd = "grep -rnh 'Execution Time' " + LOGPATH + Resultbuffer = os.popen(cmd).read() + time1 = Resultbuffer[114:128] + time2 = Resultbuffer[28:42] + cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNB_*" + Resultbuffer = os.popen(cmd).read() + if Resultbuffer.find('100%') >= 0: + result1 = 'Success' + else: + result1 = 'Failed' + cmd = "grep -rnh 'Success Percentage' " + LOGPATH + "/FUNCvirNetNBL3*" + Resultbuffer = os.popen(cmd).read() + if Resultbuffer.find('100%') >= 0: + result2 = 'Success' + else: + result2 = 'Failed' + status1 = [] + status2 = [] + cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNB_*" + Resultbuffer = os.popen(cmd).read() + pattern = re.compile("

<h3>([^-]+) - ([^-]+) - (\S*)</h3>
") + # res = pattern.search(Resultbuffer).groups() + res = pattern.findall(Resultbuffer) + i = 0 + for index in range(len(res)): + status1.append({'Case name:': res[i][0] + res[i][1], + 'Case result': res[i][2]}) + i = i + 1 + cmd = "grep -rnh 'h3' " + LOGPATH + "/FUNCvirNetNBL3*" + Resultbuffer = os.popen(cmd).read() + pattern = re.compile("

<h3>([^-]+) - ([^-]+) - (\S*)</h3>
") + # res = pattern.search(Resultbuffer).groups() + res = pattern.findall(Resultbuffer) + i = 0 + for index in range(len(res)): + status2.append({'Case name:': res[i][0] + res[i][1], + 'Case result': res[i][2]}) + i = i + 1 + payload = {'timestart': str(starttime), + 'FUNCvirNet': {'duration': time1, + 'result': result1, + 'status': status1}, + 'FUNCvirNetL3': {'duration': time2, + 'result': result2, + 'status': status2}} + return payload + + +def SetOnosIp(): + cmd = "openstack catalog show network | grep publicURL" + cmd_output = os.popen(cmd).read() + OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group() + os.environ['OC1'] = OC1 + time.sleep(2) + logger.debug("ONOS IP is " + OC1) + + +def SetOnosIpForJoid(): + cmd = "env | grep SDN_CONTROLLER" + cmd_output = os.popen(cmd).read() + OC1 = re.search(r"\d+\.\d+\.\d+\.\d+", cmd_output).group() + os.environ['OC1'] = OC1 + time.sleep(2) + logger.debug("ONOS IP is " + OC1) + + +def CleanOnosTest(): + TESTONPATH = ONOSCI_PATH + "OnosSystemTest/" + cmd = "rm -rf " + TESTONPATH + os.system(cmd) + time.sleep(2) + logger.debug("Clean ONOS Teston") + + +def main(): + + DownloadCodes() + if args.installer == "joid": + logger.debug("Installer is Joid") + SetOnosIpForJoid() + else: + SetOnosIp() + RunScript("FUNCvirNetNB") + RunScript("FUNCvirNetNBL3") + + try: + logger.debug("Push result into DB") + # TODO check path result for the file + scenario = functest_utils.get_scenario(logger) + version = functest_utils.get_version(logger) + result = GetResult() + + # ONOS success criteria = all tests OK + # i.e. FUNCvirNet & FUNCvirNetL3 + status = "failed" + try: + if (result['FUNCvirNet']['result'] == "Success" and + result['FUNCvirNetL3']['result'] == "Success"): + status = "passed" + except: + logger.error("Unable to set ONOS criteria") + + pod_name = functest_utils.get_pod_name(logger) + build_tag = functest_utils.get_build_tag(logger) + functest_utils.push_results_to_db(TEST_DB, + "functest", + "ONOS", + logger, pod_name, version, scenario, + status, build_tag, payload=result) + except: + logger.error("Error pushing results into Database") + + CleanOnosTest() + + +if __name__ == '__main__': + main() diff --git a/testcases/OpenStack/healthcheck/healthcheck.sh b/testcases/OpenStack/healthcheck/healthcheck.sh new file mode 100755 index 000000000..611c100c5 --- /dev/null +++ b/testcases/OpenStack/healthcheck/healthcheck.sh @@ -0,0 +1,208 @@ +# +# OpenStack Health Check +# This script is meant for really basic API operations on OpenStack +# Services tested: Keystone, Glance, Cinder, Neutron, Nova +# +# +# Author: +# jose.lausuch@ericsson.com +# +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# + +set -e + +#Redirect all the output (stdout) to a log file and show only possible errors. +LOG_FILE=/home/opnfv/functest/results/healthcheck.log +echo "">$LOG_FILE +exec 1<>$LOG_FILE + +info () { + echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - INFO - " "$*" | tee -a $LOG_FILE 1>&2 +} + +debug () { + if [[ "${CI_DEBUG,,}" == "true" ]]; then + echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - DEBUG - " "$*" | tee -a $LOG_FILE 1>&2 + fi +} + +error () { + echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - ERROR - " "$*" | tee -a $LOG_FILE 1>&2 + exit 1 +} + +if [ -z $OS_AUTH_URL ]; then + echo "Source credentials first." 
+ exit 1 +fi + + +echo "Using following credentials:" +env | grep OS + +## Variables: +project_1="opnfv-tenant1" +project_2="opnfv-tenant2" +user_1="opnfv_user1" +user_2="opnfv_user2" +user_3="opnfv_user3" +user_4="opnfv_user4" +user_5="opnfv_user5" +user_6="opnfv_user6" +image_1="opnfv-image1" +image_2="opnfv-image2" +volume_1="opnfv-volume1" +volume_2="opnfv-volume2" +net_1="opnfv-network1" +net_2="opnfv-network2" +subnet_1="opnfv-subnet1" +subnet_2="opnfv-subnet2" +port_1="opnfv-port1" +port_2="opnfv-port2" +router_1="opnfv-router1" +router_2="opnfv-router2" +instance_1="opnfv-instance1" +instance_2="opnfv-instance2" +instance_3="opnfv-instance3" +instance_4="opnfv-instance4" + + + +function wait_for_ip() { + # $1 is the instance name + # $2 is the first octet of the subnet ip + timeout=60 + while [[ ${timeout} > 0 ]]; do + if [[ $(nova console-log $1|grep "No lease, failing") ]]; then + error "The instance $1 couldn't get an IP from the DHCP agent." | tee -a $LOG_FILE 1>&2 + exit 1 + elif [[ $(nova console-log $1|grep "^Lease"|grep "obtained") ]]; then + debug "The instance $1 got an IP successfully from the DHCP agent." | tee -a $LOG_FILE 1>&2 + break + fi + let timeout=timeout-1 + sleep 1 + done +} + + +################################# +info "Testing Keystone API..." | tee -a $LOG_FILE 1>&2 +################################# +openstack project create ${project_1} +debug "project '${project_1}' created." +openstack project create ${project_2} +debug "project '${project_2}' created." +openstack user create ${user_1} --project ${project_1} +debug "user '${user_1}' created in project ${project_1}." +openstack user create ${user_2} --project ${project_1} +debug "user '${user_2}' created in project ${project_1}." +openstack user create ${user_3} --project ${project_1} +debug "user '${user_3}' created in project ${project_1}." +openstack user create ${user_4} --project ${project_2} +debug "user '${user_4}' created in project ${project_2}." +openstack user create ${user_5} --project ${project_2} +debug "user '${user_5}' created in project ${project_2}." +openstack user create ${user_6} --project ${project_2} +debug "user '${user_6}' created in project ${project_2}." +info "...Keystone OK!" + +################################# +info "Testing Glance API..." +################################# +image=/home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img +glance image-create --name ${image_1} --disk-format qcow2 --container-format bare < ${image} +debug "image '${image_1}' created." +glance image-create --name ${image_2} --disk-format qcow2 --container-format bare < ${image} +debug "image '${image_2}' created." +info "... Glance OK!" + +################################# +info "Testing Cinder API..." +################################# +cinder create --display_name ${volume_1} 1 +debug "volume '${volume_1}' created." +cinder create --display_name ${volume_2} 10 +debug "volume '${volume_2}' created." +info "...Cinder OK!" + +################################# +info "Testing Neutron API..." +################################# + +network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}')) +for id in ${network_ids[@]}; do + [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id} +done +if [[ "${ext_net_id}" == "" ]]; then + error "No external network found. Exiting Health Check..." + exit 1 +else + info "External network found. ${ext_net_id}" +fi + +info "1. Create Networks..." +neutron net-create ${net_1} +debug "net '${net_1}' created." 
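+# Two of everything (network, subnet, router) are created on purpose: the Nova
+# section further down boots two instances on each network, so DHCP and routing
+# are exercised on both L3 segments, not just one.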
+neutron net-create ${net_2} +debug "net '${net_2}' created." +net1_id=$(neutron net-list | grep ${net_1} | awk '{print $2}') +net2_id=$(neutron net-list | grep ${net_2} | awk '{print $2}') + +info "2. Create subnets..." +neutron subnet-create --name ${subnet_1} --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 ${net_1} 10.6.0.0/24 +debug "subnet '${subnet_1}' created." +neutron subnet-create --name ${subnet_2} --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 ${net_2} 10.7.0.0/24 +debug "subnet '${subnet_2}' created." + +info "4. Create Routers..." +neutron router-create ${router_1} +debug "router '${router_1}' created." +neutron router-create ${router_2} +debug "router '${router_2}' created." + +neutron router-gateway-set ${router_1} ${ext_net_id} +debug "router '${router_1}' gateway set to ${ext_net_id}." +neutron router-gateway-set ${router_2} ${ext_net_id} +debug "router '${router_2}' gateway set to ${ext_net_id}." + +neutron router-interface-add ${router_1} ${subnet_1} +debug "router '${router_1}' interface added ${subnet_1}." +neutron router-interface-add ${router_2} ${subnet_2} +debug "router '${router_2}' interface added ${subnet_2}." + +info "...Neutron OK!" + +################################# +info "Testing Nova API..." +################################# + +nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_1} +debug "nova instance '${instance_1}' booted on ${net_1}." +nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_2} +debug "nova instance '${instance_2}' booted on ${net_1}." +nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_3} +debug "nova instance '${instance_3}' booted on ${net_2}." +nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_4} +debug "nova instance '${instance_4}' booted on ${net_2}." + +vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}') +vm2_id=$(nova list | grep ${instance_2} | awk '{print $2}') +vm3_id=$(nova list | grep ${instance_3} | awk '{print $2}') +vm4_id=$(nova list | grep ${instance_4} | awk '{print $2}') +info "...Nova OK!" + +info "Checking if instances get an IP from DHCP..." +wait_for_ip ${instance_1} "10.6" +wait_for_ip ${instance_2} "10.6" +wait_for_ip ${instance_3} "10.7" +wait_for_ip ${instance_4} "10.7" +info "...DHCP OK!" + +info "Health check passed!" 
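+# Note: wait_for_ip() only inspects the console log of the instance passed as
+# $1; its second argument (the subnet prefix) is accepted but not used yet.
+# Also, no teardown is performed here: the projects, users, images, volumes,
+# networks, routers and instances created above are left in place and have to
+# be cleaned up outside this script.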
+exit 0 diff --git a/testcases/OpenStack/rally/macro/macro.yaml b/testcases/OpenStack/rally/macro/macro.yaml new file mode 100644 index 000000000..48c0333e9 --- /dev/null +++ b/testcases/OpenStack/rally/macro/macro.yaml @@ -0,0 +1,97 @@ +{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%} +{%- if use_existing_users and caller is not defined -%} {} +{%- else %} + {%- if not use_existing_users %} + users: + tenants: {{ tenants }} + users_per_tenant: {{ users_per_tenant }} + {%- endif %} + {%- if caller is defined %} + {{ caller() }} + {%- endif %} +{%- endif %} +{%- endmacro %} + +{%- macro vm_params(image=none, flavor=none, size=none) %} +{%- if flavor is not none %} + flavor: + name: {{ flavor }} +{%- endif %} +{%- if image is not none %} + image: + name: {{ image }} +{%- endif %} +{%- if size is not none %} + size: {{ size }} +{%- endif %} +{%- endmacro %} + +{%- macro unlimited_volumes() %} + cinder: + gigabytes: -1 + snapshots: -1 + volumes: -1 +{%- endmacro %} + +{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %} + type: "constant" + {%- if is_smoke %} + concurrency: 1 + times: 1 + {%- else %} + concurrency: {{ concurrency }} + times: {{ times }} + {%- endif %} +{%- endmacro %} + +{%- macro rps_runner(rps=1, times=1, is_smoke=True) %} + type: rps + {%- if is_smoke %} + rps: 1 + times: 1 + {%- else %} + rps: {{ rps }} + times: {{ times }} + {%- endif %} +{%- endmacro %} + +{%- macro no_failures_sla() %} + failure_rate: + max: 0 +{%- endmacro %} + +{%- macro volumes(size=1, volumes_per_tenant=1) %} + volumes: + size: {{ size }} + volumes_per_tenant: {{ volumes_per_tenant }} +{%- endmacro %} + +{%- macro unlimited_nova(keypairs=false) %} + nova: + cores: -1 + floating_ips: -1 + instances: -1 + {%- if keypairs %} + key_pairs: -1 + {%- endif %} + ram: -1 + security_group_rules: -1 + security_groups: -1 +{%- endmacro %} + +{%- macro unlimited_neutron(secgroups=false) %} + neutron: + network: -1 + port: -1 + subnet: -1 + {%- if secgroups %} + security_group: -1 + security_group_rule: -1 + {%- endif %} +{%- endmacro %} + +{%- macro glance_args(location, container="bare", type="qcow2") %} + container_format: {{ container }} + disk_format: {{ type }} + image_location: {{ location }} +{%- endmacro %} diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py new file mode 100755 index 000000000..c3dd304ac --- /dev/null +++ b/testcases/OpenStack/rally/run_rally-cert.py @@ -0,0 +1,560 @@ +#!/usr/bin/env python +# +# Copyright (c) 2015 Orange +# guyrodrigue.koffi@orange.com +# morgan.richomme@orange.com +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1 (05/2015) initial commit +# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite +# 0.3 (19/10/2015) remove Tempest from run_rally +# and push result into test DB +# +import argparse +import iniparse +import json +import os +import re +import requests +import subprocess +import time +import yaml + +from novaclient import client as novaclient +from glanceclient import client as glanceclient +from keystoneclient.v2_0 import client as keystoneclient +from neutronclient.v2_0 import client as neutronclient +from cinderclient import client as cinderclient + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as functest_utils +import functest.utils.openstack_utils as openstack_utils + +""" tests configuration """ +tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', + 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all'] +parser = argparse.ArgumentParser() +parser.add_argument("test_name", + help="Module name to be tested. " + "Possible values are : " + "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | " + "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | " + "{d[10]} ] " + "The 'all' value " + "performs all possible test scenarios" + .format(d=tests)) + +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") +parser.add_argument("-s", "--smoke", + help="Smoke test mode", + action="store_true") +parser.add_argument("-v", "--verbose", + help="Print verbose info about the progress", + action="store_true") +parser.add_argument("-n", "--noclean", + help="Don't clean the created resources for this test.", + action="store_true") +parser.add_argument("-z", "--sanity", + help="Sanity test mode, execute only a subset of tests", + action="store_true") + +args = parser.parse_args() + +client_dict = {} +network_dict = {} + +if args.verbose: + RALLY_STDERR = subprocess.STDOUT +else: + RALLY_STDERR = open(os.devnull, 'w') + +""" logging configuration """ +logger = ft_logger.Logger("run_rally").getLogger() + +REPO_PATH = os.environ['repos_dir'] + '/functest/' +if not os.path.exists(REPO_PATH): + logger.error("Functest repository directory not found '%s'" % REPO_PATH) + exit(-1) + + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() + +HOME = os.environ['HOME'] + "/" +RALLY_DIR = REPO_PATH + functest_yaml.get("general").get( + "directories").get("dir_rally") +TEMPLATE_DIR = RALLY_DIR + "scenario/templates" +SUPPORT_DIR = RALLY_DIR + "scenario/support" + +FLAVOR_NAME = "m1.tiny" +USERS_AMOUNT = 2 +TENANTS_AMOUNT = 3 +ITERATIONS_AMOUNT = 10 +CONCURRENCY = 4 + +RESULTS_DIR = functest_yaml.get("general").get("directories").get( + "dir_rally_res") +TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get( + "dir_results") + '/tempest/tempest.conf' +TEST_DB = functest_yaml.get("results").get("test_db_url") + +PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name") +PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name") +PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr") +ROUTER_NAME = functest_yaml.get("rally").get("router_name") + +GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get( + 
"image_name") +GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get( + "image_file_name") +GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get( + "image_disk_format") +GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( + "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME + +CINDER_VOLUME_TYPE_NAME = "volume_test" + + +SUMMARY = [] + + +def push_results_to_db(case, payload, criteria): + + url = TEST_DB + "/results" + installer = functest_utils.get_installer_type(logger) + scenario = functest_utils.get_scenario(logger) + version = functest_utils.get_version(logger) + pod_name = functest_utils.get_pod_name(logger) + + # evalutate success criteria + + params = {"project_name": "functest", "case_name": case, + "pod_name": pod_name, "installer": installer, + "version": version, "scenario": scenario, + "criteria": criteria, "details": payload} + + headers = {'Content-Type': 'application/json'} + r = requests.post(url, data=json.dumps(params), headers=headers) + logger.debug(r) + + +def get_task_id(cmd_raw): + """ + get task id from command rally result + :param cmd_raw: + :return: task_id as string + """ + taskid_re = re.compile('^Task +(.*): started$') + for line in cmd_raw.splitlines(True): + line = line.strip() + match = taskid_re.match(line) + if match: + return match.group(1) + return None + + +def task_succeed(json_raw): + """ + Parse JSON from rally JSON results + :param json_raw: + :return: Bool + """ + rally_report = json.loads(json_raw) + for report in rally_report: + if report is None or report.get('result') is None: + return False + + for result in report.get('result'): + if result is None or len(result.get('error')) > 0: + return False + + return True + + +def live_migration_supported(): + config = iniparse.ConfigParser() + if (config.read(TEMPEST_CONF_FILE) and + config.has_section('compute-feature-enabled') and + config.has_option('compute-feature-enabled', 'live_migration')): + return config.getboolean('compute-feature-enabled', 'live_migration') + + return False + + +def build_task_args(test_file_name): + task_args = {'service_list': [test_file_name]} + task_args['image_name'] = GLANCE_IMAGE_NAME + task_args['flavor_name'] = FLAVOR_NAME + task_args['glance_image_location'] = GLANCE_IMAGE_PATH + task_args['tmpl_dir'] = TEMPLATE_DIR + task_args['sup_dir'] = SUPPORT_DIR + task_args['users_amount'] = USERS_AMOUNT + task_args['tenants_amount'] = TENANTS_AMOUNT + task_args['iterations'] = ITERATIONS_AMOUNT + task_args['concurrency'] = CONCURRENCY + + if args.sanity: + task_args['full_mode'] = False + task_args['smoke'] = True + else: + task_args['full_mode'] = True + task_args['smoke'] = args.smoke + + ext_net = openstack_utils.get_external_net(client_dict['neutron']) + if ext_net: + task_args['floating_network'] = str(ext_net) + else: + task_args['floating_network'] = '' + + net_id = network_dict['net_id'] + task_args['netid'] = str(net_id) + task_args['live_migration'] = live_migration_supported() + + return task_args + + +def get_output(proc, test_name): + global SUMMARY + result = "" + nb_tests = 0 + overall_duration = 0.0 + success = 0.0 + nb_totals = 0 + + while proc.poll() is None: + line = proc.stdout.readline() + if args.verbose: + result += line + else: + if ("Load duration" in line or + "started" in line or + "finished" in line or + " Preparing" in line or + "+-" in line or + "|" in line): + result += line + elif "test scenario" in line: + result += "\n" + line + elif "Full duration" in line: + result += line + 
"\n\n" + + # parse output for summary report + if ("| " in line and + "| action" not in line and + "| Starting" not in line and + "| Completed" not in line and + "| ITER" not in line and + "| " not in line and + "| total" not in line): + nb_tests += 1 + elif "| total" in line: + percentage = ((line.split('|')[8]).strip(' ')).strip('%') + try: + success += float(percentage) + except ValueError: + logger.info('Percentage error: %s, %s' % (percentage, line)) + nb_totals += 1 + elif "Full duration" in line: + duration = line.split(': ')[1] + try: + overall_duration += float(duration) + except ValueError: + logger.info('Duration error: %s, %s' % (duration, line)) + + overall_duration = "{:10.2f}".format(overall_duration) + if nb_totals == 0: + success_avg = 0 + else: + success_avg = "{:0.2f}".format(success / nb_totals) + + scenario_summary = {'test_name': test_name, + 'overall_duration': overall_duration, + 'nb_tests': nb_tests, + 'success': success_avg} + SUMMARY.append(scenario_summary) + + logger.info("\n" + result) + + return result + + +def get_cmd_output(proc): + result = "" + + while proc.poll() is None: + line = proc.stdout.readline() + result += line + + return result + + +def run_task(test_name): + # + # the "main" function of the script who launch rally for a task + # :param test_name: name for the rally test + # :return: void + # + global SUMMARY + logger.info('Starting test scenario "{}" ...'.format(test_name)) + + task_file = '{}task.yaml'.format(RALLY_DIR) + if not os.path.exists(task_file): + logger.error("Task file '%s' does not exist." % task_file) + exit(-1) + + test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/", + test_name) + if not os.path.exists(test_file_name): + logger.error("The scenario '%s' does not exist." % test_file_name) + exit(-1) + + logger.debug('Scenario fetched from : {}'.format(test_file_name)) + + cmd_line = ("rally task start --abort-on-sla-failure " + + "--task {} ".format(task_file) + + "--task-args \"{}\" ".format(build_task_args(test_name))) + logger.debug('running command line : {}'.format(cmd_line)) + + p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, + stderr=RALLY_STDERR, shell=True) + output = get_output(p, test_name) + task_id = get_task_id(output) + logger.debug('task_id : {}'.format(task_id)) + + if task_id is None: + logger.error('Failed to retrieve task_id, validating task...') + cmd_line = ("rally task validate " + + "--task {} ".format(task_file) + + "--task-args \"{}\" ".format(build_task_args(test_name))) + logger.debug('running command line : {}'.format(cmd_line)) + p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=True) + output = get_cmd_output(p) + logger.error("Task validation result:" + "\n" + output) + return + + # check for result directory and create it otherwise + if not os.path.exists(RESULTS_DIR): + logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR)) + os.makedirs(RESULTS_DIR) + + # write html report file + report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name) + cmd_line = "rally task report {} --out {}".format(task_id, + report_file_name) + + logger.debug('running command line : {}'.format(cmd_line)) + os.popen(cmd_line) + + # get and save rally operation JSON result + cmd_line = "rally task results %s" % task_id + logger.debug('running command line : {}'.format(cmd_line)) + cmd = os.popen(cmd_line) + json_results = cmd.read() + with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f: + logger.debug('saving json file') + 
f.write(json_results) + + with open('{}opnfv-{}.json' + .format(RESULTS_DIR, test_name)) as json_file: + json_data = json.load(json_file) + + """ parse JSON operation result """ + status = "failed" + if task_succeed(json_results): + logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n") + status = "passed" + else: + logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n") + + # Push results in payload of testcase + if args.report: + logger.debug("Push result into DB") + push_results_to_db("Rally_details", json_data, status) + + +def main(): + global SUMMARY + global network_dict + # configure script + if not (args.test_name in tests): + logger.error('argument not valid') + exit(-1) + + SUMMARY = [] + creds_nova = openstack_utils.get_credentials("nova") + nova_client = novaclient.Client('2', **creds_nova) + creds_neutron = openstack_utils.get_credentials("neutron") + neutron_client = neutronclient.Client(**creds_neutron) + creds_keystone = openstack_utils.get_credentials("keystone") + keystone_client = keystoneclient.Client(**creds_keystone) + glance_endpoint = keystone_client.service_catalog.url_for( + service_type='image', endpoint_type='publicURL') + glance_client = glanceclient.Client(1, glance_endpoint, + token=keystone_client.auth_token) + creds_cinder = openstack_utils.get_credentials("cinder") + cinder_client = cinderclient.Client('2', creds_cinder['username'], + creds_cinder['api_key'], + creds_cinder['project_id'], + creds_cinder['auth_url'], + service_type="volume") + + client_dict['neutron'] = neutron_client + + volume_types = openstack_utils.list_volume_types(cinder_client, + private=False) + if not volume_types: + volume_type = openstack_utils.create_volume_type( + cinder_client, CINDER_VOLUME_TYPE_NAME) + if not volume_type: + logger.error("Failed to create volume type...") + exit(-1) + else: + logger.debug("Volume type '%s' created succesfully..." + % CINDER_VOLUME_TYPE_NAME) + else: + logger.debug("Using existing volume type(s)...") + + image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) + image_exists = False + + if image_id == '': + logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH)) + image_id = openstack_utils.create_glance_image(glance_client, + GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH) + if not image_id: + logger.error("Failed to create the Glance image...") + exit(-1) + else: + logger.debug("Image '%s' with ID '%s' created succesfully ." + % (GLANCE_IMAGE_NAME, image_id)) + else: + logger.debug("Using existing image '%s' with ID '%s'..." + % (GLANCE_IMAGE_NAME, image_id)) + image_exists = True + + logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME) + network_dict = openstack_utils.create_network_full(logger, + client_dict['neutron'], + PRIVATE_NET_NAME, + PRIVATE_SUBNET_NAME, + ROUTER_NAME, + PRIVATE_SUBNET_CIDR) + if not network_dict: + logger.error("Failed to create network...") + exit(-1) + else: + if not openstack_utils.update_neutron_net(client_dict['neutron'], + network_dict['net_id'], + shared=True): + logger.error("Failed to update network...") + exit(-1) + else: + logger.debug("Network '%s' available..." 
% PRIVATE_NET_NAME) + + if args.test_name == "all": + for test_name in tests: + if not (test_name == 'all' or + test_name == 'vm'): + run_task(test_name) + else: + logger.debug("Test name: " + args.test_name) + run_task(args.test_name) + + report = ("\n" + " " + "\n" + " Rally Summary Report\n" + "\n" + "+===================+============+===============+===========+" + "\n" + "| Module | Duration | nb. Test Run | Success |" + "\n" + "+===================+============+===============+===========+" + "\n") + payload = [] + + # for each scenario we draw a row for the table + total_duration = 0.0 + total_nb_tests = 0 + total_success = 0.0 + for s in SUMMARY: + name = "{0:<17}".format(s['test_name']) + duration = float(s['overall_duration']) + total_duration += duration + duration = time.strftime("%M:%S", time.gmtime(duration)) + duration = "{0:<10}".format(duration) + nb_tests = "{0:<13}".format(s['nb_tests']) + total_nb_tests += int(s['nb_tests']) + success = "{0:<10}".format(str(s['success']) + '%') + total_success += float(s['success']) + report += ("" + + "| " + name + " | " + duration + " | " + + nb_tests + " | " + success + "|\n" + + "+-------------------+------------" + "+---------------+-----------+\n") + payload.append({'module': name, + 'details': {'duration': s['overall_duration'], + 'nb tests': s['nb_tests'], + 'success': s['success']}}) + + total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration)) + total_duration_str2 = "{0:<10}".format(total_duration_str) + total_nb_tests_str = "{0:<13}".format(total_nb_tests) + total_success = "{:0.2f}".format(total_success / len(SUMMARY)) + total_success_str = "{0:<10}".format(str(total_success) + '%') + report += "+===================+============+===============+===========+" + report += "\n" + report += ("| TOTAL: | " + total_duration_str2 + " | " + + total_nb_tests_str + " | " + total_success_str + "|\n") + report += "+===================+============+===============+===========+" + report += "\n" + + logger.info("\n" + report) + payload.append({'summary': {'duration': total_duration, + 'nb tests': total_nb_tests, + 'nb success': total_success}}) + + # Generate json results for DB + # json_results = {"timestart": time_start, "duration": total_duration, + # "tests": int(total_nb_tests), + # "success": int(total_success)} + # logger.info("Results: "+str(json_results)) + + # Evaluation of the success criteria + status = "failed" + # for Rally we decided that the overall success rate must be above 90% + if total_success >= 90: + status = "passed" + + if args.report: + logger.debug("Pushing Rally summary into DB...") + push_results_to_db("Rally", payload, status) + + if args.noclean: + exit(0) + + if not image_exists: + logger.debug("Deleting image '%s' with ID '%s'..." + % (GLANCE_IMAGE_NAME, image_id)) + if not openstack_utils.delete_glance_image(nova_client, image_id): + logger.error("Error deleting the glance image") + + if not volume_types: + logger.debug("Deleting volume type '%s'..." 
+ % CINDER_VOLUME_TYPE_NAME) + if not openstack_utils.delete_volume_type(cinder_client, volume_type): + logger.error("Error in deleting volume type...") + + +if __name__ == '__main__': + main() diff --git a/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml b/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml new file mode 100644 index 000000000..a04e4c1c1 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-authenticate.yaml @@ -0,0 +1,63 @@ + Authenticate.keystone: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Authenticate.validate_cinder: + - + args: + repetitions: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Authenticate.validate_glance: + - + args: + repetitions: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Authenticate.validate_heat: + - + args: + repetitions: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Authenticate.validate_neutron: + - + args: + repetitions: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Authenticate.validate_nova: + - + args: + repetitions: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-cinder.yaml b/testcases/OpenStack/rally/scenario/opnfv-cinder.yaml new file mode 100644 index 000000000..cb28ee84e --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-cinder.yaml @@ -0,0 +1,272 @@ +{# all scenarios included only in full mode #} + +{% if full_mode %} + + CinderVolumes.create_and_attach_volume: + - + args: + {{ vm_params(image_name,flavor_name,1) }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_and_list_snapshots: + - + args: + detailed: true + force: false + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {{ volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_and_list_volume: + - + args: + detailed: true + {{ vm_params(image_name,none,1) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + 
detailed: true + size: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_and_upload_volume_to_image: + - + args: + container_format: "bare" + disk_format: "raw" + do_delete: true + force: false + size: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_nested_snapshots_and_attach_volume: + - + args: + nested_level: 1 + size: + max: 1 + min: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + servers: + {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} + servers_per_tenant: 1 + auto_assign_nic: true + network: {} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_snapshot_and_attach_volume: + - + args: + volume_type: false + size: + min: 1 + max: 5 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + servers: + {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} + servers_per_tenant: 2 + auto_assign_nic: true + network: {} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + volume_type: true + size: + min: 1 + max: 5 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + servers: + {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} + servers_per_tenant: 2 + auto_assign_nic: true + network: {} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_volume: + - + args: + size: 1 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + - + args: + size: + min: 1 + max: 5 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.list_volumes: + - + args: + detailed: True + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + volumes: + size: 1 + volumes_per_tenant: 4 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% endif %} + + CinderVolumes.create_and_delete_snapshot: + - + args: + force: false + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {{ volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_and_delete_volume: + - + args: + size: + 
max: 1 + min: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + {{ vm_params(image_name,none,1) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + size: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_and_extend_volume: + - + args: + new_size: 2 + size: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + CinderVolumes.create_from_volume_and_delete_volume: + - + args: + size: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_volumes() }} + {{ volumes() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-glance.yaml b/testcases/OpenStack/rally/scenario/opnfv-glance.yaml new file mode 100644 index 000000000..adbf8b79a --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-glance.yaml @@ -0,0 +1,49 @@ + GlanceImages.create_and_delete_image: + - + args: + {{ glance_args(location=glance_image_location) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + GlanceImages.create_and_list_image: + - + args: + {{ glance_args(location=glance_image_location) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + GlanceImages.list_images: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + GlanceImages.create_image_and_boot_instances: + - + args: + {{ glance_args(location=glance_image_location) }} + flavor: + name: {{ flavor_name }} + number_instances: 2 + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + quotas: + {{ unlimited_nova() }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + diff --git a/testcases/OpenStack/rally/scenario/opnfv-heat.yaml b/testcases/OpenStack/rally/scenario/opnfv-heat.yaml new file mode 100644 index 000000000..534d796ea --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-heat.yaml @@ -0,0 +1,160 @@ +{# all scenarios included only in full mode #} + +{% if full_mode %} + + HeatStacks.create_and_delete_stack: + - + args: + 
template_path: "{{ tmpl_dir }}/default.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template" + parameters: + public_net: {{ floating_network }} + image: {{ image_name }} + flavor: {{ flavor_name }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template" + parameters: + image: {{ image_name }} + flavor: {{ flavor_name }} + network_id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + HeatStacks.create_and_list_stack: + - + args: + template_path: "{{ tmpl_dir }}/default.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + HeatStacks.create_update_delete_stack: + - + args: + template_path: "{{ tmpl_dir }}/random_strings.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/random_strings.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/resource_group.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/resource_group.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + - + args: + template_path: "{{ tmpl_dir }}/random_strings.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, 
times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% else %} + + HeatStacks.create_update_delete_stack: + - + args: + template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" + updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% endif %} + + HeatStacks.create_check_delete_stack: + - + args: + template_path: "{{ tmpl_dir }}/random_strings.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + HeatStacks.create_suspend_resume_delete_stack: + - + args: + template_path: "{{ tmpl_dir }}/random_strings.yaml.template" + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + HeatStacks.list_stacks_and_resources: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml b/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml new file mode 100644 index 000000000..bfc9948b3 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-keystone.yaml @@ -0,0 +1,92 @@ + KeystoneBasic.add_and_remove_user_role: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_add_and_list_user_roles: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_and_list_tenants: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_and_delete_role: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_and_delete_service: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.get_entities: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_update_and_delete_tenant: + - + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_user: + - + runner: + {{ 
constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_tenant: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_and_list_users: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + KeystoneBasic.create_tenant_with_users: + - + args: + users_per_tenant: 10 + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-neutron.yaml b/testcases/OpenStack/rally/scenario/opnfv-neutron.yaml new file mode 100644 index 000000000..3804d2589 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-neutron.yaml @@ -0,0 +1,245 @@ +{# all scenarios included only in full mode #} + +{% if full_mode %} + + NeutronNetworks.create_and_update_networks: + - + args: + network_create_args: {} + network_update_args: + admin_state_up: false + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + neutron: + network: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_update_ports: + - + args: + network_create_args: {} + port_create_args: {} + port_update_args: + admin_state_up: false + device_id: "dummy_id" + device_owner: "dummy_owner" + ports_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + port: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_update_routers: + - + args: + network_create_args: {} + router_create_args: {} + router_update_args: + admin_state_up: false + subnet_cidr_start: "1.1.0.0/30" + subnet_create_args: {} + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + port: -1 + router: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_update_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: "1.4.0.0/16" + subnet_create_args: {} + subnet_update_args: + enable_dhcp: false + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% endif %} + + NeutronNetworks.create_and_delete_networks: + - + args: + network_create_args: {} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + neutron: + network: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_delete_ports: + - + args: + network_create_args: {} + port_create_args: {} + ports_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + 
quotas: + neutron: + network: -1 + port: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_delete_routers: + - + args: + network_create_args: {} + router_create_args: {} + subnet_cidr_start: "1.1.0.0/30" + subnet_create_args: {} + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + port: -1 + router: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_delete_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: "1.1.0.0/30" + subnet_create_args: {} + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_list_networks: + - + args: + network_create_args: {} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + neutron: + network: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_list_ports: + - + args: + network_create_args: {} + port_create_args: {} + ports_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + port: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_list_routers: + - + args: + network_create_args: {} + router_create_args: {} + subnet_cidr_start: "1.1.0.0/30" + subnet_create_args: {} + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + router: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NeutronNetworks.create_and_list_subnets: + - + args: + network_create_args: {} + subnet_cidr_start: "1.1.0.0/30" + subnet_create_args: {} + subnets_per_network: 1 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + quotas: + neutron: + network: -1 + subnet: -1 + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-nova.yaml b/testcases/OpenStack/rally/scenario/opnfv-nova.yaml new file mode 100644 index 000000000..f0fed8ef4 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-nova.yaml @@ -0,0 +1,378 @@ +{# all scenarios included only in full mode #} + +{% if full_mode %} + + NovaKeypair.create_and_delete_keypair: + - + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_nova(keypairs=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + 
NovaKeypair.create_and_list_keypairs: + - + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_nova(keypairs=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_and_bounce_server: + - + args: + actions: + - + hard_reboot: 1 + - + soft_reboot: 1 + - + stop_start: 1 + - + rescue_unrescue: 1 + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_and_delete_server: + - + args: + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_and_list_server: + - + args: + detailed: true + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_and_rebuild_server: + - + args: + {{ vm_params(flavor=flavor_name) }} + from_image: + name: {{ image_name }} + to_image: + name: {{ image_name }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.snapshot_server: + - + args: + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_server_from_volume: + - + args: + {{ vm_params(image_name, flavor_name) }} + volume_size: 10 + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_server: + - + args: + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, 
is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaSecGroup.create_and_delete_secgroups: + - + args: + security_group_count: 10 + rules_per_security_group: 10 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_neutron(secgroups=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaSecGroup.create_and_list_secgroups: + - + args: + security_group_count: 10 + rules_per_security_group: 10 + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + quotas: + {{ unlimited_neutron(secgroups=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.list_servers: + - + args: + detailed: True + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + servers: + {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} + servers_per_tenant: 2 + auto_assign_nic: true + network: {} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.resize_server: + - + args: + {{ vm_params(image_name, flavor_name) }} + to_flavor: + name: "m1.small" + confirm: true + force_delete: false + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% if live_migration %} + + NovaServers.boot_and_live_migrate_server: + - args: + {{ vm_params(image_name, flavor_name) }} + block_migration: false + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_server_attach_created_volume_and_live_migrate: + - + args: + {{ vm_params(image_name, flavor_name) }} + size: 10 + block_migration: false + boot_server_kwargs: + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_server_from_volume_and_live_migrate: + - args: + {{ vm_params(image_name, flavor_name) }} + block_migration: false + volume_size: 10 + force_delete: false + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + +{% endif %} +{% endif %} + + NovaKeypair.boot_and_delete_server_with_keypair: + - + args: + {{ vm_params(image_name, flavor_name) }} + server_kwargs: + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova(keypairs=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_server_from_volume_and_delete: + - + args: + {{ vm_params(image_name, flavor_name) }} + 
volume_size: 5 + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_volumes() }} + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.pause_and_unpause_server: + - + args: + {{ vm_params(image_name, flavor_name) }} + force_delete: false + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + networks_per_tenant: 1 + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_neutron() }} + {{ unlimited_nova() }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaSecGroup.boot_and_delete_server_with_secgroups: + - + args: + {{ vm_params(image_name, flavor_name) }} + security_group_count: 10 + rules_per_security_group: 10 + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: + start_cidr: "100.1.0.0/25" + quotas: + {{ unlimited_nova() }} + {{ unlimited_neutron(secgroups=true) }} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + NovaServers.boot_and_migrate_server: + - args: + {{ vm_params(image_name, flavor_name) }} + nics: + - net-id: {{ netid }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml b/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml new file mode 100644 index 000000000..a0682acce --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-quotas.yaml @@ -0,0 +1,54 @@ + Quotas.cinder_update_and_delete: + - + args: + max_quota: 1024 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Quotas.cinder_update: + - + args: + max_quota: 1024 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Quotas.neutron_update: + - + args: + max_quota: 1024 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Quotas.nova_update_and_delete: + - + args: + max_quota: 1024 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + Quotas.nova_update: + - + args: + max_quota: 1024 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-requests.yaml b/testcases/OpenStack/rally/scenario/opnfv-requests.yaml new file 
mode 100644 index 000000000..6affcc6c6 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-requests.yaml @@ -0,0 +1,28 @@ + HttpRequests.check_random_request: + - + args: + requests: + - + url: "http://www.example.com" + method: "GET" + status_code: 200 + - + url: "http://www.openstack.org" + method: "GET" + status_code: 200 + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + HttpRequests.check_request: + - + args: + url: "http://www.example.com" + method: "GET" + status_code: 200 + allow_redirects: False + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/opnfv-smoke.yaml b/testcases/OpenStack/rally/scenario/opnfv-smoke.yaml new file mode 100644 index 000000000..f102edb2b --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-smoke.yaml @@ -0,0 +1,268 @@ + TempestScenario.list_of_tests: + - + args: + tempest_conf: /etc/tempest/tempest.conf + test_names: + - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor + - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors + - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail + - tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image + - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image + - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images + - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail + - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create + - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr + - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id + - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted + - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list + - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete + - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete + - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups + - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups + - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip + - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces + - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers + - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail + - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details + - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers + - 
tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail + - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details + - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard + - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft + - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server + - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm + - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped + - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses + - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network + - tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance + - tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas + - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas + - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas + - tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete + - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_create + - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_delete + - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_get + - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_list + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_create + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_delete + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_get + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_list + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_create + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_delete + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_get + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_list + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_create + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_delete + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_get + - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_list + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_create + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_delete + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_get + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_list + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_job_binary_get_data + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_create + - 
tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_delete + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_get + - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_list + - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_create + - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_delete + - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get + - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get_data + - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_list + - tempest.api.data_processing.test_jobs.JobTest.test_job_create + - tempest.api.data_processing.test_jobs.JobTest.test_job_delete + - tempest.api.data_processing.test_jobs.JobTest.test_job_get + - tempest.api.data_processing.test_jobs.JobTest.test_job_list + - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_create + - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_delete + - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_get + - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_list + - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_get + - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_list + - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_compare_db_flavors_with_os + - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_get_db_flavor + - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_list_db_flavors + - tempest.api.database.limits.test_limits.DatabaseLimitsTest.test_absolute_limits + - tempest.api.database.versions.test_versions.DatabaseVersionsTest.test_list_db_versions + - tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services + - tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user + - tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete + - tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain + - tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint + - tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete + - tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy + - tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id + - tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list + - tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service + - tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all + - tempest.api.messaging.test_claims.TestClaims.test_post_claim + - tempest.api.messaging.test_claims.TestClaims.test_query_claim + - tempest.api.messaging.test_claims.TestClaims.test_release_claim + - tempest.api.messaging.test_claims.TestClaims.test_update_claim + - tempest.api.messaging.test_messages.TestMessages.test_delete_multiple_messages + - 
tempest.api.messaging.test_messages.TestMessages.test_delete_single_message + - tempest.api.messaging.test_messages.TestMessages.test_get_message + - tempest.api.messaging.test_messages.TestMessages.test_get_multiple_messages + - tempest.api.messaging.test_messages.TestMessages.test_list_messages + - tempest.api.messaging.test_messages.TestMessages.test_post_messages + - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_existence + - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_head + - tempest.api.messaging.test_queues.TestManageQueue.test_get_queue_stats + - tempest.api.messaging.test_queues.TestManageQueue.test_list_queues + - tempest.api.messaging.test_queues.TestManageQueue.test_set_and_get_queue_metadata + - tempest.api.messaging.test_queues.TestQueues.test_create_delete_queue + - tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions + - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address + - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip + - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network + - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port + - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet + - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network + - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port + - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network + - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network + - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet + - tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet + - tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility + - tempest.api.network.test_networks.NetworksTestJSON.test_list_networks + - tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets + - tempest.api.network.test_networks.NetworksTestJSON.test_show_network + - tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet + - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools + - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups + - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port + - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports + - 
tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port + - tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools + - tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups + - tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port + - tempest.api.network.test_ports.PortsTestJSON.test_list_ports + - tempest.api.network.test_ports.PortsTestJSON.test_show_port + - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces + - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id + - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id + - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router + - tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces + - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id + - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id + - tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router + - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group + - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule + - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups + - tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group + - tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule + - tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups + - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota + - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object + - tempest.api.object_storage.test_account_services.AccountTest.test_list_account_metadata + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_end_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_json + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_xml + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_end_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker_and_end_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker_and_end_marker + - tempest.api.object_storage.test_account_services.AccountTest.test_list_extensions + - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_account_metadata + - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_containers + - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_and_delete_metadata + - 
tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_matadata_key + - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_metadata + - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata + - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata_key + - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_read_object_with_rights + - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_write_object_with_rights + - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_large_object + - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects + - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_overwrite + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_key + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_key + - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_value + - tempest.api.object_storage.test_container_services.ContainerTest.test_delete_container + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_delimiter + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_end_marker + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_json + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_xml + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_limit + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_marker + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_no_object + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_path + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_prefix + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_metadata + - tempest.api.object_storage.test_container_services.ContainerTest.test_list_no_container_metadata + - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_and_delete_matadata + - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_matadata_key + - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_metadata + - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata + - 
tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata_key + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_fresh_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_meta + - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_metakey + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_match + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_modified_since + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_unmodified_since + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_range + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_newest + - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_object_manifest + - tempest.api.object_storage.test_object_services.ObjectTest.test_list_no_object_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata_with_x_object_manifest + - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_create_and_remove_metadata + - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_object_manifest + - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_remove_object_metakey + - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds + - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_object_with_another_user_creds + - tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container + - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list + - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show + - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template + - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list + - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata + - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config + - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate + - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change + - 
tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change + - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources + - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds + - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications + - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications + - tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance + - tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance + - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete + - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image + - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete + - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image + - tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list + - tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list + runner: + concurrency: 1 + times: 1 + type: serial + sla: + failure_rate: + max: 0 + diff --git a/testcases/OpenStack/rally/scenario/opnfv-vm.yaml b/testcases/OpenStack/rally/scenario/opnfv-vm.yaml new file mode 100644 index 000000000..74f509925 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/opnfv-vm.yaml @@ -0,0 +1,42 @@ + VMTasks.boot_runcommand_delete: + - + args: + {{ vm_params(image_name, flavor_name) }} + floating_network: {{ floating_network }} + force_delete: false + command: + interpreter: /bin/sh + script_file: {{ sup_dir }}/instance_dd_test.sh + username: cirros + nics: + - net-id: {{ netid }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + network: {} + {% endcall %} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} + + - + args: + {{ vm_params(image_name, flavor_name) }} + fixed_network: private + floating_network: {{ floating_network }} + force_delete: false + command: + interpreter: /bin/sh + script_file: {{ sup_dir }}/instance_dd_test.sh + use_floatingip: true + username: cirros + nics: + - net-id: {{ netid }} + volume_args: + size: 2 + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + sla: + {{ no_failures_sla() }} diff --git a/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh b/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh new file mode 100644 index 000000000..e3bf23405 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/support/instance_dd_test.sh @@ -0,0 +1,13 @@ +#!/bin/sh +time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; } +file=/tmp/test.img +c=${1:-$SIZE} +c=${c:-1000} #default is 1GB +write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c") +read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c") +[ -f $file ] && rm $file + +echo "{ + \"write_seq_${c}m\": $write_seq, + \"read_seq_${c}m\": $read_seq + }" diff --git a/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template 
b/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template new file mode 100644 index 000000000..a22487e33 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/autoscaling_policy.yaml.template @@ -0,0 +1,17 @@ +heat_template_version: 2013-05-23 + +resources: + test_group: + type: OS::Heat::AutoScalingGroup + properties: + desired_capacity: 0 + max_size: 0 + min_size: 0 + resource: + type: OS::Heat::RandomString + test_policy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: test_group } + scaling_adjustment: 1 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/default.yaml.template b/testcases/OpenStack/rally/scenario/templates/default.yaml.template new file mode 100644 index 000000000..eb4f2f2dd --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/default.yaml.template @@ -0,0 +1 @@ +heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template b/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template new file mode 100644 index 000000000..2dd676c11 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/random_strings.yaml.template @@ -0,0 +1,13 @@ +heat_template_version: 2014-10-16 + +description: Test template for rally create-update-delete scenario + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template b/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template new file mode 100644 index 000000000..b3f505fa6 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/resource_group.yaml.template @@ -0,0 +1,13 @@ +heat_template_version: 2014-10-16 + +description: Test template for rally create-update-delete scenario + +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 2 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template b/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template new file mode 100644 index 000000000..909f45d21 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/server_with_ports.yaml.template @@ -0,0 +1,64 @@ +heat_template_version: 2013-05-23 + +parameters: + # set all correct defaults for parameters before launch test + public_net: + type: string + default: public + image: + type: string + default: cirros-0.3.4-x86_64-uec + flavor: + type: string + default: m1.tiny + cidr: + type: string + default: 11.11.11.0/24 + +resources: + server: + type: OS::Nova::Server + properties: + image: {get_param: image} + flavor: {get_param: flavor} + networks: + - port: { get_resource: server_port } + + router: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: {get_param: public_net} + + router_interface: + type: OS::Neutron::RouterInterface + properties: + router_id: { get_resource: router } + subnet_id: { get_resource: private_subnet } + + private_net: + type: OS::Neutron::Net + + private_subnet: + type: OS::Neutron::Subnet + properties: + network: { get_resource: private_net } + cidr: {get_param: cidr} + + port_security_group: + type: 
OS::Neutron::SecurityGroup + properties: + name: default_port_security_group + description: > + Default security group assigned to port. The neutron default group is not + used because neutron creates several groups with the same name=default and + nova cannot chooses which one should it use. + + server_port: + type: OS::Neutron::Port + properties: + network: {get_resource: private_net} + fixed_ips: + - subnet: { get_resource: private_subnet } + security_groups: + - { get_resource: port_security_group } diff --git a/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template b/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template new file mode 100644 index 000000000..826ca9dae --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/server_with_volume.yaml.template @@ -0,0 +1,43 @@ +heat_template_version: 2013-05-23 + +parameters: + # set all correct defaults for parameters before launch test + image: + type: string + default: cirros-0.3.4-x86_64-uec + flavor: + type: string + default: m1.tiny + availability_zone: + type: string + description: The Availability Zone to launch the instance. + default: nova + volume_size: + type: number + description: Size of the volume to be created. + default: 1 + constraints: + - range: { min: 1, max: 1024 } + description: must be between 1 and 1024 Gb. + network_id: + type: string + +resources: + server: + type: OS::Nova::Server + properties: + image: {get_param: image} + flavor: {get_param: flavor} + networks: + - network: { get_param: network_id } + cinder_volume: + type: OS::Cinder::Volume + properties: + size: { get_param: volume_size } + availability_zone: { get_param: availability_zone } + volume_attachment: + type: OS::Cinder::VolumeAttachment + properties: + volume_id: { get_resource: cinder_volume } + instance_uuid: { get_resource: server} + mountpoint: /dev/vdc diff --git a/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template new file mode 100644 index 000000000..cf34879ca --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_autoscaling_policy_inplace.yaml.template @@ -0,0 +1,23 @@ +heat_template_version: 2013-05-23 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates resource parameters without resource re-creation(replacement) + in the stack defined by autoscaling_policy.yaml.template. It allows to measure + performance of "pure" resource update operation only. + +resources: + test_group: + type: OS::Heat::AutoScalingGroup + properties: + desired_capacity: 0 + max_size: 0 + min_size: 0 + resource: + type: OS::Heat::RandomString + test_policy: + type: OS::Heat::ScalingPolicy + properties: + adjustment_type: change_in_capacity + auto_scaling_group_id: { get_resource: test_group } + scaling_adjustment: -1 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template new file mode 100644 index 000000000..e06d42e01 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_add.yaml.template @@ -0,0 +1,19 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. 
+ The template updates the stack defined by random_strings.yaml.template with additional resource. + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_three: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template new file mode 100644 index 000000000..d02593e3b --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_delete.yaml.template @@ -0,0 +1,11 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template deletes one resource from the stack defined by random_strings.yaml.template. + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template new file mode 100644 index 000000000..46d8bff4c --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_random_strings_replace.yaml.template @@ -0,0 +1,19 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template deletes one resource from the stack defined by + random_strings.yaml.template and re-creates it with the updated parameters + (so-called update-replace). That happens because some parameters cannot be + changed without resource re-creation. The template allows to measure performance + of update-replace operation. + +resources: + test_string_one: + type: OS::Heat::RandomString + properties: + length: 20 + test_string_two: + type: OS::Heat::RandomString + properties: + length: 40 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template new file mode 100644 index 000000000..891074ebc --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_increase.yaml.template @@ -0,0 +1,16 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates one resource from the stack defined by resource_group.yaml.template + and adds children resources to that resource. + +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 3 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template new file mode 100644 index 000000000..b4d1d1730 --- /dev/null +++ b/testcases/OpenStack/rally/scenario/templates/updated_resource_group_reduce.yaml.template @@ -0,0 +1,16 @@ +heat_template_version: 2014-10-16 + +description: > + Test template for create-update-delete-stack scenario in rally. + The template updates one resource from the stack defined by resource_group.yaml.template + and deletes children resources from that resource. 
+ +resources: + test_group: + type: OS::Heat::ResourceGroup + properties: + count: 1 + resource_def: + type: OS::Heat::RandomString + properties: + length: 20 \ No newline at end of file diff --git a/testcases/OpenStack/rally/task.yaml b/testcases/OpenStack/rally/task.yaml new file mode 100644 index 000000000..b67891664 --- /dev/null +++ b/testcases/OpenStack/rally/task.yaml @@ -0,0 +1,60 @@ +{%- set glance_image_location = glance_image_location|default("http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img") %} +{%- set image_name = image_name|default("functest-img-rally") %} +{%- set flavor_name = flavor_name|default("m1.tiny") %} +{%- set use_existing_users = use_existing_users|default(false) %} +{%- set service_list = service_list|default(["authenticate", "cinder", "keystone", "nova", "glance", "neutron", "quotas", "requests", "heat", "vm"]) %} +{%- set live_migration = live_migration|default(false) %} +{%- set smoke = smoke|default(true) %} +{%- set floating_network = floating_network|default("net04_ext") %} +{%- set controllers_amount = controllers_amount|default(1) %} +{%- if smoke %} +{%- set users_amount = 1 %} +{%- set tenants_amount = 1 %} +{%- else %} +{%- set users_amount = users_amount|default(1) %} +{%- set tenants_amount = tenants_amount|default(1) %} +{%- endif %} + +{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%} +{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%} + +--- +{% if "authenticate" in service_list %} +{%- include "scenario/opnfv-authenticate.yaml"-%} +{% endif %} + +{% if "cinder" in service_list %} +{%- include "scenario/opnfv-cinder.yaml"-%} +{% endif %} + +{% if "keystone" in service_list %} +{%- include "scenario/opnfv-keystone.yaml"-%} +{% endif %} + +{% if "nova" in service_list %} +{%- include "scenario/opnfv-nova.yaml"-%} +{% endif %} + +{% if "glance" in service_list %} +{%- include "scenario/opnfv-glance.yaml"-%} +{% endif %} + +{% if "neutron" in service_list %} +{%- include "scenario/opnfv-neutron.yaml"-%} +{% endif %} + +{% if "quotas" in service_list %} +{%- include "scenario/opnfv-quotas.yaml"-%} +{% endif %} + +{% if "requests" in service_list %} +{%- include "scenario/opnfv-requests.yaml"-%} +{% endif %} + +{% if "heat" in service_list %} +{%- include "scenario/opnfv-heat.yaml"-%} +{% endif %} + +{% if "vm" in service_list %} +{%- include "scenario/opnfv-vm.yaml"-%} +{% endif %} diff --git a/testcases/OpenStack/tempest/custom_tests/defcore_req.txt b/testcases/OpenStack/tempest/custom_tests/defcore_req.txt new file mode 100644 index 000000000..bb1d172df --- /dev/null +++ b/testcases/OpenStack/tempest/custom_tests/defcore_req.txt @@ -0,0 +1,122 @@ +# Set of DefCore tempest test cases (see http://www.openstack.org/brand/interop) +# This approved version (2016.01) is valid for Juno, Kilo, and Liberty releases of OpenStack +# The list is stored at http://git.openstack.org/cgit/openstack/defcore/plain/2016.01/2016.01.required.txt +tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3] +tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314] +tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6] 
+tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b] +tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb] +tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789] +tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f] +tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6] +tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] +tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] +tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] +tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] +tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] +tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] +tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] +tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] 
+tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] +tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] +tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c] +tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a] +tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative] 
+tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative] +tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68] +tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c] +tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134] +tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0] +tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833] +tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699] 
+tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077] +tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative] 
+tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative] +tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative] +tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a] +tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107] +tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff] +tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513] +tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d] +tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c] +tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative] +tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative] +tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9] +tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee] +tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d] +tempest.api.object_storage.test_object_expiry.ObjectExpiryTest.test_get_object_after_expiry_time[id-fb024a42-37f3-4ba5-9684-4f40a7910b41] +tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way[id-06f90388-2d0e-40aa-934c-e9a8833e958a] +tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers[id-aa467252-44f3-472a-b5ae-5b57c3c9c147] +tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container[id-1a9ab572-1b66-4981-8c21-416e2a5e6011] +tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself[id-2248abba-415d-410b-9c30-22dff9cd6e67] +tempest.api.object_storage.test_object_services.ObjectTest.test_create_object[id-5b4ce26f-3545-46c9-a2ba-5754358a4c62,smoke] +tempest.api.object_storage.test_object_services.ObjectTest.test_delete_object[id-17738d45-03bd-4d45-9e0b-7b2f58f98687] +tempest.api.object_storage.test_object_services.ObjectTest.test_get_object[id-02610ba7-86b7-4272-9ed8-aa8d417cb3cd,smoke] +tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_if_different[id-50d01f12-526f-4360-9ac2-75dd508d7b68] +tempest.api.object_storage.test_object_services.ObjectTest.test_object_upload_in_segments[id-e3e6a64a-9f50-4955-b987-6ce6767c97fb] +tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_get_object_using_temp_url[id-f91c96d4-1230-4bba-8eb9-84476d18d991] +tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_put_object_using_temp_url[id-9b08dade-3571-4152-8a4f-a4f2a873a735] +tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container[id-a151e158-dcbf-4a1f-a1e7-46cd65895a6f] diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py new file 
mode 100644 index 000000000..bf62ce306 --- /dev/null +++ b/testcases/OpenStack/tempest/run_tempest.py @@ -0,0 +1,347 @@ +#!/usr/bin/env python +# +# Description: +# Runs tempest and pushes the results to the DB +# +# Authors: +# morgan.richomme@orange.com +# jose.lausuch@ericsson.com +# viktor.tikkanen@nokia.com +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +# +import argparse +import json +import os +import re +import requests +import shutil +import subprocess +import time +import yaml +import ConfigParser + +import keystoneclient.v2_0.client as ksclient +from neutronclient.v2_0 import client as neutronclient + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils + +modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing', + 'identity', 'image', 'network', 'object_storage', 'orchestration', + 'telemetry', 'volume', 'custom', 'defcore'] + +""" tests configuration """ +parser = argparse.ArgumentParser() +parser.add_argument("-d", "--debug", + help="Debug mode", + action="store_true") +parser.add_argument("-s", "--serial", + help="Run tests in one thread", + action="store_true") +parser.add_argument("-m", "--mode", + help="Tempest test mode [smoke, all]", + default="smoke") +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") +parser.add_argument("-n", "--noclean", + help="Don't clean the created resources for this test.", + action="store_true") + +args = parser.parse_args() + +""" logging configuration """ +logger = ft_logger.Logger("run_tempest").getLogger() + +REPO_PATH = os.environ['repos_dir'] + '/functest/' + + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() +TEST_DB = functest_yaml.get("results").get("test_db_url") + +MODE = "smoke" +PRIVATE_NET_NAME = functest_yaml.get("tempest").get("private_net_name") +PRIVATE_SUBNET_NAME = functest_yaml.get("tempest").get("private_subnet_name") +PRIVATE_SUBNET_CIDR = functest_yaml.get("tempest").get("private_subnet_cidr") +ROUTER_NAME = functest_yaml.get("tempest").get("router_name") +TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name") +TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get( + "tenant_description") +USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name") +USER_PASSWORD = functest_yaml.get("tempest").get("identity").get( + "user_password") +DEPLOYMENT_MAME = functest_yaml.get("rally").get("deployment_name") +RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get( + "dir_rally_inst") +RESULTS_DIR = functest_yaml.get("general").get("directories").get( + "dir_results") +TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest' +TEST_LIST_DIR = functest_yaml.get("general").get("directories").get( + "dir_tempest_cases") +TEMPEST_CUSTOM = REPO_PATH + TEST_LIST_DIR + 'test_list.txt' +TEMPEST_BLACKLIST = REPO_PATH + TEST_LIST_DIR + 'blacklist.txt' +TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt' +TEMPEST_RAW_LIST = TEMPEST_RESULTS_DIR + '/test_raw_list.txt' +TEMPEST_LIST = TEMPEST_RESULTS_DIR + '/test_list.txt' + + +def get_info(file_result): + test_run = "" + duration = "" + test_failed = "" + + p = subprocess.Popen('cat tempest.log', + shell=True, 
stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + for line in p.stdout.readlines(): + # print line, + if (len(test_run) < 1): + test_run = re.findall("[0-9]*\.[0-9]*s", line) + if (len(duration) < 1): + duration = re.findall("[0-9]*\ tests", line) + regexp = r"(failures=[0-9]+)" + if (len(test_failed) < 1): + test_failed = re.findall(regexp, line) + + logger.debug("test_run:" + test_run) + logger.debug("duration:" + duration) + + +def push_results_to_db(case, payload, criteria): + + # TODO move DB creds into config file + url = TEST_DB + "/results" + installer = ft_utils.get_installer_type(logger) + scenario = ft_utils.get_scenario(logger) + version = ft_utils.get_version(logger) + pod_name = ft_utils.get_pod_name(logger) + + logger.info("Pushing results to DB: '%s'." % url) + + params = {"project_name": "functest", "case_name": case, + "pod_name": str(pod_name), 'installer': installer, + "version": version, "scenario": scenario, "criteria": criteria, + 'details': payload} + headers = {'Content-Type': 'application/json'} + + r = requests.post(url, data=json.dumps(params), headers=headers) + logger.debug(r) + + +def create_tempest_resources(): + ks_creds = os_utils.get_credentials("keystone") + logger.debug("Creating tenant and user for Tempest suite") + keystone = ksclient.Client(**ks_creds) + tenant_id = os_utils.create_tenant(keystone, + TENANT_NAME, + TENANT_DESCRIPTION) + if tenant_id == '': + logger.error("Error : Failed to create %s tenant" % TENANT_NAME) + + user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD, + None, tenant_id) + if user_id == '': + logger.error("Error : Failed to create %s user" % USER_NAME) + + logger.debug("Creating private network for Tempest suite") + creds_neutron = os_utils.get_credentials("neutron") + neutron_client = neutronclient.Client(**creds_neutron) + network_dic = os_utils.create_network_full(logger, + neutron_client, + PRIVATE_NET_NAME, + PRIVATE_SUBNET_NAME, + ROUTER_NAME, + PRIVATE_SUBNET_CIDR) + if network_dic: + if not os_utils.update_neutron_net(neutron_client, + network_dic['net_id'], + shared=True): + logger.error("Failed to update private network...") + exit(-1) + else: + logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME) + else: + logger.error("Private network creation failed") + exit(-1) + + +def configure_tempest(deployment_dir): + """ + Add/update needed parameters into tempest.conf file generated by Rally + """ + + logger.debug("Generating tempest.conf file...") + cmd = "rally verify genconfig" + ft_utils.execute_command(cmd, logger) + + logger.debug("Finding tempest.conf file...") + tempest_conf_file = deployment_dir + "/tempest.conf" + if not os.path.isfile(tempest_conf_file): + logger.error("Tempest configuration file %s NOT found." 
+ % tempest_conf_file) + exit(-1) + + logger.debug("Updating selected tempest.conf parameters...") + config = ConfigParser.RawConfigParser() + config.read(tempest_conf_file) + config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME) + config.set('identity', 'tenant_name', TENANT_NAME) + config.set('identity', 'username', USER_NAME) + config.set('identity', 'password', USER_PASSWORD) + with open(tempest_conf_file, 'wb') as config_file: + config.write(config_file) + + # Copy tempest.conf to /home/opnfv/functest/results/tempest/ + shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf') + return True + + +def read_file(filename): + with open(filename) as src: + return [line.strip() for line in src.readlines()] + + +def generate_test_list(deployment_dir, mode): + logger.debug("Generating test case list...") + if mode == 'defcore': + shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST) + elif mode == 'custom': + if os.path.isfile(TEMPEST_CUSTOM): + shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST) + else: + logger.error("Tempest test list file %s NOT found." + % TEMPEST_CUSTOM) + exit(-1) + else: + if mode == 'smoke': + testr_mode = "smoke" + elif mode == 'full': + testr_mode = "" + else: + testr_mode = 'tempest.api.' + mode + cmd = ("cd " + deployment_dir + ";" + "testr list-tests " + + testr_mode + ">" + TEMPEST_RAW_LIST + ";cd") + ft_utils.execute_command(cmd, logger) + + +def apply_tempest_blacklist(): + logger.debug("Applying tempest blacklist...") + cases_file = read_file(TEMPEST_RAW_LIST) + result_file = open(TEMPEST_LIST, 'w') + try: + black_file = read_file(TEMPEST_BLACKLIST) + except: + black_file = '' + logger.debug("Tempest blacklist file does not exist.") + for line in cases_file: + if line not in black_file: + result_file.write(str(line) + '\n') + result_file.close() + + +def run_tempest(OPTION): + # + # the "main" function of the script which launches Rally to run Tempest + # :param option: tempest option (smoke, ..) + # :return: void + # + logger.info("Starting Tempest test suite: '%s'." 
% OPTION) + cmd_line = "rally verify start " + OPTION + " --system-wide" + CI_DEBUG = os.environ.get("CI_DEBUG") + if CI_DEBUG == "true" or CI_DEBUG == "True": + ft_utils.execute_command(cmd_line, logger, exit_on_error=True) + else: + header = ("Tempest environment:\n" + " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % + (os.getenv('INSTALLER_TYPE', 'Unknown'), + os.getenv('DEPLOY_SCENARIO', 'Unknown'), + os.getenv('NODE_NAME', 'Unknown'), + time.strftime("%a %b %d %H:%M:%S %Z %Y"))) + + f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') + f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') + f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') + f_env.write(header) + + subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) + + f_stdout.close() + f_stderr.close() + f_env.close() + + cmd_line = "rally verify show" + ft_utils.execute_command(cmd_line, logger, + exit_on_error=True, info=True) + + cmd_line = "rally verify list" + logger.debug('Executing command : {}'.format(cmd_line)) + cmd = os.popen(cmd_line) + output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|") + # Format: + # | UUID | Deployment UUID | smoke | tests | failures | Created at | + # Duration | Status | + num_tests = output[4] + num_failures = output[5] + time_start = output[6] + duration = output[7] + # Compute duration (lets assume it does not take more than 60 min) + dur_min = int(duration.split(':')[1]) + dur_sec_float = float(duration.split(':')[2]) + dur_sec_int = int(round(dur_sec_float, 0)) + dur_sec_int = dur_sec_int + 60 * dur_min + + # Generate json results for DB + json_results = {"timestart": time_start, "duration": dur_sec_int, + "tests": int(num_tests), "failures": int(num_failures)} + logger.info("Results: " + str(json_results)) + + status = "failed" + try: + diff = (int(num_tests) - int(num_failures)) + success_rate = 100 * diff / int(num_tests) + except: + success_rate = 0 + + # For Tempest we assume that teh success rate is above 90% + if success_rate >= 90: + status = "passed" + + # Push results in payload of testcase + if args.report: + logger.debug("Push result into DB") + push_results_to_db("Tempest", json_results, status) + + +def main(): + global MODE + + if not (args.mode in modes): + logger.error("Tempest mode not valid. " + "Possible values are:\n" + str(modes)) + exit(-1) + + if not os.path.exists(TEMPEST_RESULTS_DIR): + os.makedirs(TEMPEST_RESULTS_DIR) + + deployment_dir = ft_utils.get_deployment_dir(logger) + configure_tempest(deployment_dir) + create_tempest_resources() + generate_test_list(deployment_dir, args.mode) + apply_tempest_blacklist() + + MODE = "--tests-file " + TEMPEST_LIST + if args.serial: + MODE += " --concur 1" + + run_tempest(MODE) + + +if __name__ == '__main__': + main() diff --git a/testcases/OpenStack/vPing/ping.sh b/testcases/OpenStack/vPing/ping.sh new file mode 100644 index 000000000..693b86825 --- /dev/null +++ b/testcases/OpenStack/vPing/ping.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +while true; do + ping -c 1 $1 2>&1 >/dev/null + RES=$? 
+ if [ "Z$RES" = "Z0" ] ; then + echo 'vPing OK' + break + else + echo 'vPing KO' + fi + sleep 1 +done \ No newline at end of file diff --git a/testcases/OpenStack/vPing/vPing_ssh.py b/testcases/OpenStack/vPing/vPing_ssh.py new file mode 100644 index 000000000..5b392e866 --- /dev/null +++ b/testcases/OpenStack/vPing/vPing_ssh.py @@ -0,0 +1,453 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 All rights reserved +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1: This script boots the VM1 and allocates IP address from Nova +# Later, the VM2 boots then execute cloud-init to ping VM1. +# After successful ping, both the VMs are deleted. +# 0.2: measure test duration and publish results under json format +# +# +import argparse +import datetime +import os +import paramiko +import pprint +import re +import time +import yaml +from scp import SCPClient + +from novaclient import client as novaclient +from neutronclient.v2_0 import client as neutronclient +from keystoneclient.v2_0 import client as keystoneclient +from glanceclient import client as glanceclient + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as functest_utils +import functest.utils.openstack_utils as openstack_utils + +pp = pprint.PrettyPrinter(indent=4) + +parser = argparse.ArgumentParser() +image_exists = False + +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") + +args = parser.parse_args() + +""" logging configuration """ +logger = ft_logger.Logger("vping_ssh").getLogger() + +paramiko.util.log_to_file("/var/log/paramiko.log") + +REPO_PATH = os.environ['repos_dir'] + '/functest/' +if not os.path.exists(REPO_PATH): + logger.error("Functest repository directory not found '%s'" % REPO_PATH) + exit(-1) + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() + +HOME = os.environ['HOME'] + "/" +# vPing parameters +VM_BOOT_TIMEOUT = 180 +VM_DELETE_TIMEOUT = 100 +PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout") +TEST_DB = functest_yaml.get("results").get("test_db_url") +NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1") +NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2") +GLANCE_IMAGE_NAME = functest_yaml.get("vping").get("image_name") +GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get( + "image_file_name") +GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get( + "image_disk_format") +GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( + "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME + +FLAVOR = functest_yaml.get("vping").get("vm_flavor") + +# NEUTRON Private Network parameters + +PRIVATE_NET_NAME = functest_yaml.get("vping").get( + "vping_private_net_name") +PRIVATE_SUBNET_NAME = functest_yaml.get("vping").get( + "vping_private_subnet_name") +PRIVATE_SUBNET_CIDR = functest_yaml.get("vping").get( + "vping_private_subnet_cidr") +ROUTER_NAME = functest_yaml.get("vping").get( + "vping_router_name") + +SECGROUP_NAME = functest_yaml.get("vping").get("vping_sg_name") +SECGROUP_DESCR = functest_yaml.get("vping").get("vping_sg_descr") + + +def pMsg(value): + + """pretty printing""" + pp.pprint(value) + + +def waitVmActive(nova, vm): + + # sleep and wait for VM 
status change + sleep_time = 3 + count = VM_BOOT_TIMEOUT / sleep_time + while True: + status = openstack_utils.get_instance_status(nova, vm) + logger.debug("Status: %s" % status) + if status == "ACTIVE": + return True + if status == "ERROR" or status == "error": + return False + if count == 0: + logger.debug("Booting a VM timed out...") + return False + count -= 1 + time.sleep(sleep_time) + return False + + +def waitVmDeleted(nova, vm): + + # sleep and wait for VM status change + sleep_time = 3 + count = VM_DELETE_TIMEOUT / sleep_time + while True: + status = openstack_utils.get_instance_status(nova, vm) + if not status: + return True + elif count == 0: + logger.debug("Timeout") + return False + else: + # return False + count -= 1 + time.sleep(sleep_time) + return False + + +def create_security_group(neutron_client): + sg_id = openstack_utils.get_security_group_id(neutron_client, + SECGROUP_NAME) + if sg_id != '': + logger.info("Using existing security group '%s'..." % SECGROUP_NAME) + else: + logger.info("Creating security group '%s'..." % SECGROUP_NAME) + SECGROUP = openstack_utils.create_security_group(neutron_client, + SECGROUP_NAME, + SECGROUP_DESCR) + if not SECGROUP: + logger.error("Failed to create the security group...") + return False + + sg_id = SECGROUP['id'] + + logger.debug("Security group '%s' with ID=%s created successfully." + % (SECGROUP['name'], sg_id)) + + logger.debug("Adding ICMP rules in security group '%s'..." + % SECGROUP_NAME) + if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, + 'ingress', 'icmp'): + logger.error("Failed to create the security group rule...") + return False + + logger.debug("Adding SSH rules in security group '%s'..." + % SECGROUP_NAME) + if not openstack_utils.create_secgroup_rule( + neutron_client, sg_id, 'ingress', 'tcp', '22', '22'): + logger.error("Failed to create the security group rule...") + return False + + if not openstack_utils.create_secgroup_rule( + neutron_client, sg_id, 'egress', 'tcp', '22', '22'): + logger.error("Failed to create the security group rule...") + return False + return sg_id + + +def push_results(start_time_ts, duration, test_status): + try: + logger.debug("Pushing result into DB...") + scenario = functest_utils.get_scenario(logger) + version = functest_utils.get_version(logger) + criteria = "failed" + if test_status == "OK": + criteria = "passed" + pod_name = functest_utils.get_pod_name(logger) + build_tag = functest_utils.get_build_tag(logger) + functest_utils.push_results_to_db(TEST_DB, + "functest", + "vPing", + logger, pod_name, version, scenario, + criteria, build_tag, + payload={'timestart': start_time_ts, + 'duration': duration, + 'status': test_status}) + except: + logger.error("Error pushing results into Database '%s'" + % sys.exc_info()[0]) + + +def main(): + + creds_nova = openstack_utils.get_credentials("nova") + nova_client = novaclient.Client('2', **creds_nova) + creds_neutron = openstack_utils.get_credentials("neutron") + neutron_client = neutronclient.Client(**creds_neutron) + creds_keystone = openstack_utils.get_credentials("keystone") + keystone_client = keystoneclient.Client(**creds_keystone) + glance_endpoint = keystone_client.service_catalog.url_for( + service_type='image', endpoint_type='publicURL') + glance_client = glanceclient.Client(1, glance_endpoint, + token=keystone_client.auth_token) + EXIT_CODE = -1 + + image_id = None + flavor = None + + # Check if the given image exists + image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) + if image_id != '': 
+ logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME) + global image_exists + image_exists = True + else: + logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH)) + image_id = openstack_utils.create_glance_image(glance_client, + GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH) + if not image_id: + logger.error("Failed to create a Glance image...") + return(EXIT_CODE) + logger.debug("Image '%s' with ID=%s created successfully." + % (GLANCE_IMAGE_NAME, image_id)) + + network_dic = openstack_utils.create_network_full(logger, + neutron_client, + PRIVATE_NET_NAME, + PRIVATE_SUBNET_NAME, + ROUTER_NAME, + PRIVATE_SUBNET_CIDR) + if not network_dic: + logger.error( + "There has been a problem when creating the neutron network") + return(EXIT_CODE) + + network_id = network_dic["net_id"] + + sg_id = create_security_group(neutron_client) + + # Check if the given flavor exists + try: + flavor = nova_client.flavors.find(name=FLAVOR) + logger.info("Using existing Flavor '%s'..." % FLAVOR) + except: + logger.error("Flavor '%s' not found." % FLAVOR) + logger.info("Available flavors are: ") + pMsg(nova_client.flavor.list()) + return(EXIT_CODE) + + # Deleting instances if they exist + servers = nova_client.servers.list() + for server in servers: + if server.name == NAME_VM_1 or server.name == NAME_VM_2: + logger.info("Instance %s found. Deleting..." % server.name) + server.delete() + + # boot VM 1 + start_time_ts = time.time() + end_time_ts = start_time_ts + logger.info("vPing Start Time:'%s'" % ( + datetime.datetime.fromtimestamp(start_time_ts).strftime( + '%Y-%m-%d %H:%M:%S'))) + + logger.info("Creating instance '%s'..." % NAME_VM_1) + logger.debug( + "Configuration:\n name=%s \n flavor=%s \n image=%s \n " + "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id)) + vm1 = nova_client.servers.create( + name=NAME_VM_1, + flavor=flavor, + image=image_id, + nics=[{"net-id": network_id}] + ) + + # wait until VM status is active + if not waitVmActive(nova_client, vm1): + logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( + NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1))) + return (EXIT_CODE) + else: + logger.info("Instance '%s' is ACTIVE." % NAME_VM_1) + + # Retrieve IP of first VM + test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0] + logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip)) + + logger.info("Adding '%s' to security group '%s'..." + % (NAME_VM_1, SECGROUP_NAME)) + openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id) + + # boot VM 2 + logger.info("Creating instance '%s'..." % NAME_VM_2) + logger.debug( + "Configuration:\n name=%s \n flavor=%s \n image=%s \n " + "network=%s \n" % (NAME_VM_2, flavor, image_id, network_id)) + vm2 = nova_client.servers.create( + name=NAME_VM_2, + flavor=flavor, + image=image_id, + nics=[{"net-id": network_id}] + ) + + if not waitVmActive(nova_client, vm2): + logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( + NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2))) + return (EXIT_CODE) + else: + logger.info("Instance '%s' is ACTIVE." % NAME_VM_2) + + logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2, + SECGROUP_NAME)) + openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id) + + logger.info("Creating floating IP for VM '%s'..." 
% NAME_VM_2) + floatip_dic = openstack_utils.create_floating_ip(neutron_client) + floatip = floatip_dic['fip_addr'] + # floatip_id = floatip_dic['fip_id'] + + if floatip is None: + logger.error("Cannot create floating IP.") + return (EXIT_CODE) + logger.info("Floating IP created: '%s'" % floatip) + + logger.info("Associating floating ip: '%s' to VM '%s' " + % (floatip, NAME_VM_2)) + if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip): + logger.error("Cannot associate floating IP to VM.") + return (EXIT_CODE) + + logger.info("Trying to establish SSH connection to %s..." % floatip) + username = 'cirros' + password = 'cubswin:)' + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + timeout = 50 + nolease = False + got_ip = False + discover_count = 0 + cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0] + while timeout > 0: + try: + ssh.connect(floatip, username=username, + password=password, timeout=2) + logger.debug("SSH connection established to %s." % floatip) + break + except: + logger.debug("Waiting for %s..." % floatip) + time.sleep(6) + timeout -= 1 + + console_log = vm2.get_console_output() + + # print each "Sending discover" captured on the console log + if (len(re.findall("Sending discover", console_log)) > + discover_count and not got_ip): + discover_count += 1 + logger.debug("Console-log '%s': Sending discover..." + % NAME_VM_2) + + # check if eth0 got an IP, the line looks like this: + # "inet addr:192.168.".... + # if the dhcp agent fails to assign an IP, this line will not appear + if "inet addr:" + cidr_first_octet in console_log and not got_ip: + got_ip = True + logger.debug("The instance '%s' succeeded to get the IP " + "from the dhcp agent." % NAME_VM_2) + + # if dhcp doesn't work, it shows "No lease, failing". The test will fail + if "No lease, failing" in console_log and not nolease and not got_ip: + nolease = True + logger.debug("Console-log '%s': No lease, failing..." + % NAME_VM_2) + logger.info("The instance failed to get an IP from the " + "DHCP agent. The test will probably timeout...") + + if timeout == 0: # 300 sec timeout (5 min) + logger.error("Cannot establish connection to IP '%s'. Aborting" + % floatip) + return (EXIT_CODE) + + scp = SCPClient(ssh.get_transport()) + + ping_script = REPO_PATH + "testcases/OpenStack/vPing/ping.sh" + try: + scp.put(ping_script, "~/") + except: + logger.error("Cannot SCP the file '%s' to VM '%s'" + % (ping_script, floatip)) + + cmd = 'chmod 755 ~/ping.sh' + (stdin, stdout, stderr) = ssh.exec_command(cmd) + for line in stdout.readlines(): + print line + + logger.info("Waiting for ping...") + sec = 0 + duration = 0 + + cmd = '~/ping.sh ' + test_ip + flag = False + while True: + time.sleep(1) + (stdin, stdout, stderr) = ssh.exec_command(cmd) + output = stdout.readlines() + + for line in output: + if "vPing OK" in line: + logger.info("vPing detected!") + + # we consider start time at VM1 booting + end_time_ts = time.time() + duration = round(end_time_ts - start_time_ts, 1) + logger.info("vPing duration:'%s' s." % duration) + EXIT_CODE = 0 + flag = True + break + elif sec == PING_TIMEOUT: + logger.info("Timeout reached.") + flag = True + break + if flag: + break + logger.debug("Pinging %s. Waiting for response..."
% test_ip) + sec += 1 + + test_status = "NOK" + if EXIT_CODE == 0: + logger.info("vPing OK") + test_status = "OK" + else: + duration = 0 + logger.error("vPing FAILED") + + if args.report: + push_results(start_time_ts, duration, test_status) + + exit(EXIT_CODE) + +if __name__ == '__main__': + main() diff --git a/testcases/OpenStack/vPing/vPing_userdata.py b/testcases/OpenStack/vPing/vPing_userdata.py new file mode 100644 index 000000000..2b2963144 --- /dev/null +++ b/testcases/OpenStack/vPing/vPing_userdata.py @@ -0,0 +1,387 @@ +#!/usr/bin/python +# +# Copyright (c) 2015 All rights reserved +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1: This script boots the VM1 and allocates IP address from Nova +# Later, the VM2 boots then execute cloud-init to ping VM1. +# After successful ping, both the VMs are deleted. +# 0.2: measure test duration and publish results under json format +# +# + +import argparse +import datetime +import os +import pprint +import time +import yaml + +from novaclient import client as novaclient +from neutronclient.v2_0 import client as neutronclient +from keystoneclient.v2_0 import client as keystoneclient +from glanceclient import client as glanceclient + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as functest_utils +import functest.utils.openstack_utils as openstack_utils + +pp = pprint.PrettyPrinter(indent=4) + +parser = argparse.ArgumentParser() +image_exists = False + +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") + +args = parser.parse_args() + +""" logging configuration """ +logger = ft_logger.Logger("vping_userdata").getLogger() + +REPO_PATH = os.environ['repos_dir'] + '/functest/' +if not os.path.exists(REPO_PATH): + logger.error("Functest repository directory not found '%s'" % REPO_PATH) + exit(-1) + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() + +HOME = os.environ['HOME'] + "/" +# vPing parameters +VM_BOOT_TIMEOUT = 180 +VM_DELETE_TIMEOUT = 100 +PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout") +TEST_DB = functest_yaml.get("results").get("test_db_url") +NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1") +NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2") +GLANCE_IMAGE_NAME = functest_yaml.get("vping").get("image_name") +GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get( + "openstack").get("image_file_name") +GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get( + "openstack").get("image_disk_format") +GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( + "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME + + +FLAVOR = functest_yaml.get("vping").get("vm_flavor") + +# NEUTRON Private Network parameters + +PRIVATE_NET_NAME = functest_yaml.get("vping").get( + "vping_private_net_name") +PRIVATE_SUBNET_NAME = functest_yaml.get("vping").get( + "vping_private_subnet_name") +PRIVATE_SUBNET_CIDR = functest_yaml.get("vping").get( + "vping_private_subnet_cidr") +ROUTER_NAME = functest_yaml.get("vping").get("vping_router_name") + +SECGROUP_NAME = functest_yaml.get("vping").get("vping_sg_name") +SECGROUP_DESCR = functest_yaml.get("vping").get("vping_sg_descr") + + +def pMsg(value): + + """pretty printing""" + 
pp.pprint(value) + + +def waitVmActive(nova, vm): + + # sleep and wait for VM status change + sleep_time = 3 + count = VM_BOOT_TIMEOUT / sleep_time + while True: + status = openstack_utils.get_instance_status(nova, vm) + logger.debug("Status: %s" % status) + if status == "ACTIVE": + return True + if status == "ERROR" or status == "error": + return False + if count == 0: + logger.debug("Booting a VM timed out...") + return False + count -= 1 + time.sleep(sleep_time) + return False + + +def waitVmDeleted(nova, vm): + + # sleep and wait for VM status change + sleep_time = 3 + count = VM_DELETE_TIMEOUT / sleep_time + while True: + status = openstack_utils.get_instance_status(nova, vm) + if not status: + return True + elif count == 0: + logger.debug("Timeout") + return False + else: + # return False + count -= 1 + time.sleep(sleep_time) + return False + + +def create_security_group(neutron_client): + sg_id = openstack_utils.get_security_group_id(neutron_client, + SECGROUP_NAME) + if sg_id != '': + logger.info("Using existing security group '%s'..." % SECGROUP_NAME) + else: + logger.info("Creating security group '%s'..." % SECGROUP_NAME) + SECGROUP = openstack_utils.create_security_group(neutron_client, + SECGROUP_NAME, + SECGROUP_DESCR) + if not SECGROUP: + logger.error("Failed to create the security group...") + return False + + sg_id = SECGROUP['id'] + + logger.debug("Security group '%s' with ID=%s created successfully." + % (SECGROUP['name'], sg_id)) + + logger.debug("Adding ICMP rules in security group '%s'..." + % SECGROUP_NAME) + if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, + 'ingress', 'icmp'): + logger.error("Failed to create the security group rule...") + return False + + logger.debug("Adding SSH rules in security group '%s'..." 
+ % SECGROUP_NAME) + if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, + 'ingress', 'tcp', + '22', '22'): + logger.error("Failed to create the security group rule...") + return False + + if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, + 'egress', 'tcp', + '22', '22'): + logger.error("Failed to create the security group rule...") + return False + return sg_id + + +def push_results(start_time_ts, duration, test_status): + try: + logger.debug("Pushing result into DB...") + scenario = functest_utils.get_scenario(logger) + version = functest_utils.get_version(logger) + criteria = "failed" + if test_status == "OK": + criteria = "passed" + pod_name = functest_utils.get_pod_name(logger) + build_tag = functest_utils.get_build_tag(logger) + functest_utils.push_results_to_db(TEST_DB, + "functest", + "vPing_userdata", + logger, pod_name, version, scenario, + criteria, build_tag, + payload={'timestart': start_time_ts, + 'duration': duration, + 'status': test_status}) + except Exception as e: + logger.error("Error pushing results into Database '%s'" + % e) + + +def main(): + + creds_nova = openstack_utils.get_credentials("nova") + nova_client = novaclient.Client('2', **creds_nova) + creds_neutron = openstack_utils.get_credentials("neutron") + neutron_client = neutronclient.Client(**creds_neutron) + creds_keystone = openstack_utils.get_credentials("keystone") + keystone_client = keystoneclient.Client(**creds_keystone) + glance_endpoint = keystone_client.service_catalog.url_for( + service_type='image', endpoint_type='publicURL') + glance_client = glanceclient.Client(1, glance_endpoint, + token=keystone_client.auth_token) + EXIT_CODE = -1 + + image_id = None + flavor = None + + # Check if the given image exists + image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) + if image_id != '': + logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME) + global image_exists + image_exists = True + else: + logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH)) + image_id = openstack_utils.create_glance_image(glance_client, + GLANCE_IMAGE_NAME, + GLANCE_IMAGE_PATH) + if not image_id: + logger.error("Failed to create a Glance image...") + return(EXIT_CODE) + logger.debug("Image '%s' with ID=%s created successfully." + % (GLANCE_IMAGE_NAME, image_id)) + + network_dic = openstack_utils.create_network_full(logger, + neutron_client, + PRIVATE_NET_NAME, + PRIVATE_SUBNET_NAME, + ROUTER_NAME, + PRIVATE_SUBNET_CIDR) + if not network_dic: + logger.error( + "There has been a problem when creating the neutron network") + return(EXIT_CODE) + network_id = network_dic["net_id"] + + create_security_group(neutron_client) + + # Check if the given flavor exists + try: + flavor = nova_client.flavors.find(name=FLAVOR) + logger.info("Flavor found '%s'" % FLAVOR) + except: + logger.error("Flavor '%s' not found." % FLAVOR) + logger.info("Available flavors are: ") + pMsg(nova_client.flavors.list()) + exit(-1) + + # Deleting instances if they exist + servers = nova_client.servers.list() + for server in servers: + if server.name == NAME_VM_1 or server.name == NAME_VM_2: + logger.info("Instance %s found. Deleting..." % server.name) + server.delete() + + # boot VM 1 + # basic boot + # tune (e.g.
flavor, images, network) to your specific + # openstack configuration here + # we consider start time at VM1 booting + start_time_ts = time.time() + end_time_ts = start_time_ts + logger.info("vPing Start Time:'%s'" % ( + datetime.datetime.fromtimestamp(start_time_ts).strftime( + '%Y-%m-%d %H:%M:%S'))) + + # create VM + logger.info("Creating instance '%s'..." % NAME_VM_1) + logger.debug( + "Configuration:\n name=%s \n flavor=%s \n image=%s \n " + "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id)) + vm1 = nova_client.servers.create( + name=NAME_VM_1, + flavor=flavor, + image=image_id, + config_drive=True, + nics=[{"net-id": network_id}] + ) + + # wait until VM status is active + if not waitVmActive(nova_client, vm1): + + logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( + NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1))) + return (EXIT_CODE) + else: + logger.info("Instance '%s' is ACTIVE." % NAME_VM_1) + + # Retrieve IP of first VM + test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0] + logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip)) + + # boot VM 2 + # we will boot then execute a ping script with cloud-init + # the long chain corresponds to the ping procedure converted with base 64 + # tune (e.g. flavor, images, network) to your specific openstack + # configuration here + u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " + "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " + "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip) + + # create VM + logger.info("Creating instance '%s'..." % NAME_VM_2) + logger.debug( + "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s " + "\n userdata= \n%s" % ( + NAME_VM_2, flavor, image_id, network_id, u)) + vm2 = nova_client.servers.create( + name=NAME_VM_2, + flavor=flavor, + image=image_id, + nics=[{"net-id": network_id}], + config_drive=True, + userdata=u + ) + + if not waitVmActive(nova_client, vm2): + logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( + NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2))) + return (EXIT_CODE) + else: + logger.info("Instance '%s' is ACTIVE." % NAME_VM_2) + + logger.info("Waiting for ping...") + sec = 0 + metadata_tries = 0 + console_log = vm2.get_console_output() + duration = 0 + + while True: + time.sleep(1) + console_log = vm2.get_console_output() + # print "--"+console_log + # report if the test is failed + if "vPing OK" in console_log: + logger.info("vPing detected!") + + # we consider start time at VM1 booting + end_time_ts = time.time() + duration = round(end_time_ts - start_time_ts, 1) + logger.info("vPing duration:'%s'" % duration) + EXIT_CODE = 0 + break + elif ("failed to read iid from metadata" in console_log or + metadata_tries > 5): + EXIT_CODE = -2 + break + elif sec == PING_TIMEOUT: + logger.info("Timeout reached.") + break + elif sec % 10 == 0: + if "request failed" in console_log: + logger.debug("It seems userdata is not supported in " + "nova boot. Waiting a bit...") + metadata_tries += 1 + else: + logger.debug("Pinging %s. Waiting for response..." % test_ip) + sec += 1 + + test_status = "NOK" + if EXIT_CODE == 0: + logger.info("vPing OK") + test_status = "OK" + elif EXIT_CODE == -2: + duration = 0 + logger.info("Userdata is not supported in nova boot. 
Aborting test...") + else: + duration = 0 + logger.error("vPing FAILED") + + if args.report: + push_results(start_time_ts, duration, test_status) + + exit(EXIT_CODE) + +if __name__ == '__main__': + main() diff --git a/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt b/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt deleted file mode 100644 index bb1d172df..000000000 --- a/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt +++ /dev/null @@ -1,122 +0,0 @@ -# Set of DefCore tempest test cases (see http://www.openstack.org/brand/interop) -# This approved version (2016.01) is valid for Juno, Kilo, and Liberty releases of OpenStack -# The list is stored at http://git.openstack.org/cgit/openstack/defcore/plain/2016.01/2016.01.required.txt -tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3] -tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314] -tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb] -tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789] -tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f] 
-tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] -tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c] -tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e] 
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560] 
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative] 
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative] -tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a] -tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107] -tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff] -tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513] -tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d] -tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c] -tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative] -tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative] -tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9] -tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee] -tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d] -tempest.api.object_storage.test_object_expiry.ObjectExpiryTest.test_get_object_after_expiry_time[id-fb024a42-37f3-4ba5-9684-4f40a7910b41] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way[id-06f90388-2d0e-40aa-934c-e9a8833e958a] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers[id-aa467252-44f3-472a-b5ae-5b57c3c9c147] 
-tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container[id-1a9ab572-1b66-4981-8c21-416e2a5e6011] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself[id-2248abba-415d-410b-9c30-22dff9cd6e67] -tempest.api.object_storage.test_object_services.ObjectTest.test_create_object[id-5b4ce26f-3545-46c9-a2ba-5754358a4c62,smoke] -tempest.api.object_storage.test_object_services.ObjectTest.test_delete_object[id-17738d45-03bd-4d45-9e0b-7b2f58f98687] -tempest.api.object_storage.test_object_services.ObjectTest.test_get_object[id-02610ba7-86b7-4272-9ed8-aa8d417cb3cd,smoke] -tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_if_different[id-50d01f12-526f-4360-9ac2-75dd508d7b68] -tempest.api.object_storage.test_object_services.ObjectTest.test_object_upload_in_segments[id-e3e6a64a-9f50-4955-b987-6ce6767c97fb] -tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_get_object_using_temp_url[id-f91c96d4-1230-4bba-8eb9-84476d18d991] -tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_put_object_using_temp_url[id-9b08dade-3571-4152-8a4f-a4f2a873a735] -tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container[id-a151e158-dcbf-4a1f-a1e7-46cd65895a6f] diff --git a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh b/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh deleted file mode 100755 index 611c100c5..000000000 --- a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh +++ /dev/null @@ -1,208 +0,0 @@ -# -# OpenStack Health Check -# This script is meant for really basic API operations on OpenStack -# Services tested: Keystone, Glance, Cinder, Neutron, Nova -# -# -# Author: -# jose.lausuch@ericsson.com -# -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# - -set -e - -#Redirect all the output (stdout) to a log file and show only possible errors. -LOG_FILE=/home/opnfv/functest/results/healthcheck.log -echo "">$LOG_FILE -exec 1<>$LOG_FILE - -info () { - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - INFO - " "$*" | tee -a $LOG_FILE 1>&2 -} - -debug () { - if [[ "${CI_DEBUG,,}" == "true" ]]; then - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - DEBUG - " "$*" | tee -a $LOG_FILE 1>&2 - fi -} - -error () { - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - ERROR - " "$*" | tee -a $LOG_FILE 1>&2 - exit 1 -} - -if [ -z $OS_AUTH_URL ]; then - echo "Source credentials first." 
- exit 1 -fi - - -echo "Using following credentials:" -env | grep OS - -## Variables: -project_1="opnfv-tenant1" -project_2="opnfv-tenant2" -user_1="opnfv_user1" -user_2="opnfv_user2" -user_3="opnfv_user3" -user_4="opnfv_user4" -user_5="opnfv_user5" -user_6="opnfv_user6" -image_1="opnfv-image1" -image_2="opnfv-image2" -volume_1="opnfv-volume1" -volume_2="opnfv-volume2" -net_1="opnfv-network1" -net_2="opnfv-network2" -subnet_1="opnfv-subnet1" -subnet_2="opnfv-subnet2" -port_1="opnfv-port1" -port_2="opnfv-port2" -router_1="opnfv-router1" -router_2="opnfv-router2" -instance_1="opnfv-instance1" -instance_2="opnfv-instance2" -instance_3="opnfv-instance3" -instance_4="opnfv-instance4" - - - -function wait_for_ip() { - # $1 is the instance name - # $2 is the first octet of the subnet ip - timeout=60 - while [[ ${timeout} > 0 ]]; do - if [[ $(nova console-log $1|grep "No lease, failing") ]]; then - error "The instance $1 couldn't get an IP from the DHCP agent." | tee -a $LOG_FILE 1>&2 - exit 1 - elif [[ $(nova console-log $1|grep "^Lease"|grep "obtained") ]]; then - debug "The instance $1 got an IP successfully from the DHCP agent." | tee -a $LOG_FILE 1>&2 - break - fi - let timeout=timeout-1 - sleep 1 - done -} - - -################################# -info "Testing Keystone API..." | tee -a $LOG_FILE 1>&2 -################################# -openstack project create ${project_1} -debug "project '${project_1}' created." -openstack project create ${project_2} -debug "project '${project_2}' created." -openstack user create ${user_1} --project ${project_1} -debug "user '${user_1}' created in project ${project_1}." -openstack user create ${user_2} --project ${project_1} -debug "user '${user_2}' created in project ${project_1}." -openstack user create ${user_3} --project ${project_1} -debug "user '${user_3}' created in project ${project_1}." -openstack user create ${user_4} --project ${project_2} -debug "user '${user_4}' created in project ${project_2}." -openstack user create ${user_5} --project ${project_2} -debug "user '${user_5}' created in project ${project_2}." -openstack user create ${user_6} --project ${project_2} -debug "user '${user_6}' created in project ${project_2}." -info "...Keystone OK!" - -################################# -info "Testing Glance API..." -################################# -image=/home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img -glance image-create --name ${image_1} --disk-format qcow2 --container-format bare < ${image} -debug "image '${image_1}' created." -glance image-create --name ${image_2} --disk-format qcow2 --container-format bare < ${image} -debug "image '${image_2}' created." -info "... Glance OK!" - -################################# -info "Testing Cinder API..." -################################# -cinder create --display_name ${volume_1} 1 -debug "volume '${volume_1}' created." -cinder create --display_name ${volume_2} 10 -debug "volume '${volume_2}' created." -info "...Cinder OK!" - -################################# -info "Testing Neutron API..." -################################# - -network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}')) -for id in ${network_ids[@]}; do - [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id} -done -if [[ "${ext_net_id}" == "" ]]; then - error "No external network found. Exiting Health Check..." - exit 1 -else - info "External network found. ${ext_net_id}" -fi - -info "1. Create Networks..." -neutron net-create ${net_1} -debug "net '${net_1}' created." 
-neutron net-create ${net_2} -debug "net '${net_2}' created." -net1_id=$(neutron net-list | grep ${net_1} | awk '{print $2}') -net2_id=$(neutron net-list | grep ${net_2} | awk '{print $2}') - -info "2. Create subnets..." -neutron subnet-create --name ${subnet_1} --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 ${net_1} 10.6.0.0/24 -debug "subnet '${subnet_1}' created." -neutron subnet-create --name ${subnet_2} --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 ${net_2} 10.7.0.0/24 -debug "subnet '${subnet_2}' created." - -info "4. Create Routers..." -neutron router-create ${router_1} -debug "router '${router_1}' created." -neutron router-create ${router_2} -debug "router '${router_2}' created." - -neutron router-gateway-set ${router_1} ${ext_net_id} -debug "router '${router_1}' gateway set to ${ext_net_id}." -neutron router-gateway-set ${router_2} ${ext_net_id} -debug "router '${router_2}' gateway set to ${ext_net_id}." - -neutron router-interface-add ${router_1} ${subnet_1} -debug "router '${router_1}' interface added ${subnet_1}." -neutron router-interface-add ${router_2} ${subnet_2} -debug "router '${router_2}' interface added ${subnet_2}." - -info "...Neutron OK!" - -################################# -info "Testing Nova API..." -################################# - -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_1} -debug "nova instance '${instance_1}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_2} -debug "nova instance '${instance_2}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_3} -debug "nova instance '${instance_3}' booted on ${net_2}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_4} -debug "nova instance '${instance_4}' booted on ${net_2}." - -vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}') -vm2_id=$(nova list | grep ${instance_2} | awk '{print $2}') -vm3_id=$(nova list | grep ${instance_3} | awk '{print $2}') -vm4_id=$(nova list | grep ${instance_4} | awk '{print $2}') -info "...Nova OK!" - -info "Checking if instances get an IP from DHCP..." -wait_for_ip ${instance_1} "10.6" -wait_for_ip ${instance_2} "10.6" -wait_for_ip ${instance_3} "10.7" -wait_for_ip ${instance_4} "10.7" -info "...DHCP OK!" - -info "Health check passed!" -exit 0 diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py deleted file mode 100755 index 4dc1e16d5..000000000 --- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py +++ /dev/null @@ -1,560 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2015 Orange -# guyrodrigue.koffi@orange.com -# morgan.richomme@orange.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1 (05/2015) initial commit -# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite -# 0.3 (19/10/2015) remove Tempest from run_rally -# and push result into test DB -# -import argparse -import iniparse -import json -import os -import re -import requests -import subprocess -import time -import yaml - -from novaclient import client as novaclient -from glanceclient import client as glanceclient -from keystoneclient.v2_0 import client as keystoneclient -from neutronclient.v2_0 import client as neutronclient -from cinderclient import client as cinderclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils -import functest.utils.openstack_utils as openstack_utils - -""" tests configuration """ -tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', - 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all'] -parser = argparse.ArgumentParser() -parser.add_argument("test_name", - help="Module name to be tested. " - "Possible values are : " - "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | " - "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | " - "{d[10]} ] " - "The 'all' value " - "performs all possible test scenarios" - .format(d=tests)) - -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") -parser.add_argument("-s", "--smoke", - help="Smoke test mode", - action="store_true") -parser.add_argument("-v", "--verbose", - help="Print verbose info about the progress", - action="store_true") -parser.add_argument("-n", "--noclean", - help="Don't clean the created resources for this test.", - action="store_true") -parser.add_argument("-z", "--sanity", - help="Sanity test mode, execute only a subset of tests", - action="store_true") - -args = parser.parse_args() - -client_dict = {} -network_dict = {} - -if args.verbose: - RALLY_STDERR = subprocess.STDOUT -else: - RALLY_STDERR = open(os.devnull, 'w') - -""" logging configuration """ -logger = ft_logger.Logger("run_rally").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -HOME = os.environ['HOME'] + "/" -SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get( - "directories").get("dir_rally_scn") -TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates" -SUPPORT_DIR = SCENARIOS_DIR + "scenario/support" - -FLAVOR_NAME = "m1.tiny" -USERS_AMOUNT = 2 -TENANTS_AMOUNT = 3 -ITERATIONS_AMOUNT = 10 -CONCURRENCY = 4 - -RESULTS_DIR = functest_yaml.get("general").get("directories").get( - "dir_rally_res") -TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get( - "dir_results") + '/tempest/tempest.conf' -TEST_DB = functest_yaml.get("results").get("test_db_url") - -PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr") -ROUTER_NAME = functest_yaml.get("rally").get("router_name") - -GLANCE_IMAGE_NAME = 
functest_yaml.get("general").get("openstack").get( - "image_name") -GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get( - "image_file_name") -GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get( - "image_disk_format") -GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( - "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME - -CINDER_VOLUME_TYPE_NAME = "volume_test" - - -SUMMARY = [] - - -def push_results_to_db(case, payload, criteria): - - url = TEST_DB + "/results" - installer = functest_utils.get_installer_type(logger) - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - pod_name = functest_utils.get_pod_name(logger) - - # evalutate success criteria - - params = {"project_name": "functest", "case_name": case, - "pod_name": pod_name, "installer": installer, - "version": version, "scenario": scenario, - "criteria": criteria, "details": payload} - - headers = {'Content-Type': 'application/json'} - r = requests.post(url, data=json.dumps(params), headers=headers) - logger.debug(r) - - -def get_task_id(cmd_raw): - """ - get task id from command rally result - :param cmd_raw: - :return: task_id as string - """ - taskid_re = re.compile('^Task +(.*): started$') - for line in cmd_raw.splitlines(True): - line = line.strip() - match = taskid_re.match(line) - if match: - return match.group(1) - return None - - -def task_succeed(json_raw): - """ - Parse JSON from rally JSON results - :param json_raw: - :return: Bool - """ - rally_report = json.loads(json_raw) - for report in rally_report: - if report is None or report.get('result') is None: - return False - - for result in report.get('result'): - if result is None or len(result.get('error')) > 0: - return False - - return True - - -def live_migration_supported(): - config = iniparse.ConfigParser() - if (config.read(TEMPEST_CONF_FILE) and - config.has_section('compute-feature-enabled') and - config.has_option('compute-feature-enabled', 'live_migration')): - return config.getboolean('compute-feature-enabled', 'live_migration') - - return False - - -def build_task_args(test_file_name): - task_args = {'service_list': [test_file_name]} - task_args['image_name'] = GLANCE_IMAGE_NAME - task_args['flavor_name'] = FLAVOR_NAME - task_args['glance_image_location'] = GLANCE_IMAGE_PATH - task_args['tmpl_dir'] = TEMPLATE_DIR - task_args['sup_dir'] = SUPPORT_DIR - task_args['users_amount'] = USERS_AMOUNT - task_args['tenants_amount'] = TENANTS_AMOUNT - task_args['iterations'] = ITERATIONS_AMOUNT - task_args['concurrency'] = CONCURRENCY - - if args.sanity: - task_args['full_mode'] = False - task_args['smoke'] = True - else: - task_args['full_mode'] = True - task_args['smoke'] = args.smoke - - ext_net = openstack_utils.get_external_net(client_dict['neutron']) - if ext_net: - task_args['floating_network'] = str(ext_net) - else: - task_args['floating_network'] = '' - - net_id = network_dict['net_id'] - task_args['netid'] = str(net_id) - task_args['live_migration'] = live_migration_supported() - - return task_args - - -def get_output(proc, test_name): - global SUMMARY - result = "" - nb_tests = 0 - overall_duration = 0.0 - success = 0.0 - nb_totals = 0 - - while proc.poll() is None: - line = proc.stdout.readline() - if args.verbose: - result += line - else: - if ("Load duration" in line or - "started" in line or - "finished" in line or - " Preparing" in line or - "+-" in line or - "|" in line): - result += line - elif "test scenario" in line: - result += "\n" + line 
- elif "Full duration" in line: - result += line + "\n\n" - - # parse output for summary report - if ("| " in line and - "| action" not in line and - "| Starting" not in line and - "| Completed" not in line and - "| ITER" not in line and - "| " not in line and - "| total" not in line): - nb_tests += 1 - elif "| total" in line: - percentage = ((line.split('|')[8]).strip(' ')).strip('%') - try: - success += float(percentage) - except ValueError: - logger.info('Percentage error: %s, %s' % (percentage, line)) - nb_totals += 1 - elif "Full duration" in line: - duration = line.split(': ')[1] - try: - overall_duration += float(duration) - except ValueError: - logger.info('Duration error: %s, %s' % (duration, line)) - - overall_duration = "{:10.2f}".format(overall_duration) - if nb_totals == 0: - success_avg = 0 - else: - success_avg = "{:0.2f}".format(success / nb_totals) - - scenario_summary = {'test_name': test_name, - 'overall_duration': overall_duration, - 'nb_tests': nb_tests, - 'success': success_avg} - SUMMARY.append(scenario_summary) - - logger.info("\n" + result) - - return result - - -def get_cmd_output(proc): - result = "" - - while proc.poll() is None: - line = proc.stdout.readline() - result += line - - return result - - -def run_task(test_name): - # - # the "main" function of the script who launch rally for a task - # :param test_name: name for the rally test - # :return: void - # - global SUMMARY - logger.info('Starting test scenario "{}" ...'.format(test_name)) - - task_file = '{}task.yaml'.format(SCENARIOS_DIR) - if not os.path.exists(task_file): - logger.error("Task file '%s' does not exist." % task_file) - exit(-1) - - test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", - test_name) - if not os.path.exists(test_file_name): - logger.error("The scenario '%s' does not exist." 
% test_file_name) - exit(-1) - - logger.debug('Scenario fetched from : {}'.format(test_file_name)) - - cmd_line = ("rally task start --abort-on-sla-failure " + - "--task {} ".format(task_file) + - "--task-args \"{}\" ".format(build_task_args(test_name))) - logger.debug('running command line : {}'.format(cmd_line)) - - p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, - stderr=RALLY_STDERR, shell=True) - output = get_output(p, test_name) - task_id = get_task_id(output) - logger.debug('task_id : {}'.format(task_id)) - - if task_id is None: - logger.error('Failed to retrieve task_id, validating task...') - cmd_line = ("rally task validate " + - "--task {} ".format(task_file) + - "--task-args \"{}\" ".format(build_task_args(test_name))) - logger.debug('running command line : {}'.format(cmd_line)) - p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True) - output = get_cmd_output(p) - logger.error("Task validation result:" + "\n" + output) - return - - # check for result directory and create it otherwise - if not os.path.exists(RESULTS_DIR): - logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR)) - os.makedirs(RESULTS_DIR) - - # write html report file - report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name) - cmd_line = "rally task report {} --out {}".format(task_id, - report_file_name) - - logger.debug('running command line : {}'.format(cmd_line)) - os.popen(cmd_line) - - # get and save rally operation JSON result - cmd_line = "rally task results %s" % task_id - logger.debug('running command line : {}'.format(cmd_line)) - cmd = os.popen(cmd_line) - json_results = cmd.read() - with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f: - logger.debug('saving json file') - f.write(json_results) - - with open('{}opnfv-{}.json' - .format(RESULTS_DIR, test_name)) as json_file: - json_data = json.load(json_file) - - """ parse JSON operation result """ - status = "failed" - if task_succeed(json_results): - logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n") - status = "passed" - else: - logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n") - - # Push results in payload of testcase - if args.report: - logger.debug("Push result into DB") - push_results_to_db("Rally_details", json_data, status) - - -def main(): - global SUMMARY - global network_dict - # configure script - if not (args.test_name in tests): - logger.error('argument not valid') - exit(-1) - - SUMMARY = [] - creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2', **creds_nova) - creds_neutron = openstack_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - creds_keystone = openstack_utils.get_credentials("keystone") - keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for( - service_type='image', endpoint_type='publicURL') - glance_client = glanceclient.Client(1, glance_endpoint, - token=keystone_client.auth_token) - creds_cinder = openstack_utils.get_credentials("cinder") - cinder_client = cinderclient.Client('2', creds_cinder['username'], - creds_cinder['api_key'], - creds_cinder['project_id'], - creds_cinder['auth_url'], - service_type="volume") - - client_dict['neutron'] = neutron_client - - volume_types = openstack_utils.list_volume_types(cinder_client, - private=False) - if not volume_types: - volume_type = openstack_utils.create_volume_type( - cinder_client, CINDER_VOLUME_TYPE_NAME) - if not 
volume_type: - logger.error("Failed to create volume type...") - exit(-1) - else: - logger.debug("Volume type '%s' created succesfully..." - % CINDER_VOLUME_TYPE_NAME) - else: - logger.debug("Using existing volume type(s)...") - - image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) - image_exists = False - - if image_id == '': - logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH)) - image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) - if not image_id: - logger.error("Failed to create the Glance image...") - exit(-1) - else: - logger.debug("Image '%s' with ID '%s' created succesfully ." - % (GLANCE_IMAGE_NAME, image_id)) - else: - logger.debug("Using existing image '%s' with ID '%s'..." - % (GLANCE_IMAGE_NAME, image_id)) - image_exists = True - - logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME) - network_dict = openstack_utils.create_network_full(logger, - client_dict['neutron'], - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if not network_dict: - logger.error("Failed to create network...") - exit(-1) - else: - if not openstack_utils.update_neutron_net(client_dict['neutron'], - network_dict['net_id'], - shared=True): - logger.error("Failed to update network...") - exit(-1) - else: - logger.debug("Network '%s' available..." % PRIVATE_NET_NAME) - - if args.test_name == "all": - for test_name in tests: - if not (test_name == 'all' or - test_name == 'vm'): - run_task(test_name) - else: - logger.debug("Test name: " + args.test_name) - run_task(args.test_name) - - report = ("\n" - " " - "\n" - " Rally Summary Report\n" - "\n" - "+===================+============+===============+===========+" - "\n" - "| Module | Duration | nb. 
Test Run | Success |" - "\n" - "+===================+============+===============+===========+" - "\n") - payload = [] - - # for each scenario we draw a row for the table - total_duration = 0.0 - total_nb_tests = 0 - total_success = 0.0 - for s in SUMMARY: - name = "{0:<17}".format(s['test_name']) - duration = float(s['overall_duration']) - total_duration += duration - duration = time.strftime("%M:%S", time.gmtime(duration)) - duration = "{0:<10}".format(duration) - nb_tests = "{0:<13}".format(s['nb_tests']) - total_nb_tests += int(s['nb_tests']) - success = "{0:<10}".format(str(s['success']) + '%') - total_success += float(s['success']) - report += ("" + - "| " + name + " | " + duration + " | " + - nb_tests + " | " + success + "|\n" + - "+-------------------+------------" - "+---------------+-----------+\n") - payload.append({'module': name, - 'details': {'duration': s['overall_duration'], - 'nb tests': s['nb_tests'], - 'success': s['success']}}) - - total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration)) - total_duration_str2 = "{0:<10}".format(total_duration_str) - total_nb_tests_str = "{0:<13}".format(total_nb_tests) - total_success = "{:0.2f}".format(total_success / len(SUMMARY)) - total_success_str = "{0:<10}".format(str(total_success) + '%') - report += "+===================+============+===============+===========+" - report += "\n" - report += ("| TOTAL: | " + total_duration_str2 + " | " + - total_nb_tests_str + " | " + total_success_str + "|\n") - report += "+===================+============+===============+===========+" - report += "\n" - - logger.info("\n" + report) - payload.append({'summary': {'duration': total_duration, - 'nb tests': total_nb_tests, - 'nb success': total_success}}) - - # Generate json results for DB - # json_results = {"timestart": time_start, "duration": total_duration, - # "tests": int(total_nb_tests), - # "success": int(total_success)} - # logger.info("Results: "+str(json_results)) - - # Evaluation of the success criteria - status = "failed" - # for Rally we decided that the overall success rate must be above 90% - if total_success >= 90: - status = "passed" - - if args.report: - logger.debug("Pushing Rally summary into DB...") - push_results_to_db("Rally", payload, status) - - if args.noclean: - exit(0) - - if not image_exists: - logger.debug("Deleting image '%s' with ID '%s'..." - % (GLANCE_IMAGE_NAME, image_id)) - if not openstack_utils.delete_glance_image(nova_client, image_id): - logger.error("Error deleting the glance image") - - if not volume_types: - logger.debug("Deleting volume type '%s'..." - % CINDER_VOLUME_TYPE_NAME) - if not openstack_utils.delete_volume_type(cinder_client, volume_type): - logger.error("Error in deleting volume type...") - - -if __name__ == '__main__': - main() diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py deleted file mode 100644 index bf62ce306..000000000 --- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/env python -# -# Description: -# Runs tempest and pushes the results to the DB -# -# Authors: -# morgan.richomme@orange.com -# jose.lausuch@ericsson.com -# viktor.tikkanen@nokia.com -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -import argparse -import json -import os -import re -import requests -import shutil -import subprocess -import time -import yaml -import ConfigParser - -import keystoneclient.v2_0.client as ksclient -from neutronclient.v2_0 import client as neutronclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as ft_utils -import functest.utils.openstack_utils as os_utils - -modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing', - 'identity', 'image', 'network', 'object_storage', 'orchestration', - 'telemetry', 'volume', 'custom', 'defcore'] - -""" tests configuration """ -parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", - help="Debug mode", - action="store_true") -parser.add_argument("-s", "--serial", - help="Run tests in one thread", - action="store_true") -parser.add_argument("-m", "--mode", - help="Tempest test mode [smoke, all]", - default="smoke") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") -parser.add_argument("-n", "--noclean", - help="Don't clean the created resources for this test.", - action="store_true") - -args = parser.parse_args() - -""" logging configuration """ -logger = ft_logger.Logger("run_tempest").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' - - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() -TEST_DB = functest_yaml.get("results").get("test_db_url") - -MODE = "smoke" -PRIVATE_NET_NAME = functest_yaml.get("tempest").get("private_net_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("tempest").get("private_subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("tempest").get("private_subnet_cidr") -ROUTER_NAME = functest_yaml.get("tempest").get("router_name") -TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name") -TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get( - "tenant_description") -USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name") -USER_PASSWORD = functest_yaml.get("tempest").get("identity").get( - "user_password") -DEPLOYMENT_MAME = functest_yaml.get("rally").get("deployment_name") -RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get( - "dir_rally_inst") -RESULTS_DIR = functest_yaml.get("general").get("directories").get( - "dir_results") -TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest' -TEST_LIST_DIR = functest_yaml.get("general").get("directories").get( - "dir_tempest_cases") -TEMPEST_CUSTOM = REPO_PATH + TEST_LIST_DIR + 'test_list.txt' -TEMPEST_BLACKLIST = REPO_PATH + TEST_LIST_DIR + 'blacklist.txt' -TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt' -TEMPEST_RAW_LIST = TEMPEST_RESULTS_DIR + '/test_raw_list.txt' -TEMPEST_LIST = TEMPEST_RESULTS_DIR + '/test_list.txt' - - -def get_info(file_result): - test_run = "" - duration = "" - test_failed = "" - - p = subprocess.Popen('cat tempest.log', - shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - for line in p.stdout.readlines(): - # print line, - if (len(test_run) < 1): - test_run = re.findall("[0-9]*\.[0-9]*s", line) - if (len(duration) < 1): - duration = re.findall("[0-9]*\ tests", line) - regexp = r"(failures=[0-9]+)" - if (len(test_failed) < 1): - test_failed = 
re.findall(regexp, line) - - logger.debug("test_run:" + test_run) - logger.debug("duration:" + duration) - - -def push_results_to_db(case, payload, criteria): - - # TODO move DB creds into config file - url = TEST_DB + "/results" - installer = ft_utils.get_installer_type(logger) - scenario = ft_utils.get_scenario(logger) - version = ft_utils.get_version(logger) - pod_name = ft_utils.get_pod_name(logger) - - logger.info("Pushing results to DB: '%s'." % url) - - params = {"project_name": "functest", "case_name": case, - "pod_name": str(pod_name), 'installer': installer, - "version": version, "scenario": scenario, "criteria": criteria, - 'details': payload} - headers = {'Content-Type': 'application/json'} - - r = requests.post(url, data=json.dumps(params), headers=headers) - logger.debug(r) - - -def create_tempest_resources(): - ks_creds = os_utils.get_credentials("keystone") - logger.debug("Creating tenant and user for Tempest suite") - keystone = ksclient.Client(**ks_creds) - tenant_id = os_utils.create_tenant(keystone, - TENANT_NAME, - TENANT_DESCRIPTION) - if tenant_id == '': - logger.error("Error : Failed to create %s tenant" % TENANT_NAME) - - user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD, - None, tenant_id) - if user_id == '': - logger.error("Error : Failed to create %s user" % USER_NAME) - - logger.debug("Creating private network for Tempest suite") - creds_neutron = os_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - network_dic = os_utils.create_network_full(logger, - neutron_client, - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if network_dic: - if not os_utils.update_neutron_net(neutron_client, - network_dic['net_id'], - shared=True): - logger.error("Failed to update private network...") - exit(-1) - else: - logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME) - else: - logger.error("Private network creation failed") - exit(-1) - - -def configure_tempest(deployment_dir): - """ - Add/update needed parameters into tempest.conf file generated by Rally - """ - - logger.debug("Generating tempest.conf file...") - cmd = "rally verify genconfig" - ft_utils.execute_command(cmd, logger) - - logger.debug("Finding tempest.conf file...") - tempest_conf_file = deployment_dir + "/tempest.conf" - if not os.path.isfile(tempest_conf_file): - logger.error("Tempest configuration file %s NOT found." - % tempest_conf_file) - exit(-1) - - logger.debug("Updating selected tempest.conf parameters...") - config = ConfigParser.RawConfigParser() - config.read(tempest_conf_file) - config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME) - config.set('identity', 'tenant_name', TENANT_NAME) - config.set('identity', 'username', USER_NAME) - config.set('identity', 'password', USER_PASSWORD) - with open(tempest_conf_file, 'wb') as config_file: - config.write(config_file) - - # Copy tempest.conf to /home/opnfv/functest/results/tempest/ - shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf') - return True - - -def read_file(filename): - with open(filename) as src: - return [line.strip() for line in src.readlines()] - - -def generate_test_list(deployment_dir, mode): - logger.debug("Generating test case list...") - if mode == 'defcore': - shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST) - elif mode == 'custom': - if os.path.isfile(TEMPEST_CUSTOM): - shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST) - else: - logger.error("Tempest test list file %s NOT found." 
- % TEMPEST_CUSTOM) - exit(-1) - else: - if mode == 'smoke': - testr_mode = "smoke" - elif mode == 'full': - testr_mode = "" - else: - testr_mode = 'tempest.api.' + mode - cmd = ("cd " + deployment_dir + ";" + "testr list-tests " + - testr_mode + ">" + TEMPEST_RAW_LIST + ";cd") - ft_utils.execute_command(cmd, logger) - - -def apply_tempest_blacklist(): - logger.debug("Applying tempest blacklist...") - cases_file = read_file(TEMPEST_RAW_LIST) - result_file = open(TEMPEST_LIST, 'w') - try: - black_file = read_file(TEMPEST_BLACKLIST) - except: - black_file = '' - logger.debug("Tempest blacklist file does not exist.") - for line in cases_file: - if line not in black_file: - result_file.write(str(line) + '\n') - result_file.close() - - -def run_tempest(OPTION): - # - # the "main" function of the script which launches Rally to run Tempest - # :param option: tempest option (smoke, ..) - # :return: void - # - logger.info("Starting Tempest test suite: '%s'." % OPTION) - cmd_line = "rally verify start " + OPTION + " --system-wide" - CI_DEBUG = os.environ.get("CI_DEBUG") - if CI_DEBUG == "true" or CI_DEBUG == "True": - ft_utils.execute_command(cmd_line, logger, exit_on_error=True) - else: - header = ("Tempest environment:\n" - " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % - (os.getenv('INSTALLER_TYPE', 'Unknown'), - os.getenv('DEPLOY_SCENARIO', 'Unknown'), - os.getenv('NODE_NAME', 'Unknown'), - time.strftime("%a %b %d %H:%M:%S %Z %Y"))) - - f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') - f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') - f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') - f_env.write(header) - - subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) - - f_stdout.close() - f_stderr.close() - f_env.close() - - cmd_line = "rally verify show" - ft_utils.execute_command(cmd_line, logger, - exit_on_error=True, info=True) - - cmd_line = "rally verify list" - logger.debug('Executing command : {}'.format(cmd_line)) - cmd = os.popen(cmd_line) - output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|") - # Format: - # | UUID | Deployment UUID | smoke | tests | failures | Created at | - # Duration | Status | - num_tests = output[4] - num_failures = output[5] - time_start = output[6] - duration = output[7] - # Compute duration (lets assume it does not take more than 60 min) - dur_min = int(duration.split(':')[1]) - dur_sec_float = float(duration.split(':')[2]) - dur_sec_int = int(round(dur_sec_float, 0)) - dur_sec_int = dur_sec_int + 60 * dur_min - - # Generate json results for DB - json_results = {"timestart": time_start, "duration": dur_sec_int, - "tests": int(num_tests), "failures": int(num_failures)} - logger.info("Results: " + str(json_results)) - - status = "failed" - try: - diff = (int(num_tests) - int(num_failures)) - success_rate = 100 * diff / int(num_tests) - except: - success_rate = 0 - - # For Tempest we assume that teh success rate is above 90% - if success_rate >= 90: - status = "passed" - - # Push results in payload of testcase - if args.report: - logger.debug("Push result into DB") - push_results_to_db("Tempest", json_results, status) - - -def main(): - global MODE - - if not (args.mode in modes): - logger.error("Tempest mode not valid. 
" - "Possible values are:\n" + str(modes)) - exit(-1) - - if not os.path.exists(TEMPEST_RESULTS_DIR): - os.makedirs(TEMPEST_RESULTS_DIR) - - deployment_dir = ft_utils.get_deployment_dir(logger) - configure_tempest(deployment_dir) - create_tempest_resources() - generate_test_list(deployment_dir, args.mode) - apply_tempest_blacklist() - - MODE = "--tests-file " + TEMPEST_LIST - if args.serial: - MODE += " --concur 1" - - run_tempest(MODE) - - -if __name__ == '__main__': - main() diff --git a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml b/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml deleted file mode 100644 index 48c0333e9..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%} -{%- if use_existing_users and caller is not defined -%} {} -{%- else %} - {%- if not use_existing_users %} - users: - tenants: {{ tenants }} - users_per_tenant: {{ users_per_tenant }} - {%- endif %} - {%- if caller is defined %} - {{ caller() }} - {%- endif %} -{%- endif %} -{%- endmacro %} - -{%- macro vm_params(image=none, flavor=none, size=none) %} -{%- if flavor is not none %} - flavor: - name: {{ flavor }} -{%- endif %} -{%- if image is not none %} - image: - name: {{ image }} -{%- endif %} -{%- if size is not none %} - size: {{ size }} -{%- endif %} -{%- endmacro %} - -{%- macro unlimited_volumes() %} - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 -{%- endmacro %} - -{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %} - type: "constant" - {%- if is_smoke %} - concurrency: 1 - times: 1 - {%- else %} - concurrency: {{ concurrency }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro rps_runner(rps=1, times=1, is_smoke=True) %} - type: rps - {%- if is_smoke %} - rps: 1 - times: 1 - {%- else %} - rps: {{ rps }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro no_failures_sla() %} - failure_rate: - max: 0 -{%- endmacro %} - -{%- macro volumes(size=1, volumes_per_tenant=1) %} - volumes: - size: {{ size }} - volumes_per_tenant: {{ volumes_per_tenant }} -{%- endmacro %} - -{%- macro unlimited_nova(keypairs=false) %} - nova: - cores: -1 - floating_ips: -1 - instances: -1 - {%- if keypairs %} - key_pairs: -1 - {%- endif %} - ram: -1 - security_group_rules: -1 - security_groups: -1 -{%- endmacro %} - -{%- macro unlimited_neutron(secgroups=false) %} - neutron: - network: -1 - port: -1 - subnet: -1 - {%- if secgroups %} - security_group: -1 - security_group_rule: -1 - {%- endif %} -{%- endmacro %} - -{%- macro glance_args(location, container="bare", type="qcow2") %} - container_format: {{ container }} - disk_format: {{ type }} - image_location: {{ location }} -{%- endmacro %} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml deleted file mode 100644 index a04e4c1c1..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml +++ /dev/null @@ -1,63 +0,0 @@ - Authenticate.keystone: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_cinder: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, 
times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_glance: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_heat: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_neutron: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_nova: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml deleted file mode 100644 index cb28ee84e..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml +++ /dev/null @@ -1,272 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - CinderVolumes.create_and_attach_volume: - - - args: - {{ vm_params(image_name,flavor_name,1) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_snapshots: - - - args: - detailed: true - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_volume: - - - args: - detailed: true - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - detailed: true - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - container_format: "bare" - disk_format: "raw" - do_delete: true - force: false - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - nested_level: 1 - size: - max: 1 - min: 1 - context: - {% call 
user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 1 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_snapshot_and_attach_volume: - - - args: - volume_type: false - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - volume_type: true - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_volume: - - - args: - size: 1 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - - - args: - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.list_volumes: - - - args: - detailed: True - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - volumes: - size: 1 - volumes_per_tenant: 4 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_volume: - - - args: - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) 
}} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_extend_volume: - - - args: - new_size: 2 - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml deleted file mode 100644 index adbf8b79a..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml +++ /dev/null @@ -1,49 +0,0 @@ - GlanceImages.create_and_delete_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_and_list_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.list_images: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_image_and_boot_instances: - - - args: - {{ glance_args(location=glance_image_location) }} - flavor: - name: {{ flavor_name }} - number_instances: 2 - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - quotas: - {{ unlimited_nova() }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml deleted file mode 100644 index 534d796ea..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml +++ /dev/null @@ -1,160 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - HeatStacks.create_and_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/default.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template" - parameters: - public_net: {{ floating_network }} - image: {{ image_name }} - flavor: {{ flavor_name }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template" - parameters: - image: {{ image_name }} - 
flavor: {{ flavor_name }} - network_id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_and_list_stack: - - - args: - template_path: "{{ tmpl_dir }}/default.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_update_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/resource_group.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/resource_group.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% else %} - - HeatStacks.create_update_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - HeatStacks.create_check_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, 
times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_suspend_resume_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.list_stacks_and_resources: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml deleted file mode 100644 index bfc9948b3..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml +++ /dev/null @@ -1,92 +0,0 @@ - KeystoneBasic.add_and_remove_user_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_add_and_list_user_roles: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_tenants: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_service: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.get_entities: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_update_and_delete_tenant: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_user: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_tenant: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_users: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml 
b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml deleted file mode 100644 index 3804d2589..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml +++ /dev/null @@ -1,245 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - port_update_args: - admin_state_up: false - device_id: "dummy_id" - device_owner: "dummy_owner" - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - router_create_args: {} - router_update_args: - admin_state_up: false - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnet_create_args: {} - subnet_update_args: - enable_dhcp: false - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() 
}} - - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml deleted file mode 100644 index f0fed8ef4..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml +++ /dev/null @@ -1,378 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - NovaKeypair.create_and_delete_keypair: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_list_keypairs: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_bounce_server: - - - args: - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: 
"100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_delete_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_list_server: - - - args: - detailed: true - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_rebuild_server: - - - args: - {{ vm_params(flavor=flavor_name) }} - from_image: - name: {{ image_name }} - to_image: - name: {{ image_name }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.snapshot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 10 - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.create_and_delete_secgroups: - - - args: - security_group_count: 10 - rules_per_security_group: 10 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.create_and_list_secgroups: - - - args: - security_group_count: 10 - rules_per_security_group: 10 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ 
unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.list_servers: - - - args: - detailed: True - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.resize_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% if live_migration %} - - NovaServers.boot_and_live_migrate_server: - - args: - {{ vm_params(image_name, flavor_name) }} - block_migration: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_attach_created_volume_and_live_migrate: - - - args: - {{ vm_params(image_name, flavor_name) }} - size: 10 - block_migration: false - boot_server_kwargs: - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_live_migrate: - - args: - {{ vm_params(image_name, flavor_name) }} - block_migration: false - volume_size: 10 - force_delete: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} -{% endif %} - - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - {{ vm_params(image_name, flavor_name) }} - server_kwargs: - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 5 - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_volumes() }} - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.pause_and_unpause_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - force_delete: false - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, 
use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.boot_and_delete_server_with_secgroups: - - - args: - {{ vm_params(image_name, flavor_name) }} - security_group_count: 10 - rules_per_security_group: 10 - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_nova() }} - {{ unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_migrate_server: - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml deleted file mode 100644 index a0682acce..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml +++ /dev/null @@ -1,54 +0,0 @@ - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.cinder_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.neutron_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml deleted file mode 100644 index 6affcc6c6..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml +++ /dev/null @@ -1,28 +0,0 @@ - HttpRequests.check_random_request: - - - args: - requests: - - - url: "http://www.example.com" - method: "GET" - status_code: 200 - - - url: "http://www.openstack.org" - method: "GET" - status_code: 200 - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HttpRequests.check_request: - - - args: - url: "http://www.example.com" - method: "GET" - status_code: 
200 - allow_redirects: False - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml deleted file mode 100644 index f102edb2b..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml +++ /dev/null @@ -1,268 +0,0 @@ - TempestScenario.list_of_tests: - - - args: - tempest_conf: /etc/tempest/tempest.conf - test_names: - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail - - tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups - - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip - - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm - - 
tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped - - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses - - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network - - tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance - - tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas - - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas - - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas - - tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_create - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_delete - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_get - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_list - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_create - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_delete - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_get - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_list - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_job_binary_get_data - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_create - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_delete - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_get - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_list - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_create - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_delete - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get - - 
tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get_data - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_list - - tempest.api.data_processing.test_jobs.JobTest.test_job_create - - tempest.api.data_processing.test_jobs.JobTest.test_job_delete - - tempest.api.data_processing.test_jobs.JobTest.test_job_get - - tempest.api.data_processing.test_jobs.JobTest.test_job_list - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_create - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_delete - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_get - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_list - - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_get - - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_list - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_compare_db_flavors_with_os - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_get_db_flavor - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_list_db_flavors - - tempest.api.database.limits.test_limits.DatabaseLimitsTest.test_absolute_limits - - tempest.api.database.versions.test_versions.DatabaseVersionsTest.test_list_db_versions - - tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services - - tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user - - tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete - - tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain - - tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint - - tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete - - tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy - - tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id - - tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list - - tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service - - tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all - - tempest.api.messaging.test_claims.TestClaims.test_post_claim - - tempest.api.messaging.test_claims.TestClaims.test_query_claim - - tempest.api.messaging.test_claims.TestClaims.test_release_claim - - tempest.api.messaging.test_claims.TestClaims.test_update_claim - - tempest.api.messaging.test_messages.TestMessages.test_delete_multiple_messages - - tempest.api.messaging.test_messages.TestMessages.test_delete_single_message - - tempest.api.messaging.test_messages.TestMessages.test_get_message - - tempest.api.messaging.test_messages.TestMessages.test_get_multiple_messages - - tempest.api.messaging.test_messages.TestMessages.test_list_messages - - tempest.api.messaging.test_messages.TestMessages.test_post_messages - - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_existence - - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_head - - tempest.api.messaging.test_queues.TestManageQueue.test_get_queue_stats - - 
tempest.api.messaging.test_queues.TestManageQueue.test_list_queues - - tempest.api.messaging.test_queues.TestManageQueue.test_set_and_get_queue_metadata - - tempest.api.messaging.test_queues.TestQueues.test_create_delete_queue - - tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions - - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address - - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet - - tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility - - tempest.api.network.test_networks.NetworksTestJSON.test_list_networks - - tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets - - tempest.api.network.test_networks.NetworksTestJSON.test_show_network - - tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port - - tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools - - tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups - - tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port - - tempest.api.network.test_ports.PortsTestJSON.test_list_ports - - tempest.api.network.test_ports.PortsTestJSON.test_show_port - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces - - 
tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id - - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router - - tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces - - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id - - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id - - tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups - - tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group - - tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule - - tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups - - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota - - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object - - tempest.api.object_storage.test_account_services.AccountTest.test_list_account_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_json - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_xml - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_extensions - - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_account_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_containers - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_and_delete_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_matadata_key - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata_key - - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_read_object_with_rights - - 
tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_write_object_with_rights - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_large_object - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_overwrite - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_value - - tempest.api.object_storage.test_container_services.ContainerTest.test_delete_container - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_delimiter - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_end_marker - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_json - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_xml - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_limit - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_marker - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_no_object - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_path - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_prefix - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_no_container_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_and_delete_matadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_matadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata_key - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_fresh_metadata - - 
tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_meta - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_metakey - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_match - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_modified_since - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_unmodified_since - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_range - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_newest - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_no_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_create_and_remove_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_remove_object_metakey - - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds - - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_object_with_another_user_creds - - tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change - - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources - - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds - - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications - - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications - - tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance - - 
tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance - - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete - - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image - - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete - - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image - - tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list - - tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list - runner: - concurrency: 1 - times: 1 - type: serial - sla: - failure_rate: - max: 0 - diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml deleted file mode 100644 index 74f509925..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml +++ /dev/null @@ -1,42 +0,0 @@ - VMTasks.boot_runcommand_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - floating_network: {{ floating_network }} - force_delete: false - command: - interpreter: /bin/sh - script_file: {{ sup_dir }}/instance_dd_test.sh - username: cirros - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - - args: - {{ vm_params(image_name, flavor_name) }} - fixed_network: private - floating_network: {{ floating_network }} - force_delete: false - command: - interpreter: /bin/sh - script_file: {{ sup_dir }}/instance_dd_test.sh - use_floatingip: true - username: cirros - nics: - - net-id: {{ netid }} - volume_args: - size: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh b/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh deleted file mode 100644 index e3bf23405..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; } -file=/tmp/test.img -c=${1:-$SIZE} -c=${c:-1000} #default is 1GB -write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c") -read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c") -[ -f $file ] && rm $file - -echo "{ - \"write_seq_${c}m\": $write_seq, - \"read_seq_${c}m\": $read_seq - }" diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template deleted file mode 100644 index a22487e33..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - 
scaling_adjustment: 1 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template deleted file mode 100644 index eb4f2f2dd..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template deleted file mode 100644 index 2dd676c11..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template deleted file mode 100644 index b3f505fa6..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template deleted file mode 100644 index 909f45d21..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.4-x86_64-uec - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned to port. The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. 
- - server_port: - type: OS::Neutron::Port - properties: - network: {get_resource: private_net} - fixed_ips: - - subnet: { get_resource: private_subnet } - security_groups: - - { get_resource: port_security_group } diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template deleted file mode 100644 index 826ca9dae..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template +++ /dev/null @@ -1,43 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - image: - type: string - default: cirros-0.3.4-x86_64-uec - flavor: - type: string - default: m1.tiny - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - network_id: - type: string - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - network: { get_param: network_id } - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server} - mountpoint: /dev/vdc diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template deleted file mode 100644 index cf34879ca..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template +++ /dev/null @@ -1,23 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates resource parameters without resource re-creation(replacement) - in the stack defined by autoscaling_policy.yaml.template. It allows to measure - performance of "pure" resource update operation only. - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: -1 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template deleted file mode 100644 index e06d42e01..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates the stack defined by random_strings.yaml.template with additional resource. 
- -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_three: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template deleted file mode 100644 index d02593e3b..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by random_strings.yaml.template. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template deleted file mode 100644 index 46d8bff4c..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by - random_strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template deleted file mode 100644 index 891074ebc..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource_group.yaml.template - and adds children resources to that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 3 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template deleted file mode 100644 index b4d1d1730..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. 
- The template updates one resource from the stack defined by resource_group.yaml.template - and deletes children resources from that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml b/testcases/VIM/OpenStack/CI/rally_cert/task.yaml deleted file mode 100644 index b67891664..000000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{%- set glance_image_location = glance_image_location|default("http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img") %} -{%- set image_name = image_name|default("functest-img-rally") %} -{%- set flavor_name = flavor_name|default("m1.tiny") %} -{%- set use_existing_users = use_existing_users|default(false) %} -{%- set service_list = service_list|default(["authenticate", "cinder", "keystone", "nova", "glance", "neutron", "quotas", "requests", "heat", "vm"]) %} -{%- set live_migration = live_migration|default(false) %} -{%- set smoke = smoke|default(true) %} -{%- set floating_network = floating_network|default("net04_ext") %} -{%- set controllers_amount = controllers_amount|default(1) %} -{%- if smoke %} -{%- set users_amount = 1 %} -{%- set tenants_amount = 1 %} -{%- else %} -{%- set users_amount = users_amount|default(1) %} -{%- set tenants_amount = tenants_amount|default(1) %} -{%- endif %} - -{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%} -{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%} - ---- -{% if "authenticate" in service_list %} -{%- include "scenario/opnfv-authenticate.yaml"-%} -{% endif %} - -{% if "cinder" in service_list %} -{%- include "scenario/opnfv-cinder.yaml"-%} -{% endif %} - -{% if "keystone" in service_list %} -{%- include "scenario/opnfv-keystone.yaml"-%} -{% endif %} - -{% if "nova" in service_list %} -{%- include "scenario/opnfv-nova.yaml"-%} -{% endif %} - -{% if "glance" in service_list %} -{%- include "scenario/opnfv-glance.yaml"-%} -{% endif %} - -{% if "neutron" in service_list %} -{%- include "scenario/opnfv-neutron.yaml"-%} -{% endif %} - -{% if "quotas" in service_list %} -{%- include "scenario/opnfv-quotas.yaml"-%} -{% endif %} - -{% if "requests" in service_list %} -{%- include "scenario/opnfv-requests.yaml"-%} -{% endif %} - -{% if "heat" in service_list %} -{%- include "scenario/opnfv-heat.yaml"-%} -{% endif %} - -{% if "vm" in service_list %} -{%- include "scenario/opnfv-vm.yaml"-%} -{% endif %} diff --git a/testcases/vIMS/CI/clearwater.py b/testcases/vIMS/CI/clearwater.py deleted file mode 100644 index 7236f4fba..000000000 --- a/testcases/vIMS/CI/clearwater.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/python -# coding: utf8 -####################################################################### -# -# Copyright (c) 2015 Orange -# valentin.boucher@orange.com -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -######################################################################## - - -class clearwater: - - def __init__(self, inputs={}, orchestrator=None, logger=None): - self.config = inputs - self.orchestrator = orchestrator - self.logger = logger - self.deploy = False - - def set_orchestrator(self, orchestrator): - self.orchestrator = orchestrator - - def set_flavor_id(self, flavor_id): - self.config['flavor_id'] = flavor_id - - def set_image_id(self, image_id): - self.config['image_id'] = image_id - - def set_agent_user(self, agent_user): - self.config['agent_user'] = agent_user - - def set_external_network_name(self, external_network_name): - self.config['external_network_name'] = external_network_name - - def set_public_domain(self, public_domain): - self.config['public_domain'] = public_domain - - def deploy_vnf(self, blueprint, bp_name='clearwater', - dep_name='clearwater-opnfv'): - if self.orchestrator: - self.dep_name = dep_name - error = self.orchestrator.download_upload_and_deploy_blueprint( - blueprint, self.config, bp_name, dep_name) - if error: - return error - - self.deploy = True - - else: - if self.logger: - self.logger.error("Cloudify manager is down or not provide...") - - def undeploy_vnf(self): - if self.orchestrator: - if self.deploy: - self.deploy = False - self.orchestrator.undeploy_deployment(self.dep_name) - else: - if self.logger: - self.logger.error("Clearwater isn't already deploy...") - else: - if self.logger: - self.logger.error("Cloudify manager is down or not provide...") diff --git a/testcases/vIMS/CI/create_venv.sh b/testcases/vIMS/CI/create_venv.sh deleted file mode 100755 index 575fd177c..000000000 --- a/testcases/vIMS/CI/create_venv.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -e - -# Script checks that venv exists. If it doesn't it will be created -# It requires python2.7 and virtualenv packages installed -# -# Copyright (c) 2015 Orange -# valentin.boucher@orange.com -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -BASEDIR=`dirname $0` -VENV_PATH=$1 -VENV_NAME="venv_cloudify" -function venv_install() { - if command -v virtualenv-2.7; then - virtualenv-2.7 $1 - elif command -v virtualenv2; then - virtualenv2 $1 - elif command -v virtualenv; then - virtualenv $1 - else - echo Cannot find virtualenv command. - return 1 - fi -} - -# exit when something goes wrong during venv install -set -e -if [ ! -d "$VENV_PATH/$VENV_NAME" ]; then - venv_install $VENV_PATH/$VENV_NAME - echo "Virtualenv" + $VENV_NAME + "created." -fi - -if [ ! -f "$VENV_PATH/$VENV_NAME/updated" -o $BASEDIR/requirements.pip -nt $VENV_PATH/$VENV_NAME/updated ]; then - source $VENV_PATH/$VENV_NAME/bin/activate - pip install -r $BASEDIR/requirements.pip - touch $VENV_PATH/$VENV_NAME/updated - echo "Requirements installed." 
- deactivate -fi -set +e diff --git a/testcases/vIMS/CI/orchestrator.py b/testcases/vIMS/CI/orchestrator.py deleted file mode 100644 index c61f654ca..000000000 --- a/testcases/vIMS/CI/orchestrator.py +++ /dev/null @@ -1,236 +0,0 @@ -#!/usr/bin/python -# coding: utf8 -####################################################################### -# -# Copyright (c) 2015 Orange -# valentin.boucher@orange.com -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -######################################################################## -import subprocess -import os -import shutil -import yaml -from git import Repo - - -class orchestrator: - - def __init__(self, testcase_dir, inputs={}, logger=None): - self.testcase_dir = testcase_dir - self.blueprint_dir = testcase_dir + 'cloudify-manager-blueprint/' - self.input_file = 'inputs.yaml' - self.manager_blueprint = False - self.config = inputs - self.logger = logger - self.manager_up = False - - def set_credentials(self, username, password, tenant_name, auth_url): - self.config['keystone_username'] = username - self.config['keystone_password'] = password - self.config['keystone_url'] = auth_url - self.config['keystone_tenant_name'] = tenant_name - - def set_flavor_id(self, flavor_id): - self.config['flavor_id'] = flavor_id - - def set_image_id(self, image_id): - self.config['image_id'] = image_id - - def set_external_network_name(self, external_network_name): - self.config['external_network_name'] = external_network_name - - def set_ssh_user(self, ssh_user): - self.config['ssh_user'] = ssh_user - - def set_nameservers(self, nameservers): - if 0 < len(nameservers): - self.config['dns_subnet_1'] = nameservers[0] - if 1 < len(nameservers): - self.config['dns_subnet_2'] = nameservers[1] - - def set_logger(self, logger): - self.logger = logger - - def download_manager_blueprint(self, manager_blueprint_url, - manager_blueprint_branch): - if self.manager_blueprint: - if self.logger: - self.logger.info( - "cloudify manager server blueprint is " - "already downloaded !") - else: - if self.logger: - self.logger.info( - "Downloading the cloudify manager server blueprint") - download_result = download_blueprints( - manager_blueprint_url, - manager_blueprint_branch, - self.blueprint_dir) - - if not download_result: - if self.logger: - self.logger.error("Failed to download manager blueprint") - exit(-1) - else: - self.manager_blueprint = True - - def manager_up(self): - return self.manager_up - - def deploy_manager(self): - if self.manager_blueprint: - if self.logger: - self.logger.info("Writing the inputs file") - with open(self.blueprint_dir + "inputs.yaml", "w") as f: - f.write(yaml.dump(self.config, default_style='"')) - f.close() - - # Ensure no ssh key file already exists - key_files = ["/.ssh/cloudify-manager-kp.pem", - "/.ssh/cloudify-agent-kp.pem"] - home = os.path.expanduser("~") - - for key_file in key_files: - if os.path.isfile(home + key_file): - os.remove(home + key_file) - - if self.logger: - self.logger.info("Launching the cloudify-manager deployment") - script = "set -e; " - script += ("source " + self.testcase_dir + - "venv_cloudify/bin/activate; ") - script += "cd " + self.testcase_dir + "; " - script += "cfy init -r; " - script += "cd cloudify-manager-blueprint; " - script += ("cfy local create-requirements -o requirements.txt " + - "-p 
openstack-manager-blueprint.yaml; ") - script += "pip install -r requirements.txt; " - script += ("timeout 1800 cfy bootstrap --install-plugins " + - "-p openstack-manager-blueprint.yaml -i inputs.yaml; ") - cmd = "/bin/bash -c '" + script + "'" - error = execute_command(cmd, self.logger) - if error: - return error - - if self.logger: - self.logger.info("Cloudify-manager server is UP !") - - self.manager_up = True - - def undeploy_manager(self): - if self.logger: - self.logger.info("Launching the cloudify-manager undeployment") - - self.manager_up = False - - script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " - script += "cd " + self.testcase_dir + "; " - script += "cfy teardown -f --ignore-deployments; " - cmd = "/bin/bash -c '" + script + "'" - execute_command(cmd, self.logger) - - if self.logger: - self.logger.info( - "Cloudify-manager server has been successfully removed!") - - def download_upload_and_deploy_blueprint(self, blueprint, config, - bp_name, dep_name): - if self.logger: - self.logger.info("Downloading the {0} blueprint".format( - blueprint['file_name'])) - download_result = download_blueprints(blueprint['url'], - blueprint['branch'], - self.testcase_dir + - blueprint['destination_folder']) - - if not download_result: - if self.logger: - self.logger.error( - "Failed to download blueprint {0}". - format(blueprint['file_name'])) - exit(-1) - - if self.logger: - self.logger.info("Writing the inputs file") - - with open(self.testcase_dir + blueprint['destination_folder'] + - "/inputs.yaml", "w") as f: - f.write(yaml.dump(config, default_style='"')) - - f.close() - - if self.logger: - self.logger.info("Launching the {0} deployment".format(bp_name)) - script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " - script += ("cd " + self.testcase_dir + - blueprint['destination_folder'] + "; ") - script += ("cfy blueprints upload -b " + - bp_name + " -p openstack-blueprint.yaml; ") - script += ("cfy deployments create -b " + bp_name + - " -d " + dep_name + " --inputs inputs.yaml; ") - script += ("cfy executions start -w install -d " + - dep_name + " --timeout 1800; ") - - cmd = "/bin/bash -c '" + script + "'" - error = execute_command(cmd, self.logger) - if error: - return error - if self.logger: - self.logger.info("The deployment of {0} is ended".format(dep_name)) - - def undeploy_deployment(self, dep_name): - if self.logger: - self.logger.info("Launching the {0} undeployment".format(dep_name)) - script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " - script += "cd " + self.testcase_dir + "; " - script += ("cfy executions start -w uninstall -d " + dep_name + - " --timeout 1800 ; ") - script += "cfy deployments delete -d " + dep_name + "; " - - cmd = "/bin/bash -c '" + script + "'" - try: - execute_command(cmd, self.logger) - except: - if self.logger: - self.logger.error("Clearwater undeployment failed") - - -def execute_command(cmd, logger): - """ - Execute Linux command - """ - if logger: - logger.debug('Executing command : {}'.format(cmd)) - output_file = "output.txt" - f = open(output_file, 'w+') - p = subprocess.call(cmd, shell=True, stdout=f, stderr=subprocess.STDOUT) - f.close() - f = open(output_file, 'r') - result = f.read() - if result != "" and logger: - logger.debug(result) - if p == 0: - return False - else: - if logger: - logger.error("Error when executing command %s" % cmd) - f = open(output_file, 'r') - lines = f.readlines() - result = lines[len(lines) - 3] - result += lines[len(lines) - 2] - result += 
lines[len(lines) - 1] - return result - - -def download_blueprints(blueprint_url, branch, dest_path): - if os.path.exists(dest_path): - shutil.rmtree(dest_path) - try: - Repo.clone_from(blueprint_url, dest_path, branch=branch) - return True - except: - return False diff --git a/testcases/vIMS/CI/requirements.pip b/testcases/vIMS/CI/requirements.pip deleted file mode 100644 index 9b9d0ba53..000000000 --- a/testcases/vIMS/CI/requirements.pip +++ /dev/null @@ -1 +0,0 @@ -cloudify==3.3 \ No newline at end of file diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py deleted file mode 100644 index 2430af1a1..000000000 --- a/testcases/vIMS/CI/vIMS.py +++ /dev/null @@ -1,553 +0,0 @@ -#!/usr/bin/python -# coding: utf8 -####################################################################### -# -# Copyright (c) 2015 Orange -# valentin.boucher@orange.com -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -######################################################################## - -import argparse -import datetime -import json -import os -import pprint -import requests -import subprocess -import time -import yaml - -import keystoneclient.v2_0.client as ksclient -import glanceclient.client as glclient -import novaclient.client as nvclient -from neutronclient.v2_0 import client as ntclient - -import clearwater -import orchestrator - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils -import functest.utils.openstack_utils as os_utils - - -pp = pprint.PrettyPrinter(indent=4) - - -parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") -parser.add_argument("-n", "--noclean", - help="Don't clean the created resources for this test.", - action="store_true") -args = parser.parse_args() - -""" logging configuration """ -logger = ft_logger.Logger("vIMS").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -# Cloudify parameters -VIMS_DIR = (REPO_PATH + - functest_yaml.get("general").get("directories").get("dir_vIMS")) -VIMS_DATA_DIR = functest_yaml.get("general").get( - "directories").get("dir_vIMS_data") + "/" -VIMS_TEST_DIR = functest_yaml.get("general").get( - "directories").get("dir_repo_vims_test") + "/" -DB_URL = functest_yaml.get("results").get("test_db_url") - -TENANT_NAME = functest_yaml.get("vIMS").get("general").get("tenant_name") -TENANT_DESCRIPTION = functest_yaml.get("vIMS").get( - "general").get("tenant_description") -IMAGES = functest_yaml.get("vIMS").get("general").get("images") - -CFY_MANAGER_BLUEPRINT = functest_yaml.get( - "vIMS").get("cloudify").get("blueprint") -CFY_MANAGER_REQUIERMENTS = functest_yaml.get( - "vIMS").get("cloudify").get("requierments") -CFY_INPUTS = functest_yaml.get("vIMS").get("cloudify").get("inputs") - -CW_BLUEPRINT = functest_yaml.get("vIMS").get("clearwater").get("blueprint") -CW_DEPLOYMENT_NAME = functest_yaml.get("vIMS").get( - "clearwater").get("deployment-name") -CW_INPUTS = 
functest_yaml.get("vIMS").get("clearwater").get("inputs") -CW_REQUIERMENTS = functest_yaml.get("vIMS").get( - "clearwater").get("requierments") - -CFY_DEPLOYMENT_DURATION = 0 -CW_DEPLOYMENT_DURATION = 0 - -RESULTS = {'orchestrator': {'duration': 0, 'result': ''}, - 'vIMS': {'duration': 0, 'result': ''}, - 'sig_test': {'duration': 0, 'result': ''}} - - -def download_and_add_image_on_glance(glance, image_name, image_url): - dest_path = VIMS_DATA_DIR + "tmp/" - if not os.path.exists(dest_path): - os.makedirs(dest_path) - file_name = image_url.rsplit('/')[-1] - if not functest_utils.download_url(image_url, dest_path): - logger.error("Failed to download image %s" % file_name) - return False - - image = os_utils.create_glance_image( - glance, image_name, dest_path + file_name) - if not image: - logger.error("Failed to upload image on glance") - return False - - return image - - -def step_failure(step_name, error_msg): - logger.error(error_msg) - set_result(step_name, 0, error_msg) - status = "failed" - if step_name == "sig_test": - status = "passed" - push_results(status) - exit(-1) - - -def push_results(status): - if args.report: - logger.debug("Pushing results to DB....") - - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - pod_name = functest_utils.get_pod_name(logger) - build_tag = functest_utils.get_build_tag(logger) - - functest_utils.push_results_to_db(db_url=DB_URL, - project="functest", - case_name="vIMS", - logger=logger, pod_name=pod_name, - version=version, - scenario=scenario, - criteria=status, - build_tag=build_tag, - payload=RESULTS) - - -def set_result(step_name, duration=0, result=""): - RESULTS[step_name] = {'duration': duration, 'result': result} - - -def test_clearwater(): - script = "source " + VIMS_DATA_DIR + "venv_cloudify/bin/activate; " - script += "cd " + VIMS_DATA_DIR + "; " - script += "cfy status | grep -Eo \"([0-9]{1,3}\.){3}[0-9]{1,3}\"" - cmd = "/bin/bash -c '" + script + "'" - - try: - logger.debug("Trying to get clearwater manager IP ... 
") - mgr_ip = os.popen(cmd).read() - mgr_ip = mgr_ip.splitlines()[0] - except: - step_failure("sig_test", "Unable to retrieve the IP of the " - "cloudify manager server !") - - api_url = "http://" + mgr_ip + "/api/v2" - dep_outputs = requests.get(api_url + "/deployments/" + - CW_DEPLOYMENT_NAME + "/outputs") - dns_ip = dep_outputs.json()['outputs']['dns_ip'] - ellis_ip = dep_outputs.json()['outputs']['ellis_ip'] - - ellis_url = "http://" + ellis_ip + "/" - url = ellis_url + "accounts" - - params = {"password": "functest", - "full_name": "opnfv functest user", - "email": "functest@opnfv.fr", - "signup_code": "secret"} - - rq = requests.post(url, data=params) - i = 20 - while rq.status_code != 201 and i > 0: - rq = requests.post(url, data=params) - i = i - 1 - time.sleep(10) - - if rq.status_code == 201: - url = ellis_url + "session" - rq = requests.post(url, data=params) - cookies = rq.cookies - - url = ellis_url + "accounts/" + params['email'] + "/numbers" - if cookies != "": - rq = requests.post(url, cookies=cookies) - i = 24 - while rq.status_code != 200 and i > 0: - rq = requests.post(url, cookies=cookies) - i = i - 1 - time.sleep(25) - - if rq.status_code != 200: - step_failure("sig_test", "Unable to create a number: %s" - % rq.json()['reason']) - - start_time_ts = time.time() - end_time_ts = start_time_ts - logger.info("vIMS functional test Start Time:'%s'" % ( - datetime.datetime.fromtimestamp(start_time_ts).strftime( - '%Y-%m-%d %H:%M:%S'))) - nameservers = functest_utils.get_resolvconf_ns() - resolvconf = "" - for ns in nameservers: - resolvconf += "\nnameserver " + ns - - if dns_ip != "": - script = ('echo -e "nameserver ' + dns_ip + resolvconf + - '" > /etc/resolv.conf; ') - script += 'source /etc/profile.d/rvm.sh; ' - script += 'cd ' + VIMS_TEST_DIR + '; ' - script += ('rake test[' + CW_INPUTS["public_domain"] + - '] SIGNUP_CODE="secret"') - - cmd = "/bin/bash -c '" + script + "'" - output_file = "output.txt" - f = open(output_file, 'w+') - subprocess.call(cmd, shell=True, stdout=f, - stderr=subprocess.STDOUT) - f.close() - end_time_ts = time.time() - duration = round(end_time_ts - start_time_ts, 1) - logger.info("vIMS functional test duration:'%s'" % duration) - f = open(output_file, 'r') - result = f.read() - if result != "" and logger: - logger.debug(result) - - vims_test_result = "" - try: - logger.debug("Trying to load test results") - with open(VIMS_TEST_DIR + "temp.json") as f: - vims_test_result = json.load(f) - f.close() - except: - logger.error("Unable to retrieve test results") - - set_result("sig_test", duration, vims_test_result) - - # success criteria for vIMS (for Brahmaputra) - # - orchestrator deployed - # - VNF deployed - status = "failed" - try: - if (RESULTS['orchestrator']['duration'] > 0 and - RESULTS['vIMS']['duration'] > 0): - status = "passed" - except: - logger.error("Unable to set test status") - push_results(status) - - try: - os.remove(VIMS_TEST_DIR + "temp.json") - except: - logger.error("Deleting file failed") - - -def main(): - - # ############### GENERAL INITIALISATION ################ - - if not os.path.exists(VIMS_DATA_DIR): - os.makedirs(VIMS_DATA_DIR) - - ks_creds = os_utils.get_credentials("keystone") - nv_creds = os_utils.get_credentials("nova") - nt_creds = os_utils.get_credentials("neutron") - - logger.info("Prepare OpenStack plateform (create tenant and user)") - keystone = ksclient.Client(**ks_creds) - - user_id = os_utils.get_user_id(keystone, ks_creds['username']) - if user_id == '': - step_failure("init", "Error : Failed to get id of 
" + - ks_creds['username']) - - tenant_id = os_utils.create_tenant( - keystone, TENANT_NAME, TENANT_DESCRIPTION) - if tenant_id == '': - step_failure("init", "Error : Failed to create " + - TENANT_NAME + " tenant") - - roles_name = ["admin", "Admin"] - role_id = '' - for role_name in roles_name: - if role_id == '': - role_id = os_utils.get_role_id(keystone, role_name) - - if role_id == '': - logger.error("Error : Failed to get id for %s role" % role_name) - - if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id): - logger.error("Error : Failed to add %s on tenant" % - ks_creds['username']) - - user_id = os_utils.create_user( - keystone, TENANT_NAME, TENANT_NAME, None, tenant_id) - if user_id == '': - logger.error("Error : Failed to create %s user" % TENANT_NAME) - - logger.info("Update OpenStack creds informations") - ks_creds.update({ - "username": TENANT_NAME, - "password": TENANT_NAME, - "tenant_name": TENANT_NAME, - }) - - nt_creds.update({ - "tenant_name": TENANT_NAME, - }) - - nv_creds.update({ - "project_id": TENANT_NAME, - }) - - logger.info("Upload some OS images if it doesn't exist") - glance_endpoint = keystone.service_catalog.url_for( - service_type='image', endpoint_type='publicURL') - glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token) - - for img in IMAGES.keys(): - image_name = IMAGES[img]['image_name'] - image_url = IMAGES[img]['image_url'] - - image_id = os_utils.get_image_id(glance, image_name) - - if image_id == '': - logger.info("""%s image doesn't exist on glance repository. Try - downloading this image and upload on glance !""" % image_name) - image_id = download_and_add_image_on_glance( - glance, image_name, image_url) - - if image_id == '': - step_failure( - "init", - "Error : Failed to find or upload required OS " - "image for this deployment") - - nova = nvclient.Client("2", **nv_creds) - - logger.info("Update security group quota for this tenant") - neutron = ntclient.Client(**nt_creds) - if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100): - step_failure( - "init", - "Failed to update security group quota for tenant " + TENANT_NAME) - - logger.info("Update cinder quota for this tenant") - from cinderclient import client as cinderclient - - creds_cinder = os_utils.get_credentials("cinder") - cinder_client = cinderclient.Client('1', creds_cinder['username'], - creds_cinder['api_key'], - creds_cinder['project_id'], - creds_cinder['auth_url'], - service_type="volume") - if not os_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150): - step_failure( - "init", "Failed to update cinder quota for tenant " + TENANT_NAME) - - # ############### CLOUDIFY INITIALISATION ################ - - cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger) - - cfy.set_credentials(username=ks_creds['username'], password=ks_creds[ - 'password'], tenant_name=ks_creds['tenant_name'], - auth_url=ks_creds['auth_url']) - - logger.info("Collect flavor id for cloudify manager server") - nova = nvclient.Client("2", **nv_creds) - - flavor_name = "m1.medium" - flavor_id = os_utils.get_flavor_id(nova, flavor_name) - for requirement in CFY_MANAGER_REQUIERMENTS: - if requirement == 'ram_min': - flavor_id = os_utils.get_flavor_id_by_ram_range( - nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196) - - if flavor_id == '': - logger.error( - "Failed to find %s flavor. " - "Try with ram range default requirement !" 
% flavor_name) - flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) - - if flavor_id == '': - step_failure("orchestrator", - "Failed to find required flavor for this deployment") - - cfy.set_flavor_id(flavor_id) - - image_name = "centos_7" - image_id = os_utils.get_image_id(glance, image_name) - for requirement in CFY_MANAGER_REQUIERMENTS: - if requirement == 'os_image': - image_id = os_utils.get_image_id( - glance, CFY_MANAGER_REQUIERMENTS['os_image']) - - if image_id == '': - step_failure( - "orchestrator", - "Error : Failed to find required OS image for cloudify manager") - - cfy.set_image_id(image_id) - - ext_net = os_utils.get_external_net(neutron) - if not ext_net: - step_failure("orchestrator", "Failed to get external network") - - cfy.set_external_network_name(ext_net) - - ns = functest_utils.get_resolvconf_ns() - if ns: - cfy.set_nameservers(ns) - - logger.info("Prepare virtualenv for cloudify-cli") - cmd = "chmod +x " + VIMS_DIR + "create_venv.sh" - functest_utils.execute_command(cmd, logger) - time.sleep(3) - cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR - functest_utils.execute_command(cmd, logger) - - cfy.download_manager_blueprint( - CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch']) - - # ############### CLOUDIFY DEPLOYMENT ################ - start_time_ts = time.time() - end_time_ts = start_time_ts - logger.info("Cloudify deployment Start Time:'%s'" % ( - datetime.datetime.fromtimestamp(start_time_ts).strftime( - '%Y-%m-%d %H:%M:%S'))) - - error = cfy.deploy_manager() - if error: - step_failure("orchestrator", error) - - end_time_ts = time.time() - duration = round(end_time_ts - start_time_ts, 1) - logger.info("Cloudify deployment duration:'%s'" % duration) - set_result("orchestrator", duration, "") - - # ############### CLEARWATER INITIALISATION ################ - - cw = clearwater(CW_INPUTS, cfy, logger) - - logger.info("Collect flavor id for all clearwater vm") - nova = nvclient.Client("2", **nv_creds) - - flavor_name = "m1.small" - flavor_id = os_utils.get_flavor_id(nova, flavor_name) - for requirement in CW_REQUIERMENTS: - if requirement == 'ram_min': - flavor_id = os_utils.get_flavor_id_by_ram_range( - nova, CW_REQUIERMENTS['ram_min'], 8196) - - if flavor_id == '': - logger.error( - "Failed to find %s flavor. Try with ram range " - "default requirement !" 
% flavor_name) - flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) - - if flavor_id == '': - step_failure( - "vIMS", "Failed to find required flavor for this deployment") - - cw.set_flavor_id(flavor_id) - - image_name = "ubuntu_14.04" - image_id = os_utils.get_image_id(glance, image_name) - for requirement in CW_REQUIERMENTS: - if requirement == 'os_image': - image_id = os_utils.get_image_id( - glance, CW_REQUIERMENTS['os_image']) - - if image_id == '': - step_failure( - "vIMS", - "Error : Failed to find required OS image for cloudify manager") - - cw.set_image_id(image_id) - - ext_net = os_utils.get_external_net(neutron) - if not ext_net: - step_failure("vIMS", "Failed to get external network") - - cw.set_external_network_name(ext_net) - - # ############### CLEARWATER DEPLOYMENT ################ - - start_time_ts = time.time() - end_time_ts = start_time_ts - logger.info("vIMS VNF deployment Start Time:'%s'" % ( - datetime.datetime.fromtimestamp(start_time_ts).strftime( - '%Y-%m-%d %H:%M:%S'))) - - error = cw.deploy_vnf(CW_BLUEPRINT) - if error: - step_failure("vIMS", error) - - end_time_ts = time.time() - duration = round(end_time_ts - start_time_ts, 1) - logger.info("vIMS VNF deployment duration:'%s'" % duration) - set_result("vIMS", duration, "") - - # ############### CLEARWATER TEST ################ - - test_clearwater() - - # ########## CLEARWATER UNDEPLOYMENT ############ - - cw.undeploy_vnf() - - # ########### CLOUDIFY UNDEPLOYMENT ############# - - cfy.undeploy_manager() - - # ############## GENERAL CLEANUP ################ - if args.noclean: - exit(0) - - ks_creds = os_utils.get_credentials("keystone") - - keystone = ksclient.Client(**ks_creds) - - logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name']) - tenant_id = os_utils.get_tenant_id( - keystone, CFY_INPUTS['keystone_tenant_name']) - if tenant_id == '': - logger.error("Error : Failed to get id of %s tenant" % - CFY_INPUTS['keystone_tenant_name']) - else: - if not os_utils.delete_tenant(keystone, tenant_id): - logger.error("Error : Failed to remove %s tenant" % - CFY_INPUTS['keystone_tenant_name']) - - logger.info("Removing %s user .." % CFY_INPUTS['keystone_username']) - user_id = os_utils.get_user_id( - keystone, CFY_INPUTS['keystone_username']) - if user_id == '': - logger.error("Error : Failed to get id of %s user" % - CFY_INPUTS['keystone_username']) - else: - if not os_utils.delete_user(keystone, user_id): - logger.error("Error : Failed to remove %s user" % - CFY_INPUTS['keystone_username']) - - -if __name__ == '__main__': - main() diff --git a/testcases/vIMS/clearwater.py b/testcases/vIMS/clearwater.py new file mode 100644 index 000000000..7236f4fba --- /dev/null +++ b/testcases/vIMS/clearwater.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# coding: utf8 +####################################################################### +# +# Copyright (c) 2015 Orange +# valentin.boucher@orange.com +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +######################################################################## + + +class clearwater: + + def __init__(self, inputs={}, orchestrator=None, logger=None): + self.config = inputs + self.orchestrator = orchestrator + self.logger = logger + self.deploy = False + + def set_orchestrator(self, orchestrator): + self.orchestrator = orchestrator + + def set_flavor_id(self, flavor_id): + self.config['flavor_id'] = flavor_id + + def set_image_id(self, image_id): + self.config['image_id'] = image_id + + def set_agent_user(self, agent_user): + self.config['agent_user'] = agent_user + + def set_external_network_name(self, external_network_name): + self.config['external_network_name'] = external_network_name + + def set_public_domain(self, public_domain): + self.config['public_domain'] = public_domain + + def deploy_vnf(self, blueprint, bp_name='clearwater', + dep_name='clearwater-opnfv'): + if self.orchestrator: + self.dep_name = dep_name + error = self.orchestrator.download_upload_and_deploy_blueprint( + blueprint, self.config, bp_name, dep_name) + if error: + return error + + self.deploy = True + + else: + if self.logger: + self.logger.error("Cloudify manager is down or not provide...") + + def undeploy_vnf(self): + if self.orchestrator: + if self.deploy: + self.deploy = False + self.orchestrator.undeploy_deployment(self.dep_name) + else: + if self.logger: + self.logger.error("Clearwater isn't already deploy...") + else: + if self.logger: + self.logger.error("Cloudify manager is down or not provide...") diff --git a/testcases/vIMS/create_venv.sh b/testcases/vIMS/create_venv.sh new file mode 100755 index 000000000..575fd177c --- /dev/null +++ b/testcases/vIMS/create_venv.sh @@ -0,0 +1,44 @@ +#!/bin/bash -e + +# Script checks that venv exists. If it doesn't it will be created +# It requires python2.7 and virtualenv packages installed +# +# Copyright (c) 2015 Orange +# valentin.boucher@orange.com +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +BASEDIR=`dirname $0` +VENV_PATH=$1 +VENV_NAME="venv_cloudify" +function venv_install() { + if command -v virtualenv-2.7; then + virtualenv-2.7 $1 + elif command -v virtualenv2; then + virtualenv2 $1 + elif command -v virtualenv; then + virtualenv $1 + else + echo Cannot find virtualenv command. + return 1 + fi +} + +# exit when something goes wrong during venv install +set -e +if [ ! -d "$VENV_PATH/$VENV_NAME" ]; then + venv_install $VENV_PATH/$VENV_NAME + echo "Virtualenv" + $VENV_NAME + "created." +fi + +if [ ! -f "$VENV_PATH/$VENV_NAME/updated" -o $BASEDIR/requirements.pip -nt $VENV_PATH/$VENV_NAME/updated ]; then + source $VENV_PATH/$VENV_NAME/bin/activate + pip install -r $BASEDIR/requirements.pip + touch $VENV_PATH/$VENV_NAME/updated + echo "Requirements installed." 
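The clearwater class added above is a thin holder for deployment inputs that delegates the actual blueprint download, upload and deployment to an orchestrator instance. A minimal usage sketch follows; it only wires together names defined in the files added in this change, and every concrete value (paths, IDs, URLs, the blueprint dict) is a placeholder assumption rather than a documented default.

# Illustrative sketch only: combines the clearwater and orchestrator classes
# introduced above; all literal values below are placeholders.
import logging

from orchestrator import orchestrator
from clearwater import clearwater

logger = logging.getLogger("vIMS-sketch")

cfy = orchestrator("/tmp/vIMS_data/", inputs={}, logger=logger)

cw = clearwater(inputs={}, orchestrator=cfy, logger=logger)
cw.set_flavor_id("<flavor-uuid>")              # placeholder flavor id
cw.set_image_id("<ubuntu-14.04-image-uuid>")   # placeholder image id
cw.set_external_network_name("net04_ext")      # placeholder external network
cw.set_public_domain("example.opnfv")          # placeholder SIP domain

# Keys shaped as orchestrator.download_upload_and_deploy_blueprint consumes them.
blueprint = {"file_name": "openstack-blueprint.yaml",
             "url": "https://example.org/clearwater-blueprint.git",
             "branch": "stable",
             "destination_folder": "clearwater-blueprint"}

error = cw.deploy_vnf(blueprint)   # returns an error string on failure, None on success
if not error:
    cw.undeploy_vnf()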
+ deactivate +fi +set +e diff --git a/testcases/vIMS/orchestrator.py b/testcases/vIMS/orchestrator.py new file mode 100644 index 000000000..c61f654ca --- /dev/null +++ b/testcases/vIMS/orchestrator.py @@ -0,0 +1,236 @@ +#!/usr/bin/python +# coding: utf8 +####################################################################### +# +# Copyright (c) 2015 Orange +# valentin.boucher@orange.com +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +######################################################################## +import subprocess +import os +import shutil +import yaml +from git import Repo + + +class orchestrator: + + def __init__(self, testcase_dir, inputs={}, logger=None): + self.testcase_dir = testcase_dir + self.blueprint_dir = testcase_dir + 'cloudify-manager-blueprint/' + self.input_file = 'inputs.yaml' + self.manager_blueprint = False + self.config = inputs + self.logger = logger + self.manager_up = False + + def set_credentials(self, username, password, tenant_name, auth_url): + self.config['keystone_username'] = username + self.config['keystone_password'] = password + self.config['keystone_url'] = auth_url + self.config['keystone_tenant_name'] = tenant_name + + def set_flavor_id(self, flavor_id): + self.config['flavor_id'] = flavor_id + + def set_image_id(self, image_id): + self.config['image_id'] = image_id + + def set_external_network_name(self, external_network_name): + self.config['external_network_name'] = external_network_name + + def set_ssh_user(self, ssh_user): + self.config['ssh_user'] = ssh_user + + def set_nameservers(self, nameservers): + if 0 < len(nameservers): + self.config['dns_subnet_1'] = nameservers[0] + if 1 < len(nameservers): + self.config['dns_subnet_2'] = nameservers[1] + + def set_logger(self, logger): + self.logger = logger + + def download_manager_blueprint(self, manager_blueprint_url, + manager_blueprint_branch): + if self.manager_blueprint: + if self.logger: + self.logger.info( + "cloudify manager server blueprint is " + "already downloaded !") + else: + if self.logger: + self.logger.info( + "Downloading the cloudify manager server blueprint") + download_result = download_blueprints( + manager_blueprint_url, + manager_blueprint_branch, + self.blueprint_dir) + + if not download_result: + if self.logger: + self.logger.error("Failed to download manager blueprint") + exit(-1) + else: + self.manager_blueprint = True + + def manager_up(self): + return self.manager_up + + def deploy_manager(self): + if self.manager_blueprint: + if self.logger: + self.logger.info("Writing the inputs file") + with open(self.blueprint_dir + "inputs.yaml", "w") as f: + f.write(yaml.dump(self.config, default_style='"')) + f.close() + + # Ensure no ssh key file already exists + key_files = ["/.ssh/cloudify-manager-kp.pem", + "/.ssh/cloudify-agent-kp.pem"] + home = os.path.expanduser("~") + + for key_file in key_files: + if os.path.isfile(home + key_file): + os.remove(home + key_file) + + if self.logger: + self.logger.info("Launching the cloudify-manager deployment") + script = "set -e; " + script += ("source " + self.testcase_dir + + "venv_cloudify/bin/activate; ") + script += "cd " + self.testcase_dir + "; " + script += "cfy init -r; " + script += "cd cloudify-manager-blueprint; " + script += ("cfy local create-requirements -o requirements.txt " + + "-p openstack-manager-blueprint.yaml; 
") + script += "pip install -r requirements.txt; " + script += ("timeout 1800 cfy bootstrap --install-plugins " + + "-p openstack-manager-blueprint.yaml -i inputs.yaml; ") + cmd = "/bin/bash -c '" + script + "'" + error = execute_command(cmd, self.logger) + if error: + return error + + if self.logger: + self.logger.info("Cloudify-manager server is UP !") + + self.manager_up = True + + def undeploy_manager(self): + if self.logger: + self.logger.info("Launching the cloudify-manager undeployment") + + self.manager_up = False + + script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " + script += "cd " + self.testcase_dir + "; " + script += "cfy teardown -f --ignore-deployments; " + cmd = "/bin/bash -c '" + script + "'" + execute_command(cmd, self.logger) + + if self.logger: + self.logger.info( + "Cloudify-manager server has been successfully removed!") + + def download_upload_and_deploy_blueprint(self, blueprint, config, + bp_name, dep_name): + if self.logger: + self.logger.info("Downloading the {0} blueprint".format( + blueprint['file_name'])) + download_result = download_blueprints(blueprint['url'], + blueprint['branch'], + self.testcase_dir + + blueprint['destination_folder']) + + if not download_result: + if self.logger: + self.logger.error( + "Failed to download blueprint {0}". + format(blueprint['file_name'])) + exit(-1) + + if self.logger: + self.logger.info("Writing the inputs file") + + with open(self.testcase_dir + blueprint['destination_folder'] + + "/inputs.yaml", "w") as f: + f.write(yaml.dump(config, default_style='"')) + + f.close() + + if self.logger: + self.logger.info("Launching the {0} deployment".format(bp_name)) + script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " + script += ("cd " + self.testcase_dir + + blueprint['destination_folder'] + "; ") + script += ("cfy blueprints upload -b " + + bp_name + " -p openstack-blueprint.yaml; ") + script += ("cfy deployments create -b " + bp_name + + " -d " + dep_name + " --inputs inputs.yaml; ") + script += ("cfy executions start -w install -d " + + dep_name + " --timeout 1800; ") + + cmd = "/bin/bash -c '" + script + "'" + error = execute_command(cmd, self.logger) + if error: + return error + if self.logger: + self.logger.info("The deployment of {0} is ended".format(dep_name)) + + def undeploy_deployment(self, dep_name): + if self.logger: + self.logger.info("Launching the {0} undeployment".format(dep_name)) + script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; " + script += "cd " + self.testcase_dir + "; " + script += ("cfy executions start -w uninstall -d " + dep_name + + " --timeout 1800 ; ") + script += "cfy deployments delete -d " + dep_name + "; " + + cmd = "/bin/bash -c '" + script + "'" + try: + execute_command(cmd, self.logger) + except: + if self.logger: + self.logger.error("Clearwater undeployment failed") + + +def execute_command(cmd, logger): + """ + Execute Linux command + """ + if logger: + logger.debug('Executing command : {}'.format(cmd)) + output_file = "output.txt" + f = open(output_file, 'w+') + p = subprocess.call(cmd, shell=True, stdout=f, stderr=subprocess.STDOUT) + f.close() + f = open(output_file, 'r') + result = f.read() + if result != "" and logger: + logger.debug(result) + if p == 0: + return False + else: + if logger: + logger.error("Error when executing command %s" % cmd) + f = open(output_file, 'r') + lines = f.readlines() + result = lines[len(lines) - 3] + result += lines[len(lines) - 2] + result += lines[len(lines) - 1] + return result + + +def 
download_blueprints(blueprint_url, branch, dest_path): + if os.path.exists(dest_path): + shutil.rmtree(dest_path) + try: + Repo.clone_from(blueprint_url, dest_path, branch=branch) + return True + except: + return False diff --git a/testcases/vIMS/requirements.pip b/testcases/vIMS/requirements.pip new file mode 100644 index 000000000..9b9d0ba53 --- /dev/null +++ b/testcases/vIMS/requirements.pip @@ -0,0 +1 @@ +cloudify==3.3 \ No newline at end of file diff --git a/testcases/vIMS/vIMS.py b/testcases/vIMS/vIMS.py new file mode 100644 index 000000000..2430af1a1 --- /dev/null +++ b/testcases/vIMS/vIMS.py @@ -0,0 +1,553 @@ +#!/usr/bin/python +# coding: utf8 +####################################################################### +# +# Copyright (c) 2015 Orange +# valentin.boucher@orange.com +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +######################################################################## + +import argparse +import datetime +import json +import os +import pprint +import requests +import subprocess +import time +import yaml + +import keystoneclient.v2_0.client as ksclient +import glanceclient.client as glclient +import novaclient.client as nvclient +from neutronclient.v2_0 import client as ntclient + +import clearwater +import orchestrator + +import functest.utils.functest_logger as ft_logger +import functest.utils.functest_utils as functest_utils +import functest.utils.openstack_utils as os_utils + + +pp = pprint.PrettyPrinter(indent=4) + + +parser = argparse.ArgumentParser() +parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") +parser.add_argument("-n", "--noclean", + help="Don't clean the created resources for this test.", + action="store_true") +args = parser.parse_args() + +""" logging configuration """ +logger = ft_logger.Logger("vIMS").getLogger() + +REPO_PATH = os.environ['repos_dir'] + '/functest/' +if not os.path.exists(REPO_PATH): + logger.error("Functest repository directory not found '%s'" % REPO_PATH) + exit(-1) + +with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: + functest_yaml = yaml.safe_load(f) +f.close() + +# Cloudify parameters +VIMS_DIR = (REPO_PATH + + functest_yaml.get("general").get("directories").get("dir_vIMS")) +VIMS_DATA_DIR = functest_yaml.get("general").get( + "directories").get("dir_vIMS_data") + "/" +VIMS_TEST_DIR = functest_yaml.get("general").get( + "directories").get("dir_repo_vims_test") + "/" +DB_URL = functest_yaml.get("results").get("test_db_url") + +TENANT_NAME = functest_yaml.get("vIMS").get("general").get("tenant_name") +TENANT_DESCRIPTION = functest_yaml.get("vIMS").get( + "general").get("tenant_description") +IMAGES = functest_yaml.get("vIMS").get("general").get("images") + +CFY_MANAGER_BLUEPRINT = functest_yaml.get( + "vIMS").get("cloudify").get("blueprint") +CFY_MANAGER_REQUIERMENTS = functest_yaml.get( + "vIMS").get("cloudify").get("requierments") +CFY_INPUTS = functest_yaml.get("vIMS").get("cloudify").get("inputs") + +CW_BLUEPRINT = functest_yaml.get("vIMS").get("clearwater").get("blueprint") +CW_DEPLOYMENT_NAME = functest_yaml.get("vIMS").get( + "clearwater").get("deployment-name") +CW_INPUTS = functest_yaml.get("vIMS").get("clearwater").get("inputs") +CW_REQUIERMENTS = functest_yaml.get("vIMS").get( + 
"clearwater").get("requierments") + +CFY_DEPLOYMENT_DURATION = 0 +CW_DEPLOYMENT_DURATION = 0 + +RESULTS = {'orchestrator': {'duration': 0, 'result': ''}, + 'vIMS': {'duration': 0, 'result': ''}, + 'sig_test': {'duration': 0, 'result': ''}} + + +def download_and_add_image_on_glance(glance, image_name, image_url): + dest_path = VIMS_DATA_DIR + "tmp/" + if not os.path.exists(dest_path): + os.makedirs(dest_path) + file_name = image_url.rsplit('/')[-1] + if not functest_utils.download_url(image_url, dest_path): + logger.error("Failed to download image %s" % file_name) + return False + + image = os_utils.create_glance_image( + glance, image_name, dest_path + file_name) + if not image: + logger.error("Failed to upload image on glance") + return False + + return image + + +def step_failure(step_name, error_msg): + logger.error(error_msg) + set_result(step_name, 0, error_msg) + status = "failed" + if step_name == "sig_test": + status = "passed" + push_results(status) + exit(-1) + + +def push_results(status): + if args.report: + logger.debug("Pushing results to DB....") + + scenario = functest_utils.get_scenario(logger) + version = functest_utils.get_version(logger) + pod_name = functest_utils.get_pod_name(logger) + build_tag = functest_utils.get_build_tag(logger) + + functest_utils.push_results_to_db(db_url=DB_URL, + project="functest", + case_name="vIMS", + logger=logger, pod_name=pod_name, + version=version, + scenario=scenario, + criteria=status, + build_tag=build_tag, + payload=RESULTS) + + +def set_result(step_name, duration=0, result=""): + RESULTS[step_name] = {'duration': duration, 'result': result} + + +def test_clearwater(): + script = "source " + VIMS_DATA_DIR + "venv_cloudify/bin/activate; " + script += "cd " + VIMS_DATA_DIR + "; " + script += "cfy status | grep -Eo \"([0-9]{1,3}\.){3}[0-9]{1,3}\"" + cmd = "/bin/bash -c '" + script + "'" + + try: + logger.debug("Trying to get clearwater manager IP ... 
") + mgr_ip = os.popen(cmd).read() + mgr_ip = mgr_ip.splitlines()[0] + except: + step_failure("sig_test", "Unable to retrieve the IP of the " + "cloudify manager server !") + + api_url = "http://" + mgr_ip + "/api/v2" + dep_outputs = requests.get(api_url + "/deployments/" + + CW_DEPLOYMENT_NAME + "/outputs") + dns_ip = dep_outputs.json()['outputs']['dns_ip'] + ellis_ip = dep_outputs.json()['outputs']['ellis_ip'] + + ellis_url = "http://" + ellis_ip + "/" + url = ellis_url + "accounts" + + params = {"password": "functest", + "full_name": "opnfv functest user", + "email": "functest@opnfv.fr", + "signup_code": "secret"} + + rq = requests.post(url, data=params) + i = 20 + while rq.status_code != 201 and i > 0: + rq = requests.post(url, data=params) + i = i - 1 + time.sleep(10) + + if rq.status_code == 201: + url = ellis_url + "session" + rq = requests.post(url, data=params) + cookies = rq.cookies + + url = ellis_url + "accounts/" + params['email'] + "/numbers" + if cookies != "": + rq = requests.post(url, cookies=cookies) + i = 24 + while rq.status_code != 200 and i > 0: + rq = requests.post(url, cookies=cookies) + i = i - 1 + time.sleep(25) + + if rq.status_code != 200: + step_failure("sig_test", "Unable to create a number: %s" + % rq.json()['reason']) + + start_time_ts = time.time() + end_time_ts = start_time_ts + logger.info("vIMS functional test Start Time:'%s'" % ( + datetime.datetime.fromtimestamp(start_time_ts).strftime( + '%Y-%m-%d %H:%M:%S'))) + nameservers = functest_utils.get_resolvconf_ns() + resolvconf = "" + for ns in nameservers: + resolvconf += "\nnameserver " + ns + + if dns_ip != "": + script = ('echo -e "nameserver ' + dns_ip + resolvconf + + '" > /etc/resolv.conf; ') + script += 'source /etc/profile.d/rvm.sh; ' + script += 'cd ' + VIMS_TEST_DIR + '; ' + script += ('rake test[' + CW_INPUTS["public_domain"] + + '] SIGNUP_CODE="secret"') + + cmd = "/bin/bash -c '" + script + "'" + output_file = "output.txt" + f = open(output_file, 'w+') + subprocess.call(cmd, shell=True, stdout=f, + stderr=subprocess.STDOUT) + f.close() + end_time_ts = time.time() + duration = round(end_time_ts - start_time_ts, 1) + logger.info("vIMS functional test duration:'%s'" % duration) + f = open(output_file, 'r') + result = f.read() + if result != "" and logger: + logger.debug(result) + + vims_test_result = "" + try: + logger.debug("Trying to load test results") + with open(VIMS_TEST_DIR + "temp.json") as f: + vims_test_result = json.load(f) + f.close() + except: + logger.error("Unable to retrieve test results") + + set_result("sig_test", duration, vims_test_result) + + # success criteria for vIMS (for Brahmaputra) + # - orchestrator deployed + # - VNF deployed + status = "failed" + try: + if (RESULTS['orchestrator']['duration'] > 0 and + RESULTS['vIMS']['duration'] > 0): + status = "passed" + except: + logger.error("Unable to set test status") + push_results(status) + + try: + os.remove(VIMS_TEST_DIR + "temp.json") + except: + logger.error("Deleting file failed") + + +def main(): + + # ############### GENERAL INITIALISATION ################ + + if not os.path.exists(VIMS_DATA_DIR): + os.makedirs(VIMS_DATA_DIR) + + ks_creds = os_utils.get_credentials("keystone") + nv_creds = os_utils.get_credentials("nova") + nt_creds = os_utils.get_credentials("neutron") + + logger.info("Prepare OpenStack plateform (create tenant and user)") + keystone = ksclient.Client(**ks_creds) + + user_id = os_utils.get_user_id(keystone, ks_creds['username']) + if user_id == '': + step_failure("init", "Error : Failed to get id of 
" + + ks_creds['username']) + + tenant_id = os_utils.create_tenant( + keystone, TENANT_NAME, TENANT_DESCRIPTION) + if tenant_id == '': + step_failure("init", "Error : Failed to create " + + TENANT_NAME + " tenant") + + roles_name = ["admin", "Admin"] + role_id = '' + for role_name in roles_name: + if role_id == '': + role_id = os_utils.get_role_id(keystone, role_name) + + if role_id == '': + logger.error("Error : Failed to get id for %s role" % role_name) + + if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id): + logger.error("Error : Failed to add %s on tenant" % + ks_creds['username']) + + user_id = os_utils.create_user( + keystone, TENANT_NAME, TENANT_NAME, None, tenant_id) + if user_id == '': + logger.error("Error : Failed to create %s user" % TENANT_NAME) + + logger.info("Update OpenStack creds informations") + ks_creds.update({ + "username": TENANT_NAME, + "password": TENANT_NAME, + "tenant_name": TENANT_NAME, + }) + + nt_creds.update({ + "tenant_name": TENANT_NAME, + }) + + nv_creds.update({ + "project_id": TENANT_NAME, + }) + + logger.info("Upload some OS images if it doesn't exist") + glance_endpoint = keystone.service_catalog.url_for( + service_type='image', endpoint_type='publicURL') + glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token) + + for img in IMAGES.keys(): + image_name = IMAGES[img]['image_name'] + image_url = IMAGES[img]['image_url'] + + image_id = os_utils.get_image_id(glance, image_name) + + if image_id == '': + logger.info("""%s image doesn't exist on glance repository. Try + downloading this image and upload on glance !""" % image_name) + image_id = download_and_add_image_on_glance( + glance, image_name, image_url) + + if image_id == '': + step_failure( + "init", + "Error : Failed to find or upload required OS " + "image for this deployment") + + nova = nvclient.Client("2", **nv_creds) + + logger.info("Update security group quota for this tenant") + neutron = ntclient.Client(**nt_creds) + if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100): + step_failure( + "init", + "Failed to update security group quota for tenant " + TENANT_NAME) + + logger.info("Update cinder quota for this tenant") + from cinderclient import client as cinderclient + + creds_cinder = os_utils.get_credentials("cinder") + cinder_client = cinderclient.Client('1', creds_cinder['username'], + creds_cinder['api_key'], + creds_cinder['project_id'], + creds_cinder['auth_url'], + service_type="volume") + if not os_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150): + step_failure( + "init", "Failed to update cinder quota for tenant " + TENANT_NAME) + + # ############### CLOUDIFY INITIALISATION ################ + + cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger) + + cfy.set_credentials(username=ks_creds['username'], password=ks_creds[ + 'password'], tenant_name=ks_creds['tenant_name'], + auth_url=ks_creds['auth_url']) + + logger.info("Collect flavor id for cloudify manager server") + nova = nvclient.Client("2", **nv_creds) + + flavor_name = "m1.medium" + flavor_id = os_utils.get_flavor_id(nova, flavor_name) + for requirement in CFY_MANAGER_REQUIERMENTS: + if requirement == 'ram_min': + flavor_id = os_utils.get_flavor_id_by_ram_range( + nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196) + + if flavor_id == '': + logger.error( + "Failed to find %s flavor. " + "Try with ram range default requirement !" 
% flavor_name) + flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) + + if flavor_id == '': + step_failure("orchestrator", + "Failed to find required flavor for this deployment") + + cfy.set_flavor_id(flavor_id) + + image_name = "centos_7" + image_id = os_utils.get_image_id(glance, image_name) + for requirement in CFY_MANAGER_REQUIERMENTS: + if requirement == 'os_image': + image_id = os_utils.get_image_id( + glance, CFY_MANAGER_REQUIERMENTS['os_image']) + + if image_id == '': + step_failure( + "orchestrator", + "Error : Failed to find required OS image for cloudify manager") + + cfy.set_image_id(image_id) + + ext_net = os_utils.get_external_net(neutron) + if not ext_net: + step_failure("orchestrator", "Failed to get external network") + + cfy.set_external_network_name(ext_net) + + ns = functest_utils.get_resolvconf_ns() + if ns: + cfy.set_nameservers(ns) + + logger.info("Prepare virtualenv for cloudify-cli") + cmd = "chmod +x " + VIMS_DIR + "create_venv.sh" + functest_utils.execute_command(cmd, logger) + time.sleep(3) + cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR + functest_utils.execute_command(cmd, logger) + + cfy.download_manager_blueprint( + CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch']) + + # ############### CLOUDIFY DEPLOYMENT ################ + start_time_ts = time.time() + end_time_ts = start_time_ts + logger.info("Cloudify deployment Start Time:'%s'" % ( + datetime.datetime.fromtimestamp(start_time_ts).strftime( + '%Y-%m-%d %H:%M:%S'))) + + error = cfy.deploy_manager() + if error: + step_failure("orchestrator", error) + + end_time_ts = time.time() + duration = round(end_time_ts - start_time_ts, 1) + logger.info("Cloudify deployment duration:'%s'" % duration) + set_result("orchestrator", duration, "") + + # ############### CLEARWATER INITIALISATION ################ + + cw = clearwater(CW_INPUTS, cfy, logger) + + logger.info("Collect flavor id for all clearwater vm") + nova = nvclient.Client("2", **nv_creds) + + flavor_name = "m1.small" + flavor_id = os_utils.get_flavor_id(nova, flavor_name) + for requirement in CW_REQUIERMENTS: + if requirement == 'ram_min': + flavor_id = os_utils.get_flavor_id_by_ram_range( + nova, CW_REQUIERMENTS['ram_min'], 8196) + + if flavor_id == '': + logger.error( + "Failed to find %s flavor. Try with ram range " + "default requirement !" 
% flavor_name) + flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196) + + if flavor_id == '': + step_failure( + "vIMS", "Failed to find required flavor for this deployment") + + cw.set_flavor_id(flavor_id) + + image_name = "ubuntu_14.04" + image_id = os_utils.get_image_id(glance, image_name) + for requirement in CW_REQUIERMENTS: + if requirement == 'os_image': + image_id = os_utils.get_image_id( + glance, CW_REQUIERMENTS['os_image']) + + if image_id == '': + step_failure( + "vIMS", + "Error : Failed to find required OS image for cloudify manager") + + cw.set_image_id(image_id) + + ext_net = os_utils.get_external_net(neutron) + if not ext_net: + step_failure("vIMS", "Failed to get external network") + + cw.set_external_network_name(ext_net) + + # ############### CLEARWATER DEPLOYMENT ################ + + start_time_ts = time.time() + end_time_ts = start_time_ts + logger.info("vIMS VNF deployment Start Time:'%s'" % ( + datetime.datetime.fromtimestamp(start_time_ts).strftime( + '%Y-%m-%d %H:%M:%S'))) + + error = cw.deploy_vnf(CW_BLUEPRINT) + if error: + step_failure("vIMS", error) + + end_time_ts = time.time() + duration = round(end_time_ts - start_time_ts, 1) + logger.info("vIMS VNF deployment duration:'%s'" % duration) + set_result("vIMS", duration, "") + + # ############### CLEARWATER TEST ################ + + test_clearwater() + + # ########## CLEARWATER UNDEPLOYMENT ############ + + cw.undeploy_vnf() + + # ########### CLOUDIFY UNDEPLOYMENT ############# + + cfy.undeploy_manager() + + # ############## GENERAL CLEANUP ################ + if args.noclean: + exit(0) + + ks_creds = os_utils.get_credentials("keystone") + + keystone = ksclient.Client(**ks_creds) + + logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name']) + tenant_id = os_utils.get_tenant_id( + keystone, CFY_INPUTS['keystone_tenant_name']) + if tenant_id == '': + logger.error("Error : Failed to get id of %s tenant" % + CFY_INPUTS['keystone_tenant_name']) + else: + if not os_utils.delete_tenant(keystone, tenant_id): + logger.error("Error : Failed to remove %s tenant" % + CFY_INPUTS['keystone_tenant_name']) + + logger.info("Removing %s user .." % CFY_INPUTS['keystone_username']) + user_id = os_utils.get_user_id( + keystone, CFY_INPUTS['keystone_username']) + if user_id == '': + logger.error("Error : Failed to get id of %s user" % + CFY_INPUTS['keystone_username']) + else: + if not os_utils.delete_user(keystone, user_id): + logger.error("Error : Failed to remove %s user" % + CFY_INPUTS['keystone_username']) + + +if __name__ == '__main__': + main() diff --git a/testcases/vPing/CI/libraries/ping.sh b/testcases/vPing/CI/libraries/ping.sh deleted file mode 100644 index 693b86825..000000000 --- a/testcases/vPing/CI/libraries/ping.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh - -while true; do - ping -c 1 $1 2>&1 >/dev/null - RES=$? 
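vIMS.py above tracks three steps (orchestrator, vIMS, sig_test) and, per the Brahmaputra criteria encoded in test_clearwater(), reports the run as passed only when both the orchestrator bootstrap and the VNF deployment recorded a non-zero duration. A condensed, illustrative restatement of that bookkeeping, using only names defined in the file (the durations below are example values):

# Condensed sketch of the result bookkeeping in vIMS.py (illustrative only).
RESULTS = {'orchestrator': {'duration': 0, 'result': ''},
           'vIMS': {'duration': 0, 'result': ''},
           'sig_test': {'duration': 0, 'result': ''}}

def set_result(step_name, duration=0, result=""):
    RESULTS[step_name] = {'duration': duration, 'result': result}

def overall_status():
    # Brahmaputra success criteria: orchestrator deployed and VNF deployed.
    deployed = (RESULTS['orchestrator']['duration'] > 0 and
                RESULTS['vIMS']['duration'] > 0)
    return "passed" if deployed else "failed"

set_result("orchestrator", 305.2)   # example duration, seconds
set_result("vIMS", 1210.7)          # example duration, seconds
print(overall_status())             # -> passed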
- if [ "Z$RES" = "Z0" ] ; then - echo 'vPing OK' - break - else - echo 'vPing KO' - fi - sleep 1 -done \ No newline at end of file diff --git a/testcases/vPing/CI/libraries/vPing_ssh.py b/testcases/vPing/CI/libraries/vPing_ssh.py deleted file mode 100644 index fa9770ba8..000000000 --- a/testcases/vPing/CI/libraries/vPing_ssh.py +++ /dev/null @@ -1,453 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2015 All rights reserved -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1: This script boots the VM1 and allocates IP address from Nova -# Later, the VM2 boots then execute cloud-init to ping VM1. -# After successful ping, both the VMs are deleted. -# 0.2: measure test duration and publish results under json format -# -# -import argparse -import datetime -import os -import paramiko -import pprint -import re -import time -import yaml -from scp import SCPClient - -from novaclient import client as novaclient -from neutronclient.v2_0 import client as neutronclient -from keystoneclient.v2_0 import client as keystoneclient -from glanceclient import client as glanceclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils -import functest.utils.openstack_utils as openstack_utils - -pp = pprint.PrettyPrinter(indent=4) - -parser = argparse.ArgumentParser() -image_exists = False - -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") - -args = parser.parse_args() - -""" logging configuration """ -logger = ft_logger.Logger("vping_ssh").getLogger() - -paramiko.util.log_to_file("/var/log/paramiko.log") - -REPO_PATH = os.environ['repos_dir'] + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -HOME = os.environ['HOME'] + "/" -# vPing parameters -VM_BOOT_TIMEOUT = 180 -VM_DELETE_TIMEOUT = 100 -PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout") -TEST_DB = functest_yaml.get("results").get("test_db_url") -NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1") -NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2") -GLANCE_IMAGE_NAME = functest_yaml.get("vping").get("image_name") -GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get( - "image_file_name") -GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get( - "image_disk_format") -GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( - "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME - -FLAVOR = functest_yaml.get("vping").get("vm_flavor") - -# NEUTRON Private Network parameters - -PRIVATE_NET_NAME = functest_yaml.get("vping").get( - "vping_private_net_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("vping").get( - "vping_private_subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("vping").get( - "vping_private_subnet_cidr") -ROUTER_NAME = functest_yaml.get("vping").get( - "vping_router_name") - -SECGROUP_NAME = functest_yaml.get("vping").get("vping_sg_name") -SECGROUP_DESCR = functest_yaml.get("vping").get("vping_sg_descr") - - -def pMsg(value): - - """pretty printing""" - pp.pprint(value) - - -def waitVmActive(nova, vm): - - # sleep and wait 
for VM status change - sleep_time = 3 - count = VM_BOOT_TIMEOUT / sleep_time - while True: - status = openstack_utils.get_instance_status(nova, vm) - logger.debug("Status: %s" % status) - if status == "ACTIVE": - return True - if status == "ERROR" or status == "error": - return False - if count == 0: - logger.debug("Booting a VM timed out...") - return False - count -= 1 - time.sleep(sleep_time) - return False - - -def waitVmDeleted(nova, vm): - - # sleep and wait for VM status change - sleep_time = 3 - count = VM_DELETE_TIMEOUT / sleep_time - while True: - status = openstack_utils.get_instance_status(nova, vm) - if not status: - return True - elif count == 0: - logger.debug("Timeout") - return False - else: - # return False - count -= 1 - time.sleep(sleep_time) - return False - - -def create_security_group(neutron_client): - sg_id = openstack_utils.get_security_group_id(neutron_client, - SECGROUP_NAME) - if sg_id != '': - logger.info("Using existing security group '%s'..." % SECGROUP_NAME) - else: - logger.info("Creating security group '%s'..." % SECGROUP_NAME) - SECGROUP = openstack_utils.create_security_group(neutron_client, - SECGROUP_NAME, - SECGROUP_DESCR) - if not SECGROUP: - logger.error("Failed to create the security group...") - return False - - sg_id = SECGROUP['id'] - - logger.debug("Security group '%s' with ID=%s created successfully." - % (SECGROUP['name'], sg_id)) - - logger.debug("Adding ICMP rules in security group '%s'..." - % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, - 'ingress', 'icmp'): - logger.error("Failed to create the security group rule...") - return False - - logger.debug("Adding SSH rules in security group '%s'..." - % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule( - neutron_client, sg_id, 'ingress', 'tcp', '22', '22'): - logger.error("Failed to create the security group rule...") - return False - - if not openstack_utils.create_secgroup_rule( - neutron_client, sg_id, 'egress', 'tcp', '22', '22'): - logger.error("Failed to create the security group rule...") - return False - return sg_id - - -def push_results(start_time_ts, duration, test_status): - try: - logger.debug("Pushing result into DB...") - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - criteria = "failed" - if test_status == "OK": - criteria = "passed" - pod_name = functest_utils.get_pod_name(logger) - build_tag = functest_utils.get_build_tag(logger) - functest_utils.push_results_to_db(TEST_DB, - "functest", - "vPing", - logger, pod_name, version, scenario, - criteria, build_tag, - payload={'timestart': start_time_ts, - 'duration': duration, - 'status': test_status}) - except: - logger.error("Error pushing results into Database '%s'" - % sys.exc_info()[0]) - - -def main(): - - creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2', **creds_nova) - creds_neutron = openstack_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - creds_keystone = openstack_utils.get_credentials("keystone") - keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for( - service_type='image', endpoint_type='publicURL') - glance_client = glanceclient.Client(1, glance_endpoint, - token=keystone_client.auth_token) - EXIT_CODE = -1 - - image_id = None - flavor = None - - # Check if the given image exists - image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) - if image_id 
!= '': - logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME) - global image_exists - image_exists = True - else: - logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH)) - image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) - if not image_id: - logger.error("Failed to create a Glance image...") - return(EXIT_CODE) - logger.debug("Image '%s' with ID=%s created successfully." - % (GLANCE_IMAGE_NAME, image_id)) - - network_dic = openstack_utils.create_network_full(logger, - neutron_client, - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if not network_dic: - logger.error( - "There has been a problem when creating the neutron network") - return(EXIT_CODE) - - network_id = network_dic["net_id"] - - sg_id = create_security_group(neutron_client) - - # Check if the given flavor exists - try: - flavor = nova_client.flavors.find(name=FLAVOR) - logger.info("Using existing Flavor '%s'..." % FLAVOR) - except: - logger.error("Flavor '%s' not found." % FLAVOR) - logger.info("Available flavors are: ") - pMsg(nova_client.flavor.list()) - return(EXIT_CODE) - - # Deleting instances if they exist - servers = nova_client.servers.list() - for server in servers: - if server.name == NAME_VM_1 or server.name == NAME_VM_2: - logger.info("Instance %s found. Deleting..." % server.name) - server.delete() - - # boot VM 1 - start_time_ts = time.time() - end_time_ts = start_time_ts - logger.info("vPing Start Time:'%s'" % ( - datetime.datetime.fromtimestamp(start_time_ts).strftime( - '%Y-%m-%d %H:%M:%S'))) - - logger.info("Creating instance '%s'..." % NAME_VM_1) - logger.debug( - "Configuration:\n name=%s \n flavor=%s \n image=%s \n " - "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id)) - vm1 = nova_client.servers.create( - name=NAME_VM_1, - flavor=flavor, - image=image_id, - nics=[{"net-id": network_id}] - ) - - # wait until VM status is active - if not waitVmActive(nova_client, vm1): - logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( - NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1))) - return (EXIT_CODE) - else: - logger.info("Instance '%s' is ACTIVE." % NAME_VM_1) - - # Retrieve IP of first VM - test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0] - logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip)) - - logger.info("Adding '%s' to security group '%s'..." - % (NAME_VM_1, SECGROUP_NAME)) - openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id) - - # boot VM 2 - logger.info("Creating instance '%s'..." % NAME_VM_2) - logger.debug( - "Configuration:\n name=%s \n flavor=%s \n image=%s \n " - "network=%s \n" % (NAME_VM_2, flavor, image_id, network_id)) - vm2 = nova_client.servers.create( - name=NAME_VM_2, - flavor=flavor, - image=image_id, - nics=[{"net-id": network_id}] - ) - - if not waitVmActive(nova_client, vm2): - logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( - NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2))) - return (EXIT_CODE) - else: - logger.info("Instance '%s' is ACTIVE." % NAME_VM_2) - - logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2, - SECGROUP_NAME)) - openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id) - - logger.info("Creating floating IP for VM '%s'..." 
% NAME_VM_2) - floatip_dic = openstack_utils.create_floating_ip(neutron_client) - floatip = floatip_dic['fip_addr'] - # floatip_id = floatip_dic['fip_id'] - - if floatip is None: - logger.error("Cannot create floating IP.") - return (EXIT_CODE) - logger.info("Floating IP created: '%s'" % floatip) - - logger.info("Associating floating ip: '%s' to VM '%s' " - % (floatip, NAME_VM_2)) - if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip): - logger.error("Cannot associate floating IP to VM.") - return (EXIT_CODE) - - logger.info("Trying to establish SSH connection to %s..." % floatip) - username = 'cirros' - password = 'cubswin:)' - ssh = paramiko.SSHClient() - ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - - timeout = 50 - nolease = False - got_ip = False - discover_count = 0 - cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0] - while timeout > 0: - try: - ssh.connect(floatip, username=username, - password=password, timeout=2) - logger.debug("SSH connection established to %s." % floatip) - break - except: - logger.debug("Waiting for %s..." % floatip) - time.sleep(6) - timeout -= 1 - - console_log = vm2.get_console_output() - - # print each "Sending discover" captured on the console log - if (len(re.findall("Sending discover", console_log)) > - discover_count and not got_ip): - discover_count += 1 - logger.debug("Console-log '%s': Sending discover..." - % NAME_VM_2) - - # check if eth0 got an ip,the line looks like this: - # "inet addr:192.168.".... - # if the dhcp agent fails to assing ip, this line will not appear - if "inet addr:" + cidr_first_octet in console_log and not got_ip: - got_ip = True - logger.debug("The instance '%s' succeeded to get the IP " - "from the dhcp agent.") - - # if dhcp doesnt work,it shows "No lease, failing".The test will fail - if "No lease, failing" in console_log and not nolease and not got_ip: - nolease = True - logger.debug("Console-log '%s': No lease, failing..." - % NAME_VM_2) - logger.info("The instance failed to get an IP from the " - "DHCP agent. The test will probably timeout...") - - if timeout == 0: # 300 sec timeout (5 min) - logger.error("Cannot establish connection to IP '%s'. Aborting" - % floatip) - return (EXIT_CODE) - - scp = SCPClient(ssh.get_transport()) - - ping_script = REPO_PATH + "testcases/vPing/CI/libraries/ping.sh" - try: - scp.put(ping_script, "~/") - except: - logger.error("Cannot SCP the file '%s' to VM '%s'" - % (ping_script, floatip)) - - cmd = 'chmod 755 ~/ping.sh' - (stdin, stdout, stderr) = ssh.exec_command(cmd) - for line in stdout.readlines(): - print line - - logger.info("Waiting for ping...") - sec = 0 - duration = 0 - - cmd = '~/ping.sh ' + test_ip - flag = False - while True: - time.sleep(1) - (stdin, stdout, stderr) = ssh.exec_command(cmd) - output = stdout.readlines() - - for line in output: - if "vPing OK" in line: - logger.info("vPing detected!") - - # we consider start time at VM1 booting - end_time_ts = time.time() - duration = round(end_time_ts - start_time_ts, 1) - logger.info("vPing duration:'%s' s." % duration) - EXIT_CODE = 0 - flag = True - break - elif sec == PING_TIMEOUT: - logger.info("Timeout reached.") - flag = True - break - if flag: - break - logger.debug("Pinging %s. Waiting for response..." 
% test_ip) - sec += 1 - - test_status = "NOK" - if EXIT_CODE == 0: - logger.info("vPing OK") - test_status = "OK" - else: - duration = 0 - logger.error("vPing FAILED") - - if args.report: - push_results(start_time_ts, duration, test_status) - - exit(EXIT_CODE) - -if __name__ == '__main__': - main() diff --git a/testcases/vPing/CI/libraries/vPing_userdata.py b/testcases/vPing/CI/libraries/vPing_userdata.py deleted file mode 100644 index 2b2963144..000000000 --- a/testcases/vPing/CI/libraries/vPing_userdata.py +++ /dev/null @@ -1,387 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2015 All rights reserved -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1: This script boots the VM1 and allocates IP address from Nova -# Later, the VM2 boots then execute cloud-init to ping VM1. -# After successful ping, both the VMs are deleted. -# 0.2: measure test duration and publish results under json format -# -# - -import argparse -import datetime -import os -import pprint -import time -import yaml - -from novaclient import client as novaclient -from neutronclient.v2_0 import client as neutronclient -from keystoneclient.v2_0 import client as keystoneclient -from glanceclient import client as glanceclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils -import functest.utils.openstack_utils as openstack_utils - -pp = pprint.PrettyPrinter(indent=4) - -parser = argparse.ArgumentParser() -image_exists = False - -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") - -args = parser.parse_args() - -""" logging configuration """ -logger = ft_logger.Logger("vping_userdata").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -HOME = os.environ['HOME'] + "/" -# vPing parameters -VM_BOOT_TIMEOUT = 180 -VM_DELETE_TIMEOUT = 100 -PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout") -TEST_DB = functest_yaml.get("results").get("test_db_url") -NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1") -NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2") -GLANCE_IMAGE_NAME = functest_yaml.get("vping").get("image_name") -GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get( - "openstack").get("image_file_name") -GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get( - "openstack").get("image_disk_format") -GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( - "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME - - -FLAVOR = functest_yaml.get("vping").get("vm_flavor") - -# NEUTRON Private Network parameters - -PRIVATE_NET_NAME = functest_yaml.get("vping").get( - "vping_private_net_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("vping").get( - "vping_private_subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("vping").get( - "vping_private_subnet_cidr") -ROUTER_NAME = functest_yaml.get("vping").get("vping_router_name") - -SECGROUP_NAME = functest_yaml.get("vping").get("vping_sg_name") -SECGROUP_DESCR = functest_yaml.get("vping").get("vping_sg_descr") - - -def pMsg(value): - - """pretty 
printing""" - pp.pprint(value) - - -def waitVmActive(nova, vm): - - # sleep and wait for VM status change - sleep_time = 3 - count = VM_BOOT_TIMEOUT / sleep_time - while True: - status = openstack_utils.get_instance_status(nova, vm) - logger.debug("Status: %s" % status) - if status == "ACTIVE": - return True - if status == "ERROR" or status == "error": - return False - if count == 0: - logger.debug("Booting a VM timed out...") - return False - count -= 1 - time.sleep(sleep_time) - return False - - -def waitVmDeleted(nova, vm): - - # sleep and wait for VM status change - sleep_time = 3 - count = VM_DELETE_TIMEOUT / sleep_time - while True: - status = openstack_utils.get_instance_status(nova, vm) - if not status: - return True - elif count == 0: - logger.debug("Timeout") - return False - else: - # return False - count -= 1 - time.sleep(sleep_time) - return False - - -def create_security_group(neutron_client): - sg_id = openstack_utils.get_security_group_id(neutron_client, - SECGROUP_NAME) - if sg_id != '': - logger.info("Using existing security group '%s'..." % SECGROUP_NAME) - else: - logger.info("Creating security group '%s'..." % SECGROUP_NAME) - SECGROUP = openstack_utils.create_security_group(neutron_client, - SECGROUP_NAME, - SECGROUP_DESCR) - if not SECGROUP: - logger.error("Failed to create the security group...") - return False - - sg_id = SECGROUP['id'] - - logger.debug("Security group '%s' with ID=%s created successfully." - % (SECGROUP['name'], sg_id)) - - logger.debug("Adding ICMP rules in security group '%s'..." - % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, - 'ingress', 'icmp'): - logger.error("Failed to create the security group rule...") - return False - - logger.debug("Adding SSH rules in security group '%s'..." 
- % SECGROUP_NAME) - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, - 'ingress', 'tcp', - '22', '22'): - logger.error("Failed to create the security group rule...") - return False - - if not openstack_utils.create_secgroup_rule(neutron_client, sg_id, - 'egress', 'tcp', - '22', '22'): - logger.error("Failed to create the security group rule...") - return False - return sg_id - - -def push_results(start_time_ts, duration, test_status): - try: - logger.debug("Pushing result into DB...") - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - criteria = "failed" - if test_status == "OK": - criteria = "passed" - pod_name = functest_utils.get_pod_name(logger) - build_tag = functest_utils.get_build_tag(logger) - functest_utils.push_results_to_db(TEST_DB, - "functest", - "vPing_userdata", - logger, pod_name, version, scenario, - criteria, build_tag, - payload={'timestart': start_time_ts, - 'duration': duration, - 'status': test_status}) - except: - logger.error("Error pushing results into Database '%s'" - % sys.exc_info()[0]) - - -def main(): - - creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2', **creds_nova) - creds_neutron = openstack_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - creds_keystone = openstack_utils.get_credentials("keystone") - keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for( - service_type='image', endpoint_type='publicURL') - glance_client = glanceclient.Client(1, glance_endpoint, - token=keystone_client.auth_token) - EXIT_CODE = -1 - - image_id = None - flavor = None - - # Check if the given image exists - image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) - if image_id != '': - logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME) - global image_exists - image_exists = True - else: - logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH)) - image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) - if not image_id: - logger.error("Failed to create a Glance image...") - return(EXIT_CODE) - logger.debug("Image '%s' with ID=%s created successfully." - % (GLANCE_IMAGE_NAME, image_id)) - - network_dic = openstack_utils.create_network_full(logger, - neutron_client, - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if not network_dic: - logger.error( - "There has been a problem when creating the neutron network") - return(EXIT_CODE) - network_id = network_dic["net_id"] - - create_security_group(neutron_client) - - # Check if the given flavor exists - try: - flavor = nova_client.flavors.find(name=FLAVOR) - logger.info("Flavor found '%s'" % FLAVOR) - except: - logger.error("Flavor '%s' not found." % FLAVOR) - logger.info("Available flavors are: ") - pMsg(nova_client.flavor.list()) - exit(-1) - - # Deleting instances if they exist - servers = nova_client.servers.list() - for server in servers: - if server.name == NAME_VM_1 or server.name == NAME_VM_2: - logger.info("Instance %s found. Deleting..." % server.name) - server.delete() - - # boot VM 1 - # basic boot - # tune (e.g. 
flavor, images, network) to your specific - # openstack configuration here - # we consider start time at VM1 booting - start_time_ts = time.time() - end_time_ts = start_time_ts - logger.info("vPing Start Time:'%s'" % ( - datetime.datetime.fromtimestamp(start_time_ts).strftime( - '%Y-%m-%d %H:%M:%S'))) - - # create VM - logger.info("Creating instance '%s'..." % NAME_VM_1) - logger.debug( - "Configuration:\n name=%s \n flavor=%s \n image=%s \n " - "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id)) - vm1 = nova_client.servers.create( - name=NAME_VM_1, - flavor=flavor, - image=image_id, - config_drive=True, - nics=[{"net-id": network_id}] - ) - - # wait until VM status is active - if not waitVmActive(nova_client, vm1): - - logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( - NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1))) - return (EXIT_CODE) - else: - logger.info("Instance '%s' is ACTIVE." % NAME_VM_1) - - # Retrieve IP of first VM - test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0] - logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip)) - - # boot VM 2 - # we will boot then execute a ping script with cloud-init - # the long chain corresponds to the ping procedure converted with base 64 - # tune (e.g. flavor, images, network) to your specific openstack - # configuration here - u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " - "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " - "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip) - - # create VM - logger.info("Creating instance '%s'..." % NAME_VM_2) - logger.debug( - "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s " - "\n userdata= \n%s" % ( - NAME_VM_2, flavor, image_id, network_id, u)) - vm2 = nova_client.servers.create( - name=NAME_VM_2, - flavor=flavor, - image=image_id, - nics=[{"net-id": network_id}], - config_drive=True, - userdata=u - ) - - if not waitVmActive(nova_client, vm2): - logger.error("Instance '%s' cannot be booted. Status is '%s'" % ( - NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2))) - return (EXIT_CODE) - else: - logger.info("Instance '%s' is ACTIVE." % NAME_VM_2) - - logger.info("Waiting for ping...") - sec = 0 - metadata_tries = 0 - console_log = vm2.get_console_output() - duration = 0 - - while True: - time.sleep(1) - console_log = vm2.get_console_output() - # print "--"+console_log - # report if the test is failed - if "vPing OK" in console_log: - logger.info("vPing detected!") - - # we consider start time at VM1 booting - end_time_ts = time.time() - duration = round(end_time_ts - start_time_ts, 1) - logger.info("vPing duration:'%s'" % duration) - EXIT_CODE = 0 - break - elif ("failed to read iid from metadata" in console_log or - metadata_tries > 5): - EXIT_CODE = -2 - break - elif sec == PING_TIMEOUT: - logger.info("Timeout reached.") - break - elif sec % 10 == 0: - if "request failed" in console_log: - logger.debug("It seems userdata is not supported in " - "nova boot. Waiting a bit...") - metadata_tries += 1 - else: - logger.debug("Pinging %s. Waiting for response..." % test_ip) - sec += 1 - - test_status = "NOK" - if EXIT_CODE == 0: - logger.info("vPing OK") - test_status = "OK" - elif EXIT_CODE == -2: - duration = 0 - logger.info("Userdata is not supported in nova boot. 
Aborting test...") - else: - duration = 0 - logger.error("vPing FAILED") - - if args.report: - push_results(start_time_ts, duration, test_status) - - exit(EXIT_CODE) - -if __name__ == '__main__': - main() -- cgit 1.2.3-korg