From 68d29b6f6e71df6b4f177cd417f98d9e977f8893 Mon Sep 17 00:00:00 2001
From: "jose.lausuch"
Date: Tue, 17 May 2016 21:36:56 +0200
Subject: Change 'testcases' directory structure

JIRA: FUNCTEST-226

- Remove all 'CI' subdirs
- Remove VIM dir and have OpenStack dir within /testcases/
- Split rally and tempest in 2 different dirs

Change-Id: Icfc76d18a84f7a18d93ae1a5ec7dc7a560bb7ce9
Signed-off-by: jose.lausuch
---
 .../VIM/OpenStack/CI/custom_tests/defcore_req.txt | 122 -----
 .../VIM/OpenStack/CI/libraries/healthcheck.sh | 208 --------
 .../VIM/OpenStack/CI/libraries/run_rally-cert.py | 560 ---------------------
 .../VIM/OpenStack/CI/libraries/run_tempest.py | 347 -------------
 .../VIM/OpenStack/CI/rally_cert/macro/macro.yaml | 97 ----
 .../CI/rally_cert/scenario/opnfv-authenticate.yaml | 63 ---
 .../CI/rally_cert/scenario/opnfv-cinder.yaml | 272 ----------
 .../CI/rally_cert/scenario/opnfv-glance.yaml | 49 --
 .../CI/rally_cert/scenario/opnfv-heat.yaml | 160 ------
 .../CI/rally_cert/scenario/opnfv-keystone.yaml | 92 ----
 .../CI/rally_cert/scenario/opnfv-neutron.yaml | 245 ---------
 .../CI/rally_cert/scenario/opnfv-nova.yaml | 378 --------------
 .../CI/rally_cert/scenario/opnfv-quotas.yaml | 54 --
 .../CI/rally_cert/scenario/opnfv-requests.yaml | 28 --
 .../CI/rally_cert/scenario/opnfv-smoke.yaml | 268 ----------
 .../OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml | 42 --
 .../scenario/support/instance_dd_test.sh | 13 -
 .../templates/autoscaling_policy.yaml.template | 17 -
 .../scenario/templates/default.yaml.template | 1 -
 .../templates/random_strings.yaml.template | 13 -
 .../templates/resource_group.yaml.template | 13 -
 .../templates/server_with_ports.yaml.template | 64 ---
 .../templates/server_with_volume.yaml.template | 43 --
 ...pdated_autoscaling_policy_inplace.yaml.template | 23 -
 .../updated_random_strings_add.yaml.template | 19 -
 .../updated_random_strings_delete.yaml.template | 11 -
 .../updated_random_strings_replace.yaml.template | 19 -
 .../updated_resource_group_increase.yaml.template | 16 -
 .../updated_resource_group_reduce.yaml.template | 16 -
 testcases/VIM/OpenStack/CI/rally_cert/task.yaml | 60 ---
 30 files changed, 3313 deletions(-)
 delete mode 100644 testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt
 delete mode 100755 testcases/VIM/OpenStack/CI/libraries/healthcheck.sh
 delete mode 100755 testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
 delete mode 100644 testcases/VIM/OpenStack/CI/libraries/run_tempest.py
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template
 delete mode 100644 testcases/VIM/OpenStack/CI/rally_cert/task.yaml

(limited to 'testcases/VIM/OpenStack/CI')

diff --git a/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt b/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt
deleted file mode 100644
index bb1d172d..00000000
--- a/testcases/VIM/OpenStack/CI/custom_tests/defcore_req.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-# Set of DefCore tempest test cases (see http://www.openstack.org/brand/interop)
-# This approved version (2016.01) is valid for Juno, Kilo, and Liberty releases of OpenStack
-# The list is stored at http://git.openstack.org/cgit/openstack/defcore/plain/2016.01/2016.01.required.txt
-tempest.api.compute.images.test_images.ImagesTestJSON.test_delete_saving_image[id-aa06b52b-2db5-4807-b218-9441f75d74e3]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_changes_since[id-18bac3ae-da27-436c-92a9-b22474d13aab]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_name[id-33163b73-79f5-4d07-a7ea-9213bcc468ff]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_id[id-9f238683-c763-45aa-b848-232ec3ce3105]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_server_ref[id-05a377b8-28cf-4734-a1e6-2ab5c38bf606]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_status[id-a3f5b513-aeb3-42a9-b18e-f091ef73254d]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_filter_by_type[id-e3356918-4d3e-4756-81d5-abc4524ba29f]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_limit_results[id-3a484ca9-67ba-451e-b494-7fcf28d32d62]
-tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_changes_since[id-7d439e18-ac2e-4827-b049-7e18004712c4] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_name[id-644ea267-9bd9-4f3b-af9f-dffa02396a17] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_server_ref[id-8c78f822-203b-4bf6-8bba-56ebd551cf84] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_status[id-9b0ea018-6185-4f71-948a-a123a107988e] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_filter_by_type[id-888c0cc0-7223-43c5-9db0-b125fd0a393b] -tempest.api.compute.images.test_list_image_filters.ListImageFiltersTestJSON.test_list_images_with_detail_limit_results[id-ba2fa9a9-b672-47cc-b354-3b4c0600e2cb] -tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image[id-490d0898-e12a-463f-aef0-c50156b9f789] -tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images[id-fd51b7f4-d4a3-4331-9885-866658112a6f] -tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail[id-9f94cb6b-7f10-48c5-b911-a0b84d7d4cd6] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] -tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f,smoke] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b] -tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f,smoke] -tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c] -tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4] 
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip[id-43a1242e-7b31-48d1-88f2-3f72aa9f2077] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_ip_regex[id-a905e287-c35e-42f2-b132-d02b09f3654a] -tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570,negative] -tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4,negative] 
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f,negative] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32,smoke] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm[id-1499262a-9328-4eda-9068-db1ac57498d2] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_revert[id-c03aab19-adb1-44f5-917d-c419577e9e68] -tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c] -tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077] -tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402,negative] 
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_non_existent_flavor[id-ced1a1d7-2ab6-45c9-b90f-b27d87b30efd,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_resize_server_with_null_flavor[id-45436a7d-a388-4a35-a9d8-3adc5d0d940b,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_delete_a_server_of_another_tenant[id-5c75009d-3eea-423e-bea3-61b09fd25f9c,negative] -tempest.api.compute.servers.test_servers_negative.ServersNegativeTestMultiTenantJSON.test_update_server_of_another_tenant[id-543d84c1-dd2e-4c6d-8cb2-b9da0efaa384,negative] -tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a] -tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107] -tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff] -tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513] -tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list[id-bc2dd1a0-15af-48e5-9990-f2e75a48325d] -tempest.api.compute.volumes.test_volumes_list.VolumesTestJSON.test_volume_list_with_details[id-bad0567a-5a4f-420b-851e-780b55bb867c] -tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-f01904f2-e975-4915-98ce-cb5fa27bde4f,negative] 
-tempest.api.compute.volumes.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-62bab09a-4c03-4617-8cca-8572bc94af9b,negative] -tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9] -tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee] -tempest.api.image.v1.test_images.ListImagesTest.test_index_no_params[id-246178ab-3b33-4212-9a4b-a7fe8261794d] -tempest.api.object_storage.test_object_expiry.ObjectExpiryTest.test_get_object_after_expiry_time[id-fb024a42-37f3-4ba5-9684-4f40a7910b41] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way[id-06f90388-2d0e-40aa-934c-e9a8833e958a] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers[id-aa467252-44f3-472a-b5ae-5b57c3c9c147] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container[id-1a9ab572-1b66-4981-8c21-416e2a5e6011] -tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself[id-2248abba-415d-410b-9c30-22dff9cd6e67] -tempest.api.object_storage.test_object_services.ObjectTest.test_create_object[id-5b4ce26f-3545-46c9-a2ba-5754358a4c62,smoke] -tempest.api.object_storage.test_object_services.ObjectTest.test_delete_object[id-17738d45-03bd-4d45-9e0b-7b2f58f98687] -tempest.api.object_storage.test_object_services.ObjectTest.test_get_object[id-02610ba7-86b7-4272-9ed8-aa8d417cb3cd,smoke] -tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_if_different[id-50d01f12-526f-4360-9ac2-75dd508d7b68] -tempest.api.object_storage.test_object_services.ObjectTest.test_object_upload_in_segments[id-e3e6a64a-9f50-4955-b987-6ce6767c97fb] -tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_get_object_using_temp_url[id-f91c96d4-1230-4bba-8eb9-84476d18d991] -tempest.api.object_storage.test_object_temp_url.ObjectTempUrlTest.test_put_object_using_temp_url[id-9b08dade-3571-4152-8a4f-a4f2a873a735] -tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container[id-a151e158-dcbf-4a1f-a1e7-46cd65895a6f] diff --git a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh b/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh deleted file mode 100755 index 611c100c..00000000 --- a/testcases/VIM/OpenStack/CI/libraries/healthcheck.sh +++ /dev/null @@ -1,208 +0,0 @@ -# -# OpenStack Health Check -# This script is meant for really basic API operations on OpenStack -# Services tested: Keystone, Glance, Cinder, Neutron, Nova -# -# -# Author: -# jose.lausuch@ericsson.com -# -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# - -set -e - -#Redirect all the output (stdout) to a log file and show only possible errors. 
-LOG_FILE=/home/opnfv/functest/results/healthcheck.log -echo "">$LOG_FILE -exec 1<>$LOG_FILE - -info () { - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - INFO - " "$*" | tee -a $LOG_FILE 1>&2 -} - -debug () { - if [[ "${CI_DEBUG,,}" == "true" ]]; then - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - DEBUG - " "$*" | tee -a $LOG_FILE 1>&2 - fi -} - -error () { - echo -e "$(date '+%Y-%m-%d %H:%M:%S,%3N') - healtcheck - ERROR - " "$*" | tee -a $LOG_FILE 1>&2 - exit 1 -} - -if [ -z $OS_AUTH_URL ]; then - echo "Source credentials first." - exit 1 -fi - - -echo "Using following credentials:" -env | grep OS - -## Variables: -project_1="opnfv-tenant1" -project_2="opnfv-tenant2" -user_1="opnfv_user1" -user_2="opnfv_user2" -user_3="opnfv_user3" -user_4="opnfv_user4" -user_5="opnfv_user5" -user_6="opnfv_user6" -image_1="opnfv-image1" -image_2="opnfv-image2" -volume_1="opnfv-volume1" -volume_2="opnfv-volume2" -net_1="opnfv-network1" -net_2="opnfv-network2" -subnet_1="opnfv-subnet1" -subnet_2="opnfv-subnet2" -port_1="opnfv-port1" -port_2="opnfv-port2" -router_1="opnfv-router1" -router_2="opnfv-router2" -instance_1="opnfv-instance1" -instance_2="opnfv-instance2" -instance_3="opnfv-instance3" -instance_4="opnfv-instance4" - - - -function wait_for_ip() { - # $1 is the instance name - # $2 is the first octet of the subnet ip - timeout=60 - while [[ ${timeout} > 0 ]]; do - if [[ $(nova console-log $1|grep "No lease, failing") ]]; then - error "The instance $1 couldn't get an IP from the DHCP agent." | tee -a $LOG_FILE 1>&2 - exit 1 - elif [[ $(nova console-log $1|grep "^Lease"|grep "obtained") ]]; then - debug "The instance $1 got an IP successfully from the DHCP agent." | tee -a $LOG_FILE 1>&2 - break - fi - let timeout=timeout-1 - sleep 1 - done -} - - -################################# -info "Testing Keystone API..." | tee -a $LOG_FILE 1>&2 -################################# -openstack project create ${project_1} -debug "project '${project_1}' created." -openstack project create ${project_2} -debug "project '${project_2}' created." -openstack user create ${user_1} --project ${project_1} -debug "user '${user_1}' created in project ${project_1}." -openstack user create ${user_2} --project ${project_1} -debug "user '${user_2}' created in project ${project_1}." -openstack user create ${user_3} --project ${project_1} -debug "user '${user_3}' created in project ${project_1}." -openstack user create ${user_4} --project ${project_2} -debug "user '${user_4}' created in project ${project_2}." -openstack user create ${user_5} --project ${project_2} -debug "user '${user_5}' created in project ${project_2}." -openstack user create ${user_6} --project ${project_2} -debug "user '${user_6}' created in project ${project_2}." -info "...Keystone OK!" - -################################# -info "Testing Glance API..." -################################# -image=/home/opnfv/functest/data/cirros-0.3.4-x86_64-disk.img -glance image-create --name ${image_1} --disk-format qcow2 --container-format bare < ${image} -debug "image '${image_1}' created." -glance image-create --name ${image_2} --disk-format qcow2 --container-format bare < ${image} -debug "image '${image_2}' created." -info "... Glance OK!" - -################################# -info "Testing Cinder API..." -################################# -cinder create --display_name ${volume_1} 1 -debug "volume '${volume_1}' created." -cinder create --display_name ${volume_2} 10 -debug "volume '${volume_2}' created." -info "...Cinder OK!" 
- -################################# -info "Testing Neutron API..." -################################# - -network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}')) -for id in ${network_ids[@]}; do - [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id} -done -if [[ "${ext_net_id}" == "" ]]; then - error "No external network found. Exiting Health Check..." - exit 1 -else - info "External network found. ${ext_net_id}" -fi - -info "1. Create Networks..." -neutron net-create ${net_1} -debug "net '${net_1}' created." -neutron net-create ${net_2} -debug "net '${net_2}' created." -net1_id=$(neutron net-list | grep ${net_1} | awk '{print $2}') -net2_id=$(neutron net-list | grep ${net_2} | awk '{print $2}') - -info "2. Create subnets..." -neutron subnet-create --name ${subnet_1} --allocation-pool start=10.6.0.2,end=10.6.0.253 --gateway 10.6.0.254 ${net_1} 10.6.0.0/24 -debug "subnet '${subnet_1}' created." -neutron subnet-create --name ${subnet_2} --allocation-pool start=10.7.0.2,end=10.7.0.253 --gateway 10.7.0.254 ${net_2} 10.7.0.0/24 -debug "subnet '${subnet_2}' created." - -info "4. Create Routers..." -neutron router-create ${router_1} -debug "router '${router_1}' created." -neutron router-create ${router_2} -debug "router '${router_2}' created." - -neutron router-gateway-set ${router_1} ${ext_net_id} -debug "router '${router_1}' gateway set to ${ext_net_id}." -neutron router-gateway-set ${router_2} ${ext_net_id} -debug "router '${router_2}' gateway set to ${ext_net_id}." - -neutron router-interface-add ${router_1} ${subnet_1} -debug "router '${router_1}' interface added ${subnet_1}." -neutron router-interface-add ${router_2} ${subnet_2} -debug "router '${router_2}' interface added ${subnet_2}." - -info "...Neutron OK!" - -################################# -info "Testing Nova API..." -################################# - -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_1} -debug "nova instance '${instance_1}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_2} -debug "nova instance '${instance_2}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_3} -debug "nova instance '${instance_3}' booted on ${net_2}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_4} -debug "nova instance '${instance_4}' booted on ${net_2}." - -vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}') -vm2_id=$(nova list | grep ${instance_2} | awk '{print $2}') -vm3_id=$(nova list | grep ${instance_3} | awk '{print $2}') -vm4_id=$(nova list | grep ${instance_4} | awk '{print $2}') -info "...Nova OK!" - -info "Checking if instances get an IP from DHCP..." -wait_for_ip ${instance_1} "10.6" -wait_for_ip ${instance_2} "10.6" -wait_for_ip ${instance_3} "10.7" -wait_for_ip ${instance_4} "10.7" -info "...DHCP OK!" - -info "Health check passed!" -exit 0 diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py deleted file mode 100755 index 4dc1e16d..00000000 --- a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py +++ /dev/null @@ -1,560 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2015 Orange -# guyrodrigue.koffi@orange.com -# morgan.richomme@orange.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1 (05/2015) initial commit -# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite -# 0.3 (19/10/2015) remove Tempest from run_rally -# and push result into test DB -# -import argparse -import iniparse -import json -import os -import re -import requests -import subprocess -import time -import yaml - -from novaclient import client as novaclient -from glanceclient import client as glanceclient -from keystoneclient.v2_0 import client as keystoneclient -from neutronclient.v2_0 import client as neutronclient -from cinderclient import client as cinderclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as functest_utils -import functest.utils.openstack_utils as openstack_utils - -""" tests configuration """ -tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', - 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all'] -parser = argparse.ArgumentParser() -parser.add_argument("test_name", - help="Module name to be tested. " - "Possible values are : " - "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | " - "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | " - "{d[10]} ] " - "The 'all' value " - "performs all possible test scenarios" - .format(d=tests)) - -parser.add_argument("-d", "--debug", help="Debug mode", action="store_true") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") -parser.add_argument("-s", "--smoke", - help="Smoke test mode", - action="store_true") -parser.add_argument("-v", "--verbose", - help="Print verbose info about the progress", - action="store_true") -parser.add_argument("-n", "--noclean", - help="Don't clean the created resources for this test.", - action="store_true") -parser.add_argument("-z", "--sanity", - help="Sanity test mode, execute only a subset of tests", - action="store_true") - -args = parser.parse_args() - -client_dict = {} -network_dict = {} - -if args.verbose: - RALLY_STDERR = subprocess.STDOUT -else: - RALLY_STDERR = open(os.devnull, 'w') - -""" logging configuration """ -logger = ft_logger.Logger("run_rally").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' -if not os.path.exists(REPO_PATH): - logger.error("Functest repository directory not found '%s'" % REPO_PATH) - exit(-1) - - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() - -HOME = os.environ['HOME'] + "/" -SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get( - "directories").get("dir_rally_scn") -TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates" -SUPPORT_DIR = SCENARIOS_DIR + "scenario/support" - -FLAVOR_NAME = "m1.tiny" -USERS_AMOUNT = 2 -TENANTS_AMOUNT = 3 -ITERATIONS_AMOUNT = 10 -CONCURRENCY = 4 - -RESULTS_DIR = functest_yaml.get("general").get("directories").get( - "dir_rally_res") -TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get( - "dir_results") + '/tempest/tempest.conf' -TEST_DB = functest_yaml.get("results").get("test_db_url") - -PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr") -ROUTER_NAME = functest_yaml.get("rally").get("router_name") - -GLANCE_IMAGE_NAME = 
functest_yaml.get("general").get("openstack").get( - "image_name") -GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get( - "image_file_name") -GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get( - "image_disk_format") -GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get( - "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME - -CINDER_VOLUME_TYPE_NAME = "volume_test" - - -SUMMARY = [] - - -def push_results_to_db(case, payload, criteria): - - url = TEST_DB + "/results" - installer = functest_utils.get_installer_type(logger) - scenario = functest_utils.get_scenario(logger) - version = functest_utils.get_version(logger) - pod_name = functest_utils.get_pod_name(logger) - - # evalutate success criteria - - params = {"project_name": "functest", "case_name": case, - "pod_name": pod_name, "installer": installer, - "version": version, "scenario": scenario, - "criteria": criteria, "details": payload} - - headers = {'Content-Type': 'application/json'} - r = requests.post(url, data=json.dumps(params), headers=headers) - logger.debug(r) - - -def get_task_id(cmd_raw): - """ - get task id from command rally result - :param cmd_raw: - :return: task_id as string - """ - taskid_re = re.compile('^Task +(.*): started$') - for line in cmd_raw.splitlines(True): - line = line.strip() - match = taskid_re.match(line) - if match: - return match.group(1) - return None - - -def task_succeed(json_raw): - """ - Parse JSON from rally JSON results - :param json_raw: - :return: Bool - """ - rally_report = json.loads(json_raw) - for report in rally_report: - if report is None or report.get('result') is None: - return False - - for result in report.get('result'): - if result is None or len(result.get('error')) > 0: - return False - - return True - - -def live_migration_supported(): - config = iniparse.ConfigParser() - if (config.read(TEMPEST_CONF_FILE) and - config.has_section('compute-feature-enabled') and - config.has_option('compute-feature-enabled', 'live_migration')): - return config.getboolean('compute-feature-enabled', 'live_migration') - - return False - - -def build_task_args(test_file_name): - task_args = {'service_list': [test_file_name]} - task_args['image_name'] = GLANCE_IMAGE_NAME - task_args['flavor_name'] = FLAVOR_NAME - task_args['glance_image_location'] = GLANCE_IMAGE_PATH - task_args['tmpl_dir'] = TEMPLATE_DIR - task_args['sup_dir'] = SUPPORT_DIR - task_args['users_amount'] = USERS_AMOUNT - task_args['tenants_amount'] = TENANTS_AMOUNT - task_args['iterations'] = ITERATIONS_AMOUNT - task_args['concurrency'] = CONCURRENCY - - if args.sanity: - task_args['full_mode'] = False - task_args['smoke'] = True - else: - task_args['full_mode'] = True - task_args['smoke'] = args.smoke - - ext_net = openstack_utils.get_external_net(client_dict['neutron']) - if ext_net: - task_args['floating_network'] = str(ext_net) - else: - task_args['floating_network'] = '' - - net_id = network_dict['net_id'] - task_args['netid'] = str(net_id) - task_args['live_migration'] = live_migration_supported() - - return task_args - - -def get_output(proc, test_name): - global SUMMARY - result = "" - nb_tests = 0 - overall_duration = 0.0 - success = 0.0 - nb_totals = 0 - - while proc.poll() is None: - line = proc.stdout.readline() - if args.verbose: - result += line - else: - if ("Load duration" in line or - "started" in line or - "finished" in line or - " Preparing" in line or - "+-" in line or - "|" in line): - result += line - elif "test scenario" in line: - result += "\n" + line 
- elif "Full duration" in line: - result += line + "\n\n" - - # parse output for summary report - if ("| " in line and - "| action" not in line and - "| Starting" not in line and - "| Completed" not in line and - "| ITER" not in line and - "| " not in line and - "| total" not in line): - nb_tests += 1 - elif "| total" in line: - percentage = ((line.split('|')[8]).strip(' ')).strip('%') - try: - success += float(percentage) - except ValueError: - logger.info('Percentage error: %s, %s' % (percentage, line)) - nb_totals += 1 - elif "Full duration" in line: - duration = line.split(': ')[1] - try: - overall_duration += float(duration) - except ValueError: - logger.info('Duration error: %s, %s' % (duration, line)) - - overall_duration = "{:10.2f}".format(overall_duration) - if nb_totals == 0: - success_avg = 0 - else: - success_avg = "{:0.2f}".format(success / nb_totals) - - scenario_summary = {'test_name': test_name, - 'overall_duration': overall_duration, - 'nb_tests': nb_tests, - 'success': success_avg} - SUMMARY.append(scenario_summary) - - logger.info("\n" + result) - - return result - - -def get_cmd_output(proc): - result = "" - - while proc.poll() is None: - line = proc.stdout.readline() - result += line - - return result - - -def run_task(test_name): - # - # the "main" function of the script who launch rally for a task - # :param test_name: name for the rally test - # :return: void - # - global SUMMARY - logger.info('Starting test scenario "{}" ...'.format(test_name)) - - task_file = '{}task.yaml'.format(SCENARIOS_DIR) - if not os.path.exists(task_file): - logger.error("Task file '%s' does not exist." % task_file) - exit(-1) - - test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", - test_name) - if not os.path.exists(test_file_name): - logger.error("The scenario '%s' does not exist." 
% test_file_name) - exit(-1) - - logger.debug('Scenario fetched from : {}'.format(test_file_name)) - - cmd_line = ("rally task start --abort-on-sla-failure " + - "--task {} ".format(task_file) + - "--task-args \"{}\" ".format(build_task_args(test_name))) - logger.debug('running command line : {}'.format(cmd_line)) - - p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, - stderr=RALLY_STDERR, shell=True) - output = get_output(p, test_name) - task_id = get_task_id(output) - logger.debug('task_id : {}'.format(task_id)) - - if task_id is None: - logger.error('Failed to retrieve task_id, validating task...') - cmd_line = ("rally task validate " + - "--task {} ".format(task_file) + - "--task-args \"{}\" ".format(build_task_args(test_name))) - logger.debug('running command line : {}'.format(cmd_line)) - p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True) - output = get_cmd_output(p) - logger.error("Task validation result:" + "\n" + output) - return - - # check for result directory and create it otherwise - if not os.path.exists(RESULTS_DIR): - logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR)) - os.makedirs(RESULTS_DIR) - - # write html report file - report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name) - cmd_line = "rally task report {} --out {}".format(task_id, - report_file_name) - - logger.debug('running command line : {}'.format(cmd_line)) - os.popen(cmd_line) - - # get and save rally operation JSON result - cmd_line = "rally task results %s" % task_id - logger.debug('running command line : {}'.format(cmd_line)) - cmd = os.popen(cmd_line) - json_results = cmd.read() - with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f: - logger.debug('saving json file') - f.write(json_results) - - with open('{}opnfv-{}.json' - .format(RESULTS_DIR, test_name)) as json_file: - json_data = json.load(json_file) - - """ parse JSON operation result """ - status = "failed" - if task_succeed(json_results): - logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n") - status = "passed" - else: - logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n") - - # Push results in payload of testcase - if args.report: - logger.debug("Push result into DB") - push_results_to_db("Rally_details", json_data, status) - - -def main(): - global SUMMARY - global network_dict - # configure script - if not (args.test_name in tests): - logger.error('argument not valid') - exit(-1) - - SUMMARY = [] - creds_nova = openstack_utils.get_credentials("nova") - nova_client = novaclient.Client('2', **creds_nova) - creds_neutron = openstack_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - creds_keystone = openstack_utils.get_credentials("keystone") - keystone_client = keystoneclient.Client(**creds_keystone) - glance_endpoint = keystone_client.service_catalog.url_for( - service_type='image', endpoint_type='publicURL') - glance_client = glanceclient.Client(1, glance_endpoint, - token=keystone_client.auth_token) - creds_cinder = openstack_utils.get_credentials("cinder") - cinder_client = cinderclient.Client('2', creds_cinder['username'], - creds_cinder['api_key'], - creds_cinder['project_id'], - creds_cinder['auth_url'], - service_type="volume") - - client_dict['neutron'] = neutron_client - - volume_types = openstack_utils.list_volume_types(cinder_client, - private=False) - if not volume_types: - volume_type = openstack_utils.create_volume_type( - cinder_client, CINDER_VOLUME_TYPE_NAME) - if not 
volume_type: - logger.error("Failed to create volume type...") - exit(-1) - else: - logger.debug("Volume type '%s' created succesfully..." - % CINDER_VOLUME_TYPE_NAME) - else: - logger.debug("Using existing volume type(s)...") - - image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME) - image_exists = False - - if image_id == '': - logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH)) - image_id = openstack_utils.create_glance_image(glance_client, - GLANCE_IMAGE_NAME, - GLANCE_IMAGE_PATH) - if not image_id: - logger.error("Failed to create the Glance image...") - exit(-1) - else: - logger.debug("Image '%s' with ID '%s' created succesfully ." - % (GLANCE_IMAGE_NAME, image_id)) - else: - logger.debug("Using existing image '%s' with ID '%s'..." - % (GLANCE_IMAGE_NAME, image_id)) - image_exists = True - - logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME) - network_dict = openstack_utils.create_network_full(logger, - client_dict['neutron'], - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if not network_dict: - logger.error("Failed to create network...") - exit(-1) - else: - if not openstack_utils.update_neutron_net(client_dict['neutron'], - network_dict['net_id'], - shared=True): - logger.error("Failed to update network...") - exit(-1) - else: - logger.debug("Network '%s' available..." % PRIVATE_NET_NAME) - - if args.test_name == "all": - for test_name in tests: - if not (test_name == 'all' or - test_name == 'vm'): - run_task(test_name) - else: - logger.debug("Test name: " + args.test_name) - run_task(args.test_name) - - report = ("\n" - " " - "\n" - " Rally Summary Report\n" - "\n" - "+===================+============+===============+===========+" - "\n" - "| Module | Duration | nb. 
Test Run | Success |" - "\n" - "+===================+============+===============+===========+" - "\n") - payload = [] - - # for each scenario we draw a row for the table - total_duration = 0.0 - total_nb_tests = 0 - total_success = 0.0 - for s in SUMMARY: - name = "{0:<17}".format(s['test_name']) - duration = float(s['overall_duration']) - total_duration += duration - duration = time.strftime("%M:%S", time.gmtime(duration)) - duration = "{0:<10}".format(duration) - nb_tests = "{0:<13}".format(s['nb_tests']) - total_nb_tests += int(s['nb_tests']) - success = "{0:<10}".format(str(s['success']) + '%') - total_success += float(s['success']) - report += ("" + - "| " + name + " | " + duration + " | " + - nb_tests + " | " + success + "|\n" + - "+-------------------+------------" - "+---------------+-----------+\n") - payload.append({'module': name, - 'details': {'duration': s['overall_duration'], - 'nb tests': s['nb_tests'], - 'success': s['success']}}) - - total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration)) - total_duration_str2 = "{0:<10}".format(total_duration_str) - total_nb_tests_str = "{0:<13}".format(total_nb_tests) - total_success = "{:0.2f}".format(total_success / len(SUMMARY)) - total_success_str = "{0:<10}".format(str(total_success) + '%') - report += "+===================+============+===============+===========+" - report += "\n" - report += ("| TOTAL: | " + total_duration_str2 + " | " + - total_nb_tests_str + " | " + total_success_str + "|\n") - report += "+===================+============+===============+===========+" - report += "\n" - - logger.info("\n" + report) - payload.append({'summary': {'duration': total_duration, - 'nb tests': total_nb_tests, - 'nb success': total_success}}) - - # Generate json results for DB - # json_results = {"timestart": time_start, "duration": total_duration, - # "tests": int(total_nb_tests), - # "success": int(total_success)} - # logger.info("Results: "+str(json_results)) - - # Evaluation of the success criteria - status = "failed" - # for Rally we decided that the overall success rate must be above 90% - if total_success >= 90: - status = "passed" - - if args.report: - logger.debug("Pushing Rally summary into DB...") - push_results_to_db("Rally", payload, status) - - if args.noclean: - exit(0) - - if not image_exists: - logger.debug("Deleting image '%s' with ID '%s'..." - % (GLANCE_IMAGE_NAME, image_id)) - if not openstack_utils.delete_glance_image(nova_client, image_id): - logger.error("Error deleting the glance image") - - if not volume_types: - logger.debug("Deleting volume type '%s'..." - % CINDER_VOLUME_TYPE_NAME) - if not openstack_utils.delete_volume_type(cinder_client, volume_type): - logger.error("Error in deleting volume type...") - - -if __name__ == '__main__': - main() diff --git a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py b/testcases/VIM/OpenStack/CI/libraries/run_tempest.py deleted file mode 100644 index bf62ce30..00000000 --- a/testcases/VIM/OpenStack/CI/libraries/run_tempest.py +++ /dev/null @@ -1,347 +0,0 @@ -#!/usr/bin/env python -# -# Description: -# Runs tempest and pushes the results to the DB -# -# Authors: -# morgan.richomme@orange.com -# jose.lausuch@ericsson.com -# viktor.tikkanen@nokia.com -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -# -import argparse -import json -import os -import re -import requests -import shutil -import subprocess -import time -import yaml -import ConfigParser - -import keystoneclient.v2_0.client as ksclient -from neutronclient.v2_0 import client as neutronclient - -import functest.utils.functest_logger as ft_logger -import functest.utils.functest_utils as ft_utils -import functest.utils.openstack_utils as os_utils - -modes = ['full', 'smoke', 'baremetal', 'compute', 'data_processing', - 'identity', 'image', 'network', 'object_storage', 'orchestration', - 'telemetry', 'volume', 'custom', 'defcore'] - -""" tests configuration """ -parser = argparse.ArgumentParser() -parser.add_argument("-d", "--debug", - help="Debug mode", - action="store_true") -parser.add_argument("-s", "--serial", - help="Run tests in one thread", - action="store_true") -parser.add_argument("-m", "--mode", - help="Tempest test mode [smoke, all]", - default="smoke") -parser.add_argument("-r", "--report", - help="Create json result file", - action="store_true") -parser.add_argument("-n", "--noclean", - help="Don't clean the created resources for this test.", - action="store_true") - -args = parser.parse_args() - -""" logging configuration """ -logger = ft_logger.Logger("run_tempest").getLogger() - -REPO_PATH = os.environ['repos_dir'] + '/functest/' - - -with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f: - functest_yaml = yaml.safe_load(f) -f.close() -TEST_DB = functest_yaml.get("results").get("test_db_url") - -MODE = "smoke" -PRIVATE_NET_NAME = functest_yaml.get("tempest").get("private_net_name") -PRIVATE_SUBNET_NAME = functest_yaml.get("tempest").get("private_subnet_name") -PRIVATE_SUBNET_CIDR = functest_yaml.get("tempest").get("private_subnet_cidr") -ROUTER_NAME = functest_yaml.get("tempest").get("router_name") -TENANT_NAME = functest_yaml.get("tempest").get("identity").get("tenant_name") -TENANT_DESCRIPTION = functest_yaml.get("tempest").get("identity").get( - "tenant_description") -USER_NAME = functest_yaml.get("tempest").get("identity").get("user_name") -USER_PASSWORD = functest_yaml.get("tempest").get("identity").get( - "user_password") -DEPLOYMENT_MAME = functest_yaml.get("rally").get("deployment_name") -RALLY_INSTALLATION_DIR = functest_yaml.get("general").get("directories").get( - "dir_rally_inst") -RESULTS_DIR = functest_yaml.get("general").get("directories").get( - "dir_results") -TEMPEST_RESULTS_DIR = RESULTS_DIR + '/tempest' -TEST_LIST_DIR = functest_yaml.get("general").get("directories").get( - "dir_tempest_cases") -TEMPEST_CUSTOM = REPO_PATH + TEST_LIST_DIR + 'test_list.txt' -TEMPEST_BLACKLIST = REPO_PATH + TEST_LIST_DIR + 'blacklist.txt' -TEMPEST_DEFCORE = REPO_PATH + TEST_LIST_DIR + 'defcore_req.txt' -TEMPEST_RAW_LIST = TEMPEST_RESULTS_DIR + '/test_raw_list.txt' -TEMPEST_LIST = TEMPEST_RESULTS_DIR + '/test_list.txt' - - -def get_info(file_result): - test_run = "" - duration = "" - test_failed = "" - - p = subprocess.Popen('cat tempest.log', - shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - for line in p.stdout.readlines(): - # print line, - if (len(test_run) < 1): - test_run = re.findall("[0-9]*\.[0-9]*s", line) - if (len(duration) < 1): - duration = re.findall("[0-9]*\ tests", line) - regexp = r"(failures=[0-9]+)" - if (len(test_failed) < 1): - test_failed = 
re.findall(regexp, line) - - logger.debug("test_run:" + test_run) - logger.debug("duration:" + duration) - - -def push_results_to_db(case, payload, criteria): - - # TODO move DB creds into config file - url = TEST_DB + "/results" - installer = ft_utils.get_installer_type(logger) - scenario = ft_utils.get_scenario(logger) - version = ft_utils.get_version(logger) - pod_name = ft_utils.get_pod_name(logger) - - logger.info("Pushing results to DB: '%s'." % url) - - params = {"project_name": "functest", "case_name": case, - "pod_name": str(pod_name), 'installer': installer, - "version": version, "scenario": scenario, "criteria": criteria, - 'details': payload} - headers = {'Content-Type': 'application/json'} - - r = requests.post(url, data=json.dumps(params), headers=headers) - logger.debug(r) - - -def create_tempest_resources(): - ks_creds = os_utils.get_credentials("keystone") - logger.debug("Creating tenant and user for Tempest suite") - keystone = ksclient.Client(**ks_creds) - tenant_id = os_utils.create_tenant(keystone, - TENANT_NAME, - TENANT_DESCRIPTION) - if tenant_id == '': - logger.error("Error : Failed to create %s tenant" % TENANT_NAME) - - user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD, - None, tenant_id) - if user_id == '': - logger.error("Error : Failed to create %s user" % USER_NAME) - - logger.debug("Creating private network for Tempest suite") - creds_neutron = os_utils.get_credentials("neutron") - neutron_client = neutronclient.Client(**creds_neutron) - network_dic = os_utils.create_network_full(logger, - neutron_client, - PRIVATE_NET_NAME, - PRIVATE_SUBNET_NAME, - ROUTER_NAME, - PRIVATE_SUBNET_CIDR) - if network_dic: - if not os_utils.update_neutron_net(neutron_client, - network_dic['net_id'], - shared=True): - logger.error("Failed to update private network...") - exit(-1) - else: - logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME) - else: - logger.error("Private network creation failed") - exit(-1) - - -def configure_tempest(deployment_dir): - """ - Add/update needed parameters into tempest.conf file generated by Rally - """ - - logger.debug("Generating tempest.conf file...") - cmd = "rally verify genconfig" - ft_utils.execute_command(cmd, logger) - - logger.debug("Finding tempest.conf file...") - tempest_conf_file = deployment_dir + "/tempest.conf" - if not os.path.isfile(tempest_conf_file): - logger.error("Tempest configuration file %s NOT found." - % tempest_conf_file) - exit(-1) - - logger.debug("Updating selected tempest.conf parameters...") - config = ConfigParser.RawConfigParser() - config.read(tempest_conf_file) - config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME) - config.set('identity', 'tenant_name', TENANT_NAME) - config.set('identity', 'username', USER_NAME) - config.set('identity', 'password', USER_PASSWORD) - with open(tempest_conf_file, 'wb') as config_file: - config.write(config_file) - - # Copy tempest.conf to /home/opnfv/functest/results/tempest/ - shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf') - return True - - -def read_file(filename): - with open(filename) as src: - return [line.strip() for line in src.readlines()] - - -def generate_test_list(deployment_dir, mode): - logger.debug("Generating test case list...") - if mode == 'defcore': - shutil.copyfile(TEMPEST_DEFCORE, TEMPEST_RAW_LIST) - elif mode == 'custom': - if os.path.isfile(TEMPEST_CUSTOM): - shutil.copyfile(TEMPEST_CUSTOM, TEMPEST_RAW_LIST) - else: - logger.error("Tempest test list file %s NOT found." 
- % TEMPEST_CUSTOM) - exit(-1) - else: - if mode == 'smoke': - testr_mode = "smoke" - elif mode == 'full': - testr_mode = "" - else: - testr_mode = 'tempest.api.' + mode - cmd = ("cd " + deployment_dir + ";" + "testr list-tests " + - testr_mode + ">" + TEMPEST_RAW_LIST + ";cd") - ft_utils.execute_command(cmd, logger) - - -def apply_tempest_blacklist(): - logger.debug("Applying tempest blacklist...") - cases_file = read_file(TEMPEST_RAW_LIST) - result_file = open(TEMPEST_LIST, 'w') - try: - black_file = read_file(TEMPEST_BLACKLIST) - except: - black_file = '' - logger.debug("Tempest blacklist file does not exist.") - for line in cases_file: - if line not in black_file: - result_file.write(str(line) + '\n') - result_file.close() - - -def run_tempest(OPTION): - # - # the "main" function of the script which launches Rally to run Tempest - # :param option: tempest option (smoke, ..) - # :return: void - # - logger.info("Starting Tempest test suite: '%s'." % OPTION) - cmd_line = "rally verify start " + OPTION + " --system-wide" - CI_DEBUG = os.environ.get("CI_DEBUG") - if CI_DEBUG == "true" or CI_DEBUG == "True": - ft_utils.execute_command(cmd_line, logger, exit_on_error=True) - else: - header = ("Tempest environment:\n" - " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % - (os.getenv('INSTALLER_TYPE', 'Unknown'), - os.getenv('DEPLOY_SCENARIO', 'Unknown'), - os.getenv('NODE_NAME', 'Unknown'), - time.strftime("%a %b %d %H:%M:%S %Z %Y"))) - - f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') - f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') - f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') - f_env.write(header) - - subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) - - f_stdout.close() - f_stderr.close() - f_env.close() - - cmd_line = "rally verify show" - ft_utils.execute_command(cmd_line, logger, - exit_on_error=True, info=True) - - cmd_line = "rally verify list" - logger.debug('Executing command : {}'.format(cmd_line)) - cmd = os.popen(cmd_line) - output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|") - # Format: - # | UUID | Deployment UUID | smoke | tests | failures | Created at | - # Duration | Status | - num_tests = output[4] - num_failures = output[5] - time_start = output[6] - duration = output[7] - # Compute duration (lets assume it does not take more than 60 min) - dur_min = int(duration.split(':')[1]) - dur_sec_float = float(duration.split(':')[2]) - dur_sec_int = int(round(dur_sec_float, 0)) - dur_sec_int = dur_sec_int + 60 * dur_min - - # Generate json results for DB - json_results = {"timestart": time_start, "duration": dur_sec_int, - "tests": int(num_tests), "failures": int(num_failures)} - logger.info("Results: " + str(json_results)) - - status = "failed" - try: - diff = (int(num_tests) - int(num_failures)) - success_rate = 100 * diff / int(num_tests) - except: - success_rate = 0 - - # For Tempest we assume that teh success rate is above 90% - if success_rate >= 90: - status = "passed" - - # Push results in payload of testcase - if args.report: - logger.debug("Push result into DB") - push_results_to_db("Tempest", json_results, status) - - -def main(): - global MODE - - if not (args.mode in modes): - logger.error("Tempest mode not valid. 
" - "Possible values are:\n" + str(modes)) - exit(-1) - - if not os.path.exists(TEMPEST_RESULTS_DIR): - os.makedirs(TEMPEST_RESULTS_DIR) - - deployment_dir = ft_utils.get_deployment_dir(logger) - configure_tempest(deployment_dir) - create_tempest_resources() - generate_test_list(deployment_dir, args.mode) - apply_tempest_blacklist() - - MODE = "--tests-file " + TEMPEST_LIST - if args.serial: - MODE += " --concur 1" - - run_tempest(MODE) - - -if __name__ == '__main__': - main() diff --git a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml b/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml deleted file mode 100644 index 48c0333e..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml +++ /dev/null @@ -1,97 +0,0 @@ -{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%} -{%- if use_existing_users and caller is not defined -%} {} -{%- else %} - {%- if not use_existing_users %} - users: - tenants: {{ tenants }} - users_per_tenant: {{ users_per_tenant }} - {%- endif %} - {%- if caller is defined %} - {{ caller() }} - {%- endif %} -{%- endif %} -{%- endmacro %} - -{%- macro vm_params(image=none, flavor=none, size=none) %} -{%- if flavor is not none %} - flavor: - name: {{ flavor }} -{%- endif %} -{%- if image is not none %} - image: - name: {{ image }} -{%- endif %} -{%- if size is not none %} - size: {{ size }} -{%- endif %} -{%- endmacro %} - -{%- macro unlimited_volumes() %} - cinder: - gigabytes: -1 - snapshots: -1 - volumes: -1 -{%- endmacro %} - -{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %} - type: "constant" - {%- if is_smoke %} - concurrency: 1 - times: 1 - {%- else %} - concurrency: {{ concurrency }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro rps_runner(rps=1, times=1, is_smoke=True) %} - type: rps - {%- if is_smoke %} - rps: 1 - times: 1 - {%- else %} - rps: {{ rps }} - times: {{ times }} - {%- endif %} -{%- endmacro %} - -{%- macro no_failures_sla() %} - failure_rate: - max: 0 -{%- endmacro %} - -{%- macro volumes(size=1, volumes_per_tenant=1) %} - volumes: - size: {{ size }} - volumes_per_tenant: {{ volumes_per_tenant }} -{%- endmacro %} - -{%- macro unlimited_nova(keypairs=false) %} - nova: - cores: -1 - floating_ips: -1 - instances: -1 - {%- if keypairs %} - key_pairs: -1 - {%- endif %} - ram: -1 - security_group_rules: -1 - security_groups: -1 -{%- endmacro %} - -{%- macro unlimited_neutron(secgroups=false) %} - neutron: - network: -1 - port: -1 - subnet: -1 - {%- if secgroups %} - security_group: -1 - security_group_rule: -1 - {%- endif %} -{%- endmacro %} - -{%- macro glance_args(location, container="bare", type="qcow2") %} - container_format: {{ container }} - disk_format: {{ type }} - image_location: {{ location }} -{%- endmacro %} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml deleted file mode 100644 index a04e4c1c..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml +++ /dev/null @@ -1,63 +0,0 @@ - Authenticate.keystone: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_cinder: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, 
is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_glance: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_heat: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_neutron: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Authenticate.validate_nova: - - - args: - repetitions: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml deleted file mode 100644 index cb28ee84..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml +++ /dev/null @@ -1,272 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - CinderVolumes.create_and_attach_volume: - - - args: - {{ vm_params(image_name,flavor_name,1) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_snapshots: - - - args: - detailed: true - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_list_volume: - - - args: - detailed: true - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - detailed: true - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_upload_volume_to_image: - - - args: - container_format: "bare" - disk_format: "raw" - do_delete: true - force: false - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_nested_snapshots_and_attach_volume: - - - args: - nested_level: 1 - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, 
users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 1 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_snapshot_and_attach_volume: - - - args: - volume_type: false - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - volume_type: true - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_volume: - - - args: - size: 1 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - - - args: - size: - min: 1 - max: 5 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.list_volumes: - - - args: - detailed: True - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - volumes: - size: 1 - volumes_per_tenant: 4 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - CinderVolumes.create_and_delete_snapshot: - - - args: - force: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_and_delete_volume: - - - args: - size: - max: 1 - min: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - {{ vm_params(image_name,none,1) }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ 
no_failures_sla() }} - - CinderVolumes.create_and_extend_volume: - - - args: - new_size: 2 - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - CinderVolumes.create_from_volume_and_delete_volume: - - - args: - size: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_volumes() }} - {{ volumes() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml deleted file mode 100644 index adbf8b79..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml +++ /dev/null @@ -1,49 +0,0 @@ - GlanceImages.create_and_delete_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_and_list_image: - - - args: - {{ glance_args(location=glance_image_location) }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.list_images: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - GlanceImages.create_image_and_boot_instances: - - - args: - {{ glance_args(location=glance_image_location) }} - flavor: - name: {{ flavor_name }} - number_instances: 2 - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - quotas: - {{ unlimited_nova() }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml deleted file mode 100644 index 534d796e..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml +++ /dev/null @@ -1,160 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - HeatStacks.create_and_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/default.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template" - parameters: - public_net: {{ floating_network }} - image: {{ image_name }} - flavor: {{ flavor_name }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template" - parameters: - image: {{ image_name }} - flavor: {{ 
flavor_name }} - network_id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_and_list_stack: - - - args: - template_path: "{{ tmpl_dir }}/default.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_update_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/resource_group.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/resource_group.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% else %} - - HeatStacks.create_update_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template" - updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - HeatStacks.create_check_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, 
is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.create_suspend_resume_delete_stack: - - - args: - template_path: "{{ tmpl_dir }}/random_strings.yaml.template" - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HeatStacks.list_stacks_and_resources: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml deleted file mode 100644 index bfc9948b..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml +++ /dev/null @@ -1,92 +0,0 @@ - KeystoneBasic.add_and_remove_user_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_add_and_list_user_roles: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_tenants: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_role: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_delete_service: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.get_entities: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_update_and_delete_tenant: - - - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_user: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_tenant: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_and_list_users: - - - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - KeystoneBasic.create_tenant_with_users: - - - args: - users_per_tenant: 10 - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml 
b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml deleted file mode 100644 index 3804d258..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml +++ /dev/null @@ -1,245 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - NeutronNetworks.create_and_update_networks: - - - args: - network_create_args: {} - network_update_args: - admin_state_up: false - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_ports: - - - args: - network_create_args: {} - port_create_args: {} - port_update_args: - admin_state_up: false - device_id: "dummy_id" - device_owner: "dummy_owner" - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_routers: - - - args: - network_create_args: {} - router_create_args: {} - router_update_args: - admin_state_up: false - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_update_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.4.0.0/16" - subnet_create_args: {} - subnet_update_args: - enable_dhcp: false - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} - - NeutronNetworks.create_and_delete_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_delete_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - port: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} 
- - NeutronNetworks.create_and_delete_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_networks: - - - args: - network_create_args: {} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - neutron: - network: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_ports: - - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - port: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_routers: - - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - router: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NeutronNetworks.create_and_list_subnets: - - - args: - network_create_args: {} - subnet_cidr_start: "1.1.0.0/30" - subnet_create_args: {} - subnets_per_network: 1 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml deleted file mode 100644 index f0fed8ef..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml +++ /dev/null @@ -1,378 +0,0 @@ -{# all scenarios included only in full mode #} - -{% if full_mode %} - - NovaKeypair.create_and_delete_keypair: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaKeypair.create_and_list_keypairs: - - - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_bounce_server: - - - args: - actions: - - - hard_reboot: 1 - - - soft_reboot: 1 - - - stop_start: 1 - - - rescue_unrescue: 1 - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: 
"100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_delete_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_list_server: - - - args: - detailed: true - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_rebuild_server: - - - args: - {{ vm_params(flavor=flavor_name) }} - from_image: - name: {{ image_name }} - to_image: - name: {{ image_name }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.snapshot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 10 - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.create_and_delete_secgroups: - - - args: - security_group_count: 10 - rules_per_security_group: 10 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.create_and_list_secgroups: - - - args: - security_group_count: 10 - rules_per_security_group: 10 - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - quotas: - {{ 
unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.list_servers: - - - args: - detailed: True - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - servers: - {{ vm_params(image_name,flavor_name,none)|indent(2,true) }} - servers_per_tenant: 2 - auto_assign_nic: true - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.resize_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - to_flavor: - name: "m1.small" - confirm: true - force_delete: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% if live_migration %} - - NovaServers.boot_and_live_migrate_server: - - args: - {{ vm_params(image_name, flavor_name) }} - block_migration: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_attach_created_volume_and_live_migrate: - - - args: - {{ vm_params(image_name, flavor_name) }} - size: 10 - block_migration: false - boot_server_kwargs: - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_live_migrate: - - args: - {{ vm_params(image_name, flavor_name) }} - block_migration: false - volume_size: 10 - force_delete: false - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - -{% endif %} -{% endif %} - - NovaKeypair.boot_and_delete_server_with_keypair: - - - args: - {{ vm_params(image_name, flavor_name) }} - server_kwargs: - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova(keypairs=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_server_from_volume_and_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - volume_size: 5 - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_volumes() }} - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.pause_and_unpause_server: - - - args: - {{ vm_params(image_name, flavor_name) }} - force_delete: false - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, 
use_existing_users) %} - network: - networks_per_tenant: 1 - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_neutron() }} - {{ unlimited_nova() }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaSecGroup.boot_and_delete_server_with_secgroups: - - - args: - {{ vm_params(image_name, flavor_name) }} - security_group_count: 10 - rules_per_security_group: 10 - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: - start_cidr: "100.1.0.0/25" - quotas: - {{ unlimited_nova() }} - {{ unlimited_neutron(secgroups=true) }} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - NovaServers.boot_and_migrate_server: - - args: - {{ vm_params(image_name, flavor_name) }} - nics: - - net-id: {{ netid }} - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml deleted file mode 100644 index a0682acc..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml +++ /dev/null @@ -1,54 +0,0 @@ - Quotas.cinder_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.cinder_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.neutron_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update_and_delete: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - Quotas.nova_update: - - - args: - max_quota: 1024 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml deleted file mode 100644 index 6affcc6c..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml +++ /dev/null @@ -1,28 +0,0 @@ - HttpRequests.check_random_request: - - - args: - requests: - - - url: "http://www.example.com" - method: "GET" - status_code: 200 - - - url: "http://www.openstack.org" - method: "GET" - status_code: 200 - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - HttpRequests.check_request: - - - args: - url: "http://www.example.com" - method: "GET" - status_code: 200 - 
allow_redirects: False - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml deleted file mode 100644 index f102edb2..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml +++ /dev/null @@ -1,268 +0,0 @@ - TempestScenario.list_of_tests: - - - args: - tempest_conf: /etc/tempest/tempest.conf - test_names: - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors - - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail - - tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images - - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted - - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups - - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups - - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip - - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail - - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail - - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server - - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm - - 
tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped - - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses - - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network - - tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance - - tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas - - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas - - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas - - tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_create - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_delete - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_get - - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_list - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_create - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_delete - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_get - - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_list - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_create - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_delete - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_get - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_list - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_job_binary_get_data - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_create - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_delete - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_get - - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_list - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_create - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_delete - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get - - 
tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get_data - - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_list - - tempest.api.data_processing.test_jobs.JobTest.test_job_create - - tempest.api.data_processing.test_jobs.JobTest.test_job_delete - - tempest.api.data_processing.test_jobs.JobTest.test_job_get - - tempest.api.data_processing.test_jobs.JobTest.test_job_list - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_create - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_delete - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_get - - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_list - - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_get - - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_list - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_compare_db_flavors_with_os - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_get_db_flavor - - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_list_db_flavors - - tempest.api.database.limits.test_limits.DatabaseLimitsTest.test_absolute_limits - - tempest.api.database.versions.test_versions.DatabaseVersionsTest.test_list_db_versions - - tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services - - tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user - - tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete - - tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain - - tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint - - tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete - - tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy - - tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id - - tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list - - tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service - - tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all - - tempest.api.messaging.test_claims.TestClaims.test_post_claim - - tempest.api.messaging.test_claims.TestClaims.test_query_claim - - tempest.api.messaging.test_claims.TestClaims.test_release_claim - - tempest.api.messaging.test_claims.TestClaims.test_update_claim - - tempest.api.messaging.test_messages.TestMessages.test_delete_multiple_messages - - tempest.api.messaging.test_messages.TestMessages.test_delete_single_message - - tempest.api.messaging.test_messages.TestMessages.test_get_message - - tempest.api.messaging.test_messages.TestMessages.test_get_multiple_messages - - tempest.api.messaging.test_messages.TestMessages.test_list_messages - - tempest.api.messaging.test_messages.TestMessages.test_post_messages - - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_existence - - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_head - - tempest.api.messaging.test_queues.TestManageQueue.test_get_queue_stats - - 
tempest.api.messaging.test_queues.TestManageQueue.test_list_queues - - tempest.api.messaging.test_queues.TestManageQueue.test_set_and_get_queue_metadata - - tempest.api.messaging.test_queues.TestQueues.test_create_delete_queue - - tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions - - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address - - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port - - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port - - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network - - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network - - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet - - tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet - - tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility - - tempest.api.network.test_networks.NetworksTestJSON.test_list_networks - - tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets - - tempest.api.network.test_networks.NetworksTestJSON.test_show_network - - tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports - - tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port - - tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools - - tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups - - tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port - - tempest.api.network.test_ports.PortsTestJSON.test_list_ports - - tempest.api.network.test_ports.PortsTestJSON.test_show_port - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces - - 
tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id - - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id - - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router - - tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces - - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id - - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id - - tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule - - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups - - tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group - - tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule - - tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups - - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota - - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object - - tempest.api.object_storage.test_account_services.AccountTest.test_list_account_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_json - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_xml - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker_and_end_marker - - tempest.api.object_storage.test_account_services.AccountTest.test_list_extensions - - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_account_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_containers - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_and_delete_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_matadata_key - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_metadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata - - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata_key - - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_read_object_with_rights - - 
tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_write_object_with_rights - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_large_object - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects - - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_overwrite - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_value - - tempest.api.object_storage.test_container_services.ContainerTest.test_delete_container - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_delimiter - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_end_marker - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_json - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_xml - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_limit - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_marker - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_no_object - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_path - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_prefix - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_list_no_container_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_and_delete_matadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_matadata_key - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata - - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata_key - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_fresh_metadata - - 
tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_meta - - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_metakey - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_match - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_modified_since - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_unmodified_since - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_range - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_newest - - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_no_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_create_and_remove_metadata - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_object_manifest - - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_remove_object_metakey - - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds - - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_object_with_another_user_creds - - tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show - - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change - - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change - - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources - - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds - - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications - - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications - - tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance - - 
tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance - - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete - - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image - - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete - - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image - - tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list - - tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list - runner: - concurrency: 1 - times: 1 - type: serial - sla: - failure_rate: - max: 0 - diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml deleted file mode 100644 index 74f50992..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml +++ /dev/null @@ -1,42 +0,0 @@ - VMTasks.boot_runcommand_delete: - - - args: - {{ vm_params(image_name, flavor_name) }} - floating_network: {{ floating_network }} - force_delete: false - command: - interpreter: /bin/sh - script_file: {{ sup_dir }}/instance_dd_test.sh - username: cirros - nics: - - net-id: {{ netid }} - context: - {% call user_context(tenants_amount, users_amount, use_existing_users) %} - network: {} - {% endcall %} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} - - - - args: - {{ vm_params(image_name, flavor_name) }} - fixed_network: private - floating_network: {{ floating_network }} - force_delete: false - command: - interpreter: /bin/sh - script_file: {{ sup_dir }}/instance_dd_test.sh - use_floatingip: true - username: cirros - nics: - - net-id: {{ netid }} - volume_args: - size: 2 - context: - {{ user_context(tenants_amount, users_amount, use_existing_users) }} - runner: - {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} - sla: - {{ no_failures_sla() }} diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh b/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh deleted file mode 100644 index e3bf2340..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; } -file=/tmp/test.img -c=${1:-$SIZE} -c=${c:-1000} #default is 1GB -write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c") -read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c") -[ -f $file ] && rm $file - -echo "{ - \"write_seq_${c}m\": $write_seq, - \"read_seq_${c}m\": $read_seq - }" diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template deleted file mode 100644 index a22487e3..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -heat_template_version: 2013-05-23 - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - 
scaling_adjustment: 1 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template deleted file mode 100644 index eb4f2f2d..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template +++ /dev/null @@ -1 +0,0 @@ -heat_template_version: 2014-10-16 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template deleted file mode 100644 index 2dd676c1..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template deleted file mode 100644 index b3f505fa..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template +++ /dev/null @@ -1,13 +0,0 @@ -heat_template_version: 2014-10-16 - -description: Test template for rally create-update-delete scenario - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 2 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template deleted file mode 100644 index 909f45d2..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template +++ /dev/null @@ -1,64 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - public_net: - type: string - default: public - image: - type: string - default: cirros-0.3.4-x86_64-uec - flavor: - type: string - default: m1.tiny - cidr: - type: string - default: 11.11.11.0/24 - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - port: { get_resource: server_port } - - router: - type: OS::Neutron::Router - properties: - external_gateway_info: - network: {get_param: public_net} - - router_interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: private_subnet } - - private_net: - type: OS::Neutron::Net - - private_subnet: - type: OS::Neutron::Subnet - properties: - network: { get_resource: private_net } - cidr: {get_param: cidr} - - port_security_group: - type: OS::Neutron::SecurityGroup - properties: - name: default_port_security_group - description: > - Default security group assigned to port. The neutron default group is not - used because neutron creates several groups with the same name=default and - nova cannot chooses which one should it use. 
- - server_port: - type: OS::Neutron::Port - properties: - network: {get_resource: private_net} - fixed_ips: - - subnet: { get_resource: private_subnet } - security_groups: - - { get_resource: port_security_group } diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template deleted file mode 100644 index 826ca9da..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template +++ /dev/null @@ -1,43 +0,0 @@ -heat_template_version: 2013-05-23 - -parameters: - # set all correct defaults for parameters before launch test - image: - type: string - default: cirros-0.3.4-x86_64-uec - flavor: - type: string - default: m1.tiny - availability_zone: - type: string - description: The Availability Zone to launch the instance. - default: nova - volume_size: - type: number - description: Size of the volume to be created. - default: 1 - constraints: - - range: { min: 1, max: 1024 } - description: must be between 1 and 1024 Gb. - network_id: - type: string - -resources: - server: - type: OS::Nova::Server - properties: - image: {get_param: image} - flavor: {get_param: flavor} - networks: - - network: { get_param: network_id } - cinder_volume: - type: OS::Cinder::Volume - properties: - size: { get_param: volume_size } - availability_zone: { get_param: availability_zone } - volume_attachment: - type: OS::Cinder::VolumeAttachment - properties: - volume_id: { get_resource: cinder_volume } - instance_uuid: { get_resource: server} - mountpoint: /dev/vdc diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template deleted file mode 100644 index cf34879c..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template +++ /dev/null @@ -1,23 +0,0 @@ -heat_template_version: 2013-05-23 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates resource parameters without resource re-creation(replacement) - in the stack defined by autoscaling_policy.yaml.template. It allows to measure - performance of "pure" resource update operation only. - -resources: - test_group: - type: OS::Heat::AutoScalingGroup - properties: - desired_capacity: 0 - max_size: 0 - min_size: 0 - resource: - type: OS::Heat::RandomString - test_policy: - type: OS::Heat::ScalingPolicy - properties: - adjustment_type: change_in_capacity - auto_scaling_group_id: { get_resource: test_group } - scaling_adjustment: -1 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template deleted file mode 100644 index e06d42e0..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates the stack defined by random_strings.yaml.template with additional resource. 
- -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_three: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template deleted file mode 100644 index d02593e3..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template +++ /dev/null @@ -1,11 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by random_strings.yaml.template. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template deleted file mode 100644 index 46d8bff4..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template +++ /dev/null @@ -1,19 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by - random_strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. - -resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template deleted file mode 100644 index 891074eb..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. - The template updates one resource from the stack defined by resource_group.yaml.template - and adds children resources to that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 3 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template deleted file mode 100644 index b4d1d173..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -heat_template_version: 2014-10-16 - -description: > - Test template for create-update-delete-stack scenario in rally. 
- The template updates one resource from the stack defined by resource_group.yaml.template - and deletes children resources from that resource. - -resources: - test_group: - type: OS::Heat::ResourceGroup - properties: - count: 1 - resource_def: - type: OS::Heat::RandomString - properties: - length: 20 \ No newline at end of file diff --git a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml b/testcases/VIM/OpenStack/CI/rally_cert/task.yaml deleted file mode 100644 index b6789166..00000000 --- a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{%- set glance_image_location = glance_image_location|default("http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img") %} -{%- set image_name = image_name|default("functest-img-rally") %} -{%- set flavor_name = flavor_name|default("m1.tiny") %} -{%- set use_existing_users = use_existing_users|default(false) %} -{%- set service_list = service_list|default(["authenticate", "cinder", "keystone", "nova", "glance", "neutron", "quotas", "requests", "heat", "vm"]) %} -{%- set live_migration = live_migration|default(false) %} -{%- set smoke = smoke|default(true) %} -{%- set floating_network = floating_network|default("net04_ext") %} -{%- set controllers_amount = controllers_amount|default(1) %} -{%- if smoke %} -{%- set users_amount = 1 %} -{%- set tenants_amount = 1 %} -{%- else %} -{%- set users_amount = users_amount|default(1) %} -{%- set tenants_amount = tenants_amount|default(1) %} -{%- endif %} - -{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%} -{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%} - ---- -{% if "authenticate" in service_list %} -{%- include "scenario/opnfv-authenticate.yaml"-%} -{% endif %} - -{% if "cinder" in service_list %} -{%- include "scenario/opnfv-cinder.yaml"-%} -{% endif %} - -{% if "keystone" in service_list %} -{%- include "scenario/opnfv-keystone.yaml"-%} -{% endif %} - -{% if "nova" in service_list %} -{%- include "scenario/opnfv-nova.yaml"-%} -{% endif %} - -{% if "glance" in service_list %} -{%- include "scenario/opnfv-glance.yaml"-%} -{% endif %} - -{% if "neutron" in service_list %} -{%- include "scenario/opnfv-neutron.yaml"-%} -{% endif %} - -{% if "quotas" in service_list %} -{%- include "scenario/opnfv-quotas.yaml"-%} -{% endif %} - -{% if "requests" in service_list %} -{%- include "scenario/opnfv-requests.yaml"-%} -{% endif %} - -{% if "heat" in service_list %} -{%- include "scenario/opnfv-heat.yaml"-%} -{% endif %} - -{% if "vm" in service_list %} -{%- include "scenario/opnfv-vm.yaml"-%} -{% endif %} -- cgit 1.2.3-korg
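
For context on how the Rally job being relocated here is assembled: task.yaml above is a Jinja2 template that first sets defaults (image, flavor, floating network, smoke mode, user/tenant counts) and then conditionally includes one scenario file per entry in service_list, pulling shared context/runner/SLA snippets from macro/macro.yaml. The sketch below is a minimal, illustrative way to render such a template into a concrete task file; it is not taken from this repository. The RALLY_DIR path, the output file name, and the placeholder values for variables that have no |default() in the template (netid, sup_dir, concurrency, iterations) are assumptions.

    # Illustrative only -- not part of this change-set.  A minimal sketch of how the
    # Jinja2-templated Rally job above (task.yaml + macro/ + scenario/) can be rendered
    # into a concrete task file, under the assumptions stated in the note above.
    import os
    from jinja2 import Environment, FileSystemLoader

    # Assumed post-restructuring location; before this commit the files lived in
    # testcases/VIM/OpenStack/CI/rally_cert/.
    RALLY_DIR = "testcases/OpenStack/rally"

    # The loader root must contain task.yaml plus the macro/ and scenario/ subdirs,
    # because task.yaml refers to them with relative {% from %} / {% include %} paths.
    env = Environment(loader=FileSystemLoader(RALLY_DIR))
    task_template = env.get_template("task.yaml")

    rendered = task_template.render(
        service_list=["vm"],                    # render only the opnfv-vm.yaml scenario
        smoke=True,                             # task.yaml then forces 1 user / 1 tenant
        image_name="functest-img-rally",
        flavor_name="m1.tiny",
        floating_network="net04_ext",
        netid="REPLACE_WITH_NEUTRON_NET_UUID",  # placeholder: no |default() in the template
        sup_dir=os.path.join(RALLY_DIR, "scenario", "support"),
        concurrency=1,
        iterations=1,
    )

    with open("opnfv-vm-task.rendered.yaml", "w") as task_file:
        task_file.write(rendered)
    print("wrote %d bytes of rendered Rally task" % len(rendered))

Rally can also render the template itself at submission time (e.g. rally task start task.yaml --task-args '<json>'), which is presumably how the run_rally-cert.py script removed in this change-set fed these scenarios to Rally.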