Diffstat (limited to 'functest/opnfv_tests/openstack')
16 files changed, 1207 insertions, 318 deletions
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt index 95bea2b7..099d6864 100644 --- a/functest/opnfv_tests/openstack/rally/blacklist.txt +++ b/functest/opnfv_tests/openstack/rally/blacklist.txt @@ -6,6 +6,38 @@ scenario: - joid tests: - NovaServers.boot_server_from_volume_and_delete + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following tests currently fail due to required Gnocchi API: + # HTTP 410: "This telemetry installation is configured to use + # Gnocchi. Please use the Gnocchi API available on the + # metric endpoint to retrieve data." + # Issue: https://bugs.launchpad.net/rally/+bug/1704322 + - CeilometerMeters.list_matched_meters + - CeilometerMeters.list_meters + - CeilometerQueries.create_and_query_samples + - CeilometerResource.get_tenant_resources + - CeilometerResource.list_matched_resources + - CeilometerResource.list_resources + - CeilometerSamples.list_matched_samples + - CeilometerSamples.list_samples + - CeilometerStats.create_meter_and_get_stats + - CeilometerStats.get_stats + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following test currently fails due to but in + # python-ceilometerclient during fetching of event_types + # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138 + # Fix: https://review.openstack.org/#/c/483402/ + - CeilometerEvents.create_user_and_list_event_types functionality: - diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py index 6b7c49ca..fdef8bed 100644 --- a/functest/opnfv_tests/openstack/rally/rally.py +++ b/functest/opnfv_tests/openstack/rally/rally.py @@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__) class RallyBase(testcase.OSGCTestCase): """Base class form Rally testcases implementation.""" - TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', - 'neutron', 'nova', 'quotas', 'vm', 'all'] + TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat', + 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all'] GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name') GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name') GLANCE_IMAGE_PATH = os.path.join( diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml new file mode 100644 index 00000000..7efb5a83 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml @@ -0,0 +1,458 @@ + CeilometerMeters.list_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + 
counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ 
user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.create_meter_and_get_stats: + - + args: + user_id: "user-id" + resource_id: "resource-id" + counter_volume: 1.0 + counter_unit: "" + counter_type: "cumulative" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 
+ groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_matched_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: "fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + + CeilometerMeters.list_matched_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_matched_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: 
"fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + batch_size: 5 + {% endcall %} + args: + limit: 50 + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml new file mode 100644 index 00000000..bb070cd3 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml @@ -0,0 +1,247 @@ + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + 
insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 + groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% 
call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml index 033edb83..65f101fb 100644 --- a/functest/opnfv_tests/openstack/rally/task.yaml +++ b/functest/opnfv_tests/openstack/rally/task.yaml @@ -31,6 +31,10 @@ {%- include "var/opnfv-neutron.yaml"-%} {% endif %} +{% if "ceilometer" in service_list %} +{%- include "var/opnfv-ceilometer.yaml"-%} +{% endif %} + {% if "quotas" in service_list %} {%- include "var/opnfv-quotas.yaml"-%} {% endif %} diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py index 6ac72176..4f71b5f5 100644 --- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py +++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py @@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \ from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils # logging configuration """ LOGGER = logging.getLogger(__name__) -class RefstackClient(testcase.OSGCTestCase): +class RefstackClient(testcase.TestCase): """RefstackClient testcase implementation class.""" def __init__(self, **kwargs): @@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase): if "case_name" not in kwargs: kwargs["case_name"] = "refstack_defcore" super(RefstackClient, self).__init__(**kwargs) + self.tempestconf = None self.conf_path = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') @@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase): CONST.__getattribute__('OS_INSECURE').lower() == 'true'): self.insecure = '-k' + def generate_conf(self): + if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): + os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) + + self.tempestconf = TempestConf() + self.tempestconf.generate_tempestconf() + def run_defcore(self, conf, testlist): """Run defcore sys command.""" cmd = ("refstack-client test {0} -c {1} -v --test-list {2}" @@ -65,42 +74,29 @@ class RefstackClient(testcase.OSGCTestCase): ft_utils.execute_command(cmd) def run_defcore_default(self): - """Run default defcare sys command.""" - cmd = ("refstack-client test {0} -c {1} -v --test-list {2}" - .format(self.insecure, self.confpath, self.defcorelist)) + """Run default defcore sys command.""" + options = ["-v"] if not self.insecure else ["-v", self.insecure] + cmd = (["refstack-client", "test", "-c", self.confpath] + + options + ["--test-list", self.defcorelist]) LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd) - header = ("Refstack environment:\n" - " SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % - (CONST.__getattribute__('INSTALLER_TYPE'), - CONST.__getattribute__('DEPLOY_SCENARIO'), - CONST.__getattribute__('NODE_NAME'), - time.strftime("%a %b %d %H:%M:%S %Z %Y"))) - - f_stdout = open( - os.path.join(conf_utils.REFSTACK_RESULTS_DIR, - 
"refstack.log"), 'w+') - f_env = open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, - "environment.log"), 'w+') - f_env.write(header) - - process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, bufsize=1) - - with process.stdout: - for line in iter(process.stdout.readline, b''): - if 'Tests' in line: - break - if re.search(r"\} tempest\.", line): - LOGGER.info(line.replace('\n', '')) - f_stdout.write(line) - process.wait() - - f_stdout.close() - f_env.close() + with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "environment.log"), 'w+') as f_env: + f_env.write( + ("Refstack environment:\n" + " SUT: {}\n Scenario: {}\n Node: {}\n Date: {}\n").format( + CONST.__getattribute__('INSTALLER_TYPE'), + CONST.__getattribute__('DEPLOY_SCENARIO'), + CONST.__getattribute__('NODE_NAME'), + time.strftime("%a %b %d %H:%M:%S %Z %Y"))) + + with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "refstack.log"), 'w+') as f_stdout: + subprocess.call(cmd, shell=False, stdout=f_stdout, + stderr=subprocess.STDOUT) def parse_refstack_result(self): - """Parse Refstact results.""" + """Parse Refstack results.""" try: with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, "refstack.log"), 'r') as logfile: @@ -119,15 +115,15 @@ class RefstackClient(testcase.OSGCTestCase): for match in re.findall(r"(- Failed: )(\d+)", output): num_failures = match[1] LOGGER.info("".join(match)) - success_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*ok", output): - success_testcases += match + ", " - failed_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*FAILED", output): - failed_testcases += match + ", " - skipped_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*SKIPPED:", output): - skipped_testcases += match + ", " + success_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*ok", output): + success_testcases.append(match) + failed_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*FAILED", output): + failed_testcases.append(match) + skipped_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*SKIPPED:", output): + skipped_testcases.append(match) num_executed = int(num_tests) - int(num_skipped) @@ -157,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase): """ self.start_time = time.time() - if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): - os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) - try: - tempestconf = TempestConf() - tempestconf.generate_tempestconf() + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() self.run_defcore_default() self.parse_refstack_result() res = testcase.TestCase.EX_OK except Exception: LOGGER.exception("Error with run") res = testcase.TestCase.EX_RUN_ERROR + finally: + self.tempestconf.clean() self.stop_time = time.time() return res @@ -207,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase): return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. 
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html + + :return: TestCase.EX_OK + """ + LOGGER.info("Initializing the saved state of the OpenStack deployment") + + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() + + os_utils.init_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(RefstackClient, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources. + For details, see https://docs.openstack.org/tempest/latest/cleanup.html + """ + LOGGER.info("Destroying the resources created for tempest") + + os_utils.perform_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(RefstackClient, self).clean() + class RefstackClientParser(object): # pylint: disable=too-few-public-methods """Command line argument parser helper.""" diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py index 30590b9e..db745227 100644 --- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py +++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py @@ -11,13 +11,15 @@ import pkg_resources from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils import openstack_utils from functest.utils.constants import CONST +from functest.opnfv_tests.openstack.tempest.tempest \ + import TempestResourcesManager """ logging configuration """ logger = logging.getLogger(__name__) class TempestConf(object): - def __init__(self): + def __init__(self, **kwargs): self.VERIFIER_ID = conf_utils.get_verifier_id() self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir( self.VERIFIER_ID) @@ -27,15 +29,22 @@ class TempestConf(object): self.confpath = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') + self.resources = TempestResourcesManager(**kwargs) def generate_tempestconf(self): try: openstack_utils.source_credentials( CONST.__getattribute__('openstack_creds')) - img_flavor_dict = conf_utils.create_tempest_resources( - use_custom_images=True, use_custom_flavors=True) + resources = self.resources.create(create_project=True, + use_custom_images=True, + use_custom_flavors=True) conf_utils.configure_tempest_defcore( - self.DEPLOYMENT_DIR, img_flavor_dict) + self.DEPLOYMENT_DIR, + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + image_id_alt=resources.get("image_id_alt"), + flavor_id_alt=resources.get("flavor_id_alt"), + tenant_id=resources.get("project_id")) except Exception as e: logger.error("error with generating refstack client " "reference tempest conf file: %s", e) @@ -48,6 +57,9 @@ class TempestConf(object): except Exception as e: logger.error('Error with run: %s', e) + def clean(self): + self.resources.cleanup() + def main(): logging.basicConfig() diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py index 0b87440b..19c6a87f 100644 --- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py +++ b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py @@ -28,9 +28,14 @@ class SnapsTestRunner(unit.Suite): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if 
hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') self.os_creds = openstack_tests.get_credentials( os_env_file=CONST.__getattribute__('openstack_creds'), - proxy_settings_str=None, ssh_proxy_cmd=None) + proxy_settings_str=None, ssh_proxy_cmd=None, + overrides=creds_override) if 'ext_net_name' in kwargs: self.ext_net_name = kwargs['ext_net_name'] diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py index 327ba073..309f9db1 100644 --- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py +++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py @@ -16,4 +16,4 @@ def get_ext_net_name(os_creds): """ neutron = neutron_utils.neutron_client(os_creds) ext_nets = neutron_utils.get_external_networks(neutron) - return ext_nets[0]['network']['name'] + return ext_nets[0].name diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py index fa8f00fc..52fa6003 100644 --- a/functest/opnfv_tests/openstack/tempest/conf_utils.py +++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py @@ -11,10 +11,11 @@ import ConfigParser import logging import os import pkg_resources -import re import shutil import subprocess +import yaml + from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils import functest.utils.openstack_utils as os_utils @@ -28,16 +29,21 @@ GLANCE_IMAGE_PATH = os.path.join( TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'tempest') TEMPEST_CUSTOM = pkg_resources.resource_filename( - 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt') + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt') TEMPEST_BLACKLIST = pkg_resources.resource_filename( - 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt') + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt') TEMPEST_DEFCORE = pkg_resources.resource_filename( - 'functest', - 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt') + 'functest', + 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt') TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt') TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt') REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'refstack') +TEMPEST_CONF_YAML = pkg_resources.resource_filename( + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml') +TEST_ACCOUNTS_FILE = pkg_resources.resource_filename( + 'functest', + 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml') CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE') CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') @@ -46,96 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') logger = logging.getLogger(__name__) -def create_tempest_resources(use_custom_images=False, - use_custom_flavors=False): - keystone_client = os_utils.get_keystone_client() - - logger.debug("Creating tenant and user for Tempest suite") - tenant_id = os_utils.create_tenant( - keystone_client, - CONST.__getattribute__('tempest_identity_tenant_name'), - CONST.__getattribute__('tempest_identity_tenant_description')) - if not tenant_id: - logger.error("Failed to create %s tenant" - % CONST.__getattribute__('tempest_identity_tenant_name')) - - user_id = os_utils.create_user( - keystone_client, - CONST.__getattribute__('tempest_identity_user_name'), - 
CONST.__getattribute__('tempest_identity_user_password'), - None, tenant_id) - if not user_id: - logger.error("Failed to create %s user" % - CONST.__getattribute__('tempest_identity_user_name')) - - logger.debug("Creating private network for Tempest suite") - network_dic = os_utils.create_shared_network_full( - CONST.__getattribute__('tempest_private_net_name'), - CONST.__getattribute__('tempest_private_subnet_name'), - CONST.__getattribute__('tempest_router_name'), - CONST.__getattribute__('tempest_private_subnet_cidr')) - if network_dic is None: - raise Exception('Failed to create private network') - - image_id = "" - image_id_alt = "" - flavor_id = "" - flavor_id_alt = "" - - if (CONST.__getattribute__('tempest_use_custom_images') or - use_custom_images): - # adding alternative image should be trivial should we need it - logger.debug("Creating image for Tempest suite") - _, image_id = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name'), - GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id is None: - raise Exception('Failed to create image') - - if use_custom_images: - logger.debug("Creating 2nd image for Tempest suite") - _, image_id_alt = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name_alt'), - GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id_alt is None: - raise Exception('Failed to create image') - - if (CONST.__getattribute__('tempest_use_custom_flavors') or - use_custom_flavors): - # adding alternative flavor should be trivial should we need it - logger.debug("Creating flavor for Tempest suite") - _, flavor_id = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id is None: - raise Exception('Failed to create flavor') - - if use_custom_flavors: - logger.debug("Creating 2nd flavor for tempest_defcore") - _, flavor_id_alt = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name_alt'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id_alt is None: - raise Exception('Failed to create flavor') - - img_flavor_dict = {} - img_flavor_dict['image_id'] = image_id - img_flavor_dict['image_id_alt'] = image_id_alt - img_flavor_dict['flavor_id'] = flavor_id - img_flavor_dict['flavor_id_alt'] = flavor_id_alt - - return img_flavor_dict - - def get_verifier_id(): """ - Returns verifer id for current Tempest + Returns verifier id for current Tempest """ cmd = ("rally verify list-verifiers | awk '/" + CONST.__getattribute__('tempest_deployment_name') + @@ -169,7 +88,7 @@ def get_verifier_deployment_id(): def get_verifier_repo_dir(verifier_id): """ - Returns installed verfier repo directory for Tempest + Returns installed verifier repo directory for Tempest """ if not verifier_id: verifier_id = get_verifier_id() @@ -211,44 +130,42 @@ def backup_tempest_config(conf_file): """ Copy config file to tempest results directory """ - if not os.path.exists(TEMPEST_RESULTS_DIR): - os.makedirs(TEMPEST_RESULTS_DIR) - shutil.copyfile(conf_file, os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf')) -def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None, - MODE=None): +def configure_tempest(deployment_dir, image_id=None, flavor_id=None, + 
mode=None): """ Calls rally verify and updates the generated tempest.conf with given parameters """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - IMAGE_ID, FLAVOR_ID) - if MODE == 'feature_multisite': - configure_tempest_multisite_params(conf_file) + configure_tempest_update_params(conf_file, image_id, flavor_id) -def configure_tempest_defcore(deployment_dir, img_flavor_dict): +def configure_tempest_defcore(deployment_dir, image_id, flavor_id, + image_id_alt, flavor_id_alt, tenant_id): """ Add/update needed parameters into tempest.conf file """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - img_flavor_dict.get("image_id"), - img_flavor_dict.get("flavor_id")) + configure_tempest_update_params(conf_file, image_id, flavor_id) logger.debug("Updating selected tempest.conf parameters for defcore...") config = ConfigParser.RawConfigParser() config.read(conf_file) - config.set('compute', 'image_ref', img_flavor_dict.get("image_id")) - config.set('compute', 'image_ref_alt', - img_flavor_dict['image_id_alt']) - config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id")) - config.set('compute', 'flavor_ref_alt', - img_flavor_dict['flavor_id_alt']) + config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir)) + config.set('oslo_concurrency', 'lock_path', + '{}/lock_files'.format(deployment_dir)) + generate_test_accounts_file(tenant_id=tenant_id) + config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE) + config.set('scenario', 'img_dir', '{}'.format(deployment_dir)) + config.set('scenario', 'img_file', 'tempest-image') + config.set('compute', 'image_ref', image_id) + config.set('compute', 'image_ref_alt', image_id_alt) + config.set('compute', 'flavor_ref', flavor_id) + config.set('compute', 'flavor_ref_alt', flavor_id_alt) with open(conf_file, 'wb') as config_file: config.write(config_file) @@ -259,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict): shutil.copyfile(conf_file, confpath) +def generate_test_accounts_file(tenant_id): + """ + Add needed tenant and user params into test_accounts.yaml + """ + + logger.debug("Add needed params into test_accounts.yaml...") + accounts_list = [ + { + 'tenant_name': + CONST.__getattribute__('tempest_identity_tenant_name'), + 'tenant_id': str(tenant_id), + 'username': CONST.__getattribute__('tempest_identity_user_name'), + 'password': + CONST.__getattribute__('tempest_identity_user_password') + } + ] + + with open(TEST_ACCOUNTS_FILE, "w") as f: + yaml.dump(accounts_list, f, default_flow_style=False) + + def configure_tempest_update_params(tempest_conf_file, - IMAGE_ID=None, FLAVOR_ID=None): + image_id=None, flavor_id=None): """ Add/update needed parameters into tempest.conf file """ @@ -274,21 +212,15 @@ def configure_tempest_update_params(tempest_conf_file, config.set('compute', 'volume_device_name', CONST.__getattribute__('tempest_volume_device_name')) if CONST.__getattribute__('tempest_use_custom_images'): - if IMAGE_ID is not None: - config.set('compute', 'image_ref', IMAGE_ID) + if image_id is not None: + config.set('compute', 'image_ref', image_id) if IMAGE_ID_ALT is not None: config.set('compute', 'image_ref_alt', IMAGE_ID_ALT) if CONST.__getattribute__('tempest_use_custom_flavors'): - if FLAVOR_ID is not None: - config.set('compute', 'flavor_ref', FLAVOR_ID) + if flavor_id is not None: + config.set('compute', 'flavor_ref', flavor_id) if FLAVOR_ID_ALT is not None: config.set('compute', 'flavor_ref_alt', 
FLAVOR_ID_ALT) - config.set('identity', 'tenant_name', - CONST.__getattribute__('tempest_identity_tenant_name')) - config.set('identity', 'username', - CONST.__getattribute__('tempest_identity_user_name')) - config.set('identity', 'password', - CONST.__getattribute__('tempest_identity_user_password')) config.set('identity', 'region', 'RegionOne') if os_utils.is_keystone_v3(): auth_version = 'v3' @@ -323,6 +255,19 @@ def configure_tempest_update_params(tempest_conf_file, config.set(service, 'endpoint_type', CONST.__getattribute__('OS_ENDPOINT_TYPE')) + logger.debug('Add/Update required params defined in tempest_conf.yaml ' + 'into tempest.conf file') + with open(TEMPEST_CONF_YAML) as f: + conf_yaml = yaml.safe_load(f) + if conf_yaml: + sections = config.sections() + for section in conf_yaml: + if section not in sections: + config.add_section(section) + sub_conf = conf_yaml.get(section) + for key, value in sub_conf.items(): + config.set(section, key, value) + with open(tempest_conf_file, 'wb') as config_file: config.write(config_file) @@ -351,93 +296,3 @@ def configure_verifier(deployment_dir): % tempest_conf_file) else: return tempest_conf_file - - -def configure_tempest_multisite_params(tempest_conf_file): - """ - Add/update multisite parameters into tempest.conf file generated by Rally - """ - logger.debug("Updating multisite tempest.conf parameters...") - config = ConfigParser.RawConfigParser() - config.read(tempest_conf_file) - - config.set('service_available', 'kingbird', 'true') - # cmd = ("openstack endpoint show kingbird | grep publicurl |" - # "awk '{print $4}' | awk -F '/' '{print $4}'") - # kingbird_api_version = os.popen(cmd).read() - # kingbird_api_version = os_utils.get_endpoint(service_type='multisite') - - if CI_INSTALLER_TYPE == 'fuel': - # For MOS based setup, the service is accessible - # via bind host - kingbird_conf_path = "/etc/kingbird/kingbird.conf" - installer_type = CI_INSTALLER_TYPE - installer_ip = CI_INSTALLER_IP - installer_username = CONST.__getattribute__( - 'multisite_{}_installer_username'.format(installer_type)) - installer_password = CONST.__getattribute__( - 'multisite_{}_installer_password'.format(installer_type)) - - ssh_options = ("-o UserKnownHostsFile=/dev/null -o " - "StrictHostKeyChecking=no") - - # Get the controller IP from the fuel node - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s ' - '\'fuel node --env 1| grep controller | grep "True\| 1" ' - '| awk -F\| "{print \$5}"\'' % (installer_password, - ssh_options, - installer_username, - installer_ip)) - multisite_controller_ip = "".join(os.popen(cmd).read().split()) - - # Login to controller and get bind host details - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" ' - 'grep -e "^bind_" %s \\""' % (installer_password, - ssh_options, - installer_username, - installer_ip, - multisite_controller_ip, - kingbird_conf_path)) - bind_details = os.popen(cmd).read() - bind_details = "".join(bind_details.split()) - # Extract port number from the bind details - bind_port = re.findall(r"\D(\d{4})", bind_details)[0] - # Extract ip address from the bind details - bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", - bind_details)[0] - kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port) - else: - # cmd = "openstack endpoint show kingbird | grep publicurl |\ - # awk '{print $4}' | awk -F '/' '{print $3}'" - # kingbird_endpoint_url = os.popen(cmd).read() - kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird') - - try: - config.add_section("kingbird") - except 
Exception: - logger.info('kingbird section exist') - - # set the domain id - config.set('auth', 'admin_domain_name', 'default') - - config.set('kingbird', 'endpoint_type', 'publicURL') - config.set('kingbird', 'TIME_TO_SYNC', '120') - config.set('kingbird', 'endpoint_url', kingbird_endpoint_url) - config.set('kingbird', 'api_version', 'v1.0') - with open(tempest_conf_file, 'wb') as config_file: - config.write(config_file) - - backup_tempest_config(tempest_conf_file) - - -def install_verifier_ext(path): - """ - Install extension to active verifier - """ - logger.info("Installing verifier from existing repo...") - tag = get_repo_tag(path) - cmd = ("rally verify add-verifier-ext --source {0} " - "--version {1}" - .format(path, tag)) - error_msg = ("Problem while adding verifier extension from %s" % path) - ft_utils.execute_command_raise(cmd, error_msg=error_msg) diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml new file mode 100644 index 00000000..b47a9736 --- /dev/null +++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml @@ -0,0 +1,13 @@ +# This is an empty configuration file to be filled up with the desired options +# to generate a custom tempest.conf +# Examples: +# network-feature-enabled: +# port_security: True + +# volume-feature-enabled: +# api_v1: False + +# validation: +# image_ssh_user: root +# ssh_timeout: 300 + diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt index ac4e3728..df2c3126 100644 --- a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt +++ b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt @@ -1,4 +1,4 @@ # This is an empty file to be filled up with the desired tempest test cases # Examples: -#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops -#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
\ No newline at end of file +#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke] +#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py index e565f5f9..c7ad4df2 100644 --- a/functest/opnfv_tests/openstack/tempest/tempest.py +++ b/functest/opnfv_tests/openstack/tempest/tempest.py @@ -12,7 +12,6 @@ from __future__ import division import logging import os -import pkg_resources import re import shutil import subprocess @@ -24,15 +23,26 @@ from functest.core import testcase from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils + +from snaps.openstack import create_flavor +from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor +from snaps.openstack.create_project import ProjectSettings +from snaps.openstack.create_network import NetworkSettings, SubnetSettings +from snaps.openstack.create_user import UserSettings +from snaps.openstack.tests import openstack_tests +from snaps.openstack.utils import deploy_utils + """ logging configuration """ logger = logging.getLogger(__name__) -class TempestCommon(testcase.OSGCTestCase): +class TempestCommon(testcase.TestCase): def __init__(self, **kwargs): super(TempestCommon, self).__init__(**kwargs) + self.resources = TempestResourcesManager(**kwargs) self.MODE = "" self.OPTION = "" self.VERIFIER_ID = conf_utils.get_verifier_id() @@ -63,8 +73,6 @@ class TempestCommon(testcase.OSGCTestCase): else: if self.MODE == 'smoke': testr_mode = "smoke" - elif self.MODE == 'feature_multisite': - testr_mode = "'[Kk]ingbird'" elif self.MODE == 'full': testr_mode = "" else: @@ -187,25 +195,32 @@ class TempestCommon(testcase.OSGCTestCase): try: self.result = 100 * int(num_success) / int(num_executed) except ZeroDivisionError: - logger.error("No test has been executed") self.result = 0 - return + if int(num_tests) > 0: + logger.info("All tests have been skipped") + else: + logger.error("No test has been executed") + return with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR, "tempest.log"), 'r') as logfile: output = logfile.read() - error_logs = "" - for match in re.findall('(.*?)[. ]*fail ', output): - error_logs += match - skipped_testcase = "" - for match in re.findall('(.*?)[. ]*skip:', output): - skipped_testcase += match + success_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. ]*success ', output): + success_testcases.append(match) + failed_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. ]*fail ', output): + failed_testcases.append(match) + skipped_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. 
]*skip:', output): + skipped_testcases.append(match) self.details = {"tests": int(num_tests), "failures": int(num_failures), - "errors": error_logs, - "skipped": skipped_testcase} + "success": success_testcases, + "errors": failed_testcases, + "skipped": skipped_testcases} except Exception: self.result = 0 @@ -218,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase): try: if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) - image_and_flavor = conf_utils.create_tempest_resources() + resources = self.resources.create() conf_utils.configure_tempest( self.DEPLOYMENT_DIR, - IMAGE_ID=image_and_flavor.get("image_id"), - FLAVOR_ID=image_and_flavor.get("flavor_id"), - MODE=self.MODE) + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + mode=self.MODE) self.generate_test_list(self.VERIFIER_REPO_DIR) self.apply_tempest_blacklist() self.run_verifier_tests() @@ -232,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase): except Exception as e: logger.error('Error with run: %s' % e) res = testcase.TestCase.EX_RUN_ERROR + finally: + self.resources.cleanup() self.stop_time = time.time() return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. + + :return: TestCase.EX_OK + """ + logger.info("Initializing the saved state of the OpenStack deployment") + + if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): + os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) + + # Make sure that the verifier is configured + conf_utils.configure_verifier(self.DEPLOYMENT_DIR) + + os_utils.init_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(TempestCommon, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources + created by Tempest. 
+ """ + logger.info("Destroying the resources created for refstack") + + os_utils.perform_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(TempestCommon, self).clean() + class TempestSmokeSerial(TempestCommon): @@ -266,18 +320,6 @@ class TempestFullParallel(TempestCommon): self.MODE = "full" -class TempestMultisite(TempestCommon): - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = 'multisite' - TempestCommon.__init__(self, **kwargs) - self.MODE = "feature_multisite" - self.OPTION = "--concurrency 1" - conf_utils.install_verifier_ext( - pkg_resources.resource_filename('kingbird', '..')) - - class TempestCustom(TempestCommon): def __init__(self, **kwargs): @@ -296,3 +338,170 @@ class TempestDefcore(TempestCommon): TempestCommon.__init__(self, **kwargs) self.MODE = "defcore" self.OPTION = "--concurrency 1" + + +class TempestResourcesManager(object): + + def __init__(self, **kwargs): + self.os_creds = None + if 'os_creds' in kwargs: + self.os_creds = kwargs['os_creds'] + else: + self.os_creds = openstack_tests.get_credentials( + os_env_file=CONST.__getattribute__('openstack_creds')) + + self.creators = list() + + if hasattr(CONST, 'snaps_images_cirros'): + self.cirros_image_config = CONST.__getattribute__( + 'snaps_images_cirros') + else: + self.cirros_image_config = None + + def create(self, use_custom_images=False, use_custom_flavors=False, + create_project=False): + if create_project: + logger.debug("Creating project (tenant) for Tempest suite") + project_name = CONST.__getattribute__( + 'tempest_identity_tenant_name') + project_creator = deploy_utils.create_project( + self.os_creds, ProjectSettings( + name=project_name, + description=CONST.__getattribute__( + 'tempest_identity_tenant_description'))) + if (project_creator is None or + project_creator.get_project() is None): + raise Exception("Failed to create tenant") + project_id = project_creator.get_project().id + self.creators.append(project_creator) + + logger.debug("Creating user for Tempest suite") + user_creator = deploy_utils.create_user( + self.os_creds, UserSettings( + name=CONST.__getattribute__('tempest_identity_user_name'), + password=CONST.__getattribute__( + 'tempest_identity_user_password'), + project_name=project_name)) + if user_creator is None or user_creator.get_user() is None: + raise Exception("Failed to create user") + user_id = user_creator.get_user().id + self.creators.append(user_creator) + else: + project_name = None + project_id = None + user_id = None + + logger.debug("Creating private network for Tempest suite") + network_creator = deploy_utils.create_network( + self.os_creds, NetworkSettings( + name=CONST.__getattribute__('tempest_private_net_name'), + project_name=project_name, + subnet_settings=[SubnetSettings( + name=CONST.__getattribute__('tempest_private_subnet_name'), + cidr=CONST.__getattribute__('tempest_private_subnet_cidr')) + ])) + if network_creator is None or network_creator.get_network() is None: + raise Exception("Failed to create private network") + self.creators.append(network_creator) + + image_id = None + image_id_alt = None + flavor_id = None + flavor_id_alt = None + + if (CONST.__getattribute__('tempest_use_custom_images') or + use_custom_images): + logger.debug("Creating image for Tempest suite") + image_base_name = CONST.__getattribute__('openstack_image_name') + os_image_settings = openstack_tests.cirros_image_settings( + image_base_name, 
public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating image for Tempest suite") + image_creator = deploy_utils.create_image( + self.os_creds, os_image_settings) + if image_creator is None: + raise Exception('Failed to create image') + self.creators.append(image_creator) + image_id = image_creator.get_image().id + + if use_custom_images: + logger.debug("Creating 2nd image for Tempest suite") + image_base_name_alt = CONST.__getattribute__( + 'openstack_image_name_alt') + os_image_settings_alt = openstack_tests.cirros_image_settings( + image_base_name_alt, public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating 2nd image for Tempest suite") + image_creator_alt = deploy_utils.create_image( + self.os_creds, os_image_settings_alt) + if image_creator_alt is None: + raise Exception('Failed to create image') + self.creators.append(image_creator_alt) + image_id_alt = image_creator_alt.get_image().id + + if (CONST.__getattribute__('tempest_use_custom_flavors') or + use_custom_flavors): + logger.info("Creating flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata)) + flavor = flavor_creator.create() + if flavor is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator) + flavor_id = flavor.id + + if use_custom_flavors: + logger.info("Creating 2nd flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata_alt = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator_alt = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name_alt'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata_alt)) + flavor_alt = flavor_creator_alt.create() + if flavor_alt is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator_alt) + flavor_id_alt = flavor_alt.id + + print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, " + "flavor_id: %s, flavor_id_alt: %s" % ( + image_id, image_id_alt, flavor_id, flavor_id_alt,)) + + result = { + 'image_id': image_id, + 'image_id_alt': image_id_alt, + 'flavor_id': flavor_id, + 'flavor_id_alt': flavor_id_alt + } + + if create_project: + result['project_id'] = project_id + result['tenant_id'] = project_id # for compatibility + result['user_id'] = user_id + + return result + + def cleanup(self): + """ + Cleanup all OpenStack objects. Should be called on completion. + """ + for creator in reversed(self.creators): + try: + creator.clean() + except Exception as e: + logger.error('Unexpected error cleaning - %s', e) diff --git a/functest/opnfv_tests/openstack/vping/ping.sh b/functest/opnfv_tests/openstack/vping/ping.sh index 693b8682..15f5e84e 100644 --- a/functest/opnfv_tests/openstack/vping/ping.sh +++ b/functest/opnfv_tests/openstack/vping/ping.sh @@ -1,13 +1,10 @@ #!/bin/sh -while true; do - ping -c 1 $1 2>&1 >/dev/null - RES=$? 
- if [ "Z$RES" = "Z0" ] ; then - echo 'vPing OK' - break - else - echo 'vPing KO' - fi - sleep 1 -done
\ No newline at end of file + +ping -c 1 $1 2>&1 >/dev/null +RES=$? +if [ "Z$RES" = "Z0" ] ; then + echo 'vPing OK' +else + echo 'vPing KO' +fi diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py index 74fbce1b..40fcb07f 100644 --- a/functest/opnfv_tests/openstack/vping/vping_base.py +++ b/functest/opnfv_tests/openstack/vping/vping_base.py @@ -43,8 +43,14 @@ class VPingBase(testcase.TestCase): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') + self.os_creds = openstack_tests.get_credentials( - os_env_file=CONST.__getattribute__('openstack_creds')) + os_env_file=CONST.__getattribute__('openstack_creds'), + overrides=creds_override) self.creators = list() self.image_creator = None @@ -102,14 +108,33 @@ class VPingBase(testcase.TestCase): 'vping_private_subnet_name') + self.guid private_subnet_cidr = CONST.__getattribute__( 'vping_private_subnet_cidr') + + vping_network_type = None + vping_physical_network = None + vping_segmentation_id = None + + if (hasattr(CONST, 'network_type')): + vping_network_type = CONST.__getattribute__( + 'vping_network_type') + if (hasattr(CONST, 'physical_network')): + vping_physical_network = CONST.__getattribute__( + 'vping_physical_network') + if (hasattr(CONST, 'segmentation_id')): + vping_segmentation_id = CONST.__getattribute__( + 'vping_segmentation_id') + self.logger.info( "Creating network with name: '%s'" % private_net_name) self.network_creator = deploy_utils.create_network( self.os_creds, - NetworkSettings(name=private_net_name, - subnet_settings=[SubnetSettings( - name=private_subnet_name, - cidr=private_subnet_cidr)])) + NetworkSettings( + name=private_net_name, + network_type=vping_network_type, + physical_network=vping_physical_network, + segmentation_id=vping_segmentation_id, + subnet_settings=[SubnetSettings( + name=private_subnet_name, + cidr=private_subnet_cidr)])) self.creators.append(self.network_creator) self.logger.info( diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py index 9aed4c10..8088a4db 100644 --- a/functest/opnfv_tests/openstack/vping/vping_userdata.py +++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py @@ -94,7 +94,7 @@ class VPingUserdata(vping_base.VPingBase): while True: time.sleep(1) - p_console = vm_creator.get_os_vm_server_obj().get_console_output() + p_console = vm_creator.get_console_output() if "vPing OK" in p_console: self.logger.info("vPing detected!") exit_code = TestCase.EX_OK |
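Reviewer note (not part of the patch): the new generate_test_accounts_file() helper in conf_utils.py writes a one-entry account list with yaml.dump(..., default_flow_style=False). Below is a minimal standalone sketch of that call; the tenant name, user name, password and tenant id are illustrative placeholders, not values taken from this change or from a real deployment.

```python
# Standalone sketch of the test_accounts.yaml generation; all values are placeholders.
import yaml

accounts_list = [
    {
        'tenant_name': 'tempest',                         # tempest_identity_tenant_name (assumed value)
        'tenant_id': 'e2129f61ce81434c9e0f8e5bcbbbb8e4',  # placeholder tenant id
        'username': 'tempest_user',                       # tempest_identity_user_name (assumed value)
        'password': 'secret',                             # tempest_identity_user_password (assumed value)
    }
]

with open('test_accounts.yaml', 'w') as accounts_file:
    yaml.dump(accounts_list, accounts_file, default_flow_style=False)

# PyYAML sorts the keys, so the resulting file looks like:
# - password: secret
#   tenant_id: e2129f61ce81434c9e0f8e5bcbbbb8e4
#   tenant_name: tempest
#   username: tempest_user
```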
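Reviewer note (not part of the patch): configure_tempest_update_params() now merges every section/key pair found in custom_tests/tempest_conf.yaml into the tempest.conf generated by the Rally verifier. The sketch below reproduces just that merge step on its own, assuming local tempest_conf.yaml and tempest.conf files; it mirrors the logic added in this change rather than defining any new API.

```python
# Minimal Python 2 sketch of the tempest_conf.yaml -> tempest.conf override merge.
import ConfigParser  # functest still targets Python 2 here

import yaml

TEMPEST_CONF_YAML = 'tempest_conf.yaml'  # e.g. "validation: {image_ssh_user: root}"
TEMPEST_CONF_FILE = 'tempest.conf'       # tempest.conf produced by the Rally verifier

config = ConfigParser.RawConfigParser()
config.read(TEMPEST_CONF_FILE)

with open(TEMPEST_CONF_YAML) as yaml_file:
    conf_yaml = yaml.safe_load(yaml_file)

if conf_yaml:
    sections = config.sections()
    for section, sub_conf in conf_yaml.items():
        # Create missing sections, then overwrite or add each key.
        if section not in sections:
            config.add_section(section)
        for key, value in sub_conf.items():
            config.set(section, key, value)

with open(TEMPEST_CONF_FILE, 'wb') as config_file:
    config.write(config_file)
```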