Diffstat (limited to 'functest/opnfv_tests/openstack')
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.txt | 32
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 4
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml | 458
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml | 247
-rw-r--r--  functest/opnfv_tests/openstack/rally/task.yaml | 4
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 59
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 20
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 135
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 234
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py | 27
10 files changed, 1104 insertions(+), 116 deletions(-)
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt
index 95bea2b7a..099d6864d 100644
--- a/functest/opnfv_tests/openstack/rally/blacklist.txt
+++ b/functest/opnfv_tests/openstack/rally/blacklist.txt
@@ -6,6 +6,38 @@ scenario:
- joid
tests:
- NovaServers.boot_server_from_volume_and_delete
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+ # Following tests currently fail due to required Gnocchi API:
+ # HTTP 410: "This telemetry installation is configured to use
+ # Gnocchi. Please use the Gnocchi API available on the
+ # metric endpoint to retrieve data."
+ # Issue: https://bugs.launchpad.net/rally/+bug/1704322
+ - CeilometerMeters.list_matched_meters
+ - CeilometerMeters.list_meters
+ - CeilometerQueries.create_and_query_samples
+ - CeilometerResource.get_tenant_resources
+ - CeilometerResource.list_matched_resources
+ - CeilometerResource.list_resources
+ - CeilometerSamples.list_matched_samples
+ - CeilometerSamples.list_samples
+ - CeilometerStats.create_meter_and_get_stats
+ - CeilometerStats.get_stats
+ -
+ scenarios:
+ - '^os-' # all scenarios
+ installers:
+ - '.+' # all installers
+ tests:
+ # Following test currently fails due to a bug in
+ # python-ceilometerclient during fetching of event_types
+ # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138
+ # Fix: https://review.openstack.org/#/c/483402/
+ - CeilometerEvents.create_user_and_list_event_types
functionality:
-
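The entries added above select tests by matching the deployment scenario and installer names against regular expressions ('^os-' matches every scenario, '.+' every installer). A minimal sketch of how such an entry can be evaluated, assuming the YAML layout shown above; the helper below is illustrative, not the functest implementation in rally.py:

# Minimal sketch of regex-based blacklist matching (illustrative only).
import re
import yaml

BLACKLIST_SNIPPET = """
scenario:
  -
    scenarios:
      - '^os-'   # all scenarios
    installers:
      - '.+'     # all installers
    tests:
      - CeilometerMeters.list_meters
"""

def blacklisted_tests(blacklist_yaml, deploy_scenario, installer):
    """Return the set of test names excluded for this scenario/installer."""
    excluded = set()
    for item in yaml.safe_load(blacklist_yaml).get('scenario', []):
        scenario_hit = any(re.search(pat, deploy_scenario)
                           for pat in item['scenarios'])
        installer_hit = any(re.search(pat, installer)
                            for pat in item['installers'])
        if scenario_hit and installer_hit:
            excluded.update(item['tests'])
    return excluded

print(blacklisted_tests(BLACKLIST_SNIPPET, 'os-nosdn-nofeature-ha', 'fuel'))
# -> set(['CeilometerMeters.list_meters'])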
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index 6b7c49ca7..fdef8bed0 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__)
class RallyBase(testcase.OSGCTestCase):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'vm', 'all']
+ TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+ 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
GLANCE_IMAGE_PATH = os.path.join(
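With 'ceilometer' added to TESTS, the ceilometer scenario can be requested on its own or pulled in via 'all'. A small illustrative helper (not part of this patch) showing how such a list can gate which scenarios get run:

# Illustrative only; resolve_tests is a hypothetical helper, not functest code.
TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
         'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']

def resolve_tests(requested):
    """Expand 'all' and reject unknown test names."""
    if requested not in TESTS:
        raise ValueError("Unknown Rally test: %s" % requested)
    if requested == 'all':
        return [test for test in TESTS if test != 'all']
    return [requested]

assert 'ceilometer' in resolve_tests('all')
assert resolve_tests('ceilometer') == ['ceilometer']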
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..7efb5a83b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
@@ -0,0 +1,458 @@
+ CeilometerMeters.list_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.create_meter_and_get_stats:
+ -
+ args:
+ user_id: "user-id"
+ resource_id: "resource-id"
+ counter_volume: 1.0
+ counter_unit: ""
+ counter_type: "cumulative"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_matched_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerMeters.list_matched_meters:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.list_matched_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ limit: 50
+ filter_by_user_id: true
+ filter_by_project_id: true
+ metadata_query:
+ status: "terminated"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerSamples.list_samples:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 60
+ metadata_list:
+ - status: "active"
+ name: "fake_resource"
+ deleted: "False"
+ created_at: "2015-09-04T12:34:19.000000"
+ - status: "not_active"
+ name: "fake_resource_1"
+ deleted: "False"
+ created_at: "2015-09-10T06:55:12.000000"
+ batch_size: 5
+ {% endcall %}
+ args:
+ limit: 50
+ metadata_query:
+ status: "not_active"
+ sla:
+ {{ no_failures_sla() }}
+
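Each scenario above leans on Jinja2 macros (constant_runner, user_context, no_failures_sla) defined elsewhere in the rally scenario tree; they are not touched by this change. As a hedged illustration of what one entry becomes after rendering (the exact macro output may differ), a CeilometerAlarms.create_and_delete_alarm task roughly expands to the following Python structure:

# Hedged illustration only: the runner/context/sla values are assumptions based
# on common Rally task layouts, since the macro definitions are outside this diff.
rendered_task = {
    "CeilometerAlarms.create_and_delete_alarm": [{
        "args": {
            "meter_name": "ram_util",
            "threshold": 10.0,
            "type": "threshold",
            "statistic": "avg",
            "alarm_actions": ["http://localhost:8776/alarm"],
            "ok_actions": ["http://localhost:8776/ok"],
            "insufficient_data_actions": ["http://localhost:8776/notok"],
        },
        "runner": {"type": "constant", "times": 10, "concurrency": 4},
        "context": {"users": {"tenants": 3, "users_per_tenant": 2}},
        "sla": {"failure_rate": {"max": 0}},
    }],
}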
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
new file mode 100644
index 000000000..bb070cd3a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
@@ -0,0 +1,247 @@
+ CeilometerAlarms.create_alarm_and_get_history:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ state: "ok"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_delete_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_get_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_list_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarm_history:
+ -
+ args:
+ orderby: !!null
+ limit: !!null
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_alarms:
+ -
+ args:
+ filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
+ orderby: !!null
+ limit: 10
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerQueries.create_and_query_samples:
+ -
+ args:
+ filter: {"=": {"counter_unit": "instance"}}
+ orderby: !!null
+ limit: 10
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_unit: "instance"
+ counter_volume: 1.0
+ resource_id: "resource_id"
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.create_and_update_alarm:
+ -
+ args:
+ meter_name: "ram_util"
+ threshold: 10.0
+ type: "threshold"
+ statistic: "avg"
+ alarm_actions: ["http://localhost:8776/alarm"]
+ ok_actions: ["http://localhost:8776/ok"]
+ insufficient_data_actions: ["http://localhost:8776/notok"]
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_get_event:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_events:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerEvents.create_user_and_list_event_types:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_trait_descriptions:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerTraits.create_user_and_list_traits:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerStats.get_stats:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "benchmark_meter"
+ counter_type: "gauge"
+ counter_unit: "%"
+ counter_volume: 100
+ resources_per_tenant: 100
+ samples_per_resource: 100
+ timestamp_interval: 10
+ metadata_list:
+ -
+ status: "active"
+ name: "rally benchmark on"
+ deleted: "false"
+ -
+ status: "terminated"
+ name: "rally benchmark off"
+ deleted: "true"
+ {% endcall %}
+ args:
+ meter_name: "benchmark_meter"
+ filter_by_user_id: true
+ filter_by_project_id: true
+ filter_by_resource_id: true
+ metadata_query:
+ status: "terminated"
+ period: 300
+ groupby: "resource_id"
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerResource.get_tenant_resources:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ ceilometer:
+ counter_name: "cpu_util"
+ counter_type: "gauge"
+ counter_volume: 1.0
+ counter_unit: "instance"
+ {% endcall %}
+ sla:
+ {{ no_failures_sla() }}
+
+ CeilometerAlarms.list_alarms:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 033edb831..65f101fbe 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -31,6 +31,10 @@
{%- include "var/opnfv-neutron.yaml"-%}
{% endif %}
+{% if "ceilometer" in service_list %}
+{%- include "var/opnfv-ceilometer.yaml"-%}
+{% endif %}
+
{% if "quotas" in service_list %}
{%- include "var/opnfv-quotas.yaml"-%}
{% endif %}
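task.yaml is itself a Jinja2 template, so the new block only pulls the Ceilometer scenarios in when "ceilometer" appears in service_list. A self-contained sketch of that conditional-include behaviour, with inline templates standing in for the real files and macros:

from jinja2 import DictLoader, Environment

templates = {
    "task.yaml": (
        "{% if 'ceilometer' in service_list %}"
        "{% include 'var/opnfv-ceilometer.yaml' %}"
        "{% endif %}"
    ),
    # Stand-in for the real scenario file; the actual content is shown above.
    "var/opnfv-ceilometer.yaml": "CeilometerAlarms.list_alarms: []\n",
}

env = Environment(loader=DictLoader(templates))
print(env.get_template("task.yaml").render(service_list=["ceilometer", "nova"]))
# With service_list=[] the block renders to an empty string instead.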
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index 86053ccf3..4f71b5f5d 100644
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
# logging configuration """
LOGGER = logging.getLogger(__name__)
-class RefstackClient(testcase.OSGCTestCase):
+class RefstackClient(testcase.TestCase):
"""RefstackClient testcase implementation class."""
def __init__(self, **kwargs):
@@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase):
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
+ self.tempestconf = None
self.conf_path = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
@@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase):
CONST.__getattribute__('OS_INSECURE').lower() == 'true'):
self.insecure = '-k'
+ def generate_conf(self):
+ if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
+ os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
+
+ self.tempestconf = TempestConf()
+ self.tempestconf.generate_tempestconf()
+
def run_defcore(self, conf, testlist):
"""Run defcore sys command."""
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
@@ -87,7 +96,7 @@ class RefstackClient(testcase.OSGCTestCase):
stderr=subprocess.STDOUT)
def parse_refstack_result(self):
- """Parse Refstact results."""
+ """Parse Refstack results."""
try:
with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
"refstack.log"), 'r') as logfile:
@@ -144,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase):
"""
self.start_time = time.time()
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
-
try:
- tempestconf = TempestConf()
- tempestconf.generate_tempestconf()
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
self.run_defcore_default()
self.parse_refstack_result()
res = testcase.TestCase.EX_OK
except Exception:
LOGGER.exception("Error with run")
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.tempestconf.clean()
self.stop_time = time.time()
return res
@@ -194,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase):
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+
+ :return: TestCase.EX_OK
+ """
+ LOGGER.info("Initializing the saved state of the OpenStack deployment")
+
+ # Make sure that Tempest is configured
+ if not self.tempestconf:
+ self.generate_conf()
+
+ os_utils.init_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(RefstackClient, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources.
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html
+ """
+ LOGGER.info("Destroying the resources created for tempest")
+
+ os_utils.perform_tempest_cleanup(
+ self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(RefstackClient, self).clean()
+
class RefstackClientParser(object): # pylint: disable=too-few-public-methods
"""Command line argument parser helper."""
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
index 30590b9eb..db7452271 100644
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
@@ -11,13 +11,15 @@ import pkg_resources
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils import openstack_utils
from functest.utils.constants import CONST
+from functest.opnfv_tests.openstack.tempest.tempest \
+ import TempestResourcesManager
""" logging configuration """
logger = logging.getLogger(__name__)
class TempestConf(object):
- def __init__(self):
+ def __init__(self, **kwargs):
self.VERIFIER_ID = conf_utils.get_verifier_id()
self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
self.VERIFIER_ID)
@@ -27,15 +29,22 @@ class TempestConf(object):
self.confpath = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
+ self.resources = TempestResourcesManager(**kwargs)
def generate_tempestconf(self):
try:
openstack_utils.source_credentials(
CONST.__getattribute__('openstack_creds'))
- img_flavor_dict = conf_utils.create_tempest_resources(
- use_custom_images=True, use_custom_flavors=True)
+ resources = self.resources.create(create_project=True,
+ use_custom_images=True,
+ use_custom_flavors=True)
conf_utils.configure_tempest_defcore(
- self.DEPLOYMENT_DIR, img_flavor_dict)
+ self.DEPLOYMENT_DIR,
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ image_id_alt=resources.get("image_id_alt"),
+ flavor_id_alt=resources.get("flavor_id_alt"),
+ tenant_id=resources.get("project_id"))
except Exception as e:
logger.error("error with generating refstack client "
"reference tempest conf file: %s", e)
@@ -48,6 +57,9 @@ class TempestConf(object):
except Exception as e:
logger.error('Error with run: %s', e)
+ def clean(self):
+ self.resources.cleanup()
+
def main():
logging.basicConfig()
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index fd3785b99..52fa60032 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -41,6 +41,9 @@ REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
'refstack')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
+TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml')
CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
@@ -49,77 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
logger = logging.getLogger(__name__)
-def create_tempest_resources(use_custom_images=False,
- use_custom_flavors=False):
-
- logger.debug("Creating private network for Tempest suite")
- network_dic = os_utils.create_shared_network_full(
- CONST.__getattribute__('tempest_private_net_name'),
- CONST.__getattribute__('tempest_private_subnet_name'),
- CONST.__getattribute__('tempest_router_name'),
- CONST.__getattribute__('tempest_private_subnet_cidr'))
- if network_dic is None:
- raise Exception('Failed to create private network')
-
- image_id = ""
- image_id_alt = ""
- flavor_id = ""
- flavor_id_alt = ""
-
- if (CONST.__getattribute__('tempest_use_custom_images') or
- use_custom_images):
- # adding alternative image should be trivial should we need it
- logger.debug("Creating image for Tempest suite")
- _, image_id = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id is None:
- raise Exception('Failed to create image')
-
- if use_custom_images:
- logger.debug("Creating 2nd image for Tempest suite")
- _, image_id_alt = os_utils.get_or_create_image(
- CONST.__getattribute__('openstack_image_name_alt'),
- GLANCE_IMAGE_PATH,
- CONST.__getattribute__('openstack_image_disk_format'))
- if image_id_alt is None:
- raise Exception('Failed to create image')
-
- if (CONST.__getattribute__('tempest_use_custom_flavors') or
- use_custom_flavors):
- # adding alternative flavor should be trivial should we need it
- logger.debug("Creating flavor for Tempest suite")
- _, flavor_id = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id is None:
- raise Exception('Failed to create flavor')
-
- if use_custom_flavors:
- logger.debug("Creating 2nd flavor for tempest_defcore")
- _, flavor_id_alt = os_utils.get_or_create_flavor(
- CONST.__getattribute__('openstack_flavor_name_alt'),
- CONST.__getattribute__('openstack_flavor_ram'),
- CONST.__getattribute__('openstack_flavor_disk'),
- CONST.__getattribute__('openstack_flavor_vcpus'))
- if flavor_id_alt is None:
- raise Exception('Failed to create flavor')
-
- img_flavor_dict = {}
- img_flavor_dict['image_id'] = image_id
- img_flavor_dict['image_id_alt'] = image_id_alt
- img_flavor_dict['flavor_id'] = flavor_id
- img_flavor_dict['flavor_id_alt'] = flavor_id_alt
-
- return img_flavor_dict
-
-
def get_verifier_id():
"""
- Returns verifer id for current Tempest
+ Returns verifier id for current Tempest
"""
cmd = ("rally verify list-verifiers | awk '/" +
CONST.__getattribute__('tempest_deployment_name') +
@@ -153,7 +88,7 @@ def get_verifier_deployment_id():
def get_verifier_repo_dir(verifier_id):
"""
- Returns installed verfier repo directory for Tempest
+ Returns installed verifier repo directory for Tempest
"""
if not verifier_id:
verifier_id = get_verifier_id()
@@ -195,32 +130,27 @@ def backup_tempest_config(conf_file):
"""
Copy config file to tempest results directory
"""
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
-
shutil.copyfile(conf_file,
os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))
-def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None,
- MODE=None):
+def configure_tempest(deployment_dir, image_id=None, flavor_id=None,
+ mode=None):
"""
Calls rally verify and updates the generated tempest.conf with
given parameters
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- IMAGE_ID, FLAVOR_ID)
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
-def configure_tempest_defcore(deployment_dir, img_flavor_dict):
+def configure_tempest_defcore(deployment_dir, image_id, flavor_id,
+ image_id_alt, flavor_id_alt, tenant_id):
"""
Add/update needed parameters into tempest.conf file
"""
conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file,
- img_flavor_dict.get("image_id"),
- img_flavor_dict.get("flavor_id"))
+ configure_tempest_update_params(conf_file, image_id, flavor_id)
logger.debug("Updating selected tempest.conf parameters for defcore...")
config = ConfigParser.RawConfigParser()
@@ -228,14 +158,14 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
config.set('oslo_concurrency', 'lock_path',
'{}/lock_files'.format(deployment_dir))
+ generate_test_accounts_file(tenant_id=tenant_id)
+ config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
config.set('scenario', 'img_file', 'tempest-image')
- config.set('compute', 'image_ref', img_flavor_dict.get("image_id"))
- config.set('compute', 'image_ref_alt',
- img_flavor_dict['image_id_alt'])
- config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id"))
- config.set('compute', 'flavor_ref_alt',
- img_flavor_dict['flavor_id_alt'])
+ config.set('compute', 'image_ref', image_id)
+ config.set('compute', 'image_ref_alt', image_id_alt)
+ config.set('compute', 'flavor_ref', flavor_id)
+ config.set('compute', 'flavor_ref_alt', flavor_id_alt)
with open(conf_file, 'wb') as config_file:
config.write(config_file)
@@ -246,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict):
shutil.copyfile(conf_file, confpath)
+def generate_test_accounts_file(tenant_id):
+ """
+ Add needed tenant and user params into test_accounts.yaml
+ """
+
+ logger.debug("Add needed params into test_accounts.yaml...")
+ accounts_list = [
+ {
+ 'tenant_name':
+ CONST.__getattribute__('tempest_identity_tenant_name'),
+ 'tenant_id': str(tenant_id),
+ 'username': CONST.__getattribute__('tempest_identity_user_name'),
+ 'password':
+ CONST.__getattribute__('tempest_identity_user_password')
+ }
+ ]
+
+ with open(TEST_ACCOUNTS_FILE, "w") as f:
+ yaml.dump(accounts_list, f, default_flow_style=False)
+
+
def configure_tempest_update_params(tempest_conf_file,
- IMAGE_ID=None, FLAVOR_ID=None):
+ image_id=None, flavor_id=None):
"""
Add/update needed parameters into tempest.conf file
"""
@@ -261,13 +212,13 @@ def configure_tempest_update_params(tempest_conf_file,
config.set('compute', 'volume_device_name',
CONST.__getattribute__('tempest_volume_device_name'))
if CONST.__getattribute__('tempest_use_custom_images'):
- if IMAGE_ID is not None:
- config.set('compute', 'image_ref', IMAGE_ID)
+ if image_id is not None:
+ config.set('compute', 'image_ref', image_id)
if IMAGE_ID_ALT is not None:
config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
if CONST.__getattribute__('tempest_use_custom_flavors'):
- if FLAVOR_ID is not None:
- config.set('compute', 'flavor_ref', FLAVOR_ID)
+ if flavor_id is not None:
+ config.set('compute', 'flavor_ref', flavor_id)
if FLAVOR_ID_ALT is not None:
config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
config.set('identity', 'region', 'RegionOne')
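generate_test_accounts_file() produces the pre-provisioned credentials file that Tempest consumes through the auth.test_accounts_file option set just above. A small round-trip sketch of that file format; the key names mirror the code above, while the concrete values are placeholders:

import yaml

accounts = [{
    'tenant_name': 'tempest',                         # placeholder values only
    'tenant_id': 'f47ac10b58cc4372a5670e02b2c3d479',
    'username': 'tempest_user',
    'password': 'secret',
}]

with open('/tmp/test_accounts.yaml', 'w') as accounts_file:
    yaml.dump(accounts, accounts_file, default_flow_style=False)

with open('/tmp/test_accounts.yaml') as accounts_file:
    assert yaml.safe_load(accounts_file)[0]['username'] == 'tempest_user'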
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 003d4ea41..c7ad4df2f 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -23,15 +23,26 @@ from functest.core import testcase
from functest.opnfv_tests.openstack.tempest import conf_utils
from functest.utils.constants import CONST
import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+from snaps.openstack import create_flavor
+from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor
+from snaps.openstack.create_project import ProjectSettings
+from snaps.openstack.create_network import NetworkSettings, SubnetSettings
+from snaps.openstack.create_user import UserSettings
+from snaps.openstack.tests import openstack_tests
+from snaps.openstack.utils import deploy_utils
+
""" logging configuration """
logger = logging.getLogger(__name__)
-class TempestCommon(testcase.OSGCTestCase):
+class TempestCommon(testcase.TestCase):
def __init__(self, **kwargs):
super(TempestCommon, self).__init__(**kwargs)
+ self.resources = TempestResourcesManager(**kwargs)
self.MODE = ""
self.OPTION = ""
self.VERIFIER_ID = conf_utils.get_verifier_id()
@@ -184,9 +195,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
self.result = 100 * int(num_success) / int(num_executed)
except ZeroDivisionError:
- logger.error("No test has been executed")
self.result = 0
- return
+ if int(num_tests) > 0:
+ logger.info("All tests have been skipped")
+ else:
+ logger.error("No test has been executed")
+ return
with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
"tempest.log"), 'r') as logfile:
@@ -219,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase):
try:
if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- image_and_flavor = conf_utils.create_tempest_resources()
+ resources = self.resources.create()
conf_utils.configure_tempest(
self.DEPLOYMENT_DIR,
- IMAGE_ID=image_and_flavor.get("image_id"),
- FLAVOR_ID=image_and_flavor.get("flavor_id"),
- MODE=self.MODE)
+ image_id=resources.get("image_id"),
+ flavor_id=resources.get("flavor_id"),
+ mode=self.MODE)
self.generate_test_list(self.VERIFIER_REPO_DIR)
self.apply_tempest_blacklist()
self.run_verifier_tests()
@@ -233,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase):
except Exception as e:
logger.error('Error with run: %s' % e)
res = testcase.TestCase.EX_RUN_ERROR
+ finally:
+ self.resources.cleanup()
self.stop_time = time.time()
return res
+ def create_snapshot(self):
+ """
+ Run the Tempest cleanup utility to initialize OS state.
+
+ :return: TestCase.EX_OK
+ """
+ logger.info("Initializing the saved state of the OpenStack deployment")
+
+ if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
+ os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
+
+ # Make sure that the verifier is configured
+ conf_utils.configure_verifier(self.DEPLOYMENT_DIR)
+
+ os_utils.init_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup-init.log")
+ )
+
+ return super(TempestCommon, self).create_snapshot()
+
+ def clean(self):
+ """
+ Run the Tempest cleanup utility to delete and destroy OS resources
+ created by Tempest.
+ """
+ logger.info("Destroying the resources created for refstack")
+
+ os_utils.perform_tempest_cleanup(
+ self.DEPLOYMENT_DIR, 'tempest.conf',
+ os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
+ "tempest-cleanup.log")
+ )
+
+ return super(TempestCommon, self).clean()
+
class TempestSmokeSerial(TempestCommon):
@@ -285,3 +338,170 @@ class TempestDefcore(TempestCommon):
TempestCommon.__init__(self, **kwargs)
self.MODE = "defcore"
self.OPTION = "--concurrency 1"
+
+
+class TempestResourcesManager(object):
+
+ def __init__(self, **kwargs):
+ self.os_creds = None
+ if 'os_creds' in kwargs:
+ self.os_creds = kwargs['os_creds']
+ else:
+ self.os_creds = openstack_tests.get_credentials(
+ os_env_file=CONST.__getattribute__('openstack_creds'))
+
+ self.creators = list()
+
+ if hasattr(CONST, 'snaps_images_cirros'):
+ self.cirros_image_config = CONST.__getattribute__(
+ 'snaps_images_cirros')
+ else:
+ self.cirros_image_config = None
+
+ def create(self, use_custom_images=False, use_custom_flavors=False,
+ create_project=False):
+ if create_project:
+ logger.debug("Creating project (tenant) for Tempest suite")
+ project_name = CONST.__getattribute__(
+ 'tempest_identity_tenant_name')
+ project_creator = deploy_utils.create_project(
+ self.os_creds, ProjectSettings(
+ name=project_name,
+ description=CONST.__getattribute__(
+ 'tempest_identity_tenant_description')))
+ if (project_creator is None or
+ project_creator.get_project() is None):
+ raise Exception("Failed to create tenant")
+ project_id = project_creator.get_project().id
+ self.creators.append(project_creator)
+
+ logger.debug("Creating user for Tempest suite")
+ user_creator = deploy_utils.create_user(
+ self.os_creds, UserSettings(
+ name=CONST.__getattribute__('tempest_identity_user_name'),
+ password=CONST.__getattribute__(
+ 'tempest_identity_user_password'),
+ project_name=project_name))
+ if user_creator is None or user_creator.get_user() is None:
+ raise Exception("Failed to create user")
+ user_id = user_creator.get_user().id
+ self.creators.append(user_creator)
+ else:
+ project_name = None
+ project_id = None
+ user_id = None
+
+ logger.debug("Creating private network for Tempest suite")
+ network_creator = deploy_utils.create_network(
+ self.os_creds, NetworkSettings(
+ name=CONST.__getattribute__('tempest_private_net_name'),
+ project_name=project_name,
+ subnet_settings=[SubnetSettings(
+ name=CONST.__getattribute__('tempest_private_subnet_name'),
+ cidr=CONST.__getattribute__('tempest_private_subnet_cidr'))
+ ]))
+ if network_creator is None or network_creator.get_network() is None:
+ raise Exception("Failed to create private network")
+ self.creators.append(network_creator)
+
+ image_id = None
+ image_id_alt = None
+ flavor_id = None
+ flavor_id_alt = None
+
+ if (CONST.__getattribute__('tempest_use_custom_images') or
+ use_custom_images):
+ logger.debug("Creating image for Tempest suite")
+ image_base_name = CONST.__getattribute__('openstack_image_name')
+ os_image_settings = openstack_tests.cirros_image_settings(
+ image_base_name, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating image for Tempest suite")
+ image_creator = deploy_utils.create_image(
+ self.os_creds, os_image_settings)
+ if image_creator is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator)
+ image_id = image_creator.get_image().id
+
+ if use_custom_images:
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_base_name_alt = CONST.__getattribute__(
+ 'openstack_image_name_alt')
+ os_image_settings_alt = openstack_tests.cirros_image_settings(
+ image_base_name_alt, public=True,
+ image_metadata=self.cirros_image_config)
+ logger.debug("Creating 2nd image for Tempest suite")
+ image_creator_alt = deploy_utils.create_image(
+ self.os_creds, os_image_settings_alt)
+ if image_creator_alt is None:
+ raise Exception('Failed to create image')
+ self.creators.append(image_creator_alt)
+ image_id_alt = image_creator_alt.get_image().id
+
+ if (CONST.__getattribute__('tempest_use_custom_flavors') or
+ use_custom_flavors):
+ logger.info("Creating flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata))
+ flavor = flavor_creator.create()
+ if flavor is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator)
+ flavor_id = flavor.id
+
+ if use_custom_flavors:
+ logger.info("Creating 2nd flavor for Tempest suite")
+ scenario = ft_utils.get_scenario()
+ flavor_metadata_alt = None
+ if 'ovs' in scenario or 'fdio' in scenario:
+ flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE
+ flavor_creator_alt = OpenStackFlavor(
+ self.os_creds, FlavorSettings(
+ name=CONST.__getattribute__('openstack_flavor_name_alt'),
+ ram=CONST.__getattribute__('openstack_flavor_ram'),
+ disk=CONST.__getattribute__('openstack_flavor_disk'),
+ vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
+ metadata=flavor_metadata_alt))
+ flavor_alt = flavor_creator_alt.create()
+ if flavor_alt is None:
+ raise Exception('Failed to create flavor')
+ self.creators.append(flavor_creator_alt)
+ flavor_id_alt = flavor_alt.id
+
+ print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, "
+ "flavor_id: %s, flavor_id_alt: %s" % (
+ image_id, image_id_alt, flavor_id, flavor_id_alt,))
+
+ result = {
+ 'image_id': image_id,
+ 'image_id_alt': image_id_alt,
+ 'flavor_id': flavor_id,
+ 'flavor_id_alt': flavor_id_alt
+ }
+
+ if create_project:
+ result['project_id'] = project_id
+ result['tenant_id'] = project_id # for compatibility
+ result['user_id'] = user_id
+
+ return result
+
+ def cleanup(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ for creator in reversed(self.creators):
+ try:
+ creator.clean()
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
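TempestResourcesManager appends every SNAPS creator it instantiates to self.creators, and cleanup() walks that list in reverse so dependent resources (users, networks, images, flavors) are removed before the project that owns them. A stand-in sketch of the pattern; FakeCreator is illustrative, not a SNAPS class:

class FakeCreator(object):
    def __init__(self, name):
        self.name = name

    def clean(self):
        print("cleaning %s" % self.name)

creators = [FakeCreator("project"), FakeCreator("network"), FakeCreator("image")]
for creator in reversed(creators):      # image, then network, then project
    try:
        creator.clean()
    except Exception as exc:            # keep going even if one cleanup fails
        print("Unexpected error cleaning - %s" % exc)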
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 967472fac..40fcb07f0 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -108,14 +108,33 @@ class VPingBase(testcase.TestCase):
'vping_private_subnet_name') + self.guid
private_subnet_cidr = CONST.__getattribute__(
'vping_private_subnet_cidr')
+
+ vping_network_type = None
+ vping_physical_network = None
+ vping_segmentation_id = None
+
+ if hasattr(CONST, 'vping_network_type'):
+ vping_network_type = CONST.__getattribute__(
+ 'vping_network_type')
+ if hasattr(CONST, 'vping_physical_network'):
+ vping_physical_network = CONST.__getattribute__(
+ 'vping_physical_network')
+ if hasattr(CONST, 'vping_segmentation_id'):
+ vping_segmentation_id = CONST.__getattribute__(
+ 'vping_segmentation_id')
+
self.logger.info(
"Creating network with name: '%s'" % private_net_name)
self.network_creator = deploy_utils.create_network(
self.os_creds,
- NetworkSettings(name=private_net_name,
- subnet_settings=[SubnetSettings(
- name=private_subnet_name,
- cidr=private_subnet_cidr)]))
+ NetworkSettings(
+ name=private_net_name,
+ network_type=vping_network_type,
+ physical_network=vping_physical_network,
+ segmentation_id=vping_segmentation_id,
+ subnet_settings=[SubnetSettings(
+ name=private_subnet_name,
+ cidr=private_subnet_cidr)]))
self.creators.append(self.network_creator)
self.logger.info(
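The extra keyword arguments let vping create its private network as a provider network whenever the corresponding CONST entries are defined. A hedged usage sketch of the same NetworkSettings call; the keyword names mirror the call in the patch, the provider values are placeholders:

from snaps.openstack.create_network import NetworkSettings, SubnetSettings

# Placeholder provider values for illustration only.
settings = NetworkSettings(
    name='vping-net',
    network_type='vlan',
    physical_network='physnet1',
    segmentation_id=1000,
    subnet_settings=[SubnetSettings(name='vping-subnet',
                                    cidr='192.168.130.0/24')])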