Diffstat (limited to 'functest/opnfv_tests')
21 files changed, 2009 insertions, 599 deletions
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt index 95bea2b7..099d6864 100644 --- a/functest/opnfv_tests/openstack/rally/blacklist.txt +++ b/functest/opnfv_tests/openstack/rally/blacklist.txt @@ -6,6 +6,38 @@ scenario: - joid tests: - NovaServers.boot_server_from_volume_and_delete + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following tests currently fail due to required Gnocchi API: + # HTTP 410: "This telemetry installation is configured to use + # Gnocchi. Please use the Gnocchi API available on the + # metric endpoint to retrieve data." + # Issue: https://bugs.launchpad.net/rally/+bug/1704322 + - CeilometerMeters.list_matched_meters + - CeilometerMeters.list_meters + - CeilometerQueries.create_and_query_samples + - CeilometerResource.get_tenant_resources + - CeilometerResource.list_matched_resources + - CeilometerResource.list_resources + - CeilometerSamples.list_matched_samples + - CeilometerSamples.list_samples + - CeilometerStats.create_meter_and_get_stats + - CeilometerStats.get_stats + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following test currently fails due to but in + # python-ceilometerclient during fetching of event_types + # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138 + # Fix: https://review.openstack.org/#/c/483402/ + - CeilometerEvents.create_user_and_list_event_types functionality: - diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py index 6b7c49ca..fdef8bed 100644 --- a/functest/opnfv_tests/openstack/rally/rally.py +++ b/functest/opnfv_tests/openstack/rally/rally.py @@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__) class RallyBase(testcase.OSGCTestCase): """Base class form Rally testcases implementation.""" - TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', - 'neutron', 'nova', 'quotas', 'vm', 'all'] + TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat', + 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all'] GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name') GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name') GLANCE_IMAGE_PATH = os.path.join( diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml new file mode 100644 index 00000000..7efb5a83 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml @@ -0,0 +1,458 @@ + CeilometerMeters.list_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + 
counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ 
user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.create_meter_and_get_stats: + - + args: + user_id: "user-id" + resource_id: "resource-id" + counter_volume: 1.0 + counter_unit: "" + counter_type: "cumulative" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 
+ groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_matched_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: "fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + + CeilometerMeters.list_matched_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_matched_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: 
"fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + batch_size: 5 + {% endcall %} + args: + limit: 50 + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml new file mode 100644 index 00000000..bb070cd3 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml @@ -0,0 +1,247 @@ + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + 
insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 + groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% 
call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml index 033edb83..65f101fb 100644 --- a/functest/opnfv_tests/openstack/rally/task.yaml +++ b/functest/opnfv_tests/openstack/rally/task.yaml @@ -31,6 +31,10 @@ {%- include "var/opnfv-neutron.yaml"-%} {% endif %} +{% if "ceilometer" in service_list %} +{%- include "var/opnfv-ceilometer.yaml"-%} +{% endif %} + {% if "quotas" in service_list %} {%- include "var/opnfv-quotas.yaml"-%} {% endif %} diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py index 86053ccf..4f71b5f5 100644 --- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py +++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py @@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \ from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils # logging configuration """ LOGGER = logging.getLogger(__name__) -class RefstackClient(testcase.OSGCTestCase): +class RefstackClient(testcase.TestCase): """RefstackClient testcase implementation class.""" def __init__(self, **kwargs): @@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase): if "case_name" not in kwargs: kwargs["case_name"] = "refstack_defcore" super(RefstackClient, self).__init__(**kwargs) + self.tempestconf = None self.conf_path = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') @@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase): CONST.__getattribute__('OS_INSECURE').lower() == 'true'): self.insecure = '-k' + def generate_conf(self): + if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): + os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) + + self.tempestconf = TempestConf() + self.tempestconf.generate_tempestconf() + def run_defcore(self, conf, testlist): """Run defcore sys command.""" cmd = ("refstack-client test {0} -c {1} -v --test-list {2}" @@ -87,7 +96,7 @@ class RefstackClient(testcase.OSGCTestCase): stderr=subprocess.STDOUT) def parse_refstack_result(self): - """Parse Refstact results.""" + """Parse Refstack results.""" try: with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, "refstack.log"), 'r') as logfile: @@ -144,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase): """ self.start_time = time.time() - if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): - os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) - try: - tempestconf = TempestConf() - tempestconf.generate_tempestconf() + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() self.run_defcore_default() self.parse_refstack_result() res = testcase.TestCase.EX_OK except Exception: LOGGER.exception("Error with run") res = testcase.TestCase.EX_RUN_ERROR + finally: + self.tempestconf.clean() self.stop_time = time.time() return res 
@@ -194,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase): return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. + For details, see https://docs.openstack.org/tempest/latest/cleanup.html + + :return: TestCase.EX_OK + """ + LOGGER.info("Initializing the saved state of the OpenStack deployment") + + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() + + os_utils.init_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(RefstackClient, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources. + For details, see https://docs.openstack.org/tempest/latest/cleanup.html + """ + LOGGER.info("Destroying the resources created for tempest") + + os_utils.perform_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(RefstackClient, self).clean() + class RefstackClientParser(object): # pylint: disable=too-few-public-methods """Command line argument parser helper.""" diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py index 30590b9e..db745227 100644 --- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py +++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py @@ -11,13 +11,15 @@ import pkg_resources from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils import openstack_utils from functest.utils.constants import CONST +from functest.opnfv_tests.openstack.tempest.tempest \ + import TempestResourcesManager """ logging configuration """ logger = logging.getLogger(__name__) class TempestConf(object): - def __init__(self): + def __init__(self, **kwargs): self.VERIFIER_ID = conf_utils.get_verifier_id() self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir( self.VERIFIER_ID) @@ -27,15 +29,22 @@ class TempestConf(object): self.confpath = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') + self.resources = TempestResourcesManager(**kwargs) def generate_tempestconf(self): try: openstack_utils.source_credentials( CONST.__getattribute__('openstack_creds')) - img_flavor_dict = conf_utils.create_tempest_resources( - use_custom_images=True, use_custom_flavors=True) + resources = self.resources.create(create_project=True, + use_custom_images=True, + use_custom_flavors=True) conf_utils.configure_tempest_defcore( - self.DEPLOYMENT_DIR, img_flavor_dict) + self.DEPLOYMENT_DIR, + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + image_id_alt=resources.get("image_id_alt"), + flavor_id_alt=resources.get("flavor_id_alt"), + tenant_id=resources.get("project_id")) except Exception as e: logger.error("error with generating refstack client " "reference tempest conf file: %s", e) @@ -48,6 +57,9 @@ class TempestConf(object): except Exception as e: logger.error('Error with run: %s', e) + def clean(self): + self.resources.cleanup() + def main(): logging.basicConfig() diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py index 0b87440b..19c6a87f 100644 --- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py +++ 
b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py @@ -28,9 +28,14 @@ class SnapsTestRunner(unit.Suite): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') self.os_creds = openstack_tests.get_credentials( os_env_file=CONST.__getattribute__('openstack_creds'), - proxy_settings_str=None, ssh_proxy_cmd=None) + proxy_settings_str=None, ssh_proxy_cmd=None, + overrides=creds_override) if 'ext_net_name' in kwargs: self.ext_net_name = kwargs['ext_net_name'] diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py index 7c88fc5d..52fa6003 100644 --- a/functest/opnfv_tests/openstack/tempest/conf_utils.py +++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py @@ -11,7 +11,6 @@ import ConfigParser import logging import os import pkg_resources -import re import shutil import subprocess @@ -42,6 +41,9 @@ REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'refstack') TEMPEST_CONF_YAML = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml') +TEST_ACCOUNTS_FILE = pkg_resources.resource_filename( + 'functest', + 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml') CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE') CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') @@ -50,96 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') logger = logging.getLogger(__name__) -def create_tempest_resources(use_custom_images=False, - use_custom_flavors=False): - keystone_client = os_utils.get_keystone_client() - - logger.debug("Creating tenant and user for Tempest suite") - tenant_id = os_utils.create_tenant( - keystone_client, - CONST.__getattribute__('tempest_identity_tenant_name'), - CONST.__getattribute__('tempest_identity_tenant_description')) - if not tenant_id: - logger.error("Failed to create %s tenant" - % CONST.__getattribute__('tempest_identity_tenant_name')) - - user_id = os_utils.create_user( - keystone_client, - CONST.__getattribute__('tempest_identity_user_name'), - CONST.__getattribute__('tempest_identity_user_password'), - None, tenant_id) - if not user_id: - logger.error("Failed to create %s user" % - CONST.__getattribute__('tempest_identity_user_name')) - - logger.debug("Creating private network for Tempest suite") - network_dic = os_utils.create_shared_network_full( - CONST.__getattribute__('tempest_private_net_name'), - CONST.__getattribute__('tempest_private_subnet_name'), - CONST.__getattribute__('tempest_router_name'), - CONST.__getattribute__('tempest_private_subnet_cidr')) - if network_dic is None: - raise Exception('Failed to create private network') - - image_id = "" - image_id_alt = "" - flavor_id = "" - flavor_id_alt = "" - - if (CONST.__getattribute__('tempest_use_custom_images') or - use_custom_images): - # adding alternative image should be trivial should we need it - logger.debug("Creating image for Tempest suite") - _, image_id = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name'), - GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id is None: - raise Exception('Failed to create image') - - if use_custom_images: - logger.debug("Creating 2nd image for Tempest suite") - _, image_id_alt = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name_alt'), - 
GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id_alt is None: - raise Exception('Failed to create image') - - if (CONST.__getattribute__('tempest_use_custom_flavors') or - use_custom_flavors): - # adding alternative flavor should be trivial should we need it - logger.debug("Creating flavor for Tempest suite") - _, flavor_id = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id is None: - raise Exception('Failed to create flavor') - - if use_custom_flavors: - logger.debug("Creating 2nd flavor for tempest_defcore") - _, flavor_id_alt = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name_alt'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id_alt is None: - raise Exception('Failed to create flavor') - - img_flavor_dict = {} - img_flavor_dict['image_id'] = image_id - img_flavor_dict['image_id_alt'] = image_id_alt - img_flavor_dict['flavor_id'] = flavor_id - img_flavor_dict['flavor_id_alt'] = flavor_id_alt - - return img_flavor_dict - - def get_verifier_id(): """ - Returns verifer id for current Tempest + Returns verifier id for current Tempest """ cmd = ("rally verify list-verifiers | awk '/" + CONST.__getattribute__('tempest_deployment_name') + @@ -173,7 +88,7 @@ def get_verifier_deployment_id(): def get_verifier_repo_dir(verifier_id): """ - Returns installed verfier repo directory for Tempest + Returns installed verifier repo directory for Tempest """ if not verifier_id: verifier_id = get_verifier_id() @@ -215,34 +130,27 @@ def backup_tempest_config(conf_file): """ Copy config file to tempest results directory """ - if not os.path.exists(TEMPEST_RESULTS_DIR): - os.makedirs(TEMPEST_RESULTS_DIR) - shutil.copyfile(conf_file, os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf')) -def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None, - MODE=None): +def configure_tempest(deployment_dir, image_id=None, flavor_id=None, + mode=None): """ Calls rally verify and updates the generated tempest.conf with given parameters """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - IMAGE_ID, FLAVOR_ID) - if MODE == 'feature_multisite': - configure_tempest_multisite_params(conf_file) + configure_tempest_update_params(conf_file, image_id, flavor_id) -def configure_tempest_defcore(deployment_dir, img_flavor_dict): +def configure_tempest_defcore(deployment_dir, image_id, flavor_id, + image_id_alt, flavor_id_alt, tenant_id): """ Add/update needed parameters into tempest.conf file """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - img_flavor_dict.get("image_id"), - img_flavor_dict.get("flavor_id")) + configure_tempest_update_params(conf_file, image_id, flavor_id) logger.debug("Updating selected tempest.conf parameters for defcore...") config = ConfigParser.RawConfigParser() @@ -250,14 +158,14 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict): config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir)) config.set('oslo_concurrency', 'lock_path', '{}/lock_files'.format(deployment_dir)) + generate_test_accounts_file(tenant_id=tenant_id) + config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE) 
config.set('scenario', 'img_dir', '{}'.format(deployment_dir)) config.set('scenario', 'img_file', 'tempest-image') - config.set('compute', 'image_ref', img_flavor_dict.get("image_id")) - config.set('compute', 'image_ref_alt', - img_flavor_dict['image_id_alt']) - config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id")) - config.set('compute', 'flavor_ref_alt', - img_flavor_dict['flavor_id_alt']) + config.set('compute', 'image_ref', image_id) + config.set('compute', 'image_ref_alt', image_id_alt) + config.set('compute', 'flavor_ref', flavor_id) + config.set('compute', 'flavor_ref_alt', flavor_id_alt) with open(conf_file, 'wb') as config_file: config.write(config_file) @@ -268,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict): shutil.copyfile(conf_file, confpath) +def generate_test_accounts_file(tenant_id): + """ + Add needed tenant and user params into test_accounts.yaml + """ + + logger.debug("Add needed params into test_accounts.yaml...") + accounts_list = [ + { + 'tenant_name': + CONST.__getattribute__('tempest_identity_tenant_name'), + 'tenant_id': str(tenant_id), + 'username': CONST.__getattribute__('tempest_identity_user_name'), + 'password': + CONST.__getattribute__('tempest_identity_user_password') + } + ] + + with open(TEST_ACCOUNTS_FILE, "w") as f: + yaml.dump(accounts_list, f, default_flow_style=False) + + def configure_tempest_update_params(tempest_conf_file, - IMAGE_ID=None, FLAVOR_ID=None): + image_id=None, flavor_id=None): """ Add/update needed parameters into tempest.conf file """ @@ -283,21 +212,15 @@ def configure_tempest_update_params(tempest_conf_file, config.set('compute', 'volume_device_name', CONST.__getattribute__('tempest_volume_device_name')) if CONST.__getattribute__('tempest_use_custom_images'): - if IMAGE_ID is not None: - config.set('compute', 'image_ref', IMAGE_ID) + if image_id is not None: + config.set('compute', 'image_ref', image_id) if IMAGE_ID_ALT is not None: config.set('compute', 'image_ref_alt', IMAGE_ID_ALT) if CONST.__getattribute__('tempest_use_custom_flavors'): - if FLAVOR_ID is not None: - config.set('compute', 'flavor_ref', FLAVOR_ID) + if flavor_id is not None: + config.set('compute', 'flavor_ref', flavor_id) if FLAVOR_ID_ALT is not None: config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT) - config.set('identity', 'tenant_name', - CONST.__getattribute__('tempest_identity_tenant_name')) - config.set('identity', 'username', - CONST.__getattribute__('tempest_identity_user_name')) - config.set('identity', 'password', - CONST.__getattribute__('tempest_identity_user_password')) config.set('identity', 'region', 'RegionOne') if os_utils.is_keystone_v3(): auth_version = 'v3' @@ -373,93 +296,3 @@ def configure_verifier(deployment_dir): % tempest_conf_file) else: return tempest_conf_file - - -def configure_tempest_multisite_params(tempest_conf_file): - """ - Add/update multisite parameters into tempest.conf file generated by Rally - """ - logger.debug("Updating multisite tempest.conf parameters...") - config = ConfigParser.RawConfigParser() - config.read(tempest_conf_file) - - config.set('service_available', 'kingbird', 'true') - # cmd = ("openstack endpoint show kingbird | grep publicurl |" - # "awk '{print $4}' | awk -F '/' '{print $4}'") - # kingbird_api_version = os.popen(cmd).read() - # kingbird_api_version = os_utils.get_endpoint(service_type='multisite') - - if CI_INSTALLER_TYPE == 'fuel': - # For MOS based setup, the service is accessible - # via bind host - kingbird_conf_path = 
"/etc/kingbird/kingbird.conf" - installer_type = CI_INSTALLER_TYPE - installer_ip = CI_INSTALLER_IP - installer_username = CONST.__getattribute__( - 'multisite_{}_installer_username'.format(installer_type)) - installer_password = CONST.__getattribute__( - 'multisite_{}_installer_password'.format(installer_type)) - - ssh_options = ("-o UserKnownHostsFile=/dev/null -o " - "StrictHostKeyChecking=no") - - # Get the controller IP from the fuel node - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s ' - '\'fuel node --env 1| grep controller | grep "True\| 1" ' - '| awk -F\| "{print \$5}"\'' % (installer_password, - ssh_options, - installer_username, - installer_ip)) - multisite_controller_ip = "".join(os.popen(cmd).read().split()) - - # Login to controller and get bind host details - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" ' - 'grep -e "^bind_" %s \\""' % (installer_password, - ssh_options, - installer_username, - installer_ip, - multisite_controller_ip, - kingbird_conf_path)) - bind_details = os.popen(cmd).read() - bind_details = "".join(bind_details.split()) - # Extract port number from the bind details - bind_port = re.findall(r"\D(\d{4})", bind_details)[0] - # Extract ip address from the bind details - bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", - bind_details)[0] - kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port) - else: - # cmd = "openstack endpoint show kingbird | grep publicurl |\ - # awk '{print $4}' | awk -F '/' '{print $3}'" - # kingbird_endpoint_url = os.popen(cmd).read() - kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird') - - try: - config.add_section("kingbird") - except Exception: - logger.info('kingbird section exist') - - # set the domain id - config.set('auth', 'admin_domain_name', 'default') - - config.set('kingbird', 'endpoint_type', 'publicURL') - config.set('kingbird', 'TIME_TO_SYNC', '120') - config.set('kingbird', 'endpoint_url', kingbird_endpoint_url) - config.set('kingbird', 'api_version', 'v1.0') - with open(tempest_conf_file, 'wb') as config_file: - config.write(config_file) - - backup_tempest_config(tempest_conf_file) - - -def install_verifier_ext(path): - """ - Install extension to active verifier - """ - logger.info("Installing verifier from existing repo...") - tag = get_repo_tag(path) - cmd = ("rally verify add-verifier-ext --source {0} " - "--version {1}" - .format(path, tag)) - error_msg = ("Problem while adding verifier extension from %s" % path) - ft_utils.execute_command_raise(cmd, error_msg=error_msg) diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py index b00bc6af..c7ad4df2 100644 --- a/functest/opnfv_tests/openstack/tempest/tempest.py +++ b/functest/opnfv_tests/openstack/tempest/tempest.py @@ -12,7 +12,6 @@ from __future__ import division import logging import os -import pkg_resources import re import shutil import subprocess @@ -24,15 +23,26 @@ from functest.core import testcase from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils + +from snaps.openstack import create_flavor +from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor +from snaps.openstack.create_project import ProjectSettings +from snaps.openstack.create_network import NetworkSettings, SubnetSettings +from snaps.openstack.create_user import UserSettings +from snaps.openstack.tests import 
openstack_tests +from snaps.openstack.utils import deploy_utils + """ logging configuration """ logger = logging.getLogger(__name__) -class TempestCommon(testcase.OSGCTestCase): +class TempestCommon(testcase.TestCase): def __init__(self, **kwargs): super(TempestCommon, self).__init__(**kwargs) + self.resources = TempestResourcesManager(**kwargs) self.MODE = "" self.OPTION = "" self.VERIFIER_ID = conf_utils.get_verifier_id() @@ -63,8 +73,6 @@ class TempestCommon(testcase.OSGCTestCase): else: if self.MODE == 'smoke': testr_mode = "smoke" - elif self.MODE == 'feature_multisite': - testr_mode = "'[Kk]ingbird'" elif self.MODE == 'full': testr_mode = "" else: @@ -187,9 +195,12 @@ class TempestCommon(testcase.OSGCTestCase): try: self.result = 100 * int(num_success) / int(num_executed) except ZeroDivisionError: - logger.error("No test has been executed") self.result = 0 - return + if int(num_tests) > 0: + logger.info("All tests have been skipped") + else: + logger.error("No test has been executed") + return with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR, "tempest.log"), 'r') as logfile: @@ -222,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase): try: if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) - image_and_flavor = conf_utils.create_tempest_resources() + resources = self.resources.create() conf_utils.configure_tempest( self.DEPLOYMENT_DIR, - IMAGE_ID=image_and_flavor.get("image_id"), - FLAVOR_ID=image_and_flavor.get("flavor_id"), - MODE=self.MODE) + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + mode=self.MODE) self.generate_test_list(self.VERIFIER_REPO_DIR) self.apply_tempest_blacklist() self.run_verifier_tests() @@ -236,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase): except Exception as e: logger.error('Error with run: %s' % e) res = testcase.TestCase.EX_RUN_ERROR + finally: + self.resources.cleanup() self.stop_time = time.time() return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. + + :return: TestCase.EX_OK + """ + logger.info("Initializing the saved state of the OpenStack deployment") + + if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): + os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) + + # Make sure that the verifier is configured + conf_utils.configure_verifier(self.DEPLOYMENT_DIR) + + os_utils.init_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(TempestCommon, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources + created by Tempest. 
+ """ + logger.info("Destroying the resources created for refstack") + + os_utils.perform_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(TempestCommon, self).clean() + class TempestSmokeSerial(TempestCommon): @@ -270,18 +320,6 @@ class TempestFullParallel(TempestCommon): self.MODE = "full" -class TempestMultisite(TempestCommon): - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = 'multisite' - TempestCommon.__init__(self, **kwargs) - self.MODE = "feature_multisite" - self.OPTION = "--concurrency 1" - conf_utils.install_verifier_ext( - pkg_resources.resource_filename('kingbird', '..')) - - class TempestCustom(TempestCommon): def __init__(self, **kwargs): @@ -300,3 +338,170 @@ class TempestDefcore(TempestCommon): TempestCommon.__init__(self, **kwargs) self.MODE = "defcore" self.OPTION = "--concurrency 1" + + +class TempestResourcesManager(object): + + def __init__(self, **kwargs): + self.os_creds = None + if 'os_creds' in kwargs: + self.os_creds = kwargs['os_creds'] + else: + self.os_creds = openstack_tests.get_credentials( + os_env_file=CONST.__getattribute__('openstack_creds')) + + self.creators = list() + + if hasattr(CONST, 'snaps_images_cirros'): + self.cirros_image_config = CONST.__getattribute__( + 'snaps_images_cirros') + else: + self.cirros_image_config = None + + def create(self, use_custom_images=False, use_custom_flavors=False, + create_project=False): + if create_project: + logger.debug("Creating project (tenant) for Tempest suite") + project_name = CONST.__getattribute__( + 'tempest_identity_tenant_name') + project_creator = deploy_utils.create_project( + self.os_creds, ProjectSettings( + name=project_name, + description=CONST.__getattribute__( + 'tempest_identity_tenant_description'))) + if (project_creator is None or + project_creator.get_project() is None): + raise Exception("Failed to create tenant") + project_id = project_creator.get_project().id + self.creators.append(project_creator) + + logger.debug("Creating user for Tempest suite") + user_creator = deploy_utils.create_user( + self.os_creds, UserSettings( + name=CONST.__getattribute__('tempest_identity_user_name'), + password=CONST.__getattribute__( + 'tempest_identity_user_password'), + project_name=project_name)) + if user_creator is None or user_creator.get_user() is None: + raise Exception("Failed to create user") + user_id = user_creator.get_user().id + self.creators.append(user_creator) + else: + project_name = None + project_id = None + user_id = None + + logger.debug("Creating private network for Tempest suite") + network_creator = deploy_utils.create_network( + self.os_creds, NetworkSettings( + name=CONST.__getattribute__('tempest_private_net_name'), + project_name=project_name, + subnet_settings=[SubnetSettings( + name=CONST.__getattribute__('tempest_private_subnet_name'), + cidr=CONST.__getattribute__('tempest_private_subnet_cidr')) + ])) + if network_creator is None or network_creator.get_network() is None: + raise Exception("Failed to create private network") + self.creators.append(network_creator) + + image_id = None + image_id_alt = None + flavor_id = None + flavor_id_alt = None + + if (CONST.__getattribute__('tempest_use_custom_images') or + use_custom_images): + logger.debug("Creating image for Tempest suite") + image_base_name = CONST.__getattribute__('openstack_image_name') + os_image_settings = openstack_tests.cirros_image_settings( + image_base_name, 
public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating image for Tempest suite") + image_creator = deploy_utils.create_image( + self.os_creds, os_image_settings) + if image_creator is None: + raise Exception('Failed to create image') + self.creators.append(image_creator) + image_id = image_creator.get_image().id + + if use_custom_images: + logger.debug("Creating 2nd image for Tempest suite") + image_base_name_alt = CONST.__getattribute__( + 'openstack_image_name_alt') + os_image_settings_alt = openstack_tests.cirros_image_settings( + image_base_name_alt, public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating 2nd image for Tempest suite") + image_creator_alt = deploy_utils.create_image( + self.os_creds, os_image_settings_alt) + if image_creator_alt is None: + raise Exception('Failed to create image') + self.creators.append(image_creator_alt) + image_id_alt = image_creator_alt.get_image().id + + if (CONST.__getattribute__('tempest_use_custom_flavors') or + use_custom_flavors): + logger.info("Creating flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata)) + flavor = flavor_creator.create() + if flavor is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator) + flavor_id = flavor.id + + if use_custom_flavors: + logger.info("Creating 2nd flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata_alt = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator_alt = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name_alt'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata_alt)) + flavor_alt = flavor_creator_alt.create() + if flavor_alt is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator_alt) + flavor_id_alt = flavor_alt.id + + print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, " + "flavor_id: %s, flavor_id_alt: %s" % ( + image_id, image_id_alt, flavor_id, flavor_id_alt,)) + + result = { + 'image_id': image_id, + 'image_id_alt': image_id_alt, + 'flavor_id': flavor_id, + 'flavor_id_alt': flavor_id_alt + } + + if create_project: + result['project_id'] = project_id + result['tenant_id'] = project_id # for compatibility + result['user_id'] = user_id + + return result + + def cleanup(self): + """ + Cleanup all OpenStack objects. Should be called on completion. 
+ """ + for creator in reversed(self.creators): + try: + creator.clean() + except Exception as e: + logger.error('Unexpected error cleaning - %s', e) diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py index 74fbce1b..40fcb07f 100644 --- a/functest/opnfv_tests/openstack/vping/vping_base.py +++ b/functest/opnfv_tests/openstack/vping/vping_base.py @@ -43,8 +43,14 @@ class VPingBase(testcase.TestCase): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') + self.os_creds = openstack_tests.get_credentials( - os_env_file=CONST.__getattribute__('openstack_creds')) + os_env_file=CONST.__getattribute__('openstack_creds'), + overrides=creds_override) self.creators = list() self.image_creator = None @@ -102,14 +108,33 @@ class VPingBase(testcase.TestCase): 'vping_private_subnet_name') + self.guid private_subnet_cidr = CONST.__getattribute__( 'vping_private_subnet_cidr') + + vping_network_type = None + vping_physical_network = None + vping_segmentation_id = None + + if (hasattr(CONST, 'network_type')): + vping_network_type = CONST.__getattribute__( + 'vping_network_type') + if (hasattr(CONST, 'physical_network')): + vping_physical_network = CONST.__getattribute__( + 'vping_physical_network') + if (hasattr(CONST, 'segmentation_id')): + vping_segmentation_id = CONST.__getattribute__( + 'vping_segmentation_id') + self.logger.info( "Creating network with name: '%s'" % private_net_name) self.network_creator = deploy_utils.create_network( self.os_creds, - NetworkSettings(name=private_net_name, - subnet_settings=[SubnetSettings( - name=private_subnet_name, - cidr=private_subnet_cidr)])) + NetworkSettings( + name=private_net_name, + network_type=vping_network_type, + physical_network=vping_physical_network, + segmentation_id=vping_segmentation_id, + subnet_settings=[SubnetSettings( + name=private_subnet_name, + cidr=private_subnet_cidr)])) self.creators.append(self.network_creator) self.logger.info( diff --git a/functest/opnfv_tests/vnf/aaa/__init__.py b/functest/opnfv_tests/vnf/aaa/__init__.py deleted file mode 100644 index e69de29b..00000000 --- a/functest/opnfv_tests/vnf/aaa/__init__.py +++ /dev/null diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py deleted file mode 100644 index 71e3c972..00000000 --- a/functest/opnfv_tests/vnf/aaa/aaa.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016 Orange and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -import logging - -import functest.core.vnf as vnf - - -class AaaVnf(vnf.VnfOnBoarding): - """AAA VNF sample""" - - logger = logging.getLogger(__name__) - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = "aaa" - super(AaaVnf, self).__init__(**kwargs) - - def deploy_orchestrator(self): - self.logger.info("No VNFM needed to deploy a free radius here") - return True - - def deploy_vnf(self): - self.logger.info("Freeradius VNF deployment") - # find a way to deploy freeradius and tester (heat,manual, ..) 
- deploy_vnf = {'status': 'PASS', 'version': 'xxxx'} - self.details['deploy_vnf'] = deploy_vnf - return True - - def test_vnf(self): - self.logger.info("Run test towards freeradius") - # once the freeradius is deployed..make some tests - test_vnf = {'status': 'PASS', 'version': 'xxxx'} - self.details['test_vnf'] = test_vnf - return True diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py index 5a5c12be..8851f7a4 100644 --- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py +++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py @@ -10,7 +10,9 @@ import json import logging import os import pkg_resources +import shlex import shutil +import subprocess import time import requests @@ -109,19 +111,17 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): bono_ip=None, ellis_ip=None, signup_code='secret'): self.logger.info('Run Clearwater live test') - nameservers = ft_utils.get_resolvconf_ns() - resolvconf = ['{0}{1}{2}'.format(os.linesep, 'nameserver ', ns) - for ns in nameservers] - self.logger.debug('resolvconf: %s', resolvconf) dns_file = '/etc/resolv.conf' dns_file_bak = '/etc/resolv.conf.bak' + self.logger.debug('Backup %s -> %s', dns_file, dns_file_bak) shutil.copy(dns_file, dns_file_bak) - script = ('echo -e "nameserver {0}{1}" > {2};' - 'source /etc/profile.d/rvm.sh;' - 'cd {3};' - 'rake test[{4}] SIGNUP_CODE={5}' - .format(dns_ip, - ''.join(resolvconf), + cmd = ("dnsmasq -d -u root --server=/clearwater.opnfv/{0} " + "-r /etc/resolv.conf.bak".format(dns_ip)) + dnsmasq_process = subprocess.Popen(shlex.split(cmd)) + script = ('echo -e "nameserver {0}" > {1};' + 'cd {2};' + 'rake test[{3}] SIGNUP_CODE={4}' + .format('127.0.0.1', dns_file, self.test_dir, public_domain, @@ -136,7 +136,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): ft_utils.execute_command(cmd, error_msg='Clearwater live test failed', output_file=output_file) - + dnsmasq_process.kill() with open(dns_file_bak, 'r') as bak_file: result = bak_file.read() with open(dns_file, 'w') as f: diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py index 8f6fcec8..b07eaee2 100644 --- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py +++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py @@ -110,15 +110,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): # needs some images self.__logger.info("Upload some OS images if it doesn't exist") - for image_name, image_url in self.images.iteritems(): - self.__logger.info("image: %s, url: %s", image_name, image_url) - if image_url and image_name: + for image_name, image_file in self.images.iteritems(): + self.__logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: image_creator = OpenStackImage( self.snaps_creds, ImageSettings(name=image_name, image_user='cloud', img_format='qcow2', - url=image_url)) + image_file=image_file)) image_creator.create() # self.created_object.append(image_creator) diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml index 743c6ddd..280e0a6b 100644 --- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml +++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml @@ -1,6 +1,6 @@ tenant_images: - ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img - cloudify_manager_4.0: http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 + ubuntu_14.04: 
/home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img + cloudify_manager_4.0: /home/opnfv/functest/images/cloudify-manager-premium-4.0.1.qcow2 orchestrator: name: cloudify version: '4.0' diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py deleted file mode 100644 index d420705a..00000000 --- a/functest/opnfv_tests/vnf/ims/opera_ims.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -import json -import logging -import os -import time - -from opera import openo_connect -import requests - -import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base -from functest.utils.constants import CONST - - -class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase): - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = "opera_ims" - super(OperaIms, self).__init__(**kwargs) - self.logger = logging.getLogger(__name__) - self.ellis_file = os.path.join( - CONST.__getattribute__('dir_results'), 'ellis.info') - self.live_test_file = os.path.join( - CONST.__getattribute__('dir_results'), 'live_test_report.json') - try: - self.openo_msb_endpoint = os.environ['OPENO_MSB_ENDPOINT'] - except KeyError: - raise Exception('OPENO_MSB_ENDPOINT is not specified,' - ' put it as <OPEN-O ip>:<port>') - else: - self.logger.info('OPEN-O endpoint is: %s', self.openo_msb_endpoint) - - def prepare(self): - pass - - def clean(self): - pass - - def deploy_vnf(self): - try: - openo_connect.create_service(self.openo_msb_endpoint, - 'functest_opera', - 'VNF for functest testing') - except Exception as e: - self.logger.error(e) - return {'status': 'FAIL', 'result': e} - else: - self.logger.info('vIMS deployment is kicked off') - return {'status': 'PASS', 'result': ''} - - def dump_info(self, info_file, result): - with open(info_file, 'w') as f: - self.logger.debug('Save information to file: %s', info_file) - json.dump(result, f) - - def test_vnf(self): - vnfm_ip = openo_connect.get_vnfm_ip(self.openo_msb_endpoint) - self.logger.info('VNFM IP: %s', vnfm_ip) - vnf_status_url = 'http://{0}:5000/api/v1/model/status'.format(vnfm_ip) - vnf_alive = False - retry = 40 - - self.logger.info('Check the VNF status') - while retry > 0: - rq = requests.get(vnf_status_url, timeout=90) - response = rq.json() - vnf_alive = response['vnf_alive'] - msg = response['msg'] - self.logger.info(msg) - if vnf_alive: - break - self.logger.info('check again in one and half a minute...') - retry = retry - 1 - time.sleep(90) - - if not vnf_alive: - raise Exception('VNF failed to start: {0}'.format(msg)) - - ellis_config_url = ('http://{0}:5000/api/v1/model/ellis/configure' - .format(vnfm_ip)) - rq = requests.get(ellis_config_url, timeout=90) - if rq.json() and not rq.json()['ellis_ok']: - self.logger.error(rq.json()['data']) - raise Exception('Failed to configure Ellis') - - self.logger.info('Get Clearwater deployment detail') - vnf_info_url = ('http://{0}:5000/api/v1/model/output' - .format(vnfm_ip)) - rq = requests.get(vnf_info_url, timeout=90) - data = rq.json()['data'] - self.logger.info(data) - bono_ip = data['bono_ip'] - ellis_ip = data['ellis_ip'] - dns_ip = data['dns_ip'] - result = self.config_ellis(ellis_ip, 'signup', 
True) - self.logger.debug('Ellis Result: %s', result) - self.dump_info(self.ellis_file, result) - - if dns_ip: - vims_test_result = self.run_clearwater_live_test( - dns_ip, - 'clearwater.local', - bono_ip, - ellis_ip, - 'signup') - if vims_test_result != '': - self.dump_info(self.live_test_file, vims_test_result) - return {'status': 'PASS', 'result': vims_test_result} - else: - return {'status': 'FAIL', 'result': ''} - - def main(self, **kwargs): - self.logger.info("Start to run Opera vIMS VNF onboarding test") - self.execute() - self.logger.info("Opera vIMS VNF onboarding test finished") - if self.result is "PASS": - return self.EX_OK - else: - return self.EX_RUN_ERROR - - def run(self): - kwargs = {} - return self.main(**kwargs) diff --git a/functest/opnfv_tests/vnf/ims/orchestra.yaml b/functest/opnfv_tests/vnf/ims/orchestra.yaml new file mode 100644 index 00000000..4cd18e72 --- /dev/null +++ b/functest/opnfv_tests/vnf/ims/orchestra.yaml @@ -0,0 +1,61 @@ +tenant_images: + orchestrator: + ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img + orchestra_openims: + openims: /home/opnfv/functest/images/img + orchestra_clearwaterims: + ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img +mano: + name: OpenBaton + version: '3.2.0' + requirements: + flavor: + name: openbaton + ram_min: 4096 + disk: 5 + vcpus: 2 + image: 'ubuntu-14.04-server-cloudimg-amd64-disk1' + bootstrap: + url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap + config: + url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file + gvnfm: + userdata: + url: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh + credentials: + username: admin + password: openbaton + +orchestra_openims: + name: OpenIMS + descriptor: + url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json + requirements: + flavor: + name: m1.small + ram_min: 2048 + disk: 5 + vcpus: 2 + test: + scscf: + ports: [3870, 6060] + pcscf: + ports: [4060] + icscf: + ports: [3869, 5060] + fhoss: + ports: [3868] + bind9: + ports: [] + +orchestra_clearwaterims: + name: Clearwater IMS + descriptor: + url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/ClearwaterIMS/3.2.0/json + requirements: + flavor: + name: m1.small + ram_min: 2048 + disk: 5 + vcpus: 2 + test: diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py new file mode 100644 index 00000000..a5405996 --- /dev/null +++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py @@ -0,0 +1,682 @@ +#!/usr/bin/env python + +# Copyright (c) 2016 Orange and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +"""Orchestra Clearwater IMS testcase implementation.""" + +import json +import logging +import os +import socket +import time +import pkg_resources +import yaml + +from snaps.openstack.create_image import OpenStackImage, ImageSettings +from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings +from snaps.openstack.create_security_group import ( + OpenStackSecurityGroup, + SecurityGroupSettings, + SecurityGroupRuleSettings, + Direction, + Protocol) +from snaps.openstack.create_network import ( + OpenStackNetwork, + NetworkSettings, + SubnetSettings, + PortSettings) +from snaps.openstack.create_router import OpenStackRouter, RouterSettings +from snaps.openstack.os_credentials import OSCreds +from snaps.openstack.create_instance import ( + VmInstanceSettings, + OpenStackVmInstance) +from functest.opnfv_tests.openstack.snaps import snaps_utils + +import functest.core.vnf as vnf +import functest.utils.openstack_utils as os_utils +from functest.utils.constants import CONST + +from org.openbaton.cli.errors.errors import NfvoException +from org.openbaton.cli.agents.agents import MainAgent + + +__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>" +# ---------------------------------------------------------- +# +# UTILS +# +# ----------------------------------------------------------- + + +def get_config(parameter, file_path): + """ + Get config parameter. + + Returns the value of a given parameter in file.yaml + parameter must be given in string format with dots + Example: general.openstack.image_name + """ + with open(file_path) as config_file: + file_yaml = yaml.safe_load(config_file) + config_file.close() + value = file_yaml + for element in parameter.split("."): + value = value.get(element) + if value is None: + raise ValueError("The parameter %s is not defined in" + " reporting.yaml", parameter) + return value + + +def servertest(host, port): + """Method to test that a server is reachable at IP:port""" + args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + for family, socktype, proto, canonname, sockaddr in args: + sock = socket.socket(family, socktype, proto) + try: + sock.connect(sockaddr) + except socket.error: + return False + else: + sock.close() + return True + + +def get_userdata(orchestrator=dict): + """Build userdata for Open Baton machine""" + userdata = "#!/bin/bash\n" + userdata += "echo \"Executing userdata...\"\n" + userdata += "set -x\n" + userdata += "set -e\n" + userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n" + userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n" + userdata += "echo \"Install curl...\"\n" + userdata += "apt-get install curl\n" + userdata += "echo \"Inject public key...\"\n" + userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3" + "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc" + "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv" + "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S" + "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O" + "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc" + "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya" + "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut" + "horized_keys\n") + userdata += "echo \"Download bootstrap...\"\n" + userdata += ("curl -s %s " + "> ./bootstrap\n" % 
orchestrator['bootstrap']['url']) + userdata += ("curl -s %s" "> ./config_file\n" % + orchestrator['bootstrap']['config']['url']) + userdata += ("echo \"Disable usage of mysql...\"\n") + userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n" + userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" + % orchestrator['details']['fip'].ip) + userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip" + "=%s/g /config_file\n" % orchestrator['details']['fip'].ip) + userdata += "echo \"Set autostart of components to 'false'\"\n" + userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n" + userdata += "echo \"Execute bootstrap...\"\n" + bootstrap = "sh ./bootstrap release -configFile=./config_file" + userdata += bootstrap + "\n" + userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n" + userdata += ("echo \"nfvo.plugin.timeout=600000\" >> " + "/etc/openbaton/openbaton-nfvo.properties\n") + userdata += ( + "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" % + orchestrator['gvnfm']['userdata']['url']) + userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \ + "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" + userdata += "echo \"Starting NFVO\"\n" + userdata += "service openbaton-nfvo restart\n" + userdata += "echo \"Starting Generic VNFM\"\n" + userdata += "service openbaton-vnfm-generic restart\n" + userdata += "echo \"...end of userdata...\"\n" + return userdata + + +class ClearwaterImsVnf(vnf.VnfOnBoarding): + """Clearwater IMS VNF deployed with openBaton orchestrator""" + + logger = logging.getLogger(__name__) + + def __init__(self, **kwargs): + if "case_name" not in kwargs: + kwargs["case_name"] = "orchestra_clearwaterims" + super(ClearwaterImsVnf, self).__init__(**kwargs) + # self.logger = logging.getLogger("functest.ci.run_tests.orchestra") + self.logger.info("kwargs %s", (kwargs)) + + self.case_dir = pkg_resources.resource_filename( + 'functest', 'opnfv_tests/vnf/ims/') + self.data_dir = CONST.__getattribute__('dir_ims_data') + self.test_dir = CONST.__getattribute__('dir_repo_vims_test') + self.created_resources = [] + self.logger.info("%s VNF onboarding test starting", self.case_name) + + try: + self.config = CONST.__getattribute__( + 'vnf_{}_config'.format(self.case_name)) + except BaseException: + raise Exception("Orchestra VNF config file not found") + config_file = self.case_dir + self.config + + self.mano = dict( + get_config("mano", config_file), + details={} + ) + self.logger.debug("Orchestrator configuration %s", self.mano) + + self.details['orchestrator'] = dict( + name=self.mano['name'], + version=self.mano['version'], + status='ERROR', + result='' + ) + + self.vnf = dict( + get_config(self.case_name, config_file), + ) + self.logger.debug("VNF configuration: %s", self.vnf) + + self.details['vnf'] = dict( + name=self.vnf['name'], + ) + + self.details['test_vnf'] = dict( + name=self.case_name, + ) + + # Orchestra base Data directory creation + if not os.path.exists(self.data_dir): + os.makedirs(self.data_dir) + + self.images = get_config("tenant_images.orchestrator", config_file) + self.images.update( + get_config( + "tenant_images.%s" % + self.case_name, + config_file)) + self.snaps_creds = None + + def prepare(self): + """Prepare testscase (Additional pre-configuration steps).""" + super(ClearwaterImsVnf, self).prepare() + + self.logger.info("Additional pre-configuration steps") + self.logger.info("creds %s", (self.creds)) + + self.snaps_creds = OSCreds( + username=self.creds['username'], + password=self.creds['password'], + 
auth_url=self.creds['auth_url'], + project_name=self.creds['tenant'], + identity_api_version=int(os_utils.get_keystone_client_version())) + + self.prepare_images() + self.prepare_flavor() + self.prepare_security_groups() + self.prepare_network() + self.prepare_floating_ip() + + def prepare_images(self): + """Upload images if they doen't exist yet""" + self.logger.info("Upload images if they doen't exist yet") + for image_name, image_file in self.images.iteritems(): + self.logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: + image = OpenStackImage( + self.snaps_creds, + ImageSettings(name=image_name, + image_user='cloud', + img_format='qcow2', + image_file=image_file)) + image.create() + # self.created_resources.append(image); + + def prepare_security_groups(self): + """Create Open Baton security group if it doesn't exist yet""" + self.logger.info( + "Creating security group for Open Baton if not yet existing...") + sg_rules = list() + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.icmp)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.icmp)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.ingress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.egress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + + security_group = OpenStackSecurityGroup( + self.snaps_creds, + SecurityGroupSettings( + name="orchestra-sec-group-allowall", + rule_settings=sg_rules)) + + security_group_info = security_group.create() + self.created_resources.append(security_group) + self.mano['details']['sec_group'] = security_group_info.name + self.logger.info( + "Security group orchestra-sec-group-allowall prepared") + + def prepare_flavor(self): + """Create Open Baton flavor if it doesn't exist yet""" + self.logger.info( + "Create Flavor for Open Baton NFVO if not yet existing") + + flavor_settings = FlavorSettings( + name=self.mano['requirements']['flavor']['name'], + ram=self.mano['requirements']['flavor']['ram_min'], + disk=self.mano['requirements']['flavor']['disk'], + vcpus=self.mano['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.created_resources.append(flavor) + self.mano['details']['flavor'] = {} + self.mano['details']['flavor']['name'] = flavor_settings.name + 
self.mano['details']['flavor']['id'] = flavor_info.id + + def prepare_network(self): + """Create network/subnet/router if they doen't exist yet""" + self.logger.info( + "Creating network/subnet/router if they doen't exist yet...") + subnet_settings = SubnetSettings( + name='%s_subnet' % + self.case_name, + cidr="192.168.100.0/24") + network_settings = NetworkSettings( + name='%s_net' % + self.case_name, + subnet_settings=[subnet_settings]) + orchestra_network = OpenStackNetwork( + self.snaps_creds, network_settings) + orchestra_network_info = orchestra_network.create() + self.mano['details']['network'] = {} + self.mano['details']['network']['id'] = orchestra_network_info.id + self.mano['details']['network']['name'] = orchestra_network_info.name + self.mano['details']['external_net_name'] = snaps_utils.\ + get_ext_net_name(self.snaps_creds) + self.created_resources.append(orchestra_network) + orchestra_router = OpenStackRouter( + self.snaps_creds, + RouterSettings( + name='%s_router' % + self.case_name, + external_gateway=self.mano['details']['external_net_name'], + internal_subnets=[ + subnet_settings.name])) + orchestra_router.create() + self.created_resources.append(orchestra_router) + self.logger.info("Created network and router for Open Baton NFVO...") + + def prepare_floating_ip(self): + """Select/Create Floating IP if it doesn't exist yet""" + self.logger.info("Retrieving floating IP for Open Baton NFVO") + neutron_client = snaps_utils.neutron_utils.neutron_client( + self.snaps_creds) + # Finding Tenant ID to check to which tenant the Floating IP belongs + tenant_id = os_utils.get_tenant_id( + os_utils.get_keystone_client(self.creds), + self.tenant_name) + # Use os_utils to retrieve complete information of Floating IPs + floating_ips = os_utils.get_floating_ips(neutron_client) + my_floating_ips = [] + # Filter Floating IPs with tenant id + for floating_ip in floating_ips: + # self.logger.info("Floating IP: %s", floating_ip) + if floating_ip.get('tenant_id') == tenant_id: + my_floating_ips.append(floating_ip.get('floating_ip_address')) + # Select if Floating IP exist else create new one + if len(my_floating_ips) >= 1: + # Get Floating IP object from snaps for clean up + snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips( + neutron_client) + for my_floating_ip in my_floating_ips: + for snaps_floating_ip in snaps_floating_ips: + if snaps_floating_ip.ip == my_floating_ip: + self.mano['details']['fip'] = snaps_floating_ip + self.logger.info( + "Selected floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + break + if self.mano['details']['fip'] is not None: + break + else: + self.logger.info("Creating floating IP for Open Baton NFVO") + self.mano['details']['fip'] = snaps_utils.neutron_utils.\ + create_floating_ip( + neutron_client, + self.mano['details']['external_net_name']) + self.logger.info( + "Created floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + + def get_vim_descriptor(self): + """"Create VIM descriptor to be used for onboarding""" + self.logger.info( + "Building VIM descriptor with PoP creds: %s", + self.creds) + # Depending on API version either tenant ID or project name must be + # used + if os_utils.is_keystone_v3(): + self.logger.info( + "Using v3 API of OpenStack... -> Using OS_PROJECT_ID") + project_id = os_utils.get_tenant_id( + os_utils.get_keystone_client(), + self.creds.get("project_name")) + else: + self.logger.info( + "Using v2 API of OpenStack... 
-> Using OS_TENANT_NAME") + project_id = self.creds.get("tenant_name") + self.logger.debug("VIM project/tenant id: %s", project_id) + vim_json = { + "name": "vim-instance", + "authUrl": self.creds.get("auth_url"), + "tenant": project_id, + "username": self.creds.get("username"), + "password": self.creds.get("password"), + "securityGroups": [ + self.mano['details']['sec_group'] + ], + "type": "openstack", + "location": { + "name": "opnfv", + "latitude": "52.525876", + "longitude": "13.314400" + } + } + self.logger.info("Built VIM descriptor: %s", vim_json) + return vim_json + + def deploy_orchestrator(self): + self.logger.info("Deploying Open Baton...") + self.logger.info("Details: %s", self.mano['details']) + start_time = time.time() + + self.logger.info("Creating orchestra instance...") + userdata = get_userdata(self.mano) + self.logger.info("flavor: %s\n" + "image: %s\n" + "network_id: %s\n", + self.mano['details']['flavor']['name'], + self.mano['requirements']['image'], + self.mano['details']['network']['id']) + self.logger.debug("userdata: %s\n", userdata) + # setting up image + image_settings = ImageSettings( + name=self.mano['requirements']['image'], + image_user='ubuntu', + exists=True) + # setting up port + port_settings = PortSettings( + name='%s_port' % self.case_name, + network_name=self.mano['details']['network']['name']) + # build configuration of vm + orchestra_settings = VmInstanceSettings( + name=self.case_name, + flavor=self.mano['details']['flavor']['name'], + port_settings=[port_settings], + security_group_names=[self.mano['details']['sec_group']], + userdata=userdata) + orchestra_vm = OpenStackVmInstance(self.snaps_creds, + orchestra_settings, + image_settings) + + orchestra_vm.create() + self.created_resources.append(orchestra_vm) + self.mano['details']['id'] = orchestra_vm.get_vm_info()['id'] + self.logger.info( + "Created orchestra instance: %s", + self.mano['details']['id']) + + self.logger.info("Associating floating ip: '%s' to VM '%s' ", + self.mano['details']['fip'].ip, + self.case_name) + nova_client = os_utils.get_nova_client() + if not os_utils.add_floating_ip( + nova_client, + self.mano['details']['id'], + self.mano['details']['fip'].ip): + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Cannot associate floating IP to VM.") + return False + + self.logger.info("Waiting for Open Baton NFVO to be up and running...") + timeout = 0 + while timeout < 200: + if servertest( + self.mano['details']['fip'].ip, + "8080"): + break + else: + self.logger.info( + "Open Baton NFVO is not started yet (%ss)", + (timeout * 5)) + time.sleep(5) + timeout += 1 + + if timeout >= 200: + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Open Baton is not started correctly") + return False + + self.logger.info("Waiting for all components to be up and running...") + time.sleep(60) + duration = time.time() - start_time + self.details["orchestrator"].update(status='PASS', duration=duration) + self.logger.info("Deploy Open Baton NFVO: OK") + return True + + def deploy_vnf(self): + start_time = time.time() + self.logger.info("Deploying %s...", self.vnf['name']) + + main_agent = MainAgent( + nfvo_ip=self.mano['details']['fip'].ip, + nfvo_port=8080, + https=False, + version=1, + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + + self.logger.info( + "Create %s Flavor if not 
existing", self.vnf['name']) + flavor_settings = FlavorSettings( + name=self.vnf['requirements']['flavor']['name'], + ram=self.vnf['requirements']['flavor']['ram_min'], + disk=self.vnf['requirements']['flavor']['disk'], + vcpus=self.vnf['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.logger.debug("Flavor id: %s", flavor_info.id) + + self.logger.info("Getting project 'default'...") + project_agent = main_agent.get_agent("project", "") + for project in json.loads(project_agent.find()): + if project.get("name") == "default": + self.mano['details']['project_id'] = project.get("id") + self.logger.info("Found project 'default': %s", project) + break + + vim_json = self.get_vim_descriptor() + self.logger.info("Registering VIM: %s", vim_json) + + main_agent.get_agent( + "vim", project_id=self.mano['details']['project_id']).create( + entity=json.dumps(vim_json)) + + market_agent = main_agent.get_agent( + "market", project_id=self.mano['details']['project_id']) + + try: + self.logger.info("sending: %s", self.vnf['descriptor']['url']) + nsd = market_agent.create(entity=self.vnf['descriptor']['url']) + if nsd.get('id') is None: + self.logger.error("NSD not onboarded correctly") + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + self.mano['details']['nsd_id'] = nsd.get('id') + self.logger.info("Onboarded NSD: " + nsd.get("name")) + + nsr_agent = main_agent.get_agent( + "nsr", project_id=self.mano['details']['project_id']) + + self.mano['details']['nsr'] = nsr_agent.create( + self.mano['details']['nsd_id']) + except NfvoException as exc: + self.logger.error(exc.message) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + if self.mano['details']['nsr'].get('code') is not None: + self.logger.error( + "%s cannot be deployed: %s -> %s", + self.vnf['name'], + self.mano['details']['nsr'].get('code'), + self.mano['details']['nsr'].get('message')) + self.logger.error("%s cannot be deployed", self.vnf['name']) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + timeout = 0 + self.logger.info("Waiting for NSR to go to ACTIVE...") + while self.mano['details']['nsr'].get("status") != 'ACTIVE' \ + and self.mano['details']['nsr'].get("status") != 'ERROR': + timeout += 1 + self.logger.info("NSR is not yet ACTIVE... 
(%ss)", 5 * timeout) + if timeout == 300: + self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + time.sleep(5) + self.mano['details']['nsr'] = json.loads( + nsr_agent.find(self.mano['details']['nsr'].get('id'))) + + duration = time.time() - start_time + if self.mano['details']['nsr'].get("status") == 'ACTIVE': + self.details["vnf"].update(status='PASS', duration=duration) + self.logger.info("Sleep for 60s to ensure that all " + "services are up and running...") + time.sleep(60) + result = True + else: + self.details["vnf"].update(status='FAIL', duration=duration) + self.logger.error("NSR: %s", self.mano['details'].get('nsr')) + result = False + return result + + def test_vnf(self): + self.logger.info( + "Testing VNF Clearwater IMS is not yet implemented...") + start_time = time.time() + + duration = time.time() - start_time + self.details["test_vnf"].update(status='PASS', duration=duration) + self.logger.info("Test VNF: OK") + return True + + def clean(self): + self.logger.info("Cleaning %s...", self.case_name) + try: + main_agent = MainAgent( + nfvo_ip=self.mano['details']['fip'].ip, + nfvo_port=8080, + https=False, + version=1, + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + self.logger.info("Terminating %s...", self.vnf['name']) + if (self.mano['details'].get('nsr')): + main_agent.get_agent( + "nsr", + project_id=self.mano['details']['project_id']).delete( + self.mano['details']['nsr'].get('id')) + self.logger.info("Sleeping 60 seconds...") + time.sleep(60) + else: + self.logger.info("No need to terminate the VNF...") + # os_utils.delete_instance(nova_client=os_utils.get_nova_client(), + # instance_id=self.mano_instance_id) + except (NfvoException, KeyError) as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + + try: + neutron_client = os_utils.get_neutron_client(self.creds) + self.logger.info("Deleting Open Baton Port...") + port = snaps_utils.neutron_utils.get_port_by_name( + neutron_client, '%s_port' % self.case_name) + snaps_utils.neutron_utils.delete_port(neutron_client, port) + time.sleep(10) + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + try: + self.logger.info("Deleting Open Baton Floating IP...") + snaps_utils.neutron_utils.delete_floating_ip( + neutron_client, self.mano['details']['fip']) + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + + for resource in reversed(self.created_resources): + try: + self.logger.info("Cleaning %s", str(resource)) + resource.clean() + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + super(ClearwaterImsVnf, self).clean() diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml deleted file mode 100644 index 9deb11c7..00000000 --- a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml +++ /dev/null @@ -1,45 +0,0 @@ -tenant_images: - ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img - openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img -orchestrator: - name: openbaton - version: '3.2.0' - requirements: - flavor: - name: orchestra - ram_min: 4096 - disk: 5 - vcpus: 2 - os_image: 'ubuntu_14.04' - bootstrap: - url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap - config: - url: 
http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file - gvnfm: - userdata: - url: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh - credentials: - username: admin - password: openbaton -vnf: - name: openims - descriptor: - url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json - requirements: - flavor: - name: m1.small - ram_min: 2048 - disk: 5 - vcpus: 2 - -vIMS: - scscf: - ports: [3870, 6060] - pcscf: - ports: [4060] - icscf: - ports: [3869, 5060] - fhoss: - ports: [3868] - bind9: - ports: []
\ No newline at end of file diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py index 0a0a766a..f8acada4 100644 --- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py +++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py @@ -139,16 +139,16 @@ def get_userdata(orchestrator=dict): return userdata -class ImsVnf(vnf.VnfOnBoarding): +class OpenImsVnf(vnf.VnfOnBoarding): """OpenIMS VNF deployed with openBaton orchestrator""" - # logger = logging.getLogger(__name__) + logger = logging.getLogger(__name__) def __init__(self, **kwargs): if "case_name" not in kwargs: - kwargs["case_name"] = "orchestra_ims" - super(ImsVnf, self).__init__(**kwargs) - self.logger = logging.getLogger("functest.ci.run_tests.orchestra") + kwargs["case_name"] = "orchestra_openims" + super(OpenImsVnf, self).__init__(**kwargs) + # self.logger = logging.getLogger("functest.ci.run_tests.orchestra") self.logger.info("kwargs %s", (kwargs)) self.case_dir = pkg_resources.resource_filename( @@ -156,7 +156,7 @@ class ImsVnf(vnf.VnfOnBoarding): self.data_dir = CONST.__getattribute__('dir_ims_data') self.test_dir = CONST.__getattribute__('dir_repo_vims_test') self.created_resources = [] - self.logger.info("Orchestra IMS VNF onboarding test starting") + self.logger.info("%s VNF onboarding test starting", self.case_name) try: self.config = CONST.__getattribute__( @@ -165,48 +165,44 @@ class ImsVnf(vnf.VnfOnBoarding): raise Exception("Orchestra VNF config file not found") config_file = self.case_dir + self.config - self.baton = dict( - requirements=get_config("orchestrator.requirements", config_file), - credentials=get_config("orchestrator.credentials", config_file), - bootstrap=get_config("orchestrator.bootstrap", config_file), - gvnfm=get_config("orchestrator.gvnfm", config_file), + self.mano = dict( + get_config("mano", config_file), + details={} ) - self.logger.debug("Orchestrator configuration %s", self.baton) + self.logger.debug("Orchestrator configuration %s", self.mano) self.details['orchestrator'] = dict( - name=get_config("orchestrator.name", config_file), - version=get_config("orchestrator.version", config_file), + name=self.mano['name'], + version=self.mano['version'], status='ERROR', result='' ) - self.baton['details'] = {} - self.baton['details']['image'] = self.baton['requirements']['os_image'] - self.baton['details']['name'] = self.details['orchestrator']['name'] self.vnf = dict( - descriptor=get_config("vnf.descriptor", config_file), - requirements=get_config("vnf.requirements", config_file) + get_config(self.case_name, config_file), ) + self.logger.debug("VNF configuration: %s", self.vnf) + self.details['vnf'] = dict( - name=get_config("vnf.name", config_file), + name=self.vnf['name'], ) - self.logger.debug("VNF configuration: %s", self.vnf) self.details['test_vnf'] = dict( - name="openims-test", + name=self.case_name, ) - # vIMS Data directory creation + # Orchestra base Data directory creation if not os.path.exists(self.data_dir): os.makedirs(self.data_dir) - self.images = get_config("tenant_images", config_file) - self.ims_conf = get_config("vIMS", config_file) + self.images = get_config("tenant_images.orchestrator", config_file) + self.images.update(get_config("tenant_images.%s" % + self.case_name, config_file)) self.snaps_creds = None def prepare(self): """Prepare testscase (Additional pre-configuration steps).""" - super(ImsVnf, self).prepare() + super(OpenImsVnf, self).prepare() self.logger.info("Additional pre-configuration steps") self.logger.info("creds 
%s", (self.creds)) @@ -227,15 +223,15 @@ class ImsVnf(vnf.VnfOnBoarding): def prepare_images(self): """Upload images if they doen't exist yet""" self.logger.info("Upload images if they doen't exist yet") - for image_name, image_url in self.images.iteritems(): - self.logger.info("image: %s, url: %s", image_name, image_url) - if image_url and image_name: + for image_name, image_file in self.images.iteritems(): + self.logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: image = OpenStackImage( self.snaps_creds, ImageSettings(name=image_name, image_user='cloud', img_format='qcow2', - url=image_url)) + image_file=image_file)) image.create() # self.created_resources.append(image); @@ -272,6 +268,16 @@ class ImsVnf(vnf.VnfOnBoarding): protocol=Protocol.udp, port_range_min=1, port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.icmp)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.icmp)) # sg_rules.append( # SecurityGroupRuleSettings( # sec_grp_name="orchestra-sec-group-allowall", @@ -295,7 +301,7 @@ class ImsVnf(vnf.VnfOnBoarding): security_group_info = security_group.create() self.created_resources.append(security_group) - self.baton['details']['sec_group'] = security_group_info.name + self.mano['details']['sec_group'] = security_group_info.name self.logger.info( "Security group orchestra-sec-group-allowall prepared") @@ -305,16 +311,16 @@ class ImsVnf(vnf.VnfOnBoarding): "Create Flavor for Open Baton NFVO if not yet existing") flavor_settings = FlavorSettings( - name=self.baton['requirements']['flavor']['name'], - ram=self.baton['requirements']['flavor']['ram_min'], - disk=self.baton['requirements']['flavor']['disk'], - vcpus=self.baton['requirements']['flavor']['vcpus']) + name=self.mano['requirements']['flavor']['name'], + ram=self.mano['requirements']['flavor']['ram_min'], + disk=self.mano['requirements']['flavor']['disk'], + vcpus=self.mano['requirements']['flavor']['vcpus']) flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) flavor_info = flavor.create() self.created_resources.append(flavor) - self.baton['details']['flavor'] = {} - self.baton['details']['flavor']['name'] = flavor_settings.name - self.baton['details']['flavor']['id'] = flavor_info.id + self.mano['details']['flavor'] = {} + self.mano['details']['flavor']['name'] = flavor_settings.name + self.mano['details']['flavor']['id'] = flavor_info.id def prepare_network(self): """Create network/subnet/router if they doen't exist yet""" @@ -322,27 +328,27 @@ class ImsVnf(vnf.VnfOnBoarding): "Creating network/subnet/router if they doen't exist yet...") subnet_settings = SubnetSettings( name='%s_subnet' % - self.baton['details']['name'], + self.case_name, cidr="192.168.100.0/24") network_settings = NetworkSettings( name='%s_net' % - self.baton['details']['name'], + self.case_name, subnet_settings=[subnet_settings]) orchestra_network = OpenStackNetwork( self.snaps_creds, network_settings) orchestra_network_info = orchestra_network.create() - self.baton['details']['network'] = {} - self.baton['details']['network']['id'] = orchestra_network_info.id - self.baton['details']['network']['name'] = orchestra_network_info.name - self.baton['details']['external_net_name'] = \ + self.mano['details']['network'] = {} + self.mano['details']['network']['id'] = orchestra_network_info.id + 
self.mano['details']['network']['name'] = orchestra_network_info.name + self.mano['details']['external_net_name'] = \ snaps_utils.get_ext_net_name(self.snaps_creds) self.created_resources.append(orchestra_network) orchestra_router = OpenStackRouter( self.snaps_creds, RouterSettings( name='%s_router' % - self.baton['details']['name'], - external_gateway=self.baton['details']['external_net_name'], + self.case_name, + external_gateway=self.mano['details']['external_net_name'], internal_subnets=[ subnet_settings.name])) orchestra_router.create() @@ -374,21 +380,21 @@ class ImsVnf(vnf.VnfOnBoarding): for my_floating_ip in my_floating_ips: for snaps_floating_ip in snaps_floating_ips: if snaps_floating_ip.ip == my_floating_ip: - self.baton['details']['fip'] = snaps_floating_ip + self.mano['details']['fip'] = snaps_floating_ip self.logger.info( "Selected floating IP for Open Baton NFVO %s", - (self.baton['details']['fip'].ip)) + (self.mano['details']['fip'].ip)) break - if self.baton['details']['fip'] is not None: + if self.mano['details']['fip'] is not None: break else: self.logger.info("Creating floating IP for Open Baton NFVO") - self.baton['details']['fip'] = \ - snaps_utils.neutron_utils.create_floating_ip( - neutron_client, self.baton['details']['external_net_name']) + self.mano['details']['fip'] = ( + snaps_utils.neutron_utils. create_floating_ip( + neutron_client, self.mano['details']['external_net_name'])) self.logger.info( "Created floating IP for Open Baton NFVO %s", - (self.baton['details']['fip'].ip)) + (self.mano['details']['fip'].ip)) def get_vim_descriptor(self): """"Create VIM descriptor to be used for onboarding""" @@ -415,7 +421,7 @@ class ImsVnf(vnf.VnfOnBoarding): "username": self.creds.get("username"), "password": self.creds.get("password"), "securityGroups": [ - self.baton['details']['sec_group'] + self.mano['details']['sec_group'] ], "type": "openstack", "location": { @@ -428,34 +434,34 @@ class ImsVnf(vnf.VnfOnBoarding): return vim_json def deploy_orchestrator(self): - self.logger.info("Deploying orchestrator Open Baton ...") - self.logger.info("Details: %s", self.baton['details']) + self.logger.info("Deploying Open Baton...") + self.logger.info("Details: %s", self.mano['details']) start_time = time.time() self.logger.info("Creating orchestra instance...") - userdata = get_userdata(self.baton) + userdata = get_userdata(self.mano) self.logger.info("flavor: %s\n" "image: %s\n" "network_id: %s\n", - self.baton['details']['flavor']['name'], - self.baton['details']['image'], - self.baton['details']['network']['id']) + self.mano['details']['flavor']['name'], + self.mano['requirements']['image'], + self.mano['details']['network']['id']) self.logger.debug("userdata: %s\n", userdata) # setting up image image_settings = ImageSettings( - name=self.baton['details']['image'], + name=self.mano['requirements']['image'], image_user='ubuntu', exists=True) # setting up port port_settings = PortSettings( - name='%s_port' % self.baton['details']['name'], - network_name=self.baton['details']['network']['name']) + name='%s_port' % self.case_name, + network_name=self.mano['details']['network']['name']) # build configuration of vm orchestra_settings = VmInstanceSettings( - name=self.baton['details']['name'], - flavor=self.baton['details']['flavor']['name'], + name=self.case_name, + flavor=self.mano['details']['flavor']['name'], port_settings=[port_settings], - security_group_names=[self.baton['details']['sec_group']], + security_group_names=[self.mano['details']['sec_group']], userdata=userdata) 
orchestra_vm = OpenStackVmInstance(self.snaps_creds, orchestra_settings, @@ -463,19 +469,19 @@ class ImsVnf(vnf.VnfOnBoarding): orchestra_vm.create() self.created_resources.append(orchestra_vm) - self.baton['details']['id'] = orchestra_vm.get_vm_info()['id'] + self.mano['details']['id'] = orchestra_vm.get_vm_info()['id'] self.logger.info( "Created orchestra instance: %s", - self.baton['details']['id']) + self.mano['details']['id']) self.logger.info("Associating floating ip: '%s' to VM '%s' ", - self.baton['details']['fip'].ip, - self.baton['details']['name']) + self.mano['details']['fip'].ip, + self.case_name) nova_client = os_utils.get_nova_client() if not os_utils.add_floating_ip( nova_client, - self.baton['details']['id'], - self.baton['details']['fip'].ip): + self.mano['details']['id'], + self.mano['details']['fip'].ip): duration = time.time() - start_time self.details["orchestrator"].update( status='FAIL', duration=duration) @@ -486,7 +492,7 @@ class ImsVnf(vnf.VnfOnBoarding): timeout = 0 while timeout < 200: if servertest( - self.baton['details']['fip'].ip, + self.mano['details']['fip'].ip, "8080"): break else: @@ -511,18 +517,18 @@ class ImsVnf(vnf.VnfOnBoarding): def deploy_vnf(self): start_time = time.time() - self.logger.info("Deploying OpenIMS...") + self.logger.info("Deploying %s...", self.vnf['name']) main_agent = MainAgent( - nfvo_ip=self.baton['details']['fip'].ip, + nfvo_ip=self.mano['details']['fip'].ip, nfvo_port=8080, https=False, version=1, - username=self.baton['credentials']['username'], - password=self.baton['credentials']['password']) + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) self.logger.info( - "Check if openims Flavor is available, if not, create one") + "Create %s Flavor if not existing", self.vnf['name']) flavor_settings = FlavorSettings( name=self.vnf['requirements']['flavor']['name'], ram=self.vnf['requirements']['flavor']['ram_min'], @@ -536,7 +542,7 @@ class ImsVnf(vnf.VnfOnBoarding): project_agent = main_agent.get_agent("project", "") for project in json.loads(project_agent.find()): if project.get("name") == "default": - self.baton['details']['project_id'] = project.get("id") + self.mano['details']['project_id'] = project.get("id") self.logger.info("Found project 'default': %s", project) break @@ -544,11 +550,11 @@ class ImsVnf(vnf.VnfOnBoarding): self.logger.info("Registering VIM: %s", vim_json) main_agent.get_agent( - "vim", project_id=self.baton['details']['project_id']).create( + "vim", project_id=self.mano['details']['project_id']).create( entity=json.dumps(vim_json)) market_agent = main_agent.get_agent( - "market", project_id=self.baton['details']['project_id']) + "market", project_id=self.mano['details']['project_id']) try: self.logger.info("sending: %s", self.vnf['descriptor']['url']) @@ -558,47 +564,48 @@ class ImsVnf(vnf.VnfOnBoarding): duration = time.time() - start_time self.details["vnf"].update(status='FAIL', duration=duration) return False - self.baton['details']['nsd_id'] = nsd.get('id') + self.mano['details']['nsd_id'] = nsd.get('id') self.logger.info("Onboarded NSD: " + nsd.get("name")) nsr_agent = main_agent.get_agent( - "nsr", project_id=self.baton['details']['project_id']) + "nsr", project_id=self.mano['details']['project_id']) - self.baton['details']['nsr'] = nsr_agent.create( - self.baton['details']['nsd_id']) + self.mano['details']['nsr'] = nsr_agent.create( + self.mano['details']['nsd_id']) except NfvoException as exc: self.logger.error(exc.message) duration = time.time() - 
start_time self.details["vnf"].update(status='FAIL', duration=duration) return False - if self.baton['details']['nsr'].get('code') is not None: + if self.mano['details']['nsr'].get('code') is not None: self.logger.error( - "vIMS cannot be deployed: %s -> %s", - self.baton['details']['nsr'].get('code'), - self.baton['details']['nsr'].get('message')) - self.logger.error("vIMS cannot be deployed") + "%s cannot be deployed: %s -> %s", + self.vnf['name'], + self.mano['details']['nsr'].get('code'), + self.mano['details']['nsr'].get('message')) + self.logger.error("%s cannot be deployed", self.vnf['name']) duration = time.time() - start_time self.details["vnf"].update(status='FAIL', duration=duration) return False timeout = 0 self.logger.info("Waiting for NSR to go to ACTIVE...") - while self.baton['details']['nsr'].get("status") != 'ACTIVE' \ - and self.baton['details']['nsr'].get("status") != 'ERROR': + while self.mano['details']['nsr'].get("status") != 'ACTIVE' \ + and self.mano['details']['nsr'].get("status") != 'ERROR': timeout += 1 self.logger.info("NSR is not yet ACTIVE... (%ss)", 5 * timeout) - if timeout == 150: + if timeout == 300: self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout) duration = time.time() - start_time self.details["vnf"].update(status='FAIL', duration=duration) return False time.sleep(5) - self.baton['details']['nsr'] = json.loads( - nsr_agent.find(self.baton['details']['nsr'].get('id'))) + self.mano['details']['nsr'] = json.loads( + nsr_agent.find(self.mano['details']['nsr'].get('id'))) duration = time.time() - start_time - if self.baton['details']['nsr'].get("status") == 'ACTIVE': + if self.mano['details']['nsr'].get("status") == 'ACTIVE': self.details["vnf"].update(status='PASS', duration=duration) self.logger.info("Sleep for 60s to ensure that all " "services are up and running...") @@ -606,7 +613,7 @@ class ImsVnf(vnf.VnfOnBoarding): result = True else: self.details["vnf"].update(status='FAIL', duration=duration) - self.logger.error("NSR: %s", self.baton['details'].get('nsr')) + self.logger.error("NSR: %s", self.mano['details'].get('nsr')) result = False return result @@ -615,11 +622,11 @@ class ImsVnf(vnf.VnfOnBoarding): start_time = time.time() self.logger.info( "Testing if %s works properly...", - self.baton['details']['nsr'].get('name')) - for vnfr in self.baton['details']['nsr'].get('vnfr'): + self.mano['details']['nsr'].get('name')) + for vnfr in self.mano['details']['nsr'].get('vnfr'): self.logger.info( "Checking ports %s of VNF %s", - self.ims_conf.get(vnfr.get('name')).get('ports'), + self.vnf['test'][vnfr.get('name')]['ports'], vnfr.get('name')) for vdu in vnfr.get('vdu'): for vnfci in vdu.get('vnfc_instance'): @@ -631,8 +638,8 @@ class ImsVnf(vnf.VnfOnBoarding): "Testing %s:%s", vnfci.get('hostname'), floating_ip.get('ip')) - for port in self.ims_conf.get( - vnfr.get('name')).get('ports'): + for port in self.vnf['test'][vnfr.get( + 'name')]['ports']: if servertest(floating_ip.get('ip'), port): self.logger.info( "VNFC instance %s is reachable at %s:%s", @@ -662,32 +669,43 @@ class ImsVnf(vnf.VnfOnBoarding): return True def clean(self): - self.logger.info("Cleaning...") + self.logger.info("Cleaning %s...", self.case_name) try: main_agent = MainAgent( - nfvo_ip=self.baton['details']['fip'].ip, + nfvo_ip=self.mano['details']['fip'].ip, nfvo_port=8080, https=False, version=1, - username=self.baton['credentials']['username'], - password=self.baton['credentials']['password']) - self.logger.info("Terminating OpenIMS VNF...") - if 
(self.baton['details'].get('nsr')): + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + self.logger.info("Terminating %s...", self.vnf['name']) + if (self.mano['details'].get('nsr')): main_agent.get_agent( "nsr", - project_id=self.baton['details']['project_id'])\ - .delete(self.baton['details']['nsr'].get('id')) - self.logger.info("Waiting 60sec for terminating OpenIMS VNF..") + project_id=self.mano['details']['project_id']).\ + delete(self.mano['details']['nsr'].get('id')) + self.logger.info("Sleeping 60 seconds...") time.sleep(60) + else: + self.logger.info("No need to terminate the VNF...") # os_utils.delete_instance(nova_client=os_utils.get_nova_client(), - # instance_id=self.baton_instance_id) + # instance_id=self.mano_instance_id) except (NfvoException, KeyError) as exc: self.logger.error('Unexpected error cleaning - %s', exc) try: neutron_client = os_utils.get_neutron_client(self.creds) + self.logger.info("Deleting Open Baton Port...") + port = snaps_utils.neutron_utils.get_port_by_name( + neutron_client, '%s_port' % self.case_name) + snaps_utils.neutron_utils.delete_port(neutron_client, port) + time.sleep(10) + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + try: + self.logger.info("Deleting Open Baton Floating IP...") snaps_utils.neutron_utils.delete_floating_ip( - neutron_client, self.baton['details']['fip']) + neutron_client, self.mano['details']['fip']) except Exception as exc: self.logger.error('Unexpected error cleaning - %s', exc) @@ -697,4 +715,4 @@ class ImsVnf(vnf.VnfOnBoarding): resource.clean() except Exception as exc: self.logger.error('Unexpected error cleaning - %s', exc) - super(ImsVnf, self).clean() + super(OpenImsVnf, self).clean() |
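End of patch. As a reading aid only, the following standalone sketch restates the TCP reachability check that the Orchestra testcases introduced above rely on while waiting for the Open Baton NFVO on port 8080 and while probing the VNFC ports declared under test: in orchestra.yaml. The servertest function is taken from the patch; wait_for_endpoint, the retry budget and the example address are illustrative assumptions, not Functest code.

#!/usr/bin/env python
# Standalone sketch (not part of the patch): shows the polling pattern
# used by deploy_orchestrator() and test_vnf() in the Orchestra testcases.

import socket
import time


def servertest(host, port):
    """Return True if a TCP connection to host:port succeeds (as in the patch)."""
    args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
    for family, socktype, proto, _canonname, sockaddr in args:
        sock = socket.socket(family, socktype, proto)
        try:
            sock.connect(sockaddr)
        except socket.error:
            return False
        else:
            sock.close()
    return True


def wait_for_endpoint(host, port, retries=200, interval=5):
    """Illustrative helper: poll host:port until it answers or retries run out."""
    for _attempt in range(retries):
        if servertest(host, port):
            return True
        time.sleep(interval)
    return False


if __name__ == '__main__':
    # Example only: wait for an Open Baton NFVO assumed to listen on 8080.
    print(wait_for_endpoint('192.0.2.10', 8080))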