Diffstat (limited to 'functest/opnfv_tests')
30 files changed, 2742 insertions, 1061 deletions
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt index 95bea2b7..099d6864 100644 --- a/functest/opnfv_tests/openstack/rally/blacklist.txt +++ b/functest/opnfv_tests/openstack/rally/blacklist.txt @@ -6,6 +6,38 @@ scenario: - joid tests: - NovaServers.boot_server_from_volume_and_delete + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following tests currently fail due to required Gnocchi API: + # HTTP 410: "This telemetry installation is configured to use + # Gnocchi. Please use the Gnocchi API available on the + # metric endpoint to retrieve data." + # Issue: https://bugs.launchpad.net/rally/+bug/1704322 + - CeilometerMeters.list_matched_meters + - CeilometerMeters.list_meters + - CeilometerQueries.create_and_query_samples + - CeilometerResource.get_tenant_resources + - CeilometerResource.list_matched_resources + - CeilometerResource.list_resources + - CeilometerSamples.list_matched_samples + - CeilometerSamples.list_samples + - CeilometerStats.create_meter_and_get_stats + - CeilometerStats.get_stats + - + scenarios: + - '^os-' # all scenarios + installers: + - '.+' # all installers + tests: + # Following test currently fails due to but in + # python-ceilometerclient during fetching of event_types + # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138 + # Fix: https://review.openstack.org/#/c/483402/ + - CeilometerEvents.create_user_and_list_event_types functionality: - diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py index 6b7c49ca..fdef8bed 100644 --- a/functest/opnfv_tests/openstack/rally/rally.py +++ b/functest/opnfv_tests/openstack/rally/rally.py @@ -34,8 +34,8 @@ LOGGER = logging.getLogger(__name__) class RallyBase(testcase.OSGCTestCase): """Base class form Rally testcases implementation.""" - TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', - 'neutron', 'nova', 'quotas', 'vm', 'all'] + TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat', + 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all'] GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name') GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name') GLANCE_IMAGE_PATH = os.path.join( diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml new file mode 100644 index 00000000..7efb5a83 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml @@ -0,0 +1,458 @@ + CeilometerMeters.list_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + 
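Note: the new blacklist entries above pair regular-expression filters for scenarios ('^os-') and installers ('.+') with a list of test names to drop. A minimal sketch of how such entries can be evaluated is shown below; the helper name and exact matching logic are illustrative, not functest's actual implementation.

import re
import yaml

def excluded_tests(blacklist_file, deploy_scenario, installer_type):
    """Return the tests blacklisted for the given scenario/installer (sketch)."""
    excluded = []
    with open(blacklist_file) as fobj:
        black_list = yaml.safe_load(fobj).get('scenario', [])
    for item in black_list:
        scenarios = item.get('scenarios', [])
        installers = item.get('installers', [])
        # both the scenario and the installer patterns must match
        if (any(re.search(pat, deploy_scenario) for pat in scenarios) and
                any(re.search(pat, installer_type) for pat in installers)):
            excluded.extend(item.get('tests', []))
    return excluded

# excluded_tests('blacklist.txt', 'os-nosdn-nofeature-ha', 'apex') would
# return the Ceilometer tests listed under the two '^os-' entries above.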
counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ 
user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.create_meter_and_get_stats: + - + args: + user_id: "user-id" + resource_id: "resource-id" + counter_volume: 1.0 + counter_unit: "" + counter_type: "cumulative" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 
+ groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_matched_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: "fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + + CeilometerMeters.list_matched_meters: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerResource.list_matched_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + limit: 50 + filter_by_user_id: true + filter_by_project_id: true + metadata_query: + status: "terminated" + sla: + {{ no_failures_sla() }} + + CeilometerSamples.list_samples: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 60 + metadata_list: + - status: "active" + name: 
"fake_resource" + deleted: "False" + created_at: "2015-09-04T12:34:19.000000" + - status: "not_active" + name: "fake_resource_1" + deleted: "False" + created_at: "2015-09-10T06:55:12.000000" + batch_size: 5 + {% endcall %} + args: + limit: 50 + metadata_query: + status: "not_active" + sla: + {{ no_failures_sla() }} + diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml new file mode 100644 index 00000000..bb070cd3 --- /dev/null +++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml @@ -0,0 +1,247 @@ + CeilometerAlarms.create_alarm_and_get_history: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + state: "ok" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_delete_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_get_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_list_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarm_history: + - + args: + orderby: !!null + limit: !!null + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_alarms: + - + args: + filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} + orderby: !!null + limit: 10 + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + 
insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerQueries.create_and_query_samples: + - + args: + filter: {"=": {"counter_unit": "instance"}} + orderby: !!null + limit: 10 + counter_name: "cpu_util" + counter_type: "gauge" + counter_unit: "instance" + counter_volume: 1.0 + resource_id: "resource_id" + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.create_and_update_alarm: + - + args: + meter_name: "ram_util" + threshold: 10.0 + type: "threshold" + statistic: "avg" + alarm_actions: ["http://localhost:8776/alarm"] + ok_actions: ["http://localhost:8776/ok"] + insufficient_data_actions: ["http://localhost:8776/notok"] + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_get_event: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_events: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerEvents.create_user_and_list_event_types: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_trait_descriptions: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerTraits.create_user_and_list_traits: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} + + CeilometerStats.get_stats: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "benchmark_meter" + counter_type: "gauge" + counter_unit: "%" + counter_volume: 100 + resources_per_tenant: 100 + samples_per_resource: 100 + timestamp_interval: 10 + metadata_list: + - + status: "active" + name: "rally benchmark on" + deleted: "false" + - + status: "terminated" + name: "rally benchmark off" + deleted: "true" + {% endcall %} + args: + meter_name: "benchmark_meter" + filter_by_user_id: true + filter_by_project_id: true + filter_by_resource_id: true + metadata_query: + status: "terminated" + period: 300 + groupby: "resource_id" + sla: + {{ no_failures_sla() }} + + CeilometerResource.get_tenant_resources: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {% 
call user_context(tenants_amount, users_amount, use_existing_users) %} + ceilometer: + counter_name: "cpu_util" + counter_type: "gauge" + counter_volume: 1.0 + counter_unit: "instance" + {% endcall %} + sla: + {{ no_failures_sla() }} + + CeilometerAlarms.list_alarms: + - + runner: + {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }} + context: + {{ user_context(tenants_amount, users_amount, use_existing_users) }} + sla: + {{ no_failures_sla() }} diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml index 033edb83..65f101fb 100644 --- a/functest/opnfv_tests/openstack/rally/task.yaml +++ b/functest/opnfv_tests/openstack/rally/task.yaml @@ -31,6 +31,10 @@ {%- include "var/opnfv-neutron.yaml"-%} {% endif %} +{% if "ceilometer" in service_list %} +{%- include "var/opnfv-ceilometer.yaml"-%} +{% endif %} + {% if "quotas" in service_list %} {%- include "var/opnfv-quotas.yaml"-%} {% endif %} diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py index 6ac72176..4f71b5f5 100644 --- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py +++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py @@ -28,12 +28,13 @@ from functest.opnfv_tests.openstack.refstack_client.tempest_conf \ from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils # logging configuration """ LOGGER = logging.getLogger(__name__) -class RefstackClient(testcase.OSGCTestCase): +class RefstackClient(testcase.TestCase): """RefstackClient testcase implementation class.""" def __init__(self, **kwargs): @@ -41,6 +42,7 @@ class RefstackClient(testcase.OSGCTestCase): if "case_name" not in kwargs: kwargs["case_name"] = "refstack_defcore" super(RefstackClient, self).__init__(**kwargs) + self.tempestconf = None self.conf_path = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') @@ -57,6 +59,13 @@ class RefstackClient(testcase.OSGCTestCase): CONST.__getattribute__('OS_INSECURE').lower() == 'true'): self.insecure = '-k' + def generate_conf(self): + if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): + os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) + + self.tempestconf = TempestConf() + self.tempestconf.generate_tempestconf() + def run_defcore(self, conf, testlist): """Run defcore sys command.""" cmd = ("refstack-client test {0} -c {1} -v --test-list {2}" @@ -65,42 +74,29 @@ class RefstackClient(testcase.OSGCTestCase): ft_utils.execute_command(cmd) def run_defcore_default(self): - """Run default defcare sys command.""" - cmd = ("refstack-client test {0} -c {1} -v --test-list {2}" - .format(self.insecure, self.confpath, self.defcorelist)) + """Run default defcore sys command.""" + options = ["-v"] if not self.insecure else ["-v", self.insecure] + cmd = (["refstack-client", "test", "-c", self.confpath] + + options + ["--test-list", self.defcorelist]) LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd) - header = ("Refstack environment:\n" - " SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % - (CONST.__getattribute__('INSTALLER_TYPE'), - CONST.__getattribute__('DEPLOY_SCENARIO'), - CONST.__getattribute__('NODE_NAME'), - time.strftime("%a %b %d %H:%M:%S %Z %Y"))) - - f_stdout = open( - os.path.join(conf_utils.REFSTACK_RESULTS_DIR, - 
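Note: the scenario files above rely on Jinja2 macros (constant_runner, user_context, no_failures_sla) that are rendered together with task.yaml before the result is handed to Rally. The self-contained sketch below only illustrates the expansion mechanism; the macro bodies are simplified stand-ins, not functest's real macro definitions, and the variables (concurrency, iterations, smoke) are supplied at render time.

import jinja2
import yaml

MACROS = """
{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) -%}
{%- if is_smoke -%}
{ type: "constant", concurrency: 1, times: 1 }
{%- else -%}
{ type: "constant", concurrency: {{ concurrency }}, times: {{ times }} }
{%- endif -%}
{%- endmacro -%}

{%- macro no_failures_sla() -%}
{ failure_rate: { max: 0 } }
{%- endmacro -%}
"""

SCENARIO = """
CeilometerAlarms.list_alarms:
  -
    runner:
      {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
    sla:
      {{ no_failures_sla() }}
"""

rendered = jinja2.Template(MACROS + SCENARIO).render(
    concurrency=4, iterations=10, smoke=False)
print(yaml.safe_load(rendered))
# {'CeilometerAlarms.list_alarms': [{'runner': {'type': 'constant',
#   'concurrency': 4, 'times': 10}, 'sla': {'failure_rate': {'max': 0}}}]}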
"refstack.log"), 'w+') - f_env = open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, - "environment.log"), 'w+') - f_env.write(header) - - process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, bufsize=1) - - with process.stdout: - for line in iter(process.stdout.readline, b''): - if 'Tests' in line: - break - if re.search(r"\} tempest\.", line): - LOGGER.info(line.replace('\n', '')) - f_stdout.write(line) - process.wait() - - f_stdout.close() - f_env.close() + with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "environment.log"), 'w+') as f_env: + f_env.write( + ("Refstack environment:\n" + " SUT: {}\n Scenario: {}\n Node: {}\n Date: {}\n").format( + CONST.__getattribute__('INSTALLER_TYPE'), + CONST.__getattribute__('DEPLOY_SCENARIO'), + CONST.__getattribute__('NODE_NAME'), + time.strftime("%a %b %d %H:%M:%S %Z %Y"))) + + with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "refstack.log"), 'w+') as f_stdout: + subprocess.call(cmd, shell=False, stdout=f_stdout, + stderr=subprocess.STDOUT) def parse_refstack_result(self): - """Parse Refstact results.""" + """Parse Refstack results.""" try: with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR, "refstack.log"), 'r') as logfile: @@ -119,15 +115,15 @@ class RefstackClient(testcase.OSGCTestCase): for match in re.findall(r"(- Failed: )(\d+)", output): num_failures = match[1] LOGGER.info("".join(match)) - success_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*ok", output): - success_testcases += match + ", " - failed_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*FAILED", output): - failed_testcases += match + ", " - skipped_testcases = "" - for match in re.findall(r"\{0\}(.*?)[. ]*SKIPPED:", output): - skipped_testcases += match + ", " + success_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*ok", output): + success_testcases.append(match) + failed_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*FAILED", output): + failed_testcases.append(match) + skipped_testcases = [] + for match in re.findall(r"\{0\} (.*?)[. ]*SKIPPED:", output): + skipped_testcases.append(match) num_executed = int(num_tests) - int(num_skipped) @@ -157,18 +153,18 @@ class RefstackClient(testcase.OSGCTestCase): """ self.start_time = time.time() - if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR): - os.makedirs(conf_utils.REFSTACK_RESULTS_DIR) - try: - tempestconf = TempestConf() - tempestconf.generate_tempestconf() + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() self.run_defcore_default() self.parse_refstack_result() res = testcase.TestCase.EX_OK except Exception: LOGGER.exception("Error with run") res = testcase.TestCase.EX_RUN_ERROR + finally: + self.tempestconf.clean() self.stop_time = time.time() return res @@ -207,6 +203,42 @@ class RefstackClient(testcase.OSGCTestCase): return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. 
+ For details, see https://docs.openstack.org/tempest/latest/cleanup.html + + :return: TestCase.EX_OK + """ + LOGGER.info("Initializing the saved state of the OpenStack deployment") + + # Make sure that Tempest is configured + if not self.tempestconf: + self.generate_conf() + + os_utils.init_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(RefstackClient, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources. + For details, see https://docs.openstack.org/tempest/latest/cleanup.html + """ + LOGGER.info("Destroying the resources created for tempest") + + os_utils.perform_tempest_cleanup( + self.tempestconf.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.REFSTACK_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(RefstackClient, self).clean() + class RefstackClientParser(object): # pylint: disable=too-few-public-methods """Command line argument parser helper.""" diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py index 30590b9e..db745227 100644 --- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py +++ b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py @@ -11,13 +11,15 @@ import pkg_resources from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils import openstack_utils from functest.utils.constants import CONST +from functest.opnfv_tests.openstack.tempest.tempest \ + import TempestResourcesManager """ logging configuration """ logger = logging.getLogger(__name__) class TempestConf(object): - def __init__(self): + def __init__(self, **kwargs): self.VERIFIER_ID = conf_utils.get_verifier_id() self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir( self.VERIFIER_ID) @@ -27,15 +29,22 @@ class TempestConf(object): self.confpath = pkg_resources.resource_filename( 'functest', 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf') + self.resources = TempestResourcesManager(**kwargs) def generate_tempestconf(self): try: openstack_utils.source_credentials( CONST.__getattribute__('openstack_creds')) - img_flavor_dict = conf_utils.create_tempest_resources( - use_custom_images=True, use_custom_flavors=True) + resources = self.resources.create(create_project=True, + use_custom_images=True, + use_custom_flavors=True) conf_utils.configure_tempest_defcore( - self.DEPLOYMENT_DIR, img_flavor_dict) + self.DEPLOYMENT_DIR, + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + image_id_alt=resources.get("image_id_alt"), + flavor_id_alt=resources.get("flavor_id_alt"), + tenant_id=resources.get("project_id")) except Exception as e: logger.error("error with generating refstack client " "reference tempest conf file: %s", e) @@ -48,6 +57,9 @@ class TempestConf(object): except Exception as e: logger.error('Error with run: %s', e) + def clean(self): + self.resources.cleanup() + def main(): logging.basicConfig() diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py index 0b87440b..19c6a87f 100644 --- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py +++ b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py @@ -28,9 +28,14 @@ class SnapsTestRunner(unit.Suite): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if 
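Note: run_defcore_default() above now builds the refstack-client command as an argument list and streams its output straight into a log file, instead of spawning a shell and filtering stdout line by line. A stripped-down version of that pattern, with illustrative paths, looks like this:

import subprocess

def run_refstack(confpath, defcorelist, logfile, insecure=''):
    """Run refstack-client without a shell, logging everything it prints."""
    options = ["-v"] if not insecure else ["-v", insecure]
    cmd = (["refstack-client", "test", "-c", confpath]
           + options + ["--test-list", defcorelist])
    with open(logfile, 'w+') as f_stdout:
        return subprocess.call(cmd, shell=False, stdout=f_stdout,
                               stderr=subprocess.STDOUT)

# e.g. run_refstack('refstack_tempest.conf', 'defcore.txt',
#                   'refstack.log', insecure='-k')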
hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') self.os_creds = openstack_tests.get_credentials( os_env_file=CONST.__getattribute__('openstack_creds'), - proxy_settings_str=None, ssh_proxy_cmd=None) + proxy_settings_str=None, ssh_proxy_cmd=None, + overrides=creds_override) if 'ext_net_name' in kwargs: self.ext_net_name = kwargs['ext_net_name'] diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py index 327ba073..309f9db1 100644 --- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py +++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py @@ -16,4 +16,4 @@ def get_ext_net_name(os_creds): """ neutron = neutron_utils.neutron_client(os_creds) ext_nets = neutron_utils.get_external_networks(neutron) - return ext_nets[0]['network']['name'] + return ext_nets[0].name diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py index fa8f00fc..52fa6003 100644 --- a/functest/opnfv_tests/openstack/tempest/conf_utils.py +++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py @@ -11,10 +11,11 @@ import ConfigParser import logging import os import pkg_resources -import re import shutil import subprocess +import yaml + from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils import functest.utils.openstack_utils as os_utils @@ -28,16 +29,21 @@ GLANCE_IMAGE_PATH = os.path.join( TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'tempest') TEMPEST_CUSTOM = pkg_resources.resource_filename( - 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt') + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt') TEMPEST_BLACKLIST = pkg_resources.resource_filename( - 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt') + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt') TEMPEST_DEFCORE = pkg_resources.resource_filename( - 'functest', - 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt') + 'functest', + 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt') TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt') TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt') REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'refstack') +TEMPEST_CONF_YAML = pkg_resources.resource_filename( + 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml') +TEST_ACCOUNTS_FILE = pkg_resources.resource_filename( + 'functest', + 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml') CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE') CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') @@ -46,96 +52,9 @@ CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP') logger = logging.getLogger(__name__) -def create_tempest_resources(use_custom_images=False, - use_custom_flavors=False): - keystone_client = os_utils.get_keystone_client() - - logger.debug("Creating tenant and user for Tempest suite") - tenant_id = os_utils.create_tenant( - keystone_client, - CONST.__getattribute__('tempest_identity_tenant_name'), - CONST.__getattribute__('tempest_identity_tenant_description')) - if not tenant_id: - logger.error("Failed to create %s tenant" - % CONST.__getattribute__('tempest_identity_tenant_name')) - - user_id = os_utils.create_user( - keystone_client, - CONST.__getattribute__('tempest_identity_user_name'), - 
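Note: snaps_test_runner.py above (and vping_base.py further down) now honour an optional snaps_os_creds_override entry in the functest configuration and pass it to SNAPS as overrides. The standalone snippet below restates that hook; the example override keys in the comment are assumptions for illustration, not a documented schema.

from functest.utils.constants import CONST
from snaps.openstack.tests import openstack_tests

creds_override = None
if hasattr(CONST, 'snaps_os_creds_override'):
    # e.g. {'username': 'functest', 'project_name': 'functest-project'}
    creds_override = CONST.__getattribute__('snaps_os_creds_override')

# values from the override mapping take precedence over the RC file
os_creds = openstack_tests.get_credentials(
    os_env_file=CONST.__getattribute__('openstack_creds'),
    proxy_settings_str=None, ssh_proxy_cmd=None,
    overrides=creds_override)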
CONST.__getattribute__('tempest_identity_user_password'), - None, tenant_id) - if not user_id: - logger.error("Failed to create %s user" % - CONST.__getattribute__('tempest_identity_user_name')) - - logger.debug("Creating private network for Tempest suite") - network_dic = os_utils.create_shared_network_full( - CONST.__getattribute__('tempest_private_net_name'), - CONST.__getattribute__('tempest_private_subnet_name'), - CONST.__getattribute__('tempest_router_name'), - CONST.__getattribute__('tempest_private_subnet_cidr')) - if network_dic is None: - raise Exception('Failed to create private network') - - image_id = "" - image_id_alt = "" - flavor_id = "" - flavor_id_alt = "" - - if (CONST.__getattribute__('tempest_use_custom_images') or - use_custom_images): - # adding alternative image should be trivial should we need it - logger.debug("Creating image for Tempest suite") - _, image_id = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name'), - GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id is None: - raise Exception('Failed to create image') - - if use_custom_images: - logger.debug("Creating 2nd image for Tempest suite") - _, image_id_alt = os_utils.get_or_create_image( - CONST.__getattribute__('openstack_image_name_alt'), - GLANCE_IMAGE_PATH, - CONST.__getattribute__('openstack_image_disk_format')) - if image_id_alt is None: - raise Exception('Failed to create image') - - if (CONST.__getattribute__('tempest_use_custom_flavors') or - use_custom_flavors): - # adding alternative flavor should be trivial should we need it - logger.debug("Creating flavor for Tempest suite") - _, flavor_id = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id is None: - raise Exception('Failed to create flavor') - - if use_custom_flavors: - logger.debug("Creating 2nd flavor for tempest_defcore") - _, flavor_id_alt = os_utils.get_or_create_flavor( - CONST.__getattribute__('openstack_flavor_name_alt'), - CONST.__getattribute__('openstack_flavor_ram'), - CONST.__getattribute__('openstack_flavor_disk'), - CONST.__getattribute__('openstack_flavor_vcpus')) - if flavor_id_alt is None: - raise Exception('Failed to create flavor') - - img_flavor_dict = {} - img_flavor_dict['image_id'] = image_id - img_flavor_dict['image_id_alt'] = image_id_alt - img_flavor_dict['flavor_id'] = flavor_id - img_flavor_dict['flavor_id_alt'] = flavor_id_alt - - return img_flavor_dict - - def get_verifier_id(): """ - Returns verifer id for current Tempest + Returns verifier id for current Tempest """ cmd = ("rally verify list-verifiers | awk '/" + CONST.__getattribute__('tempest_deployment_name') + @@ -169,7 +88,7 @@ def get_verifier_deployment_id(): def get_verifier_repo_dir(verifier_id): """ - Returns installed verfier repo directory for Tempest + Returns installed verifier repo directory for Tempest """ if not verifier_id: verifier_id = get_verifier_id() @@ -211,44 +130,42 @@ def backup_tempest_config(conf_file): """ Copy config file to tempest results directory """ - if not os.path.exists(TEMPEST_RESULTS_DIR): - os.makedirs(TEMPEST_RESULTS_DIR) - shutil.copyfile(conf_file, os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf')) -def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None, - MODE=None): +def configure_tempest(deployment_dir, image_id=None, flavor_id=None, + 
mode=None): """ Calls rally verify and updates the generated tempest.conf with given parameters """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - IMAGE_ID, FLAVOR_ID) - if MODE == 'feature_multisite': - configure_tempest_multisite_params(conf_file) + configure_tempest_update_params(conf_file, image_id, flavor_id) -def configure_tempest_defcore(deployment_dir, img_flavor_dict): +def configure_tempest_defcore(deployment_dir, image_id, flavor_id, + image_id_alt, flavor_id_alt, tenant_id): """ Add/update needed parameters into tempest.conf file """ conf_file = configure_verifier(deployment_dir) - configure_tempest_update_params(conf_file, - img_flavor_dict.get("image_id"), - img_flavor_dict.get("flavor_id")) + configure_tempest_update_params(conf_file, image_id, flavor_id) logger.debug("Updating selected tempest.conf parameters for defcore...") config = ConfigParser.RawConfigParser() config.read(conf_file) - config.set('compute', 'image_ref', img_flavor_dict.get("image_id")) - config.set('compute', 'image_ref_alt', - img_flavor_dict['image_id_alt']) - config.set('compute', 'flavor_ref', img_flavor_dict.get("flavor_id")) - config.set('compute', 'flavor_ref_alt', - img_flavor_dict['flavor_id_alt']) + config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir)) + config.set('oslo_concurrency', 'lock_path', + '{}/lock_files'.format(deployment_dir)) + generate_test_accounts_file(tenant_id=tenant_id) + config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE) + config.set('scenario', 'img_dir', '{}'.format(deployment_dir)) + config.set('scenario', 'img_file', 'tempest-image') + config.set('compute', 'image_ref', image_id) + config.set('compute', 'image_ref_alt', image_id_alt) + config.set('compute', 'flavor_ref', flavor_id) + config.set('compute', 'flavor_ref_alt', flavor_id_alt) with open(conf_file, 'wb') as config_file: config.write(config_file) @@ -259,8 +176,29 @@ def configure_tempest_defcore(deployment_dir, img_flavor_dict): shutil.copyfile(conf_file, confpath) +def generate_test_accounts_file(tenant_id): + """ + Add needed tenant and user params into test_accounts.yaml + """ + + logger.debug("Add needed params into test_accounts.yaml...") + accounts_list = [ + { + 'tenant_name': + CONST.__getattribute__('tempest_identity_tenant_name'), + 'tenant_id': str(tenant_id), + 'username': CONST.__getattribute__('tempest_identity_user_name'), + 'password': + CONST.__getattribute__('tempest_identity_user_password') + } + ] + + with open(TEST_ACCOUNTS_FILE, "w") as f: + yaml.dump(accounts_list, f, default_flow_style=False) + + def configure_tempest_update_params(tempest_conf_file, - IMAGE_ID=None, FLAVOR_ID=None): + image_id=None, flavor_id=None): """ Add/update needed parameters into tempest.conf file """ @@ -274,21 +212,15 @@ def configure_tempest_update_params(tempest_conf_file, config.set('compute', 'volume_device_name', CONST.__getattribute__('tempest_volume_device_name')) if CONST.__getattribute__('tempest_use_custom_images'): - if IMAGE_ID is not None: - config.set('compute', 'image_ref', IMAGE_ID) + if image_id is not None: + config.set('compute', 'image_ref', image_id) if IMAGE_ID_ALT is not None: config.set('compute', 'image_ref_alt', IMAGE_ID_ALT) if CONST.__getattribute__('tempest_use_custom_flavors'): - if FLAVOR_ID is not None: - config.set('compute', 'flavor_ref', FLAVOR_ID) + if flavor_id is not None: + config.set('compute', 'flavor_ref', flavor_id) if FLAVOR_ID_ALT is not None: config.set('compute', 'flavor_ref_alt', 
FLAVOR_ID_ALT) - config.set('identity', 'tenant_name', - CONST.__getattribute__('tempest_identity_tenant_name')) - config.set('identity', 'username', - CONST.__getattribute__('tempest_identity_user_name')) - config.set('identity', 'password', - CONST.__getattribute__('tempest_identity_user_password')) config.set('identity', 'region', 'RegionOne') if os_utils.is_keystone_v3(): auth_version = 'v3' @@ -323,6 +255,19 @@ def configure_tempest_update_params(tempest_conf_file, config.set(service, 'endpoint_type', CONST.__getattribute__('OS_ENDPOINT_TYPE')) + logger.debug('Add/Update required params defined in tempest_conf.yaml ' + 'into tempest.conf file') + with open(TEMPEST_CONF_YAML) as f: + conf_yaml = yaml.safe_load(f) + if conf_yaml: + sections = config.sections() + for section in conf_yaml: + if section not in sections: + config.add_section(section) + sub_conf = conf_yaml.get(section) + for key, value in sub_conf.items(): + config.set(section, key, value) + with open(tempest_conf_file, 'wb') as config_file: config.write(config_file) @@ -351,93 +296,3 @@ def configure_verifier(deployment_dir): % tempest_conf_file) else: return tempest_conf_file - - -def configure_tempest_multisite_params(tempest_conf_file): - """ - Add/update multisite parameters into tempest.conf file generated by Rally - """ - logger.debug("Updating multisite tempest.conf parameters...") - config = ConfigParser.RawConfigParser() - config.read(tempest_conf_file) - - config.set('service_available', 'kingbird', 'true') - # cmd = ("openstack endpoint show kingbird | grep publicurl |" - # "awk '{print $4}' | awk -F '/' '{print $4}'") - # kingbird_api_version = os.popen(cmd).read() - # kingbird_api_version = os_utils.get_endpoint(service_type='multisite') - - if CI_INSTALLER_TYPE == 'fuel': - # For MOS based setup, the service is accessible - # via bind host - kingbird_conf_path = "/etc/kingbird/kingbird.conf" - installer_type = CI_INSTALLER_TYPE - installer_ip = CI_INSTALLER_IP - installer_username = CONST.__getattribute__( - 'multisite_{}_installer_username'.format(installer_type)) - installer_password = CONST.__getattribute__( - 'multisite_{}_installer_password'.format(installer_type)) - - ssh_options = ("-o UserKnownHostsFile=/dev/null -o " - "StrictHostKeyChecking=no") - - # Get the controller IP from the fuel node - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s ' - '\'fuel node --env 1| grep controller | grep "True\| 1" ' - '| awk -F\| "{print \$5}"\'' % (installer_password, - ssh_options, - installer_username, - installer_ip)) - multisite_controller_ip = "".join(os.popen(cmd).read().split()) - - # Login to controller and get bind host details - cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" ' - 'grep -e "^bind_" %s \\""' % (installer_password, - ssh_options, - installer_username, - installer_ip, - multisite_controller_ip, - kingbird_conf_path)) - bind_details = os.popen(cmd).read() - bind_details = "".join(bind_details.split()) - # Extract port number from the bind details - bind_port = re.findall(r"\D(\d{4})", bind_details)[0] - # Extract ip address from the bind details - bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", - bind_details)[0] - kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port) - else: - # cmd = "openstack endpoint show kingbird | grep publicurl |\ - # awk '{print $4}' | awk -F '/' '{print $3}'" - # kingbird_endpoint_url = os.popen(cmd).read() - kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird') - - try: - config.add_section("kingbird") - except 
Exception: - logger.info('kingbird section exist') - - # set the domain id - config.set('auth', 'admin_domain_name', 'default') - - config.set('kingbird', 'endpoint_type', 'publicURL') - config.set('kingbird', 'TIME_TO_SYNC', '120') - config.set('kingbird', 'endpoint_url', kingbird_endpoint_url) - config.set('kingbird', 'api_version', 'v1.0') - with open(tempest_conf_file, 'wb') as config_file: - config.write(config_file) - - backup_tempest_config(tempest_conf_file) - - -def install_verifier_ext(path): - """ - Install extension to active verifier - """ - logger.info("Installing verifier from existing repo...") - tag = get_repo_tag(path) - cmd = ("rally verify add-verifier-ext --source {0} " - "--version {1}" - .format(path, tag)) - error_msg = ("Problem while adding verifier extension from %s" % path) - ft_utils.execute_command_raise(cmd, error_msg=error_msg) diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml new file mode 100644 index 00000000..b47a9736 --- /dev/null +++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml @@ -0,0 +1,13 @@ +# This is an empty configuration file to be filled up with the desired options +# to generate a custom tempest.conf +# Examples: +# network-feature-enabled: +# port_security: True + +# volume-feature-enabled: +# api_v1: False + +# validation: +# image_ssh_user: root +# ssh_timeout: 300 + diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt index ac4e3728..df2c3126 100644 --- a/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt +++ b/functest/opnfv_tests/openstack/tempest/custom_tests/test_list.txt @@ -1,4 +1,4 @@ # This is an empty file to be filled up with the desired tempest test cases # Examples: -#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops -#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops
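Note: configure_tempest_update_params() above now folds any section/key pairs found in the new custom_tests/tempest_conf.yaml into the generated tempest.conf. The standalone sketch below mirrors that loop; the file names are placeholders.

import ConfigParser

import yaml

def apply_tempest_overrides(tempest_conf_file, overrides_yaml):
    """Merge YAML-defined options into an existing tempest.conf."""
    config = ConfigParser.RawConfigParser()
    config.read(tempest_conf_file)
    with open(overrides_yaml) as fobj:
        conf_yaml = yaml.safe_load(fobj)
    if conf_yaml:
        sections = config.sections()
        for section in conf_yaml:
            if section not in sections:
                config.add_section(section)
            for key, value in conf_yaml[section].items():
                config.set(section, key, value)
    with open(tempest_conf_file, 'wb') as config_file:
        config.write(config_file)

# With the commented example from tempest_conf.yaml enabled
# (validation: image_ssh_user: root), the call adds
#   [validation]
#   image_ssh_user = root
# to tempest.conf.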
\ No newline at end of file +#tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops[compute,id-7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba,network,smoke] +#tempest.scenario.test_network_basic_ops.TestNetworkBasicOps.test_network_basic_ops[compute,id-f323b3ba-82f8-4db7-8ea6-6a895869ec49,network,smoke] diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py index e565f5f9..c7ad4df2 100644 --- a/functest/opnfv_tests/openstack/tempest/tempest.py +++ b/functest/opnfv_tests/openstack/tempest/tempest.py @@ -12,7 +12,6 @@ from __future__ import division import logging import os -import pkg_resources import re import shutil import subprocess @@ -24,15 +23,26 @@ from functest.core import testcase from functest.opnfv_tests.openstack.tempest import conf_utils from functest.utils.constants import CONST import functest.utils.functest_utils as ft_utils +import functest.utils.openstack_utils as os_utils + +from snaps.openstack import create_flavor +from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor +from snaps.openstack.create_project import ProjectSettings +from snaps.openstack.create_network import NetworkSettings, SubnetSettings +from snaps.openstack.create_user import UserSettings +from snaps.openstack.tests import openstack_tests +from snaps.openstack.utils import deploy_utils + """ logging configuration """ logger = logging.getLogger(__name__) -class TempestCommon(testcase.OSGCTestCase): +class TempestCommon(testcase.TestCase): def __init__(self, **kwargs): super(TempestCommon, self).__init__(**kwargs) + self.resources = TempestResourcesManager(**kwargs) self.MODE = "" self.OPTION = "" self.VERIFIER_ID = conf_utils.get_verifier_id() @@ -63,8 +73,6 @@ class TempestCommon(testcase.OSGCTestCase): else: if self.MODE == 'smoke': testr_mode = "smoke" - elif self.MODE == 'feature_multisite': - testr_mode = "'[Kk]ingbird'" elif self.MODE == 'full': testr_mode = "" else: @@ -187,25 +195,32 @@ class TempestCommon(testcase.OSGCTestCase): try: self.result = 100 * int(num_success) / int(num_executed) except ZeroDivisionError: - logger.error("No test has been executed") self.result = 0 - return + if int(num_tests) > 0: + logger.info("All tests have been skipped") + else: + logger.error("No test has been executed") + return with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR, "tempest.log"), 'r') as logfile: output = logfile.read() - error_logs = "" - for match in re.findall('(.*?)[. ]*fail ', output): - error_logs += match - skipped_testcase = "" - for match in re.findall('(.*?)[. ]*skip:', output): - skipped_testcase += match + success_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. ]*success ', output): + success_testcases.append(match) + failed_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. ]*fail ', output): + failed_testcases.append(match) + skipped_testcases = [] + for match in re.findall('.*\{0\} (.*?)[. 
]*skip:', output): + skipped_testcases.append(match) self.details = {"tests": int(num_tests), "failures": int(num_failures), - "errors": error_logs, - "skipped": skipped_testcase} + "success": success_testcases, + "errors": failed_testcases, + "skipped": skipped_testcases} except Exception: self.result = 0 @@ -218,12 +233,12 @@ class TempestCommon(testcase.OSGCTestCase): try: if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) - image_and_flavor = conf_utils.create_tempest_resources() + resources = self.resources.create() conf_utils.configure_tempest( self.DEPLOYMENT_DIR, - IMAGE_ID=image_and_flavor.get("image_id"), - FLAVOR_ID=image_and_flavor.get("flavor_id"), - MODE=self.MODE) + image_id=resources.get("image_id"), + flavor_id=resources.get("flavor_id"), + mode=self.MODE) self.generate_test_list(self.VERIFIER_REPO_DIR) self.apply_tempest_blacklist() self.run_verifier_tests() @@ -232,10 +247,49 @@ class TempestCommon(testcase.OSGCTestCase): except Exception as e: logger.error('Error with run: %s' % e) res = testcase.TestCase.EX_RUN_ERROR + finally: + self.resources.cleanup() self.stop_time = time.time() return res + def create_snapshot(self): + """ + Run the Tempest cleanup utility to initialize OS state. + + :return: TestCase.EX_OK + """ + logger.info("Initializing the saved state of the OpenStack deployment") + + if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR): + os.makedirs(conf_utils.TEMPEST_RESULTS_DIR) + + # Make sure that the verifier is configured + conf_utils.configure_verifier(self.DEPLOYMENT_DIR) + + os_utils.init_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup-init.log") + ) + + return super(TempestCommon, self).create_snapshot() + + def clean(self): + """ + Run the Tempest cleanup utility to delete and destroy OS resources + created by Tempest. 
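Note: the reworked result parsing in tempest.py above classifies each test by matching the '{0} <test id> ... success/fail/skip' lines written to tempest.log. A quick, self-contained check of those regexes against fabricated sample lines (the line format is assumed, not taken from a real log):

import re

output = ("{0} tempest.api.compute.test_versions.TestVersions"
          ".test_list_api_versions ... success 0.312s\n"
          "{0} tempest.api.compute.test_foo.TestFoo.test_bar ... fail 0.210s\n")

success = re.findall(r'.*\{0\} (.*?)[. ]*success ', output)
failed = re.findall(r'.*\{0\} (.*?)[. ]*fail ', output)
print(success)  # ['tempest.api.compute.test_versions.TestVersions.test_list_api_versions']
print(failed)   # ['tempest.api.compute.test_foo.TestFoo.test_bar']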
+ """ + logger.info("Destroying the resources created for refstack") + + os_utils.perform_tempest_cleanup( + self.DEPLOYMENT_DIR, 'tempest.conf', + os.path.join(conf_utils.TEMPEST_RESULTS_DIR, + "tempest-cleanup.log") + ) + + return super(TempestCommon, self).clean() + class TempestSmokeSerial(TempestCommon): @@ -266,18 +320,6 @@ class TempestFullParallel(TempestCommon): self.MODE = "full" -class TempestMultisite(TempestCommon): - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = 'multisite' - TempestCommon.__init__(self, **kwargs) - self.MODE = "feature_multisite" - self.OPTION = "--concurrency 1" - conf_utils.install_verifier_ext( - pkg_resources.resource_filename('kingbird', '..')) - - class TempestCustom(TempestCommon): def __init__(self, **kwargs): @@ -296,3 +338,170 @@ class TempestDefcore(TempestCommon): TempestCommon.__init__(self, **kwargs) self.MODE = "defcore" self.OPTION = "--concurrency 1" + + +class TempestResourcesManager(object): + + def __init__(self, **kwargs): + self.os_creds = None + if 'os_creds' in kwargs: + self.os_creds = kwargs['os_creds'] + else: + self.os_creds = openstack_tests.get_credentials( + os_env_file=CONST.__getattribute__('openstack_creds')) + + self.creators = list() + + if hasattr(CONST, 'snaps_images_cirros'): + self.cirros_image_config = CONST.__getattribute__( + 'snaps_images_cirros') + else: + self.cirros_image_config = None + + def create(self, use_custom_images=False, use_custom_flavors=False, + create_project=False): + if create_project: + logger.debug("Creating project (tenant) for Tempest suite") + project_name = CONST.__getattribute__( + 'tempest_identity_tenant_name') + project_creator = deploy_utils.create_project( + self.os_creds, ProjectSettings( + name=project_name, + description=CONST.__getattribute__( + 'tempest_identity_tenant_description'))) + if (project_creator is None or + project_creator.get_project() is None): + raise Exception("Failed to create tenant") + project_id = project_creator.get_project().id + self.creators.append(project_creator) + + logger.debug("Creating user for Tempest suite") + user_creator = deploy_utils.create_user( + self.os_creds, UserSettings( + name=CONST.__getattribute__('tempest_identity_user_name'), + password=CONST.__getattribute__( + 'tempest_identity_user_password'), + project_name=project_name)) + if user_creator is None or user_creator.get_user() is None: + raise Exception("Failed to create user") + user_id = user_creator.get_user().id + self.creators.append(user_creator) + else: + project_name = None + project_id = None + user_id = None + + logger.debug("Creating private network for Tempest suite") + network_creator = deploy_utils.create_network( + self.os_creds, NetworkSettings( + name=CONST.__getattribute__('tempest_private_net_name'), + project_name=project_name, + subnet_settings=[SubnetSettings( + name=CONST.__getattribute__('tempest_private_subnet_name'), + cidr=CONST.__getattribute__('tempest_private_subnet_cidr')) + ])) + if network_creator is None or network_creator.get_network() is None: + raise Exception("Failed to create private network") + self.creators.append(network_creator) + + image_id = None + image_id_alt = None + flavor_id = None + flavor_id_alt = None + + if (CONST.__getattribute__('tempest_use_custom_images') or + use_custom_images): + logger.debug("Creating image for Tempest suite") + image_base_name = CONST.__getattribute__('openstack_image_name') + os_image_settings = openstack_tests.cirros_image_settings( + image_base_name, 
public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating image for Tempest suite") + image_creator = deploy_utils.create_image( + self.os_creds, os_image_settings) + if image_creator is None: + raise Exception('Failed to create image') + self.creators.append(image_creator) + image_id = image_creator.get_image().id + + if use_custom_images: + logger.debug("Creating 2nd image for Tempest suite") + image_base_name_alt = CONST.__getattribute__( + 'openstack_image_name_alt') + os_image_settings_alt = openstack_tests.cirros_image_settings( + image_base_name_alt, public=True, + image_metadata=self.cirros_image_config) + logger.debug("Creating 2nd image for Tempest suite") + image_creator_alt = deploy_utils.create_image( + self.os_creds, os_image_settings_alt) + if image_creator_alt is None: + raise Exception('Failed to create image') + self.creators.append(image_creator_alt) + image_id_alt = image_creator_alt.get_image().id + + if (CONST.__getattribute__('tempest_use_custom_flavors') or + use_custom_flavors): + logger.info("Creating flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata)) + flavor = flavor_creator.create() + if flavor is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator) + flavor_id = flavor.id + + if use_custom_flavors: + logger.info("Creating 2nd flavor for Tempest suite") + scenario = ft_utils.get_scenario() + flavor_metadata_alt = None + if 'ovs' in scenario or 'fdio' in scenario: + flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE + flavor_creator_alt = OpenStackFlavor( + self.os_creds, FlavorSettings( + name=CONST.__getattribute__('openstack_flavor_name_alt'), + ram=CONST.__getattribute__('openstack_flavor_ram'), + disk=CONST.__getattribute__('openstack_flavor_disk'), + vcpus=CONST.__getattribute__('openstack_flavor_vcpus'), + metadata=flavor_metadata_alt)) + flavor_alt = flavor_creator_alt.create() + if flavor_alt is None: + raise Exception('Failed to create flavor') + self.creators.append(flavor_creator_alt) + flavor_id_alt = flavor_alt.id + + print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, " + "flavor_id: %s, flavor_id_alt: %s" % ( + image_id, image_id_alt, flavor_id, flavor_id_alt,)) + + result = { + 'image_id': image_id, + 'image_id_alt': image_id_alt, + 'flavor_id': flavor_id, + 'flavor_id_alt': flavor_id_alt + } + + if create_project: + result['project_id'] = project_id + result['tenant_id'] = project_id # for compatibility + result['user_id'] = user_id + + return result + + def cleanup(self): + """ + Cleanup all OpenStack objects. Should be called on completion. + """ + for creator in reversed(self.creators): + try: + creator.clean() + except Exception as e: + logger.error('Unexpected error cleaning - %s', e) diff --git a/functest/opnfv_tests/openstack/vping/ping.sh b/functest/opnfv_tests/openstack/vping/ping.sh index 693b8682..15f5e84e 100644 --- a/functest/opnfv_tests/openstack/vping/ping.sh +++ b/functest/opnfv_tests/openstack/vping/ping.sh @@ -1,13 +1,10 @@ #!/bin/sh -while true; do - ping -c 1 $1 2>&1 >/dev/null - RES=$? 
- if [ "Z$RES" = "Z0" ] ; then - echo 'vPing OK' - break - else - echo 'vPing KO' - fi - sleep 1 -done
\ No newline at end of file + +ping -c 1 $1 2>&1 >/dev/null +RES=$? +if [ "Z$RES" = "Z0" ] ; then + echo 'vPing OK' +else + echo 'vPing KO' +fi diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py index 74fbce1b..40fcb07f 100644 --- a/functest/opnfv_tests/openstack/vping/vping_base.py +++ b/functest/opnfv_tests/openstack/vping/vping_base.py @@ -43,8 +43,14 @@ class VPingBase(testcase.TestCase): if 'os_creds' in kwargs: self.os_creds = kwargs['os_creds'] else: + creds_override = None + if hasattr(CONST, 'snaps_os_creds_override'): + creds_override = CONST.__getattribute__( + 'snaps_os_creds_override') + self.os_creds = openstack_tests.get_credentials( - os_env_file=CONST.__getattribute__('openstack_creds')) + os_env_file=CONST.__getattribute__('openstack_creds'), + overrides=creds_override) self.creators = list() self.image_creator = None @@ -102,14 +108,33 @@ class VPingBase(testcase.TestCase): 'vping_private_subnet_name') + self.guid private_subnet_cidr = CONST.__getattribute__( 'vping_private_subnet_cidr') + + vping_network_type = None + vping_physical_network = None + vping_segmentation_id = None + + if (hasattr(CONST, 'network_type')): + vping_network_type = CONST.__getattribute__( + 'vping_network_type') + if (hasattr(CONST, 'physical_network')): + vping_physical_network = CONST.__getattribute__( + 'vping_physical_network') + if (hasattr(CONST, 'segmentation_id')): + vping_segmentation_id = CONST.__getattribute__( + 'vping_segmentation_id') + self.logger.info( "Creating network with name: '%s'" % private_net_name) self.network_creator = deploy_utils.create_network( self.os_creds, - NetworkSettings(name=private_net_name, - subnet_settings=[SubnetSettings( - name=private_subnet_name, - cidr=private_subnet_cidr)])) + NetworkSettings( + name=private_net_name, + network_type=vping_network_type, + physical_network=vping_physical_network, + segmentation_id=vping_segmentation_id, + subnet_settings=[SubnetSettings( + name=private_subnet_name, + cidr=private_subnet_cidr)])) self.creators.append(self.network_creator) self.logger.info( diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py index 9aed4c10..8088a4db 100644 --- a/functest/opnfv_tests/openstack/vping/vping_userdata.py +++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py @@ -94,7 +94,7 @@ class VPingUserdata(vping_base.VPingBase): while True: time.sleep(1) - p_console = vm_creator.get_os_vm_server_obj().get_console_output() + p_console = vm_creator.get_console_output() if "vPing OK" in p_console: self.logger.info("vPing detected!") exit_code = TestCase.EX_OK diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py index ede0fc50..841da834 100644 --- a/functest/opnfv_tests/sdn/odl/odl.py +++ b/functest/opnfv_tests/sdn/odl/odl.py @@ -66,8 +66,7 @@ class ODLResultVisitor(robot.api.ResultVisitor): class ODLTests(testcase.TestCase): """ODL test runner.""" - odl_test_repo = os.path.join( - constants.CONST.__getattribute__('dir_repos'), 'odl_test') + odl_test_repo = constants.CONST.__getattribute__('dir_repo_odl_test') neutron_suite_dir = os.path.join(odl_test_repo, "csit/suites/openstack/neutron") basic_suite_dir = os.path.join(odl_test_repo, @@ -234,7 +233,7 @@ class ODLTests(testcase.TestCase): elif installer_type == 'joid': kwargs['odlip'] = os.environ['SDN_CONTROLLER'] elif installer_type == 'compass': - kwargs['odlwebport'] = '8181' + kwargs['odlrestconfport'] = 
'8080' elif installer_type == 'daisy': kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP'] kwargs['odlwebport'] = '8181' diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py index dfaa5cc1..a6d192ee 100644 --- a/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py +++ b/functest/opnfv_tests/sdn/onos/teston/adapters/connection.py @@ -64,7 +64,7 @@ class Connection(Foundation): """ os.getenv only returns current user value GetEnvValue returns a environment value of - current handle + current handle eg: GetEnvValue(handle,'HOME') """ envhandle = handle diff --git a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py index cb75b5c3..875a2dc9 100644 --- a/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py +++ b/functest/opnfv_tests/sdn/onos/teston/adapters/environment.py @@ -1,11 +1,11 @@ """ Description: - This file is used to setup the running environment - Include Download code,setup environment variable - Set onos running config - Set user name/password - Onos-push-keys and so on - lanqinglong@huawei.com +This file is used to setup the running environment +Include Download code,setup environment variable +Set onos running config +Set user name/password +Onos-push-keys and so on +lanqinglong@huawei.com # # All rights reserved. This program and the accompanying materials @@ -17,7 +17,7 @@ Description: import logging import pexpect -import pxssh +from pexpect import pxssh import re import os import sys @@ -196,10 +196,10 @@ class Environment(Connection): def ChangeTestCasePara(self, testcase, user, password): """ - When running test script, there's something need - to change in every test folder's *.param & *.topo files - user: onos&compute node user - password: onos&compute node password + When running test script, there\'s something need + to change in every test folder\'s \*.param & \*.topo files + user: onos\&compute node user + password: onos\&compute node password """ self.logger.info("Now Changing " + testcase + " name&password") if self.masterusername == 'root': diff --git a/functest/opnfv_tests/vnf/aaa/__init__.py b/functest/opnfv_tests/vnf/aaa/__init__.py deleted file mode 100644 index e69de29b..00000000 --- a/functest/opnfv_tests/vnf/aaa/__init__.py +++ /dev/null diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py deleted file mode 100644 index 71e3c972..00000000 --- a/functest/opnfv_tests/vnf/aaa/aaa.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016 Orange and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -import logging - -import functest.core.vnf as vnf - - -class AaaVnf(vnf.VnfOnBoarding): - """AAA VNF sample""" - - logger = logging.getLogger(__name__) - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = "aaa" - super(AaaVnf, self).__init__(**kwargs) - - def deploy_orchestrator(self): - self.logger.info("No VNFM needed to deploy a free radius here") - return True - - def deploy_vnf(self): - self.logger.info("Freeradius VNF deployment") - # find a way to deploy freeradius and tester (heat,manual, ..) 
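The aaa.py module deleted in this hunk was the bare-bones illustration of the functest.core.vnf.VnfOnBoarding contract: each stage hook returns True on success and records its outcome in self.details. For reference, the pattern it demonstrated reduces to roughly the skeleton below (a schematic restatement of the removed sample, not a proposed replacement):

    import logging

    import functest.core.vnf as vnf


    class SampleVnf(vnf.VnfOnBoarding):
        """Minimal VNF on-boarding skeleton, same shape as the removed aaa.py."""

        logger = logging.getLogger(__name__)

        def deploy_orchestrator(self):
            # No VNFM is needed for this sample; just report success.
            return True

        def deploy_vnf(self):
            self.details['deploy_vnf'] = {'status': 'PASS', 'version': 'xxxx'}
            return True

        def test_vnf(self):
            self.details['test_vnf'] = {'status': 'PASS', 'version': 'xxxx'}
            return True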
- deploy_vnf = {'status': 'PASS', 'version': 'xxxx'} - self.details['deploy_vnf'] = deploy_vnf - return True - - def test_vnf(self): - self.logger.info("Run test towards freeradius") - # once the freeradius is deployed..make some tests - test_vnf = {'status': 'PASS', 'version': 'xxxx'} - self.details['test_vnf'] = test_vnf - return True diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py index 25ddca21..8851f7a4 100644 --- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py +++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py @@ -10,7 +10,9 @@ import json import logging import os import pkg_resources +import shlex import shutil +import subprocess import time import requests @@ -43,7 +45,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): def config_ellis(self, ellis_ip, signup_code='secret', two_numbers=False): output_dict = {} - self.logger.info('Configure Ellis: %s', ellis_ip) + self.logger.debug('Configure Ellis: %s', ellis_ip) output_dict['ellis_ip'] = ellis_ip account_url = 'http://{0}/accounts'.format(ellis_ip) params = {"password": "functest", @@ -54,7 +56,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): output_dict['login'] = params if rq.status_code != 201 and rq.status_code != 409: raise Exception("Unable to create an account for number provision") - self.logger.info('Account is created on Ellis: %s', params) + self.logger.debug('Account is created on Ellis: %s', params) session_url = 'http://{0}/session'.format(ellis_ip) session_data = { @@ -66,13 +68,13 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): if rq.status_code != 201: raise Exception('Failed to get cookie for Ellis') cookies = rq.cookies - self.logger.info('Cookies: %s', cookies) + self.logger.debug('Cookies: %s', cookies) number_url = 'http://{0}/accounts/{1}/numbers'.format( ellis_ip, params['email']) - self.logger.info('Create 1st calling number on Ellis') - i = 24 + self.logger.debug('Create 1st calling number on Ellis') + i = 30 while rq.status_code != 200 and i > 0: try: number_res = self.create_ellis_number(number_url, cookies) @@ -86,7 +88,7 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): output_dict['number'] = number_res if two_numbers: - self.logger.info('Create 2nd calling number on Ellis') + self.logger.debug('Create 2nd calling number on Ellis') number_res = self.create_ellis_number(number_url, cookies) output_dict['number2'] = number_res @@ -109,19 +111,17 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): bono_ip=None, ellis_ip=None, signup_code='secret'): self.logger.info('Run Clearwater live test') - nameservers = ft_utils.get_resolvconf_ns() - resolvconf = ['{0}{1}{2}'.format(os.linesep, 'nameserver ', ns) - for ns in nameservers] - self.logger.debug('resolvconf: %s', resolvconf) dns_file = '/etc/resolv.conf' dns_file_bak = '/etc/resolv.conf.bak' + self.logger.debug('Backup %s -> %s', dns_file, dns_file_bak) shutil.copy(dns_file, dns_file_bak) - script = ('echo -e "nameserver {0}{1}" > {2};' - 'source /etc/profile.d/rvm.sh;' - 'cd {3};' - 'rake test[{4}] SIGNUP_CODE={5}' - .format(dns_ip, - ''.join(resolvconf), + cmd = ("dnsmasq -d -u root --server=/clearwater.opnfv/{0} " + "-r /etc/resolv.conf.bak".format(dns_ip)) + dnsmasq_process = subprocess.Popen(shlex.split(cmd)) + script = ('echo -e "nameserver {0}" > {1};' + 'cd {2};' + 'rake test[{3}] SIGNUP_CODE={4}' + .format('127.0.0.1', dns_file, self.test_dir, public_domain, @@ -131,12 +131,12 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding): script = 
'{0}{1}'.format(script, subscript) script = ('{0}{1}'.format(script, ' --trace')) cmd = "/bin/bash -c '{0}'".format(script) - self.logger.info('Live test cmd: %s', cmd) + self.logger.debug('Live test cmd: %s', cmd) output_file = os.path.join(self.result_dir, "ims_test_output.txt") ft_utils.execute_command(cmd, error_msg='Clearwater live test failed', output_file=output_file) - + dnsmasq_process.kill() with open(dns_file_bak, 'r') as bak_file: result = bak_file.read() with open(dns_file, 'w') as f: diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py index fafc77e1..b07eaee2 100644 --- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py +++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright (c) 2016 Orange and others. +# Copyright (c) 2017 Orange and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 @@ -25,16 +25,16 @@ from functest.utils.constants import CONST import functest.utils.openstack_utils as os_utils from snaps.openstack.os_credentials import OSCreds -from snaps.openstack.create_network import NetworkSettings, SubnetSettings, \ - OpenStackNetwork -from snaps.openstack.create_security_group import SecurityGroupSettings, \ - SecurityGroupRuleSettings,\ - Direction, Protocol, \ - OpenStackSecurityGroup +from snaps.openstack.create_network import (NetworkSettings, SubnetSettings, + OpenStackNetwork) +from snaps.openstack.create_security_group import (SecurityGroupSettings, + SecurityGroupRuleSettings, + Direction, Protocol, + OpenStackSecurityGroup) from snaps.openstack.create_router import RouterSettings, OpenStackRouter -from snaps.openstack.create_instance import VmInstanceSettings, \ - FloatingIpSettings, \ - OpenStackVmInstance +from snaps.openstack.create_instance import (VmInstanceSettings, + FloatingIpSettings, + OpenStackVmInstance) from snaps.openstack.create_flavor import FlavorSettings, OpenStackFlavor from snaps.openstack.create_image import ImageSettings, OpenStackImage from snaps.openstack.create_keypairs import KeypairSettings, OpenStackKeypair @@ -110,15 +110,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): # needs some images self.__logger.info("Upload some OS images if it doesn't exist") - for image_name, image_url in self.images.iteritems(): - self.__logger.info("image: %s, url: %s", image_name, image_url) - if image_url and image_name: + for image_name, image_file in self.images.iteritems(): + self.__logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: image_creator = OpenStackImage( self.snaps_creds, ImageSettings(name=image_name, image_user='cloud', img_format='qcow2', - url=image_url)) + image_file=image_file)) image_creator.create() # self.created_object.append(image_creator) @@ -239,6 +239,8 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): while str(cfy_status) != 'running' and retry: try: cfy_status = cfy_client.manager.get_status()['status'] + self.__logger.debug("The current manager status is %s", + cfy_status) except Exception: # pylint: disable=broad-except self.__logger.warning("Cloudify Manager isn't " + "up and running. 
Retrying ...") @@ -263,14 +265,15 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): self.__logger.info("Put private keypair in manager") if manager_creator.vm_ssh_active(block=True): ssh = manager_creator.ssh_client() - scp = SCPClient(ssh.get_transport()) + scp = SCPClient(ssh.get_transport(), socket_timeout=15.0) scp.put(kp_file, '~/') cmd = "sudo cp ~/cloudify_ims.pem /etc/cloudify/" - ssh.exec_command(cmd) + run_blocking_ssh_command(ssh, cmd) cmd = "sudo chmod 444 /etc/cloudify/cloudify_ims.pem" - ssh.exec_command(cmd) + run_blocking_ssh_command(ssh, cmd) cmd = "sudo yum install -y gcc python-devel" - ssh.exec_command(cmd) + run_blocking_ssh_command(ssh, cmd, "Unable to install packages \ + on manager") self.details['orchestrator'].update(status='PASS', duration=duration) @@ -292,15 +295,17 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): descriptor.get('file_name')) self.__logger.info("Get or create flavor for all clearwater vm") - self.exist_obj['flavor2'], flavor_id = os_utils.get_or_create_flavor( - self.vnf['requirements']['flavor']['name'], - self.vnf['requirements']['flavor']['ram_min'], - '30', - '1', - public=True) + flavor_settings = FlavorSettings( + name=self.vnf['requirements']['flavor']['name'], + ram=self.vnf['requirements']['flavor']['ram_min'], + disk=25, + vcpus=1) + flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_creator.create() + self.created_object.append(flavor_creator) self.vnf['inputs'].update(dict( - flavor_id=flavor_id, + flavor_id=self.vnf['requirements']['flavor']['name'], )) self.__logger.info("Create VNF Instance") @@ -371,7 +376,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): try: cfy_client.executions.cancel(execution['id'], force=True) - except: + except: # pylint: disable=broad-except self.__logger.warn("Can't cancel the current exec") execution = cfy_client.executions.start( @@ -383,7 +388,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase): wait_for_execution(cfy_client, execution, self.__logger) cfy_client.deployments.delete(self.vnf['descriptor'].get('name')) cfy_client.blueprints.delete(self.vnf['descriptor'].get('name')) - except: + except: # pylint: disable=broad-except self.__logger.warn("Some issue during the undeployment ..") self.__logger.warn("Tenant clean continue ..") @@ -507,3 +512,10 @@ def sig_test_format(sig_test): total_sig_test_result['failures'] = nb_failures total_sig_test_result['skipped'] = nb_skipped return total_sig_test_result + + +def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"): + """Command to run ssh command with the exit status.""" + stdin, stdout, stderr = ssh.exec_command(cmd) + if stdout.channel.recv_exit_status() != 0: + raise Exception(error_msg) diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml index f1028ce7..280e0a6b 100644 --- a/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml +++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.yaml @@ -1,6 +1,6 @@ tenant_images: - ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img - cloudify_manager_4.0: http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 + ubuntu_14.04: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img + cloudify_manager_4.0: /home/opnfv/functest/images/cloudify-manager-premium-4.0.1.qcow2 orchestrator: name: cloudify version: '4.0' @@ -19,7 +19,7 @@ vnf: version: 
'122' requirements: flavor: - name: m1.medium + name: m1.small ram_min: 2048 inputs: image_id: 'ubuntu_14.04' diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py deleted file mode 100644 index d420705a..00000000 --- a/functest/opnfv_tests/vnf/ims/opera_ims.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others. -# -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -import json -import logging -import os -import time - -from opera import openo_connect -import requests - -import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base -from functest.utils.constants import CONST - - -class OperaIms(clearwater_ims_base.ClearwaterOnBoardingBase): - - def __init__(self, **kwargs): - if "case_name" not in kwargs: - kwargs["case_name"] = "opera_ims" - super(OperaIms, self).__init__(**kwargs) - self.logger = logging.getLogger(__name__) - self.ellis_file = os.path.join( - CONST.__getattribute__('dir_results'), 'ellis.info') - self.live_test_file = os.path.join( - CONST.__getattribute__('dir_results'), 'live_test_report.json') - try: - self.openo_msb_endpoint = os.environ['OPENO_MSB_ENDPOINT'] - except KeyError: - raise Exception('OPENO_MSB_ENDPOINT is not specified,' - ' put it as <OPEN-O ip>:<port>') - else: - self.logger.info('OPEN-O endpoint is: %s', self.openo_msb_endpoint) - - def prepare(self): - pass - - def clean(self): - pass - - def deploy_vnf(self): - try: - openo_connect.create_service(self.openo_msb_endpoint, - 'functest_opera', - 'VNF for functest testing') - except Exception as e: - self.logger.error(e) - return {'status': 'FAIL', 'result': e} - else: - self.logger.info('vIMS deployment is kicked off') - return {'status': 'PASS', 'result': ''} - - def dump_info(self, info_file, result): - with open(info_file, 'w') as f: - self.logger.debug('Save information to file: %s', info_file) - json.dump(result, f) - - def test_vnf(self): - vnfm_ip = openo_connect.get_vnfm_ip(self.openo_msb_endpoint) - self.logger.info('VNFM IP: %s', vnfm_ip) - vnf_status_url = 'http://{0}:5000/api/v1/model/status'.format(vnfm_ip) - vnf_alive = False - retry = 40 - - self.logger.info('Check the VNF status') - while retry > 0: - rq = requests.get(vnf_status_url, timeout=90) - response = rq.json() - vnf_alive = response['vnf_alive'] - msg = response['msg'] - self.logger.info(msg) - if vnf_alive: - break - self.logger.info('check again in one and half a minute...') - retry = retry - 1 - time.sleep(90) - - if not vnf_alive: - raise Exception('VNF failed to start: {0}'.format(msg)) - - ellis_config_url = ('http://{0}:5000/api/v1/model/ellis/configure' - .format(vnfm_ip)) - rq = requests.get(ellis_config_url, timeout=90) - if rq.json() and not rq.json()['ellis_ok']: - self.logger.error(rq.json()['data']) - raise Exception('Failed to configure Ellis') - - self.logger.info('Get Clearwater deployment detail') - vnf_info_url = ('http://{0}:5000/api/v1/model/output' - .format(vnfm_ip)) - rq = requests.get(vnf_info_url, timeout=90) - data = rq.json()['data'] - self.logger.info(data) - bono_ip = data['bono_ip'] - ellis_ip = data['ellis_ip'] - dns_ip = data['dns_ip'] - result = self.config_ellis(ellis_ip, 'signup', True) - self.logger.debug('Ellis Result: %s', result) - self.dump_info(self.ellis_file, 
result) - - if dns_ip: - vims_test_result = self.run_clearwater_live_test( - dns_ip, - 'clearwater.local', - bono_ip, - ellis_ip, - 'signup') - if vims_test_result != '': - self.dump_info(self.live_test_file, vims_test_result) - return {'status': 'PASS', 'result': vims_test_result} - else: - return {'status': 'FAIL', 'result': ''} - - def main(self, **kwargs): - self.logger.info("Start to run Opera vIMS VNF onboarding test") - self.execute() - self.logger.info("Opera vIMS VNF onboarding test finished") - if self.result is "PASS": - return self.EX_OK - else: - return self.EX_RUN_ERROR - - def run(self): - kwargs = {} - return self.main(**kwargs) diff --git a/functest/opnfv_tests/vnf/ims/orchestra.yaml b/functest/opnfv_tests/vnf/ims/orchestra.yaml new file mode 100644 index 00000000..4cd18e72 --- /dev/null +++ b/functest/opnfv_tests/vnf/ims/orchestra.yaml @@ -0,0 +1,61 @@ +tenant_images: + orchestrator: + ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img + orchestra_openims: + openims: /home/opnfv/functest/images/img + orchestra_clearwaterims: + ubuntu-14.04-server-cloudimg-amd64-disk1: /home/opnfv/functest/images/trusty-server-cloudimg-amd64-disk1.img +mano: + name: OpenBaton + version: '3.2.0' + requirements: + flavor: + name: openbaton + ram_min: 4096 + disk: 5 + vcpus: 2 + image: 'ubuntu-14.04-server-cloudimg-amd64-disk1' + bootstrap: + url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap + config: + url: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file + gvnfm: + userdata: + url: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh + credentials: + username: admin + password: openbaton + +orchestra_openims: + name: OpenIMS + descriptor: + url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json + requirements: + flavor: + name: m1.small + ram_min: 2048 + disk: 5 + vcpus: 2 + test: + scscf: + ports: [3870, 6060] + pcscf: + ports: [4060] + icscf: + ports: [3869, 5060] + fhoss: + ports: [3868] + bind9: + ports: [] + +orchestra_clearwaterims: + name: Clearwater IMS + descriptor: + url: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/ClearwaterIMS/3.2.0/json + requirements: + flavor: + name: m1.small + ram_min: 2048 + disk: 5 + vcpus: 2 + test: diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py new file mode 100644 index 00000000..a5405996 --- /dev/null +++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py @@ -0,0 +1,682 @@ +#!/usr/bin/env python + +# Copyright (c) 2016 Orange and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +"""Orchestra Clearwater IMS testcase implementation.""" + +import json +import logging +import os +import socket +import time +import pkg_resources +import yaml + +from snaps.openstack.create_image import OpenStackImage, ImageSettings +from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings +from snaps.openstack.create_security_group import ( + OpenStackSecurityGroup, + SecurityGroupSettings, + SecurityGroupRuleSettings, + Direction, + Protocol) +from snaps.openstack.create_network import ( + OpenStackNetwork, + NetworkSettings, + SubnetSettings, + PortSettings) +from snaps.openstack.create_router import OpenStackRouter, RouterSettings +from snaps.openstack.os_credentials import OSCreds +from snaps.openstack.create_instance import ( + VmInstanceSettings, + OpenStackVmInstance) +from functest.opnfv_tests.openstack.snaps import snaps_utils + +import functest.core.vnf as vnf +import functest.utils.openstack_utils as os_utils +from functest.utils.constants import CONST + +from org.openbaton.cli.errors.errors import NfvoException +from org.openbaton.cli.agents.agents import MainAgent + + +__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>" +# ---------------------------------------------------------- +# +# UTILS +# +# ----------------------------------------------------------- + + +def get_config(parameter, file_path): + """ + Get config parameter. + + Returns the value of a given parameter in file.yaml + parameter must be given in string format with dots + Example: general.openstack.image_name + """ + with open(file_path) as config_file: + file_yaml = yaml.safe_load(config_file) + config_file.close() + value = file_yaml + for element in parameter.split("."): + value = value.get(element) + if value is None: + raise ValueError("The parameter %s is not defined in" + " reporting.yaml", parameter) + return value + + +def servertest(host, port): + """Method to test that a server is reachable at IP:port""" + args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + for family, socktype, proto, canonname, sockaddr in args: + sock = socket.socket(family, socktype, proto) + try: + sock.connect(sockaddr) + except socket.error: + return False + else: + sock.close() + return True + + +def get_userdata(orchestrator=dict): + """Build userdata for Open Baton machine""" + userdata = "#!/bin/bash\n" + userdata += "echo \"Executing userdata...\"\n" + userdata += "set -x\n" + userdata += "set -e\n" + userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n" + userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n" + userdata += "echo \"Install curl...\"\n" + userdata += "apt-get install curl\n" + userdata += "echo \"Inject public key...\"\n" + userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3" + "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc" + "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv" + "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S" + "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O" + "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc" + "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya" + "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut" + "horized_keys\n") + userdata += "echo \"Download bootstrap...\"\n" + userdata += ("curl -s %s " + "> ./bootstrap\n" % 
orchestrator['bootstrap']['url']) + userdata += ("curl -s %s" "> ./config_file\n" % + orchestrator['bootstrap']['config']['url']) + userdata += ("echo \"Disable usage of mysql...\"\n") + userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n" + userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" + % orchestrator['details']['fip'].ip) + userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip" + "=%s/g /config_file\n" % orchestrator['details']['fip'].ip) + userdata += "echo \"Set autostart of components to 'false'\"\n" + userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n" + userdata += "echo \"Execute bootstrap...\"\n" + bootstrap = "sh ./bootstrap release -configFile=./config_file" + userdata += bootstrap + "\n" + userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n" + userdata += ("echo \"nfvo.plugin.timeout=600000\" >> " + "/etc/openbaton/openbaton-nfvo.properties\n") + userdata += ( + "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" % + orchestrator['gvnfm']['userdata']['url']) + userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \ + "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" + userdata += "echo \"Starting NFVO\"\n" + userdata += "service openbaton-nfvo restart\n" + userdata += "echo \"Starting Generic VNFM\"\n" + userdata += "service openbaton-vnfm-generic restart\n" + userdata += "echo \"...end of userdata...\"\n" + return userdata + + +class ClearwaterImsVnf(vnf.VnfOnBoarding): + """Clearwater IMS VNF deployed with openBaton orchestrator""" + + logger = logging.getLogger(__name__) + + def __init__(self, **kwargs): + if "case_name" not in kwargs: + kwargs["case_name"] = "orchestra_clearwaterims" + super(ClearwaterImsVnf, self).__init__(**kwargs) + # self.logger = logging.getLogger("functest.ci.run_tests.orchestra") + self.logger.info("kwargs %s", (kwargs)) + + self.case_dir = pkg_resources.resource_filename( + 'functest', 'opnfv_tests/vnf/ims/') + self.data_dir = CONST.__getattribute__('dir_ims_data') + self.test_dir = CONST.__getattribute__('dir_repo_vims_test') + self.created_resources = [] + self.logger.info("%s VNF onboarding test starting", self.case_name) + + try: + self.config = CONST.__getattribute__( + 'vnf_{}_config'.format(self.case_name)) + except BaseException: + raise Exception("Orchestra VNF config file not found") + config_file = self.case_dir + self.config + + self.mano = dict( + get_config("mano", config_file), + details={} + ) + self.logger.debug("Orchestrator configuration %s", self.mano) + + self.details['orchestrator'] = dict( + name=self.mano['name'], + version=self.mano['version'], + status='ERROR', + result='' + ) + + self.vnf = dict( + get_config(self.case_name, config_file), + ) + self.logger.debug("VNF configuration: %s", self.vnf) + + self.details['vnf'] = dict( + name=self.vnf['name'], + ) + + self.details['test_vnf'] = dict( + name=self.case_name, + ) + + # Orchestra base Data directory creation + if not os.path.exists(self.data_dir): + os.makedirs(self.data_dir) + + self.images = get_config("tenant_images.orchestrator", config_file) + self.images.update( + get_config( + "tenant_images.%s" % + self.case_name, + config_file)) + self.snaps_creds = None + + def prepare(self): + """Prepare testscase (Additional pre-configuration steps).""" + super(ClearwaterImsVnf, self).prepare() + + self.logger.info("Additional pre-configuration steps") + self.logger.info("creds %s", (self.creds)) + + self.snaps_creds = OSCreds( + username=self.creds['username'], + password=self.creds['password'], + 
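            # identity_api_version (below) is read from the deployed keystone so
            # SNAPS talks the matching v2/v3 identity API; the resulting
            # self.snaps_creds is then reused by every prepare_* helper and by
            # deploy_orchestrator() further down.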
auth_url=self.creds['auth_url'], + project_name=self.creds['tenant'], + identity_api_version=int(os_utils.get_keystone_client_version())) + + self.prepare_images() + self.prepare_flavor() + self.prepare_security_groups() + self.prepare_network() + self.prepare_floating_ip() + + def prepare_images(self): + """Upload images if they doen't exist yet""" + self.logger.info("Upload images if they doen't exist yet") + for image_name, image_file in self.images.iteritems(): + self.logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: + image = OpenStackImage( + self.snaps_creds, + ImageSettings(name=image_name, + image_user='cloud', + img_format='qcow2', + image_file=image_file)) + image.create() + # self.created_resources.append(image); + + def prepare_security_groups(self): + """Create Open Baton security group if it doesn't exist yet""" + self.logger.info( + "Creating security group for Open Baton if not yet existing...") + sg_rules = list() + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.icmp)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.icmp)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.ingress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.egress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + + security_group = OpenStackSecurityGroup( + self.snaps_creds, + SecurityGroupSettings( + name="orchestra-sec-group-allowall", + rule_settings=sg_rules)) + + security_group_info = security_group.create() + self.created_resources.append(security_group) + self.mano['details']['sec_group'] = security_group_info.name + self.logger.info( + "Security group orchestra-sec-group-allowall prepared") + + def prepare_flavor(self): + """Create Open Baton flavor if it doesn't exist yet""" + self.logger.info( + "Create Flavor for Open Baton NFVO if not yet existing") + + flavor_settings = FlavorSettings( + name=self.mano['requirements']['flavor']['name'], + ram=self.mano['requirements']['flavor']['ram_min'], + disk=self.mano['requirements']['flavor']['disk'], + vcpus=self.mano['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.created_resources.append(flavor) + self.mano['details']['flavor'] = {} + self.mano['details']['flavor']['name'] = flavor_settings.name + 
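        # The flavor name stored here is read back by deploy_orchestrator() when
        # it builds VmInstanceSettings for the Open Baton NFVO VM; the creator
        # itself already sits in created_resources, so clean() can delete the
        # flavor during teardown.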
self.mano['details']['flavor']['id'] = flavor_info.id + + def prepare_network(self): + """Create network/subnet/router if they doen't exist yet""" + self.logger.info( + "Creating network/subnet/router if they doen't exist yet...") + subnet_settings = SubnetSettings( + name='%s_subnet' % + self.case_name, + cidr="192.168.100.0/24") + network_settings = NetworkSettings( + name='%s_net' % + self.case_name, + subnet_settings=[subnet_settings]) + orchestra_network = OpenStackNetwork( + self.snaps_creds, network_settings) + orchestra_network_info = orchestra_network.create() + self.mano['details']['network'] = {} + self.mano['details']['network']['id'] = orchestra_network_info.id + self.mano['details']['network']['name'] = orchestra_network_info.name + self.mano['details']['external_net_name'] = snaps_utils.\ + get_ext_net_name(self.snaps_creds) + self.created_resources.append(orchestra_network) + orchestra_router = OpenStackRouter( + self.snaps_creds, + RouterSettings( + name='%s_router' % + self.case_name, + external_gateway=self.mano['details']['external_net_name'], + internal_subnets=[ + subnet_settings.name])) + orchestra_router.create() + self.created_resources.append(orchestra_router) + self.logger.info("Created network and router for Open Baton NFVO...") + + def prepare_floating_ip(self): + """Select/Create Floating IP if it doesn't exist yet""" + self.logger.info("Retrieving floating IP for Open Baton NFVO") + neutron_client = snaps_utils.neutron_utils.neutron_client( + self.snaps_creds) + # Finding Tenant ID to check to which tenant the Floating IP belongs + tenant_id = os_utils.get_tenant_id( + os_utils.get_keystone_client(self.creds), + self.tenant_name) + # Use os_utils to retrieve complete information of Floating IPs + floating_ips = os_utils.get_floating_ips(neutron_client) + my_floating_ips = [] + # Filter Floating IPs with tenant id + for floating_ip in floating_ips: + # self.logger.info("Floating IP: %s", floating_ip) + if floating_ip.get('tenant_id') == tenant_id: + my_floating_ips.append(floating_ip.get('floating_ip_address')) + # Select if Floating IP exist else create new one + if len(my_floating_ips) >= 1: + # Get Floating IP object from snaps for clean up + snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips( + neutron_client) + for my_floating_ip in my_floating_ips: + for snaps_floating_ip in snaps_floating_ips: + if snaps_floating_ip.ip == my_floating_ip: + self.mano['details']['fip'] = snaps_floating_ip + self.logger.info( + "Selected floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + break + if self.mano['details']['fip'] is not None: + break + else: + self.logger.info("Creating floating IP for Open Baton NFVO") + self.mano['details']['fip'] = snaps_utils.neutron_utils.\ + create_floating_ip( + neutron_client, + self.mano['details']['external_net_name']) + self.logger.info( + "Created floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + + def get_vim_descriptor(self): + """"Create VIM descriptor to be used for onboarding""" + self.logger.info( + "Building VIM descriptor with PoP creds: %s", + self.creds) + # Depending on API version either tenant ID or project name must be + # used + if os_utils.is_keystone_v3(): + self.logger.info( + "Using v3 API of OpenStack... -> Using OS_PROJECT_ID") + project_id = os_utils.get_tenant_id( + os_utils.get_keystone_client(), + self.creds.get("project_name")) + else: + self.logger.info( + "Using v2 API of OpenStack... 
-> Using OS_TENANT_NAME") + project_id = self.creds.get("tenant_name") + self.logger.debug("VIM project/tenant id: %s", project_id) + vim_json = { + "name": "vim-instance", + "authUrl": self.creds.get("auth_url"), + "tenant": project_id, + "username": self.creds.get("username"), + "password": self.creds.get("password"), + "securityGroups": [ + self.mano['details']['sec_group'] + ], + "type": "openstack", + "location": { + "name": "opnfv", + "latitude": "52.525876", + "longitude": "13.314400" + } + } + self.logger.info("Built VIM descriptor: %s", vim_json) + return vim_json + + def deploy_orchestrator(self): + self.logger.info("Deploying Open Baton...") + self.logger.info("Details: %s", self.mano['details']) + start_time = time.time() + + self.logger.info("Creating orchestra instance...") + userdata = get_userdata(self.mano) + self.logger.info("flavor: %s\n" + "image: %s\n" + "network_id: %s\n", + self.mano['details']['flavor']['name'], + self.mano['requirements']['image'], + self.mano['details']['network']['id']) + self.logger.debug("userdata: %s\n", userdata) + # setting up image + image_settings = ImageSettings( + name=self.mano['requirements']['image'], + image_user='ubuntu', + exists=True) + # setting up port + port_settings = PortSettings( + name='%s_port' % self.case_name, + network_name=self.mano['details']['network']['name']) + # build configuration of vm + orchestra_settings = VmInstanceSettings( + name=self.case_name, + flavor=self.mano['details']['flavor']['name'], + port_settings=[port_settings], + security_group_names=[self.mano['details']['sec_group']], + userdata=userdata) + orchestra_vm = OpenStackVmInstance(self.snaps_creds, + orchestra_settings, + image_settings) + + orchestra_vm.create() + self.created_resources.append(orchestra_vm) + self.mano['details']['id'] = orchestra_vm.get_vm_info()['id'] + self.logger.info( + "Created orchestra instance: %s", + self.mano['details']['id']) + + self.logger.info("Associating floating ip: '%s' to VM '%s' ", + self.mano['details']['fip'].ip, + self.case_name) + nova_client = os_utils.get_nova_client() + if not os_utils.add_floating_ip( + nova_client, + self.mano['details']['id'], + self.mano['details']['fip'].ip): + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Cannot associate floating IP to VM.") + return False + + self.logger.info("Waiting for Open Baton NFVO to be up and running...") + timeout = 0 + while timeout < 200: + if servertest( + self.mano['details']['fip'].ip, + "8080"): + break + else: + self.logger.info( + "Open Baton NFVO is not started yet (%ss)", + (timeout * 5)) + time.sleep(5) + timeout += 1 + + if timeout >= 200: + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Open Baton is not started correctly") + return False + + self.logger.info("Waiting for all components to be up and running...") + time.sleep(60) + duration = time.time() - start_time + self.details["orchestrator"].update(status='PASS', duration=duration) + self.logger.info("Deploy Open Baton NFVO: OK") + return True + + def deploy_vnf(self): + start_time = time.time() + self.logger.info("Deploying %s...", self.vnf['name']) + + main_agent = MainAgent( + nfvo_ip=self.mano['details']['fip'].ip, + nfvo_port=8080, + https=False, + version=1, + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + + self.logger.info( + "Create %s Flavor if not 
existing", self.vnf['name']) + flavor_settings = FlavorSettings( + name=self.vnf['requirements']['flavor']['name'], + ram=self.vnf['requirements']['flavor']['ram_min'], + disk=self.vnf['requirements']['flavor']['disk'], + vcpus=self.vnf['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.logger.debug("Flavor id: %s", flavor_info.id) + + self.logger.info("Getting project 'default'...") + project_agent = main_agent.get_agent("project", "") + for project in json.loads(project_agent.find()): + if project.get("name") == "default": + self.mano['details']['project_id'] = project.get("id") + self.logger.info("Found project 'default': %s", project) + break + + vim_json = self.get_vim_descriptor() + self.logger.info("Registering VIM: %s", vim_json) + + main_agent.get_agent( + "vim", project_id=self.mano['details']['project_id']).create( + entity=json.dumps(vim_json)) + + market_agent = main_agent.get_agent( + "market", project_id=self.mano['details']['project_id']) + + try: + self.logger.info("sending: %s", self.vnf['descriptor']['url']) + nsd = market_agent.create(entity=self.vnf['descriptor']['url']) + if nsd.get('id') is None: + self.logger.error("NSD not onboarded correctly") + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + self.mano['details']['nsd_id'] = nsd.get('id') + self.logger.info("Onboarded NSD: " + nsd.get("name")) + + nsr_agent = main_agent.get_agent( + "nsr", project_id=self.mano['details']['project_id']) + + self.mano['details']['nsr'] = nsr_agent.create( + self.mano['details']['nsd_id']) + except NfvoException as exc: + self.logger.error(exc.message) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + if self.mano['details']['nsr'].get('code') is not None: + self.logger.error( + "%s cannot be deployed: %s -> %s", + self.vnf['name'], + self.mano['details']['nsr'].get('code'), + self.mano['details']['nsr'].get('message')) + self.logger.error("%s cannot be deployed", self.vnf['name']) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + timeout = 0 + self.logger.info("Waiting for NSR to go to ACTIVE...") + while self.mano['details']['nsr'].get("status") != 'ACTIVE' \ + and self.mano['details']['nsr'].get("status") != 'ERROR': + timeout += 1 + self.logger.info("NSR is not yet ACTIVE... 
(%ss)", 5 * timeout) + if timeout == 300: + self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + time.sleep(5) + self.mano['details']['nsr'] = json.loads( + nsr_agent.find(self.mano['details']['nsr'].get('id'))) + + duration = time.time() - start_time + if self.mano['details']['nsr'].get("status") == 'ACTIVE': + self.details["vnf"].update(status='PASS', duration=duration) + self.logger.info("Sleep for 60s to ensure that all " + "services are up and running...") + time.sleep(60) + result = True + else: + self.details["vnf"].update(status='FAIL', duration=duration) + self.logger.error("NSR: %s", self.mano['details'].get('nsr')) + result = False + return result + + def test_vnf(self): + self.logger.info( + "Testing VNF Clearwater IMS is not yet implemented...") + start_time = time.time() + + duration = time.time() - start_time + self.details["test_vnf"].update(status='PASS', duration=duration) + self.logger.info("Test VNF: OK") + return True + + def clean(self): + self.logger.info("Cleaning %s...", self.case_name) + try: + main_agent = MainAgent( + nfvo_ip=self.mano['details']['fip'].ip, + nfvo_port=8080, + https=False, + version=1, + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + self.logger.info("Terminating %s...", self.vnf['name']) + if (self.mano['details'].get('nsr')): + main_agent.get_agent( + "nsr", + project_id=self.mano['details']['project_id']).delete( + self.mano['details']['nsr'].get('id')) + self.logger.info("Sleeping 60 seconds...") + time.sleep(60) + else: + self.logger.info("No need to terminate the VNF...") + # os_utils.delete_instance(nova_client=os_utils.get_nova_client(), + # instance_id=self.mano_instance_id) + except (NfvoException, KeyError) as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + + try: + neutron_client = os_utils.get_neutron_client(self.creds) + self.logger.info("Deleting Open Baton Port...") + port = snaps_utils.neutron_utils.get_port_by_name( + neutron_client, '%s_port' % self.case_name) + snaps_utils.neutron_utils.delete_port(neutron_client, port) + time.sleep(10) + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + try: + self.logger.info("Deleting Open Baton Floating IP...") + snaps_utils.neutron_utils.delete_floating_ip( + neutron_client, self.mano['details']['fip']) + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + + for resource in reversed(self.created_resources): + try: + self.logger.info("Cleaning %s", str(resource)) + resource.clean() + except Exception as exc: + self.logger.error('Unexpected error cleaning - %s', exc) + super(ClearwaterImsVnf, self).clean() diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py deleted file mode 100644 index 7b1ea9ad..00000000 --- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py +++ /dev/null @@ -1,487 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2016 Orange and others. -# -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 - -import json -import logging -import os -import pkg_resources -import socket -import sys -import time -import yaml - -import functest.core.vnf as vnf -import functest.utils.openstack_utils as os_utils -from functest.utils.constants import CONST - -from org.openbaton.cli.agents.agents import MainAgent -from org.openbaton.cli.errors.errors import NfvoException - - -__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>" -# ---------------------------------------------------------- -# -# UTILS -# -# ----------------------------------------------------------- - - -def get_config(parameter, my_file): - """ - Returns the value of a given parameter in file.yaml - parameter must be given in string format with dots - Example: general.openstack.image_name - """ - with open(file) as f: - file_yaml = yaml.safe_load(f) - f.close() - value = file_yaml - for element in parameter.split("."): - value = value.get(element) - if value is None: - raise ValueError("The parameter %s is not defined in" - " %s" % (parameter, my_file)) - return value - - -def servertest(host, port): - args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) - for family, socktype, proto, canonname, sockaddr in args: - s = socket.socket(family, socktype, proto) - try: - s.connect(sockaddr) - except socket.error: - return False - else: - s.close() - return True - - -class ImsVnf(vnf.VnfOnBoarding): - """OpenIMS VNF deployed with openBaton orchestrator""" - - def __init__(self, project='functest', case_name='orchestra_ims', - repo='', cmd=''): - super(ImsVnf, self).__init__(project, case_name, repo, cmd) - self.logger = logging.getLogger(__name__) - self.logger.info("Orchestra IMS VNF onboarding test starting") - self.ob_password = "openbaton" - self.ob_username = "admin" - self.ob_https = False - self.ob_port = "8080" - self.ob_ip = "localhost" - self.ob_instance_id = "" - self.case_dir = pkg_resources.resource_filename( - 'functest', 'opnfv_tests/vnf/ims/') - self.data_dir = CONST.__getattribute__('dir_ims_data') - self.test_dir = CONST.__getattribute__('dir_repo_vims_test') - self.ob_projectid = "" - self.keystone_client = os_utils.get_keystone_client() - self.ob_nsr_id = "" - self.nsr = None - self.main_agent = None - # vIMS Data directory creation - if not os.path.exists(self.data_dir): - os.makedirs(self.data_dir) - # Retrieve the configuration - try: - self.config = CONST.__getattribute__( - 'vnf_{}_config'.format(self.case_name)) - except BaseException: - raise Exception("Orchestra VNF config file not found") - config_file = self.case_dir + self.config - self.imagename = get_config("openbaton.imagename", config_file) - self.bootstrap_link = get_config("openbaton.bootstrap_link", - config_file) - self.bootstrap_config_link = get_config( - "openbaton.bootstrap_config_link", config_file) - self.market_link = get_config("openbaton.marketplace_link", - config_file) - self.images = get_config("tenant_images", config_file) - self.ims_conf = get_config("vIMS", config_file) - self.userdata_file = get_config("openbaton.userdata.file", - config_file) - - def deploy_orchestrator(self): - self.logger.info("Additional pre-configuration steps") - nova_client = os_utils.get_nova_client() - neutron_client = os_utils.get_neutron_client() - glance_client = os_utils.get_glance_client() - - # Import 
images if needed - # needs some images - self.logger.info("Upload some OS images if it doesn't exist") - temp_dir = os.path.join(self.data_dir, "tmp/") - for image_name, image_url in self.images.iteritems(): - self.logger.info("image: %s, url: %s", image_name, image_url) - try: - image_id = os_utils.get_image_id(glance_client, - image_name) - self.logger.info("image_id: %s", image_id) - except BaseException: - self.logger.error("Unexpected error: %s", sys.exc_info()[0]) - - if image_id == '': - self.logger.info("""%s image doesn't exist on glance - repository. Try downloading this image - and upload on glance !""" % image_name) - image_id = os_utils.download_and_add_image_on_glance( - glance_client, - image_name, - image_url, - temp_dir) - if image_id == '': - self.logger.error("Failed to find or upload required OS " - "image for this deployment") - return False - - network_dic = os_utils.create_network_full(neutron_client, - "openbaton_mgmt", - "openbaton_mgmt_subnet", - "openbaton_router", - "192.168.100.0/24") - - # orchestrator VM flavor - self.logger.info( - "Check if orchestra Flavor is available, if not create one") - flavor_exist, flavor_id = os_utils.get_or_create_flavor( - "orchestra", - "4096", - '20', - '2', - public=True) - self.logger.debug("Flavor id: %s" % flavor_id) - - if not network_dic: - self.logger.error("There has been a problem when creating the " - "neutron network") - - network_id = network_dic["net_id"] - - self.logger.info("Creating floating IP for VM in advance...") - floatip_dic = os_utils.create_floating_ip(neutron_client) - floatip = floatip_dic['fip_addr'] - - if floatip is None: - self.logger.error("Cannot create floating IP.") - return False - - userdata = "#!/bin/bash\n" - userdata += "echo \"Executing userdata...\"\n" - userdata += "set -x\n" - userdata += "set -e\n" - userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n" - userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n" - userdata += "echo \"Install curl...\"\n" - userdata += "apt-get install curl\n" - userdata += "echo \"Inject public key...\"\n" - userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3" - "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc" - "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv" - "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S" - "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O" - "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc" - "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya" - "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut" - "horized_keys\n") - userdata += "echo \"Download bootstrap...\"\n" - userdata += ("curl -s %s " - "> ./bootstrap\n" % self.bootstrap_link) - userdata += ("curl -s %s" - "> ./config_file\n" % self.bootstrap_config_link) - userdata += ("echo \"Disable usage of mysql...\"\n") - userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n" - userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" - % floatip) - userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip" - "=%s/g /config_file\n" % floatip) - userdata += "echo \"Set autostart of components to 'false'\"\n" - userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n" - userdata += "echo \"Execute bootstrap...\"\n" - bootstrap = "sh ./bootstrap release -configFile=./config_file" - userdata += bootstrap + "\n" - userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n" - userdata += ("echo \"nfvo.plugin.timeout=600000\" >> " - "/etc/openbaton/openbaton-nfvo.properties\n") - userdata += 
( - "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" % - self.userdata_file) - userdata += "sed -i '113i\ \ \ \ sleep 60' " \ - "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" - userdata += "echo \"Starting NFVO\"\n" - userdata += "service openbaton-nfvo restart\n" - userdata += "echo \"Starting Generic VNFM\"\n" - userdata += "service openbaton-vnfm-generic restart\n" - userdata += "echo \"...end of userdata...\"\n" - - sg_id = os_utils.create_security_group_full(neutron_client, - "orchestra-sec-group", - "allowall") - - os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", - "icmp", 0, 255) - os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", - "icmp", 0, 255) - os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", - "tcp", 1, 65535) - os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", - "udp", 1, 65535) - os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", - "tcp", 1, 65535) - os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", - "udp", 1, 65535) - - self.logger.info("Security group set") - - self.logger.info("Create instance....") - self.logger.info("flavor: m1.medium\n" - "image: %s\n" - "network_id: %s\n" - "userdata: %s\n", - self.imagename, - network_id, - userdata) - - instance = os_utils.create_instance_and_wait_for_active( - "orchestra", - os_utils.get_image_id(glance_client, self.imagename), - network_id, - "orchestra-openbaton", - config_drive=False, - userdata=userdata) - - self.ob_instance_id = instance.id - - self.logger.info("Adding sec group to orchestra instance") - os_utils.add_secgroup_to_instance(nova_client, - self.ob_instance_id, sg_id) - - self.logger.info("Associating floating ip: '%s' to VM '%s' ", - floatip, - "orchestra-openbaton") - if not os_utils.add_floating_ip(nova_client, instance.id, floatip): - self.logger.error("Cannot associate floating IP to VM.") - return False - - self.logger.info("Waiting for Open Baton NFVO to be up and running...") - x = 0 - while x < 200: - if servertest(floatip, "8080"): - break - else: - self.logger.debug( - "Open Baton NFVO is not started yet (%ss)" % - (x * 5)) - time.sleep(5) - x += 1 - - if x == 200: - self.logger.error("Open Baton is not started correctly") - - self.ob_ip = floatip - self.ob_password = "openbaton" - self.ob_username = "admin" - self.ob_https = False - self.ob_port = "8080" - self.logger.info("Waiting for all components up and running...") - time.sleep(60) - self.details["orchestrator"] = { - 'status': "PASS", 'result': "Deploy Open Baton NFVO: OK"} - self.logger.info("Deploy Open Baton NFVO: OK") - return True - - def deploy_vnf(self): - self.logger.info("Starting vIMS Deployment...") - - self.main_agent = MainAgent(nfvo_ip=self.ob_ip, - nfvo_port=self.ob_port, - https=self.ob_https, - version=1, - username=self.ob_username, - password=self.ob_password) - - self.logger.info( - "Check if openims Flavor is available, if not create one") - flavor_exist, flavor_id = os_utils.get_or_create_flavor( - "m1.small", - "2048", - '20', - '1', - public=True) - self.logger.debug("Flavor id: %s", flavor_id) - - self.logger.info("Getting project 'default'...") - project_agent = self.main_agent.get_agent("project", self.ob_projectid) - for p in json.loads(project_agent.find()): - if p.get("name") == "default": - self.ob_projectid = p.get("id") - self.logger.info("Found project 'default': %s", p) - break - - self.logger.debug("project id: %s", self.ob_projectid) - if self.ob_projectid == "": - self.logger.error("Default project id 
was not found!") - - creds = os_utils.get_credentials() - self.logger.info("PoP creds: %s", creds) - - if os_utils.is_keystone_v3(): - self.logger.info( - "Using v3 API of OpenStack... -> Using OS_PROJECT_ID") - project_id = os_utils.get_tenant_id( - os_utils.get_keystone_client(), - creds.get("project_name")) - else: - self.logger.info( - "Using v2 API of OpenStack... -> Using OS_TENANT_NAME") - project_id = creds.get("tenant_name") - - self.logger.debug("project id: %s", project_id) - - vim_json = { - "name": "vim-instance", - "authUrl": creds.get("auth_url"), - "tenant": project_id, - "username": creds.get("username"), - "password": creds.get("password"), - "securityGroups": [ - "default", - "orchestra-sec-group" - ], - "type": "openstack", - "location": { - "name": "opnfv", - "latitude": "52.525876", - "longitude": "13.314400" - } - } - - self.logger.debug("Registering VIM: %s", vim_json) - - self.main_agent.get_agent( - "vim", - project_id=self.ob_projectid).create(entity=json.dumps(vim_json)) - - market_agent = self.main_agent.get_agent("market", - project_id=self.ob_projectid) - - nsd = {} - try: - self.logger.info("sending: %s", self.market_link) - nsd = market_agent.create(entity=self.market_link) - self.logger.info("Onboarded NSD: " + nsd.get("name")) - except NfvoException as e: - self.logger.error(e.message) - - nsr_agent = self.main_agent.get_agent("nsr", - project_id=self.ob_projectid) - nsd_id = nsd.get('id') - if nsd_id is None: - self.logger.error("NSD not onboarded correctly") - - try: - self.nsr = nsr_agent.create(nsd_id) - except NfvoException as e: - self.logger.error(e.message) - - if self.nsr.get('code') is not None: - self.logger.error( - "vIMS cannot be deployed: %s -> %s", - self.nsr.get('code'), - self.nsr.get('message')) - self.logger.error("vIMS cannot be deployed") - - i = 0 - self.logger.info("Waiting for NSR to go to ACTIVE...") - while self.nsr.get("status") != 'ACTIVE' and self.nsr.get( - "status") != 'ERROR': - i += 1 - if i == 150: - self.logger.error("INACTIVE NSR after %s sec..", 5 * i) - - time.sleep(5) - self.nsr = json.loads(nsr_agent.find(self.nsr.get('id'))) - - if self.nsr.get("status") == 'ACTIVE': - self.details["vnf"] = {'status': "PASS", 'result': self.nsr} - self.logger.info("Deploy VNF: OK") - else: - self.details["vnf"] = {'status': "FAIL", 'result': self.nsr} - self.logger.error(self.nsr) - self.logger.error("Deploy VNF: ERROR") - return False - - self.ob_nsr_id = self.nsr.get("id") - self.logger.info( - "Sleep for 60s to ensure that all services are up and running...") - time.sleep(60) - return True - - def test_vnf(self): - # Adaptations probably needed - # code used for cloudify_ims - # ruby client on jumphost calling the vIMS on the SUT - self.logger.info( - "Testing if %s works properly...", self.nsr.get('name')) - for vnfr in self.nsr.get('vnfr'): - self.logger.info( - "Checking ports %s of VNF %s", - self.ims_conf.get(vnfr.get('name')).get('ports'), - vnfr.get('name')) - for vdu in vnfr.get('vdu'): - for vnfci in vdu.get('vnfc_instance'): - self.logger.debug( - "Checking ports of VNFC instance %s", - vnfci.get('hostname')) - for floatingIp in vnfci.get('floatingIps'): - self.logger.debug( - "Testing %s:%s", - vnfci.get('hostname'), - floatingIp.get('ip')) - for port in self.ims_conf.get( - vnfr.get('name')).get('ports'): - if servertest(floatingIp.get('ip'), port): - self.logger.info( - "VNFC instance %s is reachable at %s:%s", - vnfci.get('hostname'), - floatingIp.get('ip'), - port) - else: - self.logger.error( - "VNFC instance %s 
is not reachable " - "at %s:%s", - vnfci.get('hostname'), - floatingIp.get('ip'), - port) - self.details["test_vnf"] = { - 'status': "FAIL", 'result': ( - "Port %s of server %s -> %s is " - "not reachable", - port, - vnfci.get('hostname'), - floatingIp.get('ip'))} - self.logger.error("Test VNF: ERROR") - return False - - self.details["test_vnf"] = { - 'status': "PASS", - 'result': "All tests have been executed successfully"} - self.logger.info("Test VNF: OK") - return True - - def clean(self): - self.main_agent.get_agent( - "nsr", - project_id=self.ob_projectid).delete(self.ob_nsr_id) - time.sleep(5) - os_utils.delete_instance(nova_client=os_utils.get_nova_client(), - instance_id=self.ob_instance_id) - # question is the clean removing also the VM? - # I think so since is goinf to remove the tenant... - super(ImsVnf, self).clean() diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml b/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml deleted file mode 100644 index 5b25d3c9..00000000 --- a/functest/opnfv_tests/vnf/ims/orchestra_ims.yaml +++ /dev/null @@ -1,21 +0,0 @@ -tenant_images: - ubuntu_14.04: http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img - openims: http://marketplace.openbaton.org:8082/api/v1/images/52e2ccc0-1dce-4663-894d-28aab49323aa/img -openbaton: - bootstrap_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap - bootstrap_config_link: http://get.openbaton.org/bootstraps/bootstrap_3.2.0_opnfv/bootstrap-config-file - userdata: - file: https://raw.githubusercontent.com/openbaton/generic-vnfm/3.2.0/src/main/resources/user-data.sh - marketplace_link: http://marketplace.openbaton.org:8082/api/v1/nsds/fokus/OpenImsCore/3.2.0/json - imagename: ubuntu_14.04 -vIMS: - scscf: - ports: [3870, 6060] - pcscf: - ports: [4060] - icscf: - ports: [3869, 5060] - fhoss: - ports: [3868] - bind9: - ports: []
\ No newline at end of file diff --git a/functest/opnfv_tests/vnf/ims/orchestra_openims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py new file mode 100644 index 00000000..f8acada4 --- /dev/null +++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py @@ -0,0 +1,718 @@ +#!/usr/bin/env python + +# Copyright (c) 2016 Orange and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 + +"""Orchestra OpenIMS testcase implementation.""" + +import json +import logging +import os +import socket +import time +import pkg_resources +import yaml + + +from snaps.openstack.create_image import OpenStackImage, ImageSettings +from snaps.openstack.create_flavor import OpenStackFlavor, FlavorSettings +from snaps.openstack.create_security_group import ( + OpenStackSecurityGroup, + SecurityGroupSettings, + SecurityGroupRuleSettings, + Direction, + Protocol) +from snaps.openstack.create_network import ( + OpenStackNetwork, + NetworkSettings, + SubnetSettings, + PortSettings) +from snaps.openstack.create_router import OpenStackRouter, RouterSettings +from snaps.openstack.os_credentials import OSCreds +from snaps.openstack.create_instance import ( + VmInstanceSettings, OpenStackVmInstance) +from functest.opnfv_tests.openstack.snaps import snaps_utils + +import functest.core.vnf as vnf +import functest.utils.openstack_utils as os_utils +from functest.utils.constants import CONST + +from org.openbaton.cli.errors.errors import NfvoException +from org.openbaton.cli.agents.agents import MainAgent + + +__author__ = "Pauls, Michael <michael.pauls@fokus.fraunhofer.de>" +# ---------------------------------------------------------- +# +# UTILS +# +# ----------------------------------------------------------- + + +def get_config(parameter, file_path): + """ + Get config parameter. 
+ + Returns the value of a given parameter in file.yaml + parameter must be given in string format with dots + Example: general.openstack.image_name + """ + with open(file_path) as config_file: + file_yaml = yaml.safe_load(config_file) + config_file.close() + value = file_yaml + for element in parameter.split("."): + value = value.get(element) + if value is None: + raise ValueError("The parameter %s is not defined in" + " reporting.yaml", parameter) + return value + + +def servertest(host, port): + """Method to test that a server is reachable at IP:port""" + args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + for family, socktype, proto, canonname, sockaddr in args: + sock = socket.socket(family, socktype, proto) + try: + sock.connect(sockaddr) + except socket.error: + return False + else: + sock.close() + return True + + +def get_userdata(orchestrator=dict): + """Build userdata for Open Baton machine""" + userdata = "#!/bin/bash\n" + userdata += "echo \"Executing userdata...\"\n" + userdata += "set -x\n" + userdata += "set -e\n" + userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n" + userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n" + userdata += "echo \"Install curl...\"\n" + userdata += "apt-get install curl\n" + userdata += "echo \"Inject public key...\"\n" + userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3" + "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc" + "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv" + "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S" + "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O" + "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc" + "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya" + "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut" + "horized_keys\n") + userdata += "echo \"Download bootstrap...\"\n" + userdata += ("curl -s %s " + "> ./bootstrap\n" % orchestrator['bootstrap']['url']) + userdata += ("curl -s %s" "> ./config_file\n" % + orchestrator['bootstrap']['config']['url']) + userdata += ("echo \"Disable usage of mysql...\"\n") + userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n" + userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" + % orchestrator['details']['fip'].ip) + userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip" + "=%s/g /config_file\n" % orchestrator['details']['fip'].ip) + userdata += "echo \"Set autostart of components to 'false'\"\n" + userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n" + userdata += "echo \"Execute bootstrap...\"\n" + bootstrap = "sh ./bootstrap release -configFile=./config_file" + userdata += bootstrap + "\n" + userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n" + userdata += ("echo \"nfvo.plugin.timeout=600000\" >> " + "/etc/openbaton/openbaton-nfvo.properties\n") + userdata += ( + "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" % + orchestrator['gvnfm']['userdata']['url']) + userdata += "sed -i '113i"'\ \ \ \ '"sleep 60' " \ + "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" + userdata += "echo \"Starting NFVO\"\n" + userdata += "service openbaton-nfvo restart\n" + userdata += "echo \"Starting Generic VNFM\"\n" + userdata += "service openbaton-vnfm-generic restart\n" + userdata += "echo \"...end of userdata...\"\n" + return userdata + + +class OpenImsVnf(vnf.VnfOnBoarding): + """OpenIMS VNF deployed with openBaton orchestrator""" + + logger = logging.getLogger(__name__) + + def __init__(self, **kwargs): + if "case_name" not 
in kwargs: + kwargs["case_name"] = "orchestra_openims" + super(OpenImsVnf, self).__init__(**kwargs) + # self.logger = logging.getLogger("functest.ci.run_tests.orchestra") + self.logger.info("kwargs %s", (kwargs)) + + self.case_dir = pkg_resources.resource_filename( + 'functest', 'opnfv_tests/vnf/ims/') + self.data_dir = CONST.__getattribute__('dir_ims_data') + self.test_dir = CONST.__getattribute__('dir_repo_vims_test') + self.created_resources = [] + self.logger.info("%s VNF onboarding test starting", self.case_name) + + try: + self.config = CONST.__getattribute__( + 'vnf_{}_config'.format(self.case_name)) + except BaseException: + raise Exception("Orchestra VNF config file not found") + config_file = self.case_dir + self.config + + self.mano = dict( + get_config("mano", config_file), + details={} + ) + self.logger.debug("Orchestrator configuration %s", self.mano) + + self.details['orchestrator'] = dict( + name=self.mano['name'], + version=self.mano['version'], + status='ERROR', + result='' + ) + + self.vnf = dict( + get_config(self.case_name, config_file), + ) + self.logger.debug("VNF configuration: %s", self.vnf) + + self.details['vnf'] = dict( + name=self.vnf['name'], + ) + + self.details['test_vnf'] = dict( + name=self.case_name, + ) + + # Orchestra base Data directory creation + if not os.path.exists(self.data_dir): + os.makedirs(self.data_dir) + + self.images = get_config("tenant_images.orchestrator", config_file) + self.images.update(get_config("tenant_images.%s" % + self.case_name, config_file)) + self.snaps_creds = None + + def prepare(self): + """Prepare testscase (Additional pre-configuration steps).""" + super(OpenImsVnf, self).prepare() + + self.logger.info("Additional pre-configuration steps") + self.logger.info("creds %s", (self.creds)) + + self.snaps_creds = OSCreds( + username=self.creds['username'], + password=self.creds['password'], + auth_url=self.creds['auth_url'], + project_name=self.creds['tenant'], + identity_api_version=int(os_utils.get_keystone_client_version())) + + self.prepare_images() + self.prepare_flavor() + self.prepare_security_groups() + self.prepare_network() + self.prepare_floating_ip() + + def prepare_images(self): + """Upload images if they doen't exist yet""" + self.logger.info("Upload images if they doen't exist yet") + for image_name, image_file in self.images.iteritems(): + self.logger.info("image: %s, file: %s", image_name, image_file) + if image_file and image_name: + image = OpenStackImage( + self.snaps_creds, + ImageSettings(name=image_name, + image_user='cloud', + img_format='qcow2', + image_file=image_file)) + image.create() + # self.created_resources.append(image); + + def prepare_security_groups(self): + """Create Open Baton security group if it doesn't exist yet""" + self.logger.info( + "Creating security group for Open Baton if not yet existing...") + sg_rules = list() + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.tcp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + 
sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.udp, + port_range_min=1, + port_range_max=65535)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.ingress, + protocol=Protocol.icmp)) + sg_rules.append( + SecurityGroupRuleSettings( + sec_grp_name="orchestra-sec-group-allowall", + direction=Direction.egress, + protocol=Protocol.icmp)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.ingress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + # sg_rules.append( + # SecurityGroupRuleSettings( + # sec_grp_name="orchestra-sec-group-allowall", + # direction=Direction.egress, + # protocol=Protocol.icmp, + # port_range_min=-1, + # port_range_max=-1)) + + security_group = OpenStackSecurityGroup( + self.snaps_creds, + SecurityGroupSettings( + name="orchestra-sec-group-allowall", + rule_settings=sg_rules)) + + security_group_info = security_group.create() + self.created_resources.append(security_group) + self.mano['details']['sec_group'] = security_group_info.name + self.logger.info( + "Security group orchestra-sec-group-allowall prepared") + + def prepare_flavor(self): + """Create Open Baton flavor if it doesn't exist yet""" + self.logger.info( + "Create Flavor for Open Baton NFVO if not yet existing") + + flavor_settings = FlavorSettings( + name=self.mano['requirements']['flavor']['name'], + ram=self.mano['requirements']['flavor']['ram_min'], + disk=self.mano['requirements']['flavor']['disk'], + vcpus=self.mano['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.created_resources.append(flavor) + self.mano['details']['flavor'] = {} + self.mano['details']['flavor']['name'] = flavor_settings.name + self.mano['details']['flavor']['id'] = flavor_info.id + + def prepare_network(self): + """Create network/subnet/router if they doen't exist yet""" + self.logger.info( + "Creating network/subnet/router if they doen't exist yet...") + subnet_settings = SubnetSettings( + name='%s_subnet' % + self.case_name, + cidr="192.168.100.0/24") + network_settings = NetworkSettings( + name='%s_net' % + self.case_name, + subnet_settings=[subnet_settings]) + orchestra_network = OpenStackNetwork( + self.snaps_creds, network_settings) + orchestra_network_info = orchestra_network.create() + self.mano['details']['network'] = {} + self.mano['details']['network']['id'] = orchestra_network_info.id + self.mano['details']['network']['name'] = orchestra_network_info.name + self.mano['details']['external_net_name'] = \ + snaps_utils.get_ext_net_name(self.snaps_creds) + self.created_resources.append(orchestra_network) + orchestra_router = OpenStackRouter( + self.snaps_creds, + RouterSettings( + name='%s_router' % + self.case_name, + external_gateway=self.mano['details']['external_net_name'], + internal_subnets=[ + subnet_settings.name])) + orchestra_router.create() + self.created_resources.append(orchestra_router) + self.logger.info("Created network and router for Open Baton NFVO...") + + def prepare_floating_ip(self): + """Select/Create Floating IP if it doesn't exist yet""" + self.logger.info("Retrieving floating IP for Open Baton NFVO") + neutron_client = snaps_utils.neutron_utils.neutron_client( + self.snaps_creds) + # Finding Tenant ID to check to which tenant the Floating IP belongs + tenant_id = os_utils.get_tenant_id( + 
os_utils.get_keystone_client(self.creds), + self.tenant_name) + # Use os_utils to retrieve complete information of Floating IPs + floating_ips = os_utils.get_floating_ips(neutron_client) + my_floating_ips = [] + # Filter Floating IPs with tenant id + for floating_ip in floating_ips: + # self.logger.info("Floating IP: %s", floating_ip) + if floating_ip.get('tenant_id') == tenant_id: + my_floating_ips.append(floating_ip.get('floating_ip_address')) + # Select if Floating IP exist else create new one + if len(my_floating_ips) >= 1: + # Get Floating IP object from snaps for clean up + snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips( + neutron_client) + for my_floating_ip in my_floating_ips: + for snaps_floating_ip in snaps_floating_ips: + if snaps_floating_ip.ip == my_floating_ip: + self.mano['details']['fip'] = snaps_floating_ip + self.logger.info( + "Selected floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + break + if self.mano['details']['fip'] is not None: + break + else: + self.logger.info("Creating floating IP for Open Baton NFVO") + self.mano['details']['fip'] = ( + snaps_utils.neutron_utils. create_floating_ip( + neutron_client, self.mano['details']['external_net_name'])) + self.logger.info( + "Created floating IP for Open Baton NFVO %s", + (self.mano['details']['fip'].ip)) + + def get_vim_descriptor(self): + """"Create VIM descriptor to be used for onboarding""" + self.logger.info( + "Building VIM descriptor with PoP creds: %s", + self.creds) + # Depending on API version either tenant ID or project name must be + # used + if os_utils.is_keystone_v3(): + self.logger.info( + "Using v3 API of OpenStack... -> Using OS_PROJECT_ID") + project_id = os_utils.get_tenant_id( + os_utils.get_keystone_client(), + self.creds.get("project_name")) + else: + self.logger.info( + "Using v2 API of OpenStack... 
-> Using OS_TENANT_NAME") + project_id = self.creds.get("tenant_name") + self.logger.debug("VIM project/tenant id: %s", project_id) + vim_json = { + "name": "vim-instance", + "authUrl": self.creds.get("auth_url"), + "tenant": project_id, + "username": self.creds.get("username"), + "password": self.creds.get("password"), + "securityGroups": [ + self.mano['details']['sec_group'] + ], + "type": "openstack", + "location": { + "name": "opnfv", + "latitude": "52.525876", + "longitude": "13.314400" + } + } + self.logger.info("Built VIM descriptor: %s", vim_json) + return vim_json + + def deploy_orchestrator(self): + self.logger.info("Deploying Open Baton...") + self.logger.info("Details: %s", self.mano['details']) + start_time = time.time() + + self.logger.info("Creating orchestra instance...") + userdata = get_userdata(self.mano) + self.logger.info("flavor: %s\n" + "image: %s\n" + "network_id: %s\n", + self.mano['details']['flavor']['name'], + self.mano['requirements']['image'], + self.mano['details']['network']['id']) + self.logger.debug("userdata: %s\n", userdata) + # setting up image + image_settings = ImageSettings( + name=self.mano['requirements']['image'], + image_user='ubuntu', + exists=True) + # setting up port + port_settings = PortSettings( + name='%s_port' % self.case_name, + network_name=self.mano['details']['network']['name']) + # build configuration of vm + orchestra_settings = VmInstanceSettings( + name=self.case_name, + flavor=self.mano['details']['flavor']['name'], + port_settings=[port_settings], + security_group_names=[self.mano['details']['sec_group']], + userdata=userdata) + orchestra_vm = OpenStackVmInstance(self.snaps_creds, + orchestra_settings, + image_settings) + + orchestra_vm.create() + self.created_resources.append(orchestra_vm) + self.mano['details']['id'] = orchestra_vm.get_vm_info()['id'] + self.logger.info( + "Created orchestra instance: %s", + self.mano['details']['id']) + + self.logger.info("Associating floating ip: '%s' to VM '%s' ", + self.mano['details']['fip'].ip, + self.case_name) + nova_client = os_utils.get_nova_client() + if not os_utils.add_floating_ip( + nova_client, + self.mano['details']['id'], + self.mano['details']['fip'].ip): + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Cannot associate floating IP to VM.") + return False + + self.logger.info("Waiting for Open Baton NFVO to be up and running...") + timeout = 0 + while timeout < 200: + if servertest( + self.mano['details']['fip'].ip, + "8080"): + break + else: + self.logger.info("Open Baton NFVO is not started yet (%ss)", + (timeout * 5)) + time.sleep(5) + timeout += 1 + + if timeout >= 200: + duration = time.time() - start_time + self.details["orchestrator"].update( + status='FAIL', duration=duration) + self.logger.error("Open Baton is not started correctly") + return False + + self.logger.info("Waiting for all components to be up and running...") + time.sleep(60) + duration = time.time() - start_time + self.details["orchestrator"].update(status='PASS', duration=duration) + self.logger.info("Deploy Open Baton NFVO: OK") + return True + + def deploy_vnf(self): + start_time = time.time() + self.logger.info("Deploying %s...", self.vnf['name']) + + main_agent = MainAgent( + nfvo_ip=self.mano['details']['fip'].ip, + nfvo_port=8080, + https=False, + version=1, + username=self.mano['credentials']['username'], + password=self.mano['credentials']['password']) + + self.logger.info( + "Create %s Flavor if not existing", 
self.vnf['name']) + flavor_settings = FlavorSettings( + name=self.vnf['requirements']['flavor']['name'], + ram=self.vnf['requirements']['flavor']['ram_min'], + disk=self.vnf['requirements']['flavor']['disk'], + vcpus=self.vnf['requirements']['flavor']['vcpus']) + flavor = OpenStackFlavor(self.snaps_creds, flavor_settings) + flavor_info = flavor.create() + self.logger.debug("Flavor id: %s", flavor_info.id) + + self.logger.info("Getting project 'default'...") + project_agent = main_agent.get_agent("project", "") + for project in json.loads(project_agent.find()): + if project.get("name") == "default": + self.mano['details']['project_id'] = project.get("id") + self.logger.info("Found project 'default': %s", project) + break + + vim_json = self.get_vim_descriptor() + self.logger.info("Registering VIM: %s", vim_json) + + main_agent.get_agent( + "vim", project_id=self.mano['details']['project_id']).create( + entity=json.dumps(vim_json)) + + market_agent = main_agent.get_agent( + "market", project_id=self.mano['details']['project_id']) + + try: + self.logger.info("sending: %s", self.vnf['descriptor']['url']) + nsd = market_agent.create(entity=self.vnf['descriptor']['url']) + if nsd.get('id') is None: + self.logger.error("NSD not onboarded correctly") + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + self.mano['details']['nsd_id'] = nsd.get('id') + self.logger.info("Onboarded NSD: " + nsd.get("name")) + + nsr_agent = main_agent.get_agent( + "nsr", project_id=self.mano['details']['project_id']) + + self.mano['details']['nsr'] = nsr_agent.create( + self.mano['details']['nsd_id']) + except NfvoException as exc: + self.logger.error(exc.message) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + if self.mano['details']['nsr'].get('code') is not None: + self.logger.error( + "%s cannot be deployed: %s -> %s", + self.vnf['name'], + self.mano['details']['nsr'].get('code'), + self.mano['details']['nsr'].get('message')) + self.logger.error("%s cannot be deployed", self.vnf['name']) + duration = time.time() - start_time + self.details["vnf"].update(status='FAIL', duration=duration) + return False + + timeout = 0 + self.logger.info("Waiting for NSR to go to ACTIVE...") + while self.mano['details']['nsr'].get("status") != 'ACTIVE' \ + and self.mano['details']['nsr'].get("status") != 'ERROR': + timeout += 1 + self.logger.info("NSR is not yet ACTIVE... 
(%ss)", 5 * timeout)
+            if timeout == 300:
+                self.logger.error("INACTIVE NSR after %s sec..", 5 * timeout)
+                duration = time.time() - start_time
+                self.details["vnf"].update(status='FAIL', duration=duration)
+                return False
+            time.sleep(5)
+            self.mano['details']['nsr'] = json.loads(
+                nsr_agent.find(self.mano['details']['nsr'].get('id')))
+
+        duration = time.time() - start_time
+        if self.mano['details']['nsr'].get("status") == 'ACTIVE':
+            self.details["vnf"].update(status='PASS', duration=duration)
+            self.logger.info("Sleep for 60s to ensure that all "
+                             "services are up and running...")
+            time.sleep(60)
+            result = True
+        else:
+            self.details["vnf"].update(status='FAIL', duration=duration)
+            self.logger.error("NSR: %s", self.mano['details'].get('nsr'))
+            result = False
+        return result
+
+    def test_vnf(self):
+        self.logger.info("Testing VNF OpenIMS...")
+        start_time = time.time()
+        self.logger.info(
+            "Testing if %s works properly...",
+            self.mano['details']['nsr'].get('name'))
+        for vnfr in self.mano['details']['nsr'].get('vnfr'):
+            self.logger.info(
+                "Checking ports %s of VNF %s",
+                self.vnf['test'][vnfr.get('name')]['ports'],
+                vnfr.get('name'))
+            for vdu in vnfr.get('vdu'):
+                for vnfci in vdu.get('vnfc_instance'):
+                    self.logger.debug(
+                        "Checking ports of VNFC instance %s",
+                        vnfci.get('hostname'))
+                    for floating_ip in vnfci.get('floatingIps'):
+                        self.logger.debug(
+                            "Testing %s:%s",
+                            vnfci.get('hostname'),
+                            floating_ip.get('ip'))
+                        for port in self.vnf['test'][vnfr.get(
+                                'name')]['ports']:
+                            if servertest(floating_ip.get('ip'), port):
+                                self.logger.info(
+                                    "VNFC instance %s is reachable at %s:%s",
+                                    vnfci.get('hostname'),
+                                    floating_ip.get('ip'),
+                                    port)
+                            else:
+                                self.logger.error(
+                                    "VNFC instance %s is not reachable "
+                                    "at %s:%s",
+                                    vnfci.get('hostname'),
+                                    floating_ip.get('ip'),
+                                    port)
+                                duration = time.time() - start_time
+                                self.details["test_vnf"].update(
+                                    status='FAIL', duration=duration, result=(
+                                        "Port %s of server %s -> %s is "
+                                        "not reachable",
+                                        port,
+                                        vnfci.get('hostname'),
+                                        floating_ip.get('ip')))
+                                self.logger.error("Test VNF: ERROR")
+                                return False
+        duration = time.time() - start_time
+        self.details["test_vnf"].update(status='PASS', duration=duration)
+        self.logger.info("Test VNF: OK")
+        return True
+
+    def clean(self):
+        self.logger.info("Cleaning %s...", self.case_name)
+        try:
+            main_agent = MainAgent(
+                nfvo_ip=self.mano['details']['fip'].ip,
+                nfvo_port=8080,
+                https=False,
+                version=1,
+                username=self.mano['credentials']['username'],
+                password=self.mano['credentials']['password'])
+            self.logger.info("Terminating %s...", self.vnf['name'])
+            if (self.mano['details'].get('nsr')):
+                main_agent.get_agent(
+                    "nsr",
+                    project_id=self.mano['details']['project_id']).\
+                    delete(self.mano['details']['nsr'].get('id'))
+                self.logger.info("Sleeping 60 seconds...")
+                time.sleep(60)
+            else:
+                self.logger.info("No need to terminate the VNF...")
+            # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
+            #                          instance_id=self.mano_instance_id)
+        except (NfvoException, KeyError) as exc:
+            self.logger.error('Unexpected error cleaning - %s', exc)
+
+        try:
+            neutron_client = os_utils.get_neutron_client(self.creds)
+            self.logger.info("Deleting Open Baton Port...")
+            port = snaps_utils.neutron_utils.get_port_by_name(
+                neutron_client, '%s_port' % self.case_name)
+            snaps_utils.neutron_utils.delete_port(neutron_client, port)
+            time.sleep(10)
+        except Exception as exc:
+            self.logger.error('Unexpected error cleaning - %s', exc)
+        try:
+            self.logger.info("Deleting Open Baton 
Floating IP...")
+            snaps_utils.neutron_utils.delete_floating_ip(
+                neutron_client, self.mano['details']['fip'])
+        except Exception as exc:
+            self.logger.error('Unexpected error cleaning - %s', exc)
+
+        for resource in reversed(self.created_resources):
+            try:
+                self.logger.info("Cleaning %s", str(resource))
+                resource.clean()
+            except Exception as exc:
+                self.logger.error('Unexpected error cleaning - %s', exc)
+        super(OpenImsVnf, self).clean()
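
The two module-level helpers added above, get_config() and servertest(), carry most of the glue logic: dotted-path lookups into the testcase YAML and single-shot TCP reachability probes that the deploy and test methods wrap in their own retry loops. A minimal usage sketch under stated assumptions follows; the config path and the floating IP are hypothetical placeholders, while the key names ("mano", "tenant_images.orchestrator") and port "8080" are the ones actually read by OpenImsVnf in this patch.

    import time

    from functest.opnfv_tests.vnf.ims.orchestra_openims import (
        get_config, servertest)

    # Hypothetical location of the orchestra config file (not part of this hunk).
    CONFIG_FILE = "/home/opnfv/functest/conf/orchestra.yaml"

    # Dotted-path lookups, as __init__() does for the orchestrator and
    # tenant image sections of the config file.
    mano = get_config("mano", CONFIG_FILE)
    images = get_config("tenant_images.orchestrator", CONFIG_FILE)

    # servertest() makes a single connect attempt per call, so the caller owns
    # the retry loop -- the same pattern deploy_orchestrator() uses while
    # waiting for the Open Baton NFVO API on port 8080.
    nfvo_ip = "192.0.2.10"  # placeholder for the NFVO floating IP
    for attempt in range(200):
        if servertest(nfvo_ip, "8080"):
            print("NFVO reachable after %s s" % (attempt * 5))
            break
        time.sleep(5)
    else:
        print("NFVO still not reachable, giving up")

Note that get_config() raises ValueError when the requested parameter resolves to nothing, so a caller fails fast on an incomplete configuration instead of starting a deployment with partial settings.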