Diffstat (limited to 'functest/opnfv_tests/openstack')
-rw-r--r--  functest/opnfv_tests/openstack/api/__init__.py (renamed from functest/opnfv_tests/openstack/refstack_client/__init__.py) | 0
-rw-r--r--  functest/opnfv_tests/openstack/api/connection_check.py | 73
-rw-r--r--  functest/opnfv_tests/openstack/barbican/__init__.py (renamed from functest/opnfv_tests/openstack/snaps/__init__.py) | 0
-rw-r--r--  functest/opnfv_tests/openstack/barbican/barbican.py | 37
-rw-r--r--  functest/opnfv_tests/openstack/cinder/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/cinder/cinder_test.py | 127
-rw-r--r--  functest/opnfv_tests/openstack/cinder/read_data.sh | 26
-rw-r--r--  functest/opnfv_tests/openstack/cinder/write_data.sh | 30
-rw-r--r--  functest/opnfv_tests/openstack/patrole/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/patrole/patrole.py | 28
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.txt | 71
-rw-r--r--  functest/opnfv_tests/openstack/rally/blacklist.yaml | 40
-rw-r--r--  functest/opnfv_tests/openstack/rally/macro/macro.yaml | 6
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 1059
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally_jobs.yaml | 3
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml | 458
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-cinder.yaml | 77
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml | 5
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-gnocchi.yaml | 181
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml | 22
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml | 82
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-barbican.yaml | 98
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml | 15
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml | 71
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/opnfv-vm.yaml | 41
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml | 247
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-cinder.yaml | 20
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml | 5
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-gnocchi.yaml | 119
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-neutron.yaml | 12
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml | 48
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/support/instance_dd_test.sh | 13
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template | 2
-rw-r--r--  functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template | 2
-rw-r--r--  functest/opnfv_tests/openstack/rally/task.yaml | 18
-rw-r--r--  functest/opnfv_tests/openstack/refstack/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/refstack/refstack.py | 81
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/defcore.txt | 313
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 261
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 67
-rw-r--r--  functest/opnfv_tests/openstack/shaker/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/shaker/shaker.py | 147
-rw-r--r--  functest/opnfv_tests/openstack/snaps/api_check.py | 41
-rw-r--r--  functest/opnfv_tests/openstack/snaps/connection_check.py | 40
-rw-r--r--  functest/opnfv_tests/openstack/snaps/health_check.py | 45
-rw-r--r--  functest/opnfv_tests/openstack/snaps/smoke.py | 44
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_suite_builder.py | 433
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_test_runner.py | 63
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_utils.py | 38
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 352
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt | 2
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml | 19
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt | 249
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml | 15
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml | 117
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml | 104
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 1128
-rw-r--r--  functest/opnfv_tests/openstack/vmtp/__init__.py | 0
-rw-r--r--  functest/opnfv_tests/openstack/vmtp/vmtp.py | 213
-rw-r--r--  functest/opnfv_tests/openstack/vping/ping.sh | 10
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py | 215
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_ssh.py | 253
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_userdata.py | 170
63 files changed, 3152 insertions(+), 4304 deletions(-)
diff --git a/functest/opnfv_tests/openstack/refstack_client/__init__.py b/functest/opnfv_tests/openstack/api/__init__.py
index e69de29bb..e69de29bb 100644
--- a/functest/opnfv_tests/openstack/refstack_client/__init__.py
+++ b/functest/opnfv_tests/openstack/api/__init__.py
diff --git a/functest/opnfv_tests/openstack/api/connection_check.py b/functest/opnfv_tests/openstack/api/connection_check.py
new file mode 100644
index 000000000..eaf9767c0
--- /dev/null
+++ b/functest/opnfv_tests/openstack/api/connection_check.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Verify the connection to OpenStack Services"""
+
+import logging
+import time
+
+import os_client_config
+import shade
+from xtesting.core import testcase
+
+from functest.utils import env
+from functest.utils import functest_utils
+
+
+class ConnectionCheck(testcase.TestCase):
+ """Perform simplest queries"""
+ __logger = logging.getLogger(__name__)
+
+ func_list = [
+ "get_network_extensions", "list_aggregates", "list_domains",
+ "list_endpoints", "list_floating_ip_pools", "list_floating_ips",
+ "list_hypervisors", "list_keypairs", "list_networks", "list_ports",
+ "list_role_assignments", "list_roles", "list_routers", "list_servers",
+ "list_subnets"]
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = 'connection_check'
+ super().__init__(**kwargs)
+ self.output_log_name = 'functest.log'
+ self.output_debug_log_name = 'functest.debug.log'
+ try:
+ cloud_config = os_client_config.get_config()
+ self.cloud = shade.OpenStackCloud(cloud_config=cloud_config)
+ except Exception: # pylint: disable=broad-except
+ self.cloud = None
+
+ def run(self, **kwargs):
+ # pylint: disable=protected-access
+ """Run all read operations to check connections"""
+ status = testcase.TestCase.EX_RUN_ERROR
+ try:
+ assert self.cloud
+ self.start_time = time.time()
+ self.__logger.debug(
+ "list_services: %s", functest_utils.list_services(self.cloud))
+ if env.get('NO_TENANT_NETWORK').lower() == 'true':
+ self.func_list.remove("list_floating_ip_pools")
+ self.func_list.remove("list_floating_ips")
+ self.func_list.remove("list_routers")
+ for func in self.func_list:
+ self.__logger.debug(
+ "%s: %s", func, getattr(self.cloud, func)())
+ data = self.cloud._network_client.get("/service-providers.json")
+ self.__logger.debug(
+ "list_service_providers: %s",
+ self.cloud._get_and_munchify('service_providers', data))
+ functest_utils.get_openstack_version(self.cloud)
+ self.result = 100
+ status = testcase.TestCase.EX_OK
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception('Cannot run %s', self.case_name)
+ finally:
+ self.stop_time = time.time()
+ return status
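
A minimal usage sketch for the new testcase, outside the xtesting runner
(assumes OS_* credentials or a clouds.yaml are visible to os_client_config;
illustrative only, not part of the patch):

    from functest.opnfv_tests.openstack.api.connection_check import \
        ConnectionCheck

    check = ConnectionCheck()       # case_name defaults to 'connection_check'
    if check.run() == check.EX_OK:  # EX_OK comes from xtesting's TestCase
        print(check.result)         # 100 when every read operation succeeded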
diff --git a/functest/opnfv_tests/openstack/snaps/__init__.py b/functest/opnfv_tests/openstack/barbican/__init__.py
index e69de29bb..e69de29bb 100644
--- a/functest/opnfv_tests/openstack/snaps/__init__.py
+++ b/functest/opnfv_tests/openstack/barbican/__init__.py
diff --git a/functest/opnfv_tests/openstack/barbican/barbican.py b/functest/opnfv_tests/openstack/barbican/barbican.py
new file mode 100644
index 000000000..706304bbf
--- /dev/null
+++ b/functest/opnfv_tests/openstack/barbican/barbican.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# pylint: disable=missing-docstring
+
+from six.moves import configparser
+
+from functest.opnfv_tests.openstack.tempest import tempest
+
+
+class Barbican(tempest.TempestCommon):
+
+ def configure(self, **kwargs):
+ super().configure(**kwargs)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('auth'):
+ rconfig.add_section('auth')
+ rconfig.set('auth', 'tempest_roles', 'creator')
+ if not rconfig.has_section('glance'):
+ rconfig.add_section('glance')
+ rconfig.set('glance', 'verify_glance_signatures', True)
+ if not rconfig.has_section('ephemeral_storage_encryption'):
+ rconfig.add_section('ephemeral_storage_encryption')
+ rconfig.set('ephemeral_storage_encryption', 'enabled', True)
+ if not rconfig.has_section('image-feature-enabled'):
+ rconfig.add_section('image-feature-enabled')
+ rconfig.set('image-feature-enabled', 'api_v1', False)
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+ self.backup_tempest_config(self.conf_file, self.res_dir)
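
configure() layers Barbican-specific options on top of the generated
tempest.conf through RawConfigParser, which stringifies the booleans on
write. A hedged read-back sketch (the path below is illustrative; the real
one is self.conf_file):

    from six.moves import configparser

    rconfig = configparser.RawConfigParser()
    rconfig.read('tempest.conf')  # hypothetical path
    assert rconfig.get('auth', 'tempest_roles') == 'creator'
    assert rconfig.getboolean('glance', 'verify_glance_signatures')
    assert rconfig.getboolean('ephemeral_storage_encryption', 'enabled')
    assert not rconfig.getboolean('image-feature-enabled', 'api_v1')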
diff --git a/functest/opnfv_tests/openstack/cinder/__init__.py b/functest/opnfv_tests/openstack/cinder/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/cinder/__init__.py
diff --git a/functest/opnfv_tests/openstack/cinder/cinder_test.py b/functest/opnfv_tests/openstack/cinder/cinder_test.py
new file mode 100644
index 000000000..7d8c0a0bd
--- /dev/null
+++ b/functest/opnfv_tests/openstack/cinder/cinder_test.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Enea AB and others
+
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""CinderCheck testcase."""
+
+import logging
+
+import pkg_resources
+from scp import SCPClient
+from xtesting.core import testcase
+
+from functest.core import singlevm
+from functest.utils import config
+from functest.utils import env
+
+
+class CinderCheck(singlevm.SingleVm2):
+ """
+ CinderCheck testcase implementation.
+
+ Class to execute the CinderCheck test, using two floating IPs
+ to connect to the VMs and one data volume.
+ """
+ # pylint: disable=too-many-instance-attributes
+ volume_timeout = 60
+
+ def __init__(self, **kwargs):
+ """Initialize testcase."""
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "cinder_test"
+ super().__init__(**kwargs)
+ self.logger = logging.getLogger(__name__)
+ self.vm2 = None
+ self.fip2 = None
+ self.ssh2 = None
+ self.volume = None
+
+ def execute(self):
+ """Execute CinderCheck testcase.
+
+ Writes data to the volume from the first VM, then reads it back
+ from the second VM after re-attaching the volume.
+ :return: 0 if both the write and read checks succeed
+ """
+ return self._write_data() or self._read_data()
+
+ def prepare(self):
+ super().prepare()
+ self.vm2 = self.boot_vm(
+ f'{self.case_name}-vm2_{self.guid}',
+ key_name=self.keypair.id,
+ security_groups=[self.sec.id])
+ (self.fip2, self.ssh2) = self.connect(self.vm2)
+ self.volume = self.cloud.create_volume(
+ name=f'{self.case_name}-volume_{self.guid}', size='2',
+ timeout=self.volume_timeout, wait=True)
+
+ def _write_data(self):
+ assert self.cloud
+ self.cloud.attach_volume(self.sshvm, self.volume,
+ timeout=self.volume_timeout)
+ write_data_script = pkg_resources.resource_filename(
+ 'functest.opnfv_tests.openstack.cinder', 'write_data.sh')
+ try:
+ scp = SCPClient(self.ssh.get_transport())
+ scp.put(write_data_script, remote_path="~/")
+ except Exception: # pylint: disable=broad-except
+ self.logger.error("File not transfered!")
+ return testcase.TestCase.EX_RUN_ERROR
+ self.logger.debug("ssh: %s", self.ssh)
+ (_, stdout, stderr) = self.ssh.exec_command(
+ f"sh ~/write_data.sh {env.get('VOLUME_DEVICE_NAME')}")
+ self.logger.debug(
+ "volume_write stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "volume_write stderr: %s", stderr.read().decode("utf-8"))
+ # Detach volume from VM 1
+ self.logger.info("Detach volume from VM 1")
+ self.cloud.detach_volume(
+ self.sshvm, self.volume, timeout=self.volume_timeout)
+ return stdout.channel.recv_exit_status()
+
+ def _read_data(self):
+ assert self.cloud
+ # Attach volume to VM 2
+ self.logger.info("Attach volume to VM 2")
+ self.cloud.attach_volume(self.vm2, self.volume,
+ timeout=self.volume_timeout)
+ # Check volume data
+ read_data_script = pkg_resources.resource_filename(
+ 'functest.opnfv_tests.openstack.cinder', 'read_data.sh')
+ try:
+ scp = SCPClient(self.ssh2.get_transport())
+ scp.put(read_data_script, remote_path="~/")
+ except Exception: # pylint: disable=broad-except
+ self.logger.error("File not transfered!")
+ return testcase.TestCase.EX_RUN_ERROR
+ self.logger.debug("ssh: %s", self.ssh2)
+ (_, stdout, stderr) = self.ssh2.exec_command(
+ f"sh ~/read_data.sh {env.get('VOLUME_DEVICE_NAME')}")
+ self.logger.debug(
+ "read volume stdout: %s", stdout.read().decode("utf-8"))
+ self.logger.debug(
+ "read volume stderr: %s", stderr.read().decode("utf-8"))
+ self.logger.info("Detach volume from VM 2")
+ self.cloud.detach_volume(
+ self.vm2, self.volume, timeout=self.volume_timeout)
+ return stdout.channel.recv_exit_status()
+
+ def clean(self):
+ assert self.cloud
+ if self.vm2:
+ self.cloud.delete_server(
+ self.vm2, wait=True,
+ timeout=getattr(config.CONF, 'vping_vm_delete_timeout'))
+ if self.fip2:
+ self.cloud.delete_floating_ip(self.fip2.id)
+ if self.volume:
+ self.cloud.delete_volume(self.volume.id)
+ super().clean()
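
execute() relies on short-circuit evaluation: a non-zero exit status from
the write step is returned as-is and the read step is skipped. Equivalent
explicit control flow (sketch only):

    rc = self._write_data()   # ssh exit status of write_data.sh on VM 1
    if rc:                    # non-zero means the write check failed
        return rc
    return self._read_data()  # ssh exit status of read_data.sh on VM 2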
diff --git a/functest/opnfv_tests/openstack/cinder/read_data.sh b/functest/opnfv_tests/openstack/cinder/read_data.sh
new file mode 100644
index 000000000..2c5fdd4c0
--- /dev/null
+++ b/functest/opnfv_tests/openstack/cinder/read_data.sh
@@ -0,0 +1,26 @@
+#!/bin/sh -e
+
+# Copyright (c) 2018 Enea AB and others
+
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+DEST=$(mktemp -d)
+VOL_DEV_NAME=${1:-vdb}
+echo "VOL_DEV_NAME: $VOL_DEV_NAME"
+
+echo "$(lsblk -l -o NAME)"
+if [ ! -z $(lsblk -l -o NAME | grep $VOL_DEV_NAME) ]; then
+ sudo mount /dev/$VOL_DEV_NAME $DEST
+ if [ -f $DEST/new_data ]; then
+ echo "Found new data!"
+ else
+ echo "Failed to find data!"
+ exit 1
+ fi
+fi
+
+exit 0
diff --git a/functest/opnfv_tests/openstack/cinder/write_data.sh b/functest/opnfv_tests/openstack/cinder/write_data.sh
new file mode 100644
index 000000000..16845ba31
--- /dev/null
+++ b/functest/opnfv_tests/openstack/cinder/write_data.sh
@@ -0,0 +1,30 @@
+#!/bin/sh -e
+
+# Copyright (c) 2018 Enea AB and others
+
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+
+DEST=$(mktemp -d)
+VOL_DEV_NAME=${1:-vdb}
+echo "VOL_DEV_NAME: $VOL_DEV_NAME"
+
+echo "$(lsblk -l -o NAME)"
+
+if [ ! -z $(lsblk -l -o NAME | grep $VOL_DEV_NAME) ]; then
+ sudo mkfs.ext4 -F /dev/$VOL_DEV_NAME
+ sudo mount /dev/$VOL_DEV_NAME $DEST
+ sudo touch $DEST/new_data
+ if [ -f $DEST/new_data ]; then
+ echo "New data added to the volume!"
+ sudo umount $DEST
+ fi
+else
+ echo "Failed to write data!"
+ exit 1
+fi
+
+exit 0
diff --git a/functest/opnfv_tests/openstack/patrole/__init__.py b/functest/opnfv_tests/openstack/patrole/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/patrole/__init__.py
diff --git a/functest/opnfv_tests/openstack/patrole/patrole.py b/functest/opnfv_tests/openstack/patrole/patrole.py
new file mode 100644
index 000000000..88c42f269
--- /dev/null
+++ b/functest/opnfv_tests/openstack/patrole/patrole.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# pylint: disable=missing-docstring
+
+from six.moves import configparser
+
+from functest.opnfv_tests.openstack.tempest import tempest
+
+
+class Patrole(tempest.TempestCommon):
+
+ def configure(self, **kwargs):
+ super().configure(**kwargs)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('rbac'):
+ rconfig.add_section('rbac')
+ rconfig.set('rbac', 'rbac_test_roles', kwargs.get('roles', 'admin'))
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+ self.backup_tempest_config(self.conf_file, self.res_dir)
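
The 'roles' kwarg flows from run(**kwargs) into configure() and defaults to
'admin'. A hedged invocation sketch (assumes the Tempest verifier has
already been set up by TempestCommon):

    from functest.opnfv_tests.openstack.patrole.patrole import Patrole

    patrole = Patrole()
    patrole.configure(roles='creator')  # writes rbac_test_roles = creator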
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.txt b/functest/opnfv_tests/openstack/rally/blacklist.txt
deleted file mode 100644
index 4b42c312a..000000000
--- a/functest/opnfv_tests/openstack/rally/blacklist.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-scenario:
- -
- scenarios:
- - '^os-nosdn-lxd-(no)?ha$'
- installers:
- - joid
- tests:
- - NovaServers.boot_server_from_volume_and_delete
- -
- scenarios:
- - '^os-' # all scenarios
- installers:
- - '.+' # all installers
- tests:
- # Following tests currently fail due to required Gnocchi API:
- # HTTP 410: "This telemetry installation is configured to use
- # Gnocchi. Please use the Gnocchi API available on the
- # metric endpoint to retrieve data."
- # Issue: https://bugs.launchpad.net/rally/+bug/1704322
- - CeilometerMeters.list_matched_meters
- - CeilometerMeters.list_meters
- - CeilometerQueries.create_and_query_samples
- - CeilometerResource.get_tenant_resources
- - CeilometerResource.list_matched_resources
- - CeilometerResource.list_resources
- - CeilometerSamples.list_matched_samples
- - CeilometerSamples.list_samples
- - CeilometerStats.create_meter_and_get_stats
- - CeilometerStats.get_stats
- -
- scenarios:
- - '^os-' # all scenarios
- installers:
- - '.+' # all installers
- tests:
- # Following test currently fails due to a bug in
- # python-ceilometerclient during fetching of event_types
- # Bug: https://bugs.launchpad.net/ubuntu/+bug/1704138
- # Fix: https://review.openstack.org/#/c/483402/
- - CeilometerEvents.create_user_and_list_event_types
- -
- scenarios:
- - '^os-' # all scenarios
- installers:
- - '.+' # all installers
- tests:
- # Starting from ocata, following tests require the presence of
- # panko in the deployment. This is not currently fulfilled
- # Ref: https://docs.openstack.org/releasenotes/ceilometer/ocata.html
- - 'CeilometerEvents..*'
- - 'CeilometerTraits..*'
- -
- scenarios:
- - '^os-' # all scenarios
- installers:
- - '.+' # all installers
- tests:
- # Rally is still utilizing Ceilometer API which is deprecated
- # in Pike.
- # Ref: https://docs.openstack.org/releasenotes/ceilometer/pike.html
- - 'Ceilometer..*'
-
-functionality:
- -
- functions:
- - no_migration
- tests:
- - NovaServers.boot_and_live_migrate_server
- - NovaServers.boot_server_attach_created_volume_and_live_migrate
- - NovaServers.boot_server_from_volume_and_live_migrate
- - NovaServers.boot_and_migrate_server
diff --git a/functest/opnfv_tests/openstack/rally/blacklist.yaml b/functest/opnfv_tests/openstack/rally/blacklist.yaml
new file mode 100644
index 000000000..e16b83ba6
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/blacklist.yaml
@@ -0,0 +1,40 @@
+---
+scenario:
+
+functionality:
+ -
+ functions:
+ - block_migration
+ tests:
+ - NovaServers.boot_server_from_volume_and_live_migrate
+ -
+ functions:
+ - no_migration
+ tests:
+ - NovaServers.boot_and_live_migrate_server
+ - NovaServers.boot_server_attach_created_volume_and_live_migrate
+ - NovaServers.boot_server_from_volume_and_live_migrate
+ - NovaServers.boot_and_migrate_server
+ -
+ functions:
+ - no_net_trunk_service
+ tests:
+ - '^NeutronTrunk'
+ -
+ functions:
+ - no_floating_ip
+ tests:
+ - HeatStacks.create_and_delete_stack
+ - NovaServers.boot_and_associate_floating_ip
+ - NovaServers.boot_server_and_list_interfaces
+ - NovaServers.boot_server_associate_and_dissociate_floating_ip
+ - NeutronNetworks.create_and_delete_floating_ips
+ - NeutronNetworks.create_and_list_floating_ips
+ - NeutronNetworks.associate_and_dissociate_floating_ips
+ - VMTasks.dd_load_test
+ - NeutronNetworks.create_and_delete_routers
+ - NeutronNetworks.create_and_list_routers
+ - NeutronNetworks.create_and_show_routers
+ - NeutronNetworks.create_and_update_routers
+ - NeutronNetworks.set_and_clear_router_gateway
+ - Quotas.neutron_update
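
Entries under 'tests' are Python regular expressions matched against each
scenario line (see apply_blacklist() in rally.py below), so '^NeutronTrunk'
skips every NeutronTrunk scenario. A minimal sketch of the matching rule:

    import re

    black_tests = ['^NeutronTrunk', 'Quotas.neutron_update']
    line = '  NeutronTrunk.create_and_list_trunks:'
    skipped = any(
        re.search(pattern, line.strip().rstrip(':'))
        for pattern in black_tests)  # True: the scenario is filtered out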
diff --git a/functest/opnfv_tests/openstack/rally/macro/macro.yaml b/functest/opnfv_tests/openstack/rally/macro/macro.yaml
index 48c0333e9..2536c92f0 100644
--- a/functest/opnfv_tests/openstack/rally/macro/macro.yaml
+++ b/functest/opnfv_tests/openstack/rally/macro/macro.yaml
@@ -95,3 +95,9 @@
disk_format: {{ type }}
image_location: {{ location }}
{%- endmacro %}
+
+{%- macro volume_service(version, service_type) %}
+ cinder:
+ version: {{ version }}
+ service_type: {{ service_type }}
+{%- endmacro %}
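
With the defaults RallyBase passes as task args (volume_version=3,
volume_service_type="volumev3"), the new volume_service macro renders
roughly as:

    cinder:
      version: 3
      service_type: volumev3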
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index eefd3eb46..3d897e25d 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -11,149 +11,142 @@
"""Rally testcases implementation."""
from __future__ import division
+from __future__ import print_function
+import fileinput
import json
import logging
import os
import re
+import shutil
import subprocess
import time
-import uuid
import pkg_resources
+import prettytable
+from ruamel.yaml import YAML
+import six
+from six.moves import configparser
+from xtesting.core import testcase
import yaml
-from functest.core import testcase
-from functest.energy import energy
-from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
-
-from snaps.config.flavor import FlavorConfig
-from snaps.config.image import ImageConfig
-from snaps.config.network import NetworkConfig, SubnetConfig
-from snaps.config.router import RouterConfig
-
-from snaps.openstack.create_flavor import OpenStackFlavor
-from snaps.openstack.tests import openstack_tests
-from snaps.openstack.utils import deploy_utils
+from functest.core import singlevm
+from functest.utils import config
+from functest.utils import env
+from functest.utils import functest_utils
LOGGER = logging.getLogger(__name__)
-class RallyBase(testcase.TestCase):
+class RallyBase(singlevm.VmReady2):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
- 'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
- GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
- GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
- GLANCE_IMAGE_PATH = os.path.join(
- CONST.__getattribute__('dir_functest_images'),
- GLANCE_IMAGE_FILENAME)
- GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
- GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
- GLANCE_IMAGE_EXTRA_PROPERTIES = {}
- if hasattr(CONST, 'openstack_extra_properties'):
- GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
- 'openstack_extra_properties')
- FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
- FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
- FLAVOR_EXTRA_SPECS = None
- FLAVOR_RAM = 512
- FLAVOR_RAM_ALT = 1024
- if hasattr(CONST, 'flavor_extra_specs'):
- FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')
- FLAVOR_RAM = 1024
- FLAVOR_RAM_ALT = 2048
-
- RALLY_DIR = pkg_resources.resource_filename(
+ # pylint: disable=too-many-instance-attributes, too-many-public-methods
+ stests = ['authenticate', 'glance', 'cinder', 'gnocchi', 'heat',
+ 'keystone', 'neutron', 'nova', 'quotas', 'swift', 'barbican',
+ 'vm']
+
+ rally_conf_path = "/etc/rally/rally.conf"
+ rally_aar4_patch_path = pkg_resources.resource_filename(
+ 'functest', 'ci/rally_aarch64_patch.conf')
+ rally_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally')
- RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
+ rally_scenario_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario')
- TEMPLATE_DIR = pkg_resources.resource_filename(
+ template_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario/templates')
- SUPPORT_DIR = pkg_resources.resource_filename(
+ support_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally/scenario/support')
- USERS_AMOUNT = 2
- TENANTS_AMOUNT = 3
- ITERATIONS_AMOUNT = 10
- CONCURRENCY = 4
- RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
- BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
- TEMP_DIR = os.path.join(RALLY_DIR, "var")
-
- RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
- RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
- RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
- RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
+ users_amount = 2
+ tenants_amount = 3
+ iterations_amount = 10
+ concurrency = 4
+ volume_version = 3
+ volume_service_type = "volumev3"
+ blacklist_file = os.path.join(rally_dir, "blacklist.yaml")
+ task_dir = os.path.join(getattr(config.CONF, 'dir_rally_data'), 'task')
+ temp_dir = os.path.join(task_dir, 'var')
+
+ visibility = 'public'
+ shared_network = True
+ task_timeout = 3600
+ username = 'cirros'
def __init__(self, **kwargs):
"""Initialize RallyBase object."""
- super(RallyBase, self).__init__(**kwargs)
- if 'os_creds' in kwargs:
- self.os_creds = kwargs['os_creds']
+ super().__init__(**kwargs)
+ assert self.orig_cloud
+ assert self.project
+ if self.orig_cloud.get_role("admin"):
+ role_name = "admin"
+ elif self.orig_cloud.get_role("Admin"):
+ role_name = "Admin"
else:
- creds_override = None
- if hasattr(CONST, 'snaps_os_creds_override'):
- creds_override = CONST.__getattribute__(
- 'snaps_os_creds_override')
-
- self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
- overrides=creds_override)
-
- self.guid = '-' + str(uuid.uuid4())
-
+ raise Exception("Cannot detect neither admin nor Admin")
+ self.orig_cloud.grant_role(
+ role_name, user=self.project.user.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+ self.results_dir = os.path.join(
+ getattr(config.CONF, 'dir_results'), self.case_name)
+ self.task_file = ''
self.creators = []
- self.mode = ''
self.summary = []
self.scenario_dir = ''
- self.image_name = None
- self.ext_net_name = None
- self.priv_net_id = None
- self.flavor_name = None
- self.flavor_alt_name = None
self.smoke = None
- self.test_name = None
self.start_time = None
self.result = None
- self.details = None
self.compute_cnt = 0
-
- def _build_task_args(self, test_file_name):
- task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = self.image_name
- task_args['flavor_name'] = self.flavor_name
- task_args['flavor_alt_name'] = self.flavor_alt_name
- task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
- task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
- task_args['tmpl_dir'] = self.TEMPLATE_DIR
- task_args['sup_dir'] = self.SUPPORT_DIR
- task_args['users_amount'] = self.USERS_AMOUNT
- task_args['tenants_amount'] = self.TENANTS_AMOUNT
+ self.flavor_alt = None
+ self.tests = []
+ self.run_cmd = ''
+ self.network_extensions = []
+ self.services = []
+
+ def build_task_args(self, test_name):
+ """Build arguments for the Rally task."""
+ task_args = {'service_list': [test_name]}
+ task_args['image_name'] = str(self.image.name)
+ task_args['flavor_name'] = str(self.flavor.name)
+ task_args['flavor_alt_name'] = str(self.flavor_alt.name)
+ task_args['glance_image_location'] = str(self.filename)
+ task_args['glance_image_format'] = str(self.image_format)
+ task_args['tmpl_dir'] = str(self.template_dir)
+ task_args['sup_dir'] = str(self.support_dir)
+ task_args['users_amount'] = self.users_amount
+ task_args['tenants_amount'] = self.tenants_amount
task_args['use_existing_users'] = False
- task_args['iterations'] = self.ITERATIONS_AMOUNT
- task_args['concurrency'] = self.CONCURRENCY
+ task_args['iterations'] = self.iterations_amount
+ task_args['concurrency'] = self.concurrency
task_args['smoke'] = self.smoke
+ task_args['volume_version'] = self.volume_version
+ task_args['volume_service_type'] = self.volume_service_type
+ task_args['block_migration'] = env.get("BLOCK_MIGRATION").lower()
+ task_args['username'] = self.username
- ext_net = self.ext_net_name
- if ext_net:
- task_args['floating_network'] = str(ext_net)
+ if self.ext_net:
+ task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
- net_id = self.priv_net_id
- if net_id:
- task_args['netid'] = str(net_id)
+ if self.network:
+ task_args['netid'] = str(self.network.id)
else:
- task_args['netid'] = ''
+ LOGGER.warning(
+ 'No tenant network created. '
+ 'Trying EXTERNAL_NETWORK as a fallback')
+ if env.get("EXTERNAL_NETWORK"):
+ network = self.cloud.get_network(env.get("EXTERNAL_NETWORK"))
+ task_args['netid'] = str(network.id) if network else ''
+ else:
+ task_args['netid'] = ''
return task_args
def _prepare_test_list(self, test_name):
- test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
- scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
+ """Build the list of test cases to be executed."""
+ test_yaml_file_name = f'opnfv-{test_name}.yaml'
+ scenario_file_name = os.path.join(self.rally_scenario_dir,
test_yaml_file_name)
if not os.path.exists(scenario_file_name):
@@ -161,33 +154,108 @@ class RallyBase(testcase.TestCase):
test_yaml_file_name)
if not os.path.exists(scenario_file_name):
- raise Exception("The scenario '%s' does not exist."
- % scenario_file_name)
+ raise Exception(
+ f"The scenario '{scenario_file_name}' does not exist.")
LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
- test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
+ test_file_name = os.path.join(self.temp_dir, test_yaml_file_name)
- if not os.path.exists(self.TEMP_DIR):
- os.makedirs(self.TEMP_DIR)
+ if not os.path.exists(self.temp_dir):
+ os.makedirs(self.temp_dir)
- self._apply_blacklist(scenario_file_name, test_file_name)
+ self.apply_blacklist(scenario_file_name, test_file_name)
return test_file_name
@staticmethod
- def get_task_id(cmd_raw):
+ def get_verifier_deployment_id():
+ """
+ Returns deployment id for active Rally deployment
+ """
+ cmd = ("rally deployment list | awk '/" +
+ getattr(config.CONF, 'rally_deployment_name') +
+ "/ {print $2}'")
+ with subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT) as proc:
+ deployment_uuid = proc.stdout.readline().rstrip()
+ return deployment_uuid.decode("utf-8")
+
+ @staticmethod
+ def create_rally_deployment(environ=None):
+ # pylint: disable=unexpected-keyword-arg
+ """Create new rally deployment"""
+ # set the architecture to default
+ pod_arch = env.get("POD_ARCH")
+ arch_filter = ['aarch64']
+
+ if pod_arch and pod_arch in arch_filter:
+ LOGGER.info("Apply aarch64 specific to rally config...")
+ with open(
+ RallyBase.rally_aar4_patch_path, "r",
+ encoding='utf-8') as pfile:
+ rally_patch_conf = pfile.read()
+
+ for line in fileinput.input(RallyBase.rally_conf_path):
+ print(line, end=' ')
+ if "cirros|testvm" in line:
+ print(rally_patch_conf)
+
+ LOGGER.info("Creating Rally environment...")
+ try:
+ cmd = ['rally', 'deployment', 'destroy',
+ '--deployment',
+ str(getattr(config.CONF, 'rally_deployment_name'))]
+ output = subprocess.check_output(cmd)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ except subprocess.CalledProcessError:
+ pass
+
+ cmd = ['rally', 'deployment', 'create', '--fromenv',
+ '--name', str(getattr(config.CONF, 'rally_deployment_name'))]
+ output = subprocess.check_output(cmd, env=environ)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ cmd = ['rally', 'deployment', 'check']
+ output = subprocess.check_output(cmd)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ return RallyBase.get_verifier_deployment_id()
+
+ @staticmethod
+ def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):
+ """Set keystone_default_role in rally.conf"""
+ if env.get("NEW_USER_ROLE").lower() != "member":
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
+ if not rconfig.has_section('openstack'):
+ rconfig.add_section('openstack')
+ rconfig.set(
+ 'openstack', 'keystone_default_role', env.get("NEW_USER_ROLE"))
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ @staticmethod
+ def clean_rally_conf(rally_conf='/etc/rally/rally.conf'):
+ """Clean Rally config"""
+ if env.get("NEW_USER_ROLE").lower() != "member":
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
+ if rconfig.has_option('openstack', 'keystone_default_role'):
+ rconfig.remove_option('openstack', 'keystone_default_role')
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ @staticmethod
+ def get_task_id(tag):
"""
Get task id from command rally result.
- :param cmd_raw:
+ :param tag:
:return: task_id as string
"""
- taskid_re = re.compile('^Task +(.*): started$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
- if match:
- return match.group(1)
- return None
+ cmd = ["rally", "task", "list", "--tag", tag, "--uuids-only"]
+ output = subprocess.check_output(cmd).decode("utf-8").rstrip()
+ LOGGER.info("%s: %s", " ".join(cmd), output)
+ return output
@staticmethod
def task_succeed(json_raw):
@@ -198,53 +266,48 @@ class RallyBase(testcase.TestCase):
:return: Bool
"""
rally_report = json.loads(json_raw)
- for report in rally_report:
- if report is None or report.get('result') is None:
- return False
-
- for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
+ tasks = rally_report.get('tasks')
+ if tasks:
+ for task in tasks:
+ if task.get('status') != 'finished' or \
+ task.get('pass_sla') is not True:
return False
-
+ else:
+ return False
return True
def _migration_supported(self):
"""Determine if migration is supported."""
if self.compute_cnt > 1:
return True
-
return False
- @staticmethod
- def get_cmd_output(proc):
- """Get command stdout."""
- result = ""
- while proc.poll() is None:
- line = proc.stdout.readline()
- result += line
- return result
+ def _network_trunk_supported(self):
+ """Determine if network trunk service is available"""
+ if 'trunk' in self.network_extensions:
+ return True
+ return False
@staticmethod
def excl_scenario():
"""Exclude scenario."""
black_tests = []
try:
- with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ with open(
+ RallyBase.blacklist_file, 'r',
+ encoding='utf-8') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
- installer_type = CONST.__getattribute__('INSTALLER_TYPE')
- deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
- if (bool(installer_type) and bool(deploy_scenario) and
+ deploy_scenario = env.get('DEPLOY_SCENARIO')
+ if (bool(deploy_scenario) and
'scenario' in black_list_yaml.keys()):
for item in black_list_yaml['scenario']:
scenarios = item['scenarios']
- installers = item['installers']
in_it = RallyBase.in_iterable_re
- if (in_it(deploy_scenario, scenarios) and
- in_it(installer_type, installers)):
+ if in_it(deploy_scenario, scenarios):
tests = item['tests']
black_tests.extend(tests)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOGGER.debug("Scenario exclusion not applied.")
return black_tests
@@ -267,8 +330,8 @@ class RallyBase(testcase.TestCase):
# match if regex pattern is set and found in the needle
if pattern and re.search(pattern, needle) is not None:
return True
- else:
- return False
+
+ return False
def excl_func(self):
"""Exclude functionalities."""
@@ -276,11 +339,19 @@ class RallyBase(testcase.TestCase):
func_list = []
try:
- with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ with open(
+ RallyBase.blacklist_file, 'r',
+ encoding='utf-8') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
+ if env.get('BLOCK_MIGRATION').lower() == 'true':
+ func_list.append("block_migration")
if not self._migration_supported():
func_list.append("no_migration")
+ if not self._network_trunk_supported():
+ func_list.append("no_net_trunk_service")
+ if not self.ext_net:
+ func_list.append("no_floating_ip")
if 'functionality' in black_list_yaml.keys():
for item in black_list_yaml['functionality']:
@@ -294,34 +365,28 @@ class RallyBase(testcase.TestCase):
return black_tests
- def _apply_blacklist(self, case_file_name, result_file_name):
+ def apply_blacklist(self, case_file_name, result_file_name):
"""Apply blacklist."""
LOGGER.debug("Applying blacklist...")
- cases_file = open(case_file_name, 'r')
- result_file = open(result_file_name, 'w')
-
- black_tests = list(set(self.excl_func() +
- self.excl_scenario()))
-
- if black_tests:
- LOGGER.debug("Blacklisted tests: " + str(black_tests))
-
- include = True
- for cases_line in cases_file:
- if include:
- for black_tests_line in black_tests:
- if re.search(black_tests_line,
- cases_line.strip().rstrip(':')):
- include = False
- break
+ with open(case_file_name, 'r', encoding='utf-8') as cases_file, open(
+ result_file_name, 'w', encoding='utf-8') as result_file:
+ black_tests = list(set(self.excl_func() + self.excl_scenario()))
+ if black_tests:
+ LOGGER.debug("Blacklisted tests: %s", str(black_tests))
+
+ include = True
+ for cases_line in cases_file:
+ if include:
+ for black_tests_line in black_tests:
+ if re.search(black_tests_line,
+ cases_line.strip().rstrip(':')):
+ include = False
+ break
+ else:
+ result_file.write(str(cases_line))
else:
- result_file.write(str(cases_line))
- else:
- if cases_line.isspace():
- include = True
-
- cases_file.close()
- result_file.close()
+ if cases_line.isspace():
+ include = True
@staticmethod
def file_is_empty(file_name):
@@ -334,325 +399,315 @@ class RallyBase(testcase.TestCase):
return True
- def _run_task(self, test_name):
- """Run a task."""
- LOGGER.info('Starting test scenario "%s" ...', test_name)
-
- task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
- if not os.path.exists(task_file):
- LOGGER.error("Task file '%s' does not exist.", task_file)
- raise Exception("Task file '%s' does not exist.", task_file)
-
- file_name = self._prepare_test_list(test_name)
- if self.file_is_empty(file_name):
- LOGGER.info('No tests for scenario "%s"', test_name)
- return
-
- cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
- task_file, "--task-args",
- str(self._build_task_args(test_name))])
- LOGGER.debug('running command: %s', cmd)
-
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output = self._get_output(proc, test_name)
- task_id = self.get_task_id(output)
- LOGGER.debug('task_id : %s', task_id)
-
- if task_id is None:
- LOGGER.error('Failed to retrieve task_id, validating task...')
- cmd = (["rally", "task", "validate", "--task", task_file,
- "--task-args", str(self._build_task_args(test_name))])
- LOGGER.debug('running command: %s', cmd)
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output = self.get_cmd_output(proc)
- LOGGER.error("Task validation result:" + "\n" + output)
- return
-
+ def _save_results(self, test_name, task_id):
+ """ Generate and save task execution results"""
# check for result directory and create it otherwise
- if not os.path.exists(self.RESULTS_DIR):
+ if not os.path.exists(self.results_dir):
LOGGER.debug('%s does not exist, we create it.',
- self.RESULTS_DIR)
- os.makedirs(self.RESULTS_DIR)
-
- # write html report file
- report_html_name = 'opnfv-{}.html'.format(test_name)
- report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
- cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
+ self.results_dir)
+ os.makedirs(self.results_dir)
+ # put detailed result to log
+ cmd = (["rally", "task", "detailed", "--uuid", task_id])
LOGGER.debug('running command: %s', cmd)
- subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
- # get and save rally operation JSON result
- cmd = (["rally", "task", "results", task_id])
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ # save report as JSON
+ report_json_name = f'{test_name}.json'
+ report_json_dir = os.path.join(self.results_dir, report_json_name)
+ cmd = (["rally", "task", "report", "--json", "--uuid", task_id,
+ "--out", report_json_dir])
LOGGER.debug('running command: %s', cmd)
- proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- json_results = self.get_cmd_output(proc)
- report_json_name = 'opnfv-{}.json'.format(test_name)
- report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
- with open(report_json_dir, 'w') as r_file:
- LOGGER.debug('saving json file')
- r_file.write(json_results)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ with open(report_json_dir, encoding='utf-8') as json_file:
+ json_results = json_file.read()
+ self._append_summary(json_results, test_name)
# parse JSON operation result
if self.task_succeed(json_results):
- LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+ LOGGER.info('Test scenario: "%s" OK.', test_name)
else:
- LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+ LOGGER.info('Test scenario: "%s" Failed.', test_name)
- def _get_output(self, proc, test_name):
- result = ""
+ def run_task(self, test_name):
+ """Run a task."""
+ LOGGER.info('Starting test scenario "%s" ...', test_name)
+ LOGGER.debug('running command: %s', self.run_cmd)
+ if six.PY3:
+ subprocess.call(
+ self.run_cmd, timeout=self.task_timeout,
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ else:
+ with open(os.devnull, 'wb') as devnull:
+ subprocess.call(self.run_cmd, stdout=devnull, stderr=devnull)
+ task_id = self.get_task_id(test_name)
+ LOGGER.debug('task_id : %s', task_id)
+ if not task_id:
+ LOGGER.error("Failed to retrieve task_id")
+ raise Exception("Failed to retrieve task id")
+ self._save_results(test_name, task_id)
+
+ def _append_summary(self, json_raw, test_name):
+ # pylint: disable=too-many-locals
+ """Update statistics summary info."""
nb_tests = 0
+ nb_success = 0
overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
- # parse output for summary report
- if ("| " in line and
- "| action" not in line and
- "| Starting" not in line and
- "| Completed" not in line and
- "| ITER" not in line and
- "| " not in line and
- "| total" not in line):
- nb_tests += 1
- elif "| total" in line:
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- try:
- success += float(percentage)
- except ValueError:
- LOGGER.info('Percentage error: %s, %s',
- percentage, line)
- nb_totals += 1
- elif "Full duration" in line:
- duration = line.split(': ')[1]
- try:
- overall_duration += float(duration)
- except ValueError:
- LOGGER.info('Duration error: %s, %s', duration, line)
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
+ success = []
+ failures = []
+
+ rally_report = json.loads(json_raw)
+ for task in rally_report.get('tasks'):
+ for subtask in task.get('subtasks'):
+ has_errors = False
+ for workload in subtask.get('workloads'):
+ if workload.get('full_duration'):
+ overall_duration += workload.get('full_duration')
+
+ if workload.get('data'):
+ nb_tests += len(workload.get('data'))
+
+ for result in workload.get('data'):
+ if not result.get('error'):
+ nb_success += 1
+ else:
+ has_errors = True
+
+ if has_errors:
+ failures.append(subtask['title'])
+ else:
+ success.append(subtask['title'])
scenario_summary = {'test_name': test_name,
'overall_duration': overall_duration,
'nb_tests': nb_tests,
- 'success': success_avg}
+ 'nb_success': nb_success,
+ 'success': success,
+ 'failures': failures,
+ 'task_status': self.task_succeed(json_raw)}
self.summary.append(scenario_summary)
- LOGGER.debug("\n" + result)
-
- return result
-
- def _prepare_env(self):
- LOGGER.debug('Validating the test name...')
- if self.test_name not in self.TESTS:
- raise Exception("Test name '%s' is invalid" % self.test_name)
-
- network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
- subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
- router_name = self.RALLY_ROUTER_NAME + self.guid
- self.image_name = self.GLANCE_IMAGE_NAME + self.guid
- self.flavor_name = self.FLAVOR_NAME + self.guid
- self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
- self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
- self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)
-
- LOGGER.debug("Creating image '%s'...", self.image_name)
- image_creator = deploy_utils.create_image(
- self.os_creds, ImageConfig(
- name=self.image_name,
- image_file=self.GLANCE_IMAGE_PATH,
- img_format=self.GLANCE_IMAGE_FORMAT,
- image_user=self.GLANCE_IMAGE_USERNAME,
- public=True,
- extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
- if image_creator is None:
- raise Exception("Failed to create image")
- self.creators.append(image_creator)
-
- LOGGER.debug("Creating network '%s'...", network_name)
-
- rally_network_type = None
- rally_physical_network = None
- rally_segmentation_id = None
-
- if hasattr(CONST, 'rally_network_type'):
- rally_network_type = CONST.__getattribute__(
- 'rally_network_type')
- if hasattr(CONST, 'rally_physical_network'):
- rally_physical_network = CONST.__getattribute__(
- 'rally_physical_network')
- if hasattr(CONST, 'rally_segmentation_id'):
- rally_segmentation_id = CONST.__getattribute__(
- 'rally_segmentation_id')
-
- network_creator = deploy_utils.create_network(
- self.os_creds, NetworkConfig(
- name=network_name,
- shared=True,
- network_type=rally_network_type,
- physical_network=rally_physical_network,
- segmentation_id=rally_segmentation_id,
- subnet_settings=[SubnetConfig(
- name=subnet_name,
- cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
- ]))
- if network_creator is None:
- raise Exception("Failed to create private network")
- self.priv_net_id = network_creator.get_network().id
- self.creators.append(network_creator)
-
- LOGGER.debug("Creating router '%s'...", router_name)
- router_creator = deploy_utils.create_router(
- self.os_creds, RouterConfig(
- name=router_name,
- external_gateway=self.ext_net_name,
- internal_subnets=[subnet_name]))
- if router_creator is None:
- raise Exception("Failed to create router")
- self.creators.append(router_creator)
-
- LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
- flavor_creator = OpenStackFlavor(
- self.os_creds, FlavorConfig(
- name=self.flavor_name, ram=self.FLAVOR_RAM, disk=1, vcpus=1,
- metadata=self.FLAVOR_EXTRA_SPECS))
- if flavor_creator is None or flavor_creator.create() is None:
- raise Exception("Failed to create flavor")
- self.creators.append(flavor_creator)
-
- LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
- flavor_alt_creator = OpenStackFlavor(
- self.os_creds, FlavorConfig(
- name=self.flavor_alt_name, ram=self.FLAVOR_RAM_ALT, disk=1,
- vcpus=1, metadata=self.FLAVOR_EXTRA_SPECS))
- if flavor_alt_creator is None or flavor_alt_creator.create() is None:
- raise Exception("Failed to create flavor")
- self.creators.append(flavor_alt_creator)
-
- def _run_tests(self):
- if self.test_name == 'all':
- for test in self.TESTS:
- if test == 'all' or test == 'vm':
- continue
- self._run_task(test)
- else:
- self._run_task(self.test_name)
+ def prepare_run(self, **kwargs):
+ """Prepare resources needed by test scenarios."""
+ assert self.cloud
+ LOGGER.debug('Validating run tests...')
+ for test in kwargs.get('tests', self.stests):
+ if test in self.stests:
+ self.tests.append(test)
+ else:
+ raise Exception(f"Test name '{test}' is invalid")
+
+ if not os.path.exists(self.task_dir):
+ os.makedirs(self.task_dir)
+
+ task = os.path.join(self.rally_dir, 'task.yaml')
+ if not os.path.exists(task):
+ LOGGER.error("Task file '%s' does not exist.", task)
+ raise Exception(f"Task file '{task}' does not exist.")
+ self.task_file = os.path.join(self.task_dir, 'task.yaml')
+ shutil.copyfile(task, self.task_file)
+
+ task_macro = os.path.join(self.rally_dir, 'macro')
+ if not os.path.exists(task_macro):
+ LOGGER.error("Task macro dir '%s' does not exist.", task_macro)
+ raise Exception(f"Task macro dir '{task_macro}' does not exist.")
+ macro_dir = os.path.join(self.task_dir, 'macro')
+ if os.path.exists(macro_dir):
+ shutil.rmtree(macro_dir)
+ shutil.copytree(task_macro, macro_dir)
+
+ self.update_keystone_default_role()
+ self.compute_cnt = self.count_hypervisors()
+ self.network_extensions = self.cloud.get_network_extensions()
+ self.flavor_alt = self.create_flavor_alt()
+ self.services = [service.name for service in
+ functest_utils.list_services(self.cloud)]
+
+ LOGGER.debug("flavor: %s", self.flavor_alt)
+
+ def prepare_task(self, test_name):
+ """Prepare resources for test run."""
+ file_name = self._prepare_test_list(test_name)
+ if self.file_is_empty(file_name):
+ LOGGER.info('No tests for scenario "%s"', test_name)
+ return False
+ self.run_cmd = (["rally", "task", "start", "--tag", test_name,
+ "--abort-on-sla-failure",
+ "--task", self.task_file, "--task-args",
+ str(self.build_task_args(test_name))])
+ return True
+
+ def run_tests(self, **kwargs):
+ """Execute tests."""
+ optional = kwargs.get('optional', [])
+ for test in self.tests:
+ if test in self.services or test not in optional:
+ if self.prepare_task(test):
+ self.run_task(test)
def _generate_report(self):
- report = (
- "\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
+ """Generate test execution summary report."""
+ total_duration = 0.0
+ total_nb_tests = 0
+ total_nb_success = 0
+ nb_modules = 0
payload = []
+ res_table = prettytable.PrettyTable(
+ padding_width=2,
+ field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
+ res_table.align['Module'] = "l"
+ res_table.align['Duration'] = "r"
+ res_table.align['Success'] = "r"
+
# for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
for item in self.summary:
- name = "{0:<17}".format(item['test_name'])
- duration = float(item['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(item['nb_tests'])
- total_nb_tests += int(item['nb_tests'])
- success = "{0:<10}".format(str(item['success']) + '%')
- total_success += float(item['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
+ if item['task_status'] is True:
+ nb_modules += 1
+ total_duration += item['overall_duration']
+ total_nb_tests += item['nb_tests']
+ total_nb_success += item['nb_success']
+ try:
+ success_avg = 100 * item['nb_success'] / item['nb_tests']
+ except ZeroDivisionError:
+ success_avg = 0
+ success_str = f"{success_avg:0.2f}%"
+ duration_str = time.strftime("%H:%M:%S",
+ time.gmtime(item['overall_duration']))
+ res_table.add_row([item['test_name'], duration_str,
+ item['nb_tests'], success_str])
+ payload.append({'module': item['test_name'],
'details': {'duration': item['overall_duration'],
'nb tests': item['nb_tests'],
- 'success': item['success']}})
+ 'success rate': success_str,
+ 'success': item['success'],
+ 'failures': item['failures']}})
total_duration_str = time.strftime("%H:%M:%S",
time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
try:
- self.result = total_success / len(self.summary)
+ self.result = 100 * total_nb_success / total_nb_tests
except ZeroDivisionError:
self.result = 100
+ success_rate = f"{self.result:0.2f}"
+ success_rate_str = str(success_rate) + '%'
+ res_table.add_row(["", "", "", ""])
+ res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
+ success_rate_str])
+
+ LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
+ LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
+ self.case_name, success_rate, nb_modules,
+ len(self.summary))
+ self.details['summary'] = {'duration': total_duration,
+ 'nb tests': total_nb_tests,
+ 'nb success': success_rate}
+ self.details["modules"] = payload
- success_rate = "{:0.2f}".format(self.result)
- success_rate_str = "{0:<10}".format(str(success_rate) + '%')
- report += ("+===================+============"
- "+===============+===========+")
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + success_rate_str + "|\n")
- report += ("+===================+============"
- "+===============+===========+")
- report += "\n"
-
- LOGGER.info("\n" + report)
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': success_rate}})
-
- self.details = payload
-
- LOGGER.info("Rally '%s' success_rate is %s%%",
- self.case_name, success_rate)
-
- def _clean_up(self):
- for creator in reversed(self.creators):
- try:
- creator.clean()
- except Exception as e:
- LOGGER.error('Unexpected error cleaning - %s', e)
+ @staticmethod
+ def export_task(file_name, export_type="html"):
+ """Export all task results (e.g. html or xunit report)
+
+ Raises:
+ subprocess.CalledProcessError: if Rally doesn't return 0
+
+ Returns:
+ None
+ """
+ cmd = ["rally", "task", "export", "--type", export_type,
+ "--deployment",
+ str(getattr(config.CONF, 'rally_deployment_name')),
+ "--to", file_name]
+ LOGGER.debug('running command: %s', cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ @staticmethod
+ def verify_report(file_name, uuid, export_type="html"):
+ """Generate the verifier report (e.g. html or xunit report)
+
+ Raises:
+ subprocess.CalledProcessError: if Rally doesn't return 0
+
+ Returns:
+ None
+ """
+ cmd = ["rally", "verify", "report", "--type", export_type,
+ "--uuid", uuid, "--to", file_name]
+ LOGGER.debug('running command: %s', cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ def clean(self):
+ """Cleanup of OpenStack resources. Should be called on completion."""
+ self.clean_rally_conf()
+ self.clean_rally_logs()
+ if self.flavor_alt:
+ self.orig_cloud.delete_flavor(self.flavor_alt.id)
+ super().clean()
+
+ def is_successful(self):
+ """The overall result of the test."""
+ for item in self.summary:
+ if item['task_status'] is False:
+ return testcase.TestCase.EX_TESTCASE_FAILED
+
+ return super().is_successful()
+
+ @staticmethod
+ def update_rally_logs(res_dir, rally_conf='/etc/rally/rally.conf'):
+ """Print rally logs in res dir"""
+ if not os.path.exists(res_dir):
+ os.makedirs(res_dir)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
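+ # switch Rally to debug logging and write rally.log under res_dir instead of stderr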
+ rconfig.set('DEFAULT', 'debug', True)
+ rconfig.set('DEFAULT', 'use_stderr', False)
+ rconfig.set('DEFAULT', 'log-file', 'rally.log')
+ rconfig.set('DEFAULT', 'log_dir', res_dir)
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ @staticmethod
+ def clean_rally_logs(rally_conf='/etc/rally/rally.conf'):
+ """Clean Rally config"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
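+ # drop the logging options previously set by update_rally_logs()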
+ if rconfig.has_option('DEFAULT', 'use_stderr'):
+ rconfig.remove_option('DEFAULT', 'use_stderr')
+ if rconfig.has_option('DEFAULT', 'debug'):
+ rconfig.remove_option('DEFAULT', 'debug')
+ if rconfig.has_option('DEFAULT', 'log-file'):
+ rconfig.remove_option('DEFAULT', 'log-file')
+ if rconfig.has_option('DEFAULT', 'log_dir'):
+ rconfig.remove_option('DEFAULT', 'log_dir')
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
- @energy.enable_recording
def run(self, **kwargs):
"""Run testcase."""
self.start_time = time.time()
try:
- conf_utils.create_rally_deployment()
- self._prepare_env()
- self._run_tests()
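+ # the parent run() is expected to create the shared resources (image, flavor, networks) used by the scenarios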
+ assert super().run(**kwargs) == testcase.TestCase.EX_OK
+ self.update_rally_logs(self.res_dir)
+ self.create_rally_deployment(environ=self.project.get_environ())
+ self.prepare_run(**kwargs)
+ self.run_tests(**kwargs)
self._generate_report()
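+ # publish the task results both as HTML and JUnit XML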
+ self.export_task(
+ f"{self.results_dir}/{self.case_name}.html")
+ self.export_task(
+ f"{self.results_dir}/{self.case_name}.xml",
+ export_type="junit-xml")
res = testcase.TestCase.EX_OK
- except Exception as exc: # pylint: disable=broad-except
- LOGGER.error('Error with run: %s', exc)
+ except Exception: # pylint: disable=broad-except
+ LOGGER.exception('Error with run:')
+ self.result = 0
res = testcase.TestCase.EX_RUN_ERROR
- finally:
- self._clean_up()
-
self.stop_time = time.time()
return res
@@ -664,22 +719,120 @@ class RallySanity(RallyBase):
"""Initialize RallySanity object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "rally_sanity"
- super(RallySanity, self).__init__(**kwargs)
- self.mode = 'sanity'
- self.test_name = 'all'
+ super().__init__(**kwargs)
self.smoke = True
- self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
self.scenario_dir = os.path.join(self.rally_scenario_dir, 'sanity')


class RallyFull(RallyBase):
"""Rally full testcase implementation."""

+ task_timeout = 7200
+
def __init__(self, **kwargs):
"""Initialize RallyFull object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "rally_full"
- super(RallyFull, self).__init__(**kwargs)
- self.mode = 'full'
- self.test_name = 'all'
+ super().__init__(**kwargs)
self.smoke = False
- self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
+ self.scenario_dir = os.path.join(self.rally_scenario_dir, 'full')
+
+
+class RallyJobs(RallyBase):
+ """Rally OpenStack CI testcase implementation."""
+
+ stests = ["neutron"]
+ task_timeout = 7200
+
+ def __init__(self, **kwargs):
+ """Initialize RallyJobs object."""
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = "rally_jobs"
+ super().__init__(**kwargs)
+ self.task_file = os.path.join(self.rally_dir, 'rally_jobs.yaml')
+ self.task_yaml = None
+
+ def prepare_run(self, **kwargs):
+ """Create resources needed by test scenarios."""
+ super().prepare_run(**kwargs)
+ with open(self.task_file, 'r', encoding='utf-8') as task_file:
+ self.task_yaml = yaml.safe_load(task_file)
+
+ for task in self.task_yaml:
+ if task not in self.tests:
+ raise Exception(f"Test '{task}' not in '{self.tests}'")
+
+ def apply_blacklist(self, case_file_name, result_file_name):
+ # pylint: disable=too-many-branches
+ """Apply blacklist."""
+ LOGGER.debug("Applying blacklist...")
+ black_tests = list(set(self.excl_func() +
+ self.excl_scenario()))
+ if black_tests:
+ LOGGER.debug("Blacklisted tests: %s", str(black_tests))
+
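+ # the jinja2-aware YAML round-trip keeps the template expressions unexpanded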
+ template = YAML(typ='jinja2')
+ with open(case_file_name, 'r', encoding='utf-8') as fname:
+ cases = template.load(fname)
+ if cases.get("version", 1) == 1:
+ # scenarios in dictionary
+ for name in list(cases.keys()):
+ if self.in_iterable_re(name, black_tests):
+ cases.pop(name)
+ else:
+ # workloads in subtasks
+ for sind, subtask in reversed(list(
+ enumerate(cases.get('subtasks', [])))):
+ for wind, workload in reversed(list(
+ enumerate(subtask.get('workloads', [])))):
+ scenario = workload.get('scenario', {})
+ for name in scenario.keys():
+ if self.in_iterable_re(name, black_tests):
+ cases['subtasks'][sind]['workloads'].pop(wind)
+ break
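+ # drop the subtask once all its workloads are blacklisted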
+ if 'workloads' in cases['subtasks'][sind]:
+ if not cases['subtasks'][sind]['workloads']:
+ cases['subtasks'].pop(sind)
+ # scenarios in subtasks
+ for sind, subtask in reversed(list(
+ enumerate(cases.get('subtasks', [])))):
+ scenario = subtask.get('scenario', {})
+ for name in scenario.keys():
+ if self.in_iterable_re(name, black_tests):
+ cases['subtasks'].pop(sind)
+ break
+
+ with open(result_file_name, 'w', encoding='utf-8') as fname:
+ template.dump(cases, fname)
+
+ def build_task_args(self, test_name):
+ """Build arguments for the Rally task."""
+ task_args = {}
+ if self.ext_net:
+ task_args['floating_network'] = str(self.ext_net.name)
+ else:
+ task_args['floating_network'] = ''
+ task_args['image_name'] = str(self.image.name)
+ task_args['flavor_name'] = str(self.flavor.name)
+ return task_args
+
+ def prepare_task(self, test_name):
+ """Prepare resources for test run."""
+ jobs_dir = os.path.join(
+ getattr(config.CONF, 'dir_rally_data'), test_name, 'rally-jobs')
+ task_name = self.task_yaml.get(test_name).get("task")
+ task = os.path.join(jobs_dir, task_name)
+ if not os.path.exists(task):
+ raise Exception(f"The scenario '{task}' does not exist.")
+ LOGGER.debug('Scenario fetched from: %s', task)
+
+ if not os.path.exists(self.temp_dir):
+ os.makedirs(self.temp_dir)
+ task_file_name = os.path.join(self.temp_dir, task_name)
+ self.apply_blacklist(task, task_file_name)
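+ # tag the task with the test name so results can be traced back to the job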
+ self.run_cmd = (["rally", "task", "start", "--tag", test_name,
+ "--task", task_file_name,
+ "--task-args", str(self.build_task_args(test_name))])
+ return True
diff --git a/functest/opnfv_tests/openstack/rally/rally_jobs.yaml b/functest/opnfv_tests/openstack/rally/rally_jobs.yaml
new file mode 100644
index 000000000..2092fb4cf
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/rally_jobs.yaml
@@ -0,0 +1,3 @@
+---
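+# each entry maps a test name to the task file taken from the project's rally-jobs directory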
+neutron:
+ task: task-neutron.yaml
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
deleted file mode 100644
index 7efb5a83b..000000000
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-ceilometer.yaml
+++ /dev/null
@@ -1,458 +0,0 @@
- CeilometerMeters.list_meters:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- limit: 50
- metadata_query:
- status: "terminated"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerResource.list_resources:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- limit: 50
- metadata_query:
- status: "terminated"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_alarm_and_get_history:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- state: "ok"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_delete_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_get_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_list_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_alarm_history:
- -
- args:
- orderby: !!null
- limit: !!null
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_alarms:
- -
- args:
- filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
- orderby: !!null
- limit: 10
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_samples:
- -
- args:
- filter: {"=": {"counter_unit": "instance"}}
- orderby: !!null
- limit: 10
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_unit: "instance"
- counter_volume: 1.0
- resource_id: "resource_id"
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_update_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerStats.create_meter_and_get_stats:
- -
- args:
- user_id: "user-id"
- resource_id: "resource-id"
- counter_volume: 1.0
- counter_unit: ""
- counter_type: "cumulative"
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_get_event:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_list_events:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_list_event_types:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerTraits.create_user_and_list_trait_descriptions:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerTraits.create_user_and_list_traits:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerStats.get_stats:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- meter_name: "benchmark_meter"
- filter_by_user_id: true
- filter_by_project_id: true
- filter_by_resource_id: true
- metadata_query:
- status: "terminated"
- period: 300
- groupby: "resource_id"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerResource.get_tenant_resources:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_volume: 1.0
- counter_unit: "instance"
- {% endcall %}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.list_alarms:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerSamples.list_matched_samples:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_unit: "instance"
- counter_volume: 1.0
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 60
- metadata_list:
- - status: "active"
- name: "fake_resource"
- deleted: "False"
- created_at: "2015-09-04T12:34:19.000000"
- - status: "not_active"
- name: "fake_resource_1"
- deleted: "False"
- created_at: "2015-09-10T06:55:12.000000"
- {% endcall %}
- args:
- limit: 50
- filter_by_user_id: true
- filter_by_project_id: true
- filter_by_resource_id: true
- metadata_query:
- status: "not_active"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerMeters.list_matched_meters:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- limit: 50
- filter_by_user_id: true
- filter_by_project_id: true
- filter_by_resource_id: true
- metadata_query:
- status: "terminated"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerResource.list_matched_resources:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- limit: 50
- filter_by_user_id: true
- filter_by_project_id: true
- metadata_query:
- status: "terminated"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerSamples.list_samples:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_unit: "instance"
- counter_volume: 1.0
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 60
- metadata_list:
- - status: "active"
- name: "fake_resource"
- deleted: "False"
- created_at: "2015-09-04T12:34:19.000000"
- - status: "not_active"
- name: "fake_resource_1"
- deleted: "False"
- created_at: "2015-09-10T06:55:12.000000"
- batch_size: 5
- {% endcall %}
- args:
- limit: 50
- metadata_query:
- status: "not_active"
- sla:
- {{ no_failures_sla() }}
-
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-cinder.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-cinder.yaml
index 87fae11a3..7abeeac68 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-cinder.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-cinder.yaml
@@ -8,6 +8,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -23,6 +25,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{{ volumes() }}
{% endcall %}
runner:
@@ -39,6 +43,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -52,6 +58,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -70,6 +78,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -84,10 +94,15 @@
size:
max: 1
min: 1
+ create_vm_params:
+ nics:
+ - net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -100,11 +115,16 @@
{{ vm_params(image_name,flavor_name) }}
size:
min: 1
- max: 5
+ max: 1
+ create_vm_params:
+ nics:
+ - net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -117,17 +137,21 @@
size: 1
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
-
args:
size:
min: 1
- max: 5
+ max: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -142,6 +166,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
volumes:
size: 1
volumes_per_tenant: 4
@@ -159,6 +185,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{{ volumes() }}
{% endcall %}
runner:
@@ -176,6 +204,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -188,6 +218,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -200,6 +232,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -215,6 +249,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -229,6 +265,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{{ volumes() }}
{% endcall %}
runner:
@@ -244,6 +282,8 @@
read_iops_sec: "1000"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -257,6 +297,8 @@
read_iops_sec: "1000"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -273,6 +315,8 @@
set_read_iops_sec: "1001"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -284,6 +328,8 @@
description: "rally tests creating types"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -295,18 +341,8 @@
description: "rally tests creating types"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumeTypes.create_and_update_volume_type:
- -
- args:
- description: "test"
- update_description: "test update"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -322,17 +358,8 @@
control_location: "front-end"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- CinderVolumeTypes.create_volume_type_add_and_list_type_access:
- -
- args:
- description: "rally tests creating types"
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
index dfc1fc156..993b83ff7 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-glance.yaml
@@ -36,8 +36,9 @@
flavor:
name: {{ flavor_name }}
number_instances: 2
- nics:
- - net-id: {{ netid }}
+ boot_server_kwargs:
+ nics:
+ - net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
quotas:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-gnocchi.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-gnocchi.yaml
new file mode 100644
index 000000000..b4487daa0
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-gnocchi.yaml
@@ -0,0 +1,181 @@
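+ # Gnocchi API scenarios; runner, context and SLA come from the shared Jinja macros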
+ Gnocchi.list_capabilities:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Gnocchi.get_status:
+ -
+ args:
+ detailed: false
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicyRule.list_archive_policy_rule:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicyRule.create_archive_policy_rule:
+ -
+ args:
+ metric_pattern: "cpu_*"
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicyRule.create_delete_archive_policy_rule:
+ -
+ args:
+ metric_pattern: "cpu_*"
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicy.list_archive_policy:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicy.create_archive_policy:
+ -
+ args:
+ definition:
+ - granularity: "0:00:01"
+ timespan: "1:00:00"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicy.create_delete_archive_policy:
+ -
+ args:
+ definition:
+ - granularity: "0:00:01"
+ timespan: "1:00:00"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResourceType.list_resource_type:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResourceType.create_resource_type:
+ -
+ args:
+ attributes:
+ foo:
+ required: false
+ type: "string"
+ bar:
+ required: true
+ type: "number"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResourceType.create_delete_resource_type:
+ -
+ args:
+ attributes:
+ foo:
+ required: false
+ type: "string"
+ bar:
+ required: true
+ type: "number"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiMetric.list_metric:
+ -
+ args:
+ limit: 10000
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiMetric.create_metric:
+ -
+ args:
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiMetric.create_delete_metric:
+ -
+ args:
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResource.create_resource:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResource.create_delete_resource:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
index 2951e953a..b2248d499 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-neutron.yaml
@@ -27,7 +27,8 @@
ports_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -50,7 +51,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -74,7 +74,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -108,7 +107,8 @@
ports_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -129,7 +129,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -151,7 +150,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -185,7 +183,8 @@
ports_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -206,7 +205,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -227,7 +225,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -315,7 +312,8 @@
ports_per_network: 2
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -332,7 +330,6 @@
subnets_per_network: 2
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -350,7 +347,8 @@
subnets_per_network: 2
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
diff --git a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
index 512448fd4..210591f9b 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/full/opnfv-nova.yaml
@@ -39,9 +39,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -59,9 +56,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -80,9 +74,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -104,9 +95,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -124,9 +112,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -140,11 +125,13 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- volume_size: 10
+ volume_size: 1
nics:
- net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -200,7 +187,7 @@
NovaServers.boot_and_live_migrate_server:
- args:
{{ vm_params(image_name, flavor_name) }}
- block_migration: false
+ block_migration: {{ block_migration }}
nics:
- net-id: {{ netid }}
context:
@@ -214,13 +201,15 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- size: 10
- block_migration: false
+ size: 1
+ block_migration: {{ block_migration }}
boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -229,13 +218,15 @@
NovaServers.boot_server_from_volume_and_live_migrate:
- args:
{{ vm_params(image_name, flavor_name) }}
- block_migration: false
- volume_size: 10
+ block_migration: {{ block_migration }}
+ volume_size: 1
force_delete: false
nics:
- net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -245,14 +236,11 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- server_kwargs:
+ boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova(keypairs=true) }}
@@ -266,18 +254,17 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- volume_size: 5
+ volume_size: 1
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_volumes() }}
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -293,9 +280,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -375,6 +359,40 @@
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
network: {}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_associate_and_dissociate_floating_ip:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ floating_network: {{ floating_network }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_and_associate_floating_ip:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ floating_network: {{ floating_network }}
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-barbican.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-barbican.yaml
new file mode 100644
index 000000000..9dd9ca271
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-barbican.yaml
@@ -0,0 +1,98 @@
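+ # Barbican secret and container lifecycle scenarios, one workload per API call under test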
+ BarbicanContainers.create_and_add:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanContainers.create_certificate_and_delete:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanContainers.create_and_delete:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanContainers.create_rsa_and_delete:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.create_and_delete:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.create_and_get:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.create_and_list:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.create:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.get:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanContainers.list:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ BarbicanSecrets.list:
+ -
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
index a0682acce..dcb007c50 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-quotas.yaml
@@ -4,6 +4,8 @@
max_quota: 1024
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -15,6 +17,8 @@
max_quota: 1024
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -31,17 +35,6 @@
sla:
{{ no_failures_sla() }}
- Quotas.nova_update_and_delete:
- -
- args:
- max_quota: 1024
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
Quotas.nova_update:
-
args:
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml
new file mode 100644
index 000000000..66d7cd24d
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-swift.yaml
@@ -0,0 +1,71 @@
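+ # Swift object-store scenarios; the generated users get the "admin" role required by the contexts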
+ SwiftObjects.create_container_and_object_then_list_objects:
+ -
+ args:
+ objects_per_container: 2
+ object_size: 5120
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.list_objects_in_containers:
+ -
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ swift_objects:
+ containers_per_tenant: 1
+ objects_per_container: 10
+ object_size: 1024
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.create_container_and_object_then_download_object:
+ -
+ args:
+ objects_per_container: 5
+ object_size: 1024
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.create_container_and_object_then_delete_all:
+ -
+ args:
+ objects_per_container: 5
+ object_size: 102400
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ sla:
+ {{ no_failures_sla() }}
+
+ SwiftObjects.list_and_download_objects_in_containers:
+ -
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ roles:
+ - "admin"
+ swift_objects:
+ containers_per_tenant: 1
+ objects_per_container: 5
+ object_size: 10240
+ sla:
+ {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/opnfv-vm.yaml b/functest/opnfv_tests/openstack/rally/scenario/opnfv-vm.yaml
index 74f509925..3aa8ac8e5 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/opnfv-vm.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/opnfv-vm.yaml
@@ -1,42 +1,19 @@
- VMTasks.boot_runcommand_delete:
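+ # dd_load_test boots a server and runs a dd-based disk check over SSH, replacing boot_runcommand_delete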
+ VMTasks.dd_load_test:
-
args:
- {{ vm_params(image_name, flavor_name) }}
- floating_network: {{ floating_network }}
- force_delete: false
- command:
- interpreter: /bin/sh
- script_file: {{ sup_dir }}/instance_dd_test.sh
- username: cirros
+ flavor:
+ name: {{ flavor_name }}
+ image:
+ name: {{ image_name }}
nics:
- net-id: {{ netid }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
- {% endcall %}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- sla:
- {{ no_failures_sla() }}
-
- -
- args:
- {{ vm_params(image_name, flavor_name) }}
- fixed_network: private
floating_network: {{ floating_network }}
force_delete: false
- command:
- interpreter: /bin/sh
- script_file: {{ sup_dir }}/instance_dd_test.sh
- use_floatingip: true
- username: cirros
- nics:
- - net-id: {{ netid }}
- volume_args:
- size: 2
+ username: {{ username }}
+ runner:
+ {{ constant_runner(concurrency=1, times=iterations, is_smoke=smoke) }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ network: {}
sla:
{{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
deleted file mode 100644
index bb070cd3a..000000000
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-ceilometer.yaml
+++ /dev/null
@@ -1,247 +0,0 @@
- CeilometerAlarms.create_alarm_and_get_history:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- state: "ok"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_delete_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_get_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_list_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_alarm_history:
- -
- args:
- orderby: !!null
- limit: !!null
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_alarms:
- -
- args:
- filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]}
- orderby: !!null
- limit: 10
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerQueries.create_and_query_samples:
- -
- args:
- filter: {"=": {"counter_unit": "instance"}}
- orderby: !!null
- limit: 10
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_unit: "instance"
- counter_volume: 1.0
- resource_id: "resource_id"
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.create_and_update_alarm:
- -
- args:
- meter_name: "ram_util"
- threshold: 10.0
- type: "threshold"
- statistic: "avg"
- alarm_actions: ["http://localhost:8776/alarm"]
- ok_actions: ["http://localhost:8776/ok"]
- insufficient_data_actions: ["http://localhost:8776/notok"]
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_get_event:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_list_events:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerEvents.create_user_and_list_event_types:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerTraits.create_user_and_list_trait_descriptions:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerTraits.create_user_and_list_traits:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerStats.get_stats:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "benchmark_meter"
- counter_type: "gauge"
- counter_unit: "%"
- counter_volume: 100
- resources_per_tenant: 100
- samples_per_resource: 100
- timestamp_interval: 10
- metadata_list:
- -
- status: "active"
- name: "rally benchmark on"
- deleted: "false"
- -
- status: "terminated"
- name: "rally benchmark off"
- deleted: "true"
- {% endcall %}
- args:
- meter_name: "benchmark_meter"
- filter_by_user_id: true
- filter_by_project_id: true
- filter_by_resource_id: true
- metadata_query:
- status: "terminated"
- period: 300
- groupby: "resource_id"
- sla:
- {{ no_failures_sla() }}
-
- CeilometerResource.get_tenant_resources:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {% call user_context(tenants_amount, users_amount, use_existing_users) %}
- ceilometer:
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_volume: 1.0
- counter_unit: "instance"
- {% endcall %}
- sla:
- {{ no_failures_sla() }}
-
- CeilometerAlarms.list_alarms:
- -
- runner:
- {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
- context:
- {{ user_context(tenants_amount, users_amount, use_existing_users) }}
- sla:
- {{ no_failures_sla() }}
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-cinder.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-cinder.yaml
index 832358075..f94a5a1a4 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-cinder.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-cinder.yaml
@@ -6,6 +6,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{{ volumes() }}
{% endcall %}
runner:
@@ -23,6 +25,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -35,6 +39,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -47,6 +53,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -62,6 +70,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -76,6 +86,8 @@
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
quotas:
{{ unlimited_volumes() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{{ volumes() }}
{% endcall %}
runner:
@@ -91,6 +103,8 @@
read_iops_sec: "1000"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -107,6 +121,8 @@
set_read_iops_sec: "1001"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -118,6 +134,8 @@
description: "rally tests creating types"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -133,6 +151,8 @@
control_location: "front-end"
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
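
Note: every cinder scenario above gains an api_versions entry through the new volume_service macro, so one task file can target Cinder v2 or v3 depending on the deployment. A sketch of the context the macro is assumed to expand to, following Rally's api_versions context schema (both values are illustrative):

# Assumed expansion of {{ volume_service(version=..., service_type=...) }}.
volume_version = "3"               # e.g. supplied as a task argument
volume_service_type = "volumev3"   # must match the service catalog entry

context = {
    "api_versions": {
        "cinder": {
            "version": volume_version,
            "service_type": volume_service_type,
        }
    }
}
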
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
index 1b61762f9..279e81439 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-glance.yaml
@@ -36,8 +36,9 @@
flavor:
name: {{ flavor_name }}
number_instances: 2
- nics:
- - net-id: {{ netid }}
+ boot_server_kwargs:
+ nics:
+ - net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
quotas:
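
Note: newer Rally releases take server boot options through a single boot_server_kwargs argument instead of a top-level nics key, hence the relocation above. The shape of the args mapping before and after, as plain dicts (placeholder UUID):

netid = "11111111-2222-3333-4444-555555555555"  # placeholder network UUID

old_args = {"nics": [{"net-id": netid}]}
new_args = {"boot_server_kwargs": {"nics": [{"net-id": netid}]}}
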
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-gnocchi.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-gnocchi.yaml
new file mode 100644
index 000000000..d99b15f81
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-gnocchi.yaml
@@ -0,0 +1,119 @@
+ Gnocchi.list_capabilities:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ Gnocchi.get_status:
+ -
+ args:
+ detailed: false
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicyRule.list_archive_policy_rule:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicyRule.create_delete_archive_policy_rule:
+ -
+ args:
+ metric_pattern: "cpu_*"
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicy.list_archive_policy:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiArchivePolicy.create_delete_archive_policy:
+ -
+ args:
+ definition:
+ - granularity: "0:00:01"
+ timespan: "1:00:00"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResourceType.list_resource_type:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResourceType.create_delete_resource_type:
+ -
+ args:
+ attributes:
+ foo:
+ required: false
+ type: "string"
+ bar:
+ required: true
+ type: "number"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiMetric.list_metric:
+ -
+ args:
+ limit: 10000
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiMetric.create_delete_metric:
+ -
+ args:
+ archive_policy_name: "low"
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ GnocchiResource.create_delete_resource:
+ -
+ context:
+ {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
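
Note: the archive-policy definition above uses Gnocchi's HH:MM:SS duration strings — one-second granularity kept over a one-hour timespan. A quick check of how many aggregation points that implies per metric (pure arithmetic, no Gnocchi client required):

from datetime import timedelta

granularity = timedelta(seconds=1)  # "0:00:01"
timespan = timedelta(hours=1)       # "1:00:00"

points = timespan // granularity    # aggregates retained per aggregation method
print(points)  # 3600
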
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-neutron.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-neutron.yaml
index da99a48b5..3eb7652c0 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-neutron.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-neutron.yaml
@@ -21,7 +21,8 @@
ports_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -42,7 +43,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -64,7 +64,8 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -98,7 +99,8 @@
ports_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
+ network:
+ router: {}
quotas:
neutron:
network: -1
@@ -119,7 +121,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
@@ -140,7 +141,6 @@
subnets_per_network: 1
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network: {}
quotas:
neutron:
network: -1
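
Note: the pattern in these hunks is that scenarios exercising routers keep the network context and now request the router explicitly (router: {}), while scenarios that create their own networks drop the context altogether. The two context shapes, as plain dicts (assuming the Rally network-context schema with router options nested under network):

with_router = {"network": {"router": {}}}  # pre-create networks plus a router
standalone = {}                            # scenario builds its own networks
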
diff --git a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
index 801938c4e..1fbfccb5a 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
+++ b/functest/opnfv_tests/openstack/rally/scenario/sanity/opnfv-nova.yaml
@@ -1,7 +1,7 @@
NovaServers.boot_and_live_migrate_server:
- args:
{{ vm_params(image_name, flavor_name) }}
- block_migration: false
+ block_migration: {{ block_migration }}
nics:
- net-id: {{ netid }}
context:
@@ -15,13 +15,15 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- size: 10
- block_migration: false
+ size: 1
+ block_migration: {{ block_migration }}
boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -30,13 +32,15 @@
NovaServers.boot_server_from_volume_and_live_migrate:
- args:
{{ vm_params(image_name, flavor_name) }}
- block_migration: false
- volume_size: 10
+ block_migration: {{ block_migration }}
+ volume_size: 1
force_delete: false
nics:
- net-id: {{ netid }}
context:
{{ user_context(tenants_amount, users_amount, use_existing_users) }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
sla:
@@ -46,14 +50,11 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- server_kwargs:
+ boot_server_kwargs:
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova(keypairs=true) }}
@@ -67,18 +68,17 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- volume_size: 5
+ volume_size: 1
nics:
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_volumes() }}
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
+ api_versions:
+ {{ volume_service(version=volume_version, service_type=volume_service_type) }}
{% endcall %}
runner:
{{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
@@ -94,9 +94,6 @@
- net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
- network:
- networks_per_tenant: 1
- start_cidr: "100.1.0.0/25"
quotas:
{{ unlimited_neutron() }}
{{ unlimited_nova() }}
@@ -122,7 +119,24 @@
-
args:
{{ vm_params(image_name, flavor_name) }}
- auto_assign_nic: true
+ nics:
+ - net-id: {{ netid }}
+ context:
+ {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+ network: {}
+ {% endcall %}
+ runner:
+ {{ constant_runner(concurrency=concurrency, times=iterations, is_smoke=smoke) }}
+ sla:
+ {{ no_failures_sla() }}
+
+ NovaServers.boot_server_associate_and_dissociate_floating_ip:
+ -
+ args:
+ {{ vm_params(image_name, flavor_name) }}
+ floating_network: {{ floating_network }}
+ nics:
+ - net-id: {{ netid }}
context:
{% call user_context(tenants_amount, users_amount, use_existing_users) %}
network: {}
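
Note: block_migration is now a template variable rather than a hard-coded false, so the live-migration scenarios can follow the storage backend (block migration for local disks, plain live migration for shared storage), and the volume sizes shrink from 5-10 GB to 1 GB to speed the runs up. A minimal sketch of how such a boolean flows through Jinja2 into the task (the rendering harness is illustrative; in practice the value comes from the task arguments):

import jinja2

snippet = jinja2.Template(
    "args:\n"
    "  block_migration: {{ block_migration }}\n"
    "  volume_size: 1\n"
)
print(snippet.render(block_migration=True))  # YAML 1.1 reads "True" as a bool
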
diff --git a/functest/opnfv_tests/openstack/rally/scenario/support/instance_dd_test.sh b/functest/opnfv_tests/openstack/rally/scenario/support/instance_dd_test.sh
deleted file mode 100644
index e3bf23405..000000000
--- a/functest/opnfv_tests/openstack/rally/scenario/support/instance_dd_test.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
-file=/tmp/test.img
-c=${1:-$SIZE}
-c=${c:-1000} #default is 1GB
-write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
-read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
-[ -f $file ] && rm $file
-
-echo "{
- \"write_seq_${c}m\": $write_seq,
- \"read_seq_${c}m\": $read_seq
- }"
diff --git a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
index 35b107838..75afb2dbe 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
+++ b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_ports.yaml.template
@@ -7,7 +7,7 @@ parameters:
default: public
image:
type: string
- default: cirros-0.4.0-x86_64-uec
+ default: cirros-0.6.1-x86_64-uec
flavor:
type: string
default: m1.tiny
diff --git a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
index 5c9a86b79..9a0f1aa72 100644
--- a/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
+++ b/functest/opnfv_tests/openstack/rally/scenario/templates/server_with_volume.yaml.template
@@ -4,7 +4,7 @@ parameters:
# set all correct defaults for parameters before launch test
image:
type: string
- default: cirros-0.4.0-x86_64-uec
+ default: cirros-0.5.1-x86_64-uec
flavor:
type: string
default: m1.tiny
diff --git a/functest/opnfv_tests/openstack/rally/task.yaml b/functest/opnfv_tests/openstack/rally/task.yaml
index 65f101fbe..649c04557 100644
--- a/functest/opnfv_tests/openstack/rally/task.yaml
+++ b/functest/opnfv_tests/openstack/rally/task.yaml
@@ -4,7 +4,7 @@
{%- endif %}
{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%}
-{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%}
+{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args, volume_service -%}
---
{% if "authenticate" in service_list %}
@@ -15,6 +15,10 @@
{%- include "var/opnfv-cinder.yaml"-%}
{% endif %}
+{% if "gnocchi" in service_list %}
+{%- include "var/opnfv-gnocchi.yaml"-%}
+{% endif %}
+
{% if "keystone" in service_list %}
{%- include "var/opnfv-keystone.yaml"-%}
{% endif %}
@@ -31,10 +35,6 @@
{%- include "var/opnfv-neutron.yaml"-%}
{% endif %}
-{% if "ceilometer" in service_list %}
-{%- include "var/opnfv-ceilometer.yaml"-%}
-{% endif %}
-
{% if "quotas" in service_list %}
{%- include "var/opnfv-quotas.yaml"-%}
{% endif %}
@@ -43,6 +43,14 @@
{%- include "var/opnfv-heat.yaml"-%}
{% endif %}
+{% if "swift" in service_list %}
+{%- include "var/opnfv-swift.yaml"-%}
+{% endif %}
+
+{% if "barbican" in service_list %}
+{%- include "var/opnfv-barbican.yaml"-%}
+{% endif %}
+
{% if "vm" in service_list %}
{%- include "var/opnfv-vm.yaml"-%}
{% endif %}
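
Note: task.yaml assembles the final Rally task by conditionally including one scenario file per entry in service_list; this change wires in gnocchi, swift, and barbican and drops the ceilometer include. The selection logic, restated in plain Python (file names mirror the includes; both lists are illustrative):

service_list = ["authenticate", "cinder", "gnocchi", "neutron", "nova"]

supported = ["authenticate", "glance", "cinder", "gnocchi", "keystone",
             "neutron", "nova", "quotas", "heat", "swift", "barbican", "vm"]

includes = [f"var/opnfv-{svc}.yaml" for svc in supported if svc in service_list]
print(includes)
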
diff --git a/functest/opnfv_tests/openstack/refstack/__init__.py b/functest/opnfv_tests/openstack/refstack/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/refstack/__init__.py
diff --git a/functest/opnfv_tests/openstack/refstack/refstack.py b/functest/opnfv_tests/openstack/refstack/refstack.py
new file mode 100644
index 000000000..87932020b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/refstack/refstack.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""Refstack testcase implementation."""
+
+import logging
+import os
+import re
+import subprocess
+import yaml
+
+from functest.opnfv_tests.openstack.tempest import tempest
+from functest.utils import config
+
+
+class Refstack(tempest.TempestCommon):
+ """Refstack testcase implementation class."""
+
+ __logger = logging.getLogger(__name__)
+
+ def _extract_refstack_data(self, refstack_list):
+ yaml_data = ""
+ with open(refstack_list, encoding='utf-8') as def_file:
+ for line in def_file:
+ try:
+ grp = re.search(r'^([^\[]*)(\[.*\])\n*$', line)
+ yaml_data = f"{yaml_data}\n{grp.group(1)}: {grp.group(2)}"
+ except Exception: # pylint: disable=broad-except
+ self.__logger.warning("Cannot parse %s", line)
+ return yaml.full_load(yaml_data)
+
+ def _extract_tempest_data(self):
+ olddir = os.getcwd()
+ try:
+ os.chdir(self.verifier_repo_dir)
+ cmd = ['stestr', 'list', '^tempest.']
+ output = subprocess.check_output(cmd)
+ except subprocess.CalledProcessError as cpe:
+ self.__logger.error(
+ "Exception when listing tempest tests: %s\n%s",
+ cpe.cmd, cpe.output.decode("utf-8"))
+ raise
+ finally:
+ os.chdir(olddir)
+ yaml_data2 = ""
+ for line in output.splitlines():
+ try:
+ grp = re.search(r'^([^\[]*)(\[.*\])\n*$', line.decode("utf-8"))
+ yaml_data2 = f"{yaml_data2}\n{grp.group(1)}: {grp.group(2)}"
+ except Exception: # pylint: disable=broad-except
+ self.__logger.warning("Cannot parse %s. skipping it", line)
+ return yaml.full_load(yaml_data2)
+
+ def generate_test_list(self, **kwargs):
+ refstack_list = os.path.join(
+ getattr(config.CONF, 'dir_refstack_data'),
+ f"{kwargs.get('target', 'compute')}.txt")
+ self.backup_tempest_config(self.conf_file, '/etc')
+ refstack_data = self._extract_refstack_data(refstack_list)
+ tempest_data = self._extract_tempest_data()
+ with open(self.list, 'w', encoding='utf-8') as ref_file:
+ for key in refstack_data.keys():
+ try:
+ for data in tempest_data[key]:
+ if data == refstack_data[key][0]:
+ break
+ else:
+ self.__logger.info("%s: ids differ. skipping it", key)
+ continue
+ value = str(tempest_data[key]).replace(
+ "'", "").replace(", ", ",")
+ ref_file.write(f"{key}{value}\n")
+ except Exception: # pylint: disable=broad-except
+ self.__logger.info("%s: not found. skipping it", key)
+ continue
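
Note: generate_test_list intersects the RefStack guideline list with the tests stestr actually discovers, matching on test name and comparing idempotent ids; only tests present in both, with identical ids, end up in self.list. The parsing trick, demonstrated on a single guideline line (one real id from the list below):

import re
import yaml

line = ("tempest.api.compute.test_versions.TestVersions."
        "test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]")
grp = re.search(r'^([^\[]*)(\[.*\])\n*$', line)
entry = yaml.full_load(f"{grp.group(1)}: {grp.group(2)}")
print(entry)
# {'tempest.api.compute.test_versions.TestVersions.test_list_api_versions':
#  ['id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c']}
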
diff --git a/functest/opnfv_tests/openstack/refstack_client/defcore.txt b/functest/opnfv_tests/openstack/refstack_client/defcore.txt
deleted file mode 100644
index e958b47cd..000000000
--- a/functest/opnfv_tests/openstack/refstack_client/defcore.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-# Set of DefCore Tempest test cases that are required and not flagged.
-# According to https://github.com/openstack/interop/blob/master/doc/source/guidelines/2017.09.rst,
-# some tests are still flagged due to outstanding bugs in the Tempest library,
-# particularly tests that require SSH. RefStack developers
-# are working on correcting these bugs upstream. Please note that although some tests
-# are flagged because of bugs, there is still an expectation that the capabilities
-# covered by the tests are available.
-# The list contains only OpenStack core compute tests (no object storage).
-# The approved guidelines (2017.09) are valid for the Mitaka, Newton, Ocata, and Pike releases of OpenStack.
-# The list can be generated using the REST API of the RefStack project:
-# https://refstack.openstack.org/api/v1/guidelines/2017.09/tests?target=compute&type=required&alias=true&flag=false
-tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors[id-e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59]
-tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail[id-6e85fde4-b3cd-4137-ab72-ed5f418e8c24]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.servers.test_availability_zone.AZV2TestJSON.test_get_availability_zone_list_with_non_admin_user[id-a8333aa2-205c-449f-a828-d38c2489bf25]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_active_server[id-925fdfb4-5b13-47ea-ac8a-c36ae6fddb05]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_active_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_reboot_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
-tempest.api.compute.test_versions.TestVersions.test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]
-# tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
-tempest.api.identity.v3.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image[id-f848bb94-1c6e-45a4-8726-39e3a5b23535]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image[id-f66891a7-a35c-41a8-b590-a065c2a1caa6]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id[id-32248db1-ab88-4821-9604-c7c369f1f88c]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image[id-6fe40f1c-57bd-4918-89cc-8500f850f3de]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image[id-e57fc127-7ba0-4693-92d7-1d8a05ebcba9]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id[id-ef45000d-0a72-4781-866d-4cb7bf2562ad]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image[id-668743d5-08ad-4480-b2b8-15da34f81d9f]
-tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image[id-10407036-6059-4f95-a2cd-cbbbee7ed329]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag[id-39c023a2-325a-433a-9eea-649bf1414b19]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image[id-8cd30f82-6f9a-4c6e-8034-c1b51fba43d9]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTestJSON.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTestJSON.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1]
-tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9]
-tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6]
-tempest.api.network.test_subnetpools_extensions.SubnetPoolsTestJSON.test_create_list_show_update_delete_subnetpools[id-62595970-ab1c-4b7f-8fcc-fddfe55e9811]
-tempest.api.volume.test_availability_zone.AvailabilityZoneTestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
-tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
-tempest.api.volume.test_extensions.ExtensionsTestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
-tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_update_show_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_update_show_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
-tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
-tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
-tempest.api.volume.test_volumes_snapshots_list.VolumesSnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots_list.VolumesSnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesSnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesSnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
deleted file mode 100644
index fe32da66b..000000000
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""Refstack client testcase implemenation."""
-
-from __future__ import division
-
-import argparse
-import logging
-import os
-import re
-import sys
-import subprocess
-import time
-
-import pkg_resources
-
-from functest.core import testcase
-from functest.energy import energy
-from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
- import TempestConf
-from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
-
-__author__ = ("Matthew Li <matthew.lijun@huawei.com>,"
- "Linda Wang <wangwulin@huawei.com>")
-
-# logging configuration
-LOGGER = logging.getLogger(__name__)
-
-
-class RefstackClient(testcase.TestCase):
- """RefstackClient testcase implementation class."""
-
- def __init__(self, **kwargs):
- """Initialize RefstackClient testcase object."""
- if "case_name" not in kwargs:
- kwargs["case_name"] = "refstack_defcore"
- super(RefstackClient, self).__init__(**kwargs)
- self.tempestconf = None
- self.conf_path = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.functest_test = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests')
- self.defcore_list = 'openstack/refstack_client/defcore.txt'
- self.confpath = os.path.join(self.functest_test,
- self.conf_path)
- self.defcorelist = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
- self.testlist = None
- self.insecure = ''
- if ('https' in CONST.__getattribute__('OS_AUTH_URL') and
- CONST.__getattribute__('OS_INSECURE').lower() == 'true'):
- self.insecure = '-k'
-
- def generate_conf(self):
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
-
- self.tempestconf = TempestConf()
- self.tempestconf.generate_tempestconf()
-
- def run_defcore(self, conf, testlist):
- """Run defcore sys command."""
- cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(self.insecure, conf, testlist))
- LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
- ft_utils.execute_command(cmd)
-
- def run_defcore_default(self):
- """Run default defcore sys command."""
- options = ["-v"] if not self.insecure else ["-v", self.insecure]
- cmd = (["refstack-client", "test", "-c", self.confpath] +
- options + ["--test-list", self.defcorelist])
- LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
-
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "environment.log"), 'w+') as f_env:
- f_env.write(
- ("Refstack environment:\n"
- " SUT: {}\n Scenario: {}\n Node: {}\n Date: {}\n").format(
- CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- CONST.__getattribute__('NODE_NAME'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'w+') as f_stdout:
- subprocess.call(cmd, shell=False, stdout=f_stdout,
- stderr=subprocess.STDOUT)
-
- def parse_refstack_result(self):
- """Parse Refstack results."""
- try:
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'r') as logfile:
- for line in logfile.readlines():
- if 'Tests' in line:
- break
- if re.search(r"\} tempest\.", line):
- LOGGER.info(line.replace('\n', ''))
-
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'r') as logfile:
- output = logfile.read()
-
- for match in re.findall(r"Ran: (\d+) tests in (\d+\.\d{4}) sec.",
- output):
- num_tests = match[0]
- LOGGER.info("Ran: %s tests in %s sec.", num_tests, match[1])
- for match in re.findall(r"(- Passed: )(\d+)", output):
- num_success = match[1]
- LOGGER.info("".join(match))
- for match in re.findall(r"(- Skipped: )(\d+)", output):
- num_skipped = match[1]
- LOGGER.info("".join(match))
- for match in re.findall(r"(- Failed: )(\d+)", output):
- num_failures = match[1]
- LOGGER.info("".join(match))
- success_testcases = []
- for match in re.findall(r"\{0\} (.*?) \.{3} ok", output):
- success_testcases.append(match)
- failed_testcases = []
- for match in re.findall(r"\{0\} (.*?) \.{3} FAILED", output):
- failed_testcases.append(match)
- skipped_testcases = []
- for match in re.findall(r"\{0\} (.*?) \.{3} SKIPPED:", output):
- skipped_testcases.append(match)
-
- num_executed = int(num_tests) - int(num_skipped)
-
- try:
- self.result = 100 * int(num_success) / int(num_executed)
- except ZeroDivisionError:
- LOGGER.error("No test has been executed")
-
- self.details = {"tests": int(num_tests),
- "failures": int(num_failures),
- "success": success_testcases,
- "errors": failed_testcases,
- "skipped": skipped_testcases}
- except Exception:
- self.result = 0
-
- LOGGER.info("Testcase %s success_rate is %s%%",
- self.case_name, self.result)
-
- @energy.enable_recording
- def run(self, **kwargs):
- """
- Start RefstackClient testcase.
-
-        Used by the Functest command line:
-        functest testcase run refstack_defcore
- """
- self.start_time = time.time()
-
- try:
- # Make sure that Tempest is configured
- if not self.tempestconf:
- self.generate_conf()
- self.run_defcore_default()
- self.parse_refstack_result()
- res = testcase.TestCase.EX_OK
- except Exception:
- LOGGER.exception("Error with run")
- res = testcase.TestCase.EX_RUN_ERROR
- finally:
- self.tempestconf.clean()
-
- self.stop_time = time.time()
- return res
-
- def _prep_test(self):
- """Check that the config file exists."""
- if not os.path.isfile(self.confpath):
- LOGGER.error("Conf file not valid: %s", self.confpath)
- if not os.path.isfile(self.testlist):
- LOGGER.error("testlist file not valid: %s", self.testlist)
-
- def main(self, **kwargs):
- """
- Execute RefstackClient testcase manually.
-
-        Used for manual runs:
-        python refstack_client.py -c <tempest_conf_path>
-            --testlist <testlist_path>
-        A reference refstack_tempest.conf can be generated with:
-        python tempest_conf.py
- """
- try:
- self.confpath = kwargs['config']
- self.testlist = kwargs['testlist']
- except KeyError as exc:
- LOGGER.error("Cannot run refstack client. Please check "
- "%s", exc)
- return self.EX_RUN_ERROR
- try:
- self._prep_test()
- self.run_defcore(self.confpath, self.testlist)
- res = testcase.TestCase.EX_OK
- except Exception as exc:
- LOGGER.error('Error with run: %s', exc)
- res = testcase.TestCase.EX_RUN_ERROR
-
- return res
-
-
-class RefstackClientParser(object): # pylint: disable=too-few-public-methods
- """Command line argument parser helper."""
-
- def __init__(self):
- """Initialize helper object."""
- self.functest_test = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests')
- self.conf_path = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.defcore_list = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
- self.confpath = os.path.join(self.functest_test,
- self.conf_path)
- self.defcorelist = os.path.join(self.functest_test,
- self.defcore_list)
- self.parser = argparse.ArgumentParser()
- self.parser.add_argument(
- '-c', '--config',
- help='the file path of refstack_tempest.conf',
- default=self.confpath)
- self.parser.add_argument(
- '-t', '--testlist',
- help='Specify the file path or URL of a test list text file. '
- 'This test list will contain specific test cases that '
- 'should be tested.',
- default=self.defcorelist)
-
- def parse_args(self, argv=None):
- """Parse command line arguments."""
- return vars(self.parser.parse_args(argv))
-
-
-def main():
- """Run RefstackClient testcase with CLI."""
- logging.basicConfig()
- refstackclient = RefstackClient()
- parser = RefstackClientParser()
- args = parser.parse_args(sys.argv[1:])
- try:
- result = refstackclient.main(**args)
- if result != testcase.TestCase.EX_OK:
- return result
- except Exception:
- return testcase.TestCase.EX_RUN_ERROR
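Note on the removal above: parse_refstack_result() derived its success rate from a few regular expressions run over refstack.log. A minimal, self-contained sketch of that computation, using the same patterns as the removed code (the sample log excerpt below is fabricated for illustration):

    #!/usr/bin/env python3
    # Minimal sketch of the summary parsing done by the removed
    # parse_refstack_result(); the sample output is fabricated.
    import re

    output = """Ran: 219 tests in 1024.5568 sec.
     - Passed: 210
     - Skipped: 6
     - Failed: 3
    """

    num_tests = num_success = num_skipped = 0
    for match in re.findall(r"Ran: (\d+) tests in (\d+\.\d{4}) sec.", output):
        num_tests = int(match[0])
    for match in re.findall(r"(- Passed: )(\d+)", output):
        num_success = int(match[1])
    for match in re.findall(r"(- Skipped: )(\d+)", output):
        num_skipped = int(match[1])

    # Same formula as the removed code: passed / executed, in percent.
    num_executed = num_tests - num_skipped
    print(100 * num_success / num_executed)  # ~98.6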
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
deleted file mode 100644
index db7452271..000000000
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-
-# matthew.lijun@huawei.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-import logging
-import pkg_resources
-
-from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils import openstack_utils
-from functest.utils.constants import CONST
-from functest.opnfv_tests.openstack.tempest.tempest \
- import TempestResourcesManager
-
-""" logging configuration """
-logger = logging.getLogger(__name__)
-
-
-class TempestConf(object):
- def __init__(self, **kwargs):
- self.VERIFIER_ID = conf_utils.get_verifier_id()
- self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
- self.VERIFIER_ID)
- self.DEPLOYMENT_ID = conf_utils.get_verifier_deployment_id()
- self.DEPLOYMENT_DIR = conf_utils.get_verifier_deployment_dir(
- self.VERIFIER_ID, self.DEPLOYMENT_ID)
- self.confpath = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.resources = TempestResourcesManager(**kwargs)
-
- def generate_tempestconf(self):
- try:
- openstack_utils.source_credentials(
- CONST.__getattribute__('openstack_creds'))
- resources = self.resources.create(create_project=True,
- use_custom_images=True,
- use_custom_flavors=True)
- conf_utils.configure_tempest_defcore(
- self.DEPLOYMENT_DIR,
- image_id=resources.get("image_id"),
- flavor_id=resources.get("flavor_id"),
- image_id_alt=resources.get("image_id_alt"),
- flavor_id_alt=resources.get("flavor_id_alt"),
- tenant_id=resources.get("project_id"))
- except Exception as e:
- logger.error("error with generating refstack client "
- "reference tempest conf file: %s", e)
-
- def main(self):
- try:
- self.generate_tempestconf()
- logger.info("a reference tempest conf file generated "
- "at %s", self.confpath)
- except Exception as e:
- logger.error('Error with run: %s', e)
-
- def clean(self):
- self.resources.cleanup()
-
-
-def main():
- logging.basicConfig()
- tempestconf = TempestConf()
- tempestconf.main()
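For reference, the removed TempestConf class was driven either through the module's main() ("python tempest_conf.py") or programmatically. A hedged sketch of the programmatic path, assuming the deleted module were still importable:

    # Hypothetical driver for the removed class (assumes the deleted module
    # were still importable); mirrors what "python tempest_conf.py" did.
    import logging

    from functest.opnfv_tests.openstack.refstack_client.tempest_conf import (
        TempestConf)

    logging.basicConfig(level=logging.INFO)
    tempestconf = TempestConf()
    try:
        # Writes the reference refstack_tempest.conf at self.confpath.
        tempestconf.generate_tempestconf()
    finally:
        # Releases the project/image/flavor resources created above.
        tempestconf.clean()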
diff --git a/functest/opnfv_tests/openstack/shaker/__init__.py b/functest/opnfv_tests/openstack/shaker/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/shaker/__init__.py
diff --git a/functest/opnfv_tests/openstack/shaker/shaker.py b/functest/opnfv_tests/openstack/shaker/shaker.py
new file mode 100644
index 000000000..275cc3077
--- /dev/null
+++ b/functest/opnfv_tests/openstack/shaker/shaker.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+Shaker_ wraps popular system network testing tools such as iperf, iperf3
+and netperf (with the help of flent). Shaker is able to deploy OpenStack
+instances and networks in different topologies. A Shaker scenario specifies
+the deployment and the list of tests to execute.
+
+.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
+"""
+
+import logging
+import os
+
+import json
+import scp
+
+from functest.core import singlevm
+from functest.utils import env
+
+
+class Shaker(singlevm.SingleVm2):
+ """Run shaker full+perf l2 and l3"""
+ # pylint: disable=too-many-instance-attributes
+
+ __logger = logging.getLogger(__name__)
+
+ filename = '/home/opnfv/functest/images/shaker-image-1.3.4+stretch.qcow2'
+ flavor_ram = 512
+ flavor_vcpus = 1
+ flavor_disk = 3
+ username = 'debian'
+ port = 9000
+ ssh_connect_loops = 12
+ create_server_timeout = 300
+ check_console_loop = 12
+ shaker_timeout = '3600'
+ quota_instances = -1
+ quota_cores = -1
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.role = None
+
+ def check_requirements(self):
+ if self.count_hypervisors() < 2:
+ self.__logger.warning("Shaker requires at least 2 hypervisors")
+ self.is_skipped = True
+ self.project.clean()
+
+ def prepare(self):
+ super().prepare()
+ self.cloud.create_security_group_rule(
+ self.sec.id, port_range_min=self.port, port_range_max=self.port,
+ protocol='tcp', direction='ingress')
+
+ def execute(self):
+ """
+ Returns:
+            - 0 on success
+ - 1 on operation error
+ """
+ assert self.ssh
+ endpoint = self.get_public_auth_url(self.orig_cloud)
+ self.__logger.debug("keystone endpoint: %s", endpoint)
+ if self.orig_cloud.get_role("admin"):
+ role_name = "admin"
+ elif self.orig_cloud.get_role("Admin"):
+ role_name = "Admin"
+ else:
+            raise Exception("Cannot detect admin or Admin role")
+ self.orig_cloud.grant_role(
+ role_name, user=self.project.user.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+ if not self.orig_cloud.get_role("heat_stack_owner"):
+ self.role = self.orig_cloud.create_role("heat_stack_owner")
+ self.orig_cloud.grant_role(
+ "heat_stack_owner", user=self.project.user.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+ self.orig_cloud.set_compute_quotas(
+ self.project.project.name,
+ instances=self.quota_instances,
+ cores=self.quota_cores)
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
+ if os.environ.get('OS_CACERT'):
+ scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')
+ opt = 'export OS_CACERT=~/os_cacert && ' if os.environ.get(
+ 'OS_CACERT') else ''
+ (_, stdout, stderr) = self.ssh.exec_command(
+ 'source ~/env_file && '
+ 'export OS_INTERFACE=public && '
+ f'export OS_AUTH_URL={endpoint} && '
+ f'export OS_USERNAME={self.project.user.name} && '
+ f'export OS_PROJECT_NAME={self.project.project.name} && '
+ f'export OS_PROJECT_ID={self.project.project.id} && '
+ 'unset OS_TENANT_NAME && '
+ 'unset OS_TENANT_ID && '
+ 'unset OS_ENDPOINT_TYPE && '
+ f'export OS_PASSWORD="{self.project.password}" && '
+ f'{opt}'
+ 'env && '
+ f'timeout {self.shaker_timeout} shaker --debug '
+ f'--image-name {self.image.name} --flavor-name {self.flavor.name} '
+ f'--server-endpoint {self.fip.floating_ip_address}:9000 '
+ f'--external-net {self.ext_net.id} '
+ f"--dns-nameservers {env.get('NAMESERVER')} "
+ '--scenario openstack/full_l2,'
+ 'openstack/full_l3_east_west,'
+ 'openstack/full_l3_north_south,'
+ 'openstack/perf_l3_north_south '
+ '--report report.html --output report.json')
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
+ if not os.path.exists(self.res_dir):
+ os.makedirs(self.res_dir)
+ try:
+ scpc.get('report.json', self.res_dir)
+ scpc.get('report.html', self.res_dir)
+ except scp.SCPException:
+ self.__logger.exception("cannot get report files")
+ return 1
+ with open(
+ os.path.join(self.res_dir, 'report.json'),
+ encoding='utf-8') as json_file:
+ data = json.load(json_file)
+ for value in data["records"].values():
+ if value["status"] != "ok":
+ self.__logger.error(
+ "%s failed\n%s", value["scenario"], value["stderr"])
+ return 1
+ return stdout.channel.recv_exit_status()
+
+ def clean(self):
+ super().clean()
+ if self.role:
+ self.orig_cloud.delete_role(self.role.id)
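The pass/fail decision in execute() above hinges on the per-record "status" field of Shaker's report.json. A standalone sketch of that check against a fabricated report (only the fields read by the code above are populated):

    #!/usr/bin/env python3
    # Standalone sketch of the report.json check performed in execute();
    # the sample document is fabricated for illustration.
    import json

    report = json.loads('''
    {
      "records": {
        "a1": {"scenario": "openstack/full_l2", "status": "ok",
               "stderr": ""},
        "b2": {"scenario": "openstack/perf_l3_north_south",
               "status": "error", "stderr": "iperf3: unable to connect"}
      }
    }
    ''')

    # Any record whose status is not "ok" makes the testcase return 1.
    for value in report["records"].values():
        if value["status"] != "ok":
            print(f'{value["scenario"]} failed\n{value["stderr"]}')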
diff --git a/functest/opnfv_tests/openstack/snaps/api_check.py b/functest/opnfv_tests/openstack/snaps/api_check.py
deleted file mode 100644
index e708b4dec..000000000
--- a/functest/opnfv_tests/openstack/snaps/api_check.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
-
-from functest.opnfv_tests.openstack.snaps import snaps_suite_builder
-from functest.opnfv_tests.openstack.snaps.snaps_test_runner import \
- SnapsTestRunner
-
-
-class ApiCheck(SnapsTestRunner):
- """
- This test executes the Python Tests included with the SNAPS libraries
- that exercise many of the OpenStack APIs within Keystone, Glance, Neutron,
- and Nova
- """
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "api_check"
- super(ApiCheck, self).__init__(**kwargs)
-
- self.suite = unittest.TestSuite()
-
- def run(self, **kwargs):
- """
- Builds the test suite then calls super.run()
- :param kwargs: the arguments to pass on
-        :return: the result of the parent class' run()
- """
- snaps_suite_builder.add_openstack_api_tests(
- suite=self.suite,
- os_creds=self.os_creds,
- ext_net_name=self.ext_net_name,
- use_keystone=self.use_keystone,
- image_metadata=self.image_metadata)
- return super(self.__class__, self).run()
diff --git a/functest/opnfv_tests/openstack/snaps/connection_check.py b/functest/opnfv_tests/openstack/snaps/connection_check.py
deleted file mode 100644
index 1fc49349e..000000000
--- a/functest/opnfv_tests/openstack/snaps/connection_check.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
-
-from functest.opnfv_tests.openstack.snaps import snaps_suite_builder
-from functest.opnfv_tests.openstack.snaps.snaps_test_runner import \
- SnapsTestRunner
-
-
-class ConnectionCheck(SnapsTestRunner):
- """
- This test executes the Python Tests included with the SNAPS libraries
- that simply obtain the different OpenStack clients and may perform
- simple queries
- """
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "connection_check"
- super(ConnectionCheck, self).__init__(**kwargs)
-
- self.suite = unittest.TestSuite()
-
- def run(self, **kwargs):
- """
- Builds the test suite then calls super.run()
- :param kwargs: the arguments to pass on
-        :return: the result of the parent class' run()
- """
- snaps_suite_builder.add_openstack_client_tests(
- suite=self.suite,
- os_creds=self.os_creds,
- ext_net_name=self.ext_net_name,
- use_keystone=self.use_keystone)
- return super(self.__class__, self).run()
diff --git a/functest/opnfv_tests/openstack/snaps/health_check.py b/functest/opnfv_tests/openstack/snaps/health_check.py
deleted file mode 100644
index 837c2eae7..000000000
--- a/functest/opnfv_tests/openstack/snaps/health_check.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
-
-from functest.opnfv_tests.openstack.snaps.snaps_test_runner import (
- SnapsTestRunner)
-
-from snaps.openstack.tests.os_source_file_test import OSIntegrationTestCase
-from snaps.openstack.tests.create_instance_tests import SimpleHealthCheck
-
-
-class HealthCheck(SnapsTestRunner):
- """
- This test executes the SNAPS Python Test case SimpleHealthCheck which
- creates a VM with a single port with an IPv4 address that is assigned by
- DHCP. This test then validates the expected IP with the actual
- """
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "snaps_images_cirros"
- super(HealthCheck, self).__init__(**kwargs)
-
- self.suite = unittest.TestSuite()
-
- def run(self, **kwargs):
- """
- Builds the test suite then calls super.run()
- :param kwargs: the arguments to pass on
-        :return: the result of the parent class' run()
- """
- self.suite.addTest(
- OSIntegrationTestCase.parameterize(
- SimpleHealthCheck, os_creds=self.os_creds,
- ext_net_name=self.ext_net_name,
- use_keystone=self.use_keystone,
- flavor_metadata=self.flavor_metadata,
- image_metadata=self.image_metadata,
- netconf_override=self.netconf_override))
- return super(self.__class__, self).run()
diff --git a/functest/opnfv_tests/openstack/snaps/smoke.py b/functest/opnfv_tests/openstack/snaps/smoke.py
deleted file mode 100644
index ded149d0c..000000000
--- a/functest/opnfv_tests/openstack/snaps/smoke.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import unittest
-
-from functest.opnfv_tests.openstack.snaps import snaps_suite_builder
-from functest.opnfv_tests.openstack.snaps.snaps_test_runner import (
- SnapsTestRunner)
-
-
-class SnapsSmoke(SnapsTestRunner):
- """
- This test executes the Python Tests included with the SNAPS libraries
- that exercise many of the OpenStack APIs within Keystone, Glance, Neutron,
- and Nova
- """
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = "snaps_smoke"
- super(SnapsSmoke, self).__init__(**kwargs)
-
- self.suite = unittest.TestSuite()
-
- def run(self, **kwargs):
- """
- Builds the test suite then calls super.run()
- :param kwargs: the arguments to pass on
- :return:
- """
- snaps_suite_builder.add_openstack_integration_tests(
- suite=self.suite,
- os_creds=self.os_creds,
- ext_net_name=self.ext_net_name,
- use_keystone=self.use_keystone,
- flavor_metadata=self.flavor_metadata,
- image_metadata=self.image_metadata,
- use_floating_ips=self.use_fip,
- netconf_override=self.netconf_override)
- return super(self.__class__, self).run()
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_suite_builder.py b/functest/opnfv_tests/openstack/snaps/snaps_suite_builder.py
deleted file mode 100644
index 3e7c0a39f..000000000
--- a/functest/opnfv_tests/openstack/snaps/snaps_suite_builder.py
+++ /dev/null
@@ -1,433 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-
-from snaps.openstack.tests.create_flavor_tests import (
- CreateFlavorTests)
-from snaps.openstack.tests.create_image_tests import (
- CreateImageSuccessTests, CreateImageNegativeTests,
- CreateMultiPartImageTests)
-from snaps.openstack.tests.create_instance_tests import (
- CreateInstanceSingleNetworkTests, CreateInstanceOnComputeHost,
- CreateInstanceSimpleTests, InstanceSecurityGroupTests,
- CreateInstancePortManipulationTests, SimpleHealthCheck,
- CreateInstanceFromThreePartImage, CreateInstanceTwoNetTests,
- CreateInstanceVolumeTests)
-from snaps.openstack.tests.create_keypairs_tests import (
- CreateKeypairsTests, CreateKeypairsCleanupTests)
-from snaps.openstack.tests.create_network_tests import (
- CreateNetworkSuccessTests)
-from snaps.openstack.tests.create_project_tests import (
- CreateProjectSuccessTests, CreateProjectUserTests)
-from snaps.openstack.tests.create_qos_tests import (
- CreateQoSTests)
-from snaps.openstack.tests.create_router_tests import (
- CreateRouterSuccessTests, CreateRouterNegativeTests)
-from snaps.openstack.tests.create_security_group_tests import (
- CreateSecurityGroupTests)
-from snaps.openstack.tests.create_stack_tests import (
- CreateStackSuccessTests, CreateStackNegativeTests,
- CreateStackFlavorTests, CreateStackFloatingIpTests,
- CreateStackKeypairTests, CreateStackVolumeTests,
- CreateStackSecurityGroupTests)
-from snaps.openstack.tests.create_user_tests import (
- CreateUserSuccessTests)
-from snaps.openstack.tests.create_volume_tests import (
- CreateSimpleVolumeSuccessTests,
- CreateVolumeWithTypeTests, CreateVolumeWithImageTests,
- CreateSimpleVolumeFailureTests)
-from snaps.openstack.tests.create_volume_type_tests import (
- CreateSimpleVolumeTypeSuccessTests,
- CreateVolumeTypeComplexTests)
-from snaps.openstack.tests.os_source_file_test import (
- OSComponentTestCase, OSIntegrationTestCase)
-from snaps.openstack.utils.tests.cinder_utils_tests import (
- CinderSmokeTests, CinderUtilsQoSTests, CinderUtilsSimpleVolumeTypeTests,
- CinderUtilsAddEncryptionTests, CinderUtilsVolumeTypeCompleteTests,
- CinderUtilsVolumeTests)
-from snaps.openstack.utils.tests.glance_utils_tests import (
- GlanceSmokeTests, GlanceUtilsTests)
-from snaps.openstack.utils.tests.heat_utils_tests import (
- HeatSmokeTests, HeatUtilsCreateSimpleStackTests,
- HeatUtilsCreateComplexStackTests, HeatUtilsFlavorTests,
- HeatUtilsKeypairTests, HeatUtilsSecurityGroupTests)
-from snaps.openstack.utils.tests.keystone_utils_tests import (
- KeystoneSmokeTests, KeystoneUtilsTests)
-from snaps.openstack.utils.tests.neutron_utils_tests import (
- NeutronSmokeTests, NeutronUtilsNetworkTests, NeutronUtilsSubnetTests,
- NeutronUtilsRouterTests, NeutronUtilsSecurityGroupTests,
- NeutronUtilsFloatingIpTests)
-from snaps.openstack.utils.tests.nova_utils_tests import (
- NovaSmokeTests, NovaUtilsKeypairTests, NovaUtilsFlavorTests,
- NovaUtilsInstanceTests, NovaUtilsInstanceVolumeTests)
-from snaps.provisioning.tests.ansible_utils_tests import (
- AnsibleProvisioningTests)
-
-
-def add_openstack_client_tests(suite, os_creds, ext_net_name,
- use_keystone=True, log_level=logging.INFO):
- """
- Adds tests written to exercise OpenStack client retrieval
- :param suite: the unittest.TestSuite object to which to add the tests
-    :param os_creds: an instance of OSCreds that holds the credentials
- required by OpenStack
- :param ext_net_name: the name of an external network on the cloud under
- test
- :param use_keystone: when True, tests requiring direct access to Keystone
- are added as these need to be running on a host that
- has access to the cloud's private network
- :param log_level: the logging level
-    :return: None as the tests will be added to the 'suite' parameter object
- """
- # Basic connection tests
- suite.addTest(
- OSComponentTestCase.parameterize(
- GlanceSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
-
- if use_keystone:
- suite.addTest(
- OSComponentTestCase.parameterize(
- KeystoneSmokeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
-
- suite.addTest(
- OSComponentTestCase.parameterize(
- NeutronSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(
- OSComponentTestCase.parameterize(
- NovaSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(
- OSComponentTestCase.parameterize(
- HeatSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(
- OSComponentTestCase.parameterize(
- CinderSmokeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
-
-
-def add_openstack_api_tests(suite, os_creds, ext_net_name, use_keystone=True,
- image_metadata=None, log_level=logging.INFO):
- """
- Adds tests written to exercise all existing OpenStack APIs
- :param suite: the unittest.TestSuite object to which to add the tests
- :param os_creds: Instance of OSCreds that holds the credentials
- required by OpenStack
- :param ext_net_name: the name of an external network on the cloud under
- test
- :param use_keystone: when True, tests requiring direct access to Keystone
- are added as these need to be running on a host that
- has access to the cloud's private network
- :param image_metadata: dict() object containing metadata for creating an
- image with custom config
- (see YAML files in examples/image-metadata)
- :param log_level: the logging level
-    :return: None as the tests will be added to the 'suite' parameter object
- """
- # Tests the OpenStack API calls
- if use_keystone:
- suite.addTest(OSComponentTestCase.parameterize(
- KeystoneUtilsTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- CreateUserSuccessTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- CreateProjectSuccessTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- CreateProjectUserTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
-
- suite.addTest(OSComponentTestCase.parameterize(
- GlanceUtilsTests, os_creds=os_creds, ext_net_name=ext_net_name,
- image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NeutronUtilsNetworkTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NeutronUtilsSubnetTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NeutronUtilsRouterTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NeutronUtilsSecurityGroupTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NeutronUtilsFloatingIpTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NovaUtilsKeypairTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NovaUtilsFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- NovaUtilsInstanceTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level, image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- NovaUtilsInstanceVolumeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CreateFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
- log_level=log_level))
- suite.addTest(OSComponentTestCase.parameterize(
- HeatUtilsCreateSimpleStackTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- HeatUtilsCreateComplexStackTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- HeatUtilsFlavorTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- HeatUtilsKeypairTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- HeatUtilsSecurityGroupTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CinderUtilsQoSTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CinderUtilsVolumeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CinderUtilsSimpleVolumeTypeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CinderUtilsAddEncryptionTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
- suite.addTest(OSComponentTestCase.parameterize(
- CinderUtilsVolumeTypeCompleteTests, os_creds=os_creds,
- ext_net_name=ext_net_name, log_level=log_level,
- image_metadata=image_metadata))
-
-
-def add_openstack_integration_tests(suite, os_creds, ext_net_name,
- use_keystone=True, flavor_metadata=None,
- image_metadata=None, use_floating_ips=True,
- netconf_override=None,
- log_level=logging.INFO):
- """
-    Adds all long-running OpenStack integration tests; these tests create
-    VM instances and may potentially perform some SSH functions through
-    floating IPs
- :param suite: the unittest.TestSuite object to which to add the tests
-    :param os_creds: an instance of OSCreds that holds the credentials
- required by OpenStack
- :param ext_net_name: the name of an external network on the cloud under
- test
- :param use_keystone: when True, tests requiring direct access to Keystone
- are added as these need to be running on a host that
- has access to the cloud's private network
- :param image_metadata: dict() object containing metadata for creating an
- image with custom config
- (see YAML files in examples/image-metadata)
- :param flavor_metadata: dict() object containing the metadata required by
- your flavor based on your configuration:
- (i.e. {'hw:mem_page_size': 'large'})
- :param use_floating_ips: when true, all tests requiring Floating IPs will
- be added to the suite
- :param netconf_override: dict() containing the reconfigured network_type,
- physical_network and segmentation_id
- :param log_level: the logging level
-    :return: None as the tests will be added to the 'suite' parameter object
- """
- # Tests the OpenStack API calls via a creator. If use_keystone, objects
- # will be created with a custom user and project
-
- # Creator Object tests
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateSecurityGroupTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateImageSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateImageNegativeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateMultiPartImageTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateKeypairsTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateKeypairsCleanupTests, os_creds=os_creds,
- ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateNetworkSuccessTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateRouterSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateRouterNegativeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateQoSTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateSimpleVolumeTypeSuccessTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateVolumeTypeComplexTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateSimpleVolumeSuccessTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateSimpleVolumeFailureTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateVolumeWithTypeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateVolumeWithImageTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
-
- # VM Instances
- suite.addTest(OSIntegrationTestCase.parameterize(
- SimpleHealthCheck, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceTwoNetTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceSimpleTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstancePortManipulationTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- InstanceSecurityGroupTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceOnComputeHost, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceFromThreePartImage, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceVolumeTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- netconf_override=netconf_override, log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackSuccessTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackVolumeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackFlavorTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackKeypairTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackSecurityGroupTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackNegativeTests, os_creds=os_creds, ext_net_name=ext_net_name,
- use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
-
- if use_floating_ips:
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateInstanceSingleNetworkTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- CreateStackFloatingIpTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
- suite.addTest(OSIntegrationTestCase.parameterize(
- AnsibleProvisioningTests, os_creds=os_creds,
- ext_net_name=ext_net_name, use_keystone=use_keystone,
- flavor_metadata=flavor_metadata, image_metadata=image_metadata,
- log_level=log_level))
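The removed builder above leans entirely on SNAPS' parameterize() classmethods, which instantiate every test method of a TestCase with shared constructor kwargs before adding it to the suite. A dependency-free sketch of that idiom (the class and argument names here are illustrative, not the SNAPS API):

    #!/usr/bin/env python3
    # Dependency-free sketch of the parameterize() idiom used above: build
    # one suite entry per test method, passing shared kwargs to each.
    import unittest


    class ParamCase(unittest.TestCase):
        def __init__(self, method_name, ext_net_name=None):
            super().__init__(method_name)
            self.ext_net_name = ext_net_name

        def test_has_net(self):
            self.assertIsNotNone(self.ext_net_name)

        @classmethod
        def parameterize(cls, **kwargs):
            loader = unittest.defaultTestLoader
            suite = unittest.TestSuite()
            for name in loader.getTestCaseNames(cls):
                suite.addTest(cls(name, **kwargs))
            return suite


    suite = unittest.TestSuite()
    suite.addTest(ParamCase.parameterize(ext_net_name="public"))
    unittest.TextTestRunner(verbosity=2).run(suite)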
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
deleted file mode 100644
index 6dc8288bf..000000000
--- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-
-from functest.core import unit
-from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
-
-from snaps.openstack import create_flavor
-from snaps.openstack.tests import openstack_tests
-
-
-class SnapsTestRunner(unit.Suite):
- """
- This test executes the SNAPS Python Tests
- """
- def __init__(self, **kwargs):
- super(SnapsTestRunner, self).__init__(**kwargs)
- self.logger = logging.getLogger(__name__)
-
- if 'os_creds' in kwargs:
- self.os_creds = kwargs['os_creds']
- else:
- creds_override = None
- if hasattr(CONST, 'snaps_os_creds_override'):
- creds_override = CONST.__getattribute__(
- 'snaps_os_creds_override')
- self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
- proxy_settings_str=None, ssh_proxy_cmd=None,
- overrides=creds_override)
-
- if 'ext_net_name' in kwargs:
- self.ext_net_name = kwargs['ext_net_name']
- else:
- self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
-
- self.netconf_override = None
- if hasattr(CONST, 'snaps_network_config'):
- self.netconf_override = CONST.__getattribute__(
- 'snaps_network_config')
-
- self.use_fip = (
- CONST.__getattribute__('snaps_use_floating_ips') == 'True')
- self.use_keystone = (
- CONST.__getattribute__('snaps_use_keystone') == 'True')
- scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
-
- self.flavor_metadata = None
- if 'ovs' in scenario or 'fdio' in scenario:
- self.flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
-
- self.logger.info("Using flavor metadata '%s'", self.flavor_metadata)
-
- self.image_metadata = None
- if hasattr(CONST, 'snaps_images'):
- self.image_metadata = CONST.__getattribute__('snaps_images')
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
deleted file mode 100644
index 284e88b51..000000000
--- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-from functest.utils.constants import CONST
-
-from snaps.openstack.utils import neutron_utils, nova_utils
-
-
-def get_ext_net_name(os_creds):
- """
- Returns the configured external network name or
- the first retrieved external network name
-    :param os_creds: an instance of snaps OSCreds object
-    :return: the external network name, or an empty string if none exists
- """
- neutron = neutron_utils.neutron_client(os_creds)
- ext_nets = neutron_utils.get_external_networks(neutron)
- if (hasattr(CONST, 'EXTERNAL_NETWORK')):
- extnet_config = CONST.__getattribute__('EXTERNAL_NETWORK')
- for ext_net in ext_nets:
- if ext_net.name == extnet_config:
- return extnet_config
- return ext_nets[0].name if ext_nets else ""
-
-
-def get_active_compute_cnt(os_creds):
- """
- Returns the number of active compute servers
-    :param os_creds: an instance of snaps OSCreds object
- :return: the number of active compute servers
- """
- nova = nova_utils.nova_client(os_creds)
- computes = nova_utils.get_availability_zone_hosts(nova, zone_name='nova')
- return len(computes)
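Since this change drops the SNAPS helpers, equivalent lookups in the retained framework go through openstacksdk/shade (as the new shaker.py does via self.cloud). A hedged sketch of the same two queries with a plain openstacksdk connection; the availability-zone host count is approximated here by the hypervisor list:

    #!/usr/bin/env python3
    # Hypothetical openstacksdk equivalents of the removed helpers; assumes
    # credentials come from clouds.yaml or OS_* environment variables.
    import openstack

    cloud = openstack.connect()

    # get_ext_net_name(): first external network name, '' when none exists.
    ext_nets = [net for net in cloud.network.networks()
                if net.is_router_external]
    ext_net_name = ext_nets[0].name if ext_nets else ""

    # get_active_compute_cnt(): approximated via the hypervisor list rather
    # than the 'nova' availability-zone hosts used by SNAPS.
    compute_cnt = len(list(cloud.compute.hypervisors()))
    print(ext_net_name, compute_cnt)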
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
deleted file mode 100644
index e61ab8138..000000000
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ /dev/null
@@ -1,352 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import ConfigParser
-import logging
-import fileinput
-import os
-import pkg_resources
-import shutil
-import subprocess
-
-import yaml
-
-from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
-
-
-IMAGE_ID_ALT = None
-FLAVOR_ID_ALT = None
-RALLY_CONF_PATH = "/etc/rally/rally.conf"
-RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
- 'functest', 'ci/rally_aarch64_patch.conf')
-GLANCE_IMAGE_PATH = os.path.join(
- CONST.__getattribute__('dir_functest_images'),
- CONST.__getattribute__('openstack_image_file_name'))
-TEMPEST_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
- 'tempest')
-TEMPEST_CUSTOM = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
-TEMPEST_BLACKLIST = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
-TEMPEST_DEFCORE = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
-TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
-TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
-REFSTACK_RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'),
- 'refstack')
-TEMPEST_CONF_YAML = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
-TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/tempest/custom_tests/test_accounts.yaml')
-
-CI_INSTALLER_TYPE = CONST.__getattribute__('INSTALLER_TYPE')
-CI_INSTALLER_IP = CONST.__getattribute__('INSTALLER_IP')
-
-""" logging configuration """
-logger = logging.getLogger(__name__)
-
-
-def create_rally_deployment():
- # set the architecture to default
- pod_arch = os.getenv("POD_ARCH", None)
- arch_filter = ['aarch64']
-
- if pod_arch and pod_arch in arch_filter:
- logger.info("Apply aarch64 specific to rally config...")
- with open(RALLY_AARCH64_PATCH_PATH, "r") as f:
- rally_patch_conf = f.read()
-
- for line in fileinput.input(RALLY_CONF_PATH, inplace=1):
- print line,
- if "cirros|testvm" in line:
- print rally_patch_conf
-
- logger.info("Creating Rally environment...")
-
- cmd = "rally deployment destroy opnfv-rally"
- ft_utils.execute_command(cmd, error_msg=(
- "Deployment %s does not exist."
- % CONST.__getattribute__('rally_deployment_name')),
- verbose=False)
-
- cmd = ("rally deployment create --fromenv --name={0}"
- .format(CONST.__getattribute__('rally_deployment_name')))
- error_msg = "Problem while creating Rally deployment"
- ft_utils.execute_command_raise(cmd, error_msg=error_msg)
-
- cmd = "rally deployment check"
- error_msg = "OpenStack not responding or faulty Rally deployment."
- ft_utils.execute_command_raise(cmd, error_msg=error_msg)
-
-
-def create_verifier():
- logger.info("Create verifier from existing repo...")
- cmd = ("rally verify delete-verifier --id '{0}' --force").format(
- CONST.__getattribute__('tempest_verifier_name'))
- ft_utils.execute_command(cmd, error_msg=(
- "Verifier %s does not exist."
- % CONST.__getattribute__('tempest_verifier_name')),
- verbose=False)
- cmd = ("rally verify create-verifier --source {0} "
- "--name {1} --type tempest --system-wide"
- .format(CONST.__getattribute__('dir_repo_tempest'),
- CONST.__getattribute__('tempest_verifier_name')))
- ft_utils.execute_command_raise(cmd,
- error_msg='Problem while creating verifier')
-
-
-def get_verifier_id():
- """
- Returns verifier id for current Tempest
- """
- create_rally_deployment()
- create_verifier()
- cmd = ("rally verify list-verifiers | awk '/" +
- CONST.__getattribute__('tempest_verifier_name') +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Tempest verifier not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
-
-def get_verifier_deployment_id():
- """
- Returns deployment id for active Rally deployment
- """
- cmd = ("rally deployment list | awk '/" +
- CONST.__getattribute__('rally_deployment_name') +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Rally deployment not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
-
-def get_verifier_repo_dir(verifier_id):
- """
- Returns installed verifier repo directory for Tempest
- """
- if not verifier_id:
- verifier_id = get_verifier_id()
-
- return os.path.join(CONST.__getattribute__('dir_rally_inst'),
- 'verification',
- 'verifier-{}'.format(verifier_id),
- 'repo')
-
-
-def get_verifier_deployment_dir(verifier_id, deployment_id):
- """
- Returns Rally deployment directory for current verifier
- """
- if not verifier_id:
- verifier_id = get_verifier_id()
-
- if not deployment_id:
- deployment_id = get_verifier_deployment_id()
-
- return os.path.join(CONST.__getattribute__('dir_rally_inst'),
- 'verification',
- 'verifier-{}'.format(verifier_id),
- 'for-deployment-{}'.format(deployment_id))
-
-
-def backup_tempest_config(conf_file):
- """
- Copy config file to tempest results directory
- """
- if not os.path.exists(TEMPEST_RESULTS_DIR):
- os.makedirs(TEMPEST_RESULTS_DIR)
- shutil.copyfile(conf_file,
- os.path.join(TEMPEST_RESULTS_DIR, 'tempest.conf'))
-
-
-def configure_tempest(deployment_dir, image_id=None, flavor_id=None,
- compute_cnt=None):
- """
- Calls rally verify and updates the generated tempest.conf with
- given parameters
- """
- conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file, image_id, flavor_id,
- compute_cnt)
-
-
-def configure_tempest_defcore(deployment_dir, image_id, flavor_id,
- image_id_alt, flavor_id_alt, tenant_id):
- """
- Add/update needed parameters into tempest.conf file
- """
- conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file, image_id, flavor_id)
-
- logger.debug("Updating selected tempest.conf parameters for defcore...")
- config = ConfigParser.RawConfigParser()
- config.read(conf_file)
- config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
- config.set('oslo_concurrency', 'lock_path',
- '{}/lock_files'.format(deployment_dir))
- generate_test_accounts_file(tenant_id=tenant_id)
- config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
- config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
- config.set('scenario', 'img_file', 'tempest-image')
- config.set('compute', 'image_ref', image_id)
- config.set('compute', 'image_ref_alt', image_id_alt)
- config.set('compute', 'flavor_ref', flavor_id)
- config.set('compute', 'flavor_ref_alt', flavor_id_alt)
-
- with open(conf_file, 'wb') as config_file:
- config.write(config_file)
-
- confpath = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- shutil.copyfile(conf_file, confpath)
-
-
-def generate_test_accounts_file(tenant_id):
- """
- Add needed tenant and user params into test_accounts.yaml
- """
-
- logger.debug("Add needed params into test_accounts.yaml...")
- accounts_list = [
- {
- 'tenant_name':
- CONST.__getattribute__('tempest_identity_tenant_name'),
- 'tenant_id': str(tenant_id),
- 'username': CONST.__getattribute__('tempest_identity_user_name'),
- 'password':
- CONST.__getattribute__('tempest_identity_user_password')
- }
- ]
-
- with open(TEST_ACCOUNTS_FILE, "w") as f:
- yaml.dump(accounts_list, f, default_flow_style=False)
-
-
-def configure_tempest_update_params(tempest_conf_file, image_id=None,
- flavor_id=None, compute_cnt=1):
- """
- Add/update needed parameters into tempest.conf file
- """
- logger.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
- config.set(
- 'compute',
- 'fixed_network_name',
- CONST.__getattribute__('tempest_private_net_name'))
- config.set('compute', 'volume_device_name',
- CONST.__getattribute__('tempest_volume_device_name'))
-
- if image_id is not None:
- config.set('compute', 'image_ref', image_id)
- if IMAGE_ID_ALT is not None:
- config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if CONST.__getattribute__('tempest_use_custom_flavors'):
- if flavor_id is not None:
- config.set('compute', 'flavor_ref', flavor_id)
- if FLAVOR_ID_ALT is not None:
- config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
- if compute_cnt > 1:
- # enable multinode tests
- config.set('compute', 'min_compute_nodes', compute_cnt)
- config.set('compute-feature-enabled', 'live_migration', True)
-
- config.set('identity', 'region',
- CONST.__getattribute__('OS_REGION_NAME'))
- identity_api_version = os.getenv(
- "OS_IDENTITY_API_VERSION", os.getenv("IDENTITY_API_VERSION"))
- if (identity_api_version == '3'):
- auth_version = 'v3'
- else:
- auth_version = 'v2'
- config.set('identity', 'auth_version', auth_version)
- config.set(
- 'validation', 'ssh_timeout',
- CONST.__getattribute__('tempest_validation_ssh_timeout'))
- config.set('object-storage', 'operator_role',
- CONST.__getattribute__('tempest_object_storage_operator_role'))
-
- if CONST.__getattribute__('OS_ENDPOINT_TYPE') is not None:
- config.set('identity', 'v3_endpoint_type',
- CONST.__getattribute__('OS_ENDPOINT_TYPE'))
-
- if (identity_api_version == '3'):
- config.set('identity-feature-enabled', 'api_v2', False)
-
- if CONST.__getattribute__('OS_ENDPOINT_TYPE') is not None:
- sections = config.sections()
- services_list = ['compute',
- 'volume',
- 'image',
- 'network',
- 'data-processing',
- 'object-storage',
- 'orchestration']
- for service in services_list:
- if service not in sections:
- config.add_section(service)
- config.set(service, 'endpoint_type',
- CONST.__getattribute__('OS_ENDPOINT_TYPE'))
-
- logger.debug('Add/Update required params defined in tempest_conf.yaml '
- 'into tempest.conf file')
- with open(TEMPEST_CONF_YAML) as f:
- conf_yaml = yaml.safe_load(f)
- if conf_yaml:
- sections = config.sections()
- for section in conf_yaml:
- if section not in sections:
- config.add_section(section)
- sub_conf = conf_yaml.get(section)
- for key, value in sub_conf.items():
- config.set(section, key, value)
-
- with open(tempest_conf_file, 'wb') as config_file:
- config.write(config_file)
-
- backup_tempest_config(tempest_conf_file)
-
-
-def configure_verifier(deployment_dir):
- """
- Execute rally verify configure-verifier, which generates tempest.conf
- """
- tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
- if os.path.isfile(tempest_conf_file):
- logger.debug("Verifier is already configured.")
- logger.debug("Reconfiguring the current verifier...")
- cmd = "rally verify configure-verifier --reconfigure"
- else:
- logger.info("Configuring the verifier...")
- cmd = "rally verify configure-verifier"
- ft_utils.execute_command(cmd)
-
- logger.debug("Looking for tempest.conf file...")
- if not os.path.isfile(tempest_conf_file):
- logger.error("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- raise Exception("Tempest configuration file %s NOT found."
- % tempest_conf_file)
- else:
- return tempest_conf_file
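For reference, the helpers removed above were typically chained as follows; a minimal sketch reusing only the function names from the deleted hunk:

    # Sketch: how the removed conf_utils helpers fit together.
    verifier_id = get_verifier_id()        # also (re)creates deployment/verifier
    deployment_id = get_verifier_deployment_id()
    deployment_dir = get_verifier_deployment_dir(verifier_id, deployment_id)
    # configure_tempest() runs configure_verifier() to generate tempest.conf,
    # then patches it with image/flavor and identity settings.
    configure_tempest(deployment_dir, image_id=None, flavor_id=None)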
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
deleted file mode 100644
index bb1aed339..000000000
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-
--
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml
new file mode 100644
index 000000000..43a77fa3c
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml
@@ -0,0 +1,19 @@
+---
+-
+ scenarios:
+ - os-ovn-nofeature-ha
+ - os-ovn-nofeature-noha
+ tests:
+ - neutron_tempest_plugin.api.admin.test_dhcp_agent_scheduler
+ - neutron_tempest_plugin.api.admin.test_ports.PortTestCasesResourceRequest.test_port_resource_request
+ - neutron_tempest_plugin.api.admin.test_ports.PortTestCasesResourceRequest.test_port_resource_request_empty
+ - neutron_tempest_plugin.api.admin.test_ports.PortTestCasesResourceRequest.test_port_resource_request_inherited_policy
+ - neutron_tempest_plugin.api.admin.test_ports.PortTestCasesResourceRequest.test_port_resource_request_no_provider_net_conflict
+ - neutron_tempest_plugin.api.test_ports.PortsTestJSON.test_create_update_port_with_dns_name
+ - patrole_tempest_plugin.tests.api.network.test_availability_zones_rbac.AvailabilityZoneExtRbacTest.test_list_availability_zone_rbac
+ - patrole_tempest_plugin.tests.api.network.test_agents_rbac.DHCPAgentSchedulersRbacTest.test_add_dhcp_agent_to_network
+ - patrole_tempest_plugin.tests.api.network.test_agents_rbac.DHCPAgentSchedulersRbacTest.test_delete_network_from_dhcp_agent
+ - patrole_tempest_plugin.tests.api.network.test_agents_rbac.DHCPAgentSchedulersRbacTest.test_list_networks_hosted_by_one_dhcp_agent
+ - patrole_tempest_plugin.tests.api.network.test_networks_rbac.NetworksRbacTest.test_create_network_provider_network_type
+ - patrole_tempest_plugin.tests.api.network.test_networks_rbac.NetworksRbacTest.test_create_network_provider_segmentation_id
+ - tempest.api.network.admin.test_dhcp_agent_scheduler
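Each entry above pairs scenario globs with test names. A minimal sketch of how such a file can be flattened into a skip list for the running scenario; the DEPLOY_SCENARIO-style environment lookup is an assumption for illustration, not part of this patch:

    import fnmatch
    import os

    import yaml

    def blacklisted_tests(blacklist_file, deploy_scenario):
        """Return the tests blacklisted for the given deploy scenario."""
        with open(blacklist_file, encoding='utf-8') as yfile:
            entries = yaml.safe_load(yfile) or []
        tests = []
        for entry in entries:
            if any(fnmatch.fnmatch(deploy_scenario, pattern)
                   for pattern in entry.get('scenarios', [])):
                tests.extend(entry.get('tests', []))
        return tests

    skip_list = blacklisted_tests('blacklist.yaml',
                                  os.getenv('DEPLOY_SCENARIO', ''))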
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt
deleted file mode 100644
index fbbee2ffc..000000000
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt
+++ /dev/null
@@ -1,249 +0,0 @@
-# Set of DefCore tempest test cases not flagged and required. It only contains OpenStack core (no object storage)
-# The approved guidelines (2016.08) are valid for Kilo, Liberty, Mitaka and Newton releases of OpenStack
-# The list can be generated using the Rest API from RefStack project:
-# https://refstack.openstack.org/api/v1/guidelines/2017.01/tests?target=compute&type=required&alias=true&flag=false
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_active_server[id-925fdfb4-5b13-47ea-ac8a-c36ae6fddb05]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_active_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits[id-12c80a9f-2dec-480e-882b-98ba15757659]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_reboot_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
-tempest.api.compute.test_versions.TestVersions.test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
-tempest.api.identity.v3.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image[id-f848bb94-1c6e-45a4-8726-39e3a5b23535]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image[id-f66891a7-a35c-41a8-b590-a065c2a1caa6]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id[id-32248db1-ab88-4821-9604-c7c369f1f88c]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image[id-6fe40f1c-57bd-4918-89cc-8500f850f3de]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image[id-e57fc127-7ba0-4693-92d7-1d8a05ebcba9]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id[id-ef45000d-0a72-4781-866d-4cb7bf2562ad]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image[id-668743d5-08ad-4480-b2b8-15da34f81d9f]
-tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image[id-10407036-6059-4f95-a2cd-cbbbee7ed329]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag[id-39c023a2-325a-433a-9eea-649bf1414b19]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image[id-8cd30f82-6f9a-4c6e-8034-c1b51fba43d9]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTestJSON.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTestJSON.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1]
-tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
-tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9]
-tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6]
-tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
-tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance[id-fff42874-7db5-4487-a8e1-ddda5fb5288d]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_get_volume_attachment[id-9516a2c8-9135-488c-8dd6-5677a7e5f371]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_attach_volumes_with_nonexistent_volume_id[id-f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_detach_volumes_with_invalid_volume_id[id-9f9c24e4-011d-46b5-b992-952140ce237a]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml
new file mode 100644
index 000000000..e53b577b2
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml
@@ -0,0 +1,15 @@
+---
+-
+ scenarios:
+ - os-*
+ tests:
+ - neutron_tempest_plugin.api.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_associate_floating_ip_with_port_from_another_project
+ - neutron_tempest_plugin.api.admin.test_quotas.QuotasTest.test_detail_quotas
+ - neutron_tempest_plugin.api.admin.test_quotas.QuotasTest.test_quotas
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_floatingip_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_network_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_port_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_router_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_security_group_rule_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_security_group_when_quotas_is_full
+ - neutron_tempest_plugin.api.admin.test_quotas_negative.QuotasAdminNegativeTestJSON.test_create_subnet_when_quotas_is_full
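The os-* glob above matches any deploy scenario, so these admin- and quota-related tests are excluded unconditionally on public clouds. With the blacklisted_tests() sketch shown after blacklist.yaml:

    blacklisted_tests('public_blacklist.yaml', 'os-nosdn-nofeature-ha')
    # -> the ten neutron_tempest_plugin tests listed above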
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
index b47a9736a..0ee4ab613 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml
@@ -1,13 +1,104 @@
-# This is an empty configuration file to be filled up with the desired options
-# to generate a custom tempest.conf
-# Examples:
-# network-feature-enabled:
-# port_security: True
-
-# volume-feature-enabled:
-# api_v1: False
-
-# validation:
-# image_ssh_user: root
-# ssh_timeout: 300
-
+---
+compute:
+ min_microversion: '2.44'
+ max_microversion: latest
+compute-feature-enabled:
+ attach_encrypted_volume: false
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ change_password: false
+ cold_migration: true
+ config_drive: true
+ console_output: true
+ disk_config: true
+ enable_instance_password: true
+ hostname_fqdn_sanitization: true
+ interface_attach: true
+ live_migration: true
+ live_migrate_back_and_forth: false
+ metadata_service: true
+ pause: true
+ personality: false
+ rdp_console: false
+ rescue: true
+ resize: true
+ scheduler_available_filters: "AvailabilityZoneFilter,ComputeFilter,\
+ ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,\
+ ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+ serial_console: false
+ shelve: true
+ snapshot: true
+ spice_console: false
+ suspend: true
+ swap_volume: false
+ vnc_console: true
+ volume_backed_live_migration: false
+ volume_multiattach: false
+identity:
+ auth_version: v3
+ user_unique_last_password_count: 2
+ user_lockout_duration: 10
+ user_lockout_failure_attempts: 2
+identity-feature-enabled:
+ trust: true
+ api_v2: false
+ api_v2_admin: false
+ security_compliance: true
+ federation: false
+ external_idp: false
+ project_tags: true
+ application_credentials: true
+ access_rules: true
+image-feature-enabled:
+ api_v2: true
+ api_v1: false
+ import_image: false
+network-feature-enabled:
+ port_admin_state_change: true
+ port_security: true
+placement:
+ max_microversion: latest
+validation:
+ image_ssh_user: cirros
+ ssh_timeout: 196
+ ip_version_for_ssh: 4
+ run_validation: true
+volume:
+ max_microversion: latest
+ storage_protocol: ceph
+ manage_volume_ref: source-name,volume-%s
+ manage_snapshot_ref: source-name,snapshot-%s
+volume-feature-enabled:
+ multi_backend: false
+ backup: true
+ snapshot: true
+ clone: true
+ manage_snapshot: true
+ manage_volume: true
+ extend_attached_volume: true
+ extend_attached_encrypted_volume: false
+ consistency_group: false
+ volume_revert: true
+load_balancer:
+ test_with_ipv6: false
+neutron_plugin_options:
+ agent_availability_zone: nova
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ provider_vlans: public,
+ create_shared_resources: true
+object-storage-feature-enabled:
+ discoverable_apis: "account_quotas,formpost,bulk_upload,bulk_delete,\
+    tempurl,crossdomain,container_quotas,staticweb,slo"
+ object_versioning: true
+ discoverability: true
+ tempurl_digest_hashlib: sha1
+heat_plugin:
+ skip_functional_test_list: EncryptionVolTypeTest
+ skip_scenario_test_list: "AodhAlarmTest,SoftwareConfigIntegrationTest,\
+ VolumeBackupRestoreIntegrationTest,CfnInitIntegrationTest,\
+ LoadBalancerTest"
+ auth_version: 3
+heat_features_enabled:
+ multi_cloud: false
+rbac:
+ enable_rbac: true
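tempest.py (modified later in this patch) merges this YAML into the generated tempest.conf via update_tempest_conf_file(). In isolation, the merge amounts to the following sketch; the file paths are placeholders:

    from six.moves import configparser

    import yaml

    rconfig = configparser.RawConfigParser()
    rconfig.read('tempest.conf')
    with open('tempest_conf.yaml', encoding='utf-8') as yfile:
        overrides = yaml.safe_load(yfile) or {}
    for section, options in overrides.items():
        if section not in rconfig.sections():
            rconfig.add_section(section)
        for key, value in options.items():
            # configparser only accepts strings when writing back
            rconfig.set(section, key, str(value))
    with open('tempest.conf', 'w', encoding='utf-8') as out:
        rconfig.write(out)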
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml
new file mode 100644
index 000000000..6b09d8e5a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/tempest_conf_ovn.yaml
@@ -0,0 +1,104 @@
+---
+compute:
+ min_microversion: '2.44'
+ max_microversion: latest
+compute-feature-enabled:
+ attach_encrypted_volume: false
+ block_migration_for_live_migration: false
+ block_migrate_cinder_iscsi: false
+ change_password: false
+ cold_migration: true
+ config_drive: true
+ console_output: true
+ disk_config: true
+ enable_instance_password: true
+ hostname_fqdn_sanitization: true
+ interface_attach: true
+ live_migration: true
+ live_migrate_back_and_forth: false
+ metadata_service: true
+ pause: true
+ personality: false
+ rdp_console: false
+ rescue: true
+ resize: true
+ scheduler_available_filters: "AvailabilityZoneFilter,ComputeFilter,\
+ ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,\
+ ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+ serial_console: false
+ shelve: true
+ snapshot: true
+ spice_console: false
+ suspend: true
+ swap_volume: false
+ vnc_console: true
+ volume_backed_live_migration: false
+ volume_multiattach: false
+identity:
+ auth_version: v3
+ user_unique_last_password_count: 2
+ user_lockout_duration: 10
+ user_lockout_failure_attempts: 2
+identity-feature-enabled:
+ trust: true
+ api_v2: false
+ api_v2_admin: false
+ security_compliance: true
+ federation: false
+ external_idp: false
+ project_tags: true
+ application_credentials: true
+ access_rules: true
+image-feature-enabled:
+ api_v2: true
+ api_v1: false
+ import_image: false
+network-feature-enabled:
+ port_admin_state_change: true
+ port_security: true
+placement:
+ max_microversion: latest
+validation:
+ image_ssh_user: cirros
+ ssh_timeout: 196
+ ip_version_for_ssh: 4
+ run_validation: true
+volume:
+ max_microversion: latest
+ storage_protocol: ceph
+ manage_volume_ref: source-name,volume-%s
+ manage_snapshot_ref: source-name,snapshot-%s
+volume-feature-enabled:
+ multi_backend: false
+ backup: true
+ snapshot: true
+ clone: true
+ manage_snapshot: true
+ manage_volume: true
+ extend_attached_volume: true
+ extend_attached_encrypted_volume: false
+ consistency_group: false
+ volume_revert: true
+load_balancer:
+ test_with_ipv6: false
+neutron_plugin_options:
+ agent_availability_zone: nova
+ available_type_drivers: flat,geneve,vlan,local
+ provider_vlans: public,
+ create_shared_resources: true
+object-storage-feature-enabled:
+ discoverable_apis: "account_quotas,formpost,bulk_upload,bulk_delete,\
+    tempurl,crossdomain,container_quotas,staticweb,slo"
+ object_versioning: true
+ discoverability: true
+ tempurl_digest_hashlib: sha1
+heat_plugin:
+ skip_functional_test_list: EncryptionVolTypeTest
+ skip_scenario_test_list: "AodhAlarmTest,SoftwareConfigIntegrationTest,\
+ VolumeBackupRestoreIntegrationTest,CfnInitIntegrationTest,\
+ LoadBalancerTest"
+ auth_version: 3
+heat_features_enabled:
+ multi_cloud: false
+rbac:
+ enable_rbac: true
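This OVN variant matches tempest_conf.yaml above except that neutron_plugin_options.available_type_drivers drops gre and vxlan. A quick sketch to confirm the two files differ only there:

    import yaml

    with open('tempest_conf.yaml', encoding='utf-8') as base_file:
        base = yaml.safe_load(base_file)
    with open('tempest_conf_ovn.yaml', encoding='utf-8') as ovn_file:
        ovn = yaml.safe_load(ovn_file)
    delta = {(sec, key): (val, ovn.get(sec, {}).get(key))
             for sec, opts in base.items() for key, val in opts.items()
             if ovn.get(sec, {}).get(key) != val}
    print(delta)
    # {('neutron_plugin_options', 'available_type_drivers'):
    #  ('flat,geneve,vlan,gre,local,vxlan', 'flat,geneve,vlan,local')}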
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 5481b13b1..7233ffd60 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -8,493 +8,767 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+"""Tempest testcases implementation."""
+
from __future__ import division
+import json
import logging
import os
import re
import shutil
import subprocess
import time
-import uuid
+import pkg_resources
+from six.moves import configparser
+from xtesting.core import testcase
import yaml
-from functest.core import testcase
-from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
-import functest.utils.functest_utils as ft_utils
+from functest.core import singlevm
+from functest.opnfv_tests.openstack.rally import rally
+from functest.utils import config
+from functest.utils import env
+from functest.utils import functest_utils
+
+LOGGER = logging.getLogger(__name__)
+
+
+class TempestCommon(singlevm.VmReady2):
+ # pylint: disable=too-many-instance-attributes,too-many-public-methods
+ """TempestCommon testcases implementation class."""
+
+ visibility = 'public'
+ filename_alt = '/home/opnfv/functest/images/cirros-0.6.1-x86_64-disk.img'
+ shared_network = True
+ tempest_conf_yaml = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
+ tempest_custom = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
+ tempest_blacklist = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/blacklist.yaml')
+ tempest_public_blacklist = pkg_resources.resource_filename(
+ 'functest',
+ 'opnfv_tests/openstack/tempest/custom_tests/public_blacklist.yaml')
-from snaps.config.flavor import FlavorConfig
-from snaps.config.network import NetworkConfig, SubnetConfig
-from snaps.config.project import ProjectConfig
-from snaps.config.user import UserConfig
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = 'tempest'
+ super().__init__(**kwargs)
+ assert self.orig_cloud
+ assert self.cloud
+ assert self.project
+ if self.orig_cloud.get_role("admin"):
+ self.role_name = "admin"
+ elif self.orig_cloud.get_role("Admin"):
+ self.role_name = "Admin"
+ else:
+            raise Exception("Cannot detect admin or Admin role")
+ self.orig_cloud.grant_role(
+ self.role_name, user=self.project.user.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+ self.orig_cloud.grant_role(
+ self.role_name, user=self.project.user.id,
+ domain=self.project.domain.id)
+ self.deployment_id = None
+ self.verifier_id = None
+ self.verifier_repo_dir = None
+ self.deployment_dir = None
+ self.verification_id = None
+ self.res_dir = os.path.join(
+ getattr(config.CONF, 'dir_results'), self.case_name)
+ self.raw_list = os.path.join(self.res_dir, 'test_raw_list.txt')
+ self.list = os.path.join(self.res_dir, 'test_list.txt')
+ self.conf_file = None
+ self.image_alt = None
+ self.flavor_alt = None
+ self.services = []
+ try:
+ self.services = kwargs['run']['args']['services']
+ except Exception: # pylint: disable=broad-except
+ pass
+ self.neutron_extensions = []
+ try:
+ self.neutron_extensions = kwargs['run']['args'][
+ 'neutron_extensions']
+ except Exception: # pylint: disable=broad-except
+ pass
+ self.deny_skipping = kwargs.get("deny_skipping", False)
+ self.tests_count = kwargs.get("tests_count", 0)
+
+ def check_services(self):
+ """Check the mandatory services."""
+ for service in self.services:
+ try:
+ self.cloud.search_services(service)[0]
+ except Exception: # pylint: disable=broad-except
+ self.is_skipped = True
+ break
-from snaps.openstack import create_flavor
-from snaps.openstack.create_flavor import OpenStackFlavor
-from snaps.openstack.tests import openstack_tests
-from snaps.openstack.utils import deploy_utils
+ def check_extensions(self):
+ """Check the mandatory network extensions."""
+ extensions = self.cloud.get_network_extensions()
+ for network_extension in self.neutron_extensions:
+ if network_extension not in extensions:
+ LOGGER.warning(
+ "Cannot find Neutron extension: %s", network_extension)
+ self.is_skipped = True
+ break
+ def check_requirements(self):
+ self.check_services()
+ self.check_extensions()
+ if self.is_skipped:
+ self.project.clean()
-""" logging configuration """
-logger = logging.getLogger(__name__)
+ @staticmethod
+ def read_file(filename):
+ """Read file and return content as a stripped list."""
+ with open(filename, encoding='utf-8') as src:
+ return [line.strip() for line in src.readlines()]
+ @staticmethod
+ def get_verifier_result(verif_id):
+ """Retrieve verification results."""
+ result = {
+ 'num_tests': 0,
+ 'num_success': 0,
+ 'num_failures': 0,
+ 'num_skipped': 0
+ }
+ cmd = ["rally", "verify", "show", "--uuid", verif_id]
+ LOGGER.info("Showing result for a verification: '%s'.", cmd)
+ with subprocess.Popen(
+ cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT) as proc:
+ for line in proc.stdout:
+ LOGGER.info(line.decode("utf-8").rstrip())
+ new_line = line.decode("utf-8").replace(' ', '').split('|')
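+                    # Totals rows render as '| Tests count | 600 |'; with
+                    # spaces stripped and split on '|', field 1 is the label
+                    # and field 2 the count.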
+ if 'Tests' in new_line:
+ break
+ if 'Testscount' in new_line:
+ result['num_tests'] = int(new_line[2])
+ elif 'Success' in new_line:
+ result['num_success'] = int(new_line[2])
+ elif 'Skipped' in new_line:
+ result['num_skipped'] = int(new_line[2])
+ elif 'Failures' in new_line:
+ result['num_failures'] = int(new_line[2])
+ return result
-class TempestCommon(testcase.TestCase):
+ @staticmethod
+ def backup_tempest_config(conf_file, res_dir):
+ """
+ Copy config file to tempest results directory
+ """
+ if not os.path.exists(res_dir):
+ os.makedirs(res_dir)
+ shutil.copyfile(conf_file,
+ os.path.join(res_dir, 'tempest.conf'))
- def __init__(self, **kwargs):
- super(TempestCommon, self).__init__(**kwargs)
- self.resources = TempestResourcesManager(**kwargs)
- self.MODE = ""
- self.OPTION = ""
- self.VERIFIER_ID = conf_utils.get_verifier_id()
- self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
- self.VERIFIER_ID)
- self.DEPLOYMENT_ID = conf_utils.get_verifier_deployment_id()
- self.DEPLOYMENT_DIR = conf_utils.get_verifier_deployment_dir(
- self.VERIFIER_ID, self.DEPLOYMENT_ID)
- self.VERIFICATION_ID = None
+ @staticmethod
+ def create_verifier():
+ """Create new verifier"""
+ LOGGER.info("Create verifier from existing repo...")
+ cmd = ['rally', 'verify', 'delete-verifier',
+ '--id', str(getattr(config.CONF, 'tempest_verifier_name')),
+ '--force']
+ try:
+ output = subprocess.check_output(cmd)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ except subprocess.CalledProcessError:
+ pass
+
+ cmd = ['rally', 'verify', 'create-verifier',
+ '--source', str(getattr(config.CONF, 'dir_repo_tempest')),
+ '--name', str(getattr(config.CONF, 'tempest_verifier_name')),
+ '--type', 'tempest', '--system-wide']
+ output = subprocess.check_output(cmd)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+ return TempestCommon.get_verifier_id()
@staticmethod
- def read_file(filename):
- with open(filename) as src:
- return [line.strip() for line in src.readlines()]
+ def get_verifier_id():
+ """
+ Returns verifier id for current Tempest
+ """
+ cmd = ("rally verify list-verifiers | awk '/" +
+ getattr(config.CONF, 'tempest_verifier_name') +
+ "/ {print $2}'")
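+        # 'rally verify list-verifiers' prints a table whose second column
+        # holds the UUID; awk extracts it for the (assumed unique) name.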
+ with subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL) as proc:
+ verifier_uuid = proc.stdout.readline().rstrip()
+ return verifier_uuid.decode("utf-8")
- def generate_test_list(self, verifier_repo_dir):
- logger.debug("Generating test case list...")
- if self.MODE == 'defcore':
- shutil.copyfile(
- conf_utils.TEMPEST_DEFCORE, conf_utils.TEMPEST_RAW_LIST)
- elif self.MODE == 'custom':
- if os.path.isfile(conf_utils.TEMPEST_CUSTOM):
+ @staticmethod
+ def get_verifier_repo_dir(verifier_id):
+ """
+ Returns installed verifier repo directory for Tempest
+ """
+ return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
+ 'verification',
+ f'verifier-{verifier_id}',
+ 'repo')
+
+ @staticmethod
+ def get_verifier_deployment_dir(verifier_id, deployment_id):
+ """
+ Returns Rally deployment directory for current verifier
+ """
+ return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
+ 'verification',
+ f'verifier-{verifier_id}',
+ f'for-deployment-{deployment_id}')
+
+ @staticmethod
+ def update_tempest_conf_file(conf_file, rconfig):
+        """Update defined parameters in the tempest config file."""
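+        # tempest_conf_yaml maps sections to key/value overrides written on
+        # top of the generated tempest.conf, e.g. (hypothetical):
+        #   compute-feature-enabled:
+        #     shelve: false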
+ with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:
+ conf_yaml = yaml.safe_load(yfile)
+ if conf_yaml:
+ sections = rconfig.sections()
+ for section in conf_yaml:
+ if section not in sections:
+ rconfig.add_section(section)
+ sub_conf = conf_yaml.get(section)
+ for key, value in sub_conf.items():
+ rconfig.set(section, key, value)
+
+ with open(conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ @staticmethod
+ def configure_tempest_update_params(
+ tempest_conf_file, image_id=None, flavor_id=None,
+ compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
+ admin_role_name='admin', cidr='192.168.120.0/24',
+ domain_id='default'):
+ # pylint: disable=too-many-branches,too-many-arguments
+ # pylint: disable=too-many-statements,too-many-locals
+ """
+ Add/update needed parameters into tempest.conf file
+ """
+ LOGGER.debug("Updating selected tempest.conf parameters...")
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(tempest_conf_file)
+ rconfig.set(
+ 'compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
+ if image_id is not None:
+ rconfig.set('compute', 'image_ref', image_id)
+ if image_alt_id is not None:
+ rconfig.set('compute', 'image_ref_alt', image_alt_id)
+ if flavor_id is not None:
+ rconfig.set('compute', 'flavor_ref', flavor_id)
+ if flavor_alt_id is not None:
+ rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
+ if compute_cnt > 1:
+ # enable multinode tests
+ rconfig.set('compute', 'min_compute_nodes', compute_cnt)
+ rconfig.set('compute-feature-enabled', 'live_migration', True)
+ if os.environ.get('OS_REGION_NAME'):
+ rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
+ rconfig.set('identity', 'admin_role', admin_role_name)
+ rconfig.set('identity', 'default_domain_id', domain_id)
+ if not rconfig.has_section('network'):
+ rconfig.add_section('network')
+ rconfig.set('network', 'default_network', cidr)
+ rconfig.set('network', 'project_network_cidr', cidr)
+ rconfig.set('network', 'project_networks_reachable', False)
+ rconfig.set(
+ 'identity', 'v3_endpoint_type',
+ os.environ.get('OS_INTERFACE', 'public'))
+
+ sections = rconfig.sections()
+ services_list = [
+ 'compute', 'volume', 'image', 'network', 'data-processing',
+ 'object-storage', 'orchestration']
+ for service in services_list:
+ if service not in sections:
+ rconfig.add_section(service)
+ rconfig.set(service, 'endpoint_type',
+ os.environ.get('OS_INTERFACE', 'public'))
+
+ LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
+ 'into tempest.conf file')
+ TempestCommon.update_tempest_conf_file(tempest_conf_file, rconfig)
+
+ @staticmethod
+ def configure_verifier(deployment_dir):
+ """
+ Execute rally verify configure-verifier, which generates tempest.conf
+ """
+ cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
+ '--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
+ output = subprocess.check_output(cmd)
+ LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
+
+ LOGGER.debug("Looking for tempest.conf file...")
+ tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
+ if not os.path.isfile(tempest_conf_file):
+ LOGGER.error("Tempest configuration file %s NOT found.",
+ tempest_conf_file)
+ return None
+ return tempest_conf_file
+
+ def generate_test_list(self, **kwargs):
+ """Generate test list based on the test mode."""
+ LOGGER.debug("Generating test case list...")
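+        # Expose the generated tempest.conf while the tests are listed;
+        # the copy is removed once the list has been built.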
+ self.backup_tempest_config(self.conf_file, '/etc')
+ if kwargs.get('mode') == 'custom':
+ if os.path.isfile(self.tempest_custom):
shutil.copyfile(
- conf_utils.TEMPEST_CUSTOM, conf_utils.TEMPEST_RAW_LIST)
+ self.tempest_custom, self.list)
else:
- raise Exception("Tempest test list file %s NOT found."
- % conf_utils.TEMPEST_CUSTOM)
+ raise Exception(
+ f"Tempest test list file {self.tempest_custom} NOT found.")
else:
- if self.MODE == 'smoke':
- testr_mode = "smoke"
- elif self.MODE == 'full':
- testr_mode = ""
- else:
- testr_mode = 'tempest.api.' + self.MODE
- cmd = ("cd {0};"
- "testr list-tests {1} > {2};"
- "cd -;".format(verifier_repo_dir,
- testr_mode,
- conf_utils.TEMPEST_RAW_LIST))
- ft_utils.execute_command(cmd)
-
- def apply_tempest_blacklist(self):
- logger.debug("Applying tempest blacklist...")
- cases_file = self.read_file(conf_utils.TEMPEST_RAW_LIST)
- result_file = open(conf_utils.TEMPEST_LIST, 'w')
- black_tests = []
- try:
- installer_type = CONST.__getattribute__('INSTALLER_TYPE')
- deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
- if (bool(installer_type) * bool(deploy_scenario)):
- # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the
- # file
- black_list_file = open(conf_utils.TEMPEST_BLACKLIST)
- black_list_yaml = yaml.safe_load(black_list_file)
- black_list_file.close()
- for item in black_list_yaml:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- for test in tests:
- black_tests.append(test)
- break
- except Exception:
+ testr_mode = kwargs.get(
+ 'mode', r'^tempest\.(api|scenario).*\[.*\bsmoke\b.*\]$')
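+            # The default regex selects the smoke-tagged api/scenario
+            # tests, i.e. ids whose attribute list contains 'smoke'.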
+ cmd = (f"(cd {self.verifier_repo_dir}; "
+ f"stestr list '{testr_mode}' > {self.list} 2>/dev/null)")
+ output = subprocess.check_output(cmd, shell=True)
+ LOGGER.info("%s\n%s", cmd, output.decode("utf-8"))
+ os.remove('/etc/tempest.conf')
+
+ def apply_tempest_blacklist(self, black_list):
+ """Exclude blacklisted test cases."""
+ LOGGER.debug("Applying tempest blacklist...")
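+        # black_list is a YAML list of {scenarios, tests} items and tests
+        # entries are matched as regexes, e.g. (hypothetical entry):
+        #   -
+        #       scenarios:
+        #           - os-nosdn-nofeature-ha
+        #       tests:
+        #           - tempest.api.network.test_floating_ips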
+ if os.path.exists(self.raw_list):
+ os.remove(self.raw_list)
+ os.rename(self.list, self.raw_list)
+ cases_file = self.read_file(self.raw_list)
+ with open(self.list, 'w', encoding='utf-8') as result_file:
black_tests = []
- logger.debug("Tempest blacklist file does not exist.")
-
- for cases_line in cases_file:
- for black_tests_line in black_tests:
- if black_tests_line in cases_line:
- break
- else:
- result_file.write(str(cases_line) + '\n')
- result_file.close()
-
- def run_verifier_tests(self):
- self.OPTION += (" --load-list {} --detailed"
- .format(conf_utils.TEMPEST_LIST))
-
- cmd_line = "rally verify start " + self.OPTION
- logger.info("Starting Tempest test suite: '%s'." % cmd_line)
-
- header = ("Tempest environment:\n"
- " SUT: %s\n Scenario: %s\n Node: %s\n Date: %s\n" %
- (CONST.__getattribute__('INSTALLER_TYPE'),
- CONST.__getattribute__('DEPLOY_SCENARIO'),
- CONST.__getattribute__('NODE_NAME'),
- time.strftime("%a %b %d %H:%M:%S %Z %Y")))
-
- f_stdout = open(
- os.path.join(conf_utils.TEMPEST_RESULTS_DIR, "tempest.log"), 'w+')
- f_stderr = open(
- os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
- "tempest-error.log"), 'w+')
- f_env = open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
- "environment.log"), 'w+')
- f_env.write(header)
-
- p = subprocess.Popen(
- cmd_line, shell=True,
- stdout=subprocess.PIPE,
- stderr=f_stderr,
- bufsize=1)
-
- with p.stdout:
- for line in iter(p.stdout.readline, b''):
- if re.search("\} tempest\.", line):
- logger.info(line.replace('\n', ''))
- elif re.search('Starting verification', line):
- logger.info(line.replace('\n', ''))
- first_pos = line.index("UUID=") + len("UUID=")
- last_pos = line.index(") for deployment")
- self.VERIFICATION_ID = line[first_pos:last_pos]
- logger.debug('Verification UUID: %s', self.VERIFICATION_ID)
- f_stdout.write(line)
- p.wait()
-
- f_stdout.close()
- f_stderr.close()
- f_env.close()
-
- def parse_verifier_result(self):
- if self.VERIFICATION_ID is None:
+ try:
+ deploy_scenario = env.get('DEPLOY_SCENARIO')
+ if bool(deploy_scenario):
+ # if DEPLOY_SCENARIO is set we read the file
+ with open(black_list, encoding='utf-8') as black_list_file:
+ black_list_yaml = yaml.safe_load(black_list_file)
+ for item in black_list_yaml:
+ scenarios = item['scenarios']
+ in_it = rally.RallyBase.in_iterable_re
+ if in_it(deploy_scenario, scenarios):
+ tests = item['tests']
+ black_tests.extend(tests)
+ except Exception: # pylint: disable=broad-except
+ black_tests = []
+ LOGGER.debug("Tempest blacklist file does not exist.")
+
+ for cases_line in cases_file:
+ for black_tests_line in black_tests:
+ if re.search(black_tests_line, cases_line):
+ break
+ else:
+ result_file.write(str(cases_line) + '\n')
+
+ def run_verifier_tests(self, **kwargs):
+ """Execute tempest test cases."""
+ cmd = ["rally", "verify", "start", "--load-list",
+ self.list]
+ cmd.extend(kwargs.get('option', []))
+ LOGGER.info("Starting Tempest test suite: '%s'.", cmd)
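+        # rally prints a line like "Starting verification (UUID=<uuid>)
+        # for deployment ..."; the lookahead below captures that uuid.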
+
+ with open(
+ os.path.join(self.res_dir, "tempest.log"), 'w+',
+ encoding='utf-8') as f_stdout:
+ with subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ bufsize=1) as proc:
+ with proc.stdout:
+ for line in iter(proc.stdout.readline, b''):
+ if re.search(r"\} tempest\.", line.decode("utf-8")):
+ LOGGER.info(line.rstrip())
+ elif re.search(r'(?=\(UUID=(.*)\))',
+ line.decode("utf-8")):
+ self.verification_id = re.search(
+ r'(?=\(UUID=(.*)\))',
+ line.decode("utf-8")).group(1)
+ f_stdout.write(line.decode("utf-8"))
+ proc.wait()
+
+ if self.verification_id is None:
raise Exception('Verification UUID not found')
+ LOGGER.info('Verification UUID: %s', self.verification_id)
- cmd_line = "rally verify show --uuid {}".format(self.VERIFICATION_ID)
- logger.info("Showing result for a verification: '%s'." % cmd_line)
- p = subprocess.Popen(cmd_line,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- for line in p.stdout:
- new_line = line.replace(' ', '').split('|')
- if 'Tests' in new_line:
- break
-
- logger.info(line)
- if 'Testscount' in new_line:
- num_tests = new_line[2]
- elif 'Success' in new_line:
- num_success = new_line[2]
- elif 'Skipped' in new_line:
- num_skipped = new_line[2]
- elif 'Failures' in new_line:
- num_failures = new_line[2]
+ shutil.copy(
+ f"{self.deployment_dir}/tempest.log",
+ f"{self.res_dir}/tempest.debug.log")
+
+    def parse_verifier_result(self):
+ """Parse and save test results."""
+ stat = self.get_verifier_result(self.verification_id)
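+        # The success rate excludes skipped tests: 1290 tests with 40
+        # skipped and 1225 successes give 100 * 1225 / 1250 = 98%
+        # (figures are illustrative).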
try:
- num_executed = int(num_tests) - int(num_skipped)
+ num_executed = stat['num_tests'] - stat['num_skipped']
try:
- self.result = 100 * int(num_success) / int(num_executed)
+ self.result = 100 * stat['num_success'] / num_executed
except ZeroDivisionError:
self.result = 0
- if int(num_tests) > 0:
- logger.info("All tests have been skipped")
+ if stat['num_tests'] > 0:
+ LOGGER.info("All tests have been skipped")
else:
- logger.error("No test has been executed")
+ LOGGER.error("No test has been executed")
return
- with open(os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
- "tempest.log"), 'r') as logfile:
+ with open(os.path.join(self.res_dir, "rally.log"),
+ 'r', encoding='utf-8') as logfile:
output = logfile.read()
success_testcases = []
- for match in re.findall('.*\{0\} (.*?)[. ]*success ', output):
+ for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} success ',
+ output):
success_testcases.append(match)
failed_testcases = []
- for match in re.findall('.*\{0\} (.*?)[. ]*fail ', output):
+ for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} fail',
+ output):
failed_testcases.append(match)
skipped_testcases = []
- for match in re.findall('.*\{0\} (.*?)[. ]*skip:', output):
+ for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} skip(?::| )',
+ output):
skipped_testcases.append(match)
- self.details = {"tests": int(num_tests),
- "failures": int(num_failures),
+ self.details = {"tests_number": stat['num_tests'],
+ "success_number": stat['num_success'],
+ "skipped_number": stat['num_skipped'],
+ "failures_number": stat['num_failures'],
"success": success_testcases,
- "errors": failed_testcases,
- "skipped": skipped_testcases}
- except Exception:
+ "skipped": skipped_testcases,
+ "failures": failed_testcases}
+ except Exception: # pylint: disable=broad-except
self.result = 0
- logger.info("Tempest %s success_rate is %s%%"
- % (self.case_name, self.result))
+ LOGGER.info("Tempest %s success_rate is %s%%",
+ self.case_name, self.result)
+
+ def update_rally_regex(self, rally_conf='/etc/rally/rally.conf'):
+ """Set image name as tempest img_name_regex"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
+ if not rconfig.has_section('openstack'):
+ rconfig.add_section('openstack')
+ rconfig.set('openstack', 'img_name_regex', f'^{self.image.name}$')
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_default_role(self, rally_conf='/etc/rally/rally.conf'):
+ """Detect and update the default role if required"""
+ role = self.get_default_role(self.cloud)
+ if not role:
+ return
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
+ if not rconfig.has_section('openstack'):
+ rconfig.add_section('openstack')
+ rconfig.set('openstack', 'swift_operator_role', role.name)
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
- def run(self):
+
+    @staticmethod
+ def clean_rally_conf(rally_conf='/etc/rally/rally.conf'):
+ """Clean Rally config"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(rally_conf)
+ if rconfig.has_option('openstack', 'img_name_regex'):
+ rconfig.remove_option('openstack', 'img_name_regex')
+ if rconfig.has_option('openstack', 'swift_operator_role'):
+ rconfig.remove_option('openstack', 'swift_operator_role')
+ with open(rally_conf, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_auth_section(self):
+ """Update auth section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section("auth"):
+ rconfig.add_section("auth")
+ if env.get("NEW_USER_ROLE").lower() != "member":
+ tempest_roles = []
+ if rconfig.has_option("auth", "tempest_roles"):
+ tempest_roles = functest_utils.convert_ini_to_list(
+ rconfig.get("auth", "tempest_roles"))
+ rconfig.set(
+ 'auth', 'tempest_roles',
+ functest_utils.convert_list_to_ini(
+ [env.get("NEW_USER_ROLE")] + tempest_roles))
+ if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
+ rconfig.set('auth', 'use_dynamic_credentials', False)
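+            # With dynamic credentials disabled, tempest reads
+            # preprovisioned users from accounts.yaml, e.g. (hypothetical):
+            #   - username: tempest1
+            #     project_name: tempest1
+            #     password: secret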
+ account_file = os.path.join(
+ getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
+ assert os.path.exists(
+ account_file), f"{account_file} doesn't exist"
+ rconfig.set('auth', 'test_accounts_file', account_file)
+ if env.get('NO_TENANT_NETWORK').lower() == 'true':
+ rconfig.set('auth', 'create_isolated_networks', False)
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_network_section(self):
+ """Update network section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if self.ext_net:
+ if not rconfig.has_section('network'):
+ rconfig.add_section('network')
+ rconfig.set('network', 'public_network_id', self.ext_net.id)
+ rconfig.set('network', 'floating_network_name', self.ext_net.name)
+ rconfig.set('network-feature-enabled', 'floating_ips', True)
+ else:
+ if not rconfig.has_section('network-feature-enabled'):
+ rconfig.add_section('network-feature-enabled')
+ rconfig.set('network-feature-enabled', 'floating_ips', False)
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_compute_section(self):
+ """Update compute section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('compute'):
+ rconfig.add_section('compute')
+ rconfig.set(
+ 'compute', 'fixed_network_name',
+ self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_validation_section(self):
+ """Update validation section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('validation'):
+ rconfig.add_section('validation')
+ rconfig.set(
+ 'validation', 'connect_method',
+ 'floating' if self.ext_net else 'fixed')
+ rconfig.set(
+ 'validation', 'network_for_ssh',
+ self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_scenario_section(self):
+ """Update scenario section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ filename = getattr(
+ config.CONF, f'{self.case_name}_image', self.filename)
+ if not rconfig.has_section('scenario'):
+ rconfig.add_section('scenario')
+ rconfig.set('scenario', 'img_file', filename)
+ rconfig.set('scenario', 'img_disk_format', getattr(
+ config.CONF, f'{self.case_name}_image_format',
+ self.image_format))
+ extra_properties = self.extra_properties.copy()
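+        # IMAGE_PROPERTIES is an ini-like string such as
+        # 'hw_scsi_model:virtio-scsi,hw_disk_bus:scsi' which is converted
+        # to a dict and merged into the image properties.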
+ if env.get('IMAGE_PROPERTIES'):
+ extra_properties.update(
+ functest_utils.convert_ini_to_dict(
+ env.get('IMAGE_PROPERTIES')))
+ extra_properties.update(
+ getattr(config.CONF, f'{self.case_name}_extra_properties', {}))
+ rconfig.set(
+ 'scenario', 'img_properties',
+ functest_utils.convert_dict_to_ini(extra_properties))
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+ def update_dashboard_section(self):
+ """Update dashboard section in tempest.conf"""
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if env.get('DASHBOARD_URL'):
+ if not rconfig.has_section('dashboard'):
+ rconfig.add_section('dashboard')
+ rconfig.set('dashboard', 'dashboard_url', env.get('DASHBOARD_URL'))
+ else:
+ rconfig.set('service_available', 'horizon', False)
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+
+    def configure(self, **kwargs):  # pylint: disable=unused-argument
+ """
+ Create all openstack resources for tempest-based testcases and write
+ tempest.conf.
+ """
+ if not os.path.exists(self.res_dir):
+ os.makedirs(self.res_dir)
+ self.deployment_id = rally.RallyBase.create_rally_deployment(
+ environ=self.project.get_environ())
+ if not self.deployment_id:
+ raise Exception("Deployment create failed")
+ self.verifier_id = self.create_verifier()
+ if not self.verifier_id:
+ raise Exception("Verifier create failed")
+ self.verifier_repo_dir = self.get_verifier_repo_dir(
+ self.verifier_id)
+ self.deployment_dir = self.get_verifier_deployment_dir(
+ self.verifier_id, self.deployment_id)
+
+        compute_cnt = min(self.count_hypervisors(), 10)
+ self.image_alt = self.publish_image_alt()
+ self.flavor_alt = self.create_flavor_alt()
+        LOGGER.debug("flavor_alt: %s", self.flavor_alt)
+
+ self.conf_file = self.configure_verifier(self.deployment_dir)
+ if not self.conf_file:
+ raise Exception("Tempest verifier configuring failed")
+ self.configure_tempest_update_params(
+ self.conf_file,
+ image_id=self.image.id,
+ flavor_id=self.flavor.id,
+ compute_cnt=compute_cnt,
+ image_alt_id=self.image_alt.id,
+ flavor_alt_id=self.flavor_alt.id,
+ admin_role_name=self.role_name, cidr=self.cidr,
+ domain_id=self.project.domain.id)
+ self.update_auth_section()
+ self.update_network_section()
+ self.update_compute_section()
+ self.update_validation_section()
+ self.update_scenario_section()
+ self.update_dashboard_section()
+ self.backup_tempest_config(self.conf_file, self.res_dir)
+
+ def run(self, **kwargs):
self.start_time = time.time()
try:
- if not os.path.exists(conf_utils.TEMPEST_RESULTS_DIR):
- os.makedirs(conf_utils.TEMPEST_RESULTS_DIR)
- resources = self.resources.create()
- compute_cnt = snaps_utils.get_active_compute_cnt(
- self.resources.os_creds)
- conf_utils.configure_tempest(
- self.DEPLOYMENT_DIR,
- image_id=resources.get("image_id"),
- flavor_id=resources.get("flavor_id"),
- compute_cnt=compute_cnt)
- self.generate_test_list(self.VERIFIER_REPO_DIR)
- self.apply_tempest_blacklist()
- self.run_verifier_tests()
+ assert super().run(
+ **kwargs) == testcase.TestCase.EX_OK
+ if not os.path.exists(self.res_dir):
+ os.makedirs(self.res_dir)
+ self.update_rally_regex()
+ self.update_default_role()
+ rally.RallyBase.update_rally_logs(self.res_dir)
+ shutil.copy("/etc/rally/rally.conf", self.res_dir)
+ self.configure(**kwargs)
+ self.generate_test_list(**kwargs)
+ self.apply_tempest_blacklist(TempestCommon.tempest_blacklist)
+ if env.get('PUBLIC_ENDPOINT_ONLY').lower() == 'true':
+ self.apply_tempest_blacklist(
+ TempestCommon.tempest_public_blacklist)
+ self.run_verifier_tests(**kwargs)
self.parse_verifier_result()
+ rally.RallyBase.verify_report(
+ os.path.join(self.res_dir, "tempest-report.html"),
+ self.verification_id)
+ rally.RallyBase.verify_report(
+ os.path.join(self.res_dir, "tempest-report.xml"),
+ self.verification_id, "junit-xml")
res = testcase.TestCase.EX_OK
- except Exception as e:
- logger.error('Error with run: %s' % e)
+ except Exception: # pylint: disable=broad-except
+ LOGGER.exception('Error with run')
+ self.result = 0
res = testcase.TestCase.EX_RUN_ERROR
- finally:
- self.resources.cleanup()
-
self.stop_time = time.time()
return res
-
-class TempestSmokeSerial(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'tempest_smoke_serial'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "smoke"
- self.OPTION = "--concurrency 1"
-
-
-class TempestSmokeParallel(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'tempest_smoke_parallel'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "smoke"
- self.OPTION = ""
-
-
-class TempestFullParallel(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'tempest_full_parallel'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "full"
-
-
-class TempestCustom(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'tempest_custom'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "custom"
- self.OPTION = "--concurrency 1"
-
-
-class TempestDefcore(TempestCommon):
-
- def __init__(self, **kwargs):
- if "case_name" not in kwargs:
- kwargs["case_name"] = 'tempest_defcore'
- TempestCommon.__init__(self, **kwargs)
- self.MODE = "defcore"
- self.OPTION = "--concurrency 1"
-
-
-class TempestResourcesManager(object):
+ def clean(self):
+ """
+ Cleanup all OpenStack objects. Should be called on completion.
+ """
+ self.clean_rally_conf()
+ rally.RallyBase.clean_rally_logs()
+ if self.image_alt:
+ self.cloud.delete_image(self.image_alt)
+ if self.flavor_alt:
+ self.orig_cloud.delete_flavor(self.flavor_alt.id)
+ super().clean()
+
+ def is_successful(self):
+ """The overall result of the test."""
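+        # deny_skipping turns any skipped test into a failure and
+        # tests_count pins the expected total; both come from the testcase
+        # description.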
+ skips = self.details.get("skipped_number", 0)
+ if skips > 0 and self.deny_skipping:
+ return testcase.TestCase.EX_TESTCASE_FAILED
+ if self.tests_count and (
+ self.details.get("tests_number", 0) != self.tests_count):
+ return testcase.TestCase.EX_TESTCASE_FAILED
+ return super().is_successful()
+
+
+class TempestHeat(TempestCommon):
+ """Tempest Heat testcase implementation class."""
+
+ filename_alt = ('/home/opnfv/functest/images/'
+ 'Fedora-Cloud-Base-30-1.2.x86_64.qcow2')
+ flavor_alt_ram = 512
+ flavor_alt_vcpus = 1
+ flavor_alt_disk = 4
+
    def __init__(self, **kwargs):
- self.os_creds = None
- if 'os_creds' in kwargs:
- self.os_creds = kwargs['os_creds']
+ super().__init__(**kwargs)
+ self.user2 = self.orig_cloud.create_user(
+ name=f'{self.case_name}-user2_{self.project.guid}',
+ password=self.project.password,
+ domain_id=self.project.domain.id)
+ self.orig_cloud.grant_role(
+ self.role_name, user=self.user2.id,
+ project=self.project.project.id, domain=self.project.domain.id)
+ if not self.orig_cloud.get_role("heat_stack_owner"):
+ self.role = self.orig_cloud.create_role("heat_stack_owner")
+ self.orig_cloud.grant_role(
+ "heat_stack_owner", user=self.user2.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+
+ def configure(self, **kwargs):
+ assert self.user2
+ super().configure(**kwargs)
+ rconfig = configparser.RawConfigParser()
+ rconfig.read(self.conf_file)
+ if not rconfig.has_section('heat_plugin'):
+ rconfig.add_section('heat_plugin')
+ # It fails if region and domain ids are unset
+ rconfig.set(
+ 'heat_plugin', 'region',
+ os.environ.get('OS_REGION_NAME', 'RegionOne'))
+ rconfig.set('heat_plugin', 'auth_url', os.environ["OS_AUTH_URL"])
+ rconfig.set('heat_plugin', 'project_domain_id', self.project.domain.id)
+ rconfig.set('heat_plugin', 'user_domain_id', self.project.domain.id)
+ rconfig.set(
+ 'heat_plugin', 'project_domain_name', self.project.domain.name)
+ rconfig.set(
+ 'heat_plugin', 'user_domain_name', self.project.domain.name)
+ rconfig.set('heat_plugin', 'username', self.user2.name)
+ rconfig.set('heat_plugin', 'password', self.project.password)
+ rconfig.set('heat_plugin', 'project_name', self.project.project.name)
+ rconfig.set('heat_plugin', 'admin_username', self.project.user.name)
+ rconfig.set('heat_plugin', 'admin_password', self.project.password)
+ rconfig.set(
+ 'heat_plugin', 'admin_project_name', self.project.project.name)
+ rconfig.set('heat_plugin', 'image_ref', self.image_alt.id)
+ rconfig.set('heat_plugin', 'instance_type', self.flavor_alt.id)
+ rconfig.set('heat_plugin', 'minimal_image_ref', self.image.id)
+ rconfig.set('heat_plugin', 'minimal_instance_type', self.flavor.id)
+ if self.ext_net:
+ rconfig.set(
+ 'heat_plugin', 'floating_network_name', self.ext_net.name)
+ if self.network:
+ rconfig.set('heat_plugin', 'fixed_network_name', self.network.name)
+ rconfig.set('heat_plugin', 'fixed_subnet_name', self.subnet.name)
+ rconfig.set('heat_plugin', 'network_for_ssh', self.network.name)
else:
- self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'))
-
- self.guid = '-' + str(uuid.uuid4())
-
- self.creators = list()
-
- if hasattr(CONST, 'snaps_images_cirros'):
- self.cirros_image_config = CONST.__getattribute__(
- 'snaps_images_cirros')
- else:
- self.cirros_image_config = None
-
- def create(self, use_custom_images=False, use_custom_flavors=False,
- create_project=False):
- if create_project:
- logger.debug("Creating project (tenant) for Tempest suite")
- project_name = CONST.__getattribute__(
- 'tempest_identity_tenant_name') + self.guid
- project_creator = deploy_utils.create_project(
- self.os_creds, ProjectConfig(
- name=project_name,
- description=CONST.__getattribute__(
- 'tempest_identity_tenant_description')))
- if (project_creator is None or
- project_creator.get_project() is None):
- raise Exception("Failed to create tenant")
- project_id = project_creator.get_project().id
- self.creators.append(project_creator)
-
- logger.debug("Creating user for Tempest suite")
- user_creator = deploy_utils.create_user(
- self.os_creds, UserConfig(
- name=CONST.__getattribute__(
- 'tempest_identity_user_name') + self.guid,
- password=CONST.__getattribute__(
- 'tempest_identity_user_password'),
- project_name=project_name))
- if user_creator is None or user_creator.get_user() is None:
- raise Exception("Failed to create user")
- user_id = user_creator.get_user().id
- self.creators.append(user_creator)
- else:
- project_name = None
- project_id = None
- user_id = None
-
- logger.debug("Creating private network for Tempest suite")
-
- tempest_network_type = None
- tempest_physical_network = None
- tempest_segmentation_id = None
-
- if hasattr(CONST, 'tempest_network_type'):
- tempest_network_type = CONST.__getattribute__(
- 'tempest_network_type')
- if hasattr(CONST, 'tempest_physical_network'):
- tempest_physical_network = CONST.__getattribute__(
- 'tempest_physical_network')
- if hasattr(CONST, 'tempest_segmentation_id'):
- tempest_segmentation_id = CONST.__getattribute__(
- 'tempest_segmentation_id')
-
- network_creator = deploy_utils.create_network(
- self.os_creds, NetworkConfig(
- name=CONST.__getattribute__(
- 'tempest_private_net_name') + self.guid,
- project_name=project_name,
- network_type=tempest_network_type,
- physical_network=tempest_physical_network,
- segmentation_id=tempest_segmentation_id,
- subnet_settings=[SubnetConfig(
- name=CONST.__getattribute__(
- 'tempest_private_subnet_name') + self.guid,
- cidr=CONST.__getattribute__('tempest_private_subnet_cidr'))
- ]))
- if network_creator is None or network_creator.get_network() is None:
- raise Exception("Failed to create private network")
- self.creators.append(network_creator)
-
- image_id = None
- image_id_alt = None
- flavor_id = None
- flavor_id_alt = None
-
- logger.debug("Creating image for Tempest suite")
- image_base_name = CONST.__getattribute__(
- 'openstack_image_name') + self.guid
- os_image_settings = openstack_tests.cirros_image_settings(
- image_base_name, public=True,
- image_metadata=self.cirros_image_config)
- logger.debug("Creating image for Tempest suite")
- image_creator = deploy_utils.create_image(
- self.os_creds, os_image_settings)
- if image_creator is None:
- raise Exception('Failed to create image')
- self.creators.append(image_creator)
- image_id = image_creator.get_image().id
-
- if use_custom_images:
- logger.debug("Creating 2nd image for Tempest suite")
- image_base_name_alt = CONST.__getattribute__(
- 'openstack_image_name_alt') + self.guid
- os_image_settings_alt = openstack_tests.cirros_image_settings(
- image_base_name_alt, public=True,
- image_metadata=self.cirros_image_config)
- logger.debug("Creating 2nd image for Tempest suite")
- image_creator_alt = deploy_utils.create_image(
- self.os_creds, os_image_settings_alt)
- if image_creator_alt is None:
- raise Exception('Failed to create image')
- self.creators.append(image_creator_alt)
- image_id_alt = image_creator_alt.get_image().id
-
- if (CONST.__getattribute__('tempest_use_custom_flavors') == 'True' or
- use_custom_flavors):
- logger.info("Creating flavor for Tempest suite")
- scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
- flavor_metadata = None
- if 'ovs' in scenario or 'fdio' in scenario:
- flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
- flavor_creator = OpenStackFlavor(
- self.os_creds, FlavorConfig(
- name=CONST.__getattribute__(
- 'openstack_flavor_name') + self.guid,
- ram=CONST.__getattribute__('openstack_flavor_ram'),
- disk=CONST.__getattribute__('openstack_flavor_disk'),
- vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
- metadata=flavor_metadata))
- flavor = flavor_creator.create()
- if flavor is None:
- raise Exception('Failed to create flavor')
- self.creators.append(flavor_creator)
- flavor_id = flavor.id
-
- if use_custom_flavors:
- logger.info("Creating 2nd flavor for Tempest suite")
- scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
- flavor_metadata_alt = None
- if 'ovs' in scenario or 'fdio' in scenario:
- flavor_metadata_alt = create_flavor.MEM_PAGE_SIZE_LARGE
- CONST.__setattr__('openstack_flavor_ram', 1024)
- flavor_creator_alt = OpenStackFlavor(
- self.os_creds, FlavorConfig(
- name=CONST.__getattribute__(
- 'openstack_flavor_name_alt') + self.guid,
- ram=CONST.__getattribute__('openstack_flavor_ram'),
- disk=CONST.__getattribute__('openstack_flavor_disk'),
- vcpus=CONST.__getattribute__('openstack_flavor_vcpus'),
- metadata=flavor_metadata_alt))
- flavor_alt = flavor_creator_alt.create()
- if flavor_alt is None:
- raise Exception('Failed to create flavor')
- self.creators.append(flavor_creator_alt)
- flavor_id_alt = flavor_alt.id
-
- print("RESOURCES CREATE: image_id: %s, image_id_alt: %s, "
- "flavor_id: %s, flavor_id_alt: %s" % (
- image_id, image_id_alt, flavor_id, flavor_id_alt,))
-
- result = {
- 'image_id': image_id,
- 'image_id_alt': image_id_alt,
- 'flavor_id': flavor_id,
- 'flavor_id_alt': flavor_id_alt
- }
-
- if create_project:
- result['project_id'] = project_id
- result['tenant_id'] = project_id # for compatibility
- result['user_id'] = user_id
-
- return result
-
- def cleanup(self):
+ LOGGER.warning(
+ 'No tenant network created. '
+ 'Trying EXTERNAL_NETWORK as a fallback')
+ rconfig.set(
+ 'heat_plugin', 'fixed_network_name',
+ env.get("EXTERNAL_NETWORK"))
+ rconfig.set(
+ 'heat_plugin', 'network_for_ssh', env.get("EXTERNAL_NETWORK"))
+ with open(self.conf_file, 'w', encoding='utf-8') as config_file:
+ rconfig.write(config_file)
+ self.backup_tempest_config(self.conf_file, self.res_dir)
+
+ def clean(self):
"""
Cleanup all OpenStack objects. Should be called on completion.
"""
- for creator in reversed(self.creators):
- try:
- creator.clean()
- except Exception as e:
- logger.error('Unexpected error cleaning - %s', e)
+ super().clean()
+ if self.user2:
+ self.orig_cloud.delete_user(self.user2.id)
diff --git a/functest/opnfv_tests/openstack/vmtp/__init__.py b/functest/opnfv_tests/openstack/vmtp/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/functest/opnfv_tests/openstack/vmtp/__init__.py
diff --git a/functest/opnfv_tests/openstack/vmtp/vmtp.py b/functest/opnfv_tests/openstack/vmtp/vmtp.py
new file mode 100644
index 000000000..9833cc72a
--- /dev/null
+++ b/functest/opnfv_tests/openstack/vmtp/vmtp.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+"""
+VMTP_ is a small python application that will automatically perform ping
+connectivity, round trip time measurement (latency) and TCP/UDP throughput
+measurement for the following East/West flows on any OpenStack deployment:
+
+- VM to VM same network (private fixed IP, flow #1)
+- VM to VM different network using fixed IP (same as intra-tenant L3 fixed IP,
+ flow #2)
+- VM to VM different network using floating IP and NAT (same as floating IP
+ inter-tenant L3, flow #3)
+
+.. _VMTP: http://vmtp.readthedocs.io/en/latest/
+"""
+
+import json
+import logging
+import os
+import subprocess
+import tempfile
+import time
+import yaml
+
+from xtesting.core import testcase
+
+from functest.core import singlevm
+from functest.utils import env
+from functest.utils import functest_utils
+
+
+class Vmtp(singlevm.VmReady2):
+ """Class to run Vmtp_ as an OPNFV Functest testcase
+
+ .. _Vmtp: http://vmtp.readthedocs.io/en/latest/
+ """
+ # pylint: disable=too-many-instance-attributes
+
+ __logger = logging.getLogger(__name__)
+
+ filename = ('/home/opnfv/functest/images/'
+ 'ubuntu-14.04-server-cloudimg-amd64-disk1.img')
+ flavor_ram = 2048
+ flavor_vcpus = 1
+ flavor_disk = 0
+ create_server_timeout = 300
+ ssh_retry_timeout = 240
+
+ def __init__(self, **kwargs):
+ if "case_name" not in kwargs:
+ kwargs["case_name"] = 'vmtp'
+ super().__init__(**kwargs)
+ self.config = f"{self.res_dir}/vmtp.conf"
+ (_, self.privkey_filename) = tempfile.mkstemp()
+ (_, self.pubkey_filename) = tempfile.mkstemp()
+
+    def check_requirements(self):
+        """Skip the testcase if fewer than two hypervisors are available."""
+ if self.count_hypervisors() < 2:
+ self.__logger.warning("Vmtp requires at least 2 hypervisors")
+ self.is_skipped = True
+ self.project.clean()
+
+ def create_network_resources(self):
+ """Create router
+
+        It creates a router whose gateway is the detected external network.
+
+        Raises: exception on error
+ """
+ assert self.cloud
+ assert self.ext_net
+ self.router = self.cloud.create_router(
+ name=f'{self.case_name}-router_{self.guid}',
+ ext_gateway_net_id=self.ext_net.id)
+ self.__logger.debug("router: %s", self.router)
+
+ def generate_keys(self):
+ """Generate Keys
+
+ Raises: Exception on error
+ """
+ assert self.cloud
+ name = f"vmtp_{self.guid}"
+ self.__logger.info("Creating keypair with name: '%s'", name)
+ keypair = self.cloud.create_keypair(name)
+ self.__logger.debug("keypair: %s", keypair)
+ with open(self.privkey_filename, 'w', encoding='utf-8') as key_file:
+ key_file.write(keypair.private_key)
+ with open(self.pubkey_filename, 'w', encoding='utf-8') as key_file:
+ key_file.write(keypair.public_key)
+ self.cloud.delete_keypair(keypair.id)
+
+ def write_config(self):
+ """Write vmtp.conf
+
+ Raises: Exception on error
+ """
+ assert self.cloud
+ if not os.path.exists(self.res_dir):
+ os.makedirs(self.res_dir)
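+        # 'vmtp -sc' dumps the default configuration on stdout; it is
+        # customized below and written to self.config.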
+ cmd = ['vmtp', '-sc']
+ output = subprocess.check_output(cmd).decode("utf-8")
+ self.__logger.info("%s\n%s", " ".join(cmd), output)
+ with open(self.config, "w+", encoding='utf-8') as conf:
+ vmtp_conf = yaml.full_load(output)
+ vmtp_conf["private_key_file"] = self.privkey_filename
+ vmtp_conf["public_key_file"] = self.pubkey_filename
+ vmtp_conf["image_name"] = str(self.image.name)
+ vmtp_conf["router_name"] = str(self.router.name)
+ vmtp_conf["flavor_type"] = str(self.flavor.name)
+ vmtp_conf["internal_network_name"] = [
+ f"pns-internal-net_{self.guid}",
+ f"pns-internal-net2_{self.guid}"]
+ vmtp_conf["vm_name_client"] = f"TestClient_{self.guid}"
+ vmtp_conf["vm_name_server"] = f"TestServer_{self.guid}"
+ vmtp_conf["security_group_name"] = f"pns-security{self.guid}"
+ vmtp_conf["dns_nameservers"] = [env.get('NAMESERVER')]
+ vmtp_conf["generic_retry_count"] = self.create_server_timeout // 2
+ vmtp_conf["ssh_retry_count"] = self.ssh_retry_timeout // 2
+ conf.write(yaml.dump(vmtp_conf))
+
+ def run_vmtp(self):
+ # pylint: disable=unexpected-keyword-arg
+ """Run Vmtp and generate charts
+
+ Raises: Exception on error
+ """
+ assert self.cloud
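+        # vmtp runs with the dedicated project credentials; any OS_TENANT_*
+        # variables are dropped in favour of the OS_PROJECT_* ones.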
+ new_env = dict(
+ os.environ,
+ OS_USERNAME=self.project.user.name,
+ OS_PROJECT_NAME=self.project.project.name,
+ OS_PROJECT_ID=self.project.project.id,
+ OS_PROJECT_DOMAIN_NAME=self.project.domain.name,
+ OS_USER_DOMAIN_NAME=self.project.domain.name,
+ OS_PASSWORD=self.project.password)
+ if not new_env["OS_AUTH_URL"].endswith(('v3', 'v3/')):
+ new_env["OS_AUTH_URL"] = f'{new_env["OS_AUTH_URL"]}/v3'
+ try:
+ del new_env['OS_TENANT_NAME']
+ del new_env['OS_TENANT_ID']
+ except Exception: # pylint: disable=broad-except
+ pass
+ cmd = ['vmtp', '-d', '--json', f'{self.res_dir}/vmtp.json',
+ '-c', self.config]
+ if env.get("VMTP_HYPERVISORS"):
+ hypervisors = functest_utils.convert_ini_to_list(
+ env.get("VMTP_HYPERVISORS"))
+ for hypervisor in hypervisors:
+ cmd.extend(["--hypervisor", hypervisor])
+ self.__logger.debug("cmd: %s", cmd)
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT, env=new_env).decode("utf-8")
+ self.__logger.info("%s\n%s", " ".join(cmd), output)
+ cmd = ['vmtp_genchart', '-c', f'{self.res_dir}/vmtp.html',
+ f'{self.res_dir}/vmtp.json']
+ output = subprocess.check_output(
+ cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.__logger.info("%s\n%s", " ".join(cmd), output)
+ with open(f'{self.res_dir}/vmtp.json', 'r',
+ encoding='utf-8') as res_file:
+ self.details = json.load(res_file)
+
+ def run(self, **kwargs):
+ self.start_time = time.time()
+ status = testcase.TestCase.EX_RUN_ERROR
+ try:
+ assert self.cloud
+ assert super().run(**kwargs) == self.EX_OK
+ status = testcase.TestCase.EX_RUN_ERROR
+ if self.orig_cloud.get_role("admin"):
+ role_name = "admin"
+ elif self.orig_cloud.get_role("Admin"):
+ role_name = "Admin"
+ else:
+                raise Exception("Cannot detect either admin or Admin role")
+ self.orig_cloud.grant_role(
+ role_name, user=self.project.user.id,
+ project=self.project.project.id,
+ domain=self.project.domain.id)
+ self.generate_keys()
+ self.write_config()
+ self.run_vmtp()
+ self.result = 100
+ status = testcase.TestCase.EX_OK
+ except subprocess.CalledProcessError as cpe:
+ self.__logger.error(
+ "Exception when calling %s\n%s", cpe.cmd,
+ cpe.output.decode("utf-8"))
+ self.result = 0
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception("Cannot run vmtp")
+ self.result = 0
+ self.stop_time = time.time()
+ return status
+
+ def clean(self):
+ try:
+ assert self.cloud
+ super().clean()
+ os.remove(self.privkey_filename)
+ os.remove(self.pubkey_filename)
+ self.cloud.delete_network(f"pns-internal-net_{self.guid}")
+ self.cloud.delete_network(f"pns-internal-net2_{self.guid}")
+ except Exception: # pylint: disable=broad-except
+ pass
diff --git a/functest/opnfv_tests/openstack/vping/ping.sh b/functest/opnfv_tests/openstack/vping/ping.sh
deleted file mode 100644
index 15f5e84e1..000000000
--- a/functest/opnfv_tests/openstack/vping/ping.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-
-ping -c 1 $1 2>&1 >/dev/null
-RES=$?
-if [ "Z$RES" = "Z0" ] ; then
- echo 'vPing OK'
-else
- echo 'vPing KO'
-fi
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
deleted file mode 100644
index df9774ece..000000000
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
-#
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""Define the parent class of vping_ssh and vping_userdata testcases."""
-
-from datetime import datetime
-import logging
-import time
-import uuid
-
-from functest.core import testcase
-from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
-
-from snaps.config.flavor import FlavorConfig
-from snaps.config.network import NetworkConfig, SubnetConfig
-from snaps.config.router import RouterConfig
-from snaps.openstack import create_flavor
-from snaps.openstack.create_flavor import OpenStackFlavor
-from snaps.openstack.tests import openstack_tests
-from snaps.openstack.utils import deploy_utils
-
-
-class VPingBase(testcase.TestCase):
-
- """
- Base class for vPing tests that check connectivity between two VMs shared
- internal network.
- This class is responsible for creating the image, internal network.
- """
- # pylint: disable=too-many-instance-attributes
-
- def __init__(self, **kwargs):
- super(VPingBase, self).__init__(**kwargs)
-
- self.logger = logging.getLogger(__name__)
-
- if 'os_creds' in kwargs:
- self.os_creds = kwargs['os_creds']
- else:
- creds_override = None
- if hasattr(CONST, 'snaps_os_creds_override'):
- creds_override = CONST.__getattribute__(
- 'snaps_os_creds_override')
-
- self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
- overrides=creds_override)
-
- self.creators = list()
- self.image_creator = None
- self.network_creator = None
- self.vm1_creator = None
- self.vm2_creator = None
- self.router_creator = None
-
- # Shared metadata
- self.guid = '-' + str(uuid.uuid4())
-
- self.router_name = CONST.__getattribute__(
- 'vping_router_name') + self.guid
- self.vm1_name = CONST.__getattribute__('vping_vm_name_1') + self.guid
- self.vm2_name = CONST.__getattribute__('vping_vm_name_2') + self.guid
-
- self.vm_boot_timeout = CONST.__getattribute__('vping_vm_boot_timeout')
- self.vm_delete_timeout = CONST.__getattribute__(
- 'vping_vm_delete_timeout')
- self.vm_ssh_connect_timeout = CONST.__getattribute__(
- 'vping_vm_ssh_connect_timeout')
- self.ping_timeout = CONST.__getattribute__('vping_ping_timeout')
- self.flavor_name = 'vping-flavor' + self.guid
-
- # Move this configuration option up for all tests to leverage
- if hasattr(CONST, 'snaps_images_cirros'):
- self.cirros_image_config = CONST.__getattribute__(
- 'snaps_images_cirros')
- else:
- self.cirros_image_config = None
-
- def run(self):
- """
- Begins the test execution which should originate from the subclass
- """
- self.logger.info('Begin virtual environment setup')
-
- self.start_time = time.time()
- self.logger.info(
- "vPing Start Time:'%s'",
- datetime.fromtimestamp(self.start_time).strftime(
- '%Y-%m-%d %H:%M:%S'))
-
- image_base_name = '{}-{}'.format(
- CONST.__getattribute__('vping_image_name'),
- str(self.guid))
- os_image_settings = openstack_tests.cirros_image_settings(
- image_base_name, image_metadata=self.cirros_image_config)
- self.logger.info("Creating image with name: '%s'", image_base_name)
-
- self.image_creator = deploy_utils.create_image(
- self.os_creds, os_image_settings)
- self.creators.append(self.image_creator)
-
- private_net_name = CONST.__getattribute__(
- 'vping_private_net_name') + self.guid
- private_subnet_name = CONST.__getattribute__(
- 'vping_private_subnet_name') + self.guid
- private_subnet_cidr = CONST.__getattribute__(
- 'vping_private_subnet_cidr')
-
- vping_network_type = None
- vping_physical_network = None
- vping_segmentation_id = None
-
- if hasattr(CONST, 'vping_network_type'):
- vping_network_type = CONST.__getattribute__(
- 'vping_network_type')
- if hasattr(CONST, 'vping_physical_network'):
- vping_physical_network = CONST.__getattribute__(
- 'vping_physical_network')
- if hasattr(CONST, 'vping_segmentation_id'):
- vping_segmentation_id = CONST.__getattribute__(
- 'vping_segmentation_id')
-
- self.logger.info(
- "Creating network with name: '%s'", private_net_name)
- self.network_creator = deploy_utils.create_network(
- self.os_creds,
- NetworkConfig(
- name=private_net_name,
- network_type=vping_network_type,
- physical_network=vping_physical_network,
- segmentation_id=vping_segmentation_id,
- subnet_settings=[SubnetConfig(
- name=private_subnet_name,
- cidr=private_subnet_cidr)]))
- self.creators.append(self.network_creator)
-
- # Creating router to external network
- log = "Creating router with name: '%s'" % self.router_name
- self.logger.info(log)
- ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
- self.router_creator = deploy_utils.create_router(
- self.os_creds,
- RouterConfig(
- name=self.router_name,
- external_gateway=ext_net_name,
- internal_subnets=[private_subnet_name]))
- self.creators.append(self.router_creator)
-
- self.logger.info(
- "Creating flavor with name: '%s'", self.flavor_name)
- scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
- flavor_metadata = None
- flavor_ram = 512
- if 'ovs' in scenario or 'fdio' in scenario:
- flavor_metadata = create_flavor.MEM_PAGE_SIZE_LARGE
- flavor_ram = 1024
- flavor_creator = OpenStackFlavor(
- self.os_creds,
- FlavorConfig(name=self.flavor_name, ram=flavor_ram, disk=1,
- vcpus=1, metadata=flavor_metadata))
- flavor_creator.create()
- self.creators.append(flavor_creator)
-
- def _execute(self):
- """
- Method called by subclasses after environment has been setup
- :return: the exit code
- """
- self.logger.info('Begin test execution')
-
- test_ip = self.vm1_creator.get_port_ip(
- self.vm1_creator.instance_settings.port_settings[0].name)
-
- if self.vm1_creator.vm_active(
- block=True) and self.vm2_creator.vm_active(block=True):
- result = self._do_vping(self.vm2_creator, test_ip)
- else:
- raise Exception('VMs never became active')
-
- self.stop_time = time.time()
-
- if result != testcase.TestCase.EX_OK:
- self.result = 0
- return testcase.TestCase.EX_RUN_ERROR
-
- self.result = 100
- return testcase.TestCase.EX_OK
-
- def _cleanup(self):
- """
- Cleanup all OpenStack objects. Should be called on completion
- :return:
- """
- if CONST.__getattribute__('vping_cleanup_objects') == 'True':
- for creator in reversed(self.creators):
- try:
- creator.clean()
- except Exception as error: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', error)
-
- def _do_vping(self, vm_creator, test_ip):
- """
- Method to be implemented by subclasses
- Begins the real test after the OpenStack environment has been setup
- :param vm_creator: the SNAPS VM instance creator object
- :param test_ip: the IP to which the VM needs to issue the ping
- :return: T/F
- """
- raise NotImplementedError('vping execution is not implemented')
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index 7df767edc..ad64348c4 100644
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-#
+
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -7,32 +7,15 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
-
"""vPingSSH testcase."""
-# This 1st import is here simply for pep8 as the 'os' package import appears
-# to be required for mock and the unit tests will fail without it
-import os # noqa # pylint: disable=unused-import
-import time
-
-from scp import SCPClient
-import pkg_resources
-
-from functest.core.testcase import TestCase
-from functest.energy import energy
-from functest.opnfv_tests.openstack.vping import vping_base
-from functest.utils.constants import CONST
-
-from snaps.config.keypair import KeypairConfig
-from snaps.config.network import PortConfig
-from snaps.config.security_group import (
- Direction, Protocol, SecurityGroupConfig, SecurityGroupRuleConfig)
-from snaps.config.vm_inst import FloatingIpConfig, VmInstanceConfig
+import logging
-from snaps.openstack.utils import deploy_utils
+from functest.core import singlevm
+from functest.utils import config
-class VPingSSH(vping_base.VPingBase):
+class VPingSSH(singlevm.SingleVm2):
"""
VPingSSH testcase implementation.
@@ -40,200 +23,40 @@ class VPingSSH(vping_base.VPingBase):
to issue the ping command to the second
"""
+ __logger = logging.getLogger(__name__)
+
def __init__(self, **kwargs):
"""Initialize testcase."""
if "case_name" not in kwargs:
kwargs["case_name"] = "vping_ssh"
- super(VPingSSH, self).__init__(**kwargs)
-
- self.kp_name = CONST.__getattribute__('vping_keypair_name') + self.guid
- self.kp_priv_file = CONST.__getattribute__('vping_keypair_priv_file')
- self.kp_pub_file = CONST.__getattribute__('vping_keypair_pub_file')
- self.sg_name = CONST.__getattribute__('vping_sg_name') + self.guid
- self.sg_desc = CONST.__getattribute__('vping_sg_desc')
-
- @energy.enable_recording
- def run(self):
- """
- Excecute VPingSSH testcase.
-
- Sets up the OpenStack keypair, router, security group, and VM instance
- objects then validates the ping.
- :return: the exit code from the super.execute() method
- """
- try:
- super(VPingSSH, self).run()
-
- log = "Creating keypair with name: '%s'" % self.kp_name
- self.logger.info(log)
- kp_creator = deploy_utils.create_keypair(
- self.os_creds,
- KeypairConfig(
- name=self.kp_name, private_filepath=self.kp_priv_file,
- public_filepath=self.kp_pub_file))
- self.creators.append(kp_creator)
-
- # Creating Instance 1
- port1_settings = PortConfig(
- name=self.vm1_name + '-vPingPort',
- network_name=self.network_creator.network_settings.name)
- instance1_settings = VmInstanceConfig(
- name=self.vm1_name, flavor=self.flavor_name,
- vm_boot_timeout=self.vm_boot_timeout,
- vm_delete_timeout=self.vm_delete_timeout,
- ssh_connect_timeout=self.vm_ssh_connect_timeout,
- port_settings=[port1_settings])
-
- log = ("Creating VM 1 instance with name: '%s'"
- % instance1_settings.name)
- self.logger.info(log)
- self.vm1_creator = deploy_utils.create_vm_instance(
- self.os_creds,
- instance1_settings,
- self.image_creator.image_settings,
- keypair_creator=kp_creator)
- self.creators.append(self.vm1_creator)
-
- # Creating Instance 2
- sg_creator = self.__create_security_group()
- self.creators.append(sg_creator)
-
- port2_settings = PortConfig(
- name=self.vm2_name + '-vPingPort',
- network_name=self.network_creator.network_settings.name)
- instance2_settings = VmInstanceConfig(
- name=self.vm2_name, flavor=self.flavor_name,
- vm_boot_timeout=self.vm_boot_timeout,
- vm_delete_timeout=self.vm_delete_timeout,
- ssh_connect_timeout=self.vm_ssh_connect_timeout,
- port_settings=[port2_settings],
- security_group_names=[sg_creator.sec_grp_settings.name],
- floating_ip_settings=[FloatingIpConfig(
- name=self.vm2_name + '-FIPName',
- port_name=port2_settings.name,
- router_name=self.router_creator.router_settings.name)])
-
- log = ("Creating VM 2 instance with name: '%s'"
- % instance2_settings.name)
- self.logger.info(log)
- self.vm2_creator = deploy_utils.create_vm_instance(
- self.os_creds,
- instance2_settings,
- self.image_creator.image_settings,
- keypair_creator=kp_creator)
- self.creators.append(self.vm2_creator)
-
- return self._execute()
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error running test - ' + exc.message)
- return TestCase.EX_RUN_ERROR
- finally:
- self._cleanup()
-
- def _do_vping(self, vm_creator, test_ip):
- """
- Execute ping command.
-
- Override from super
- """
- if vm_creator.vm_ssh_active(block=True):
- ssh = vm_creator.ssh_client()
- if not self._transfer_ping_script(ssh):
- return TestCase.EX_RUN_ERROR
- return self._do_vping_ssh(ssh, test_ip)
- else:
- return TestCase.EX_RUN_ERROR
-
- def _transfer_ping_script(self, ssh):
- """
- Transfert vping script to VM.
-
- Uses SCP to copy the ping script via the SSH client
- :param ssh: the SSH client
- :return:
- """
- self.logger.info("Trying to transfer ping.sh")
- scp = SCPClient(ssh.get_transport())
- ping_script = pkg_resources.resource_filename(
- 'functest.opnfv_tests.openstack.vping', 'ping.sh')
- try:
- scp.put(ping_script, "~/")
- except Exception: # pylint: disable=broad-except
- self.logger.error("Cannot SCP the file '%s'", ping_script)
- return False
-
- cmd = 'chmod 755 ~/ping.sh'
- # pylint: disable=unused-variable
- (stdin, stdout, stderr) = ssh.exec_command(cmd)
- for line in stdout.readlines():
- print line
-
- return True
-
- def _do_vping_ssh(self, ssh, test_ip):
- """
- Execute ping command via SSH.
-
- Pings the test_ip via the SSH client
- :param ssh: the SSH client used to issue the ping command
- :param test_ip: the IP for the ping command to use
- :return: exit_code (int)
- """
- exit_code = TestCase.EX_TESTCASE_FAILED
- self.logger.info("Waiting for ping...")
-
- sec = 0
- cmd = '~/ping.sh ' + test_ip
- flag = False
-
- while True:
- time.sleep(1)
- (_, stdout, _) = ssh.exec_command(cmd)
- output = stdout.readlines()
-
- for line in output:
- if "vPing OK" in line:
- self.logger.info("vPing detected!")
- exit_code = TestCase.EX_OK
- flag = True
- break
-
- elif sec == self.ping_timeout:
- self.logger.info("Timeout reached.")
- flag = True
- break
- if flag:
- break
- log = "Pinging %s. Waiting for response..." % test_ip
- self.logger.debug(log)
- sec += 1
- return exit_code
-
- def __create_security_group(self):
- """
- Configure OpenStack security groups.
-
- Configures and deploys an OpenStack security group object
- :return: the creator object
- """
- sg_rules = list()
- sg_rules.append(
- SecurityGroupRuleConfig(
- sec_grp_name=self.sg_name, direction=Direction.ingress,
- protocol=Protocol.icmp))
- sg_rules.append(
- SecurityGroupRuleConfig(
- sec_grp_name=self.sg_name, direction=Direction.ingress,
- protocol=Protocol.tcp, port_range_min=22, port_range_max=22))
- sg_rules.append(
- SecurityGroupRuleConfig(
- sec_grp_name=self.sg_name, direction=Direction.egress,
- protocol=Protocol.tcp, port_range_min=22, port_range_max=22))
-
- log = "Security group with name: '%s'" % self.sg_name
- self.logger.info(log)
- return deploy_utils.create_security_group(self.os_creds,
- SecurityGroupConfig(
- name=self.sg_name,
- description=self.sg_desc,
- rule_settings=sg_rules))
+ super().__init__(**kwargs)
+ self.vm2 = None
+
+ def prepare(self):
+ super().prepare()
+ self.vm2 = self.boot_vm(
+ f'{self.case_name}-vm2_{self.guid}',
+ security_groups=[self.sec.id])
+
+ def execute(self):
+ """Ping the second VM
+
+ Returns: ping exit codes
+ """
+ assert self.ssh
+ if not self.check_regex_in_console(self.vm2.name):
+ return 1
+ ip4 = self.vm2.private_v4 or self.vm2.addresses[
+ self.network.name][0].addr
+ (_, stdout, stderr) = self.ssh.exec_command(f'ping -c 1 {ip4}')
+ self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
+ return stdout.channel.recv_exit_status()
+
+ def clean(self):
+ assert self.cloud
+ if self.vm2:
+ self.cloud.delete_server(
+ self.vm2, wait=True,
+ timeout=getattr(config.CONF, 'vping_vm_delete_timeout'))
+ super().clean()
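
The rewritten VPingSSH above reduces the SSH leg of vPing to a single remote command whose exit status becomes the test verdict: execute() asserts the SSH session opened by the singlevm base class, resolves vm2's fixed IP and returns the exit code of one `ping -c 1`. A minimal standalone sketch of that idiom, assuming an already-connected paramiko SSHClient and a reachable target address (ping_once and target_ip are illustrative names, not part of the functest API):

    import paramiko

    def ping_once(ssh: paramiko.SSHClient, target_ip: str) -> int:
        """Run a single ping on the remote VM and return its exit status."""
        _, stdout, stderr = ssh.exec_command(f'ping -c 1 {target_ip}')
        print("output:\n%s" % stdout.read().decode("utf-8"))
        print("error:\n%s" % stderr.read().decode("utf-8"))
        # recv_exit_status() blocks until the remote command terminates;
        # 0 means the single echo request was answered.
        return stdout.channel.recv_exit_status()
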
diff --git a/functest/opnfv_tests/openstack/vping/vping_userdata.py b/functest/opnfv_tests/openstack/vping/vping_userdata.py
index ceba0917a..8a8f26f37 100644
--- a/functest/opnfv_tests/openstack/vping/vping_userdata.py
+++ b/functest/opnfv_tests/openstack/vping/vping_userdata.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-#
+
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -9,17 +9,16 @@
"""vping_userdata testcase."""
+import logging
import time
-from snaps.config.network import PortConfig
-from snaps.config.vm_inst import VmInstanceConfig
-from snaps.openstack.utils import deploy_utils
+from xtesting.core import testcase
-from functest.core.testcase import TestCase
-from functest.opnfv_tests.openstack.vping import vping_base
+from functest.core import singlevm
+from functest.utils import config
-class VPingUserdata(vping_base.VPingBase):
+class VPingUserdata(singlevm.VmReady2):
"""
Class to execute the vPing test using userdata and the VM's console
"""
@@ -27,117 +26,112 @@ class VPingUserdata(vping_base.VPingBase):
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = "vping_userdata"
- super(VPingUserdata, self).__init__(**kwargs)
+ super().__init__(**kwargs)
+ self.logger = logging.getLogger(__name__)
+ self.vm1 = None
+ self.vm2 = None
- def run(self):
+ def run(self, **kwargs):
"""
Sets up the OpenStack VM instance objects then executes the ping and
validates.
:return: the exit code from the super.execute() method
"""
try:
- super(VPingUserdata, self).run()
-
- # Creating Instance 1
- port1_settings = PortConfig(
- name=self.vm1_name + '-vPingPort',
- network_name=self.network_creator.network_settings.name)
- instance1_settings = VmInstanceConfig(
- name=self.vm1_name,
- flavor=self.flavor_name,
- vm_boot_timeout=self.vm_boot_timeout,
- port_settings=[port1_settings])
-
- self.logger.info(
- "Creating VM 1 instance with name: '%s'",
- instance1_settings.name)
- self.vm1_creator = deploy_utils.create_vm_instance(
- self.os_creds, instance1_settings,
- self.image_creator.image_settings)
- self.creators.append(self.vm1_creator)
-
- userdata = _get_userdata(
- self.vm1_creator.get_port_ip(port1_settings.name))
- if userdata:
- # Creating Instance 2
- port2_settings = PortConfig(
- name=self.vm2_name + '-vPingPort',
- network_name=self.network_creator.network_settings.name)
- instance2_settings = VmInstanceConfig(
- name=self.vm2_name,
- flavor=self.flavor_name,
- vm_boot_timeout=self.vm_boot_timeout,
- port_settings=[port2_settings],
- userdata=userdata)
-
- self.logger.info(
- "Creating VM 2 instance with name: '%s'",
- instance2_settings.name)
- self.vm2_creator = deploy_utils.create_vm_instance(
- self.os_creds, instance2_settings,
- self.image_creator.image_settings)
- self.creators.append(self.vm2_creator)
- else:
- raise Exception('Userdata is None')
-
- return self._execute()
-
- finally:
- self._cleanup()
-
- def _do_vping(self, vm_creator, test_ip):
+ assert self.cloud
+ assert super().run(
+ **kwargs) == testcase.TestCase.EX_OK
+ self.result = 0
+ self.vm1 = self.boot_vm()
+ self.vm2 = self.boot_vm(
+ f'{self.case_name}-vm2_{self.guid}',
+ userdata=self._get_userdata())
+
+ result = self._do_vping()
+ self.stop_time = time.time()
+ if result != testcase.TestCase.EX_OK:
+ return testcase.TestCase.EX_RUN_ERROR
+ self.result = 100
+ return testcase.TestCase.EX_OK
+ except Exception: # pylint: disable=broad-except
+ self.logger.exception('Unexpected error running vping_userdata')
+ return testcase.TestCase.EX_RUN_ERROR
+
+ def _do_vping(self):
"""
Override from super
"""
+ if not (self.vm1.private_v4 or self.vm1.addresses[
+ self.network.name][0].addr):
+ self.logger.error("vm1: IP addr missing")
+ return testcase.TestCase.EX_TESTCASE_FAILED
+
self.logger.info("Waiting for ping...")
- exit_code = TestCase.EX_TESTCASE_FAILED
+ exit_code = testcase.TestCase.EX_TESTCASE_FAILED
sec = 0
tries = 0
while True:
time.sleep(1)
- p_console = vm_creator.get_console_output()
+ p_console = self.cloud.get_server_console(self.vm2.id)
+ self.logger.debug("console: \n%s", p_console)
if "vPing OK" in p_console:
self.logger.info("vPing detected!")
- exit_code = TestCase.EX_OK
+ exit_code = testcase.TestCase.EX_OK
break
- elif "failed to read iid from metadata" in p_console or tries > 5:
+ if "failed to read iid from metadata" in p_console or tries > 5:
self.logger.info("Failed to read iid from metadata")
break
- elif sec == self.ping_timeout:
+ if sec == getattr(config.CONF, 'vping_ping_timeout'):
self.logger.info("Timeout reached.")
break
- elif sec % 10 == 0:
+ if sec % 10 == 0:
if "request failed" in p_console:
self.logger.debug(
- "It seems userdata is not supported in nova boot. " +
+ "It seems userdata is not supported in nova boot. "
"Waiting a bit...")
tries += 1
else:
self.logger.debug(
- "Pinging %s. Waiting for response...", test_ip)
+ "Pinging %s. Waiting for response...",
+ self.vm1.private_v4 or self.vm1.addresses[
+ self.network.name][0].addr)
sec += 1
return exit_code
-
-def _get_userdata(test_ip):
- """
- Returns the post VM creation script to be added into the VM's userdata
- :param test_ip: the IP value to substitute into the script
- :return: the bash script contents
- """
- if test_ip:
- return ("#!/bin/sh\n\n"
- "while true; do\n"
- " ping -c 1 %s 2>&1 >/dev/null\n"
- " RES=$?\n"
- " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
- " echo 'vPing OK'\n"
- " break\n"
- " else\n"
- " echo 'vPing KO'\n"
- " fi\n"
- " sleep 1\n"
- "done\n" % str(test_ip))
- return None
+    def _get_userdata(self):
+        """
+        Return the post-creation script to be injected into vm2's userdata.
+
+        :return: the shell script contents, or None if vm1 has no IP yet
+        """
+        ip4 = self.vm1.private_v4 or self.vm1.addresses[
+            self.network.name][0].addr
+        if ip4:
+ return ("#!/bin/sh\n\n"
+ "while true; do\n"
+ f" ping -c 1 {ip4} 2>&1 >/dev/null\n"
+ " RES=$?\n"
+ " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
+ " echo 'vPing OK'\n"
+ " break\n"
+ " else\n"
+ " echo 'vPing KO'\n"
+ " fi\n"
+ " sleep 1\n"
+ "done\n")
+ return None
+
+ def clean(self):
+ assert self.cloud
+ if self.vm1:
+ self.cloud.delete_server(
+ self.vm1, wait=True,
+ timeout=getattr(config.CONF, 'vping_vm_delete_timeout'))
+ if self.vm2:
+ self.cloud.delete_server(
+ self.vm2, wait=True,
+ timeout=getattr(config.CONF, 'vping_vm_delete_timeout'))
+ super().clean()
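
For the userdata variant, vm2 is only ever observed through its Nova console log: the script injected by _get_userdata() prints 'vPing OK' once vm1 answers, and _do_vping() polls the console for that marker. A self-contained sketch of the same polling idiom with openstacksdk, assuming a clouds.yaml entry and a server already booted with that script ('mycloud' and 'vping-vm2' are placeholders):

    import time

    import openstack

    def wait_for_marker(cloud, server_id, marker="vPing OK", timeout=200):
        """Poll the server console until the userdata script prints marker."""
        for _ in range(timeout):
            console = cloud.get_server_console(server_id)
            if marker in console:
                return True
            if "failed to read iid from metadata" in console:
                # Same early exit as _do_vping(): without a working metadata
                # service, cloud-init never runs the userdata script.
                return False
            time.sleep(1)
        return False

    if __name__ == '__main__':
        conn = openstack.connect(cloud='mycloud')
        vm2 = conn.get_server('vping-vm2')
        print(wait_for_marker(conn, vm2.id))
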