-rw-r--r--  .travis.yml | 28
-rw-r--r--  build.sh | 2
-rw-r--r--  docker/restapi/Dockerfile | 4
-rw-r--r--  docker/smoke/Dockerfile | 4
-rw-r--r--  functest/api/resources/v1/creds.py | 6
-rw-r--r--  functest/api/resources/v1/tasks.py | 6
-rw-r--r--  functest/api/resources/v1/testcases.py | 12
-rw-r--r--  functest/ci/check_deployment.py | 3
-rw-r--r--  functest/ci/config_functest.yaml | 3
-rw-r--r--  functest/ci/run_tests.py | 19
-rw-r--r--  functest/cli/commands/cli_os.py | 6
-rw-r--r--  functest/core/vnf.py | 4
-rw-r--r--  functest/energy/energy.py | 2
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py | 42
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/defcore.txt | 313
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/refstack_client.py | 162
-rw-r--r--  functest/opnfv_tests/openstack/refstack_client/tempest_conf.py | 69
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_test_runner.py | 15
-rw-r--r--  functest/opnfv_tests/openstack/snaps/snaps_utils.py | 12
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py | 151
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py | 55
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_base.py | 49
-rw-r--r--  functest/opnfv_tests/openstack/vping/vping_ssh.py | 12
-rw-r--r--  functest/opnfv_tests/sdn/odl/odl.py | 7
-rw-r--r--  functest/opnfv_tests/vnf/epc/juju_epc.py | 37
-rw-r--r--  functest/opnfv_tests/vnf/ims/clearwater_ims_base.py | 8
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py | 33
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims_perf.py | 4
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py | 201
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_openims.py | 190
-rw-r--r--  functest/opnfv_tests/vnf/router/cloudify_vrouter.py | 51
-rw-r--r--  functest/opnfv_tests/vnf/router/utilvnf.py | 6
-rw-r--r--  functest/opnfv_tests/vnf/router/vrouter_base.py | 6
-rw-r--r--  functest/tests/unit/ci/test_run_tests.py | 3
-rw-r--r--  functest/tests/unit/core/test_vnf.py | 9
-rw-r--r--  functest/tests/unit/odl/test_odl.py | 20
-rw-r--r--  functest/tests/unit/openstack/rally/test_rally.py | 4
-rw-r--r--  functest/tests/unit/openstack/refstack_client/test_refstack_client.py | 43
-rw-r--r--  functest/tests/unit/openstack/tempest/test_conf_utils.py | 65
-rw-r--r--  functest/tests/unit/openstack/tempest/test_tempest.py | 12
-rw-r--r--  functest/tests/unit/utils/test_env.py | 39
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py | 87
-rw-r--r--  functest/tests/unit/utils/test_openstack_utils.py | 1784
-rw-r--r--  functest/utils/constants.py | 20
-rw-r--r--  functest/utils/env.py | 22
-rw-r--r--  functest/utils/functest_utils.py | 98
-rw-r--r--  functest/utils/openstack_utils.py | 1486
-rw-r--r--  requirements.txt | 7
-rw-r--r--  setup.cfg | 1
49 files changed, 547 insertions, 4675 deletions
diff --git a/.travis.yml b/.travis.yml
index fcb229ead..cc8385855 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,12 +14,12 @@ jobs:
- stage: build functest-core images
script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/core"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/core"
- arm64_dirs=""
- stage: publish functest-core manifests
@@ -31,62 +31,62 @@ jobs:
- stage: build all functest images
script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/healthcheck"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/smoke"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/features"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/components"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs=""
- arm64_dirs="docker/parser"
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/healthcheck"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/smoke"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/features"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/components"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/vnf"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/restapi"
- arm64_dirs=""
- script: sudo -E bash build.sh
env:
- - repo="${DOCKER_USERNAME}"
+ - REPO="${DOCKER_USERNAME}"
- amd64_dirs="docker/parser"
- arm64_dirs=""
- stage: publish all manifests
diff --git a/build.sh b/build.sh
index 929f71f1b..88c5a4150 100644
--- a/build.sh
+++ b/build.sh
@@ -2,7 +2,7 @@
set -e
-repo=${repo:-opnfv}
+repo=${REPO:-opnfv}
amd64_dirs=${amd64_dirs-"\
docker/core \
docker/healthcheck \
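
build.sh now reads the repository prefix from the REPO environment variable exported by the Travis jobs above, falling back to opnfv when it is unset or empty. A minimal Python sketch of the same override-with-default behaviour (REPO and the opnfv default come from the diff, everything else is illustrative):

    import os

    # Mirrors the shell expansion ${REPO:-opnfv}: fall back to "opnfv"
    # when REPO is unset or set to an empty string.
    repo = os.environ.get("REPO") or "opnfv"
    print(repo)
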
diff --git a/docker/restapi/Dockerfile b/docker/restapi/Dockerfile
index 15b83fe3f..a731679ff 100644
--- a/docker/restapi/Dockerfile
+++ b/docker/restapi/Dockerfile
@@ -6,6 +6,7 @@ ARG ODL_TAG=master
ARG RALLY_TAG=stable/0.10
ARG OS_FAULTS_TAG=0.1.16
ARG REFSTACK_TAG=master
+ARG REFSTACK_TARGET=2017.09
ARG FDS_TAG=master
ARG VIMS_TAG=stable
@@ -56,6 +57,9 @@ RUN apk --no-cache add --update python3 sshpass \
mkdir -p /etc/rally && \
printf "[database]\nconnection = 'sqlite:////var/lib/rally/database/rally.sqlite'" > /etc/rally/rally.conf && \
mkdir -p /var/lib/rally/database && rally db create && \
+ mkdir -p /home/opnfv/functest/data/refstack && \
+ wget "https://refstack.openstack.org/api/v1/guidelines/${REFSTACK_TARGET}/tests?target=compute&type=required&alias=true&flag=false" \
+ -O /home/opnfv/functest/data/refstack/defcore.txt && \
apk del .build-deps
EXPOSE 5000
CMD ["functest_restapi"]
diff --git a/docker/smoke/Dockerfile b/docker/smoke/Dockerfile
index 5e5b1591e..00f5bac08 100644
--- a/docker/smoke/Dockerfile
+++ b/docker/smoke/Dockerfile
@@ -6,6 +6,7 @@ ARG ODL_TAG=master
ARG RALLY_TAG=stable/0.10
ARG OS_FAULTS_TAG=0.1.16
ARG REFSTACK_TAG=master
+ARG REFSTACK_TARGET=2017.09
COPY thirdparty-requirements.txt thirdparty-requirements.txt
RUN apk --no-cache add --virtual .build-deps --update \
@@ -39,6 +40,9 @@ RUN apk --no-cache add --virtual .build-deps --update \
mkdir -p /etc/rally && \
printf "[database]\nconnection = 'sqlite:////var/lib/rally/database/rally.sqlite'" > /etc/rally/rally.conf && \
mkdir -p /var/lib/rally/database && rally db create && \
+ mkdir -p /home/opnfv/functest/data/refstack && \
+ wget "https://refstack.openstack.org/api/v1/guidelines/${REFSTACK_TARGET}/tests?target=compute&type=required&alias=true&flag=false" \
+ -O /home/opnfv/functest/data/refstack/defcore.txt && \
apk del .build-deps
COPY testcases.yaml /usr/lib/python2.7/site-packages/functest/ci/testcases.yaml
CMD ["run_tests", "-t", "all"]
diff --git a/functest/api/resources/v1/creds.py b/functest/api/resources/v1/creds.py
index 25c0fd242..3eae19662 100644
--- a/functest/api/resources/v1/creds.py
+++ b/functest/api/resources/v1/creds.py
@@ -23,7 +23,7 @@ from functest.api.base import ApiResource
from functest.api.common import api_utils
from functest.ci import run_tests
from functest.cli.commands.cli_os import OpenStack
-from functest.utils.constants import CONST
+from functest.utils import constants
LOGGER = logging.getLogger(__name__)
@@ -39,7 +39,7 @@ class V1Creds(ApiResource):
endpoint='{0}/credentials'.format(ENDPOINT_CREDS))
def get(self): # pylint: disable=no-self-use
""" Get credentials """
- run_tests.Runner.source_envfile(getattr(CONST, 'env_file'))
+ run_tests.Runner.source_envfile(constants.ENV_FILE)
credentials_show = OpenStack.show_credentials()
return jsonify(credentials_show)
@@ -65,7 +65,7 @@ class V1Creds(ApiResource):
lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
- rc_file = getattr(CONST, 'env_file')
+ rc_file = constants.ENV_FILE
with open(rc_file, 'w') as creds_file:
creds_file.writelines(lines)
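
Every call site that used to read getattr(CONST, 'env_file') now imports the path from functest.utils.constants. That module's body is not shown in this diff, but judging from the hard-coded defaults removed elsewhere in the change (check_deployment.py, run_tests.py, vnf.py), it presumably reduces to something like:

    # functest/utils/constants.py -- sketch only, the real module is not in this diff
    ENV_FILE = '/home/opnfv/functest/conf/env_file'
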
diff --git a/functest/api/resources/v1/tasks.py b/functest/api/resources/v1/tasks.py
index 5af8a678c..30501adf7 100644
--- a/functest/api/resources/v1/tasks.py
+++ b/functest/api/resources/v1/tasks.py
@@ -15,16 +15,16 @@ import errno
import json
import logging
import os
-import pkg_resources
import uuid
from flask import jsonify
from flasgger.utils import swag_from
+import pkg_resources
from functest.api.base import ApiResource
from functest.api.common import api_utils
from functest.api.database.v1.handlers import TasksHandler
-from functest.utils.constants import CONST
+from functest.utils import config
LOGGER = logging.getLogger(__name__)
@@ -85,7 +85,7 @@ class V1TaskLog(ApiResource):
except ValueError:
return api_utils.result_handler(status=1, data='No such task id')
- task_log_dir = getattr(CONST, 'dir_results')
+ task_log_dir = getattr(config.CONF, 'dir_results')
# pylint: disable=maybe-no-member
index = int(self._get_args().get('index', 0))
diff --git a/functest/api/resources/v1/testcases.py b/functest/api/resources/v1/testcases.py
index 064661c8b..2dbf97e4d 100644
--- a/functest/api/resources/v1/testcases.py
+++ b/functest/api/resources/v1/testcases.py
@@ -26,7 +26,7 @@ from functest.api.base import ApiResource
from functest.api.common import api_utils, thread
from functest.cli.commands.cli_testcase import Testcase
from functest.api.database.v1.handlers import TasksHandler
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
import functest.utils.functest_utils as ft_utils
@@ -144,14 +144,14 @@ class V1Testcase(ApiResource):
def _update_logging_ini(self, task_id): # pylint: disable=no-self-use
""" Update the log file for each task"""
- config = ConfigParser.RawConfigParser()
- config.read(
+ rconfig = ConfigParser.RawConfigParser()
+ rconfig.read(
pkg_resources.resource_filename('functest', 'ci/logging.ini'))
- log_path = os.path.join(getattr(CONST, 'dir_results'),
+ log_path = os.path.join(getattr(config.CONF, 'dir_results'),
'{}.log'.format(task_id))
- config.set('handler_file', 'args', '("{}",)'.format(log_path))
+ rconfig.set('handler_file', 'args', '("{}",)'.format(log_path))
with open(
pkg_resources.resource_filename(
'functest', 'ci/logging.ini'), 'wb') as configfile:
- config.write(configfile)
+ rconfig.write(configfile)
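
The parser is renamed to rconfig so it no longer shadows the functest.utils.config import added above; the behaviour is unchanged: read ci/logging.ini, point the file handler at <dir_results>/<task_id>.log and write the file back in place. A standalone sketch of that round trip (file names are illustrative; the real code targets the packaged ci/logging.ini):

    import ConfigParser  # Python 2, as used by functest here

    rconfig = ConfigParser.RawConfigParser()
    rconfig.read('logging.ini')
    if not rconfig.has_section('handler_file'):
        # The packaged ci/logging.ini already ships this section.
        rconfig.add_section('handler_file')
    # Redirect the file handler to the per-task log file.
    rconfig.set('handler_file', 'args', '("/home/opnfv/functest/results/42.log",)')
    with open('logging.ini', 'wb') as configfile:
        rconfig.write(configfile)
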
diff --git a/functest/ci/check_deployment.py b/functest/ci/check_deployment.py
index bf43b5372..a475491a1 100644
--- a/functest/ci/check_deployment.py
+++ b/functest/ci/check_deployment.py
@@ -28,6 +28,7 @@ from snaps.openstack.utils import keystone_utils
from snaps.openstack.utils import neutron_utils
from snaps.openstack.utils import nova_utils
+from functest.utils import constants
from functest.opnfv_tests.openstack.snaps import snaps_utils
__author__ = "Jose Lausuch <jose.lausuch@ericsson.com>"
@@ -68,7 +69,7 @@ def get_auth_token(os_creds):
class CheckDeployment(object):
""" Check deployment class."""
- def __init__(self, rc_file='/home/opnfv/functest/conf/env_file'):
+ def __init__(self, rc_file=constants.ENV_FILE):
self.rc_file = rc_file
self.services = ('compute', 'network', 'image')
self.os_creds = None
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index be7a2db58..eaa860aaa 100644
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -11,12 +11,11 @@ general:
functest_conf: /home/opnfv/functest/conf
functest_data: /home/opnfv/functest/data
ims_data: /home/opnfv/functest/data/ims/
+ refstack_data: /home/opnfv/functest/data/refstack
router_data: /home/opnfv/functest/data/router/
functest_images: /home/opnfv/functest/images
rally_inst: /root/.rally
- env_file: /home/opnfv/functest/conf/env_file
-
openstack:
image_name: Cirros-0.4.0
image_name_alt: Cirros-0.4.0-1
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index ca101ce6b..651a38517 100644
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -29,10 +29,10 @@ import yaml
from functest.ci import tier_builder
from functest.core import testcase
+from functest.utils import constants
from functest.utils import env
LOGGER = logging.getLogger('functest.ci.run_tests')
-ENV_FILE = "/home/opnfv/functest/conf/env_file"
class Result(enum.Enum):
@@ -95,13 +95,12 @@ class Runner(object):
pkg_resources.resource_filename('functest', 'ci/testcases.yaml'))
@staticmethod
- def source_envfile(rc_file=ENV_FILE):
+ def source_envfile(rc_file=constants.ENV_FILE):
"""Source the env file passed as arg"""
if not os.path.isfile(rc_file):
LOGGER.debug("No env file %s found", rc_file)
return
with open(rc_file, "r") as rcfd:
- LOGGER.info("Sourcing env file %s", rc_file)
for line in rcfd:
var = (line.rstrip('"\n').replace('export ', '').split(
"=") if re.search(r'(.*)=(.*)', line) else None)
@@ -112,6 +111,8 @@ class Runner(object):
key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
os.environ[key] = value
+ rcfd.seek(0, 0)
+ LOGGER.info("Sourcing env file %s\n\n%s", rc_file, rcfd.read())
@staticmethod
def get_dict_by_test(testname):
@@ -228,10 +229,9 @@ class Runner(object):
if 'report' in kwargs:
self.report_flag = kwargs['report']
try:
+ LOGGER.info("Deployment description:\n\n%s\n", env.string())
+ self.source_envfile()
if 'test' in kwargs:
- LOGGER.debug("Sourcing the credential file...")
- self.source_envfile()
-
LOGGER.debug("Test args: %s", kwargs['test'])
if self.tiers.get_tier(kwargs['test']):
self.run_tier(self.tiers.get_tier(kwargs['test']))
@@ -268,13 +268,6 @@ class Runner(object):
"""To generate functest report showing the overall results"""
msg = prettytable.PrettyTable(
header_style='upper', padding_width=5,
- field_names=['env var', 'value'])
- for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
- 'CI_LOOP']:
- msg.add_row([env_var, env.get(env_var)])
- LOGGER.info("Deployment description:\n\n%s\n", msg)
- msg = prettytable.PrettyTable(
- header_style='upper', padding_width=5,
field_names=['test case', 'project', 'tier',
'duration', 'result'])
tiers = [tier] if tier else self.tiers.get_tiers()
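
Two behavioural changes in run_tests.py: source_envfile() now defaults to constants.ENV_FILE and, after exporting the variables, rewinds the file and logs its full content in one message (hence rcfd.seek(0, 0)); and the deployment description table is replaced by env.string(), printed once at the start of run() instead of inside the report. The line parsing itself is untouched: each `export KEY=VALUE` line is split on '=' and stripped of quotes before being put into os.environ. A minimal sketch of that parsing applied to two in-memory lines:

    import os
    import re

    RC_LINES = ['export OS_AUTH_URL="http://10.0.0.1:5000/v3"\n',
                'export OS_USERNAME=admin\n']

    for line in RC_LINES:
        var = (line.rstrip('"\n').replace('export ', '').split("=")
               if re.search(r'(.*)=(.*)', line) else None)
        if var:
            # Strip surrounding quotes and spaces from key and value.
            key = re.sub(r'^["\' ]*|[ \'"]*$', '', var[0])
            value = re.sub(r'^["\' ]*|[ \'"]*$', '', "".join(var[1:]))
            os.environ[key] = value

    print(os.environ['OS_AUTH_URL'])  # http://10.0.0.1:5000/v3
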
diff --git a/functest/cli/commands/cli_os.py b/functest/cli/commands/cli_os.py
index 0cf286238..d3e229c81 100644
--- a/functest/cli/commands/cli_os.py
+++ b/functest/cli/commands/cli_os.py
@@ -14,16 +14,16 @@ import click
from six.moves import urllib
from functest.ci import check_deployment
-from functest.utils.constants import CONST
+from functest.utils import constants
class OpenStack(object):
def __init__(self):
- self.os_auth_url = os.environ['OS_AUTH_URL']
+ self.os_auth_url = os.environ.get('OS_AUTH_URL', None)
self.endpoint_ip = None
self.endpoint_port = None
- self.openstack_creds = getattr(CONST, 'env_file')
+ self.openstack_creds = constants.ENV_FILE
if self.os_auth_url:
self.endpoint_ip = urllib.parse.urlparse(self.os_auth_url).hostname
self.endpoint_port = urllib.parse.urlparse(self.os_auth_url).port
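
OS_AUTH_URL is now read with os.environ.get(), so instantiating OpenStack without sourced credentials no longer raises KeyError; when the variable is set, the endpoint IP and port are still derived from it with urlparse. A short sketch of that derivation (the URL value is illustrative):

    from six.moves import urllib

    os_auth_url = 'http://10.0.0.1:5000/v3'
    endpoint_ip = urllib.parse.urlparse(os_auth_url).hostname  # '10.0.0.1'
    endpoint_port = urllib.parse.urlparse(os_auth_url).port    # 5000
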
diff --git a/functest/core/vnf.py b/functest/core/vnf.py
index 15065f730..412e13156 100644
--- a/functest/core/vnf.py
+++ b/functest/core/vnf.py
@@ -21,6 +21,7 @@ from snaps.openstack.utils import keystone_utils
from snaps.openstack.tests import openstack_tests
from functest.core import testcase
+from functest.utils import constants
__author__ = ("Morgan Richomme <morgan.richomme@orange.com>, "
"Valentin Boucher <valentin.boucher@orange.com>")
@@ -47,7 +48,6 @@ class VnfOnBoarding(testcase.TestCase):
"""Base model for VNF test cases."""
__logger = logging.getLogger(__name__)
- env_file = "/home/opnfv/functest/conf/env_file"
def __init__(self, **kwargs):
super(VnfOnBoarding, self).__init__(**kwargs)
@@ -111,7 +111,7 @@ class VnfOnBoarding(testcase.TestCase):
"Prepare VNF: %s, description: %s", self.case_name,
self.tenant_description)
snaps_creds = openstack_tests.get_credentials(
- os_env_file=self.env_file)
+ os_env_file=constants.ENV_FILE)
self.os_project = OpenStackProject(
snaps_creds,
diff --git a/functest/energy/energy.py b/functest/energy/energy.py
index d5f6871d0..a26522119 100644
--- a/functest/energy/energy.py
+++ b/functest/energy/energy.py
@@ -107,7 +107,7 @@ class EnergyRecorder(object):
uri_comp = "/recorders/environment/"
uri_comp += urllib.parse.quote_plus(environment)
- if creds_usr != "" and creds_pass != "":
+ if creds_usr and creds_pass:
energy_recorder_api_auth = (creds_usr, creds_pass)
else:
energy_recorder_api_auth = None
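
The credentials check moves from "not an empty string" to plain truthiness, so the basic-auth tuple is only built when both values are actually set; a None value now disables authentication just like an empty string does. For example:

    creds_usr, creds_pass = None, 'secret'

    # Old check: None != "" and 'secret' != ""  -> True, builds (None, 'secret')
    old_auth = (creds_usr, creds_pass) if creds_usr != "" and creds_pass != "" else None
    # New check: None and 'secret'  -> falsy, authentication disabled
    new_auth = (creds_usr, creds_pass) if creds_usr and creds_pass else None

    print(old_auth, new_auth)  # (None, 'secret') None
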
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
index add0f2437..b2213c941 100644
--- a/functest/opnfv_tests/openstack/rally/rally.py
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -28,7 +28,7 @@ from functest.core import testcase
from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
from snaps.config.flavor import FlavorConfig
@@ -48,19 +48,19 @@ class RallyBase(testcase.TestCase):
# pylint: disable=too-many-instance-attributes
TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
- GLANCE_IMAGE_NAME = getattr(CONST, 'openstack_image_name')
- GLANCE_IMAGE_FILENAME = getattr(CONST, 'openstack_image_file_name')
- GLANCE_IMAGE_PATH = os.path.join(getattr(CONST, 'dir_functest_images'),
- GLANCE_IMAGE_FILENAME)
- GLANCE_IMAGE_FORMAT = getattr(CONST, 'openstack_image_disk_format')
- GLANCE_IMAGE_USERNAME = getattr(CONST, 'openstack_image_username')
- GLANCE_IMAGE_EXTRA_PROPERTIES = getattr(CONST,
- 'openstack_extra_properties', {})
- FLAVOR_NAME = getattr(CONST, 'rally_flavor_name')
- FLAVOR_ALT_NAME = getattr(CONST, 'rally_flavor_alt_name')
+ GLANCE_IMAGE_NAME = getattr(config.CONF, 'openstack_image_name')
+ GLANCE_IMAGE_FILENAME = getattr(config.CONF, 'openstack_image_file_name')
+ GLANCE_IMAGE_PATH = os.path.join(getattr(
+ config.CONF, 'dir_functest_images'), GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = getattr(config.CONF, 'openstack_image_disk_format')
+ GLANCE_IMAGE_USERNAME = getattr(config.CONF, 'openstack_image_username')
+ GLANCE_IMAGE_EXTRA_PROPERTIES = getattr(
+ config.CONF, 'openstack_extra_properties', {})
+ FLAVOR_NAME = getattr(config.CONF, 'rally_flavor_name')
+ FLAVOR_ALT_NAME = getattr(config.CONF, 'rally_flavor_alt_name')
FLAVOR_RAM = 512
FLAVOR_RAM_ALT = 1024
- FLAVOR_EXTRA_SPECS = getattr(CONST, 'flavor_extra_specs', None)
+ FLAVOR_EXTRA_SPECS = getattr(config.CONF, 'flavor_extra_specs', None)
if FLAVOR_EXTRA_SPECS:
FLAVOR_RAM = 1024
FLAVOR_RAM_ALT = 2048
@@ -77,14 +77,14 @@ class RallyBase(testcase.TestCase):
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4
- RESULTS_DIR = os.path.join(getattr(CONST, 'dir_results'), 'rally')
+ RESULTS_DIR = os.path.join(getattr(config.CONF, 'dir_results'), 'rally')
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
- RALLY_PRIVATE_NET_NAME = getattr(CONST, 'rally_network_name')
- RALLY_PRIVATE_SUBNET_NAME = getattr(CONST, 'rally_subnet_name')
- RALLY_PRIVATE_SUBNET_CIDR = getattr(CONST, 'rally_subnet_cidr')
- RALLY_ROUTER_NAME = getattr(CONST, 'rally_router_name')
+ RALLY_PRIVATE_NET_NAME = getattr(config.CONF, 'rally_network_name')
+ RALLY_PRIVATE_SUBNET_NAME = getattr(config.CONF, 'rally_subnet_name')
+ RALLY_PRIVATE_SUBNET_CIDR = getattr(config.CONF, 'rally_subnet_cidr')
+ RALLY_ROUTER_NAME = getattr(config.CONF, 'rally_router_name')
def __init__(self, **kwargs):
"""Initialize RallyBase object."""
@@ -451,9 +451,11 @@ class RallyBase(testcase.TestCase):
LOGGER.debug("Creating network '%s'...", network_name)
- rally_network_type = getattr(CONST, 'rally_network_type', None)
- rally_physical_network = getattr(CONST, 'rally_physical_network', None)
- rally_segmentation_id = getattr(CONST, 'rally_segmentation_id', None)
+ rally_network_type = getattr(config.CONF, 'rally_network_type', None)
+ rally_physical_network = getattr(
+ config.CONF, 'rally_physical_network', None)
+ rally_segmentation_id = getattr(
+ config.CONF, 'rally_segmentation_id', None)
network_creator = deploy_utils.create_network(
self.os_creds, NetworkConfig(
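
The rally class attributes are read from config.CONF rather than the old CONST pseudo-object, keeping the getattr pattern and supplying a default for the optional keys (extra properties, flavor extra specs, network type, physical network, segmentation id). A small sketch of how the optional-key pattern drives the flavor sizing above (only meaningful inside the functest package, since it imports functest.utils.config):

    from functest.utils import config

    # Optional key: None when absent from the functest configuration.
    flavor_extra_specs = getattr(config.CONF, 'flavor_extra_specs', None)
    # As in RallyBase above: bump the flavor RAM when extra specs are set.
    flavor_ram = 1024 if flavor_extra_specs else 512
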
diff --git a/functest/opnfv_tests/openstack/refstack_client/defcore.txt b/functest/opnfv_tests/openstack/refstack_client/defcore.txt
deleted file mode 100644
index e958b47cd..000000000
--- a/functest/opnfv_tests/openstack/refstack_client/defcore.txt
+++ /dev/null
@@ -1,313 +0,0 @@
-# Set of DefCore tempest test cases not flagged and required.
-# According to https://github.com/openstack/interop/blob/master/doc/source/guidelines/2017.09.rst,
-# some tests are still flagged due to outstanding bugs in the Tempest library,
-# particularly tests that require SSH. Refstack developers
-# are working on correcting these bugs upstream. Please note that although some tests
-# are flagged because of bugs, there is still an expectation that the capabilities
-# covered by the tests are available.
-# It only contains Openstack core compute (no object storage)
-# The approved guidelines (2017.09) are valid for Mitaka, Newton, Ocata, Pike releases of OpenStack
-# The list can be generated using the Rest API from RefStack project:
-# https://refstack.openstack.org/api/v1/guidelines/2017.09/tests?target=compute&type=required&alias=true&flag=false
-tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors[id-e36c0eaa-dff5-4082-ad1f-3f9a80aa3f59]
-tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail[id-6e85fde4-b3cd-4137-ab72-ed5f418e8c24]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image[id-3731d080-d4c5-4872-b41a-64d0d0021314]
-tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_image_specify_multibyte_character_image_name[id-3b7c6fe4-dfe7-477c-9243-b06359db51e6]
-tempest.api.compute.servers.test_availability_zone.AZV2TestJSON.test_get_availability_zone_list_with_non_admin_user[id-a8333aa2-205c-449f-a828-d38c2489bf25]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_host_name_is_same_as_server_name[id-ac1ad47f-984b-4441-9274-c9079b7a0666]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers[id-9a438d88-10c6-4bcd-8b5b-5b6e25e1346f]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail[id-585e934c-448e-43c4-acbf-d06a9b899997]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_created_server_vcpus[id-cbc0f52f-05aa-492b-bdc1-84b575ca294b]
-tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details[id-5de47127-9977-400a-936f-abcfbec1218f]
-tempest.api.compute.servers.test_delete_server.DeleteServersTestJSON.test_delete_active_server[id-925fdfb4-5b13-47ea-ac8a-c36ae6fddb05]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_get_instance_action[id-aacc71ca-1d70-4aa5-bbf6-0ff71470e43c]
-tempest.api.compute.servers.test_instance_actions.InstanceActionsTestJSON.test_list_instance_actions[id-77ca5cc5-9990-45e0-ab98-1de8fead201a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_flavor[id-80c574cc-0925-44ba-8602-299028357dd9]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_image[id-b3304c3b-97df-46d2-8cd3-e2b6659724e7]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_name[id-f9eb2b70-735f-416c-b260-9914ac6181e4]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_filter_by_server_status[id-de2612ab-b7dd-4044-b0b1-d2539601911f]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_detailed_limit_results[id-67aec2d0-35fe-4503-9f92-f13272b867ed]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_active_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_flavor[id-573637f5-7325-47bb-9144-3476d0416908]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_image[id-05e8a8e7-9659-459a-989d-92c2f501f4ba]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_limit[id-614cdfc1-d557-4bac-915b-3e67b48eee76]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_name[id-9b067a7b-7fee-4f6a-b29c-be43fe18fc5a]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filter_by_server_status[id-ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e]
-tempest.api.compute.servers.test_list_server_filters.ListServerFiltersTestJSON.test_list_servers_filtered_by_name_wildcard[id-e9f624ee-92af-4562-8bec-437945a18dcb]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_future_date[id-74745ad8-b346-45b5-b9b8-509d7447fc1f]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_changes_since_invalid_date[id-87d12517-e20a-4c9c-97b6-dd1628d6d6c9]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_greater_than_actual_count[id-d47c17fb-eebd-4287-8e95-f20a7e627b18]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_negative_value[id-62610dd9-4713-4ee0-8beb-fd2c1aa7f950]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_limits_pass_string[id-679bc053-5e70-4514-9800-3dfab1a380a6]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_flavor[id-5913660b-223b-44d4-a651-a0fbfd44ca75]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_image[id-ff01387d-c7ad-47b4-ae9e-64fa214638fe]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_by_non_existing_server_name[id-e2c77c4a-000a-4af3-a0bd-629a328bde7c]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_detail_server_is_deleted[id-93055106-2d34-46fe-af68-d9ddbf7ee570]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_status_non_existing[id-fcdf192d-0f74-4d89-911f-1ec002b822c4]
-tempest.api.compute.servers.test_list_servers_negative.ListServersNegativeTestJSON.test_list_servers_with_a_deleted_server[id-24a26f1a-1ddc-4eea-b0d7-a90cc874ad8f]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_lock_unlock_server[id-80a8094c-211e-440a-ab88-9e59d556c7ee]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard[id-2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server[id-aaa6cdf3-55a7-461a-add9-1c8596b9a07c]
-tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_stop_start_server[id-af8eafd4-38a7-4a4b-bdbc-75145a580560]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_delete_server_metadata_item[id-127642d6-4c7b-4486-b7cd-07265a378658]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_get_server_metadata_item[id-3043c57d-7e0e-49a6-9a96-ad569c265e6a]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_list_server_metadata[id-479da087-92b3-4dcf-aeb3-fd293b2d14ce]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata[id-211021f6-21de-4657-a68f-908878cfe251]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_set_server_metadata_item[id-58c02d4f-5c67-40be-8744-d3fa5982eb1c]
-tempest.api.compute.servers.test_server_metadata.ServerMetadataTestJSON.test_update_server_metadata[id-344d981e-0c33-4997-8a5d-6c1d803e4134]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_server_with_admin_password[id-b92d5ec7-b1dd-44a2-87e4-45e888c46ef0]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_specify_keypair[id-f9e15296-d7f9-4e62-b53f-a04e89160833]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_create_with_existing_server_name[id-8fea6be7-065e-47cf-89b8-496e6f96c699]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_access_server_address[id-89b90870-bc13-4b73-96af-f9d4f2b70077]
-tempest.api.compute.servers.test_servers.ServersTestJSON.test_update_server_name[id-5e6ccff8-349d-4852-a8b3-055df7988dd2]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_numeric_server_name[id-fd57f159-68d6-4c2a-902b-03070828a87e]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_metadata_exceeds_length_limit[id-7fc74810-0bd2-4cd7-8244-4f33a9db865a]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_server_name_length_exceeds_256[id-c3e0fb12-07fc-4d76-a22e-37409887afe8]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_flavor[id-18f5227f-d155-4429-807c-ccb103887537]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_image[id-fcba1052-0a50-4cf3-b1ac-fae241edf02f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_create_with_invalid_network_uuid[id-4e72dc2d-44c5-4336-9667-f7972e95c402]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_id_exceeding_length_limit[id-f4d7279b-5fd2-4bf2-9ba4-ae35df0d18c5]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_delete_server_pass_negative_id[id-75f79124-277c-45e6-a373-a1d6803f4cc4]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_get_non_existent_server[id-3436b02f-1b1e-4f03-881e-c6a602327439]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_invalid_ip_v6_address[id-5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_reboot_non_existent_server[id-d4c023a0-9c55-4747-9dd5-413b820143c7]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_non_existent_server[id-d86141a7-906e-4731-b187-d64a2ea61422]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_rebuild_reboot_deleted_server[id-98fa0458-1485-440f-873b-fe7f0d714930]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_server_name_blank[id-dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_stop_non_existent_server[id-a31460a9-49e1-42aa-82ee-06e0bb7c2d03]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_name_of_non_existent_server[id-aa8eed43-e2cb-4ebf-930b-da14f6a21d81]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_name_length_exceeds_256[id-5c8e244c-dada-4590-9944-749c455b431f]
-tempest.api.compute.servers.test_servers_negative.ServersNegativeTestJSON.test_update_server_set_empty_name[id-38204696-17c6-44da-9590-40f87fb5a899]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas[id-9bfecac7-b966-4f47-913f-1a9e2c12134a]
-tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas[id-f1ef0a97-dbbb-4cca-adc5-c9fbc4f76107]
-tempest.api.compute.test_versions.TestVersions.test_list_api_versions[id-6c0a0990-43b6-4529-9b61-5fd8daf7c55c]
-# tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_attach_detach_volume[id-52e9045a-e90d-4c0d-9087-79d657faffff]
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeTestJSON.test_list_get_volume_attachments[id-7fa563fe-f0f7-43eb-9e22-a1ece036b513]
-tempest.api.identity.v3.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_media_types[id-657c1970-4722-4189-8831-7325f3bc4265]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_resources[id-b9232f5e-d9e5-4d97-b96c-28d3db4de1bd]
-tempest.api.identity.v3.test_api_discovery.TestApiDiscovery.test_api_version_statuses[id-8879a470-abfb-47bb-bb8d-5a7fd279ad1e]
-tempest.api.identity.v3.test_tokens.TokensV3Test.test_create_token[id-6f8e4436-fc96-4282-8122-e41df57197a9]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_delete_image[id-f848bb94-1c6e-45a4-8726-39e3a5b23535]
-tempest.api.image.v2.test_images.BasicOperationsImagesTest.test_update_image[id-f66891a7-a35c-41a8-b590-a065c2a1caa6]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListImagesTest.test_index_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_image_schema[id-622b925c-479f-4736-860d-adeaf13bc371]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_get_images_schema[id-25c8d7b2-df21-460f-87ac-93130bcdc684]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_container_format[id-9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_disk_format[id-4a4735a7-f22f-49b6-b0d9-66e1ef7453eb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_limit[id-e914a891-3cc8-4b40-ad32-e0a39ffbddbb]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_min_max_size[id-4ad8c157-971a-4ba8-aa84-ed61154b1e7f]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_size[id-cf1b9a48-8340-480e-af7b-fe7e17690876]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_status[id-7fc9e369-0f58-4d05-9aa5-0969e2d59d15]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_images_param_visibility[id-7a95bb92-d99e-4b12-9718-7bc6ab73e6d2]
-tempest.api.image.v2.test_images.ListUserImagesTest.test_list_no_params[id-1e341d7a-90a9-494c-b143-2cdf2aeb6aee]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_image_null_id[id-32248db1-ab88-4821-9604-c7c369f1f88c]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_delete_non_existing_image[id-6fe40f1c-57bd-4918-89cc-8500f850f3de]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_delete_deleted_image[id-e57fc127-7ba0-4693-92d7-1d8a05ebcba9]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_image_null_id[id-ef45000d-0a72-4781-866d-4cb7bf2562ad]
-tempest.api.image.v2.test_images_negative.ImagesNegativeTest.test_get_non_existent_image[id-668743d5-08ad-4480-b2b8-15da34f81d9f]
-tempest.api.image.v2.test_images_tags.ImagesTagsTest.test_update_delete_tags_for_image[id-10407036-6059-4f95-a2cd-cbbbee7ed329]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_delete_non_existing_tag[id-39c023a2-325a-433a-9eea-649bf1414b19]
-tempest.api.image.v2.test_images_tags_negative.ImagesTagsNegativeTest.test_update_tags_for_non_existing_image[id-8cd30f82-6f9a-4c6e-8034-c1b51fba43d9]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221]
-tempest.api.network.test_networks.NetworksTestJSON.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a]
-tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc]
-tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-tempest.api.network.test_networks.NetworksTestJSON.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1]
-tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e]
-tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f]
-tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9]
-tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6]
-tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6]
-tempest.api.network.test_subnetpools_extensions.SubnetPoolsTestJSON.test_create_list_show_update_delete_subnetpools[id-62595970-ab1c-4b7f-8fcc-fddfe55e9811]
-tempest.api.volume.test_availability_zone.AvailabilityZoneTestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
-tempest.api.volume.test_availability_zone.AvailabilityZoneV2TestJSON.test_get_availability_zone_list[id-01f1ae88-eba9-4c6b-a011-6f7ace06b725]
-tempest.api.volume.test_extensions.ExtensionsTestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
-tempest.api.volume.test_extensions.ExtensionsV2TestJSON.test_list_extensions[id-94607eb0-43a5-47ca-82aa-736b41bd2e2c]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_update_show_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_snapshot_metadata.SnapshotMetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_create_get_delete_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_crud_snapshot_metadata[id-a2f20f99-e363-4584-be97-bc33afb1a56c]
-tempest.api.volume.test_snapshot_metadata.SnapshotV2MetadataTestJSON.test_update_snapshot_metadata_item[id-e8ff85c5-8f97-477f-806a-3ac364a949ed]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_update_show_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volume_metadata.VolumesMetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_create_get_delete_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_crud_volume_metadata[id-6f5b125b-f664-44bf-910f-751591fe5769]
-tempest.api.volume.test_volume_metadata.VolumesV2MetadataTest.test_update_volume_metadata_item[id-862261c5-8df4-475a-8c21-946e50e36a20]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
-tempest.api.volume.test_volumes_actions.VolumesActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_reserve_unreserve_volume[id-92c4ef64-51b2-40c0-9f7e-4749fbaaba33]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_bootable[id-63e21b4c-0a0c-41f6-bfc3-7c2816815599]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_readonly_update[id-fff74e1e-5bd3-4b33-9ea9-24c103bc3f59]
-tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_volume_upload[id-d8f1ca95-3d5b-44a3-b8ca-909691c9532d]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
-tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete[id-27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_as_clone[id-3f591b4a-7dc6-444c-bd51-77469506b3a1]
-tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image[id-54a01030-c7fc-447c-86ee-c1182beae638]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
-tempest.api.volume.test_volumes_list.VolumesListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list[id-0b6ddd39-b948-471f-8038-4787978747c4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_by_name[id-a28e8da4-0b56-472f-87a8-0f4d3f819c02]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_by_name[id-2de3a6d4-12aa-403b-a8f2-fdeb42a89623]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_param_display_name_and_status[id-777c87c1-2fc4-4883-8b8e-5c0b951d1ec8]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_display_name_and_status[id-856ab8ca-6009-4c37-b691-be1065528ad4]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_detail_param_metadata[id-1ca92d3c-4a8e-4b43-93f5-e4c7fb3b291d]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_details[id-adcbb5a7-5ad8-4b61-bd10-5380e111a877]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_with_param_metadata[id-b5ebea1b-0603-40a0-bb41-15fcd0a53214]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_availability_zone[id-c0cfa863-3020-40d7-b587-e35f597d5d87]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_by_status[id-39654e13-734c-4dab-95ce-7613bf8407ce]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_availability_zone[id-e1b80d13-94f0-4ba2-a40e-386af29f8db1]
-tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volumes_list_details_by_status[id-2943f712-71ec-482a-bf49-d5ca06216b9f]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
-tempest.api.volume.test_volumes_negative.VolumesNegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_invalid_size[id-1ed83a8a-682d-4dfb-a30e-ee63ffd6c049]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_snapshot_id[id-0c36f6ae-4604-4017-b0a9-34fdc63096f9]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_source_volid[id-47c73e08-4be8-45bb-bfdf-0c4e79b88344]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_nonexistent_volume_type[id-10254ed8-3849-454e-862e-3ab8e6aa01d2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_out_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_negative[id-8b472729-9eba-446e-a83b-916bdb34bef7]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_with_size_zero[id-41331caa-eaf4-4001-869d-bc18c1869360]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_create_volume_without_passing_size[id-9387686f-334f-4d31-a439-33494b9e2683]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_invalid_volume_id[id-1f035827-7c32-4019-9240-b4ec2dbd9dfd]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_delete_volume_without_passing_volume_id[id-441a1550-5d44-4b30-af0f-a6d402f52026]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_invalid_volume_id[id-30799cfd-7ee4-446c-b66c-45b383ed211b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_get_volume_without_passing_volume_id[id-c6c3db06-29ad-4e91-beb0-2ab195fe49e3]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_invalid_status[id-ba94b27b-be3f-496c-a00e-0283b373fa75]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_detail_with_nonexistent_name[id-9ca17820-a0e7-4cbd-a7fa-f4468735e359]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_invalid_status[id-143b279b-7522-466b-81be-34a87d564a7c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_list_volumes_with_nonexistent_name[id-0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_negative_volume_status[id-449c4ed2-ecdd-47bb-98dc-072aeccf158c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_reserve_volume_with_nonexistent_volume_id[id-ac6084c0-0546-45f9-b284-38a367e0e0e2]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_unreserve_volume_with_nonexistent_volume_id[id-eb467654-3dc1-4a72-9b46-47c29d22654c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_empty_volume_id[id-72aeca85-57a5-4c1f-9057-f320f9ea575b]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_invalid_volume_id[id-e66e40d6-65e6-4e75-bdc7-636792fa152d]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_update_volume_with_nonexistent_volume_id[id-0186422c-999a-480e-a026-6a665744c30c]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_delete_nonexistent_volume_id[id-555efa6e-efcd-44ef-8a3b-4a7ca4837a29]
-tempest.api.volume.test_volumes_negative.VolumesV2NegativeTest.test_volume_get_nonexistent_volume_id[id-f131c586-9448-44a4-a8b0-54ca838aa43e]
-tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
-tempest.api.volume.test_volumes_snapshots.VolumesSnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshot_create_get_list_update_delete[id-2a8abbe4-d871-46db-b049-c41f5af8216e]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots.VolumesV2SnapshotTestJSON.test_volume_from_snapshot[id-677863d1-3142-456d-b6ac-9924f667a7f4]
-tempest.api.volume.test_volumes_snapshots_list.VolumesSnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots_list.VolumesSnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_details_with_params[id-220a1022-1fcd-4a74-a7bd-6b859156cda2]
-tempest.api.volume.test_volumes_snapshots_list.VolumesV2SnapshotListTestJSON.test_snapshots_list_with_params[id-59f41f43-aebf-48a9-ab5d-d76340fab32b]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesSnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesSnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_with_nonexistent_volume_id[id-e3e466af-70ab-4f4b-a967-ab04e3532ea7]
-tempest.api.volume.test_volumes_snapshots_negative.VolumesV2SnapshotNegativeTestJSON.test_create_snapshot_without_passing_volume_id[id-bb9da53e-d335-4309-9c15-7e76fd5e4d6d]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_pagination[id-e9138a2c-f67b-4796-8efa-635c196d01de]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_details_with_multiple_params[id-2a7064eb-b9c3-429b-b888-33928fc5edd3]
-tempest.api.volume.v2.test_volumes_list.VolumesV2ListTestJSON.test_volume_list_pagination[id-af55e775-8e4b-4feb-8719-215c43b0238c]
diff --git a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
index beb1a5a66..ada6ebaba 100644
--- a/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
+++ b/functest/opnfv_tests/openstack/refstack_client/refstack_client.py
@@ -12,6 +12,7 @@
from __future__ import division
import argparse
+import ConfigParser
import logging
import os
import re
@@ -19,14 +20,13 @@ import sys
import subprocess
import time
-import pkg_resources
-
from functest.core import testcase
from functest.energy import energy
-from functest.opnfv_tests.openstack.refstack_client.tempest_conf \
- import TempestConf
from functest.opnfv_tests.openstack.tempest import conf_utils
-import functest.utils.functest_utils as ft_utils
+from functest.opnfv_tests.openstack.tempest import tempest
+from functest.utils import config
+from functest.utils import functest_utils
+
__author__ = ("Matthew Li <matthew.lijun@huawei.com>,"
"Linda Wang <wangwulin@huawei.com>")
@@ -39,59 +39,48 @@ class RefstackClient(testcase.TestCase):
"""RefstackClient testcase implementation class."""
# pylint: disable=too-many-instance-attributes
+ defcorelist = os.path.join(
+ getattr(config.CONF, 'dir_refstack_data'), 'defcore.txt')
+
def __init__(self, **kwargs):
"""Initialize RefstackClient testcase object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "refstack_defcore"
super(RefstackClient, self).__init__(**kwargs)
- self.tempestconf = None
- self.conf_path = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.functest_test = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests')
- self.defcore_list = 'openstack/refstack_client/defcore.txt'
- self.confpath = os.path.join(self.functest_test,
- self.conf_path)
- self.defcorelist = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
- self.testlist = None
- self.insecure = ''
- if ('https' in os.environ['OS_AUTH_URL'] and
- os.getenv('OS_INSECURE', '').lower() == 'true'):
- self.insecure = '-k'
-
- def generate_conf(self):
- """ Generate tempest.conf file to run tempest"""
- if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):
- os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)
+ self.resdir = os.path.join(
+ getattr(config.CONF, 'dir_results'), 'refstack')
+ self.conf_path = os.path.join(self.resdir, 'refstack_tempest.conf')
- self.tempestconf = TempestConf()
- self.tempestconf.generate_tempestconf()
-
- def run_defcore(self, conf, testlist):
+ @staticmethod
+ def run_defcore(conf, testlist):
"""Run defcore sys command."""
+ insecure = ''
+ if ('https' in os.environ['OS_AUTH_URL'] and
+ os.getenv('OS_INSECURE', '').lower() == 'true'):
+ insecure = '-k'
cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(self.insecure, conf, testlist))
+ .format(insecure, conf, testlist))
LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
- ft_utils.execute_command(cmd)
+ functest_utils.execute_command(cmd)
def run_defcore_default(self):
"""Run default defcore sys command."""
- options = ["-v"] if not self.insecure else ["-v", self.insecure]
- cmd = (["refstack-client", "test", "-c", self.confpath] +
+ insecure = ''
+ if ('https' in os.environ['OS_AUTH_URL'] and
+ os.getenv('OS_INSECURE', '').lower() == 'true'):
+ insecure = '-k'
+ options = ["-v"] if not insecure else ["-v", insecure]
+ cmd = (["refstack-client", "test", "-c", self.conf_path] +
options + ["--test-list", self.defcorelist])
LOGGER.info("Starting Refstack_defcore test case: '%s'.", cmd)
-
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
- "refstack.log"), 'w+') as f_stdout:
+ with open(os.path.join(self.resdir, "refstack.log"), 'w+') as f_stdout:
subprocess.call(cmd, shell=False, stdout=f_stdout,
stderr=subprocess.STDOUT)
def parse_refstack_result(self):
"""Parse Refstack results."""
try:
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ with open(os.path.join(self.resdir,
"refstack.log"), 'r') as logfile:
for line in logfile.readlines():
if 'Tests' in line:
@@ -99,7 +88,7 @@ class RefstackClient(testcase.TestCase):
if re.search(r"\} tempest\.", line):
LOGGER.info(line.replace('\n', ''))
- with open(os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
+ with open(os.path.join(self.resdir,
"refstack.log"), 'r') as logfile:
output = logfile.read()
@@ -140,10 +129,49 @@ class RefstackClient(testcase.TestCase):
"skipped": skipped_testcases}
except Exception: # pylint: disable=broad-except
self.result = 0
-
LOGGER.info("Testcase %s success_rate is %s%%",
self.case_name, self.result)
+ def configure_tempest_defcore(self):
+ # pylint: disable=too-many-arguments
+ """
+ Add/update needed parameters into tempest.conf file
+ """
+ resources = tempest.TempestResourcesManager().create(
+ create_project=True, use_custom_images=True,
+ use_custom_flavors=True)
+ verifier_id = conf_utils.get_verifier_id()
+ deployment_id = conf_utils.get_verifier_deployment_id()
+ deployment_dir = conf_utils.get_verifier_deployment_dir(
+ verifier_id, deployment_id)
+ conf_file = conf_utils.configure_verifier(deployment_dir)
+ conf_utils.configure_tempest_update_params(
+ conf_file, resources.get("network_name"),
+ resources.get("image_id"), resources.get("flavor_id"))
+ LOGGER.debug(
+ "Updating selected tempest.conf parameters for defcore...")
+ rconfig = ConfigParser.RawConfigParser()
+ rconfig.read(conf_file)
+ rconfig.set(
+ 'DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
+ rconfig.set('oslo_concurrency', 'lock_path',
+ '{}/lock_files'.format(deployment_dir))
+ conf_utils.generate_test_accounts_file(
+ tenant_id=resources.get("project_id"))
+ rconfig.set('auth', 'test_accounts_file',
+ conf_utils.TEST_ACCOUNTS_FILE)
+ rconfig.set('scenario', 'img_dir', '{}'.format(deployment_dir))
+ rconfig.set('scenario', 'img_file', 'tempest-image')
+ rconfig.set('compute', 'image_ref', resources.get("image_id"))
+ rconfig.set('compute', 'image_ref_alt', resources.get("image_id_alt"))
+ rconfig.set('compute', 'flavor_ref', resources.get("flavor_id"))
+ rconfig.set('compute', 'flavor_ref_alt',
+ resources.get("flavor_id_alt"))
+ if not os.path.exists(self.resdir):
+ os.makedirs(self.resdir)
+ with open(self.conf_path, 'w') as config_fd:
+ rconfig.write(config_fd)
+
@energy.enable_recording
def run(self, **kwargs):
"""
@@ -153,31 +181,20 @@ class RefstackClient(testcase.TestCase):
functest testcase run refstack_defcore
"""
self.start_time = time.time()
-
try:
# Make sure that Tempest is configured
- if not self.tempestconf:
- self.generate_conf()
+ self.configure_tempest_defcore()
self.run_defcore_default()
self.parse_refstack_result()
res = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
LOGGER.exception("Error with run")
res = testcase.TestCase.EX_RUN_ERROR
- finally:
- self.tempestconf.clean()
-
self.stop_time = time.time()
return res
- def _prep_test(self):
- """Check that the config file exists."""
- if not os.path.isfile(self.confpath):
- LOGGER.error("Conf file not valid: %s", self.confpath)
- if not os.path.isfile(self.testlist):
- LOGGER.error("testlist file not valid: %s", self.testlist)
-
- def main(self, **kwargs):
+ @staticmethod
+ def main(**kwargs):
"""
Execute RefstackClient testcase manually.
@@ -188,21 +205,24 @@ class RefstackClient(testcase.TestCase):
python tempest_conf.py
"""
try:
- self.confpath = kwargs['config']
- self.testlist = kwargs['testlist']
+ conf_path = kwargs['config']
+ if not os.path.isfile(conf_path):
+ LOGGER.error("Conf file not valid: %s", conf_path)
+ return testcase.TestCase.EX_RUN_ERROR
+ testlist = kwargs['testlist']
+ if not os.path.isfile(testlist):
+ LOGGER.error("testlist file not valid: %s", testlist)
+ return testcase.TestCase.EX_RUN_ERROR
except KeyError as exc:
LOGGER.error("Cannot run refstack client. Please check "
"%s", exc)
- return self.EX_RUN_ERROR
+ return testcase.TestCase.EX_RUN_ERROR
try:
- self._prep_test()
- self.run_defcore(self.confpath, self.testlist)
- res = testcase.TestCase.EX_OK
+ RefstackClient.run_defcore(conf_path, testlist)
except Exception as exc: # pylint: disable=broad-except
LOGGER.error('Error with run: %s', exc)
- res = testcase.TestCase.EX_RUN_ERROR
-
- return res
+ return testcase.TestCase.EX_RUN_ERROR
+ return testcase.TestCase.EX_OK
class RefstackClientParser(object): # pylint: disable=too-few-public-methods
@@ -210,28 +230,16 @@ class RefstackClientParser(object): # pylint: disable=too-few-public-methods
def __init__(self):
"""Initialize helper object."""
- self.functest_test = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests')
- self.conf_path = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.defcore_list = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
- self.confpath = os.path.join(self.functest_test,
- self.conf_path)
- self.defcorelist = os.path.join(self.functest_test,
- self.defcore_list)
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'-c', '--config',
- help='the file path of refstack_tempest.conf',
- default=self.confpath)
+ help='the file path of refstack_tempest.conf')
self.parser.add_argument(
'-t', '--testlist',
help='Specify the file path or URL of a test list text file. '
'This test list will contain specific test cases that '
'should be tested.',
- default=self.defcorelist)
+ default=RefstackClient.defcorelist)
def parse_args(self, argv=None):
"""Parse command line arguments."""
diff --git a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py b/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
deleted file mode 100644
index 73a2685c9..000000000
--- a/functest/opnfv_tests/openstack/refstack_client/tempest_conf.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-
-# matthew.lijun@huawei.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-""" Used to generate tempest.conf """
-
-import logging
-import pkg_resources
-
-from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.opnfv_tests.openstack.tempest.tempest \
- import TempestResourcesManager
-
-LOGGER = logging.getLogger(__name__)
-
-
-class TempestConf(object):
- """ TempestConf class"""
- def __init__(self, **kwargs):
- self.verifier_id = conf_utils.get_verifier_id()
- self.deployment_id = conf_utils.get_verifier_deployment_id()
- self.deployment_dir = conf_utils.get_verifier_deployment_dir(
- self.verifier_id, self.deployment_id)
- self.confpath = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- self.resources = TempestResourcesManager(**kwargs)
-
- def generate_tempestconf(self):
- """ Generate tempest.conf file"""
- try:
- resources = self.resources.create(create_project=True,
- use_custom_images=True,
- use_custom_flavors=True)
- conf_utils.configure_tempest_defcore(
- self.deployment_dir,
- network_name=resources.get("network_name"),
- image_id=resources.get("image_id"),
- flavor_id=resources.get("flavor_id"),
- image_id_alt=resources.get("image_id_alt"),
- flavor_id_alt=resources.get("flavor_id_alt"),
- tenant_id=resources.get("project_id"))
- except Exception as err: # pylint: disable=broad-except
- LOGGER.error("error with generating refstack client "
- "reference tempest conf file: %s", err)
-
- def main(self):
- """ The main function called by entry point"""
- try:
- self.generate_tempestconf()
- LOGGER.info("a reference tempest conf file generated "
- "at %s", self.confpath)
- except Exception as err: # pylint: disable=broad-except
- LOGGER.error('Error with run: %s', err)
-
- def clean(self):
- """Clean up the resources"""
- self.resources.cleanup()
-
-
-def main():
- """Entry point"""
- logging.basicConfig()
- tempestconf = TempestConf()
- tempestconf.main()
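With TempestConf removed, defcore configuration now lives in RefstackClient.configure_tempest_defcore(); a hedged sketch of the driving sequence, the same calls run() makes but shown here without its try/except:

from functest.opnfv_tests.openstack.refstack_client import refstack_client

client = refstack_client.RefstackClient()
# Writes <dir_results>/refstack/refstack_tempest.conf from the verifier conf.
client.configure_tempest_defcore()
client.run_defcore_default()
client.parse_refstack_result()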
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
index 34d56f70a..4de443718 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_test_runner.py
@@ -14,7 +14,7 @@ import logging
from functest.core import unit
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
from snaps.openstack import create_flavor
@@ -37,13 +37,14 @@ class SnapsTestRunner(unit.Suite):
self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
self.netconf_override = None
- if hasattr(CONST, 'snaps_network_config'):
- self.netconf_override = getattr(CONST, 'snaps_network_config')
+ if hasattr(config.CONF, 'snaps_network_config'):
+ self.netconf_override = getattr(
+ config.CONF, 'snaps_network_config')
self.use_fip = (
- getattr(CONST, 'snaps_use_floating_ips') == 'True')
+ getattr(config.CONF, 'snaps_use_floating_ips') == 'True')
self.use_keystone = (
- getattr(CONST, 'snaps_use_keystone') == 'True')
+ getattr(config.CONF, 'snaps_use_keystone') == 'True')
scenario = env.get('DEPLOY_SCENARIO')
self.flavor_metadata = None
@@ -53,5 +54,5 @@ class SnapsTestRunner(unit.Suite):
self.logger.info("Using flavor metadata '%s'", self.flavor_metadata)
self.image_metadata = None
- if hasattr(CONST, 'snaps_images'):
- self.image_metadata = getattr(CONST, 'snaps_images')
+ if hasattr(config.CONF, 'snaps_images'):
+ self.image_metadata = getattr(config.CONF, 'snaps_images')
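The CONST to config.CONF migration keeps the hasattr/getattr lookup style for optional settings; a small sketch, assuming config.CONF exposes the parsed config_functest.yaml keys as attributes like the old CONST object did:

from functest.utils import config

# Explicit presence check, as in the patch.
netconf_override = None
if hasattr(config.CONF, 'snaps_network_config'):
    netconf_override = getattr(config.CONF, 'snaps_network_config')

# Equivalent shorthand using getattr's default argument.
netconf_override = getattr(config.CONF, 'snaps_network_config', None)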
diff --git a/functest/opnfv_tests/openstack/snaps/snaps_utils.py b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
index 4b1c93524..fa1005ea8 100644
--- a/functest/opnfv_tests/openstack/snaps/snaps_utils.py
+++ b/functest/opnfv_tests/openstack/snaps/snaps_utils.py
@@ -9,7 +9,8 @@
"""Some common utils wrapping snaps functions """
-from functest.utils.constants import CONST
+from functest.utils import config
+from functest.utils import constants
from functest.utils import env
from snaps.openstack.tests import openstack_tests
@@ -52,10 +53,9 @@ def get_credentials(proxy_settings_str=None, ssh_proxy_cmd=None):
:return: an instance of snaps OSCreds object
"""
creds_override = None
- if hasattr(CONST, 'snaps_os_creds_override'):
- creds_override = getattr(CONST, 'snaps_os_creds_override')
+ if hasattr(config.CONF, 'snaps_os_creds_override'):
+ creds_override = getattr(config.CONF, 'snaps_os_creds_override')
os_creds = openstack_tests.get_credentials(
- os_env_file=getattr(CONST, 'env_file'),
- proxy_settings_str=proxy_settings_str, ssh_proxy_cmd=ssh_proxy_cmd,
- overrides=creds_override)
+ os_env_file=constants.ENV_FILE, proxy_settings_str=proxy_settings_str,
+ ssh_proxy_cmd=ssh_proxy_cmd, overrides=creds_override)
return os_creds
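A usage sketch for the updated helper; constants.ENV_FILE replaces the former getattr(CONST, 'env_file') lookup, and the proxy arguments stay optional:

from functest.opnfv_tests.openstack.snaps import snaps_utils

# Builds snaps OSCreds from the Functest env file, then resolves the
# external network name used by the test runners.
os_creds = snaps_utils.get_credentials()
ext_net_name = snaps_utils.get_ext_net_name(os_creds)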
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index 875939ecc..786a7c5b6 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -20,7 +20,7 @@ import subprocess
import pkg_resources
import yaml
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
import functest.utils.functest_utils as ft_utils
@@ -31,19 +31,16 @@ RALLY_CONF_PATH = "/etc/rally/rally.conf"
RALLY_AARCH64_PATCH_PATH = pkg_resources.resource_filename(
'functest', 'ci/rally_aarch64_patch.conf')
GLANCE_IMAGE_PATH = os.path.join(
- getattr(CONST, 'dir_functest_images'),
- getattr(CONST, 'openstack_image_file_name'))
-TEMPEST_RESULTS_DIR = os.path.join(getattr(CONST, 'dir_results'), 'tempest')
+ getattr(config.CONF, 'dir_functest_images'),
+ getattr(config.CONF, 'openstack_image_file_name'))
+TEMPEST_RESULTS_DIR = os.path.join(
+ getattr(config.CONF, 'dir_results'), 'tempest')
TEMPEST_CUSTOM = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/test_list.txt')
TEMPEST_BLACKLIST = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/blacklist.txt')
-TEMPEST_DEFCORE = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/tempest/custom_tests/defcore_req.txt')
TEMPEST_RAW_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_raw_list.txt')
TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
-REFSTACK_RESULTS_DIR = os.path.join(getattr(CONST, 'dir_results'), 'refstack')
TEMPEST_CONF_YAML = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/tempest/custom_tests/tempest_conf.yaml')
TEST_ACCOUNTS_FILE = pkg_resources.resource_filename(
@@ -77,11 +74,10 @@ def create_rally_deployment():
cmd = "rally deployment destroy opnfv-rally"
ft_utils.execute_command(cmd, error_msg=(
"Deployment %s does not exist."
- % getattr(CONST, 'rally_deployment_name')),
- verbose=False)
+ % getattr(config.CONF, 'rally_deployment_name')), verbose=False)
cmd = ("rally deployment create --fromenv --name={0}"
- .format(getattr(CONST, 'rally_deployment_name')))
+ .format(getattr(config.CONF, 'rally_deployment_name')))
error_msg = "Problem while creating Rally deployment"
ft_utils.execute_command_raise(cmd, error_msg=error_msg)
@@ -94,15 +90,15 @@ def create_verifier():
"""Create new verifier"""
LOGGER.info("Create verifier from existing repo...")
cmd = ("rally verify delete-verifier --id '{0}' --force").format(
- getattr(CONST, 'tempest_verifier_name'))
+ getattr(config.CONF, 'tempest_verifier_name'))
ft_utils.execute_command(cmd, error_msg=(
"Verifier %s does not exist."
- % getattr(CONST, 'tempest_verifier_name')),
+ % getattr(config.CONF, 'tempest_verifier_name')),
verbose=False)
cmd = ("rally verify create-verifier --source {0} "
"--name {1} --type tempest --system-wide"
- .format(getattr(CONST, 'dir_repo_tempest'),
- getattr(CONST, 'tempest_verifier_name')))
+ .format(getattr(config.CONF, 'dir_repo_tempest'),
+ getattr(config.CONF, 'tempest_verifier_name')))
ft_utils.execute_command_raise(cmd,
error_msg='Problem while creating verifier')
@@ -114,7 +110,7 @@ def get_verifier_id():
create_rally_deployment()
create_verifier()
cmd = ("rally verify list-verifiers | awk '/" +
- getattr(CONST, 'tempest_verifier_name') +
+ getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -131,7 +127,7 @@ def get_verifier_deployment_id():
Returns deployment id for active Rally deployment
"""
cmd = ("rally deployment list | awk '/" +
- getattr(CONST, 'rally_deployment_name') +
+ getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
@@ -150,7 +146,7 @@ def get_verifier_repo_dir(verifier_id):
if not verifier_id:
verifier_id = get_verifier_id()
- return os.path.join(getattr(CONST, 'dir_rally_inst'),
+ return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'repo')
@@ -166,7 +162,7 @@ def get_verifier_deployment_dir(verifier_id, deployment_id):
if not deployment_id:
deployment_id = get_verifier_deployment_id()
- return os.path.join(getattr(CONST, 'dir_rally_inst'),
+ return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
'verifier-{}'.format(verifier_id),
'for-deployment-{}'.format(deployment_id))
@@ -193,41 +189,6 @@ def configure_tempest(deployment_dir, network_name=None, image_id=None,
flavor_id, compute_cnt)
-def configure_tempest_defcore(deployment_dir, network_name, image_id,
- flavor_id, image_id_alt, flavor_id_alt,
- tenant_id):
- # pylint: disable=too-many-arguments
- """
- Add/update needed parameters into tempest.conf file
- """
- conf_file = configure_verifier(deployment_dir)
- configure_tempest_update_params(conf_file, network_name, image_id,
- flavor_id)
-
- LOGGER.debug("Updating selected tempest.conf parameters for defcore...")
- config = ConfigParser.RawConfigParser()
- config.read(conf_file)
- config.set('DEFAULT', 'log_file', '{}/tempest.log'.format(deployment_dir))
- config.set('oslo_concurrency', 'lock_path',
- '{}/lock_files'.format(deployment_dir))
- generate_test_accounts_file(tenant_id=tenant_id)
- config.set('auth', 'test_accounts_file', TEST_ACCOUNTS_FILE)
- config.set('scenario', 'img_dir', '{}'.format(deployment_dir))
- config.set('scenario', 'img_file', 'tempest-image')
- config.set('compute', 'image_ref', image_id)
- config.set('compute', 'image_ref_alt', image_id_alt)
- config.set('compute', 'flavor_ref', flavor_id)
- config.set('compute', 'flavor_ref_alt', flavor_id_alt)
-
- with open(conf_file, 'wb') as config_file:
- config.write(config_file)
-
- confpath = pkg_resources.resource_filename(
- 'functest',
- 'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- shutil.copyfile(conf_file, confpath)
-
-
def generate_test_accounts_file(tenant_id):
"""
Add needed tenant and user params into test_accounts.yaml
@@ -236,10 +197,11 @@ def generate_test_accounts_file(tenant_id):
LOGGER.debug("Add needed params into test_accounts.yaml...")
accounts_list = [
{
- 'tenant_name': getattr(CONST, 'tempest_identity_tenant_name'),
+ 'tenant_name': getattr(
+ config.CONF, 'tempest_identity_tenant_name'),
'tenant_id': str(tenant_id),
- 'username': getattr(CONST, 'tempest_identity_user_name'),
- 'password': getattr(CONST, 'tempest_identity_user_password')
+ 'username': getattr(config.CONF, 'tempest_identity_user_name'),
+ 'password': getattr(config.CONF, 'tempest_identity_user_password')
}
]
@@ -247,21 +209,21 @@ def generate_test_accounts_file(tenant_id):
yaml.dump(accounts_list, tfile, default_flow_style=False)
-def update_tempest_conf_file(conf_file, config):
+def update_tempest_conf_file(conf_file, rconfig):
"""Update defined paramters into tempest config file"""
with open(TEMPEST_CONF_YAML) as yfile:
conf_yaml = yaml.safe_load(yfile)
if conf_yaml:
- sections = config.sections()
+ sections = rconfig.sections()
for section in conf_yaml:
if section not in sections:
- config.add_section(section)
+ rconfig.add_section(section)
sub_conf = conf_yaml.get(section)
for key, value in sub_conf.items():
- config.set(section, key, value)
+ rconfig.set(section, key, value)
with open(conf_file, 'wb') as config_file:
- config.write(config_file)
+ rconfig.write(config_file)
def configure_tempest_update_params(tempest_conf_file, network_name=None,
@@ -271,63 +233,58 @@ def configure_tempest_update_params(tempest_conf_file, network_name=None,
Add/update needed parameters into tempest.conf file
"""
LOGGER.debug("Updating selected tempest.conf parameters...")
- config = ConfigParser.RawConfigParser()
- config.read(tempest_conf_file)
- config.set('compute', 'fixed_network_name', network_name)
- config.set('compute', 'volume_device_name',
- getattr(CONST, 'tempest_volume_device_name'))
+ rconfig = ConfigParser.RawConfigParser()
+ rconfig.read(tempest_conf_file)
+ rconfig.set('compute', 'fixed_network_name', network_name)
+ rconfig.set('compute', 'volume_device_name',
+ getattr(config.CONF, 'tempest_volume_device_name'))
if image_id is not None:
- config.set('compute', 'image_ref', image_id)
+ rconfig.set('compute', 'image_ref', image_id)
if IMAGE_ID_ALT is not None:
- config.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
- if getattr(CONST, 'tempest_use_custom_flavors'):
+ rconfig.set('compute', 'image_ref_alt', IMAGE_ID_ALT)
+ if getattr(config.CONF, 'tempest_use_custom_flavors'):
if flavor_id is not None:
- config.set('compute', 'flavor_ref', flavor_id)
+ rconfig.set('compute', 'flavor_ref', flavor_id)
if FLAVOR_ID_ALT is not None:
- config.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
+ rconfig.set('compute', 'flavor_ref_alt', FLAVOR_ID_ALT)
if compute_cnt > 1:
# enable multinode tests
- config.set('compute', 'min_compute_nodes', compute_cnt)
- config.set('compute-feature-enabled', 'live_migration', True)
+ rconfig.set('compute', 'min_compute_nodes', compute_cnt)
+ rconfig.set('compute-feature-enabled', 'live_migration', True)
- config.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
- identity_api_version = os.environ.get(
- "OS_IDENTITY_API_VERSION", os.environ.get("IDENTITY_API_VERSION"))
+ rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
+ identity_api_version = os.environ.get("OS_IDENTITY_API_VERSION", '3')
if identity_api_version == '3':
auth_version = 'v3'
- config.set('identity-feature-enabled', 'api_v2', False)
+ rconfig.set('identity-feature-enabled', 'api_v2', False)
else:
auth_version = 'v2'
- config.set('identity', 'auth_version', auth_version)
- config.set(
+ rconfig.set('identity', 'auth_version', auth_version)
+ rconfig.set(
'validation', 'ssh_timeout',
- getattr(CONST, 'tempest_validation_ssh_timeout'))
- config.set('object-storage', 'operator_role',
- getattr(CONST, 'tempest_object_storage_operator_role'))
+ getattr(config.CONF, 'tempest_validation_ssh_timeout'))
+ rconfig.set('object-storage', 'operator_role',
+ getattr(config.CONF, 'tempest_object_storage_operator_role'))
if os.environ.get('OS_ENDPOINT_TYPE') is not None:
- config.set('identity', 'v3_endpoint_type',
- os.environ.get('OS_ENDPOINT_TYPE'))
+ rconfig.set('identity', 'v3_endpoint_type',
+ os.environ.get('OS_ENDPOINT_TYPE'))
if os.environ.get('OS_ENDPOINT_TYPE') is not None:
- sections = config.sections()
- services_list = ['compute',
- 'volume',
- 'image',
- 'network',
- 'data-processing',
- 'object-storage',
- 'orchestration']
+ sections = rconfig.sections()
+ services_list = [
+ 'compute', 'volume', 'image', 'network', 'data-processing',
+ 'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
- config.add_section(service)
- config.set(service, 'endpoint_type',
- os.environ.get('OS_ENDPOINT_TYPE'))
+ rconfig.add_section(service)
+ rconfig.set(service, 'endpoint_type',
+ os.environ.get('OS_ENDPOINT_TYPE'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
- update_tempest_conf_file(tempest_conf_file, config)
+ update_tempest_conf_file(tempest_conf_file, rconfig)
backup_tempest_config(tempest_conf_file)
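The local parser is now named rconfig so it no longer shadows the module-level "config" import; a minimal read/modify/write sketch of that cycle, with placeholder section, option and path values that are not Functest defaults:

import ConfigParser  # configparser on Python 3

rconfig = ConfigParser.RawConfigParser()
rconfig.read('/tmp/tempest.conf')
# Sections are added on demand, as done for the endpoint_type overrides.
if not rconfig.has_section('compute'):
    rconfig.add_section('compute')
rconfig.set('compute', 'fixed_network_name', 'tempest-net')
with open('/tmp/tempest.conf', 'wb') as config_file:
    rconfig.write(config_file)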
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index d474ec3a9..dd15c08e1 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -25,7 +25,7 @@ import yaml
from functest.core import testcase
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
import functest.utils.functest_utils as ft_utils
@@ -98,10 +98,7 @@ class TempestCommon(testcase.TestCase):
def generate_test_list(self, verifier_repo_dir):
"""Generate test list based on the test mode."""
LOGGER.debug("Generating test case list...")
- if self.mode == 'defcore':
- shutil.copyfile(
- conf_utils.TEMPEST_DEFCORE, conf_utils.TEMPEST_RAW_LIST)
- elif self.mode == 'custom':
+ if self.mode == 'custom':
if os.path.isfile(conf_utils.TEMPEST_CUSTOM):
shutil.copyfile(
conf_utils.TEMPEST_CUSTOM, conf_utils.TEMPEST_RAW_LIST)
@@ -330,16 +327,17 @@ class TempestResourcesManager(object):
self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials()
self.guid = '-' + str(uuid.uuid4())
self.creators = list()
- self.cirros_image_config = getattr(CONST, 'snaps_images_cirros', None)
+ self.cirros_image_config = getattr(
+ config.CONF, 'snaps_images_cirros', None)
def _create_project(self):
"""Create project for tests."""
project_creator = deploy_utils.create_project(
self.os_creds, ProjectConfig(
name=getattr(
- CONST, 'tempest_identity_tenant_name') + self.guid,
+ config.CONF, 'tempest_identity_tenant_name') + self.guid,
description=getattr(
- CONST, 'tempest_identity_tenant_description')))
+ config.CONF, 'tempest_identity_tenant_description')))
if project_creator is None or project_creator.get_project() is None:
raise Exception("Failed to create tenant")
self.creators.append(project_creator)
@@ -350,11 +348,11 @@ class TempestResourcesManager(object):
user_creator = deploy_utils.create_user(
self.os_creds, UserConfig(
name=getattr(
- CONST, 'tempest_identity_user_name') + self.guid,
+ config.CONF, 'tempest_identity_user_name') + self.guid,
password=getattr(
- CONST, 'tempest_identity_user_password'),
+ config.CONF, 'tempest_identity_user_password'),
project_name=getattr(
- CONST, 'tempest_identity_tenant_name') + self.guid))
+ config.CONF, 'tempest_identity_tenant_name') + self.guid))
if user_creator is None or user_creator.get_user() is None:
raise Exception("Failed to create user")
self.creators.append(user_creator)
@@ -367,13 +365,13 @@ class TempestResourcesManager(object):
tempest_segmentation_id = None
tempest_network_type = getattr(
- CONST, 'tempest_network_type', None)
+ config.CONF, 'tempest_network_type', None)
tempest_physical_network = getattr(
- CONST, 'tempest_physical_network', None)
+ config.CONF, 'tempest_physical_network', None)
tempest_segmentation_id = getattr(
- CONST, 'tempest_segmentation_id', None)
+ config.CONF, 'tempest_segmentation_id', None)
tempest_net_name = getattr(
- CONST, 'tempest_private_net_name') + self.guid
+ config.CONF, 'tempest_private_net_name') + self.guid
network_creator = deploy_utils.create_network(
self.os_creds, NetworkConfig(
@@ -384,10 +382,11 @@ class TempestResourcesManager(object):
segmentation_id=tempest_segmentation_id,
subnet_settings=[SubnetConfig(
name=getattr(
- CONST, 'tempest_private_subnet_name') + self.guid,
+ config.CONF,
+ 'tempest_private_subnet_name') + self.guid,
project_name=project_name,
cidr=getattr(
- CONST, 'tempest_private_subnet_cidr'))]))
+ config.CONF, 'tempest_private_subnet_cidr'))]))
if network_creator is None or network_creator.get_network() is None:
raise Exception("Failed to create private network")
self.creators.append(network_creator)
@@ -414,9 +413,9 @@ class TempestResourcesManager(object):
flavor_creator = OpenStackFlavor(
self.os_creds, FlavorConfig(
name=name,
- ram=getattr(CONST, 'openstack_flavor_ram'),
- disk=getattr(CONST, 'openstack_flavor_disk'),
- vcpus=getattr(CONST, 'openstack_flavor_vcpus'),
+ ram=getattr(config.CONF, 'openstack_flavor_ram'),
+ disk=getattr(config.CONF, 'openstack_flavor_disk'),
+ vcpus=getattr(config.CONF, 'openstack_flavor_vcpus'),
metadata=flavor_metadata))
flavor = flavor_creator.create()
if flavor is None:
@@ -439,7 +438,7 @@ class TempestResourcesManager(object):
if create_project:
LOGGER.debug("Creating project and user for Tempest suite")
project_name = getattr(
- CONST, 'tempest_identity_tenant_name') + self.guid
+ config.CONF, 'tempest_identity_tenant_name') + self.guid
result['project_id'] = self._create_project()
result['user_id'] = self._create_user()
result['tenant_id'] = result['project_id'] # for compatibility
@@ -448,26 +447,28 @@ class TempestResourcesManager(object):
result['tempest_net_name'] = self._create_network(project_name)
LOGGER.debug("Creating image for Tempest suite")
- image_name = getattr(CONST, 'openstack_image_name') + self.guid
+ image_name = getattr(config.CONF, 'openstack_image_name') + self.guid
result['image_id'] = self._create_image(image_name)
if use_custom_images:
LOGGER.debug("Creating 2nd image for Tempest suite")
- image_name = getattr(CONST, 'openstack_image_name_alt') + self.guid
+ image_name = getattr(
+ config.CONF, 'openstack_image_name_alt') + self.guid
result['image_id_alt'] = self._create_image(image_name)
- if (getattr(CONST, 'tempest_use_custom_flavors') == 'True' or
+ if (getattr(config.CONF, 'tempest_use_custom_flavors') == 'True' or
use_custom_flavors):
LOGGER.info("Creating flavor for Tempest suite")
- name = getattr(CONST, 'openstack_flavor_name') + self.guid
+ name = getattr(config.CONF, 'openstack_flavor_name') + self.guid
result['flavor_id'] = self._create_flavor(name)
if use_custom_flavors:
LOGGER.info("Creating 2nd flavor for Tempest suite")
scenario = env.get('DEPLOY_SCENARIO')
if 'ovs' in scenario or 'fdio' in scenario:
- setattr(CONST, 'openstack_flavor_ram', 1024)
- name = getattr(CONST, 'openstack_flavor_name_alt') + self.guid
+ setattr(config.CONF, 'openstack_flavor_ram', 1024)
+ name = getattr(
+ config.CONF, 'openstack_flavor_name_alt') + self.guid
result['flavor_id_alt'] = self._create_flavor(name)
return result
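configure_tempest_defcore() consumes the dict returned by TempestResourcesManager.create(); a hedged sketch of the keys it reads, assuming a reachable OpenStack cloud and that cleanup() releases everything create() built:

from functest.opnfv_tests.openstack.tempest import tempest

mgr = tempest.TempestResourcesManager()
resources = mgr.create(
    create_project=True, use_custom_images=True, use_custom_flavors=True)
for key in ('network_name', 'image_id', 'image_id_alt',
            'flavor_id', 'flavor_id_alt', 'project_id'):
    print(key, resources.get(key))
mgr.cleanup()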
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index fae5db2d4..586b8d655 100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
@@ -17,7 +17,7 @@ import uuid
from functest.core import testcase
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import env
from snaps.config.flavor import FlavorConfig
@@ -52,20 +52,24 @@ class VPingBase(testcase.TestCase):
# Shared metadata
self.guid = '-' + str(uuid.uuid4())
- self.router_name = getattr(CONST, 'vping_router_name') + self.guid
- self.vm1_name = getattr(CONST, 'vping_vm_name_1') + self.guid
- self.vm2_name = getattr(CONST, 'vping_vm_name_2') + self.guid
+ self.router_name = getattr(
+ config.CONF, 'vping_router_name') + self.guid
+ self.vm1_name = getattr(
+ config.CONF, 'vping_vm_name_1') + self.guid
+ self.vm2_name = getattr(config.CONF, 'vping_vm_name_2') + self.guid
- self.vm_boot_timeout = getattr(CONST, 'vping_vm_boot_timeout')
- self.vm_delete_timeout = getattr(CONST, 'vping_vm_delete_timeout')
+ self.vm_boot_timeout = getattr(config.CONF, 'vping_vm_boot_timeout')
+ self.vm_delete_timeout = getattr(
+ config.CONF, 'vping_vm_delete_timeout')
self.vm_ssh_connect_timeout = getattr(
- CONST, 'vping_vm_ssh_connect_timeout')
- self.ping_timeout = getattr(CONST, 'vping_ping_timeout')
+ config.CONF, 'vping_vm_ssh_connect_timeout')
+ self.ping_timeout = getattr(config.CONF, 'vping_ping_timeout')
self.flavor_name = 'vping-flavor' + self.guid
# Move this configuration option up for all tests to leverage
- if hasattr(CONST, 'snaps_images_cirros'):
- self.cirros_image_config = getattr(CONST, 'snaps_images_cirros')
+ if hasattr(config.CONF, 'snaps_images_cirros'):
+ self.cirros_image_config = getattr(
+ config.CONF, 'snaps_images_cirros')
else:
self.cirros_image_config = None
@@ -82,7 +86,7 @@ class VPingBase(testcase.TestCase):
'%Y-%m-%d %H:%M:%S'))
image_base_name = '{}-{}'.format(
- getattr(CONST, 'vping_image_name'),
+ getattr(config.CONF, 'vping_image_name'),
str(self.guid))
os_image_settings = openstack_tests.cirros_image_settings(
image_base_name, image_metadata=self.cirros_image_config)
@@ -92,21 +96,24 @@ class VPingBase(testcase.TestCase):
self.os_creds, os_image_settings)
self.creators.append(self.image_creator)
- private_net_name = getattr(CONST, 'vping_private_net_name') + self.guid
+ private_net_name = getattr(
+ config.CONF, 'vping_private_net_name') + self.guid
private_subnet_name = getattr(
- CONST, 'vping_private_subnet_name') + self.guid
- private_subnet_cidr = getattr(CONST, 'vping_private_subnet_cidr')
+ config.CONF, 'vping_private_subnet_name') + self.guid
+ private_subnet_cidr = getattr(config.CONF, 'vping_private_subnet_cidr')
vping_network_type = None
vping_physical_network = None
vping_segmentation_id = None
- if hasattr(CONST, 'vping_network_type'):
- vping_network_type = getattr(CONST, 'vping_network_type')
- if hasattr(CONST, 'vping_physical_network'):
- vping_physical_network = getattr(CONST, 'vping_physical_network')
- if hasattr(CONST, 'vping_segmentation_id'):
- vping_segmentation_id = getattr(CONST, 'vping_segmentation_id')
+ if hasattr(config.CONF, 'vping_network_type'):
+ vping_network_type = getattr(config.CONF, 'vping_network_type')
+ if hasattr(config.CONF, 'vping_physical_network'):
+ vping_physical_network = getattr(
+ config.CONF, 'vping_physical_network')
+ if hasattr(config.CONF, 'vping_segmentation_id'):
+ vping_segmentation_id = getattr(
+ config.CONF, 'vping_segmentation_id')
self.logger.info(
"Creating network with name: '%s'", private_net_name)
@@ -179,7 +186,7 @@ class VPingBase(testcase.TestCase):
Cleanup all OpenStack objects. Should be called on completion
:return:
"""
- if getattr(CONST, 'vping_cleanup_objects') == 'True':
+ if getattr(config.CONF, 'vping_cleanup_objects') == 'True':
for creator in reversed(self.creators):
try:
creator.clean()
diff --git a/functest/opnfv_tests/openstack/vping/vping_ssh.py b/functest/opnfv_tests/openstack/vping/vping_ssh.py
index 0964d8464..e6c6bf351 100644
--- a/functest/opnfv_tests/openstack/vping/vping_ssh.py
+++ b/functest/opnfv_tests/openstack/vping/vping_ssh.py
@@ -17,7 +17,7 @@ import pkg_resources
from functest.core.testcase import TestCase
from functest.energy import energy
from functest.opnfv_tests.openstack.vping import vping_base
-from functest.utils.constants import CONST
+from functest.utils import config
from snaps.config.keypair import KeypairConfig
from snaps.config.network import PortConfig
@@ -42,11 +42,11 @@ class VPingSSH(vping_base.VPingBase):
kwargs["case_name"] = "vping_ssh"
super(VPingSSH, self).__init__(**kwargs)
- self.kp_name = getattr(CONST, 'vping_keypair_name') + self.guid
- self.kp_priv_file = getattr(CONST, 'vping_keypair_priv_file')
- self.kp_pub_file = getattr(CONST, 'vping_keypair_pub_file')
- self.sg_name = getattr(CONST, 'vping_sg_name') + self.guid
- self.sg_desc = getattr(CONST, 'vping_sg_desc')
+ self.kp_name = getattr(config.CONF, 'vping_keypair_name') + self.guid
+ self.kp_priv_file = getattr(config.CONF, 'vping_keypair_priv_file')
+ self.kp_pub_file = getattr(config.CONF, 'vping_keypair_pub_file')
+ self.sg_name = getattr(config.CONF, 'vping_sg_name') + self.guid
+ self.sg_desc = getattr(config.CONF, 'vping_sg_desc')
@energy.enable_recording
def run(self, **kwargs):
diff --git a/functest/opnfv_tests/sdn/odl/odl.py b/functest/opnfv_tests/sdn/odl/odl.py
index 705f39dab..f5e07ad3d 100644
--- a/functest/opnfv_tests/sdn/odl/odl.py
+++ b/functest/opnfv_tests/sdn/odl/odl.py
@@ -30,7 +30,7 @@ from snaps.openstack.utils import keystone_utils
from functest.core import robotframework
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils import constants
+from functest.utils import config
from functest.utils import env
__author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
@@ -39,7 +39,7 @@ __author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
class ODLTests(robotframework.RobotFramework):
"""ODL test runner."""
- odl_test_repo = getattr(constants.CONST, 'dir_repo_odl_test')
+ odl_test_repo = getattr(config.CONF, 'dir_repo_odl_test')
neutron_suite_dir = os.path.join(
odl_test_repo, "csit/suites/openstack/neutron")
basic_suite_dir = os.path.join(
@@ -52,7 +52,7 @@ class ODLTests(robotframework.RobotFramework):
def __init__(self, **kwargs):
super(ODLTests, self).__init__(**kwargs)
self.res_dir = os.path.join(
- getattr(constants.CONST, 'dir_results'), 'odl')
+ getattr(config.CONF, 'dir_results'), 'odl')
self.xml_file = os.path.join(self.res_dir, 'output.xml')
@classmethod
@@ -190,6 +190,7 @@ class ODLTests(robotframework.RobotFramework):
kwargs['odlrestconfport'] = '8087'
else:
kwargs['odlip'] = env.get('SDN_CONTROLLER_IP')
+ assert kwargs['odlip']
except KeyError as ex:
self.__logger.error("Cannot run ODL testcases. "
"Please check env var: "
diff --git a/functest/opnfv_tests/vnf/epc/juju_epc.py b/functest/opnfv_tests/vnf/epc/juju_epc.py
index dc92bc920..3b62a9a1c 100644
--- a/functest/opnfv_tests/vnf/epc/juju_epc.py
+++ b/functest/opnfv_tests/vnf/epc/juju_epc.py
@@ -22,7 +22,7 @@ import yaml
from functest.core import vnf
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from snaps.config.flavor import FlavorConfig
from snaps.config.image import ImageConfig
@@ -79,8 +79,6 @@ class JujuEpc(vnf.VnfOnBoarding):
__logger = logging.getLogger(__name__)
- default_region_name = "RegionOne"
-
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = "juju_epc"
@@ -91,7 +89,7 @@ class JujuEpc(vnf.VnfOnBoarding):
'functest', 'opnfv_tests/vnf/epc')
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except Exception:
raise Exception("VNF config file not found")
self.config_file = os.path.join(self.case_dir, self.config)
@@ -125,7 +123,7 @@ class JujuEpc(vnf.VnfOnBoarding):
self.public_auth_url = None
self.res_dir = os.path.join(
- getattr(CONST, 'dir_results'), self.case_name)
+ getattr(config.CONF, 'dir_results'), self.case_name)
def _bypass_juju_network_discovery_bug(self, name):
user_creator = OpenStackUser(
@@ -142,8 +140,7 @@ class JujuEpc(vnf.VnfOnBoarding):
clouds_yaml = os.path.join(self.res_dir, "clouds.yaml")
cloud_data = {
'url': self.public_auth_url,
- 'region': os.environ.get(
- "OS_REGION_NAME", self.default_region_name)}
+ 'region': self.snaps_creds.region_name}
with open(clouds_yaml, 'w') as yfile:
yfile.write(CLOUD_TEMPLATE.format(**cloud_data))
if os.system(
@@ -189,17 +186,17 @@ class JujuEpc(vnf.VnfOnBoarding):
def _add_custom_rule(self, sec_grp_name):
""" To add custom rule for SCTP Traffic """
sec_grp_rules = list()
- security_group_init = OpenStackSecurityGroup(
+ sec_grp_rules.append(
+ SecurityGroupRuleConfig(
+ sec_grp_name=sec_grp_name, direction=Direction.ingress,
+ protocol=Protocol.sctp))
+ security_group = OpenStackSecurityGroup(
self.snaps_creds,
SecurityGroupConfig(
name=sec_grp_name,
rule_settings=sec_grp_rules))
- security_group_init.initialize()
- sctp_rule = SecurityGroupRuleConfig(
- sec_grp_name=sec_grp_name, direction=Direction.ingress,
- protocol=Protocol.sctp)
- security_group_init.add_rule(sctp_rule)
- self.created_object.append(security_group_init)
+ security_group.create()
+ self.created_object.append(security_group)
def prepare(self):
"""Prepare testcase (Additional pre-configuration steps)."""
@@ -231,13 +228,13 @@ class JujuEpc(vnf.VnfOnBoarding):
"""
self.__logger.info("Deployed Orchestrator")
private_net_name = getattr(
- CONST, 'vnf_{}_private_net_name'.format(self.case_name))
+ config.CONF, 'vnf_{}_private_net_name'.format(self.case_name))
private_subnet_name = getattr(
- CONST, 'vnf_{}_private_subnet_name'.format(self.case_name))
+ config.CONF, 'vnf_{}_private_subnet_name'.format(self.case_name))
private_subnet_cidr = getattr(
- CONST, 'vnf_{}_private_subnet_cidr'.format(self.case_name))
+ config.CONF, 'vnf_{}_private_subnet_cidr'.format(self.case_name))
abot_router = getattr(
- CONST, 'vnf_{}_external_router'.format(self.case_name))
+ config.CONF, 'vnf_{}_external_router'.format(self.case_name))
self.__logger.info("Creating full network ...")
subnet_settings = SubnetConfig(
@@ -278,9 +275,7 @@ class JujuEpc(vnf.VnfOnBoarding):
os.system(
'juju metadata generate-image -d ~ -i {} -s {} -r '
'{} -u {}'.format(
- image_id, image_name,
- os.environ.get(
- "OS_REGION_NAME", self.default_region_name),
+ image_id, image_name, self.snaps_creds.region_name,
self.public_auth_url))
self.created_object.append(image_creator)
self.__logger.info("Network ID : %s", net_id)
diff --git a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
index f3f2e1d7c..7e1d5bb2d 100644
--- a/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
+++ b/functest/opnfv_tests/vnf/ims/clearwater_ims_base.py
@@ -18,7 +18,7 @@ import pkg_resources
import requests
import functest.core.vnf as vnf
-from functest.utils.constants import CONST
+from functest.utils import config
import functest.utils.functest_utils as ft_utils
__author__ = ("Valentin Boucher <valentin.boucher@orange.com>, "
@@ -33,10 +33,10 @@ class ClearwaterOnBoardingBase(vnf.VnfOnBoarding):
super(ClearwaterOnBoardingBase, self).__init__(**kwargs)
self.case_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/vnf/ims')
- self.data_dir = getattr(CONST, 'dir_ims_data')
- self.result_dir = os.path.join(getattr(CONST, 'dir_results'),
+ self.data_dir = getattr(config.CONF, 'dir_ims_data')
+ self.result_dir = os.path.join(getattr(config.CONF, 'dir_results'),
self.case_name)
- self.test_dir = getattr(CONST, 'dir_repo_vims_test')
+ self.test_dir = getattr(config.CONF, 'dir_repo_vims_test')
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index 38a40ed83..81e9b5ee9 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -21,7 +21,7 @@ import yaml
from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
import functest.opnfv_tests.vnf.ims.clearwater_ims_base as clearwater_ims_base
-from functest.utils.constants import CONST
+from functest.utils import config
from snaps.config.flavor import FlavorConfig
from snaps.config.image import ImageConfig
@@ -59,7 +59,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
# Retrieve the configuration
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except Exception:
raise Exception("VNF config file not found")
@@ -280,12 +280,12 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
scp.put(kp_file, '~/')
cmd = "sudo cp ~/cloudify_ims.pem /etc/cloudify/"
- run_blocking_ssh_command(ssh, cmd)
+ self.run_blocking_ssh_command(ssh, cmd)
cmd = "sudo chmod 444 /etc/cloudify/cloudify_ims.pem"
- run_blocking_ssh_command(ssh, cmd)
+ self.run_blocking_ssh_command(ssh, cmd)
cmd = "sudo yum install -y gcc python-devel"
- run_blocking_ssh_command(ssh, cmd, "Unable to install packages \
- on manager")
+ self.run_blocking_ssh_command(
+ ssh, cmd, "Unable to install packages on manager")
self.details['orchestrator'].update(status='PASS', duration=duration)
@@ -399,7 +399,7 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
try:
cfy_client.executions.cancel(execution['id'],
force=True)
- except: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
self.__logger.warn("Can't cancel the current exec")
execution = cfy_client.executions.start(
@@ -411,12 +411,22 @@ class CloudifyIms(clearwater_ims_base.ClearwaterOnBoardingBase):
wait_for_execution(cfy_client, execution, self.__logger)
cfy_client.deployments.delete(self.vnf['descriptor'].get('name'))
cfy_client.blueprints.delete(self.vnf['descriptor'].get('name'))
- except: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except
self.__logger.warn("Some issue during the undeployment ..")
self.__logger.warn("Tenant clean continue ..")
super(CloudifyIms, self).clean()
+ @staticmethod
+ def run_blocking_ssh_command(ssh, cmd,
+ error_msg="Unable to run this command"):
+ """Command to run ssh command with the exit status."""
+ _, stdout, stderr = ssh.exec_command(cmd)
+ CloudifyIms.__logger.debug("SSH %s stdout: %s", cmd, stdout.read())
+ if stdout.channel.recv_exit_status() != 0:
+ CloudifyIms.__logger.error("SSH %s stderr: %s", cmd, stderr.read())
+ raise Exception(error_msg)
+
@energy.enable_recording
def run(self, **kwargs):
"""Execute CloudifyIms test case."""
@@ -530,10 +540,3 @@ def sig_test_format(sig_test):
short_sig_test_result['skipped'] = nb_skipped
nb_test = nb_passed + nb_skipped
return (short_sig_test_result, nb_test)
-
-
-def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"):
- """Command to run ssh command with the exit status."""
- stdin, stdout, stderr = ssh.exec_command(cmd)
- if stdout.channel.recv_exit_status() != 0:
- raise Exception(error_msg)
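The blocking SSH helper becomes a static method that logs stdout and logs stderr before raising when the remote command exits non-zero; a standalone paramiko sketch of the same logic (connection details in the usage comment are placeholders):

import logging

LOGGER = logging.getLogger(__name__)

def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"):
    """Run cmd over an open paramiko SSHClient and fail on non-zero exit."""
    _, stdout, stderr = ssh.exec_command(cmd)
    # Reading stdout first drains the channel before checking the exit code.
    LOGGER.debug("SSH %s stdout: %s", cmd, stdout.read())
    if stdout.channel.recv_exit_status() != 0:
        LOGGER.error("SSH %s stderr: %s", cmd, stderr.read())
        raise Exception(error_msg)

# Usage:
# import paramiko
# client = paramiko.SSHClient()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# client.connect('192.0.2.10', username='centos',
#                key_filename='cloudify_ims.pem')
# run_blocking_ssh_command(client, 'sudo yum install -y gcc python-devel')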
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims_perf.py b/functest/opnfv_tests/vnf/ims/cloudify_ims_perf.py
index 72e1e447e..cdf1edc0a 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims_perf.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims_perf.py
@@ -25,7 +25,7 @@ from functest.opnfv_tests.vnf.ims import cloudify_ims
from functest.opnfv_tests.vnf.ims.ixia.utils import IxChassisUtils
from functest.opnfv_tests.vnf.ims.ixia.utils import IxLoadUtils
from functest.opnfv_tests.vnf.ims.ixia.utils import IxRestUtils
-from functest.utils.constants import CONST
+from functest.utils import config
from snaps.config.flavor import FlavorConfig
from snaps.config.image import ImageConfig
@@ -58,7 +58,7 @@ class CloudifyImsPerf(cloudify_ims.CloudifyIms):
# Retrieve the configuration
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except Exception:
raise Exception("VNF config file not found")
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
index e1b7f3ab0..2d248b2e6 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_clearwaterims.py
@@ -14,13 +14,13 @@ import logging
import os
import socket
import time
+
import pkg_resources
import yaml
-import functest.core.vnf as vnf
-import functest.utils.openstack_utils as os_utils
+from functest.core import vnf
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from org.openbaton.cli.errors.errors import NfvoException
from org.openbaton.cli.agents.agents import MainAgent
@@ -30,6 +30,7 @@ from snaps.config.network import NetworkConfig, PortConfig, SubnetConfig
from snaps.config.router import RouterConfig
from snaps.config.security_group import (
Direction, Protocol, SecurityGroupConfig, SecurityGroupRuleConfig)
+from snaps.config.vm_inst import FloatingIpConfig
from snaps.config.vm_inst import VmInstanceConfig
from snaps.openstack.utils import keystone_utils
from snaps.openstack.create_flavor import OpenStackFlavor
@@ -71,7 +72,7 @@ def get_config(parameter, file_path):
def servertest(host, port):
"""Method to test that a server is reachable at IP:port"""
args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
- for family, socktype, proto, canonname, sockaddr in args:
+ for family, socktype, proto, _, sockaddr in args:
sock = socket.socket(family, socktype, proto)
try:
sock.connect(sockaddr)
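servertest only drops the unused canonname field from getaddrinfo; the check itself is a plain TCP connect against each resolved address. A self-contained sketch of the idea (the success/failure handling after connect is not shown in the hunk, so it is reconstructed here):

```python
import socket


def servertest(host, port):
    """Return True if a TCP connection to host:port succeeds."""
    args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
    for family, socktype, proto, _, sockaddr in args:
        sock = socket.socket(family, socktype, proto)
        try:
            sock.connect(sockaddr)
        except socket.error:
            sock.close()
            continue
        sock.close()
        return True
    return False


print(servertest("localhost", 22))
```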
@@ -88,8 +89,6 @@ def get_userdata(orchestrator=dict):
userdata += "echo \"Executing userdata...\"\n"
userdata += "set -x\n"
userdata += "set -e\n"
- userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
- userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
userdata += "echo \"Install curl...\"\n"
userdata += "apt-get install curl\n"
userdata += "echo \"Inject public key...\"\n"
@@ -109,10 +108,6 @@ def get_userdata(orchestrator=dict):
orchestrator['bootstrap']['config']['url'])
userdata += ("echo \"Disable usage of mysql...\"\n")
userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
- userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
- % orchestrator['details']['fip'].ip)
- userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
- "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
userdata += "echo \"Set autostart of components to 'false'\"\n"
userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
userdata += "echo \"Execute bootstrap...\"\n"
@@ -150,14 +145,14 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
self.case_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/vnf/ims/')
- self.data_dir = getattr(CONST, 'dir_ims_data')
- self.test_dir = getattr(CONST, 'dir_repo_vims_test')
+ self.data_dir = getattr(config.CONF, 'dir_ims_data')
+ self.test_dir = getattr(config.CONF, 'dir_repo_vims_test')
self.created_resources = []
self.logger.info("%s VNF onboarding test starting", self.case_name)
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except BaseException:
raise Exception("Orchestra VNF config file not found")
@@ -199,7 +194,8 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
"tenant_images.%s" %
self.case_name,
config_file))
- self.snaps_creds = None
+ self.creds = None
+ self.orchestra_router = None
def prepare(self):
"""Prepare testscase (Additional pre-configuration steps)."""
@@ -209,18 +205,15 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
public_auth_url = keystone_utils.get_endpoint(
self.snaps_creds, 'identity')
-
self.creds = {
- "tenant": self.snaps_creds.project_name,
- "username": self.snaps_creds.username,
- "password": self.snaps_creds.password,
- "auth_url": public_auth_url
- }
+ "tenant": self.snaps_creds.project_name,
+ "username": self.snaps_creds.username,
+ "password": self.snaps_creds.password,
+ "auth_url": public_auth_url}
self.prepare_images()
self.prepare_flavor()
self.prepare_security_groups()
self.prepare_network()
- self.prepare_floating_ip()
def prepare_images(self):
"""Upload images if they doen't exist yet"""
@@ -236,7 +229,7 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
image_file=image_file,
public=True))
image.create()
- # self.created_resources.append(image);
+ self.created_resources.append(image)
def prepare_security_groups(self):
"""Create Open Baton security group if it doesn't exist yet"""
@@ -245,48 +238,54 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
sg_rules = list()
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.ingress,
protocol=Protocol.tcp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.egress,
protocol=Protocol.tcp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.ingress,
protocol=Protocol.udp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.egress,
protocol=Protocol.udp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.ingress,
protocol=Protocol.icmp))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.egress,
protocol=Protocol.icmp))
security_group = OpenStackSecurityGroup(
self.snaps_creds,
SecurityGroupConfig(
- name="orchestra-sec-group-allowall",
+ name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
rule_settings=sg_rules))
-
security_group_info = security_group.create()
self.created_resources.append(security_group)
self.mano['details']['sec_group'] = security_group_info.name
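Suffixing every security-group and rule name with the test-case uuid keeps parallel runs from colliding on a shared "allowall" group. A condensed sketch of the same SNAPS-OO calls, generating the six rules in a loop instead of six literal blocks (the loop is an editorial simplification; the class names and arguments are the ones used in the hunk):

```python
from snaps.config.security_group import (
    Direction, Protocol, SecurityGroupConfig, SecurityGroupRuleConfig)
from snaps.openstack.create_security_group import OpenStackSecurityGroup


def create_allowall_group(snaps_creds, run_uuid):
    """Create an 'allow all' security group whose name is unique per run."""
    name = "orchestra-sec-group-allowall-{}".format(run_uuid)
    sg_rules = []
    for direction in (Direction.ingress, Direction.egress):
        for protocol in (Protocol.tcp, Protocol.udp):
            sg_rules.append(SecurityGroupRuleConfig(
                sec_grp_name=name, direction=direction, protocol=protocol,
                port_range_min=1, port_range_max=65535))
        sg_rules.append(SecurityGroupRuleConfig(
            sec_grp_name=name, direction=direction, protocol=Protocol.icmp))
    group = OpenStackSecurityGroup(
        snaps_creds, SecurityGroupConfig(name=name, rule_settings=sg_rules))
    group.create()
    return group
```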
@@ -315,12 +314,10 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
self.logger.info(
"Creating network/subnet/router if they doen't exist yet...")
subnet_settings = SubnetConfig(
- name='%s_subnet' %
- self.case_name,
+ name='{}_subnet-{}'.format(self.case_name, self.uuid),
cidr="192.168.100.0/24")
network_settings = NetworkConfig(
- name='%s_net' %
- self.case_name,
+ name='{}_net-{}'.format(self.case_name, self.uuid),
subnet_settings=[subnet_settings])
orchestra_network = OpenStackNetwork(
self.snaps_creds, network_settings)
@@ -331,77 +328,27 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
self.mano['details']['external_net_name'] = snaps_utils.\
get_ext_net_name(self.snaps_creds)
self.created_resources.append(orchestra_network)
- orchestra_router = OpenStackRouter(
+ self.orchestra_router = OpenStackRouter(
self.snaps_creds,
RouterConfig(
- name='%s_router' %
- self.case_name,
+ name='{}_router-{}'.format(self.case_name, self.uuid),
external_gateway=self.mano['details']['external_net_name'],
internal_subnets=[
subnet_settings.name]))
- orchestra_router.create()
- self.created_resources.append(orchestra_router)
+ self.orchestra_router.create()
+ self.created_resources.append(self.orchestra_router)
self.logger.info("Created network and router for Open Baton NFVO...")
- def prepare_floating_ip(self):
- """Select/Create Floating IP if it doesn't exist yet"""
- self.logger.info("Retrieving floating IP for Open Baton NFVO")
- neutron_client = snaps_utils.neutron_utils.neutron_client(
- self.snaps_creds)
- # Finding Tenant ID to check to which tenant the Floating IP belongs
- tenant_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(self.creds),
- self.tenant_name)
- # Use os_utils to retrieve complete information of Floating IPs
- floating_ips = os_utils.get_floating_ips(neutron_client)
- my_floating_ips = []
- # Filter Floating IPs with tenant id
- for floating_ip in floating_ips:
- if floating_ip.get('tenant_id') == tenant_id:
- my_floating_ips.append(floating_ip.get('floating_ip_address'))
- # Select if Floating IP exist else create new one
- if len(my_floating_ips) >= 1:
- # Get Floating IP object from snaps for clean up
- snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
- neutron_client)
- for my_floating_ip in my_floating_ips:
- for snaps_floating_ip in snaps_floating_ips:
- if snaps_floating_ip.ip == my_floating_ip:
- self.mano['details']['fip'] = snaps_floating_ip
- self.logger.info(
- "Selected floating IP for Open Baton NFVO %s",
- (self.mano['details']['fip'].ip))
- break
- if self.mano['details']['fip'] is not None:
- break
- else:
- self.logger.info("Creating floating IP for Open Baton NFVO")
- self.mano['details']['fip'] = snaps_utils.neutron_utils.\
- create_floating_ip(
- neutron_client,
- self.mano['details']['external_net_name'])
- self.logger.info(
- "Created floating IP for Open Baton NFVO %s",
- (self.mano['details']['fip'].ip))
-
def get_vim_descriptor(self):
""""Create VIM descriptor to be used for onboarding"""
self.logger.info(
"Building VIM descriptor with PoP creds: %s",
self.creds)
- # Depending on API version either tenant ID or project name must be
- # used
- if os_utils.is_keystone_v3():
- self.logger.info(
- "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
- project_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(),
- self.creds.get("project_name"))
- else:
- self.logger.info(
- "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
- project_id = self.creds.get("tenant_name")
- self.logger.debug("VIM project/tenant id: %s", project_id)
+ self.logger.debug("VIM project/tenant id: %s",
+ self.snaps_creds.project_name)
+ keystone = keystone_utils.keystone_client(self.snaps_creds)
+ project_id = keystone_utils.get_project(
+ keystone=keystone, project_name=self.snaps_creds.project_name).id
vim_json = {
"name": "vim-instance",
"authUrl": self.creds.get("auth_url"),
@@ -442,40 +389,28 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
exists=True)
# setting up port
port_settings = PortConfig(
- name='%s_port' % self.case_name,
+ name='{}_port-{}'.format(self.case_name, self.uuid),
network_name=self.mano['details']['network']['name'])
+
# build configuration of vm
orchestra_settings = VmInstanceConfig(
name=self.case_name,
flavor=self.mano['details']['flavor']['name'],
port_settings=[port_settings],
security_group_names=[self.mano['details']['sec_group']],
+ floating_ip_settings=[FloatingIpConfig(
+ name='orchestra_fip-{}'.format(self.uuid),
+ port_name=port_settings.name,
+ router_name=self.orchestra_router.router_settings.name)],
userdata=str(userdata))
- orchestra_vm = OpenStackVmInstance(self.snaps_creds,
- orchestra_settings,
- image_settings)
-
+ orchestra_vm = OpenStackVmInstance(
+ self.snaps_creds, orchestra_settings, image_settings)
orchestra_vm.create()
+ self.mano['details']['fip'] = orchestra_vm.get_floating_ip()
self.created_resources.append(orchestra_vm)
self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
self.logger.info(
- "Created orchestra instance: %s",
- self.mano['details']['id'])
-
- self.logger.info("Associating floating ip: '%s' to VM '%s' ",
- self.mano['details']['fip'].ip,
- self.case_name)
- nova_client = os_utils.get_nova_client()
- if not os_utils.add_floating_ip(
- nova_client,
- self.mano['details']['id'],
- self.mano['details']['fip'].ip):
- duration = time.time() - start_time
- self.details["orchestrator"].update(
- status='FAIL', duration=duration)
- self.logger.error("Cannot associate floating IP to VM.")
- return False
-
+ "Created orchestra instance: %s", self.mano['details']['id'])
self.logger.info("Waiting for Open Baton NFVO to be up and running...")
timeout = 0
while timeout < 20:
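Instead of hunting for or creating a floating IP in prepare_floating_ip, the VM configuration now declares a FloatingIpConfig and the address is read back from the created instance. A reduced sketch of that flow; the config classes are the ones imported in this patch, while the OpenStackVmInstance import path and the resource names are assumptions:

```python
from snaps.config.network import PortConfig
from snaps.config.vm_inst import FloatingIpConfig, VmInstanceConfig
from snaps.openstack.create_instance import OpenStackVmInstance  # assumed path


def boot_orchestrator(snaps_creds, image_settings, flavor_name,
                      network_name, router_name, run_uuid, userdata):
    """Boot the NFVO VM with a floating IP attached at creation time."""
    port_settings = PortConfig(
        name='orchestra_port-{}'.format(run_uuid), network_name=network_name)
    vm_settings = VmInstanceConfig(
        name='orchestra-{}'.format(run_uuid),
        flavor=flavor_name,
        port_settings=[port_settings],
        floating_ip_settings=[FloatingIpConfig(
            name='orchestra_fip-{}'.format(run_uuid),
            port_name=port_settings.name,
            router_name=router_name)],
        userdata=userdata)
    vm_creator = OpenStackVmInstance(snaps_creds, vm_settings, image_settings)
    vm_creator.create()
    # The floating IP is only known once the instance exists.
    return vm_creator, vm_creator.get_floating_ip()
```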
@@ -561,8 +496,8 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
self.mano['details']['nsr'] = nsr_agent.create(
self.mano['details']['nsd_id'])
- except NfvoException as exc:
- self.logger.error(exc.message)
+ except NfvoException:
+ self.logger.exception("failed")
duration = time.time() - start_time
self.details["vnf"].update(status='FAIL', duration=duration)
return False
@@ -621,13 +556,11 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
try:
main_agent = MainAgent(
nfvo_ip=self.mano['details']['fip'].ip,
- nfvo_port=8080,
- https=False,
- version=1,
+ nfvo_port=8080, https=False, version=1,
username=self.mano['credentials']['username'],
password=self.mano['credentials']['password'])
self.logger.info("Terminating %s...", self.vnf['name'])
- if (self.mano['details'].get('nsr')):
+ if self.mano['details'].get('nsr'):
main_agent.get_agent(
"nsr",
project_id=self.mano['details']['project_id']).delete(
@@ -636,32 +569,6 @@ class ClearwaterImsVnf(vnf.VnfOnBoarding):
time.sleep(60)
else:
self.logger.info("No need to terminate the VNF...")
- # os_utils.delete_instance(nova_client=os_utils.get_nova_client(),
- # instance_id=self.mano_instance_id)
except (NfvoException, KeyError) as exc:
self.logger.error('Unexpected error cleaning - %s', exc)
-
- try:
- neutron_client = os_utils.get_neutron_client(self.creds)
- self.logger.info("Deleting Open Baton Port...")
- port = snaps_utils.neutron_utils.get_port(
- neutron_client,
- port_name='%s_port' % self.case_name)
- snaps_utils.neutron_utils.delete_port(neutron_client, port)
- time.sleep(10)
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', exc)
- try:
- self.logger.info("Deleting Open Baton Floating IP...")
- snaps_utils.neutron_utils.delete_floating_ip(
- neutron_client, self.mano['details']['fip'])
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', exc)
-
- for resource in reversed(self.created_resources):
- try:
- self.logger.info("Cleaning %s", str(resource))
- resource.clean()
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', exc)
super(ClearwaterImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_openims.py b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
index c35ec8c18..8f209d5cf 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_openims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_openims.py
@@ -17,9 +17,8 @@ import time
import pkg_resources
import yaml
-import functest.core.vnf as vnf
-import functest.utils.openstack_utils as os_utils
-from functest.utils.constants import CONST
+from functest.core import vnf
+from functest.utils import config
from org.openbaton.cli.errors.errors import NfvoException
from org.openbaton.cli.agents.agents import MainAgent
@@ -29,6 +28,7 @@ from snaps.config.network import NetworkConfig, PortConfig, SubnetConfig
from snaps.config.router import RouterConfig
from snaps.config.security_group import (
Direction, Protocol, SecurityGroupConfig, SecurityGroupRuleConfig)
+from snaps.config.vm_inst import FloatingIpConfig
from snaps.config.vm_inst import VmInstanceConfig
from snaps.openstack.utils import keystone_utils
from snaps.openstack.create_image import OpenStackImage
@@ -72,7 +72,7 @@ def get_config(parameter, file_path):
def servertest(host, port):
"""Method to test that a server is reachable at IP:port"""
args = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
- for family, socktype, proto, canonname, sockaddr in args:
+ for family, socktype, proto, _, sockaddr in args:
sock = socket.socket(family, socktype, proto)
try:
sock.connect(sockaddr)
@@ -89,8 +89,6 @@ def get_userdata(orchestrator=dict):
userdata += "echo \"Executing userdata...\"\n"
userdata += "set -x\n"
userdata += "set -e\n"
- userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
- userdata += "echo \"nameserver 8.8.8.8\" >> /etc/resolv.conf\n"
userdata += "echo \"Install curl...\"\n"
userdata += "apt-get install curl\n"
userdata += "echo \"Inject public key...\"\n"
@@ -110,10 +108,6 @@ def get_userdata(orchestrator=dict):
orchestrator['bootstrap']['config']['url'])
userdata += ("echo \"Disable usage of mysql...\"\n")
userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
- userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n"
- % orchestrator['details']['fip'].ip)
- userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
- "=%s/g /config_file\n" % orchestrator['details']['fip'].ip)
userdata += "echo \"Set autostart of components to 'false'\"\n"
userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
userdata += "echo \"Execute bootstrap...\"\n"
@@ -151,14 +145,14 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.case_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/vnf/ims/')
- self.data_dir = getattr(CONST, 'dir_ims_data')
- self.test_dir = getattr(CONST, 'dir_repo_vims_test')
+ self.data_dir = getattr(config.CONF, 'dir_ims_data')
+ self.test_dir = getattr(config.CONF, 'dir_repo_vims_test')
self.created_resources = []
self.logger.info("%s VNF onboarding test starting", self.case_name)
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except BaseException:
raise Exception("Orchestra VNF config file not found")
config_file = self.case_dir + self.config
@@ -196,6 +190,8 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.images = get_config("tenant_images.orchestrator", config_file)
self.images.update(get_config("tenant_images.%s" %
self.case_name, config_file))
+ self.creds = None
+ self.orchestra_router = None
def prepare(self):
"""Prepare testscase (Additional pre-configuration steps)."""
@@ -206,16 +202,14 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.logger.info("Additional pre-configuration steps")
self.creds = {
- "tenant": self.snaps_creds.project_name,
- "username": self.snaps_creds.username,
- "password": self.snaps_creds.password,
- "auth_url": public_auth_url
- }
+ "tenant": self.snaps_creds.project_name,
+ "username": self.snaps_creds.username,
+ "password": self.snaps_creds.password,
+ "auth_url": public_auth_url}
self.prepare_images()
self.prepare_flavor()
self.prepare_security_groups()
self.prepare_network()
- self.prepare_floating_ip()
def prepare_images(self):
"""Upload images if they doen't exist yet"""
@@ -231,7 +225,7 @@ class OpenImsVnf(vnf.VnfOnBoarding):
image_file=image_file,
public=True))
image.create()
- # self.created_resources.append(image);
+ self.created_resources.append(image)
def prepare_security_groups(self):
"""Create Open Baton security group if it doesn't exist yet"""
@@ -240,28 +234,32 @@ class OpenImsVnf(vnf.VnfOnBoarding):
sg_rules = list()
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.ingress,
protocol=Protocol.tcp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.egress,
protocol=Protocol.tcp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.ingress,
protocol=Protocol.udp,
port_range_min=1,
port_range_max=65535))
sg_rules.append(
SecurityGroupRuleConfig(
- sec_grp_name="orchestra-sec-group-allowall",
+ sec_grp_name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
direction=Direction.egress,
protocol=Protocol.udp,
port_range_min=1,
@@ -269,7 +267,8 @@ class OpenImsVnf(vnf.VnfOnBoarding):
security_group = OpenStackSecurityGroup(
self.snaps_creds,
SecurityGroupConfig(
- name="orchestra-sec-group-allowall",
+ name="orchestra-sec-group-allowall-{}".format(
+ self.uuid),
rule_settings=sg_rules))
security_group_info = security_group.create()
@@ -300,12 +299,10 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.logger.info(
"Creating network/subnet/router if they doen't exist yet...")
subnet_settings = SubnetConfig(
- name='%s_subnet' %
- self.case_name,
+ name='{}_subnet-{}'.format(self.case_name, self.uuid),
cidr="192.168.100.0/24")
network_settings = NetworkConfig(
- name='%s_net' %
- self.case_name,
+ name='{}_net-{}'.format(self.case_name, self.uuid),
subnet_settings=[subnet_settings])
orchestra_network = OpenStackNetwork(
self.snaps_creds, network_settings)
@@ -316,77 +313,27 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.mano['details']['external_net_name'] = \
snaps_utils.get_ext_net_name(self.snaps_creds)
self.created_resources.append(orchestra_network)
- orchestra_router = OpenStackRouter(
+ self.orchestra_router = OpenStackRouter(
self.snaps_creds,
RouterConfig(
- name='%s_router' %
- self.case_name,
+ name='{}_router-{}'.format(self.case_name, self.uuid),
external_gateway=self.mano['details']['external_net_name'],
internal_subnets=[
subnet_settings.name]))
- orchestra_router.create()
- self.created_resources.append(orchestra_router)
+ self.orchestra_router.create()
+ self.created_resources.append(self.orchestra_router)
self.logger.info("Created network and router for Open Baton NFVO...")
- def prepare_floating_ip(self):
- """Select/Create Floating IP if it doesn't exist yet"""
- self.logger.info("Retrieving floating IP for Open Baton NFVO")
- neutron_client = snaps_utils.neutron_utils.neutron_client(
- self.snaps_creds)
- # Finding Tenant ID to check to which tenant the Floating IP belongs
- tenant_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(self.creds),
- self.tenant_name)
- # Use os_utils to retrieve complete information of Floating IPs
- floating_ips = os_utils.get_floating_ips(neutron_client)
- my_floating_ips = []
- # Filter Floating IPs with tenant id
- for floating_ip in floating_ips:
- # self.logger.info("Floating IP: %s", floating_ip)
- if floating_ip.get('tenant_id') == tenant_id:
- my_floating_ips.append(floating_ip.get('floating_ip_address'))
- # Select if Floating IP exist else create new one
- if len(my_floating_ips) >= 1:
- # Get Floating IP object from snaps for clean up
- snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
- neutron_client)
- for my_floating_ip in my_floating_ips:
- for snaps_floating_ip in snaps_floating_ips:
- if snaps_floating_ip.ip == my_floating_ip:
- self.mano['details']['fip'] = snaps_floating_ip
- self.logger.info(
- "Selected floating IP for Open Baton NFVO %s",
- (self.mano['details']['fip'].ip))
- break
- if self.mano['details']['fip'] is not None:
- break
- else:
- self.logger.info("Creating floating IP for Open Baton NFVO")
- self.mano['details']['fip'] = (
- snaps_utils.neutron_utils. create_floating_ip(
- neutron_client, self.mano['details']['external_net_name']))
- self.logger.info(
- "Created floating IP for Open Baton NFVO %s",
- (self.mano['details']['fip'].ip))
-
def get_vim_descriptor(self):
""""Create VIM descriptor to be used for onboarding"""
self.logger.info(
"Building VIM descriptor with PoP creds: %s",
self.creds)
- # Depending on API version either tenant ID or project name must be
- # used
- if os_utils.is_keystone_v3():
- self.logger.info(
- "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
- project_id = os_utils.get_tenant_id(
- os_utils.get_keystone_client(),
- self.creds.get("project_name"))
- else:
- self.logger.info(
- "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
- project_id = self.creds.get("tenant_name")
- self.logger.debug("VIM project/tenant id: %s", project_id)
+ self.logger.debug("VIM project/tenant id: %s",
+ self.snaps_creds.project_name)
+ keystone = keystone_utils.keystone_client(self.snaps_creds)
+ project_id = keystone_utils.get_project(
+ keystone=keystone, project_name=self.snaps_creds.project_name).id
vim_json = {
"name": "vim-instance",
"authUrl": self.creds.get("auth_url"),
@@ -427,40 +374,27 @@ class OpenImsVnf(vnf.VnfOnBoarding):
exists=True)
# setting up port
port_settings = PortConfig(
- name='%s_port' % self.case_name,
+ name='{}_port-{}'.format(self.case_name, self.uuid),
network_name=self.mano['details']['network']['name'])
# build configuration of vm
orchestra_settings = VmInstanceConfig(
- name=self.case_name,
+ name='{}-{}'.format(self.case_name, self.uuid),
flavor=self.mano['details']['flavor']['name'],
port_settings=[port_settings],
security_group_names=[self.mano['details']['sec_group']],
+ floating_ip_settings=[FloatingIpConfig(
+ name='orchestra_fip-{}'.format(self.uuid),
+ port_name=port_settings.name,
+ router_name=self.orchestra_router.router_settings.name)],
userdata=str(userdata))
- orchestra_vm = OpenStackVmInstance(self.snaps_creds,
- orchestra_settings,
- image_settings)
-
+ orchestra_vm = OpenStackVmInstance(
+ self.snaps_creds, orchestra_settings, image_settings)
orchestra_vm.create()
+ self.mano['details']['fip'] = orchestra_vm.get_floating_ip()
self.created_resources.append(orchestra_vm)
self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
self.logger.info(
- "Created orchestra instance: %s",
- self.mano['details']['id'])
-
- self.logger.info("Associating floating ip: '%s' to VM '%s' ",
- self.mano['details']['fip'].ip,
- self.case_name)
- nova_client = os_utils.get_nova_client()
- if not os_utils.add_floating_ip(
- nova_client,
- self.mano['details']['id'],
- self.mano['details']['fip'].ip):
- duration = time.time() - start_time
- self.details["orchestrator"].update(
- status='FAIL', duration=duration)
- self.logger.error("Cannot associate floating IP to VM.")
- return False
-
+ "Created orchestra instance: %s", self.mano['details']['id'])
self.logger.info("Waiting for Open Baton NFVO to be up and running...")
timeout = 0
while timeout < 20:
@@ -545,8 +479,8 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.mano['details']['nsr'] = nsr_agent.create(
self.mano['details']['nsd_id'])
- except NfvoException as exc:
- self.logger.error(exc.message)
+ except NfvoException:
+ self.logger.exception("failed")
duration = time.time() - start_time
self.details["vnf"].update(status='FAIL', duration=duration)
return False
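exc.message does not exist on Python 3 exceptions, so the handlers switch to logger.exception, which logs at ERROR level and appends the traceback automatically. A tiny illustration:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)

try:
    raise RuntimeError("NFVO unreachable")
except RuntimeError:
    # logger.exception() records the message and the full traceback,
    # with no access to the removed exc.message attribute.
    LOGGER.exception("failed")
```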
@@ -646,13 +580,11 @@ class OpenImsVnf(vnf.VnfOnBoarding):
try:
main_agent = MainAgent(
nfvo_ip=self.mano['details']['fip'].ip,
- nfvo_port=8080,
- https=False,
- version=1,
+ nfvo_port=8080, https=False, version=1,
username=self.mano['credentials']['username'],
password=self.mano['credentials']['password'])
self.logger.info("Terminating %s...", self.vnf['name'])
- if (self.mano['details'].get('nsr')):
+ if self.mano['details'].get('nsr'):
main_agent.get_agent(
"nsr",
project_id=self.mano['details']['project_id']).\
@@ -663,28 +595,4 @@ class OpenImsVnf(vnf.VnfOnBoarding):
self.logger.info("No need to terminate the VNF...")
except (NfvoException, KeyError) as exc:
self.logger.error('Unexpected error cleaning - %s', exc)
-
- try:
- neutron_client = os_utils.get_neutron_client(self.creds)
- self.logger.info("Deleting Open Baton Port...")
- port = snaps_utils.neutron_utils.get_port(
- neutron_client,
- port_name='%s_port' % self.case_name)
- snaps_utils.neutron_utils.delete_port(neutron_client, port)
- time.sleep(10)
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', exc)
- try:
- self.logger.info("Deleting Open Baton Floating IP...")
- snaps_utils.neutron_utils.delete_floating_ip(
- neutron_client, self.mano['details']['fip'])
- except Exception as exc: # pylint: disable=broad-except
- self.logger.error('Unexpected error cleaning - %s', exc)
-
- for resource in reversed(self.created_resources):
- try:
- self.logger.info("Cleaning %s", str(resource))
- resource.clean()
- except Exception as exc:
- self.logger.error('Unexpected error cleaning - %s', exc)
super(OpenImsVnf, self).clean()
diff --git a/functest/opnfv_tests/vnf/router/cloudify_vrouter.py b/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
index 829206d60..18acce6f7 100644
--- a/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
+++ b/functest/opnfv_tests/vnf/router/cloudify_vrouter.py
@@ -14,6 +14,7 @@
import logging
import os
import time
+import uuid
from cloudify_rest_client import CloudifyClient
from cloudify_rest_client.executions import Execution
@@ -22,7 +23,7 @@ from scp import SCPClient
from functest.opnfv_tests.openstack.snaps import snaps_utils
import functest.opnfv_tests.vnf.router.vrouter_base as vrouter_base
from functest.opnfv_tests.vnf.router.utilvnf import Utilvnf
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.utils import functest_utils
from git import Repo
@@ -34,6 +35,7 @@ from snaps.config.network import NetworkConfig, PortConfig, SubnetConfig
from snaps.config.router import RouterConfig
from snaps.config.security_group import (
Direction, Protocol, SecurityGroupConfig, SecurityGroupRuleConfig)
+from snaps.config.user import UserConfig
from snaps.config.vm_inst import FloatingIpConfig, VmInstanceConfig
from snaps.openstack.create_flavor import OpenStackFlavor
@@ -43,6 +45,7 @@ from snaps.openstack.create_keypairs import OpenStackKeypair
from snaps.openstack.create_network import OpenStackNetwork
from snaps.openstack.create_security_group import OpenStackSecurityGroup
from snaps.openstack.create_router import OpenStackRouter
+from snaps.openstack.create_user import OpenStackUser
import snaps.openstack.utils.glance_utils as glance_utils
from snaps.openstack.utils import keystone_utils
@@ -66,7 +69,7 @@ class CloudifyVrouter(vrouter_base.VrouterOnBoardingBase):
# Retrieve the configuration
try:
self.config = getattr(
- CONST, 'vnf_{}_config'.format(self.case_name))
+ config.CONF, 'vnf_{}_config'.format(self.case_name))
except Exception:
raise Exception("VNF config file not found")
@@ -118,6 +121,17 @@ class CloudifyVrouter(vrouter_base.VrouterOnBoardingBase):
"tenant_images", config_file)
self.__logger.info("Images needed for vrouter: %s", self.images)
+ @staticmethod
+ def run_blocking_ssh_command(ssh, cmd,
+ error_msg="Unable to run this command"):
+ """Command to run ssh command with the exit status."""
+ (_, stdout, stderr) = ssh.exec_command(cmd)
+ CloudifyVrouter.__logger.debug("SSH %s stdout: %s", cmd, stdout.read())
+ if stdout.channel.recv_exit_status() != 0:
+ CloudifyVrouter.__logger.error(
+ "SSH %s stderr: %s", cmd, stderr.read())
+ raise Exception(error_msg)
+
def prepare(self):
super(CloudifyVrouter, self).prepare()
self.__logger.info("Additional pre-configuration steps")
@@ -268,11 +282,11 @@ class CloudifyVrouter(vrouter_base.VrouterOnBoardingBase):
scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
scp.put(kp_file, '~/')
cmd = "sudo cp ~/cloudify_vrouter.pem /etc/cloudify/"
- run_blocking_ssh_command(ssh, cmd)
+ self.run_blocking_ssh_command(ssh, cmd)
cmd = "sudo chmod 444 /etc/cloudify/cloudify_vrouter.pem"
- run_blocking_ssh_command(ssh, cmd)
+ self.run_blocking_ssh_command(ssh, cmd)
cmd = "sudo yum install -y gcc python-devel"
- run_blocking_ssh_command(
+ self.run_blocking_ssh_command(
ssh, cmd, "Unable to install packages on manager")
self.details['orchestrator'].update(status='PASS', duration=duration)
@@ -313,21 +327,31 @@ class CloudifyVrouter(vrouter_base.VrouterOnBoardingBase):
glance = glance_utils.glance_client(self.snaps_creds)
image = glance_utils.get_image(glance, "vyos1.1.7")
+ user_creator = OpenStackUser(
+ self.snaps_creds,
+ UserConfig(
+ name='cloudify_network_bug-{}'.format(self.uuid),
+ password=str(uuid.uuid4()),
+ roles={'_member_': self.tenant_name}))
+ user_creator.create()
+ self.created_object.append(user_creator)
+ snaps_creds = user_creator.get_os_creds(self.snaps_creds.project_name)
+
self.vnf['inputs'].update(dict(target_vnf_image_id=image.id))
self.vnf['inputs'].update(dict(reference_vnf_image_id=image.id))
self.vnf['inputs'].update(dict(target_vnf_flavor_id=flavor.id))
self.vnf['inputs'].update(dict(reference_vnf_flavor_id=flavor.id))
self.vnf['inputs'].update(dict(
- keystone_username=self.snaps_creds.username))
+ keystone_username=snaps_creds.username))
self.vnf['inputs'].update(dict(
- keystone_password=self.snaps_creds.password))
+ keystone_password=snaps_creds.password))
self.vnf['inputs'].update(dict(
- keystone_tenant_name=self.snaps_creds.project_name))
+ keystone_tenant_name=snaps_creds.project_name))
self.vnf['inputs'].update(dict(
- region=self.snaps_creds.region_name))
+ region=snaps_creds.region_name))
self.vnf['inputs'].update(dict(
keystone_url=keystone_utils.get_endpoint(
- self.snaps_creds, 'identity')))
+ snaps_creds, 'identity')))
self.__logger.info("Create VNF Instance")
cfy_client.deployments.create(
@@ -473,10 +497,3 @@ def get_execution_id(client, deployment_id):
raise RuntimeError('Failed to get create_deployment_environment '
'workflow execution.'
'Available executions: {0}'.format(executions))
-
-
-def run_blocking_ssh_command(ssh, cmd, error_msg="Unable to run this command"):
- """Command to run ssh command with the exit status."""
- (_, stdout, _) = ssh.exec_command(cmd)
- if stdout.channel.recv_exit_status() != 0:
- raise Exception(error_msg)
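cloudify_vrouter now creates a short-lived member user and feeds its credentials into the blueprint inputs instead of the main project credentials. A condensed sketch of that user creation with the classes imported in this hunk (the role mapping and name prefix are copied from the patch):

```python
import uuid

from snaps.config.user import UserConfig
from snaps.openstack.create_user import OpenStackUser


def create_test_user(snaps_creds, run_uuid, tenant_name):
    """Create a dedicated member user and return credentials scoped to it."""
    user_creator = OpenStackUser(
        snaps_creds,
        UserConfig(
            name='cloudify_network_bug-{}'.format(run_uuid),
            password=str(uuid.uuid4()),
            roles={'_member_': tenant_name}))
    user_creator.create()
    return user_creator.get_os_creds(snaps_creds.project_name)
```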
diff --git a/functest/opnfv_tests/vnf/router/utilvnf.py b/functest/opnfv_tests/vnf/router/utilvnf.py
index d18e9375c..6861b3865 100644
--- a/functest/opnfv_tests/vnf/router/utilvnf.py
+++ b/functest/opnfv_tests/vnf/router/utilvnf.py
@@ -18,7 +18,7 @@ import pkg_resources
import requests
import yaml
-from functest.utils.constants import CONST
+from functest.utils import config
from git import Repo
from requests.auth import HTTPBasicAuth
from snaps.openstack.utils import nova_utils
@@ -55,7 +55,7 @@ class Utilvnf(object): # pylint: disable=too-many-instance-attributes
def __init__(self):
self.snaps_creds = ""
- self.vnf_data_dir = getattr(CONST, 'dir_router_data')
+ self.vnf_data_dir = getattr(config.CONF, 'dir_router_data')
self.opnfv_vnf_data_dir = "opnfv-vnf-data/"
self.command_template_dir = "command_template/"
self.test_scenario_yaml = "test_scenario.yaml"
@@ -76,7 +76,7 @@ class Utilvnf(object): # pylint: disable=too-many-instance-attributes
'functest', 'opnfv_tests/vnf/router')
config_file_name = getattr(
- CONST, 'vnf_{}_config'.format("vyos_vrouter"))
+ config.CONF, 'vnf_{}_config'.format("vyos_vrouter"))
config_file = os.path.join(case_dir, config_file_name)
diff --git a/functest/opnfv_tests/vnf/router/vrouter_base.py b/functest/opnfv_tests/vnf/router/vrouter_base.py
index 84cd51e51..8818032da 100644
--- a/functest/opnfv_tests/vnf/router/vrouter_base.py
+++ b/functest/opnfv_tests/vnf/router/vrouter_base.py
@@ -20,7 +20,7 @@ import time
import pkg_resources
import functest.core.vnf as vnf
-from functest.utils.constants import CONST
+from functest.utils import config
from functest.opnfv_tests.vnf.router.test_controller import function_test_exec
from functest.opnfv_tests.vnf.router.utilvnf import Utilvnf
@@ -37,8 +37,8 @@ class VrouterOnBoardingBase(vnf.VnfOnBoarding):
super(VrouterOnBoardingBase, self).__init__(**kwargs)
self.case_dir = pkg_resources.resource_filename(
'functest', 'opnfv_tests/vnf/router')
- self.data_dir = getattr(CONST, 'dir_router_data')
- self.result_dir = os.path.join(getattr(CONST, 'dir_results'),
+ self.data_dir = getattr(config.CONF, 'dir_router_data')
+ self.result_dir = os.path.join(getattr(config.CONF, 'dir_results'),
self.case_name)
self.util = Utilvnf()
self.util_info = {}
diff --git a/functest/tests/unit/ci/test_run_tests.py b/functest/tests/unit/ci/test_run_tests.py
index bc9677433..b8dca20c4 100644
--- a/functest/tests/unit/ci/test_run_tests.py
+++ b/functest/tests/unit/ci/test_run_tests.py
@@ -91,8 +91,7 @@ class RunTestsTesting(unittest.TestCase):
pass
envfile = 'rc_file'
with mock.patch('six.moves.builtins.open',
- mock.mock_open(read_data=msg),
- create=True) as mock_method,\
+ mock.mock_open(read_data=msg)) as mock_method,\
mock.patch('os.path.isfile', return_value=True):
mock_method.return_value.__iter__ = lambda self: iter(
self.readline, '')
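create=True is only needed when the patched attribute does not already exist on the target; six.moves.builtins always has open, so the flag is dropped. A minimal sketch of the mock_open pattern the test keeps using (file name and contents are illustrative):

```python
import unittest

import mock


class OpenRcTest(unittest.TestCase):

    def test_read_rc_file(self):
        data = "export OS_AUTH_URL=http://127.0.0.1:5000/v3\n"
        with mock.patch('six.moves.builtins.open',
                        mock.mock_open(read_data=data)) as mock_file:
            with open('rc_file') as rcfile:
                self.assertIn('OS_AUTH_URL', rcfile.read())
        mock_file.assert_called_once_with('rc_file')


if __name__ == "__main__":
    unittest.main()
```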
diff --git a/functest/tests/unit/core/test_vnf.py b/functest/tests/unit/core/test_vnf.py
index 086ee6821..dbdcc0f05 100644
--- a/functest/tests/unit/core/test_vnf.py
+++ b/functest/tests/unit/core/test_vnf.py
@@ -16,6 +16,7 @@ import mock
from functest.core import vnf
from functest.core import testcase
+from functest.utils import constants
from snaps.openstack.os_credentials import OSCreds
@@ -113,7 +114,7 @@ class VnfBaseTesting(unittest.TestCase):
def test_prepare_exc1(self, *args):
with self.assertRaises(Exception):
self.test.prepare()
- args[0].assert_called_with(os_env_file=vnf.VnfOnBoarding.env_file)
+ args[0].assert_called_with(os_env_file=constants.ENV_FILE)
args[1].assert_not_called()
args[2].assert_not_called()
@@ -123,7 +124,7 @@ class VnfBaseTesting(unittest.TestCase):
def test_prepare_exc2(self, *args):
with self.assertRaises(Exception):
self.test.prepare()
- args[0].assert_called_with(os_env_file=vnf.VnfOnBoarding.env_file)
+ args[0].assert_called_with(os_env_file=constants.ENV_FILE)
args[1].assert_called_with(mock.ANY, mock.ANY)
args[2].assert_not_called()
@@ -136,7 +137,7 @@ class VnfBaseTesting(unittest.TestCase):
def test_prepare_exc3(self, *args):
with self.assertRaises(Exception):
self.test.prepare()
- args[0].assert_called_with(os_env_file=vnf.VnfOnBoarding.env_file)
+ args[0].assert_called_with(os_env_file=constants.ENV_FILE)
args[1].assert_called_with(mock.ANY, mock.ANY)
args[2].assert_called_with(mock.ANY)
args[3].assert_called_with(mock.ANY, mock.ANY)
@@ -150,7 +151,7 @@ class VnfBaseTesting(unittest.TestCase):
@mock.patch('snaps.openstack.tests.openstack_tests.get_credentials')
def test_prepare_default(self, *args):
self.assertEqual(self.test.prepare(), testcase.TestCase.EX_OK)
- args[0].assert_called_with(os_env_file=vnf.VnfOnBoarding.env_file)
+ args[0].assert_called_with(os_env_file=constants.ENV_FILE)
args[1].assert_called_with(mock.ANY, mock.ANY)
args[2].assert_called_with(mock.ANY)
args[3].assert_called_with(mock.ANY, mock.ANY)
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index c11948bb9..65784ae53 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -13,7 +13,6 @@ import logging
import os
import unittest
-from keystoneauth1.exceptions import auth_plugins
import mock
from robot.errors import RobotError
import six
@@ -283,6 +282,15 @@ class ODLRunTesting(ODLTesting):
return_value=ODLTesting._neutron_url)
@mock.patch('functest.opnfv_tests.openstack.snaps.snaps_utils.'
'get_credentials')
+ def _test_missing_value(self, *args):
+ self.assertEqual(self.test.run(), testcase.TestCase.EX_RUN_ERROR)
+ args[0].assert_called_once_with()
+ args[1].assert_called_once_with(mock.ANY, 'network')
+
+ @mock.patch('snaps.openstack.utils.keystone_utils.get_endpoint',
+ return_value=ODLTesting._neutron_url)
+ @mock.patch('functest.opnfv_tests.openstack.snaps.snaps_utils.'
+ 'get_credentials')
def _test_run(self, status=testcase.TestCase.EX_OK,
exception=None, *args, **kwargs):
odlip = kwargs['odlip'] if 'odlip' in kwargs else '127.0.0.3'
@@ -331,7 +339,7 @@ class ODLRunTesting(ODLTesting):
def test_exc(self):
with mock.patch('snaps.openstack.utils.keystone_utils.get_endpoint',
- side_effect=auth_plugins.MissingAuthPlugin()):
+ side_effect=Exception()):
self.assertEqual(self.test.run(),
testcase.TestCase.EX_RUN_ERROR)
@@ -362,7 +370,7 @@ class ODLRunTesting(ODLTesting):
odlwebport=self._odl_webport)
def test_no_sdn_controller_ip(self):
- self.assertEqual(self.test.run(), testcase.TestCase.EX_RUN_ERROR)
+ self._test_missing_value()
def test_without_installer_type(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
@@ -387,7 +395,7 @@ class ODLRunTesting(ODLTesting):
def test_apex_no_controller_ip(self):
os.environ["INSTALLER_TYPE"] = "apex"
- self.assertEqual(self.test.run(), testcase.TestCase.EX_RUN_ERROR)
+ self._test_missing_value()
def test_apex(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
@@ -398,7 +406,7 @@ class ODLRunTesting(ODLTesting):
def test_netvirt_no_controller_ip(self):
os.environ["INSTALLER_TYPE"] = "netvirt"
- self.assertEqual(self.test.run(), testcase.TestCase.EX_RUN_ERROR)
+ self._test_missing_value()
def test_netvirt(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
@@ -415,7 +423,7 @@ class ODLRunTesting(ODLTesting):
def test_daisy_no_controller_ip(self):
os.environ["INSTALLER_TYPE"] = "daisy"
- self.assertEqual(self.test.run(), testcase.TestCase.EX_RUN_ERROR)
+ self._test_missing_value()
def test_daisy(self):
os.environ["SDN_CONTROLLER_IP"] = self._sdn_controller_ip
diff --git a/functest/tests/unit/openstack/rally/test_rally.py b/functest/tests/unit/openstack/rally/test_rally.py
index 5d82d91f9..9cc6bf53d 100644
--- a/functest/tests/unit/openstack/rally/test_rally.py
+++ b/functest/tests/unit/openstack/rally/test_rally.py
@@ -329,8 +329,6 @@ class OSRallyTesting(unittest.TestCase):
@mock.patch('snaps.openstack.utils.deploy_utils.create_image')
@mock.patch('snaps.openstack.utils.deploy_utils.create_network')
@mock.patch('snaps.openstack.utils.deploy_utils.create_router')
- @mock.patch('snaps.openstack.utils.keystone_utils.keystone_client')
- @mock.patch('snaps.openstack.utils.keystone_utils.get_project')
@mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
return_value=None)
def test_prepare_env_flavor_creation_failed(self, mock_create_flavor,
@@ -350,8 +348,6 @@ class OSRallyTesting(unittest.TestCase):
@mock.patch('snaps.openstack.utils.deploy_utils.create_image')
@mock.patch('snaps.openstack.utils.deploy_utils.create_network')
@mock.patch('snaps.openstack.utils.deploy_utils.create_router')
- @mock.patch('snaps.openstack.utils.keystone_utils.keystone_client')
- @mock.patch('snaps.openstack.utils.keystone_utils.get_project')
@mock.patch('snaps.openstack.create_flavor.OpenStackFlavor.create',
side_effect=[mock.Mock, None])
def test_prepare_env_flavor_alt_creation_failed(self, mock_create_flavor,
diff --git a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
index e2e7dcebc..1d5cc69f4 100644
--- a/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
+++ b/functest/tests/unit/openstack/refstack_client/test_refstack_client.py
@@ -33,12 +33,10 @@ class OSRefstackClientTesting(unittest.TestCase):
_config = pkg_resources.resource_filename(
'functest',
'opnfv_tests/openstack/refstack_client/refstack_tempest.conf')
- _testlist = pkg_resources.resource_filename(
- 'functest', 'opnfv_tests/openstack/refstack_client/defcore.txt')
def setUp(self):
- self.default_args = {'config': self._config,
- 'testlist': self._testlist}
+ self.default_args = {'config': None,
+ 'testlist': RefstackClient.defcorelist}
os.environ['OS_AUTH_URL'] = 'https://ip:5000/v3'
os.environ['OS_INSECURE'] = 'true'
self.case_name = 'refstack_defcore'
@@ -57,30 +55,28 @@ class OSRefstackClientTesting(unittest.TestCase):
'get_credentials', return_value=self.os_creds):
return RefstackClient()
- def test_run_defcore_insecure(self):
+ @mock.patch('functest.utils.functest_utils.execute_command')
+ def test_run_defcore_insecure(self, m_cmd):
insecure = '-k'
config = 'tempest.conf'
testlist = 'testlist'
client = self._create_client()
- with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
- 'refstack_client.ft_utils.execute_command') as m_cmd:
- cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(insecure, config, testlist))
- client.run_defcore(config, testlist)
- m_cmd.assert_any_call(cmd)
-
- def test_run_defcore(self):
+ cmd = ("refstack-client test {0} -c {1} -v --test-list {2}".format(
+ insecure, config, testlist))
+ client.run_defcore(config, testlist)
+ m_cmd.assert_any_call(cmd)
+
+ @mock.patch('functest.utils.functest_utils.execute_command')
+ def test_run_defcore(self, m_cmd):
os.environ['OS_AUTH_URL'] = 'http://ip:5000/v3'
insecure = ''
config = 'tempest.conf'
testlist = 'testlist'
client = self._create_client()
- with mock.patch('functest.opnfv_tests.openstack.refstack_client.'
- 'refstack_client.ft_utils.execute_command') as m_cmd:
- cmd = ("refstack-client test {0} -c {1} -v --test-list {2}"
- .format(insecure, config, testlist))
- client.run_defcore(config, testlist)
- m_cmd.assert_any_call(cmd)
+ cmd = ("refstack-client test {0} -c {1} -v --test-list {2}".format(
+ insecure, config, testlist))
+ client.run_defcore(config, testlist)
+ m_cmd.assert_any_call(cmd)
@mock.patch('functest.opnfv_tests.openstack.refstack_client.'
'refstack_client.LOGGER.info')
@@ -109,7 +105,7 @@ class OSRefstackClientTesting(unittest.TestCase):
def _get_main_kwargs(self, key=None):
kwargs = {'config': self._config,
- 'testlist': self._testlist}
+ 'testlist': RefstackClient.defcorelist}
if key:
del kwargs[key]
return kwargs
@@ -136,15 +132,16 @@ class OSRefstackClientTesting(unittest.TestCase):
self._test_argparser('config', self._config)
def test_argparser_testlist(self):
- self._test_argparser('testlist', self._testlist)
+ self._test_argparser('testlist', RefstackClient.defcorelist)
def test_argparser_multiple_args(self):
self.default_args['config'] = self._config
- self.default_args['testlist'] = self._testlist
+ self.default_args['testlist'] = RefstackClient.defcorelist
parser = RefstackClientParser()
self.assertEqual(parser.parse_args(
["--config={}".format(self._config),
- "--testlist={}".format(self._testlist)]), self.default_args)
+ "--testlist={}".format(RefstackClient.defcorelist)]),
+ self.default_args)
if __name__ == "__main__":
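The rewritten tests patch functest.utils.functest_utils.execute_command at its defining module instead of the ft_utils alias, so a single decorator covers both the secure and insecure cases. The command under test is just a formatted string; a sketch of how the -k flag could be derived (the https/OS_INSECURE condition is inferred from the test setup, not taken from refstack_client.py):

```python
import os


def build_defcore_cmd(config, testlist):
    """Build the refstack-client invocation exercised by the tests."""
    insecure = ''
    if ('https' in os.environ.get('OS_AUTH_URL', '') and
            os.environ.get('OS_INSECURE', '').lower() == 'true'):
        insecure = '-k'
    return "refstack-client test {0} -c {1} -v --test-list {2}".format(
        insecure, config, testlist)


print(build_defcore_cmd('tempest.conf', 'defcore.txt'))
```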
diff --git a/functest/tests/unit/openstack/tempest/test_conf_utils.py b/functest/tests/unit/openstack/tempest/test_conf_utils.py
index 34dc0f4c4..1d5f29a2c 100644
--- a/functest/tests/unit/openstack/tempest/test_conf_utils.py
+++ b/functest/tests/unit/openstack/tempest/test_conf_utils.py
@@ -14,7 +14,7 @@ import unittest
import mock
from functest.opnfv_tests.openstack.tempest import tempest, conf_utils
-from functest.utils.constants import CONST
+from functest.utils import config
from snaps.openstack.os_credentials import OSCreds
@@ -77,13 +77,13 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
tempest_resources = tempest.TempestResourcesManager(
os_creds=self.os_creds)
- setattr(CONST, 'tempest_use_custom_flavors', 'True')
+ setattr(config.CONF, 'tempest_use_custom_flavors', 'True')
with self.assertRaises(Exception) as context:
tempest_resources.create()
msg = 'Failed to create flavor'
self.assertTrue(msg in context.exception, msg=str(context.exception))
- setattr(CONST, 'tempest_use_custom_flavors', 'False')
+ setattr(config.CONF, 'tempest_use_custom_flavors', 'False')
with self.assertRaises(Exception) as context:
tempest_resources.create(use_custom_flavors=True)
msg = 'Failed to create flavor'
@@ -101,12 +101,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
cmd = "rally deployment destroy opnfv-rally"
error_msg = "Deployment %s does not exist." % \
- getattr(CONST, 'rally_deployment_name')
+ getattr(config.CONF, 'rally_deployment_name')
mock_logger_info.assert_any_call("Creating Rally environment...")
mock_exec.assert_any_call(cmd, error_msg=error_msg, verbose=False)
cmd = "rally deployment create --fromenv --name="
- cmd += getattr(CONST, 'rally_deployment_name')
+ cmd += getattr(config.CONF, 'rally_deployment_name')
error_msg = "Problem while creating Rally deployment"
mock_exec_raise.assert_any_call(cmd, error_msg=error_msg)
@@ -123,12 +123,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'stdout.readline.return_value': '0'}
mock_popen.configure_mock(**attrs)
- setattr(CONST, 'tempest_verifier_name', 'test_veifier_name')
+ setattr(config.CONF, 'tempest_verifier_name', 'test_verifier_name')
with mock.patch('functest.utils.functest_utils.execute_command_raise',
side_effect=Exception), \
self.assertRaises(Exception):
conf_utils.create_verifier()
- mock_logger_debug.assert_any_call("Tempest test_veifier_name"
+ mock_logger_debug.assert_any_call("Tempest test_verifier_name"
" does not exist")
@mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
@@ -137,7 +137,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'create_rally_deployment', return_value=mock.Mock())
def test_get_verif_id_missing_verif(self, mock_rally, mock_tempest):
# pylint: disable=unused-argument
- setattr(CONST, 'tempest_verifier_name', 'test_verifier_name')
+ setattr(config.CONF, 'tempest_verifier_name', 'test_verifier_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -153,7 +153,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'create_rally_deployment', return_value=mock.Mock())
def test_get_verifier_id_default(self, mock_rally, mock_tempest):
# pylint: disable=unused-argument
- setattr(CONST, 'tempest_verifier_name', 'test_verifier_name')
+ setattr(config.CONF, 'tempest_verifier_name', 'test_verifier_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -165,7 +165,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'test_deploy_id')
def test_get_depl_id_missing_rally(self):
- setattr(CONST, 'tempest_verifier_name', 'test_deploy_name')
+ setattr(config.CONF, 'tempest_verifier_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen, \
self.assertRaises(Exception):
@@ -176,7 +176,7 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.get_verifier_deployment_id()
def test_get_depl_id_default(self):
- setattr(CONST, 'tempest_verifier_name', 'test_deploy_name')
+ setattr(config.CONF, 'tempest_verifier_name', 'test_deploy_name')
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.subprocess.Popen') as mock_popen:
mock_stdout = mock.Mock()
@@ -241,39 +241,6 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
conf_utils.configure_tempest('test_dep_dir')
self.assertTrue(mock_upd.called)
- def test_conf_tempest_defcore_def(self):
- with mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_verifier',
- return_value='test_conf_file'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.configure_tempest_update_params'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'set') as mset, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'read') as mread, \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ConfigParser.RawConfigParser.'
- 'write') as mwrite, \
- mock.patch('__builtin__.open', mock.mock_open()), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.generate_test_accounts_file'), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.shutil.copyfile'):
- conf_utils.configure_tempest_defcore(
- 'test_dep_dir', 'test_network_name', 'test_image_id',
- 'test_flavor_id', 'test_image_alt_id', 'test_flavor_alt_id',
- 'test_tenant_id')
- mset.assert_any_call('compute', 'image_ref', 'test_image_id')
- mset.assert_any_call('compute', 'image_ref_alt',
- 'test_image_alt_id')
- mset.assert_any_call('compute', 'flavor_ref', 'test_flavor_id')
- mset.assert_any_call('compute', 'flavor_ref_alt',
- 'test_flavor_alt_id')
- self.assertTrue(mread.called)
- self.assertTrue(mwrite.called)
-
def test_gen_test_accounts_file_def(self):
with mock.patch("__builtin__.open", mock.mock_open()), \
mock.patch('functest.opnfv_tests.openstack.tempest.conf_utils.'
@@ -313,12 +280,12 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
'test_image_id_alt'), None, None)
def test_upd_missing_flavor_id(self):
- setattr(CONST, 'tempest_use_custom_flavors', 'True')
+ setattr(config.CONF, 'tempest_use_custom_flavors', 'True')
self._test_missing_param(('compute', 'flavor_ref', 'test_flavor_id'),
None, 'test_flavor_id')
def test_upd_missing_flavor_id_alt(self):
- setattr(CONST, 'tempest_use_custom_flavors', 'True')
+ setattr(config.CONF, 'tempest_use_custom_flavors', 'True')
conf_utils.FLAVOR_ID_ALT = 'test_flavor_id_alt'
self._test_missing_param(('compute', 'flavor_ref_alt',
'test_flavor_id_alt'), None, None)
@@ -327,14 +294,14 @@ class OSTempestConfUtilsTesting(unittest.TestCase):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
'conf_utils.os.path.isfile',
return_value=False), \
- mock.patch('functest.opnfv_tests.openstack.tempest.'
- 'conf_utils.ft_utils.execute_command') as mexe, \
+ mock.patch('functest.opnfv_tests.openstack.tempest.'
+ 'conf_utils.ft_utils.execute_command') as mexe, \
self.assertRaises(Exception) as context:
conf_utils.configure_verifier('test_dep_dir')
mexe.assert_any_call("rally verify configure-verifier")
msg = ("Tempest configuration file 'test_dep_dir/tempest.conf'"
" NOT found.")
- self.assertTrue(msg in context)
+ self.assertTrue(msg in context.exception)
def test_configure_verifier_default(self):
with mock.patch('functest.opnfv_tests.openstack.tempest.'
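The membership checks now target context.exception rather than the assertRaises context manager itself. An equivalent and slightly stricter idiom compares against the exception's string form:

```python
import unittest


def configure_verifier(deployment_dir):
    """Toy stand-in that fails the same way the real helper does."""
    raise Exception(
        "Tempest configuration file '{}/tempest.conf' NOT found.".format(
            deployment_dir))


class ConfigureVerifierTest(unittest.TestCase):

    def test_missing_tempest_conf(self):
        with self.assertRaises(Exception) as context:
            configure_verifier('test_dep_dir')
        self.assertIn("Tempest configuration file 'test_dep_dir/tempest.conf'"
                      " NOT found.", str(context.exception))


if __name__ == "__main__":
    unittest.main()
```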
diff --git a/functest/tests/unit/openstack/tempest/test_tempest.py b/functest/tests/unit/openstack/tempest/test_tempest.py
index 77cf28afa..ba2c1c48f 100644
--- a/functest/tests/unit/openstack/tempest/test_tempest.py
+++ b/functest/tests/unit/openstack/tempest/test_tempest.py
@@ -49,15 +49,6 @@ class OSTempestTesting(unittest.TestCase):
self.tempestcustom = tempest.TempestCustom()
self.tempestdefcore = tempest.TempestDefcore()
- @mock.patch('functest.opnfv_tests.openstack.tempest.tempest.LOGGER.debug')
- def test_gen_tl_defcore_mode(self, mock_logger_debug):
- # pylint: disable=unused-argument
- self.tempestcommon.mode = 'defcore'
- with mock.patch('functest.opnfv_tests.openstack.tempest.tempest.'
- 'shutil.copyfile') as mock_copyfile:
- self.tempestcommon.generate_test_list('test_verifier_repo_dir')
- self.assertTrue(mock_copyfile.called)
-
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.LOGGER.error')
@mock.patch('functest.opnfv_tests.openstack.tempest.tempest.LOGGER.debug')
def test_gen_tl_cm_missing_file(self, mock_logger_debug,
@@ -69,7 +60,8 @@ class OSTempestTesting(unittest.TestCase):
self.assertRaises(Exception) as context:
msg = "Tempest test list file %s NOT found."
self.tempestcommon.generate_test_list('test_verifier_repo_dir')
- self.assertTrue((msg % conf_utils.TEMPEST_CUSTOM) in context)
+ self.assertTrue(
+ (msg % conf_utils.TEMPEST_CUSTOM) in context.exception)
def test_gen_tl_cm_default(self):
self.tempestcommon.mode = 'custom'
diff --git a/functest/tests/unit/utils/test_env.py b/functest/tests/unit/utils/test_env.py
index 064ff9880..49d2d974c 100644
--- a/functest/tests/unit/utils/test_env.py
+++ b/functest/tests/unit/utils/test_env.py
@@ -16,7 +16,6 @@ import unittest
from six.moves import reload_module
from functest.utils import env
-from functest.utils import constants
class EnvTesting(unittest.TestCase):
@@ -30,65 +29,27 @@ class EnvTesting(unittest.TestCase):
def test_get_unset_unknown_env(self):
del os.environ['FOO']
self.assertEqual(env.get('FOO'), None)
- # Backward compatibilty (waiting for SDNVPN and SFC)
- reload_module(env)
- with self.assertRaises(AttributeError):
- getattr(env.ENV, 'FOO')
- reload_module(constants)
- with self.assertRaises(AttributeError):
- getattr(constants.CONST, 'FOO')
def test_get_unknown_env(self):
self.assertEqual(env.get('FOO'), 'foo')
reload_module(env)
- # Backward compatibilty (waiting for SDNVPN and SFC)
- with self.assertRaises(AttributeError):
- getattr(env.ENV, 'FOO')
- reload_module(constants)
- with self.assertRaises(AttributeError):
- getattr(constants.CONST, 'FOO')
def test_get_unset_env(self):
del os.environ['CI_LOOP']
self.assertEqual(
env.get('CI_LOOP'), env.INPUTS['CI_LOOP'])
- # Backward compatibilty (waiting for SDNVPN and SFC)
- reload_module(env)
- self.assertEqual(
- getattr(env.ENV, 'CI_LOOP'), env.INPUTS['CI_LOOP'])
- reload_module(constants)
- self.assertEqual(
- getattr(constants.CONST, 'CI_LOOP'),
- env.INPUTS['CI_LOOP'])
def test_get_env(self):
self.assertEqual(
env.get('CI_LOOP'), 'weekly')
- # Backward compatibilty (waiting for SDNVPN and SFC)
- reload_module(env)
- self.assertEqual(getattr(env.ENV, 'CI_LOOP'), 'weekly')
- reload_module(constants)
- self.assertEqual(getattr(constants.CONST, 'CI_LOOP'), 'weekly')
def test_get_unset_env2(self):
del os.environ['BUILD_TAG']
self.assertEqual(
env.get('BUILD_TAG'), env.INPUTS['BUILD_TAG'])
- # Backward compatibilty (waiting for SDNVPN and SFC)
- reload_module(env)
- self.assertEqual(
- getattr(env.ENV, 'BUILD_TAG'), env.INPUTS['BUILD_TAG'])
- reload_module(constants)
- self.assertEqual(
- getattr(constants.CONST, 'BUILD_TAG'), env.INPUTS['BUILD_TAG'])
def test_get_env2(self):
self.assertEqual(env.get('BUILD_TAG'), 'master')
- # Backward compatibilty (waiting for SDNVPN and SFC)
- reload_module(env)
- self.assertEqual(getattr(env.ENV, 'BUILD_TAG'), 'master')
- reload_module(env)
- self.assertEqual(getattr(constants.CONST, 'BUILD_TAG'), 'master')
if __name__ == "__main__":
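With the backward-compatibility ENV and CONST attributes removed, the remaining contract is env.get: a set variable wins, a known key falls back to its default in env.INPUTS, and anything else yields None. A hedged sketch of that behaviour (the defaults shown are illustrative, only the CI_LOOP and BUILD_TAG keys are confirmed by these tests):

```python
import os

# Illustrative defaults; the real table lives in functest.utils.env.INPUTS.
INPUTS = {
    'CI_LOOP': 'daily',
    'BUILD_TAG': None,
}


def get(key):
    """Return the env var if set, else the functest default, else None."""
    return os.environ.get(key, INPUTS.get(key))


os.environ['CI_LOOP'] = 'weekly'
assert get('CI_LOOP') == 'weekly'
del os.environ['CI_LOOP']
assert get('CI_LOOP') == 'daily'
assert get('FOO') is None
```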
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index 9f8733bba..77328fda8 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -10,13 +10,11 @@
# pylint: disable=missing-docstring
import logging
-import os
import time
import unittest
import mock
import pkg_resources
-from six.moves import urllib
from functest.utils import functest_utils
@@ -63,40 +61,6 @@ class FunctestUtilsTesting(unittest.TestCase):
self.file_yaml = {'general': {'openstack': {'image_name':
'test_image_name'}}}
- @mock.patch('six.moves.urllib.request.urlopen',
- side_effect=urllib.error.URLError('no host given'))
- def test_check_internet_connectivity_failed(self, mock_method):
- self.assertFalse(functest_utils.check_internet_connectivity())
- mock_method.assert_called_once_with(self.url, timeout=self.timeout)
-
- @mock.patch('six.moves.urllib.request.urlopen')
- def test_check_internet_connectivity_default(self, mock_method):
- self.assertTrue(functest_utils.check_internet_connectivity())
- mock_method.assert_called_once_with(self.url, timeout=self.timeout)
-
- @mock.patch('six.moves.urllib.request.urlopen')
- def test_check_internet_connectivity_debian(self, mock_method):
- self.url = "https://www.debian.org/"
- self.assertTrue(functest_utils.check_internet_connectivity(self.url))
- mock_method.assert_called_once_with(self.url, timeout=self.timeout)
-
- @mock.patch('six.moves.urllib.request.urlopen',
- side_effect=urllib.error.URLError('no host given'))
- def test_download_url_failed(self, mock_url):
- self.assertFalse(functest_utils.download_url(self.url, self.dest_path))
-
- @mock.patch('six.moves.urllib.request.urlopen')
- def test_download_url_default(self, mock_url):
- with mock.patch("six.moves.builtins.open", mock.mock_open()) as m, \
- mock.patch('functest.utils.functest_utils.shutil.copyfileobj')\
- as mock_sh:
- name = self.url.rsplit('/')[-1]
- dest = self.dest_path + "/" + name
- self.assertTrue(functest_utils.download_url(self.url,
- self.dest_path))
- m.assert_called_once_with(dest, 'wb')
- self.assertTrue(mock_sh.called)
-
def _get_env_dict(self, var):
dic = {'INSTALLER_TYPE': self.installer,
'DEPLOY_SCENARIO': self.scenario,
@@ -113,21 +77,6 @@ class FunctestUtilsTesting(unittest.TestCase):
FunctestUtilsTesting.readline += 1
return FunctestUtilsTesting.test_ip[FunctestUtilsTesting.readline]
- # TODO: get_resolvconf_ns
- @mock.patch('functest.utils.functest_utils.dns.resolver.Resolver')
- def test_get_resolvconf_ns_default(self, mock_dns_resolve):
- attrs = {'query.return_value': ["test"]}
- mock_dns_resolve.configure_mock(**attrs)
-
- m = mock.Mock()
- attrs = {'readline.side_effect': self.readline_side}
- m.configure_mock(**attrs)
-
- with mock.patch("six.moves.builtins.open") as mo:
- mo.return_value = m
- self.assertEqual(functest_utils.get_resolvconf_ns(),
- self.test_ip[1:])
-
def _get_environ(self, var, *args): # pylint: disable=unused-argument
if var == 'INSTALLER_TYPE':
return self.installer
@@ -135,14 +84,8 @@ class FunctestUtilsTesting(unittest.TestCase):
return self.scenario
return var
- def test_get_ci_envvars_default(self):
- with mock.patch('os.environ.get',
- side_effect=self._get_environ):
- dic = {"installer": self.installer,
- "scenario": self.scenario}
- self.assertDictEqual(functest_utils.get_ci_envvars(), dic)
-
- def cmd_readline(self):
+ @staticmethod
+ def cmd_readline():
return 'test_value\n'
@mock.patch('functest.utils.functest_utils.LOGGER.error')
@@ -250,9 +193,6 @@ class FunctestUtilsTesting(unittest.TestCase):
output_file=None)
self.assertEqual(resp, 1)
- def _get_functest_config(self, var):
- return var
-
def test_get_parameter_from_yaml_failed(self):
self.file_yaml['general'] = None
with mock.patch('six.moves.builtins.open', mock.mock_open()), \
@@ -276,29 +216,6 @@ class FunctestUtilsTesting(unittest.TestCase):
self.test_file),
'test_image_name')
- @mock.patch('functest.utils.functest_utils.get_parameter_from_yaml')
- def test_get_functest_config_default(self, mock_get_parameter_from_yaml):
- with mock.patch.dict(os.environ,
- {'CONFIG_FUNCTEST_YAML': self.config_yaml}):
- functest_utils.get_functest_config(self.parameter)
- mock_get_parameter_from_yaml. \
- assert_called_once_with(self.parameter,
- self.config_yaml)
-
- def test_get_functest_yaml(self):
- with mock.patch('six.moves.builtins.open', mock.mock_open()), \
- mock.patch('functest.utils.functest_utils.yaml.safe_load') \
- as mock_yaml:
- mock_yaml.return_value = self.file_yaml
- resp = functest_utils.get_functest_yaml()
- self.assertEqual(resp, self.file_yaml)
-
- @mock.patch('functest.utils.functest_utils.LOGGER.info')
- def test_print_separator(self, mock_logger_info):
- functest_utils.print_separator()
- mock_logger_info.assert_called_once_with("======================="
- "=======================")
-
if __name__ == "__main__":
logging.disable(logging.CRITICAL)
diff --git a/functest/tests/unit/utils/test_openstack_utils.py b/functest/tests/unit/utils/test_openstack_utils.py
deleted file mode 100644
index 259e60d5c..000000000
--- a/functest/tests/unit/utils/test_openstack_utils.py
+++ /dev/null
@@ -1,1784 +0,0 @@
-#!/usr/bin/env python
-
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# pylint: disable=missing-docstring
-
-import copy
-import logging
-import os
-import unittest
-
-import mock
-
-from functest.utils import openstack_utils
-
-
-class OSUtilsTesting(unittest.TestCase):
-
- def _get_env_cred_dict(self, os_prefix=''):
- return {'OS_USERNAME': os_prefix + 'username',
- 'OS_PASSWORD': os_prefix + 'password',
- 'OS_AUTH_URL': os_prefix + 'auth_url',
- 'OS_TENANT_NAME': os_prefix + 'tenant_name',
- 'OS_USER_DOMAIN_NAME': os_prefix + 'user_domain_name',
- 'OS_PROJECT_DOMAIN_NAME': os_prefix + 'project_domain_name',
- 'OS_PROJECT_NAME': os_prefix + 'project_name',
- 'OS_ENDPOINT_TYPE': os_prefix + 'endpoint_type',
- 'OS_REGION_NAME': os_prefix + 'region_name',
- 'OS_CACERT': os_prefix + 'https_cacert',
- 'OS_INSECURE': os_prefix + 'https_insecure'}
-
- def _get_os_env_vars(self):
- return {'username': 'test_username', 'password': 'test_password',
- 'auth_url': 'test_auth_url', 'tenant_name': 'test_tenant_name',
- 'user_domain_name': 'test_user_domain_name',
- 'project_domain_name': 'test_project_domain_name',
- 'project_name': 'test_project_name',
- 'endpoint_type': 'test_endpoint_type',
- 'region_name': 'test_region_name',
- 'https_cacert': 'test_https_cacert',
- 'https_insecure': 'test_https_insecure'}
-
- def setUp(self):
- self.env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
- self.tenant_name = 'test_tenant_name'
- self.env_cred_dict = self._get_env_cred_dict()
- self.os_environs = self._get_env_cred_dict(os_prefix='test_')
- self.os_env_vars = self._get_os_env_vars()
-
- mock_obj = mock.Mock()
- attrs = {'name': 'test_flavor',
- 'id': 'flavor_id',
- 'ram': 2}
- mock_obj.configure_mock(**attrs)
- self.flavor = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'name': 'test_aggregate',
- 'id': 'aggregate_id',
- 'hosts': ['host_name']}
- mock_obj.configure_mock(**attrs)
- self.aggregate = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'instance_id',
- 'name': 'test_instance',
- 'status': 'ok'}
- mock_obj.configure_mock(**attrs)
- self.instance = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'azone_id',
- 'zoneName': 'test_azone',
- 'status': 'ok'}
- mock_obj.configure_mock(**attrs)
- self.availability_zone = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'floating_network_id': 'floating_id',
- 'floating_ip_address': 'test_floating_ip'}
- mock_obj.configure_mock(**attrs)
- self.floating_ip = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'hypervisor_id',
- 'hypervisor_hostname': 'test_hostname',
- 'state': 'up'}
- mock_obj.configure_mock(**attrs)
- self.hypervisor = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'image_id',
- 'name': 'test_image'}
- mock_obj.configure_mock(**attrs)
- self.image = mock_obj
-
- mock_obj = mock.Mock()
- self.mock_return = mock_obj
-
- self.nova_client = mock.Mock()
- attrs = {'servers.list.return_value': [self.instance],
- 'servers.get.return_value': self.instance,
- 'servers.find.return_value': self.instance,
- 'servers.create.return_value': self.instance,
- 'flavors.list.return_value': [self.flavor],
- 'flavors.find.return_value': self.flavor,
- 'servers.add_floating_ip.return_value': mock.Mock(),
- 'servers.force_delete.return_value': mock.Mock(),
- 'aggregates.list.return_value': [self.aggregate],
- 'aggregates.add_host.return_value': mock.Mock(),
- 'aggregates.remove_host.return_value': mock.Mock(),
- 'aggregates.get.return_value': self.aggregate,
- 'aggregates.delete.return_value': mock.Mock(),
- 'availability_zones.list.return_value':
- [self.availability_zone],
- 'hypervisors.list.return_value': [self.hypervisor],
- 'create.return_value': mock.Mock(),
- 'add_security_group.return_value': mock.Mock(),
- 'images.list.return_value': [self.image],
- 'images.delete.return_value': mock.Mock(),
- }
- self.nova_client.configure_mock(**attrs)
-
- self.glance_client = mock.Mock()
- attrs = {'images.list.return_value': [self.image],
- 'images.create.return_value': self.image,
- 'images.upload.return_value': mock.Mock()}
- self.glance_client.configure_mock(**attrs)
-
- mock_obj = mock.Mock()
- attrs = {'id': 'volume_id',
- 'name': 'test_volume'}
- mock_obj.configure_mock(**attrs)
- self.volume = mock_obj
-
- self.cinder_client = mock.Mock()
- attrs = {'volumes.list.return_value': [self.volume],
- 'quotas.update.return_value': mock.Mock(),
- 'volumes.detach.return_value': mock.Mock(),
- 'volumes.force_delete.return_value': mock.Mock(),
- 'volumes.delete.return_value': mock.Mock()
- }
- self.cinder_client.configure_mock(**attrs)
-
- self.resource = mock.Mock()
- attrs = {'id': 'resource_test_id',
- 'name': 'resource_test_name'
- }
-
- self.heat_client = mock.Mock()
- attrs = {'resources.get.return_value': self.resource}
- self.heat_client.configure_mock(**attrs)
-
- mock_obj = mock.Mock()
- attrs = {'id': 'tenant_id',
- 'name': 'test_tenant'}
- mock_obj.configure_mock(**attrs)
- self.tenant = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'user_id',
- 'name': 'test_user'}
- mock_obj.configure_mock(**attrs)
- self.user = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'role_id',
- 'name': 'test_role'}
- mock_obj.configure_mock(**attrs)
- self.role = mock_obj
-
- mock_obj = mock.Mock()
- attrs = {'id': 'domain_id',
- 'name': 'test_domain'}
- mock_obj.configure_mock(**attrs)
- self.domain = mock_obj
-
- self.keystone_client = mock.Mock()
- attrs = {'projects.list.return_value': [self.tenant],
- 'tenants.list.return_value': [self.tenant],
- 'users.list.return_value': [self.user],
- 'roles.list.return_value': [self.role],
- 'domains.list.return_value': [self.domain],
- 'projects.create.return_value': self.tenant,
- 'tenants.create.return_value': self.tenant,
- 'users.create.return_value': self.user,
- 'roles.grant.return_value': mock.Mock(),
- 'roles.add_user_role.return_value': mock.Mock(),
- 'projects.delete.return_value': mock.Mock(),
- 'tenants.delete.return_value': mock.Mock(),
- 'users.delete.return_value': mock.Mock(),
- }
- self.keystone_client.configure_mock(**attrs)
-
- self.router = {'id': 'router_id',
- 'name': 'test_router'}
-
- self.subnet = {'id': 'subnet_id',
- 'name': 'test_subnet'}
-
- self.networks = [{'id': 'network_id',
- 'name': 'test_network',
- 'router:external': False,
- 'shared': True,
- 'subnets': [self.subnet]},
- {'id': 'network_id1',
- 'name': 'test_network1',
- 'router:external': True,
- 'shared': True,
- 'subnets': [self.subnet]}]
-
- self.port = {'id': 'port_id',
- 'name': 'test_port'}
-
- self.sec_group = {'id': 'sec_group_id',
- 'name': 'test_sec_group'}
-
- self.sec_group_rule = {'id': 'sec_group_rule_id',
- 'direction': 'direction',
- 'protocol': 'protocol',
- 'port_range_max': 'port_max',
- 'security_group_id': self.sec_group['id'],
- 'port_range_min': 'port_min'}
- self.neutron_floatingip = {'id': 'fip_id',
- 'floating_ip_address': 'test_ip'}
- self.neutron_client = mock.Mock()
- attrs = {'list_networks.return_value': {'networks': self.networks},
- 'list_routers.return_value': {'routers': [self.router]},
- 'list_ports.return_value': {'ports': [self.port]},
- 'list_subnets.return_value': {'subnets': [self.subnet]},
- 'create_network.return_value': {'network': self.networks[0]},
- 'create_subnet.return_value': {'subnets': [self.subnet]},
- 'create_router.return_value': {'router': self.router},
- 'create_port.return_value': {'port': self.port},
- 'create_floatingip.return_value': {'floatingip':
- self.neutron_floatingip},
- 'update_network.return_value': mock.Mock(),
- 'update_port.return_value': {'port': self.port},
- 'add_interface_router.return_value': mock.Mock(),
- 'add_gateway_router.return_value': mock.Mock(),
- 'delete_network.return_value': mock.Mock(),
- 'delete_subnet.return_value': mock.Mock(),
- 'delete_router.return_value': mock.Mock(),
- 'delete_port.return_value': mock.Mock(),
- 'remove_interface_router.return_value': mock.Mock(),
- 'remove_gateway_router.return_value': mock.Mock(),
- 'list_security_groups.return_value': {'security_groups':
- [self.sec_group]},
- 'list_security_group_rules.'
- 'return_value': {'security_group_rules':
- [self.sec_group_rule]},
- 'create_security_group_rule.return_value': mock.Mock(),
- 'create_security_group.return_value': {'security_group':
- self.sec_group},
- 'update_quota.return_value': mock.Mock(),
- 'delete_security_group.return_value': mock.Mock(),
- 'list_floatingips.return_value': {'floatingips':
- [self.floating_ip]},
- 'delete_floatingip.return_value': mock.Mock(),
- }
- self.neutron_client.configure_mock(**attrs)
-
- self.empty_client = mock.Mock()
- attrs = {'list_networks.return_value': {'networks': []},
- 'list_routers.return_value': {'routers': []},
- 'list_ports.return_value': {'ports': []},
- 'list_subnets.return_value': {'subnets': []}}
- self.empty_client.configure_mock(**attrs)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_is_keystone_v3_missing_identity(self, mock_os_getenv):
- self.assertEqual(openstack_utils.is_keystone_v3(), False)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_is_keystone_v3_default(self, mock_os_getenv):
- self.assertEqual(openstack_utils.is_keystone_v3(), True)
-
- @mock.patch('functest.utils.openstack_utils.is_keystone_v3',
- return_value=False)
- def test_get_rc_env_vars_missing_identity(self, mock_get_rc_env):
- exp_resp = self.env_vars
- exp_resp.extend(['OS_TENANT_NAME'])
- self.assertEqual(openstack_utils.get_rc_env_vars(), exp_resp)
-
- @mock.patch('functest.utils.openstack_utils.is_keystone_v3',
- return_value=True)
- def test_get_rc_env_vars_default(self, mock_get_rc_env):
- exp_resp = self.env_vars
- exp_resp.extend(['OS_PROJECT_NAME',
- 'OS_USER_DOMAIN_NAME',
- 'OS_PROJECT_DOMAIN_NAME'])
- self.assertEqual(openstack_utils.get_rc_env_vars(), exp_resp)
-
- @mock.patch('functest.utils.openstack_utils.get_rc_env_vars')
- def test_check_credentials_missing_env(self, mock_get_rc_env):
- exp_resp = self.env_vars
- exp_resp.extend(['OS_TENANT_NAME'])
- mock_get_rc_env.return_value = exp_resp
- with mock.patch.dict('functest.utils.openstack_utils.os.environ', {},
- clear=True):
- self.assertEqual(openstack_utils.check_credentials(), False)
-
- @mock.patch('functest.utils.openstack_utils.get_rc_env_vars')
- def test_check_credentials_default(self, mock_get_rc_env):
- exp_resp = ['OS_TENANT_NAME']
- mock_get_rc_env.return_value = exp_resp
- with mock.patch.dict('functest.utils.openstack_utils.os.environ',
- {'OS_TENANT_NAME': self.tenant_name},
- clear=True):
- self.assertEqual(openstack_utils.check_credentials(), True)
-
- def test_get_env_cred_dict(self):
- self.assertDictEqual(openstack_utils.get_env_cred_dict(),
- self.env_cred_dict)
-
- @mock.patch('functest.utils.openstack_utils.get_rc_env_vars')
- def test_get_credentials_default(self, mock_get_rc_env):
- mock_get_rc_env.return_value = self.env_cred_dict.keys()
- with mock.patch.dict('functest.utils.openstack_utils.os.environ',
- self.os_environs,
- clear=True):
- self.assertDictEqual(openstack_utils.get_credentials(),
- self.os_env_vars)
-
- def _get_credentials_missing_env(self, var):
- dic = copy.deepcopy(self.os_environs)
- dic.pop(var)
- with mock.patch('functest.utils.openstack_utils.get_rc_env_vars',
- return_value=self.env_cred_dict.keys()), \
- mock.patch.dict('functest.utils.openstack_utils.os.environ',
- dic,
- clear=True):
- self.assertRaises(openstack_utils.MissingEnvVar,
- lambda: openstack_utils.get_credentials())
-
- def test_get_credentials_missing_username(self):
- self._get_credentials_missing_env('OS_USERNAME')
-
- def test_get_credentials_missing_password(self):
- self._get_credentials_missing_env('OS_PASSWORD')
-
- def test_get_credentials_missing_auth_url(self):
- self._get_credentials_missing_env('OS_AUTH_URL')
-
- def test_get_credentials_missing_tenantname(self):
- self._get_credentials_missing_env('OS_TENANT_NAME')
-
- def test_get_credentials_missing_domainname(self):
- self._get_credentials_missing_env('OS_USER_DOMAIN_NAME')
-
- def test_get_credentials_missing_projectname(self):
- self._get_credentials_missing_env('OS_PROJECT_NAME')
-
- def test_get_credentials_missing_endpoint_type(self):
- self._get_credentials_missing_env('OS_ENDPOINT_TYPE')
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_keystone_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_keystone_client_version(),
- openstack_utils.DEFAULT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_get_keystone_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_keystone_client_version(),
- '3')
- mock_logger_info.assert_called_once_with("OS_IDENTITY_API_VERSION is "
- "set in env as '%s'", '3')
-
- @mock.patch('functest.utils.openstack_utils.get_session')
- @mock.patch('functest.utils.openstack_utils.keystoneclient.Client')
- @mock.patch('functest.utils.openstack_utils.get_keystone_client_version',
- return_value='3')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='public')
- def test_get_keystone_client_with_interface(self, mock_os_getenv,
- mock_keystoneclient_version,
- mock_key_client,
- mock_get_session):
- mock_keystone_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- mock_key_client.return_value = mock_keystone_obj
- mock_get_session.return_value = mock_session_obj
- self.assertEqual(openstack_utils.get_keystone_client(),
- mock_keystone_obj)
- mock_key_client.assert_called_once_with('3',
- session=mock_session_obj,
- interface='public')
-
- @mock.patch('functest.utils.openstack_utils.get_session')
- @mock.patch('functest.utils.openstack_utils.keystoneclient.Client')
- @mock.patch('functest.utils.openstack_utils.get_keystone_client_version',
- return_value='3')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='admin')
- def test_get_keystone_client_no_interface(self, mock_os_getenv,
- mock_keystoneclient_version,
- mock_key_client,
- mock_get_session):
- mock_keystone_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- mock_key_client.return_value = mock_keystone_obj
- mock_get_session.return_value = mock_session_obj
- self.assertEqual(openstack_utils.get_keystone_client(),
- mock_keystone_obj)
- mock_key_client.assert_called_once_with('3',
- session=mock_session_obj,
- interface='admin')
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_nova_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_nova_client_version(),
- openstack_utils.DEFAULT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_get_nova_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_nova_client_version(),
- '3')
- mock_logger_info.assert_called_once_with("OS_COMPUTE_API_VERSION is "
- "set in env as '%s'", '3')
-
- def test_get_nova_client(self):
- mock_nova_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- with mock.patch('functest.utils.openstack_utils'
- '.get_nova_client_version', return_value='3'), \
- mock.patch('functest.utils.openstack_utils'
- '.novaclient.Client',
- return_value=mock_nova_obj) \
- as mock_nova_client, \
- mock.patch('functest.utils.openstack_utils.get_session',
- return_value=mock_session_obj):
- self.assertEqual(openstack_utils.get_nova_client(),
- mock_nova_obj)
- mock_nova_client.assert_called_once_with('3',
- session=mock_session_obj)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_cinder_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_cinder_client_version(),
- openstack_utils.DEFAULT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_get_cinder_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_cinder_client_version(),
- '3')
- mock_logger_info.assert_called_once_with("OS_VOLUME_API_VERSION is "
- "set in env as '%s'", '3')
-
- def test_get_cinder_client(self):
- mock_cinder_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- with mock.patch('functest.utils.openstack_utils'
- '.get_cinder_client_version', return_value='3'), \
- mock.patch('functest.utils.openstack_utils'
- '.cinderclient.Client',
- return_value=mock_cinder_obj) \
- as mock_cind_client, \
- mock.patch('functest.utils.openstack_utils.get_session',
- return_value=mock_session_obj):
- self.assertEqual(openstack_utils.get_cinder_client(),
- mock_cinder_obj)
- mock_cind_client.assert_called_once_with('3',
- session=mock_session_obj)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_neutron_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_neutron_client_version(),
- openstack_utils.DEFAULT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_get_neutron_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_neutron_client_version(),
- '3')
- mock_logger_info.assert_called_once_with("OS_NETWORK_API_VERSION is "
- "set in env as '%s'", '3')
-
- def test_get_neutron_client(self):
- mock_neutron_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- with mock.patch('functest.utils.openstack_utils'
- '.get_neutron_client_version', return_value='3'), \
- mock.patch('functest.utils.openstack_utils'
- '.neutronclient.Client',
- return_value=mock_neutron_obj) \
- as mock_neut_client, \
- mock.patch('functest.utils.openstack_utils.get_session',
- return_value=mock_session_obj):
- self.assertEqual(openstack_utils.get_neutron_client(),
- mock_neutron_obj)
- mock_neut_client.assert_called_once_with('3',
- session=mock_session_obj)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_glance_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_glance_client_version(),
- openstack_utils.DEFAULT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value='3')
- def test_get_glance_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_glance_client_version(),
- '3')
- mock_logger_info.assert_called_once_with("OS_IMAGE_API_VERSION is "
- "set in env as '%s'", '3')
-
- def test_get_glance_client(self):
- mock_glance_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- with mock.patch('functest.utils.openstack_utils'
- '.get_glance_client_version', return_value='3'), \
- mock.patch('functest.utils.openstack_utils'
- '.glanceclient.Client',
- return_value=mock_glance_obj) \
- as mock_glan_client, \
- mock.patch('functest.utils.openstack_utils.get_session',
- return_value=mock_session_obj):
- self.assertEqual(openstack_utils.get_glance_client(),
- mock_glance_obj)
- mock_glan_client.assert_called_once_with('3',
- session=mock_session_obj)
-
- @mock.patch('functest.utils.openstack_utils.os.getenv',
- return_value=None)
- def test_get_heat_client_version_missing_env(self, mock_os_getenv):
- self.assertEqual(openstack_utils.get_heat_client_version(),
- openstack_utils.DEFAULT_HEAT_API_VERSION)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.os.getenv', return_value='1')
- def test_get_heat_client_version_default(self, mock_os_getenv,
- mock_logger_info):
- self.assertEqual(openstack_utils.get_heat_client_version(), '1')
- mock_logger_info.assert_called_once_with(
- "OS_ORCHESTRATION_API_VERSION is set in env as '%s'", '1')
-
- def test_get_heat_client(self):
- mock_heat_obj = mock.Mock()
- mock_session_obj = mock.Mock()
- with mock.patch('functest.utils.openstack_utils'
- '.get_heat_client_version', return_value='1'), \
- mock.patch('functest.utils.openstack_utils'
- '.heatclient.Client',
- return_value=mock_heat_obj) \
- as mock_heat_client, \
- mock.patch('functest.utils.openstack_utils.get_session',
- return_value=mock_session_obj):
- self.assertEqual(openstack_utils.get_heat_client(),
- mock_heat_obj)
- mock_heat_client.assert_called_once_with('1',
- session=mock_session_obj)
-
- def test_get_instances_default(self):
- self.assertEqual(openstack_utils.get_instances(self.nova_client),
- [self.instance])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_instances_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_instances(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_instance_status_default(self):
- self.assertEqual(openstack_utils.get_instance_status(self.nova_client,
- self.instance),
- 'ok')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_instance_status_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_instance_status(Exception,
- self.instance),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_instance_by_name_default(self):
- self.assertEqual(openstack_utils.
- get_instance_by_name(self.nova_client,
- 'test_instance'),
- self.instance)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_instance_by_name_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_instance_by_name(Exception,
- 'test_instance'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_flavor_id_default(self):
- self.assertEqual(openstack_utils.
- get_flavor_id(self.nova_client,
- 'test_flavor'),
- self.flavor.id)
-
- def test_get_flavor_id_by_ram_range_default(self):
- self.assertEqual(openstack_utils.
- get_flavor_id_by_ram_range(self.nova_client,
- 1, 3),
- self.flavor.id)
-
- def test_get_aggregates_default(self):
- self.assertEqual(openstack_utils.
- get_aggregates(self.nova_client),
- [self.aggregate])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_aggregates_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_aggregates(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_aggregate_id_default(self):
- with mock.patch('functest.utils.openstack_utils.get_aggregates',
- return_value=[self.aggregate]):
- self.assertEqual(openstack_utils.
- get_aggregate_id(self.nova_client,
- 'test_aggregate'),
- 'aggregate_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_aggregate_id_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_aggregate_id(Exception,
- 'test_aggregate'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_availability_zone_names_default(self):
- with mock.patch('functest.utils.openstack_utils'
- '.get_availability_zones',
- return_value=[self.availability_zone]):
- self.assertEqual(openstack_utils.
- get_availability_zone_names(self.nova_client),
- ['test_azone'])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_availability_zone_names_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_availability_zone_names(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_availability_zones_default(self):
- self.assertEqual(openstack_utils.
- get_availability_zones(self.nova_client),
- [self.availability_zone])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_availability_zones_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_availability_zones(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_floating_ips_default(self):
- self.assertEqual(openstack_utils.
- get_floating_ips(self.neutron_client),
- [self.floating_ip])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_floating_ips_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_floating_ips(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_hypervisors_default(self):
- self.assertEqual(openstack_utils.
- get_hypervisors(self.nova_client),
- ['test_hostname'])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_hypervisors_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_hypervisors(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_aggregate_default(self):
- self.assertTrue(openstack_utils.
- create_aggregate(self.nova_client,
- 'test_aggregate',
- 'azone'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_aggregate_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_aggregate(Exception,
- 'test_aggregate',
- 'azone'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_add_host_to_aggregate_default(self):
- with mock.patch('functest.utils.openstack_utils.get_aggregate_id'):
- self.assertTrue(openstack_utils.
- add_host_to_aggregate(self.nova_client,
- 'test_aggregate',
- 'test_hostname'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_host_to_aggregate_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- add_host_to_aggregate(Exception,
- 'test_aggregate',
- 'test_hostname'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_aggregate_with_host_default(self):
- with mock.patch('functest.utils.openstack_utils.create_aggregate'), \
- mock.patch('functest.utils.openstack_utils.'
- 'add_host_to_aggregate'):
- self.assertTrue(openstack_utils.
- create_aggregate_with_host(self.nova_client,
- 'test_aggregate',
- 'test_azone',
- 'test_hostname'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_aggregate_with_host_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.create_aggregate',
- side_effect=Exception):
- self.assertEqual(openstack_utils.
- create_aggregate_with_host(Exception,
- 'test_aggregate',
- 'test_azone',
- 'test_hostname'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_instance_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_nova_client',
- return_value=self.nova_client):
- self.assertEqual(openstack_utils.
- create_instance('test_flavor',
- 'image_id',
- 'network_id'),
- self.instance)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_instance_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_nova_client',
- return_value=self.nova_client):
- self.nova_client.flavors.find.side_effect = Exception
- self.assertEqual(openstack_utils.
- create_instance('test_flavor',
- 'image_id',
- 'network_id'),
- None)
- self.assertTrue(mock_logger_error)
-
- def test_create_floating_ip_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_external_net_id',
- return_value='external_net_id'):
- exp_resp = {'fip_addr': 'test_ip', 'fip_id': 'fip_id'}
- self.assertEqual(openstack_utils.
- create_floating_ip(self.neutron_client),
- exp_resp)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_floating_ip_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_external_net_id',
- return_value='external_net_id'):
- self.assertEqual(openstack_utils.
- create_floating_ip(Exception),
- None)
- self.assertTrue(mock_logger_error)
-
- def test_add_floating_ip_default(self):
- with mock.patch('functest.utils.openstack_utils.get_aggregate_id'):
- self.assertTrue(openstack_utils.
- add_floating_ip(self.nova_client,
- 'test_serverid',
- 'test_floatingip_addr'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_floating_ip_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- add_floating_ip(Exception,
- 'test_serverid',
- 'test_floatingip_addr'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_instance_default(self):
- self.assertTrue(openstack_utils.
- delete_instance(self.nova_client,
- 'instance_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_instance_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_instance(Exception,
- 'instance_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_floating_ip_default(self):
- self.assertTrue(openstack_utils.
- delete_floating_ip(self.neutron_client,
- 'floating_ip_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_floating_ip_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_floating_ip(Exception,
- 'floating_ip_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_remove_host_from_aggregate_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_aggregate_id'):
- self.assertTrue(openstack_utils.
- remove_host_from_aggregate(self.nova_client,
- 'agg_name',
- 'host_name'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_remove_host_from_aggregate_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_aggregate_id', side_effect=Exception):
- self.assertFalse(openstack_utils.
- remove_host_from_aggregate(self.nova_client,
- 'agg_name',
- 'host_name'))
- self.assertTrue(mock_logger_error.called)
-
- def test_remove_hosts_from_aggregate_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_aggregate_id'), \
- mock.patch('functest.utils.openstack_utils.'
- 'remove_host_from_aggregate',
- return_value=True) \
- as mock_method:
- openstack_utils.remove_hosts_from_aggregate(self.nova_client,
- 'test_aggregate')
- mock_method.assert_any_call(self.nova_client,
- 'test_aggregate',
- 'host_name')
-
- def test_delete_aggregate_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'remove_hosts_from_aggregate'):
- self.assertTrue(openstack_utils.
- delete_aggregate(self.nova_client,
- 'agg_name'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_aggregate_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'remove_hosts_from_aggregate', side_effect=Exception):
- self.assertFalse(openstack_utils.
- delete_aggregate(self.nova_client,
- 'agg_name'))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_network_list_default(self):
- self.assertEqual(openstack_utils.
- get_network_list(self.neutron_client),
- self.networks)
-
- def test_get_network_list_missing_network(self):
- self.assertEqual(openstack_utils.
- get_network_list(self.empty_client),
- None)
-
- def test_get_router_list_default(self):
- self.assertEqual(openstack_utils.
- get_router_list(self.neutron_client),
- [self.router])
-
- def test_get_router_list_missing_router(self):
- self.assertEqual(openstack_utils.
- get_router_list(self.empty_client),
- None)
-
- def test_get_port_list_default(self):
- self.assertEqual(openstack_utils.
- get_port_list(self.neutron_client),
- [self.port])
-
- def test_get_port_list_missing_port(self):
- self.assertEqual(openstack_utils.
- get_port_list(self.empty_client),
- None)
-
- def test_get_network_id_default(self):
- self.assertEqual(openstack_utils.
- get_network_id(self.neutron_client,
- 'test_network'),
- 'network_id')
-
- def test_get_subnet_id_default(self):
- self.assertEqual(openstack_utils.
- get_subnet_id(self.neutron_client,
- 'test_subnet'),
- 'subnet_id')
-
- def test_get_router_id_default(self):
- self.assertEqual(openstack_utils.
- get_router_id(self.neutron_client,
- 'test_router'),
- 'router_id')
-
- def test_get_private_net_default(self):
- self.assertEqual(openstack_utils.
- get_private_net(self.neutron_client),
- self.networks[0])
-
- def test_get_private_net_missing_net(self):
- self.assertEqual(openstack_utils.
- get_private_net(self.empty_client),
- None)
-
- def test_get_external_net_default(self):
- self.assertEqual(openstack_utils.
- get_external_net(self.neutron_client),
- 'test_network1')
-
- def test_get_external_net_missing_net(self):
- self.assertEqual(openstack_utils.
- get_external_net(self.empty_client),
- None)
-
- def test_get_external_net_id_default(self):
- self.assertEqual(openstack_utils.
- get_external_net_id(self.neutron_client),
- 'network_id1')
-
- def test_get_external_net_id_missing_net(self):
- self.assertEqual(openstack_utils.
- get_external_net_id(self.empty_client),
- None)
-
- def test_check_neutron_net_default(self):
- self.assertTrue(openstack_utils.
- check_neutron_net(self.neutron_client,
- 'test_network'))
-
- def test_check_neutron_net_missing_net(self):
- self.assertFalse(openstack_utils.
- check_neutron_net(self.empty_client,
- 'test_network'))
-
- def test_create_neutron_net_default(self):
- self.assertEqual(openstack_utils.
- create_neutron_net(self.neutron_client,
- 'test_network'),
- 'network_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_neutron_net_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_neutron_net(Exception,
- 'test_network'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_neutron_subnet_default(self):
- self.assertEqual(openstack_utils.
- create_neutron_subnet(self.neutron_client,
- 'test_subnet',
- 'test_cidr',
- 'network_id'),
- 'subnet_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_neutron_subnet_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_neutron_subnet(Exception,
- 'test_subnet',
- 'test_cidr',
- 'network_id'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_neutron_router_default(self):
- self.assertEqual(openstack_utils.
- create_neutron_router(self.neutron_client,
- 'test_router'),
- 'router_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_neutron_router_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_neutron_router(Exception,
- 'test_router'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_neutron_port_default(self):
- self.assertEqual(openstack_utils.
- create_neutron_port(self.neutron_client,
- 'test_port',
- 'network_id',
- 'test_ip'),
- 'port_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_neutron_port_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_neutron_port(Exception,
- 'test_port',
- 'network_id',
- 'test_ip'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_update_neutron_net_default(self):
- self.assertTrue(openstack_utils.
- update_neutron_net(self.neutron_client,
- 'network_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_update_neutron_net_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- update_neutron_net(Exception,
- 'network_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_update_neutron_port_default(self):
- self.assertEqual(openstack_utils.
- update_neutron_port(self.neutron_client,
- 'port_id',
- 'test_owner'),
- 'port_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_update_neutron_port_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- update_neutron_port(Exception,
- 'port_id',
- 'test_owner'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_add_interface_router_default(self):
- self.assertTrue(openstack_utils.
- add_interface_router(self.neutron_client,
- 'router_id',
- 'subnet_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_interface_router_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- add_interface_router(Exception,
- 'router_id',
- 'subnet_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_add_gateway_router_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_external_net_id',
- return_value='network_id'):
- self.assertTrue(openstack_utils.
- add_gateway_router(self.neutron_client,
- 'router_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_gateway_router_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_external_net_id',
- return_value='network_id'):
- self.assertFalse(openstack_utils.
- add_gateway_router(Exception,
- 'router_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_neutron_net_default(self):
- self.assertTrue(openstack_utils.
- delete_neutron_net(self.neutron_client,
- 'network_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_neutron_net_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_neutron_net(Exception,
- 'network_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_neutron_subnet_default(self):
- self.assertTrue(openstack_utils.
- delete_neutron_subnet(self.neutron_client,
- 'subnet_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_neutron_subnet_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_neutron_subnet(Exception,
- 'subnet_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_neutron_router_default(self):
- self.assertTrue(openstack_utils.
- delete_neutron_router(self.neutron_client,
- 'router_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_neutron_router_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_neutron_router(Exception,
- 'router_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_neutron_port_default(self):
- self.assertTrue(openstack_utils.
- delete_neutron_port(self.neutron_client,
- 'port_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_neutron_port_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_neutron_port(Exception,
- 'port_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_remove_interface_router_default(self):
- self.assertTrue(openstack_utils.
- remove_interface_router(self.neutron_client,
- 'router_id',
- 'subnet_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_remove_interface_router_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- remove_interface_router(Exception,
- 'router_id',
- 'subnet_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_remove_gateway_router_default(self):
- self.assertTrue(openstack_utils.
- remove_gateway_router(self.neutron_client,
- 'router_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_remove_gateway_router_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- remove_gateway_router(Exception,
- 'router_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_security_groups_default(self):
- self.assertEqual(openstack_utils.
- get_security_groups(self.neutron_client),
- [self.sec_group])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_security_groups_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_security_groups(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_security_group_id_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_security_groups',
- return_value=[self.sec_group]):
- self.assertEqual(openstack_utils.
- get_security_group_id(self.neutron_client,
- 'test_sec_group'),
- 'sec_group_id')
-
- def test_get_security_group_rules_default(self):
- self.assertEqual(openstack_utils.
- get_security_group_rules(self.neutron_client,
- self.sec_group['id']),
- [self.sec_group_rule])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_security_group_rules_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_security_group_rules(Exception,
- 'sec_group_id'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_check_security_group_rules_not_exists(self):
- self.assertEqual(openstack_utils.
- check_security_group_rules(self.neutron_client,
- 'sec_group_id_2',
- 'direction',
- 'protocol',
- 'port_min',
- 'port_max'),
- True)
-
- def test_check_security_group_rules_exists(self):
- self.assertEqual(openstack_utils.
- check_security_group_rules(self.neutron_client,
- self.sec_group['id'],
- 'direction',
- 'protocol',
- 'port_min',
- 'port_max'),
- False)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_check_security_group_rules_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- check_security_group_rules(Exception,
- 'sec_group_id',
- 'direction',
- 'protocol',
- 'port_max',
- 'port_min'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_security_group_default(self):
- self.assertEqual(openstack_utils.
- create_security_group(self.neutron_client,
- 'test_sec_group',
- 'sec_group_desc'),
- self.sec_group)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_security_group_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_security_group(Exception,
- 'test_sec_group',
- 'sec_group_desc'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_secgroup_rule_default(self):
- self.assertTrue(openstack_utils.
- create_secgroup_rule(self.neutron_client,
- 'sg_id',
- 'direction',
- 'protocol',
- 80,
- 80))
- self.assertTrue(openstack_utils.
- create_secgroup_rule(self.neutron_client,
- 'sg_id',
- 'direction',
- 'protocol'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_secgroup_rule_invalid_port_range(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- create_secgroup_rule(self.neutron_client,
- 'sg_id',
- 'direction',
- 'protocol',
- 80))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_secgroup_rule_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- create_secgroup_rule(Exception,
- 'sg_id',
- 'direction',
- 'protocol'))
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- def test_create_security_group_full_default(self, mock_logger_info):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_security_group_id',
- return_value='sg_id'):
- self.assertEqual(openstack_utils.
- create_security_group_full(self.neutron_client,
- 'sg_name',
- 'sg_desc'),
- 'sg_id')
- self.assertTrue(mock_logger_info)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_security_group_full_sec_group_fail(self,
- mock_logger_error,
- mock_logger_info):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_security_group_id',
- return_value=''), \
- mock.patch('functest.utils.openstack_utils.'
- 'create_security_group',
- return_value=False):
- self.assertEqual(openstack_utils.
- create_security_group_full(self.neutron_client,
- 'sg_name',
- 'sg_desc'),
- None)
- self.assertTrue(mock_logger_error)
- self.assertTrue(mock_logger_info)
-
- @mock.patch('functest.utils.openstack_utils.logger.debug')
- @mock.patch('functest.utils.openstack_utils.logger.info')
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_security_group_full_secgroup_rule_fail(self,
- mock_logger_error,
- mock_logger_info,
- mock_logger_debug):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_security_group_id',
- return_value=''), \
- mock.patch('functest.utils.openstack_utils.'
- 'create_security_group',
- return_value={'id': 'sg_id',
- 'name': 'sg_name'}), \
- mock.patch('functest.utils.openstack_utils.'
- 'create_secgroup_rule',
- return_value=False):
- self.assertEqual(openstack_utils.
- create_security_group_full(self.neutron_client,
- 'sg_name',
- 'sg_desc'),
- None)
- self.assertTrue(mock_logger_error)
- self.assertTrue(mock_logger_info)
- self.assertTrue(mock_logger_debug)
-
- def test_add_secgroup_to_instance_default(self):
- self.assertTrue(openstack_utils.
- add_secgroup_to_instance(self.nova_client,
- 'instance_id',
- 'sec_group_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_secgroup_to_instance_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- add_secgroup_to_instance(Exception,
- 'instance_id',
- 'sec_group_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_update_sg_quota_default(self):
- self.assertTrue(openstack_utils.
- update_sg_quota(self.neutron_client,
- 'tenant_id',
- 'sg_quota',
- 'sg_rule_quota'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_update_sg_quota_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- update_sg_quota(Exception,
- 'tenant_id',
- 'sg_quota',
- 'sg_rule_quota'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_security_group_default(self):
- self.assertTrue(openstack_utils.
- delete_security_group(self.neutron_client,
- 'sec_group_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_security_group_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_security_group(Exception,
- 'sec_group_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_images_default(self):
- self.assertEqual(openstack_utils.
- get_images(self.glance_client),
- [self.image])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_images_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_images(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_image_id_default(self):
- self.assertEqual(openstack_utils.
- get_image_id(self.glance_client,
- 'test_image'),
- 'image_id')
-
- # create_glance_image, get_or_create_image
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_glance_image_file_present(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'os.path.isfile',
- return_value=False):
- self.assertEqual(openstack_utils.
- create_glance_image(self.glance_client,
- 'test_image',
- 'file_path'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- def test_create_glance_image_already_exist(self, mock_logger_info):
- with mock.patch('functest.utils.openstack_utils.'
- 'os.path.isfile',
- return_value=True), \
- mock.patch('functest.utils.openstack_utils.get_image_id',
- return_value='image_id'):
- self.assertEqual(openstack_utils.
- create_glance_image(self.glance_client,
- 'test_image',
- 'file_path'),
- 'image_id')
- self.assertTrue(mock_logger_info.called)
-
- @mock.patch('functest.utils.openstack_utils.logger.info')
- def test_create_glance_image_default(self, mock_logger_info):
- with mock.patch('functest.utils.openstack_utils.'
- 'os.path.isfile',
- return_value=True), \
- mock.patch('functest.utils.openstack_utils.get_image_id',
- return_value=''), \
- mock.patch('six.moves.builtins.open',
- mock.mock_open(read_data='1')) as m:
- self.assertEqual(openstack_utils.
- create_glance_image(self.glance_client,
- 'test_image',
- 'file_path'),
- 'image_id')
- m.assert_called_once_with('file_path')
- self.assertTrue(mock_logger_info.called)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_glance_image_exception(self, mock_logger_error):
- with mock.patch('functest.utils.openstack_utils.'
- 'os.path.isfile',
- return_value=True), \
- mock.patch('functest.utils.openstack_utils.get_image_id',
- side_effect=Exception):
- self.assertEqual(openstack_utils.
- create_glance_image(self.glance_client,
- 'test_image',
- 'file_path'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_glance_image_default(self):
- self.assertTrue(openstack_utils.
- delete_glance_image(self.nova_client,
- 'image_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_glance_image_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_glance_image(Exception,
- 'image_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_volumes_default(self):
- self.assertEqual(openstack_utils.
- get_volumes(self.cinder_client),
- [self.volume])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_volumes_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_volumes(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_update_cinder_quota_default(self):
- self.assertTrue(openstack_utils.
- update_cinder_quota(self.cinder_client,
- 'tenant_id',
- 'vols_quota',
- 'snap_quota',
- 'giga_quota'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_update_cinder_quota_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- update_cinder_quota(Exception,
- 'tenant_id',
- 'vols_quota',
- 'snap_quota',
- 'giga_quota'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_volume_default(self):
- self.assertTrue(openstack_utils.
- delete_volume(self.cinder_client,
- 'volume_id',
- forced=False))
-
- self.assertTrue(openstack_utils.
- delete_volume(self.cinder_client,
- 'volume_id',
- forced=True))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_volume_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_volume(Exception,
- 'volume_id',
- forced=True))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_tenants_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- self.assertEqual(openstack_utils.
- get_tenants(self.keystone_client),
- [self.tenant])
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=False):
- self.assertEqual(openstack_utils.
- get_tenants(self.keystone_client),
- [self.tenant])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_tenants_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_tenants(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_users_default(self):
- self.assertEqual(openstack_utils.
- get_users(self.keystone_client),
- [self.user])
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_users_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_users(Exception),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_tenant_id_default(self):
- self.assertEqual(openstack_utils.
- get_tenant_id(self.keystone_client,
- 'test_tenant'),
- 'tenant_id')
-
- def test_get_user_id_default(self):
- self.assertEqual(openstack_utils.
- get_user_id(self.keystone_client,
- 'test_user'),
- 'user_id')
-
- def test_get_role_id_default(self):
- self.assertEqual(openstack_utils.
- get_role_id(self.keystone_client,
- 'test_role'),
- 'role_id')
-
- def test_get_domain_id_default(self):
- self.assertEqual(openstack_utils.
- get_domain_id(self.keystone_client,
- 'test_domain'),
- 'domain_id')
-
- def test_create_tenant_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- os.environ['OS_PROJECT_DOMAIN_NAME'] = 'Default'
- self.assertEqual(openstack_utils.
- create_tenant(self.keystone_client,
- 'test_tenant',
- 'tenant_desc'),
- 'tenant_id')
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=False):
- self.assertEqual(openstack_utils.
- create_tenant(self.keystone_client,
- 'test_tenant',
- 'tenant_desc'),
- 'tenant_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_tenant_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_tenant(Exception,
- 'test_tenant',
- 'tenant_desc'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_create_user_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- self.assertEqual(openstack_utils.
- create_user(self.keystone_client,
- 'test_user',
- 'password',
- 'email',
- 'tenant_id'),
- 'user_id')
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=False):
- self.assertEqual(openstack_utils.
- create_user(self.keystone_client,
- 'test_user',
- 'password',
- 'email',
- 'tenant_id'),
- 'user_id')
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_create_user_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- create_user(Exception,
- 'test_user',
- 'password',
- 'email',
- 'tenant_id'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_add_role_user_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- self.assertTrue(openstack_utils.
- add_role_user(self.keystone_client,
- 'user_id',
- 'role_id',
- 'tenant_id'))
-
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=False):
- self.assertTrue(openstack_utils.
- add_role_user(self.keystone_client,
- 'user_id',
- 'role_id',
- 'tenant_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_add_role_user_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- add_role_user(Exception,
- 'user_id',
- 'role_id',
- 'tenant_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_tenant_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- self.assertTrue(openstack_utils.
- delete_tenant(self.keystone_client,
- 'tenant_id'))
-
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=False):
- self.assertTrue(openstack_utils.
- delete_tenant(self.keystone_client,
- 'tenant_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_tenant_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_tenant(Exception,
- 'tenant_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_delete_user_default(self):
- self.assertTrue(openstack_utils.
- delete_user(self.keystone_client,
- 'user_id'))
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_delete_user_exception(self, mock_logger_error):
- self.assertFalse(openstack_utils.
- delete_user(Exception,
- 'user_id'))
- self.assertTrue(mock_logger_error.called)
-
- def test_get_resource_default(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'is_keystone_v3', return_value=True):
- self.assertEqual(openstack_utils.
- get_resource(self.heat_client,
- 'stack_id',
- 'resource'),
- self.resource)
-
- @mock.patch('functest.utils.openstack_utils.logger.error')
- def test_get_resource_exception(self, mock_logger_error):
- self.assertEqual(openstack_utils.
- get_resource(Exception,
- 'stack_id',
- 'resource'),
- None)
- self.assertTrue(mock_logger_error.called)
-
- def test_get_or_create_user_for_vnf_get(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_user_id',
- return_value='user_id'), \
- mock.patch('functest.utils.openstack_utils.get_tenant_id',
- return_value='tenant_id'):
- self.assertFalse(openstack_utils.
- get_or_create_user_for_vnf(self.keystone_client,
- 'my_vnf'))
-
- def test_get_or_create_user_for_vnf_create(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_user_id',
- return_value=None), \
- mock.patch('functest.utils.openstack_utils.get_tenant_id',
- return_value='tenant_id'):
- self.assertTrue(openstack_utils.
- get_or_create_user_for_vnf(self.keystone_client,
- 'my_vnf'))
-
- def test_get_or_create_user_for_vnf_error_get_user_id(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_user_id',
- side_effect=Exception):
- self.assertRaises(Exception)
-
- def test_get_or_create_user_for_vnf_error_get_tenant_id(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_user_id',
- return_value='user_id'), \
- mock.patch('functest.utils.openstack_utils.get_tenant_id',
-                       side_effect=Exception):
- self.assertRaises(Exception)
-
- def test_get_or_create_tenant_for_vnf_get(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_tenant_id',
- return_value='tenant_id'):
- self.assertFalse(
- openstack_utils.get_or_create_tenant_for_vnf(
- self.keystone_client, 'tenant_name', 'tenant_description'))
-
- def test_get_or_create_tenant_for_vnf_create(self):
- with mock.patch('functest.utils.openstack_utils.get_tenant_id',
- return_value=None):
- self.assertTrue(
- openstack_utils.get_or_create_tenant_for_vnf(
- self.keystone_client, 'tenant_name', 'tenant_description'))
-
- def test_get_or_create_tenant_for_vnf_error_get_tenant_id(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'get_tenant_id',
- side_effect=Exception):
- self.assertRaises(Exception)
-
- def test_download_and_add_image_on_glance_image_creation_failure(self):
- with mock.patch('functest.utils.openstack_utils.'
- 'os.makedirs'), \
- mock.patch('functest.utils.openstack_utils.'
- 'ft_utils.download_url',
- return_value=True), \
- mock.patch('functest.utils.openstack_utils.'
- 'create_glance_image',
- return_value=''):
- resp = openstack_utils.download_and_add_image_on_glance(
- self.glance_client,
- 'image_name',
- 'http://url',
- 'data_dir')
- self.assertEqual(resp, False)
-
-
-if __name__ == "__main__":
- logging.disable(logging.CRITICAL)
- unittest.main(verbosity=2)
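The removed tests above all repeat one mock-based pattern: patch the module logger, call the helper with a bogus client (often the Exception class itself) and check that the error branch logs and returns None/False. A condensed, self-contained sketch of that pattern, using a stand-in helper rather than the real openstack_utils module:

    import logging
    import unittest
    from unittest import mock   # the removed tests used the external 'mock' package

    logger = logging.getLogger(__name__)


    def get_volumes(cinder_client):
        # mirrors the removed openstack_utils helpers: swallow any error,
        # log it and return None
        try:
            return cinder_client.volumes.list(search_opts={'all_tenants': 1})
        except Exception as err:  # pylint: disable=broad-except
            logger.error("Error [get_volumes(cinder_client)]: %s", err)
            return None


    class GetVolumesTesting(unittest.TestCase):

        @mock.patch('%s.logger.error' % __name__)
        def test_get_volumes_exception(self, mock_logger_error):
            # Exception has no 'volumes' attribute, so the error path runs
            self.assertIsNone(get_volumes(Exception))
            self.assertTrue(mock_logger_error.called)


    if __name__ == "__main__":
        logging.disable(logging.CRITICAL)
        unittest.main(verbosity=2)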
diff --git a/functest/utils/constants.py b/functest/utils/constants.py
index d8a1d54d1..0bc00d80a 100644
--- a/functest/utils/constants.py
+++ b/functest/utils/constants.py
@@ -3,22 +3,8 @@
# pylint: disable=missing-docstring
import pkg_resources
-import six
-from functest.utils import config
-from functest.utils import env
+CONFIG_FUNCTEST_YAML = pkg_resources.resource_filename(
+ 'functest', 'ci/config_functest.yaml')
-
-class Constants(object): # pylint: disable=too-few-public-methods
-
- CONFIG_FUNCTEST_YAML = pkg_resources.resource_filename(
- 'functest', 'ci/config_functest.yaml')
-
- def __init__(self):
- for attr_n, attr_v in six.iteritems(config.CONF.__dict__):
- setattr(self, attr_n, attr_v)
- for env_n, env_v in six.iteritems(env.ENV.__dict__):
- setattr(self, env_n, env_v)
-
-
-CONST = Constants()
+ENV_FILE = '/home/opnfv/functest/conf/env_file'
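The slimmed-down constants module now exposes only two module-level names. A minimal sketch of reading the packaged configuration through them, assuming PyYAML (already a requirement) and nothing beyond what the hunk above shows:

    import yaml

    from functest.utils import constants

    # CONFIG_FUNCTEST_YAML is the absolute path of the packaged
    # ci/config_functest.yaml resolved via pkg_resources
    with open(constants.CONFIG_FUNCTEST_YAML) as yaml_fd:
        functest_yaml = yaml.safe_load(yaml_fd)

    # ENV_FILE is just the conventional location of the rc/env file
    print(constants.ENV_FILE)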
diff --git a/functest/utils/env.py b/functest/utils/env.py
index 0c0515ba1..aa2da0b54 100644
--- a/functest/utils/env.py
+++ b/functest/utils/env.py
@@ -11,7 +11,7 @@
import os
-import six
+import prettytable
INPUTS = {
'EXTERNAL_NETWORK': None,
@@ -24,8 +24,8 @@ INPUTS = {
'POD_ARCH': None,
'TEST_DB_URL': 'http://testresults.opnfv.org/test/api/v1/results',
'ENERGY_RECORDER_API_URL': 'http://energy.opnfv.fr/resources',
- 'ENERGY_RECORDER_API_USER': '',
- 'ENERGY_RECORDER_API_PASSWORD': ''
+ 'ENERGY_RECORDER_API_USER': None,
+ 'ENERGY_RECORDER_API_PASSWORD': None
}
@@ -35,12 +35,10 @@ def get(env_var):
return os.environ.get(env_var, INPUTS[env_var])
-class Environment(object): # pylint: disable=too-few-public-methods
-
-    # Backward compatibility (waiting for SDNVPN and SFC)
- def __init__(self):
- for key, _ in six.iteritems(INPUTS):
- setattr(self, key, get(key))
-
-# Backward compatibility (waiting for SDNVPN and SFC)
-ENV = Environment()
+def string():
+ msg = prettytable.PrettyTable(
+ header_style='upper', padding_width=5,
+ field_names=['env var', 'value'])
+ for env_var in INPUTS:
+ msg.add_row([env_var, get(env_var) if get(env_var) else ''])
+ return msg
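The Environment singleton goes away in favour of plain functions; string() renders every INPUTS entry as a prettytable. A short usage sketch (the network name is a made-up value):

    import os

    from functest.utils import env

    os.environ['EXTERNAL_NETWORK'] = 'ext-net'   # hypothetical value

    # get() falls back to the INPUTS default when the variable is unset
    assert env.get('EXTERNAL_NETWORK') == 'ext-net'

    # string() returns a PrettyTable listing every env var and its value
    print(env.string())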
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
index 72c9d2076..b614af321 100644
--- a/functest/utils/functest_utils.py
+++ b/functest/utils/functest_utils.py
@@ -11,90 +11,13 @@
from __future__ import print_function
import logging
-import re
-import shutil
import subprocess
import sys
-
-import dns.resolver
-from six.moves import urllib
import yaml
-from functest.utils import constants
-from functest.utils import env
-
LOGGER = logging.getLogger(__name__)
-# ----------------------------------------------------------
-#
-# INTERNET UTILS
-#
-# -----------------------------------------------------------
-def check_internet_connectivity(url='http://www.opnfv.org/'):
- """
- Check if there is access to the internet
- """
- try:
- urllib.request.urlopen(url, timeout=5)
- return True
- except urllib.error.URLError:
- return False
-
-
-def download_url(url, dest_path):
- """
- Download a file to a destination path given a URL
- """
- name = url.rsplit('/')[-1]
- dest = dest_path + "/" + name
- try:
- response = urllib.request.urlopen(url)
- except (urllib.error.HTTPError, urllib.error.URLError):
- return False
-
- with open(dest, 'wb') as lfile:
- shutil.copyfileobj(response, lfile)
- return True
-
-
-# ----------------------------------------------------------
-#
-# CI UTILS
-#
-# -----------------------------------------------------------
-def get_resolvconf_ns():
- """
- Get nameservers from current resolv.conf
- """
- nameservers = []
- rconf = open("/etc/resolv.conf", "r")
- line = rconf.readline()
- resolver = dns.resolver.Resolver()
- while line:
- addr_ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
- if addr_ip:
- resolver.nameservers = [addr_ip.group(0)]
- try:
- result = resolver.query('opnfv.org')[0]
- if result != "":
- nameservers.append(addr_ip.group())
- except dns.exception.Timeout:
- pass
- line = rconf.readline()
- return nameservers
-
-
-def get_ci_envvars():
- """
- Get the CI env variables
- """
- ci_env_var = {
- "installer": env.get('INSTALLER_TYPE'),
- "scenario": env.get('DEPLOY_SCENARIO')}
- return ci_env_var
-
-
def execute_command_raise(cmd, info=False, error_msg="",
verbose=True, output_file=None):
ret = execute_command(cmd, info, error_msg, verbose, output_file)
@@ -134,11 +57,6 @@ def execute_command(cmd, info=False, error_msg="",
return returncode
-# ----------------------------------------------------------
-#
-# YAML UTILS
-#
-# -----------------------------------------------------------
def get_parameter_from_yaml(parameter, yfile):
"""
Returns the value of a given parameter in file.yaml
@@ -154,19 +72,3 @@ def get_parameter_from_yaml(parameter, yfile):
raise ValueError("The parameter %s is not defined in"
" %s" % (parameter, yfile))
return value
-
-
-def get_functest_config(parameter):
- yaml_ = constants.Constants.CONFIG_FUNCTEST_YAML
- return get_parameter_from_yaml(parameter, yaml_)
-
-
-def get_functest_yaml():
- # pylint: disable=bad-continuation
- with open(constants.Constants.CONFIG_FUNCTEST_YAML) as yaml_fd:
- functest_yaml = yaml.safe_load(yaml_fd)
- return functest_yaml
-
-
-def print_separator():
- LOGGER.info("==============================================")
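get_functest_config() and get_functest_yaml() are gone, but get_parameter_from_yaml() remains, so the same lookup is done by passing the packaged config path explicitly. A sketch of the replacement call; the dotted key is only a placeholder for whatever parameter a caller needs:

    from functest.utils import constants
    from functest.utils import functest_utils

    # equivalent of the removed get_functest_config('general.dir.functest');
    # raises ValueError if the dotted key is not defined in the file
    value = functest_utils.get_parameter_from_yaml(
        'general.dir.functest', constants.CONFIG_FUNCTEST_YAML)
    print(value)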
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
deleted file mode 100644
index 98da48b8c..000000000
--- a/functest/utils/openstack_utils.py
+++ /dev/null
@@ -1,1486 +0,0 @@
-#!/usr/bin/env python
-#
-# jose.lausuch@ericsson.com
-# valentin.boucher@orange.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import logging
-import os.path
-import sys
-import time
-
-from keystoneauth1 import loading
-from keystoneauth1 import session
-from cinderclient import client as cinderclient
-from glanceclient import client as glanceclient
-from heatclient import client as heatclient
-from novaclient import client as novaclient
-from keystoneclient import client as keystoneclient
-from neutronclient.neutron import client as neutronclient
-
-from functest.utils import env
-import functest.utils.functest_utils as ft_utils
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_API_VERSION = '2'
-DEFAULT_HEAT_API_VERSION = '1'
-
-
-# *********************************************
-# CREDENTIALS
-# *********************************************
-class MissingEnvVar(Exception):
-
- def __init__(self, var):
- self.var = var
-
- def __str__(self):
- return str.format("Please set the mandatory env var: {}", self.var)
-
-
-def is_keystone_v3():
- keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
- if (keystone_api_version is None or
- keystone_api_version == '2'):
- return False
- else:
- return True
-
-
-def get_rc_env_vars():
- env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
- if is_keystone_v3():
- env_vars.extend(['OS_PROJECT_NAME',
- 'OS_USER_DOMAIN_NAME',
- 'OS_PROJECT_DOMAIN_NAME'])
- else:
- env_vars.extend(['OS_TENANT_NAME'])
- return env_vars
-
-
-def check_credentials():
- """
- Check if the OpenStack credentials (openrc) are sourced
- """
- env_vars = get_rc_env_vars()
- return all(map(lambda v: v in os.environ and os.environ[v], env_vars))
-
-
-def get_env_cred_dict():
- env_cred_dict = {
- 'OS_USERNAME': 'username',
- 'OS_PASSWORD': 'password',
- 'OS_AUTH_URL': 'auth_url',
- 'OS_TENANT_NAME': 'tenant_name',
- 'OS_USER_DOMAIN_NAME': 'user_domain_name',
- 'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
- 'OS_PROJECT_NAME': 'project_name',
- 'OS_ENDPOINT_TYPE': 'endpoint_type',
- 'OS_REGION_NAME': 'region_name',
- 'OS_CACERT': 'https_cacert',
- 'OS_INSECURE': 'https_insecure'
- }
- return env_cred_dict
-
-
-def get_credentials(other_creds={}):
- """Returns a creds dictionary filled with parsed from env
- """
- creds = {}
- env_vars = get_rc_env_vars()
- env_cred_dict = get_env_cred_dict()
-
- for envvar in env_vars:
- if os.getenv(envvar) is None:
- raise MissingEnvVar(envvar)
- else:
- creds_key = env_cred_dict.get(envvar)
- creds.update({creds_key: os.getenv(envvar)})
-
- if 'tenant' in other_creds.keys():
- if is_keystone_v3():
- tenant = 'project_name'
- else:
- tenant = 'tenant_name'
- other_creds[tenant] = other_creds.pop('tenant')
-
- creds.update(other_creds)
-
- return creds
-
-
-def get_session_auth(other_creds={}):
- loader = loading.get_plugin_loader('password')
- creds = get_credentials(other_creds)
- auth = loader.load_from_options(**creds)
- return auth
-
-
-def get_endpoint(service_type, interface='public'):
- auth = get_session_auth()
- return get_session().get_endpoint(auth=auth,
- service_type=service_type,
- interface=interface)
-
-
-def get_session(other_creds={}):
- auth = get_session_auth(other_creds)
- https_cacert = os.getenv('OS_CACERT', '')
- https_insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
- return session.Session(auth=auth,
- verify=(https_cacert or not https_insecure))
-
-
-# *********************************************
-# CLIENTS
-# *********************************************
-def get_keystone_client_version():
- api_version = os.getenv('OS_IDENTITY_API_VERSION')
- if api_version is not None:
- logger.info("OS_IDENTITY_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_keystone_client(other_creds={}):
- sess = get_session(other_creds)
- return keystoneclient.Client(get_keystone_client_version(),
- session=sess,
- interface=os.getenv('OS_INTERFACE', 'admin'))
-
-
-def get_nova_client_version():
- api_version = os.getenv('OS_COMPUTE_API_VERSION')
- if api_version is not None:
- logger.info("OS_COMPUTE_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_nova_client(other_creds={}):
- sess = get_session(other_creds)
- return novaclient.Client(get_nova_client_version(), session=sess)
-
-
-def get_cinder_client_version():
- api_version = os.getenv('OS_VOLUME_API_VERSION')
- if api_version is not None:
- logger.info("OS_VOLUME_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_cinder_client(other_creds={}):
- sess = get_session(other_creds)
- return cinderclient.Client(get_cinder_client_version(), session=sess)
-
-
-def get_neutron_client_version():
- api_version = os.getenv('OS_NETWORK_API_VERSION')
- if api_version is not None:
- logger.info("OS_NETWORK_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_neutron_client(other_creds={}):
- sess = get_session(other_creds)
- return neutronclient.Client(get_neutron_client_version(), session=sess)
-
-
-def get_glance_client_version():
- api_version = os.getenv('OS_IMAGE_API_VERSION')
- if api_version is not None:
- logger.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
- return api_version
- return DEFAULT_API_VERSION
-
-
-def get_glance_client(other_creds={}):
- sess = get_session(other_creds)
- return glanceclient.Client(get_glance_client_version(), session=sess)
-
-
-def get_heat_client_version():
- api_version = os.getenv('OS_ORCHESTRATION_API_VERSION')
- if api_version is not None:
- logger.info("OS_ORCHESTRATION_API_VERSION is set in env as '%s'",
- api_version)
- return api_version
- return DEFAULT_HEAT_API_VERSION
-
-
-def get_heat_client(other_creds={}):
- sess = get_session(other_creds)
- return heatclient.Client(get_heat_client_version(), session=sess)
-
-
-def download_and_add_image_on_glance(glance, image_name, image_url, data_dir):
- try:
- dest_path = data_dir
- if not os.path.exists(dest_path):
- os.makedirs(dest_path)
- file_name = image_url.rsplit('/')[-1]
- if not ft_utils.download_url(image_url, dest_path):
- return False
- except Exception:
- raise Exception("Impossible to download image from {}".format(
- image_url))
-
- try:
- image = create_glance_image(
- glance, image_name, dest_path + file_name)
- if not image:
- return False
- else:
- return image
- except Exception:
- raise Exception("Impossible to put image {} in glance".format(
- image_name))
-
-
-# *********************************************
-# NOVA
-# *********************************************
-def get_instances(nova_client):
- try:
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
- return instances
- except Exception as e:
- logger.error("Error [get_instances(nova_client)]: %s" % e)
- return None
-
-
-def get_instance_status(nova_client, instance):
- try:
- instance = nova_client.servers.get(instance.id)
- return instance.status
- except Exception as e:
- logger.error("Error [get_instance_status(nova_client)]: %s" % e)
- return None
-
-
-def get_instance_by_name(nova_client, instance_name):
- try:
- instance = nova_client.servers.find(name=instance_name)
- return instance
- except Exception as e:
- logger.error("Error [get_instance_by_name(nova_client, '%s')]: %s"
- % (instance_name, e))
- return None
-
-
-def get_flavor_id(nova_client, flavor_name):
- flavors = nova_client.flavors.list(detailed=True)
- id = ''
- for f in flavors:
- if f.name == flavor_name:
- id = f.id
- break
- return id
-
-
-def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
- flavors = nova_client.flavors.list(detailed=True)
- id = ''
- for f in flavors:
- if min_ram <= f.ram and f.ram <= max_ram:
- id = f.id
- break
- return id
-
-
-def get_aggregates(nova_client):
- try:
- aggregates = nova_client.aggregates.list()
- return aggregates
- except Exception as e:
- logger.error("Error [get_aggregates(nova_client)]: %s" % e)
- return None
-
-
-def get_aggregate_id(nova_client, aggregate_name):
- try:
- aggregates = get_aggregates(nova_client)
- _id = [ag.id for ag in aggregates if ag.name == aggregate_name][0]
- return _id
- except Exception as e:
- logger.error("Error [get_aggregate_id(nova_client, %s)]:"
- " %s" % (aggregate_name, e))
- return None
-
-
-def get_availability_zones(nova_client):
- try:
- availability_zones = nova_client.availability_zones.list()
- return availability_zones
- except Exception as e:
- logger.error("Error [get_availability_zones(nova_client)]: %s" % e)
- return None
-
-
-def get_availability_zone_names(nova_client):
- try:
- az_names = [az.zoneName for az in get_availability_zones(nova_client)]
- return az_names
- except Exception as e:
- logger.error("Error [get_availability_zone_names(nova_client)]:"
- " %s" % e)
- return None
-
-
-def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
- try:
- flavor = nova_client.flavors.create(
- flavor_name, ram, vcpus, disk, is_public=public)
- try:
- extra_specs = ft_utils.get_functest_config(
- 'general.flavor_extra_specs')
- flavor.set_keys(extra_specs)
- except ValueError:
- # flavor extra specs are not configured, therefore skip the update
- pass
-
- except Exception as e:
- logger.error("Error [create_flavor(nova_client, '%s', '%s', '%s', "
- "'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
- return None
- return flavor.id
-
-
-def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
- flavor_exists = False
- nova_client = get_nova_client()
-
- flavor_id = get_flavor_id(nova_client, flavor_name)
- if flavor_id != '':
- logger.info("Using existing flavor '%s'..." % flavor_name)
- flavor_exists = True
- else:
- logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
- "'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
- flavor_id = create_flavor(
- nova_client, flavor_name, ram, disk, vcpus, public=public)
- if not flavor_id:
- raise Exception("Failed to create flavor '%s'..." % (flavor_name))
- else:
- logger.debug("Flavor '%s' with ID=%s created successfully."
- % (flavor_name, flavor_id))
-
- return flavor_exists, flavor_id
-
-
-def get_floating_ips(neutron_client):
- try:
- floating_ips = neutron_client.list_floatingips()
- return floating_ips['floatingips']
- except Exception as e:
- logger.error("Error [get_floating_ips(neutron_client)]: %s" % e)
- return None
-
-
-def get_hypervisors(nova_client):
- try:
- nodes = []
- hypervisors = nova_client.hypervisors.list()
- for hypervisor in hypervisors:
- if hypervisor.state == "up":
- nodes.append(hypervisor.hypervisor_hostname)
- return nodes
- except Exception as e:
- logger.error("Error [get_hypervisors(nova_client)]: %s" % e)
- return None
-
-
-def create_aggregate(nova_client, aggregate_name, av_zone):
- try:
- nova_client.aggregates.create(aggregate_name, av_zone)
- return True
- except Exception as e:
- logger.error("Error [create_aggregate(nova_client, %s, %s)]: %s"
- % (aggregate_name, av_zone, e))
- return None
-
-
-def add_host_to_aggregate(nova_client, aggregate_name, compute_host):
- try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.add_host(aggregate_id, compute_host)
- return True
- except Exception as e:
- logger.error("Error [add_host_to_aggregate(nova_client, %s, %s)]: %s"
- % (aggregate_name, compute_host, e))
- return None
-
-
-def create_aggregate_with_host(
- nova_client, aggregate_name, av_zone, compute_host):
- try:
- create_aggregate(nova_client, aggregate_name, av_zone)
- add_host_to_aggregate(nova_client, aggregate_name, compute_host)
- return True
- except Exception as e:
- logger.error("Error [create_aggregate_with_host("
- "nova_client, %s, %s, %s)]: %s"
- % (aggregate_name, av_zone, compute_host, e))
- return None
-
-
-def create_instance(flavor_name,
- image_id,
- network_id,
- instance_name="functest-vm",
- confdrive=True,
- userdata=None,
- av_zone='',
- fixed_ip=None,
- files=None):
- nova_client = get_nova_client()
- try:
- flavor = nova_client.flavors.find(name=flavor_name)
- except:
- flavors = nova_client.flavors.list()
- logger.error("Error: Flavor '%s' not found. Available flavors are: "
- "\n%s" % (flavor_name, flavors))
- return None
- if fixed_ip is not None:
- nics = {"net-id": network_id, "v4-fixed-ip": fixed_ip}
- else:
- nics = {"net-id": network_id}
- if userdata is None:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- availability_zone=av_zone,
- files=files
- )
- else:
- instance = nova_client.servers.create(
- name=instance_name,
- flavor=flavor,
- image=image_id,
- nics=[nics],
- config_drive=confdrive,
- userdata=userdata,
- availability_zone=av_zone,
- files=files
- )
- return instance
-
-
-def create_instance_and_wait_for_active(flavor_name,
- image_id,
- network_id,
- instance_name="",
- config_drive=False,
- userdata="",
- av_zone='',
- fixed_ip=None,
- files=None):
- SLEEP = 3
- VM_BOOT_TIMEOUT = 180
- nova_client = get_nova_client()
- instance = create_instance(flavor_name,
- image_id,
- network_id,
- instance_name,
- config_drive,
- userdata,
- av_zone=av_zone,
- fixed_ip=fixed_ip,
- files=files)
- count = VM_BOOT_TIMEOUT / SLEEP
- for n in range(count, -1, -1):
- status = get_instance_status(nova_client, instance)
- if status is None:
- time.sleep(SLEEP)
- continue
- elif status.lower() == "active":
- return instance
- elif status.lower() == "error":
- logger.error("The instance %s went to ERROR status."
- % instance_name)
- return None
- time.sleep(SLEEP)
- logger.error("Timeout booting the instance %s." % instance_name)
- return None
-
-
-def create_floating_ip(neutron_client):
- extnet_id = get_external_net_id(neutron_client)
- props = {'floating_network_id': extnet_id}
- try:
- ip_json = neutron_client.create_floatingip({'floatingip': props})
- fip_addr = ip_json['floatingip']['floating_ip_address']
- fip_id = ip_json['floatingip']['id']
- except Exception as e:
- logger.error("Error [create_floating_ip(neutron_client)]: %s" % e)
- return None
- return {'fip_addr': fip_addr, 'fip_id': fip_id}
-
-
-def add_floating_ip(nova_client, server_id, floatingip_addr):
- try:
- nova_client.servers.add_floating_ip(server_id, floatingip_addr)
- return True
- except Exception as e:
- logger.error("Error [add_floating_ip(nova_client, '%s', '%s')]: %s"
- % (server_id, floatingip_addr, e))
- return False
-
-
-def delete_instance(nova_client, instance_id):
- try:
- nova_client.servers.force_delete(instance_id)
- return True
- except Exception as e:
- logger.error("Error [delete_instance(nova_client, '%s')]: %s"
- % (instance_id, e))
- return False
-
-
-def delete_floating_ip(neutron_client, floatingip_id):
- try:
- neutron_client.delete_floatingip(floatingip_id)
- return True
- except Exception as e:
- logger.error("Error [delete_floating_ip(neutron_client, '%s')]: %s"
- % (floatingip_id, e))
- return False
-
-
-def remove_host_from_aggregate(nova_client, aggregate_name, compute_host):
- try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.remove_host(aggregate_id, compute_host)
- return True
- except Exception as e:
- logger.error("Error [remove_host_from_aggregate(nova_client, %s, %s)]:"
- " %s" % (aggregate_name, compute_host, e))
- return False
-
-
-def remove_hosts_from_aggregate(nova_client, aggregate_name):
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- hosts = nova_client.aggregates.get(aggregate_id).hosts
- assert(
- all(remove_host_from_aggregate(nova_client, aggregate_name, host)
- for host in hosts))
-
-
-def delete_aggregate(nova_client, aggregate_name):
- try:
- remove_hosts_from_aggregate(nova_client, aggregate_name)
- nova_client.aggregates.delete(aggregate_name)
- return True
- except Exception as e:
- logger.error("Error [delete_aggregate(nova_client, %s)]: %s"
- % (aggregate_name, e))
- return False
-
-
-# *********************************************
-# NEUTRON
-# *********************************************
-def get_network_list(neutron_client):
- network_list = neutron_client.list_networks()['networks']
- if len(network_list) == 0:
- return None
- else:
- return network_list
-
-
-def get_router_list(neutron_client):
- router_list = neutron_client.list_routers()['routers']
- if len(router_list) == 0:
- return None
- else:
- return router_list
-
-
-def get_port_list(neutron_client):
- port_list = neutron_client.list_ports()['ports']
- if len(port_list) == 0:
- return None
- else:
- return port_list
-
-
-def get_network_id(neutron_client, network_name):
- networks = neutron_client.list_networks()['networks']
- id = ''
- for n in networks:
- if n['name'] == network_name:
- id = n['id']
- break
- return id
-
-
-def get_subnet_id(neutron_client, subnet_name):
- subnets = neutron_client.list_subnets()['subnets']
- id = ''
- for s in subnets:
- if s['name'] == subnet_name:
- id = s['id']
- break
- return id
-
-
-def get_router_id(neutron_client, router_name):
- routers = neutron_client.list_routers()['routers']
- id = ''
- for r in routers:
- if r['name'] == router_name:
- id = r['id']
- break
- return id
-
-
-def get_private_net(neutron_client):
- # Checks if there is an existing shared private network
- networks = neutron_client.list_networks()['networks']
- if len(networks) == 0:
- return None
- for net in networks:
- if (net['router:external'] is False) and (net['shared'] is True):
- return net
- return None
-
-
-def get_external_net(neutron_client):
- if (env.get('EXTERNAL_NETWORK')):
- return env.get('EXTERNAL_NETWORK')
- for network in neutron_client.list_networks()['networks']:
- if network['router:external']:
- return network['name']
- return None
-
-
-def get_external_net_id(neutron_client):
- if (env.get('EXTERNAL_NETWORK')):
- networks = neutron_client.list_networks(
- name=env.get('EXTERNAL_NETWORK'))
- net_id = networks['networks'][0]['id']
- return net_id
- for network in neutron_client.list_networks()['networks']:
- if network['router:external']:
- return network['id']
- return None
-
-
-def check_neutron_net(neutron_client, net_name):
- for network in neutron_client.list_networks()['networks']:
- if network['name'] == net_name:
- for subnet in network['subnets']:
- return True
- return False
-
-
-def create_neutron_net(neutron_client, name):
- json_body = {'network': {'name': name,
- 'admin_state_up': True}}
- try:
- network = neutron_client.create_network(body=json_body)
- network_dict = network['network']
- return network_dict['id']
- except Exception as e:
- logger.error("Error [create_neutron_net(neutron_client, '%s')]: %s"
- % (name, e))
- return None
-
-
-def create_neutron_subnet(neutron_client, name, cidr, net_id,
- dns=['8.8.8.8', '8.8.4.4']):
- json_body = {'subnets': [{'name': name, 'cidr': cidr,
- 'ip_version': 4, 'network_id': net_id,
- 'dns_nameservers': dns}]}
-
- try:
- subnet = neutron_client.create_subnet(body=json_body)
- return subnet['subnets'][0]['id']
- except Exception as e:
- logger.error("Error [create_neutron_subnet(neutron_client, '%s', "
- "'%s', '%s')]: %s" % (name, cidr, net_id, e))
- return None
-
-
-def create_neutron_router(neutron_client, name):
- json_body = {'router': {'name': name, 'admin_state_up': True}}
- try:
- router = neutron_client.create_router(json_body)
- return router['router']['id']
- except Exception as e:
- logger.error("Error [create_neutron_router(neutron_client, '%s')]: %s"
- % (name, e))
- return None
-
-
-def create_neutron_port(neutron_client, name, network_id, ip):
- json_body = {'port': {
- 'admin_state_up': True,
- 'name': name,
- 'network_id': network_id,
- 'fixed_ips': [{"ip_address": ip}]
- }}
- try:
- port = neutron_client.create_port(body=json_body)
- return port['port']['id']
- except Exception as e:
- logger.error("Error [create_neutron_port(neutron_client, '%s', '%s', "
- "'%s')]: %s" % (name, network_id, ip, e))
- return None
-
-
-def update_neutron_net(neutron_client, network_id, shared=False):
- json_body = {'network': {'shared': shared}}
- try:
- neutron_client.update_network(network_id, body=json_body)
- return True
- except Exception as e:
- logger.error("Error [update_neutron_net(neutron_client, '%s', '%s')]: "
- "%s" % (network_id, str(shared), e))
- return False
-
-
-def update_neutron_port(neutron_client, port_id, device_owner):
- json_body = {'port': {
- 'device_owner': device_owner,
- }}
- try:
- port = neutron_client.update_port(port=port_id,
- body=json_body)
- return port['port']['id']
- except Exception as e:
- logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
- " %s" % (port_id, device_owner, e))
- return None
-
-
-def add_interface_router(neutron_client, router_id, subnet_id):
- json_body = {"subnet_id": subnet_id}
- try:
- neutron_client.add_interface_router(router=router_id, body=json_body)
- return True
- except Exception as e:
- logger.error("Error [add_interface_router(neutron_client, '%s', "
- "'%s')]: %s" % (router_id, subnet_id, e))
- return False
-
-
-def add_gateway_router(neutron_client, router_id):
- ext_net_id = get_external_net_id(neutron_client)
- router_dict = {'network_id': ext_net_id}
- try:
- neutron_client.add_gateway_router(router_id, router_dict)
- return True
- except Exception as e:
- logger.error("Error [add_gateway_router(neutron_client, '%s')]: %s"
- % (router_id, e))
- return False
-
-
-def delete_neutron_net(neutron_client, network_id):
- try:
- neutron_client.delete_network(network_id)
- return True
- except Exception as e:
- logger.error("Error [delete_neutron_net(neutron_client, '%s')]: %s"
- % (network_id, e))
- return False
-
-
-def delete_neutron_subnet(neutron_client, subnet_id):
- try:
- neutron_client.delete_subnet(subnet_id)
- return True
- except Exception as e:
- logger.error("Error [delete_neutron_subnet(neutron_client, '%s')]: %s"
- % (subnet_id, e))
- return False
-
-
-def delete_neutron_router(neutron_client, router_id):
- try:
- neutron_client.delete_router(router=router_id)
- return True
- except Exception as e:
- logger.error("Error [delete_neutron_router(neutron_client, '%s')]: %s"
- % (router_id, e))
- return False
-
-
-def delete_neutron_port(neutron_client, port_id):
- try:
- neutron_client.delete_port(port_id)
- return True
- except Exception as e:
- logger.error("Error [delete_neutron_port(neutron_client, '%s')]: %s"
- % (port_id, e))
- return False
-
-
-def remove_interface_router(neutron_client, router_id, subnet_id):
- json_body = {"subnet_id": subnet_id}
- try:
- neutron_client.remove_interface_router(router=router_id,
- body=json_body)
- return True
- except Exception as e:
- logger.error("Error [remove_interface_router(neutron_client, '%s', "
- "'%s')]: %s" % (router_id, subnet_id, e))
- return False
-
-
-def remove_gateway_router(neutron_client, router_id):
- try:
- neutron_client.remove_gateway_router(router_id)
- return True
- except Exception as e:
- logger.error("Error [remove_gateway_router(neutron_client, '%s')]: %s"
- % (router_id, e))
- return False
-
-
-def create_network_full(neutron_client,
- net_name,
- subnet_name,
- router_name,
- cidr,
- dns=['8.8.8.8', '8.8.4.4']):
-
- # Check if the network already exists
- network_id = get_network_id(neutron_client, net_name)
- subnet_id = get_subnet_id(neutron_client, subnet_name)
- router_id = get_router_id(neutron_client, router_name)
-
- if network_id != '' and subnet_id != '' and router_id != '':
- logger.info("A network with name '%s' already exists..." % net_name)
- else:
- neutron_client.format = 'json'
- logger.info('Creating neutron network %s...' % net_name)
- network_id = create_neutron_net(neutron_client, net_name)
-
- if not network_id:
- return False
-
- logger.debug("Network '%s' created successfully" % network_id)
- logger.debug('Creating Subnet....')
- subnet_id = create_neutron_subnet(neutron_client, subnet_name,
- cidr, network_id, dns)
- if not subnet_id:
- return None
-
- logger.debug("Subnet '%s' created successfully" % subnet_id)
- logger.debug('Creating Router...')
- router_id = create_neutron_router(neutron_client, router_name)
-
- if not router_id:
- return None
-
- logger.debug("Router '%s' created successfully" % router_id)
- logger.debug('Adding router to subnet...')
-
- if not add_interface_router(neutron_client, router_id, subnet_id):
- return None
-
- logger.debug("Interface added successfully.")
-
- logger.debug('Adding gateway to router...')
- if not add_gateway_router(neutron_client, router_id):
- return None
-
- logger.debug("Gateway added successfully.")
-
- network_dic = {'net_id': network_id,
- 'subnet_id': subnet_id,
- 'router_id': router_id}
- return network_dic
-
-
-def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
- neutron_client = get_neutron_client()
-
- network_dic = create_network_full(neutron_client,
- net_name,
- subnt_name,
- router_name,
- subnet_cidr)
- if network_dic:
- if not update_neutron_net(neutron_client,
- network_dic['net_id'],
- shared=True):
- logger.error("Failed to update network %s..." % net_name)
- return None
- else:
- logger.debug("Network '%s' is available..." % net_name)
- else:
- logger.error("Network %s creation failed" % net_name)
- return None
- return network_dic
-
-
-# *********************************************
-# SEC GROUPS
-# *********************************************
-
-
-def get_security_groups(neutron_client):
- try:
- security_groups = neutron_client.list_security_groups()[
- 'security_groups']
- return security_groups
- except Exception as e:
- logger.error("Error [get_security_groups(neutron_client)]: %s" % e)
- return None
-
-
-def get_security_group_id(neutron_client, sg_name):
- security_groups = get_security_groups(neutron_client)
- id = ''
- for sg in security_groups:
- if sg['name'] == sg_name:
- id = sg['id']
- break
- return id
-
-
-def create_security_group(neutron_client, sg_name, sg_description):
- json_body = {'security_group': {'name': sg_name,
- 'description': sg_description}}
- try:
- secgroup = neutron_client.create_security_group(json_body)
- return secgroup['security_group']
- except Exception as e:
- logger.error("Error [create_security_group(neutron_client, '%s', "
- "'%s')]: %s" % (sg_name, sg_description, e))
- return None
-
-
-def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
- port_range_min=None, port_range_max=None):
-    # We create a security group rule in 2 steps
-    # 1 - we check the format and set the json body accordingly
-    # 2 - we call the neutron client to create the security group rule
-
- # Format check
- json_body = {'security_group_rule': {'direction': direction,
- 'security_group_id': sg_id,
- 'protocol': protocol}}
- # parameters may be
- # - both None => we do nothing
- # - both Not None => we add them to the json description
-    # but one cannot be None if the other is not None
- if (port_range_min is not None and port_range_max is not None):
- # add port_range in json description
- json_body['security_group_rule']['port_range_min'] = port_range_min
- json_body['security_group_rule']['port_range_max'] = port_range_max
- logger.debug("Security_group format set (port range included)")
- else:
- # either both port range are set to None => do nothing
- # or one is set but not the other => log it and return False
- if port_range_min is None and port_range_max is None:
- logger.debug("Security_group format set (no port range mentioned)")
- else:
- logger.error("Bad security group format."
- "One of the port range is not properly set:"
- "range min: {},"
- "range max: {}".format(port_range_min,
- port_range_max))
- return False
-
-    # Create the security group rule using the neutron client
- try:
- neutron_client.create_security_group_rule(json_body)
- return True
- except:
- logger.exception("Impossible to create_security_group_rule,"
- "security group rule probably already exists")
- return False
-
-
-def get_security_group_rules(neutron_client, sg_id):
- try:
- security_rules = neutron_client.list_security_group_rules()[
- 'security_group_rules']
- security_rules = [rule for rule in security_rules
- if rule["security_group_id"] == sg_id]
- return security_rules
- except Exception as e:
- logger.error("Error [get_security_group_rules(neutron_client, sg_id)]:"
- " %s" % e)
- return None
-
-
-def check_security_group_rules(neutron_client, sg_id, direction, protocol,
- port_min=None, port_max=None):
- try:
- security_rules = get_security_group_rules(neutron_client, sg_id)
- security_rules = [rule for rule in security_rules
- if (rule["direction"].lower() == direction and
- rule["protocol"].lower() == protocol and
- rule["port_range_min"] == port_min and
- rule["port_range_max"] == port_max)]
- if len(security_rules) == 0:
- return True
- else:
- return False
- except Exception as e:
- logger.error("Error [check_security_group_rules("
- " neutron_client, sg_id, direction,"
- " protocol, port_min=None, port_max=None)]: "
- "%s" % e)
- return None
-
-
-def create_security_group_full(neutron_client,
- sg_name, sg_description):
- sg_id = get_security_group_id(neutron_client, sg_name)
- if sg_id != '':
- logger.info("Using existing security group '%s'..." % sg_name)
- else:
- logger.info("Creating security group '%s'..." % sg_name)
- SECGROUP = create_security_group(neutron_client,
- sg_name,
- sg_description)
- if not SECGROUP:
- logger.error("Failed to create the security group...")
- return None
-
- sg_id = SECGROUP['id']
-
- logger.debug("Security group '%s' with ID=%s created successfully."
- % (SECGROUP['name'], sg_id))
-
- logger.debug("Adding ICMP rules in security group '%s'..."
- % sg_name)
- if not create_secgroup_rule(neutron_client, sg_id,
- 'ingress', 'icmp'):
- logger.error("Failed to create the security group rule...")
- return None
-
- logger.debug("Adding SSH rules in security group '%s'..."
- % sg_name)
- if not create_secgroup_rule(
- neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
- logger.error("Failed to create the security group rule...")
- return None
-
- if not create_secgroup_rule(
- neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
- logger.error("Failed to create the security group rule...")
- return None
- return sg_id
-
-
-def add_secgroup_to_instance(nova_client, instance_id, secgroup_id):
- try:
- nova_client.servers.add_security_group(instance_id, secgroup_id)
- return True
- except Exception as e:
- logger.error("Error [add_secgroup_to_instance(nova_client, '%s', "
- "'%s')]: %s" % (instance_id, secgroup_id, e))
- return False
-
-
-def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota):
- json_body = {"quota": {
- "security_group": sg_quota,
- "security_group_rule": sg_rule_quota
- }}
-
- try:
- neutron_client.update_quota(tenant_id=tenant_id,
- body=json_body)
- return True
- except Exception as e:
- logger.error("Error [update_sg_quota(neutron_client, '%s', '%s', "
- "'%s')]: %s" % (tenant_id, sg_quota, sg_rule_quota, e))
- return False
-
-
-def delete_security_group(neutron_client, secgroup_id):
- try:
- neutron_client.delete_security_group(secgroup_id)
- return True
- except Exception as e:
- logger.error("Error [delete_security_group(neutron_client, '%s')]: %s"
- % (secgroup_id, e))
- return False
-
-
-# *********************************************
-# GLANCE
-# *********************************************
-def get_images(glance_client):
- try:
- images = glance_client.images.list()
- return images
- except Exception as e:
- logger.error("Error [get_images]: %s" % e)
- return None
-
-
-def get_image_id(glance_client, image_name):
- images = glance_client.images.list()
- id = ''
- for i in images:
- if i.name == image_name:
- id = i.id
- break
- return id
-
-
-def create_glance_image(glance_client,
- image_name,
- file_path,
- disk="qcow2",
- extra_properties={},
- container="bare",
- public="public"):
- if not os.path.isfile(file_path):
- logger.error("Error: file %s does not exist." % file_path)
- return None
- try:
- image_id = get_image_id(glance_client, image_name)
- if image_id != '':
- logger.info("Image %s already exists." % image_name)
- else:
- logger.info("Creating image '%s' from '%s'..." % (image_name,
- file_path))
-
- image = glance_client.images.create(name=image_name,
- visibility=public,
- disk_format=disk,
- container_format=container,
- **extra_properties)
- image_id = image.id
- with open(file_path) as image_data:
- glance_client.images.upload(image_id, image_data)
- return image_id
- except Exception as e:
- logger.error("Error [create_glance_image(glance_client, '%s', '%s', "
- "'%s')]: %s" % (image_name, file_path, public, e))
- return None
-
-
-def get_or_create_image(name, path, format, extra_properties):
- image_exists = False
- glance_client = get_glance_client()
-
- image_id = get_image_id(glance_client, name)
- if image_id != '':
- logger.info("Using existing image '%s'..." % name)
- image_exists = True
- else:
- logger.info("Creating image '%s' from '%s'..." % (name, path))
- image_id = create_glance_image(glance_client,
- name,
- path,
- format,
- extra_properties)
- if not image_id:
- logger.error("Failed to create a Glance image...")
- else:
- logger.debug("Image '%s' with ID=%s created successfully."
- % (name, image_id))
-
- return image_exists, image_id
-
-
-def delete_glance_image(glance_client, image_id):
- try:
- glance_client.images.delete(image_id)
- return True
- except Exception as e:
- logger.error("Error [delete_glance_image(glance_client, '%s')]: %s"
- % (image_id, e))
- return False
-
-
-# *********************************************
-# CINDER
-# *********************************************
-def get_volumes(cinder_client):
- try:
- volumes = cinder_client.volumes.list(search_opts={'all_tenants': 1})
- return volumes
- except Exception as e:
- logger.error("Error [get_volumes(cinder_client)]: %s" % e)
- return None
-
-
-def update_cinder_quota(cinder_client, tenant_id, vols_quota,
- snapshots_quota, gigabytes_quota):
- quotas_values = {"volumes": vols_quota,
- "snapshots": snapshots_quota,
- "gigabytes": gigabytes_quota}
-
- try:
- cinder_client.quotas.update(tenant_id, **quotas_values)
- return True
- except Exception as e:
- logger.error("Error [update_cinder_quota(cinder_client, '%s', '%s', "
- "'%s' '%s')]: %s" % (tenant_id, vols_quota,
- snapshots_quota, gigabytes_quota, e))
- return False
-
-
-def delete_volume(cinder_client, volume_id, forced=False):
- try:
- if forced:
- try:
- cinder_client.volumes.detach(volume_id)
- except:
- logger.error(sys.exc_info()[0])
- cinder_client.volumes.force_delete(volume_id)
- else:
- cinder_client.volumes.delete(volume_id)
- return True
- except Exception as e:
- logger.error("Error [delete_volume(cinder_client, '%s', '%s')]: %s"
- % (volume_id, str(forced), e))
- return False
-
-
-# *********************************************
-# KEYSTONE
-# *********************************************
-def get_tenants(keystone_client):
- try:
- if is_keystone_v3():
- tenants = keystone_client.projects.list()
- else:
- tenants = keystone_client.tenants.list()
- return tenants
- except Exception as e:
- logger.error("Error [get_tenants(keystone_client)]: %s" % e)
- return None
-
-
-def get_users(keystone_client):
- try:
- users = keystone_client.users.list()
- return users
- except Exception as e:
- logger.error("Error [get_users(keystone_client)]: %s" % e)
- return None
-
-
-def get_tenant_id(keystone_client, tenant_name):
- tenants = get_tenants(keystone_client)
- id = ''
- for t in tenants:
- if t.name == tenant_name:
- id = t.id
- break
- return id
-
-
-def get_user_id(keystone_client, user_name):
- users = get_users(keystone_client)
- id = ''
- for u in users:
- if u.name == user_name:
- id = u.id
- break
- return id
-
-
-def get_role_id(keystone_client, role_name):
- roles = keystone_client.roles.list()
- id = ''
- for r in roles:
- if r.name == role_name:
- id = r.id
- break
- return id
-
-
-def get_domain_id(keystone_client, domain_name):
- domains = keystone_client.domains.list()
- id = ''
- for d in domains:
- if d.name == domain_name:
- id = d.id
- break
- return id
-
-
-def create_tenant(keystone_client, tenant_name, tenant_description):
- try:
- if is_keystone_v3():
- domain_name = os.environ['OS_PROJECT_DOMAIN_NAME']
- domain_id = get_domain_id(keystone_client, domain_name)
- tenant = keystone_client.projects.create(
- name=tenant_name,
- description=tenant_description,
- domain=domain_id,
- enabled=True)
- else:
- tenant = keystone_client.tenants.create(tenant_name,
- tenant_description,
- enabled=True)
- return tenant.id
- except Exception as e:
- logger.error("Error [create_tenant(keystone_client, '%s', '%s')]: %s"
- % (tenant_name, tenant_description, e))
- return None
-
-
-def get_or_create_tenant(keystone_client, tenant_name, tenant_description):
- tenant_id = get_tenant_id(keystone_client, tenant_name)
- if not tenant_id:
- tenant_id = create_tenant(keystone_client, tenant_name,
- tenant_description)
-
- return tenant_id
-
-
-def get_or_create_tenant_for_vnf(keystone_client, tenant_name,
- tenant_description):
- """Get or Create a Tenant
-
- Args:
- keystone_client: keystone client reference
- tenant_name: the name of the tenant
- tenant_description: the description of the tenant
-
-    return False if tenant retrieved through get
- return True if tenant created
- raise Exception if error during processing
- """
- try:
- tenant_id = get_tenant_id(keystone_client, tenant_name)
- if not tenant_id:
- tenant_id = create_tenant(keystone_client, tenant_name,
- tenant_description)
- return True
- else:
- return False
- except:
- raise Exception("Impossible to create a Tenant for the VNF {}".format(
- tenant_name))
-
-
-def create_user(keystone_client, user_name, user_password,
- user_email, tenant_id):
- try:
- if is_keystone_v3():
- user = keystone_client.users.create(name=user_name,
- password=user_password,
- email=user_email,
- project_id=tenant_id,
- enabled=True)
- else:
- user = keystone_client.users.create(user_name,
- user_password,
- user_email,
- tenant_id,
- enabled=True)
- return user.id
- except Exception as e:
- logger.error("Error [create_user(keystone_client, '%s', '%s', '%s'"
- "'%s')]: %s" % (user_name, user_password,
- user_email, tenant_id, e))
- return None
-
-
-def get_or_create_user(keystone_client, user_name, user_password,
- tenant_id, user_email=None):
- user_id = get_user_id(keystone_client, user_name)
- if not user_id:
- user_id = create_user(keystone_client, user_name, user_password,
- user_email, tenant_id)
- return user_id
-
-
-def get_or_create_user_for_vnf(keystone_client, vnf_ref):
- """Get or Create user for VNF
-
- Args:
- keystone_client: keystone client reference
- vnf_ref: VNF reference used as user name & password, tenant name
-
- return False if user retrieved through get
- return True if user created
- raise Exception if error during processing
- """
- try:
- user_id = get_user_id(keystone_client, vnf_ref)
- tenant_id = get_tenant_id(keystone_client, vnf_ref)
- created = False
- if not user_id:
- user_id = create_user(keystone_client, vnf_ref, vnf_ref,
- "", tenant_id)
- created = True
- try:
- role_id = get_role_id(keystone_client, 'admin')
- tenant_id = get_tenant_id(keystone_client, vnf_ref)
- add_role_user(keystone_client, user_id, role_id, tenant_id)
- except:
- logger.warn("Cannot associate user to role admin on tenant")
- return created
- except:
- raise Exception("Impossible to create a user for the VNF {}".format(
- vnf_ref))
-
-
-def add_role_user(keystone_client, user_id, role_id, tenant_id):
- try:
- if is_keystone_v3():
- keystone_client.roles.grant(role=role_id,
- user=user_id,
- project=tenant_id)
- else:
- keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
- return True
- except Exception as e:
- logger.error("Error [add_role_user(keystone_client, '%s', '%s'"
- "'%s')]: %s " % (user_id, role_id, tenant_id, e))
- return False
-
-
-def delete_tenant(keystone_client, tenant_id):
- try:
- if is_keystone_v3():
- keystone_client.projects.delete(tenant_id)
- else:
- keystone_client.tenants.delete(tenant_id)
- return True
- except Exception as e:
- logger.error("Error [delete_tenant(keystone_client, '%s')]: %s"
- % (tenant_id, e))
- return False
-
-
-def delete_user(keystone_client, user_id):
- try:
- keystone_client.users.delete(user_id)
- return True
- except Exception as e:
- logger.error("Error [delete_user(keystone_client, '%s')]: %s"
- % (user_id, e))
- return False
-
-
-# *********************************************
-# HEAT
-# *********************************************
-def get_resource(heat_client, stack_id, resource):
- try:
- resources = heat_client.resources.get(stack_id, resource)
- return resources
- except Exception as e:
- logger.error("Error [get_resource]: %s" % e)
- return None
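The deleted module carried all of the per-service python-*client wiring; its credential handling reduced to mapping OS_* variables onto keystoneauth option names. A condensed, dependency-free sketch of that logic, kept only as a reference to what callers lose (it mirrors the removed is_keystone_v3()/get_credentials(), it is not a drop-in replacement):

    import os


    def is_keystone_v3():
        return os.getenv('OS_IDENTITY_API_VERSION', '2') != '2'


    def get_credentials():
        env_vars = ['OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD']
        if is_keystone_v3():
            env_vars += ['OS_PROJECT_NAME', 'OS_USER_DOMAIN_NAME',
                         'OS_PROJECT_DOMAIN_NAME']
        else:
            env_vars += ['OS_TENANT_NAME']
        mapping = {
            'OS_USERNAME': 'username', 'OS_PASSWORD': 'password',
            'OS_AUTH_URL': 'auth_url', 'OS_TENANT_NAME': 'tenant_name',
            'OS_USER_DOMAIN_NAME': 'user_domain_name',
            'OS_PROJECT_DOMAIN_NAME': 'project_domain_name',
            'OS_PROJECT_NAME': 'project_name',
        }
        missing = [var for var in env_vars if not os.getenv(var)]
        if missing:
            raise RuntimeError(
                "Please set the mandatory env vars: %s" % missing)
        return {mapping[var]: os.environ[var] for var in env_vars}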
diff --git a/requirements.txt b/requirements.txt
index 8c83f0a8a..86d4b9a00 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,13 +4,6 @@
pbr!=2.1.0,>=2.0.0 # Apache-2.0
PyYAML>=3.10.0 # MIT
GitPython>=1.0.1 # BSD License (3 clause)
-keystoneauth1>=3.1.0 # Apache-2.0
-python-cinderclient>=3.1.0 # Apache-2.0
-python-glanceclient>=2.8.0 # Apache-2.0
-python-heatclient>=1.6.1 # Apache-2.0
-python-keystoneclient>=3.8.0 # Apache-2.0
-python-neutronclient>=6.3.0 # Apache-2.0
-python-novaclient>=9.0.0 # Apache-2.0
requests>=2.14.2 # Apache-2.0
robotframework>=3.0
scp
diff --git a/setup.cfg b/setup.cfg
index 47f3feaf6..4b03d4e5f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,7 +13,6 @@ console_scripts =
functest = functest.cli.cli_base:cli
functest_odl = functest.opnfv_tests.sdn.odl.odl:main
functest_refstack_client = functest.opnfv_tests.openstack.refstack_client.refstack_client:main
- functest_tempest_conf = functest.opnfv_tests.openstack.refstack_client.tempest_conf:main
run_tests = functest.ci.run_tests:main
check_deployment = functest.ci.check_deployment:main
functest_restapi = functest.api.server:main