aboutsummaryrefslogtreecommitdiffstats
path: root/functest/core
diff options
context:
space:
mode:
Diffstat (limited to 'functest/core')
-rw-r--r--functest/core/cloudify.py164
-rw-r--r--functest/core/singlevm.py272
-rw-r--r--functest/core/tenantnetwork.py149
-rw-r--r--functest/core/vnf.py187
4 files changed, 453 insertions, 319 deletions
diff --git a/functest/core/cloudify.py b/functest/core/cloudify.py
index 954491f6c..966d33645 100644
--- a/functest/core/cloudify.py
+++ b/functest/core/cloudify.py
@@ -12,9 +12,13 @@
from __future__ import division
import logging
+import os
import time
+import traceback
from cloudify_rest_client import CloudifyClient
+from cloudify_rest_client.executions import Execution
+import scp
from functest.core import singlevm
@@ -25,23 +29,28 @@ class Cloudify(singlevm.SingleVm2):
__logger = logging.getLogger(__name__)
filename = ('/home/opnfv/functest/images/'
- 'cloudify-manager-premium-4.0.1.qcow2')
+ 'ubuntu-18.04-server-cloudimg-amd64.img')
flavor_ram = 4096
flavor_vcpus = 2
- flavor_disk = 50
- username = 'centos'
+ flavor_disk = 40
+ username = 'ubuntu'
ssh_connect_loops = 12
+ create_server_timeout = 600
ports = [80, 443, 5671, 53333]
+ cloudify_archive = ('/home/opnfv/functest/images/'
+ 'cloudify-docker-manager-community-19.01.24.tar')
+ cloudify_container = "docker-cfy-manager:latest"
+
def __init__(self, **kwargs):
"""Initialize Cloudify testcase object."""
if "case_name" not in kwargs:
kwargs["case_name"] = "cloudify"
- super(Cloudify, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self.cfy_client = None
def prepare(self):
- super(Cloudify, self).prepare()
+ super().prepare()
for port in self.ports:
self.cloud.create_security_group_rule(
self.sec.id, port_range_min=port, port_range_max=port,
@@ -51,12 +60,28 @@ class Cloudify(singlevm.SingleVm2):
"""
Deploy Cloudify Manager.
"""
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(self.cloudify_archive,
+ remote_path=os.path.basename(self.cloudify_archive))
+ (_, stdout, stderr) = self.ssh.exec_command(
+ "sudo apt-get update && "
+ "sudo apt-get install -y docker.io && "
+ "sudo docker load -i "
+ f"~/{os.path.basename(self.cloudify_archive)} && "
+ "sudo docker run --name cfy_manager_local -d "
+ "--restart unless-stopped -v /sys/fs/cgroup:/sys/fs/cgroup:ro "
+ "--tmpfs /run --tmpfs /run/lock --security-opt seccomp:unconfined "
+ f"--cap-add SYS_ADMIN --network=host {self.cloudify_container}")
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
self.cfy_client = CloudifyClient(
- host=self.fip.floating_ip_address,
- username='admin', password='admin', tenant='default_tenant',
- api_version='v3')
+ host=self.fip.floating_ip_address if self.fip else (
+ self.sshvm.public_v4),
+ username='admin', password='admin', tenant='default_tenant')
self.__logger.info("Attemps running status of the Manager")
- for loop in range(10):
+ secret_key = "foo"
+ secret_value = "bar"
+ for loop in range(20):
try:
self.__logger.debug(
"status %s", self.cfy_client.manager.get_status())
@@ -65,13 +90,130 @@ class Cloudify(singlevm.SingleVm2):
"The current manager status is %s", cfy_status)
if str(cfy_status) != 'running':
raise Exception("Cloudify Manager isn't up and running")
+ for secret in iter(self.cfy_client.secrets.list()):
+ if secret_key == secret["key"]:
+ self.__logger.debug("Updating secrets: %s", secret_key)
+ self.cfy_client.secrets.update(
+ secret_key, secret_value)
+ break
+ else:
+ self.__logger.debug("Creating secrets: %s", secret_key)
+ self.cfy_client.secrets.create(secret_key, secret_value)
+ self.cfy_client.secrets.delete(secret_key)
+ self.__logger.info("Secrets API successfully reached")
break
except Exception: # pylint: disable=broad-except
- self.__logger.info(
- "try %s: Cloudify Manager isn't up and running", loop + 1)
+ self.__logger.debug(
+ "try %s: Cloudify Manager isn't up and running \n%s",
+ loop + 1, traceback.format_exc())
time.sleep(30)
else:
self.__logger.error("Cloudify Manager isn't up and running")
return 1
self.__logger.info("Cloudify Manager is up and running")
return 0
+
+ def put_private_key(self):
+ """Put private keypair in manager"""
+ self.__logger.info("Put private keypair in manager")
+ scpc = scp.SCPClient(self.ssh.get_transport())
+ scpc.put(self.key_filename, remote_path='~/cloudify_ims.pem')
+ (_, stdout, stderr) = self.ssh.exec_command(
+ "sudo docker cp ~/cloudify_ims.pem "
+ "cfy_manager_local:/etc/cloudify/ && "
+ "sudo docker exec cfy_manager_local "
+ "chmod 444 /etc/cloudify/cloudify_ims.pem")
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+ def upload_cfy_plugins(self, yaml, wgn):
+ """Upload Cloudify plugins"""
+ (_, stdout, stderr) = self.ssh.exec_command(
+ "sudo docker exec cfy_manager_local "
+ f"cfy plugins upload -y {yaml} {wgn} && "
+ "sudo docker exec cfy_manager_local cfy status")
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
+
+ def kill_existing_execution(self, dep_name):
+ """kill existing execution"""
+ try:
+ self.__logger.info('Deleting the current deployment')
+ exec_list = self.cfy_client.executions.list()
+ for execution in exec_list:
+ if execution['status'] == "started":
+ try:
+ self.cfy_client.executions.cancel(
+ execution['id'], force=True)
+ except Exception: # pylint: disable=broad-except
+ self.__logger.warning("Can't cancel the current exec")
+ execution = self.cfy_client.executions.start(
+ dep_name, 'uninstall', parameters=dict(ignore_failure=True))
+ wait_for_execution(self.cfy_client, execution, self.__logger)
+ self.cfy_client.deployments.delete(dep_name)
+ time.sleep(10)
+ self.cfy_client.blueprints.delete(dep_name)
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception("Some issue during the undeployment ..")
+
+
+def wait_for_execution(client, execution, logger, timeout=3600, ):
+ """Wait for a workflow execution on Cloudify Manager."""
+ # if execution already ended - return without waiting
+ if execution.status in Execution.END_STATES:
+ return execution
+
+ if timeout is not None:
+ deadline = time.time() + timeout
+
+ # Poll for execution status and execution logs, until execution ends
+ # and we receive an event of type in WORKFLOW_END_TYPES
+ offset = 0
+ batch_size = 50
+ event_list = []
+ execution_ended = False
+ while True:
+ event_list = client.events.list(
+ execution_id=execution.id,
+ _offset=offset,
+ _size=batch_size,
+ include_logs=True,
+ sort='@timestamp').items
+
+ offset = offset + len(event_list)
+ for event in event_list:
+ logger.debug(event.get('message'))
+
+ if timeout is not None:
+ if time.time() > deadline:
+                raise RuntimeError(
+                    f'execution of operation {execution.workflow_id} for '
+                    f'deployment {execution.deployment_id} timed out')
+ # update the remaining timeout
+ timeout = deadline - time.time()
+
+ if not execution_ended:
+ execution = client.executions.get(execution.id)
+ execution_ended = execution.status in Execution.END_STATES
+
+ if execution_ended:
+ break
+
+ time.sleep(5)
+
+ return execution
+
+
+def get_execution_id(client, deployment_id):
+ """
+ Get the execution id of a env preparation.
+
+ network, security group, fip, VM creation
+ """
+ executions = client.executions.list(deployment_id=deployment_id)
+ for execution in executions:
+ if execution.workflow_id == 'create_deployment_environment':
+ return execution
+ raise RuntimeError('Failed to get create_deployment_environment '
+                       'workflow execution. '
+ f'Available executions: {executions}')
diff --git a/functest/core/singlevm.py b/functest/core/singlevm.py
index ad79a8e14..4bce516d3 100644
--- a/functest/core/singlevm.py
+++ b/functest/core/singlevm.py
@@ -9,11 +9,12 @@
"""Ease deploying a single VM reachable via ssh
-It offers a simple way to create all tenant network ressources + a VM for
+It offers a simple way to create all tenant network resources + a VM for
advanced testcases (e.g. deploying an orchestrator).
"""
import logging
+import re
import tempfile
import time
@@ -22,6 +23,8 @@ from xtesting.core import testcase
from functest.core import tenantnetwork
from functest.utils import config
+from functest.utils import env
+from functest.utils import functest_utils
class VmReady1(tenantnetwork.TenantNetwork1):
@@ -36,23 +39,27 @@ class VmReady1(tenantnetwork.TenantNetwork1):
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
- filename = '/home/opnfv/functest/images/cirros-0.4.0-x86_64-disk.img'
+ filename = '/home/opnfv/functest/images/cirros-0.6.1-x86_64-disk.img'
+ image_format = 'qcow2'
+ extra_properties = {}
+ filename_alt = filename
+ image_alt_format = image_format
+ extra_alt_properties = extra_properties
visibility = 'private'
- extra_properties = None
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 1
+ flavor_extra_specs = {}
flavor_alt_ram = 1024
flavor_alt_vcpus = 1
flavor_alt_disk = 1
-
- image_format = 'qcow2'
+ flavor_alt_extra_specs = flavor_extra_specs
+ create_server_timeout = 180
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'vmready1'
- super(VmReady1, self).__init__(**kwargs)
- self.orig_cloud = self.cloud
+ super().__init__(**kwargs)
self.image = None
self.flavor = None
@@ -67,20 +74,60 @@ class VmReady1(tenantnetwork.TenantNetwork1):
Raises: expection on error
"""
assert self.cloud
+ extra_properties = self.extra_properties.copy()
+ if env.get('IMAGE_PROPERTIES'):
+ extra_properties.update(
+ functest_utils.convert_ini_to_dict(
+ env.get('IMAGE_PROPERTIES')))
+ extra_properties.update(
+ getattr(config.CONF, f'{self.case_name}_extra_properties', {}))
image = self.cloud.create_image(
- name if name else '{}-img_{}'.format(self.case_name, self.guid),
+ name if name else f'{self.case_name}-img_{self.guid}',
filename=getattr(
- config.CONF, '{}_image'.format(self.case_name),
+ config.CONF, f'{self.case_name}_image',
self.filename),
- meta=getattr(
- config.CONF, '{}_extra_properties'.format(self.case_name),
- self.extra_properties),
+ meta=extra_properties,
disk_format=getattr(
- config.CONF, '{}_image_format'.format(self.case_name),
+ config.CONF, f'{self.case_name}_image_format',
self.image_format),
visibility=getattr(
- config.CONF, '{}_visibility'.format(self.case_name),
- self.visibility))
+ config.CONF, f'{self.case_name}_visibility',
+ self.visibility),
+ wait=True)
+ self.__logger.debug("image: %s", image)
+ return image
+
+ def publish_image_alt(self, name=None):
+ """Publish alternative image
+
+ It allows publishing multiple images for the child testcases. It forces
+ the same configuration for all subtestcases.
+
+ Returns: image
+
+        Raises: exception on error
+ """
+ assert self.cloud
+ extra_alt_properties = self.extra_alt_properties.copy()
+ if env.get('IMAGE_PROPERTIES'):
+ extra_alt_properties.update(
+ functest_utils.convert_ini_to_dict(
+ env.get('IMAGE_PROPERTIES')))
+ extra_alt_properties.update(
+ getattr(config.CONF, f'{self.case_name}_extra_alt_properties', {}))
+ image = self.cloud.create_image(
+ name if name else f'{self.case_name}-img_alt_{self.guid}',
+ filename=getattr(
+ config.CONF, f'{self.case_name}_image_alt',
+ self.filename_alt),
+ meta=extra_alt_properties,
+ disk_format=getattr(
+ config.CONF, f'{self.case_name}_image_alt_format',
+ self.image_format),
+ visibility=getattr(
+ config.CONF, f'{self.case_name}_visibility',
+ self.visibility),
+ wait=True)
self.__logger.debug("image: %s", image)
return image
@@ -96,16 +143,23 @@ class VmReady1(tenantnetwork.TenantNetwork1):
"""
assert self.orig_cloud
flavor = self.orig_cloud.create_flavor(
- name if name else '{}-flavor_{}'.format(self.case_name, self.guid),
- getattr(config.CONF, '{}_flavor_ram'.format(self.case_name),
+ name if name else f'{self.case_name}-flavor_{self.guid}',
+ getattr(config.CONF, f'{self.case_name}_flavor_ram',
self.flavor_ram),
- getattr(config.CONF, '{}_flavor_vcpus'.format(self.case_name),
+ getattr(config.CONF, f'{self.case_name}_flavor_vcpus',
self.flavor_vcpus),
- getattr(config.CONF, '{}_flavor_disk'.format(self.case_name),
+ getattr(config.CONF, f'{self.case_name}_flavor_disk',
self.flavor_disk))
self.__logger.debug("flavor: %s", flavor)
- self.orig_cloud.set_flavor_specs(
- flavor.id, getattr(config.CONF, 'flavor_extra_specs', {}))
+ flavor_extra_specs = self.flavor_extra_specs.copy()
+ if env.get('FLAVOR_EXTRA_SPECS'):
+ flavor_extra_specs.update(
+ functest_utils.convert_ini_to_dict(
+ env.get('FLAVOR_EXTRA_SPECS')))
+ flavor_extra_specs.update(
+ getattr(config.CONF,
+ f'{self.case_name}_flavor_extra_specs', {}))
+ self.orig_cloud.set_flavor_specs(flavor.id, flavor_extra_specs)
return flavor
def create_flavor_alt(self, name=None):
@@ -120,17 +174,24 @@ class VmReady1(tenantnetwork.TenantNetwork1):
"""
assert self.orig_cloud
flavor = self.orig_cloud.create_flavor(
- name if name else '{}-flavor_alt_{}'.format(
- self.case_name, self.guid),
- getattr(config.CONF, '{}_flavor_alt_ram'.format(self.case_name),
+ name if name else f'{self.case_name}-flavor_alt_{self.guid}',
+ getattr(config.CONF, f'{self.case_name}_flavor_alt_ram',
self.flavor_alt_ram),
- getattr(config.CONF, '{}_flavor_alt_vcpus'.format(self.case_name),
+ getattr(config.CONF, f'{self.case_name}_flavor_alt_vcpus',
self.flavor_alt_vcpus),
- getattr(config.CONF, '{}_flavor_alt_disk'.format(self.case_name),
+ getattr(config.CONF, f'{self.case_name}_flavor_alt_disk',
self.flavor_alt_disk))
self.__logger.debug("flavor: %s", flavor)
+ flavor_alt_extra_specs = self.flavor_alt_extra_specs.copy()
+ if env.get('FLAVOR_EXTRA_SPECS'):
+ flavor_alt_extra_specs.update(
+ functest_utils.convert_ini_to_dict(
+ env.get('FLAVOR_EXTRA_SPECS')))
+ flavor_alt_extra_specs.update(
+ getattr(config.CONF,
+ f'{self.case_name}_flavor_alt_extra_specs', {}))
self.orig_cloud.set_flavor_specs(
- flavor.id, getattr(config.CONF, 'flavor_extra_specs', {}))
+ flavor.id, flavor_alt_extra_specs)
return flavor
def boot_vm(self, name=None, **kwargs):
@@ -145,15 +206,71 @@ class VmReady1(tenantnetwork.TenantNetwork1):
"""
assert self.cloud
vm1 = self.cloud.create_server(
- name if name else '{}-vm_{}'.format(self.case_name, self.guid),
+ name if name else f'{self.case_name}-vm_{self.guid}',
image=self.image.id, flavor=self.flavor.id,
- auto_ip=False, wait=True,
- network=self.network.id,
- **kwargs)
- vm1 = self.cloud.wait_for_server(vm1, auto_ip=False)
+ auto_ip=False,
+ network=self.network.id if self.network else env.get(
+ "EXTERNAL_NETWORK"),
+ timeout=self.create_server_timeout, wait=True, **kwargs)
self.__logger.debug("vm: %s", vm1)
return vm1
+ def check_regex_in_console(self, name, regex=' login: ', loop=6):
+ """Wait for specific message in console
+
+ Returns: True or False on errors
+ """
+ assert self.cloud
+ for iloop in range(loop):
+ console = self.cloud.get_server_console(name)
+ self.__logger.debug("console: \n%s", console)
+ if re.search(regex, console):
+ self.__logger.debug(
+ "regex found: '%s' in console\n%s", regex, console)
+ return True
+ self.__logger.debug(
+ "try %s: cannot find regex '%s' in console\n%s",
+ iloop + 1, regex, console)
+ time.sleep(10)
+ self.__logger.error("cannot find regex '%s' in console", regex)
+ return False
+
+ def clean_orphan_security_groups(self):
+ """Clean all security groups which are not owned by an existing tenant
+
+ It lists all orphan security groups in use as debug to avoid
+ misunderstanding the testcase results (it could happen if cloud admin
+ removes accounts without cleaning the virtual machines)
+ """
+ sec_groups = self.orig_cloud.list_security_groups()
+ for sec_group in sec_groups:
+ if not sec_group.tenant_id:
+ continue
+ if not self.orig_cloud.get_project(sec_group.tenant_id):
+ self.__logger.debug("Cleaning security group %s", sec_group.id)
+ try:
+ self.orig_cloud.delete_security_group(sec_group.id)
+ except Exception: # pylint: disable=broad-except
+ self.__logger.debug(
+ "Orphan security group %s in use", sec_group.id)
+
+ def count_hypervisors(self):
+ """Count hypervisors."""
+ if env.get('SKIP_DOWN_HYPERVISORS').lower() == 'false':
+ return len(self.orig_cloud.list_hypervisors())
+ return self.count_active_hypervisors()
+
+ def count_active_hypervisors(self):
+ """Count all hypervisors which are up."""
+ compute_cnt = 0
+ for hypervisor in self.orig_cloud.list_hypervisors():
+ if hypervisor['state'] == 'up':
+ compute_cnt += 1
+ else:
+ self.__logger.warning(
+ "%s is down", hypervisor['hypervisor_hostname'])
+ return compute_cnt
+
def run(self, **kwargs):
"""Boot the new VM
@@ -168,7 +285,7 @@ class VmReady1(tenantnetwork.TenantNetwork1):
status = testcase.TestCase.EX_RUN_ERROR
try:
assert self.cloud
- assert super(VmReady1, self).run(
+ assert super().run(
**kwargs) == testcase.TestCase.EX_OK
self.image = self.publish_image()
self.flavor = self.create_flavor()
@@ -185,20 +302,22 @@ class VmReady1(tenantnetwork.TenantNetwork1):
try:
assert self.orig_cloud
assert self.cloud
- super(VmReady1, self).clean()
+ super().clean()
if self.image:
self.cloud.delete_image(self.image.id)
if self.flavor:
self.orig_cloud.delete_flavor(self.flavor.id)
+ if env.get('CLEAN_ORPHAN_SECURITY_GROUPS').lower() == 'true':
+ self.clean_orphan_security_groups()
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
class VmReady2(VmReady1):
"""Deploy a single VM reachable via ssh (scenario2)
It creates new user/project before creating and configuring all tenant
- network ressources, flavors, images, etc. required by advanced testcases.
+ network resources, flavors, images, etc. required by advanced testcases.
It ensures that all testcases inheriting from SingleVm2 could work
without specific configurations (or at least read the same config data).
@@ -209,7 +328,7 @@ class VmReady2(VmReady1):
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'vmready2'
- super(VmReady2, self).__init__(**kwargs)
+ super().__init__(**kwargs)
try:
assert self.orig_cloud
self.project = tenantnetwork.NewProject(
@@ -223,11 +342,11 @@ class VmReady2(VmReady1):
def clean(self):
try:
- super(VmReady2, self).clean()
+ super().clean()
assert self.project
self.project.clean()
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
class SingleVm1(VmReady1):
@@ -243,13 +362,16 @@ class SingleVm1(VmReady1):
__logger = logging.getLogger(__name__)
username = 'cirros'
- ssh_connect_timeout = 60
+ ssh_connect_timeout = 1
ssh_connect_loops = 6
+ create_floating_ip_timeout = 120
+ check_console_loop = 6
+ check_console_regex = ' login: '
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'singlevm1'
- super(SingleVm1, self).__init__(**kwargs)
+ super().__init__(**kwargs)
self.sshvm = None
self.sec = None
self.fip = None
@@ -267,14 +389,15 @@ class SingleVm1(VmReady1):
"""
assert self.cloud
self.keypair = self.cloud.create_keypair(
- '{}-kp_{}'.format(self.case_name, self.guid))
+ f'{self.case_name}-kp_{self.guid}')
self.__logger.debug("keypair: %s", self.keypair)
- self.__logger.debug("private_key: %s", self.keypair.private_key)
- with open(self.key_filename, 'w') as private_key_file:
+ self.__logger.debug("private_key:\n%s", self.keypair.private_key)
+ with open(
+ self.key_filename, 'w', encoding='utf-8') as private_key_file:
private_key_file.write(self.keypair.private_key)
self.sec = self.cloud.create_security_group(
- '{}-sg_{}'.format(self.case_name, self.guid),
- 'created by OPNFV Functest ({})'.format(self.case_name))
+ f'{self.case_name}-sg_{self.guid}',
+ f'created by OPNFV Functest ({self.case_name})')
self.cloud.create_security_group_rule(
self.sec.id, port_range_min='22', port_range_max='22',
protocol='tcp', direction='ingress')
@@ -292,31 +415,34 @@ class SingleVm1(VmReady1):
- None on error
"""
assert vm1
- fip = self.cloud.create_floating_ip(
- network=self.ext_net.id, server=vm1)
- self.__logger.debug("floating_ip: %s", fip)
- p_console = self.cloud.get_server_console(vm1)
- self.__logger.debug("vm console: \n%s", p_console)
+ fip = None
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ fip = self.cloud.create_floating_ip(
+ network=self.ext_net.id, server=vm1, wait=True,
+ timeout=self.create_floating_ip_timeout)
+ self.__logger.debug("floating_ip: %s", fip)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
for loop in range(self.ssh_connect_loops):
try:
+ p_console = self.cloud.get_server_console(vm1)
+ self.__logger.debug("vm console: \n%s", p_console)
ssh.connect(
- fip.floating_ip_address,
+ fip.floating_ip_address if fip else vm1.public_v4,
username=getattr(
config.CONF,
- '{}_image_user'.format(self.case_name), self.username),
+ f'{self.case_name}_image_user', self.username),
key_filename=self.key_filename,
timeout=getattr(
config.CONF,
- '{}_vm_ssh_connect_timeout'.format(self.case_name),
+ f'{self.case_name}_vm_ssh_connect_timeout',
self.ssh_connect_timeout))
break
- except Exception: # pylint: disable=broad-except
+ except Exception as exc: # pylint: disable=broad-except
self.__logger.debug(
- "try %s: cannot connect to %s", loop + 1,
- fip.floating_ip_address)
- time.sleep(10)
+ "try %s: cannot connect to %s: %s", loop + 1,
+ fip.floating_ip_address if fip else vm1.public_v4, exc)
+ time.sleep(9)
else:
self.__logger.error(
"cannot connect to %s", fip.floating_ip_address)
@@ -330,8 +456,9 @@ class SingleVm1(VmReady1):
Returns: echo exit codes
"""
- (_, stdout, _) = self.ssh.exec_command('echo Hello World')
- self.__logger.debug("output:\n%s", stdout.read())
+ (_, stdout, stderr) = self.ssh.exec_command('echo Hello World')
+ self.__logger.debug("output:\n%s", stdout.read().decode("utf-8"))
+ self.__logger.debug("error:\n%s", stderr.read().decode("utf-8"))
return stdout.channel.recv_exit_status()
def run(self, **kwargs):
@@ -350,16 +477,19 @@ class SingleVm1(VmReady1):
status = testcase.TestCase.EX_RUN_ERROR
try:
assert self.cloud
- assert super(SingleVm1, self).run(
+ assert super().run(
**kwargs) == testcase.TestCase.EX_OK
self.result = 0
self.prepare()
self.sshvm = self.boot_vm(
key_name=self.keypair.id, security_groups=[self.sec.id])
- (self.fip, self.ssh) = self.connect(self.sshvm)
- if not self.execute():
- self.result = 100
- status = testcase.TestCase.EX_OK
+ if self.check_regex_in_console(
+ self.sshvm.name, regex=self.check_console_regex,
+ loop=self.check_console_loop):
+ (self.fip, self.ssh) = self.connect(self.sshvm)
+ if not self.execute():
+ self.result = 100
+ status = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
self.__logger.exception('Cannot run %s', self.case_name)
finally:
@@ -378,16 +508,16 @@ class SingleVm1(VmReady1):
self.cloud.delete_security_group(self.sec.id)
if self.keypair:
self.cloud.delete_keypair(self.keypair.name)
- super(SingleVm1, self).clean()
+ super().clean()
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
class SingleVm2(SingleVm1):
"""Deploy a single VM reachable via ssh (scenario2)
It creates new user/project before creating and configuring all tenant
- network ressources and vms required by advanced testcases.
+ network resources and vms required by advanced testcases.
It ensures that all testcases inheriting from SingleVm2 could work
without specific configurations (or at least read the same config data).
@@ -398,7 +528,7 @@ class SingleVm2(SingleVm1):
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'singlevm2'
- super(SingleVm2, self).__init__(**kwargs)
+ super().__init__(**kwargs)
try:
assert self.orig_cloud
self.project = tenantnetwork.NewProject(
@@ -412,8 +542,8 @@ class SingleVm2(SingleVm1):
def clean(self):
try:
- super(SingleVm2, self).clean()
+ super().clean()
assert self.project
self.project.clean()
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
diff --git a/functest/core/tenantnetwork.py b/functest/core/tenantnetwork.py
index 286aef67e..3670dbe8a 100644
--- a/functest/core/tenantnetwork.py
+++ b/functest/core/tenantnetwork.py
@@ -9,10 +9,11 @@
"""Ease deploying tenant networks
-It offers a simple way to create all tenant network ressources required by a
+It offers a simple way to create all tenant network resources required by a
testcase (including all Functest ones):
+
- TenantNetwork1 selects the user and the project set as env vars
- - TenantNetwork2 creates a user and project to isolate the same ressources
+ - TenantNetwork2 creates a user and project to isolate the same resources
This classes could be reused by more complexed scenarios (Single VM)
"""
@@ -24,18 +25,19 @@ import uuid
import os_client_config
import shade
+from tempest.lib.common.utils import data_utils
from xtesting.core import testcase
from functest.utils import config
from functest.utils import env
+from functest.utils import functest_utils
-class NewProject(object):
+class NewProject():
"""Ease creating new projects/users"""
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
- default_member = "Member"
def __init__(self, cloud, case_name, guid):
self.cloud = None
@@ -46,25 +48,25 @@ class NewProject(object):
self.user = None
self.password = None
self.domain = None
- self.role = None
self.role_name = None
+ self.default_member = env.get('NEW_USER_ROLE')
def create(self):
"""Create projects/users"""
assert self.orig_cloud
assert self.case_name
- self.password = str(uuid.uuid4())
+ self.password = data_utils.rand_password().replace('%', '!')
+ self.__logger.debug("password: %s", self.password)
self.domain = self.orig_cloud.get_domain(
name_or_id=self.orig_cloud.auth.get(
"project_domain_name", "Default"))
self.project = self.orig_cloud.create_project(
- name='{}-project_{}'.format(self.case_name, self.guid),
- description="Created by OPNFV Functest: {}".format(
- self.case_name),
+ name=f'{self.case_name[:18]}-project_{self.guid}',
+ description=f"Created by OPNFV Functest: {self.case_name}",
domain_id=self.domain.id)
self.__logger.debug("project: %s", self.project)
self.user = self.orig_cloud.create_user(
- name='{}-user_{}'.format(self.case_name, self.guid),
+ name=f'{self.case_name}-user_{self.guid}',
password=self.password,
domain_id=self.domain.id)
self.__logger.debug("user: %s", self.user)
@@ -74,12 +76,12 @@ class NewProject(object):
elif self.orig_cloud.get_role(self.default_member.lower()):
self.role_name = self.default_member.lower()
else:
- raise Exception("Cannot detect {}".format(self.default_member))
+ raise Exception(f"Cannot detect {self.default_member}")
except Exception: # pylint: disable=broad-except
self.__logger.info("Creating default role %s", self.default_member)
- self.role = self.orig_cloud.create_role(self.default_member)
- self.role_name = self.role.name
- self.__logger.debug("role: %s", self.role)
+ role = self.orig_cloud.create_role(self.default_member)
+ self.role_name = role.name
+ self.__logger.debug("role: %s", role)
self.orig_cloud.grant_role(
self.role_name, user=self.user.id, project=self.project.id,
domain=self.domain.id)
@@ -95,6 +97,21 @@ class NewProject(object):
cloud_config=osconfig.get_one_cloud())
self.__logger.debug("new cloud %s", self.cloud.auth)
+ def get_environ(self):
+ "Get new environ"
+ environ = dict(
+ os.environ,
+ OS_USERNAME=self.user.name,
+ OS_PROJECT_NAME=self.project.name,
+ OS_PROJECT_ID=self.project.id,
+ OS_PASSWORD=self.password)
+ try:
+ del environ['OS_TENANT_NAME']
+ del environ['OS_TENANT_ID']
+ except Exception: # pylint: disable=broad-except
+ pass
+ return environ
+
def clean(self):
"""Remove projects/users"""
try:
@@ -103,17 +120,21 @@ class NewProject(object):
self.orig_cloud.delete_user(self.user.id)
if self.project:
self.orig_cloud.delete_project(self.project.id)
- if self.role:
- self.orig_cloud.delete_role(self.role.id)
+ secgroups = self.orig_cloud.list_security_groups(
+ filters={'name': 'default',
+ 'project_id': self.project.id})
+ if secgroups:
+ sec_id = secgroups[0].id
+ self.orig_cloud.delete_security_group(sec_id)
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
class TenantNetwork1(testcase.TestCase):
# pylint: disable=too-many-instance-attributes
"""Create a tenant network (scenario1)
- It creates and configures all tenant network ressources required by
+ It creates and configures all tenant network resources required by
advanced testcases (subnet, network and router).
It ensures that all testcases inheriting from TenantNetwork1 could work
@@ -122,26 +143,30 @@ class TenantNetwork1(testcase.TestCase):
"""
__logger = logging.getLogger(__name__)
- cidr = '192.168.0.0/24'
+ cidr = '192.168.120.0/24'
shared_network = False
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tenantnetwork1'
- super(TenantNetwork1, self).__init__(**kwargs)
- self.res_dir = os.path.join(
- getattr(config.CONF, 'dir_results'), self.case_name)
+ super().__init__(**kwargs)
+ self.dir_results = os.path.join(getattr(config.CONF, 'dir_results'))
+ self.res_dir = os.path.join(self.dir_results, self.case_name)
+ self.output_log_name = 'functest.log'
+ self.output_debug_log_name = 'functest.debug.log'
+ self.ext_net = None
try:
cloud_config = os_client_config.get_config()
- self.cloud = shade.OpenStackCloud(cloud_config=cloud_config)
+ self.cloud = self.orig_cloud = shade.OpenStackCloud(
+ cloud_config=cloud_config)
except Exception: # pylint: disable=broad-except
- self.cloud = None
- self.ext_net = None
+ self.cloud = self.orig_cloud = None
self.__logger.exception("Cannot connect to Cloud")
- try:
- self.ext_net = self.get_external_network(self.cloud)
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot get the external network")
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ try:
+ self.ext_net = self.get_external_network(self.cloud)
+ except Exception: # pylint: disable=broad-except
+ self.__logger.exception("Cannot get the external network")
self.guid = str(uuid.uuid4())
self.network = None
self.subnet = None
@@ -175,38 +200,61 @@ class TenantNetwork1(testcase.TestCase):
role = cloud.get_role(member.lower())
return role
- def _create_network_ressources(self):
+ @staticmethod
+ def get_public_auth_url(cloud):
+ """Get Keystone public endpoint"""
+ keystone_id = functest_utils.search_services(cloud, 'keystone')[0].id
+ endpoint = cloud.search_endpoints(
+ filters={'interface': 'public',
+ 'service_id': keystone_id})[0].url
+ return endpoint
+
+ def create_network_resources(self):
+ """Create all tenant network resources
+
+ It creates a router which gateway is the external network detected.
+ The new subnet is attached to that router.
+
+        Raises: exception on error
+ """
assert self.cloud
- assert self.ext_net
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ assert self.ext_net
provider = {}
- if hasattr(config.CONF, '{}_network_type'.format(self.case_name)):
+ if hasattr(config.CONF, f'{self.case_name}_network_type'):
provider["network_type"] = getattr(
- config.CONF, '{}_network_type'.format(self.case_name))
- if hasattr(config.CONF, '{}_physical_network'.format(self.case_name)):
+ config.CONF, f'{self.case_name}_network_type')
+ if hasattr(config.CONF, f'{self.case_name}_physical_network'):
provider["physical_network"] = getattr(
- config.CONF, '{}_physical_network'.format(self.case_name))
- if hasattr(config.CONF, '{}_segmentation_id'.format(self.case_name)):
+ config.CONF, f'{self.case_name}_physical_network')
+ if hasattr(config.CONF, f'{self.case_name}_segmentation_id'):
provider["segmentation_id"] = getattr(
- config.CONF, '{}_segmentation_id'.format(self.case_name))
- self.network = self.cloud.create_network(
- '{}-net_{}'.format(self.case_name, self.guid),
- provider=provider,
+ config.CONF, f'{self.case_name}_segmentation_id')
+ domain = self.orig_cloud.get_domain(
+ name_or_id=self.orig_cloud.auth.get(
+ "project_domain_name", "Default"))
+ project = self.orig_cloud.get_project(
+ self.cloud.auth['project_name'],
+ domain_id=domain.id)
+ self.network = self.orig_cloud.create_network(
+ f'{self.case_name}-net_{self.guid}',
+ provider=provider, project_id=project.id,
shared=self.shared_network)
self.__logger.debug("network: %s", self.network)
self.subnet = self.cloud.create_subnet(
self.network.id,
- subnet_name='{}-subnet_{}'.format(self.case_name, self.guid),
+ subnet_name=f'{self.case_name}-subnet_{self.guid}',
cidr=getattr(
- config.CONF, '{}_private_subnet_cidr'.format(self.case_name),
+ config.CONF, f'{self.case_name}_private_subnet_cidr',
self.cidr),
enable_dhcp=True,
dns_nameservers=[env.get('NAMESERVER')])
self.__logger.debug("subnet: %s", self.subnet)
self.router = self.cloud.create_router(
- name='{}-router_{}'.format(self.case_name, self.guid),
- ext_gateway_net_id=self.ext_net.id)
+ name=f'{self.case_name}-router_{self.guid}',
+ ext_gateway_net_id=self.ext_net.id if self.ext_net else None)
self.__logger.debug("router: %s", self.router)
self.cloud.add_router_interface(self.router, subnet_id=self.subnet.id)
@@ -215,7 +263,8 @@ class TenantNetwork1(testcase.TestCase):
try:
assert self.cloud
self.start_time = time.time()
- self._create_network_ressources()
+ if env.get('NO_TENANT_NETWORK').lower() != 'true':
+ self.create_network_resources()
self.result = 100
status = testcase.TestCase.EX_OK
except Exception: # pylint: disable=broad-except
@@ -237,14 +286,14 @@ class TenantNetwork1(testcase.TestCase):
if self.network:
self.cloud.delete_network(self.network.id)
except Exception: # pylint: disable=broad-except
- self.__logger.exception("cannot clean all ressources")
+ self.__logger.exception("cannot clean all resources")
class TenantNetwork2(TenantNetwork1):
"""Create a tenant network (scenario2)
It creates new user/project before creating and configuring all tenant
- network ressources required by a testcase (subnet, network and router).
+ network resources required by a testcase (subnet, network and router).
It ensures that all testcases inheriting from TenantNetwork2 could work
without network specific configurations (or at least read the same config
@@ -256,7 +305,7 @@ class TenantNetwork2(TenantNetwork1):
def __init__(self, **kwargs):
if "case_name" not in kwargs:
kwargs["case_name"] = 'tenantnetwork2'
- super(TenantNetwork2, self).__init__(**kwargs)
+ super().__init__(**kwargs)
try:
assert self.cloud
self.project = NewProject(
@@ -270,8 +319,8 @@ class TenantNetwork2(TenantNetwork1):
def clean(self):
try:
- super(TenantNetwork2, self).clean()
+ super().clean()
assert self.project
self.project.clean()
except Exception: # pylint: disable=broad-except
- self.__logger.exception("Cannot clean all ressources")
+ self.__logger.exception("Cannot clean all resources")
diff --git a/functest/core/vnf.py b/functest/core/vnf.py
deleted file mode 100644
index a6afd4e6b..000000000
--- a/functest/core/vnf.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 Orange and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""Define the parent class of all VNF TestCases."""
-
-import logging
-import uuid
-
-from snaps.config.user import UserConfig
-from snaps.config.project import ProjectConfig
-from snaps.openstack.create_user import OpenStackUser
-from snaps.openstack.create_project import OpenStackProject
-from snaps.openstack.utils import keystone_utils
-from snaps.openstack.tests import openstack_tests
-
-from xtesting.core import vnf
-from functest.utils import constants
-
-__author__ = ("Morgan Richomme <morgan.richomme@orange.com>, "
- "Valentin Boucher <valentin.boucher@orange.com>")
-
-
-class VnfPreparationException(vnf.VnfPreparationException):
- """Raise when VNF preparation cannot be executed."""
-
-
-class OrchestratorDeploymentException(vnf.OrchestratorDeploymentException):
- """Raise when orchestrator cannot be deployed."""
-
-
-class VnfDeploymentException(vnf.VnfDeploymentException):
- """Raise when VNF cannot be deployed."""
-
-
-class VnfTestException(vnf.VnfTestException):
- """Raise when VNF cannot be tested."""
-
-
-class VnfOnBoarding(vnf.VnfOnBoarding):
- # pylint: disable=too-many-instance-attributes
- """Base model for OpenStack VNF test cases."""
-
- __logger = logging.getLogger(__name__)
-
- def __init__(self, **kwargs):
- super(VnfOnBoarding, self).__init__(**kwargs)
- self.uuid = uuid.uuid4()
- self.user_name = "{}-{}".format(self.case_name, self.uuid)
- self.tenant_name = "{}-{}".format(self.case_name, self.uuid)
- self.snaps_creds = {}
- self.created_object = []
- self.os_project = None
- self.tenant_description = "Created by OPNFV Functest: {}".format(
- self.case_name)
-
- def prepare(self):
- """
- Prepare the environment for VNF testing:
-
- * Creation of a user,
- * Creation of a tenant,
- * Allocation admin role to the user on this tenant
-
- Returns base.TestCase.EX_OK if preparation is successfull
-
- Raise VnfPreparationException in case of problem
- """
- try:
- self.__logger.info(
- "Prepare VNF: %s, description: %s", self.case_name,
- self.tenant_description)
- snaps_creds = openstack_tests.get_credentials(
- os_env_file=constants.ENV_FILE)
-
- self.os_project = OpenStackProject(
- snaps_creds,
- ProjectConfig(
- name=self.tenant_name,
- description=self.tenant_description,
- domain=snaps_creds.project_domain_name
- ))
- self.os_project.create()
- self.created_object.append(self.os_project)
-
- snaps_creds.project_domain_id = \
- self.os_project.get_project().domain_id
- snaps_creds.user_domain_id = \
- self.os_project.get_project().domain_id
-
- for role in ['admin', 'Admin']:
- if keystone_utils.get_role_by_name(
- keystone_utils.keystone_client(snaps_creds), role):
- admin_role = role
- break
-
- user_creator = OpenStackUser(
- snaps_creds,
- UserConfig(
- name=self.user_name,
- password=str(uuid.uuid4()),
- project_name=self.tenant_name,
- domain_name=snaps_creds.user_domain_name,
- roles={admin_role: self.tenant_name}))
- user_creator.create()
- self.created_object.append(user_creator)
- self.snaps_creds = user_creator.get_os_creds(self.tenant_name)
- self.__logger.debug("snaps creds: %s", self.snaps_creds)
-
- return vnf.VnfOnBoarding.EX_OK
- except Exception: # pylint: disable=broad-except
- self.__logger.exception("Exception raised during VNF preparation")
- raise VnfPreparationException
-
- def deploy_orchestrator(self):
- """
- Deploy an orchestrator (optional).
-
- If this method is overriden then raise orchestratorDeploymentException
- if error during orchestrator deployment
- """
- self.__logger.info("Deploy orchestrator (if necessary)")
- return True
-
- def deploy_vnf(self):
- """
- Deploy the VNF
-
- This function MUST be implemented by vnf test cases.
- The details section MAY be updated in the vnf test cases.
-
- The deployment can be executed via a specific orchestrator
- or using build-in orchestrators such as heat, OpenBaton, cloudify,
- juju, onap, ...
-
- Returns:
- True if the VNF is properly deployed
- False if the VNF is not deployed
-
- Raise VnfDeploymentException if error during VNF deployment
- """
- self.__logger.error("VNF must be deployed")
- raise VnfDeploymentException
-
- def test_vnf(self):
- """
- Test the VNF
-
- This function MUST be implemented by vnf test cases.
- The details section MAY be updated in the vnf test cases.
-
- Once a VNF is deployed, it is assumed that specific test suite can be
- run to validate the VNF.
- Please note that the same test suite can be used on several test case
- (e.g. clearwater test suite can be used whatever the orchestrator used
- for the deployment)
-
- Returns:
- True if VNF tests are PASS
- False if test suite is FAIL
-
- Raise VnfTestException if error during VNF test
- """
- self.__logger.error("VNF must be tested")
- raise VnfTestException
-
- def clean(self):
- """
- Clean VNF test case.
-
- It is up to the test providers to delete resources used for the tests.
- By default we clean:
-
- * the user,
- * the tenant
- """
- self.__logger.info('Removing the VNF resources ..')
- for creator in reversed(self.created_object):
- try:
- creator.clean()
- except Exception as exc: # pylint: disable=broad-except
- self.__logger.error('Unexpected error cleaning - %s', exc)