author     nikoskarandreas <nick@intracom-telecom.com>   2018-07-12 19:40:30 +0300
committer  nikoskarandreas <nick@intracom-telecom.com>   2018-10-15 12:13:29 +0300
commit     cfcb04c938abdcddd76bcdd2375b4a81ea28fa51
tree       f01f95abff0319d4b7d562e2b39b98b4d0fd2822
parent     edd6cfe15ecd4e2bf608c23c6ca4612334df044b
Using heat orchestrator for sdnvpn - test case 1
Heat orchestration and the use of Heat Orchestration Templates (HOT) are
introduced into the sdnvpn test cases. The nodes and networks under test are
deployed as a stack through the Heat API, and direct use of the other
OpenStack APIs is kept to a minimum. The scenarios that are executed are the
same as in the original test case.

This is the implementation of sdnvpn test case 1 ("VPN provides connectivity
between subnets"), plus the base functions for Heat API access and some
utilities.

JIRA: SDNVPN-219

Change-Id: Ic284722e600652c9058da96d349dff9398bcacf1
Signed-off-by: nikoskarandreas <nick@intracom-telecom.com>
-rw-r--r--   sdnvpn/artifacts/testcase_1bis.yaml    | 234
-rw-r--r--   sdnvpn/lib/openstack_utils.py          |  40
-rw-r--r--   sdnvpn/lib/utils.py                    | 123
-rw-r--r--   sdnvpn/test/functest/config.yaml       |  25
-rw-r--r--   sdnvpn/test/functest/testcase_1bis.py  | 215
5 files changed, 637 insertions(+), 0 deletions(-)
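For orientation, the flow this change introduces is sketched below. This is a
minimal illustration, not part of the patch: it assumes the helpers added in
the diffs that follow (create_stack, wait_stack_for_status, get_output,
delete_stack_and_wait, get_heat_environment) plus the pre-existing
os_utils.get_heat_client(); the availability zone strings are example values,
and the BGPVPN associations and ping checks of test case 1 are omitted.

# Minimal sketch of the Heat-based flow added by this change (illustrative).
import pkg_resources

from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import openstack_utils as os_utils
from sdnvpn.lib import utils as test_utils

COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_1bis')

heat_client = os_utils.get_heat_client()
hot_file = pkg_resources.resource_filename(
    'sdnvpn', TESTCASE_CONFIG.hot_file_name)
template = open(hot_file).read()
environment = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)

# start the stack and wait until Heat reports CREATE_COMPLETE
stack_id = os_utils.create_stack(
    heat_client,
    stack_name=TESTCASE_CONFIG.stack_name,          # stack-1bis
    template=template,
    environment=environment,
    parameters={'image_n': TESTCASE_CONFIG.image_name,
                'av_zone_1': 'nova:compute-0',      # example zone names
                'av_zone_2': 'nova:compute-1'})
test_utils.wait_stack_for_status(heat_client, stack_id, 'CREATE_COMPLETE')

# read stack outputs instead of querying neutron/nova directly
net_1_id = os_utils.get_output(
    heat_client, stack_id, 'net_1_o')['output']['output_value']

# ... run the testcase 1 ping scenarios against the stack's VMs ...

test_utils.delete_stack_and_wait(heat_client, stack_id)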
diff --git a/sdnvpn/artifacts/testcase_1bis.yaml b/sdnvpn/artifacts/testcase_1bis.yaml
new file mode 100644
index 0000000..f269943
--- /dev/null
+++ b/sdnvpn/artifacts/testcase_1bis.yaml
@@ -0,0 +1,234 @@
+heat_template_version: 2013-05-23
+
+description: >
+  Template for SDNVPN testcase 1
+  VPN provides connectivity between subnets
+
+parameters:
+  flavor:
+    type: string
+    description: flavor for the servers to be created
+    constraints:
+      - custom_constraint: nova.flavor
+  image_n:
+    type: string
+    description: image for the servers to be created
+    constraints:
+      - custom_constraint: glance.image
+  av_zone_1:
+    type: string
+    description: availability zone 1
+  av_zone_2:
+    type: string
+    description: availability zone 2
+
+  net_1_name:
+    type: string
+    description: network 1
+  subnet_1_name:
+    type: string
+    description: subnet 1 name
+  subnet_1_cidr:
+    type: string
+    description: subnet 1 cidr
+  net_2_name:
+    type: string
+    description: network 2
+  subnet_2_name:
+    type: string
+    description: subnet 2 name
+  subnet_2_cidr:
+    type: string
+    description: subnet 2 cidr
+
+  secgroup_name:
+    type: string
+    description: security group name
+  secgroup_descr:
+    type: string
+    description: security group description
+
+  instance_1_name:
+    type: string
+    description: instance name
+  instance_2_name:
+    type: string
+    description: instance name
+  instance_3_name:
+    type: string
+    description: instance name
+  instance_4_name:
+    type: string
+    description: instance name
+  instance_5_name:
+    type: string
+    description: instance name
+
+  ping_count:
+    type: string
+    description: ping count for user data script
+    default: 10
+
+resources:
+  net_1:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: net_1_name }
+  subnet_1:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { get_param: subnet_1_name }
+      network: { get_resource: net_1 }
+      cidr: { get_param: subnet_1_cidr }
+  net_2:
+    type: OS::Neutron::Net
+    properties:
+      name: { get_param: net_2_name }
+  subnet_2:
+    type: OS::Neutron::Subnet
+    properties:
+      name: { get_param: subnet_2_name }
+      network: { get_resource: net_2 }
+      cidr: { get_param: subnet_2_cidr }
+
+  sec_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: { get_param: secgroup_name }
+      description: { get_param: secgroup_descr }
+      rules:
+        - protocol: icmp
+          remote_ip_prefix: 0.0.0.0/0
+        - protocol: tcp
+          port_range_min: 22
+          port_range_max: 22
+          remote_ip_prefix: 0.0.0.0/0
+
+  vm1:
+    type: OS::Nova::Server
+    depends_on: [ vm2, vm3, vm4, vm5 ]
+    properties:
+      name: { get_param: instance_1_name }
+      image: { get_param: image_n }
+      flavor: { get_param: flavor }
+      availability_zone: { get_param: av_zone_1 }
+      security_groups:
+        - { get_resource: sec_group }
+      networks:
+        - subnet: { get_resource: subnet_1 }
+      config_drive: True
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            set $IP_VM2 $IP_VM3 $IP_VM4 $IP_VM5
+            while true; do
+              for i do
+                ip=$i
+                ping -c $COUNT $ip 2>&1 >/dev/null
+                RES=$?
+                if [ \"Z$RES\" = \"Z0\" ] ; then
+                  echo ping $ip OK
+                else echo ping $ip KO
+                fi
+              done
+              sleep 1
+            done
+          params:
+            $IP_VM2: { get_attr: [vm2, addresses, { get_resource: net_1}, 0, addr] }
+            $IP_VM3: { get_attr: [vm3, addresses, { get_resource: net_1}, 0, addr] }
+            $IP_VM4: { get_attr: [vm4, addresses, { get_resource: net_2}, 0, addr] }
+            $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+            $COUNT: { get_param: ping_count }
+  vm2:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: instance_2_name }
+      image: { get_param: image_n }
+      flavor: { get_param: flavor }
+      availability_zone: { get_param: av_zone_1 }
+      security_groups:
+        - { get_resource: sec_group }
+      networks:
+        - subnet: { get_resource: subnet_1 }
+  vm3:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: instance_3_name }
+      image: { get_param: image_n }
+      flavor: { get_param: flavor }
+      availability_zone: { get_param: av_zone_2 }
+      security_groups:
+        - { get_resource: sec_group }
+      networks:
+        - subnet: { get_resource: subnet_1 }
+  vm4:
+    type: OS::Nova::Server
+    depends_on: vm5
+    properties:
+      name: { get_param: instance_4_name }
+      image: { get_param: image_n }
+      flavor: { get_param: flavor }
+      availability_zone: { get_param: av_zone_1 }
+      security_groups:
+        - { get_resource: sec_group }
+      networks:
+        - subnet: { get_resource: subnet_2 }
+      config_drive: True
+      user_data_format: RAW
+      user_data:
+        str_replace:
+          template: |
+            #!/bin/sh
+            set $IP_VM5
+            while true; do
+              for i do
+                ip=$i
+                ping -c $COUNT $ip 2>&1 >/dev/null
+                RES=$?
+                if [ \"Z$RES\" = \"Z0\" ] ; then
+                  echo ping $ip OK
+                else echo ping $ip KO
+                fi
+              done
+              sleep 1
+            done
+          params:
+            $IP_VM5: { get_attr: [vm5, addresses, { get_resource: net_2}, 0, addr] }
+            $COUNT: { get_param: ping_count }
+
+  vm5:
+    type: OS::Nova::Server
+    properties:
+      name: { get_param: instance_5_name }
+      image: { get_param: image_n }
+      flavor: { get_param: flavor }
+      availability_zone: { get_param: av_zone_2 }
+      security_groups:
+        - { get_resource: sec_group }
+      networks:
+        - subnet: { get_resource: subnet_2 }
+
+outputs:
+  net_1_o:
+    description: the id of network 1
+    value: { get_attr: [net_1, show, id] }
+  net_2_o:
+    description: the id of network 2
+    value: { get_attr: [net_2, show, id] }
+  vm1_o:
+    description: the deployed vm resource
+    value: { get_attr: [vm1, show, name] }
+  vm2_o:
+    description: the deployed vm resource
+    value: { get_attr: [vm2, show, name] }
+  vm3_o:
+    description: the deployed vm resource
+    value: { get_attr: [vm3, show, name] }
+  vm4_o:
+    description: the deployed vm resource
+    value: { get_attr: [vm4, show, name] }
+  vm5_o:
+    description: the deployed vm resource
+    value: { get_attr: [vm5, show, name] }
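For illustration (not part of the patch), the environment that
get_heat_environment() in sdnvpn/lib/utils.py below builds for this template
looks roughly as follows. The keys mirror the heat_parameters block added to
config.yaml further down; 'm1.small' merely stands in for the common config
default flavor. Note that image_n and the availability zones are not part of
the environment: testcase_1bis.py passes them separately as stack parameters.

# Illustrative sketch: build the 'parameters' environment the same way
# get_heat_environment() does; values come from the new config.yaml entry.
import yaml

env_dict = {
    'parameters': {
        'flavor': 'm1.small',          # stands in for the default flavor
        'net_1_name': 'sdnvpn-1-1-net',
        'subnet_1_name': 'sdnvpn-1-1-subnet',
        'subnet_1_cidr': '10.10.10.0/24',
        'net_2_name': 'sdnvpn-1-2-net',
        'subnet_2_name': 'sdnvpn-1-2-subnet',
        'subnet_2_cidr': '10.10.11.0/24',
        'secgroup_name': 'sdnvpn-sg',
        'secgroup_descr': 'Security group for SDNVPN test cases',
        'instance_1_name': 'sdnvpn-1-1',
        'instance_2_name': 'sdnvpn-1-2',
        'instance_3_name': 'sdnvpn-1-3',
        'instance_4_name': 'sdnvpn-1-4',
        'instance_5_name': 'sdnvpn-1-5',
    }
}
environment = yaml.safe_dump(env_dict, default_flow_style=False)
print(environment)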
diff --git a/sdnvpn/lib/openstack_utils.py b/sdnvpn/lib/openstack_utils.py
index fc36c5b..3fa17e6 100644
--- a/sdnvpn/lib/openstack_utils.py
+++ b/sdnvpn/lib/openstack_utils.py
@@ -1441,3 +1441,43 @@ def get_resource(heat_client, stack_id, resource):
     except Exception as e:
         logger.error("Error [get_resource]: %s" % e)
         return None
+
+
+def create_stack(heat_client, **kwargs):
+    try:
+        stack = heat_client.stacks.create(**kwargs)
+        stack_id = stack['stack']['id']
+        if stack_id is None:
+            logger.error("Stack create start failed")
+            raise SystemError("Stack create start failed")
+        return stack_id
+    except Exception as e:
+        logger.error("Error [create_stack]: %s" % e)
+        return None
+
+
+def delete_stack(heat_client, stack_id):
+    try:
+        heat_client.stacks.delete(stack_id)
+        return True
+    except Exception as e:
+        logger.error("Error [delete_stack]: %s" % e)
+        return False
+
+
+def list_stack(heat_client, **kwargs):
+    try:
+        result = heat_client.stacks.list(**kwargs)
+        return result
+    except Exception as e:
+        logger.error("Error [list_stack]: %s" % e)
+        return None
+
+
+def get_output(heat_client, stack_id, output_key):
+    try:
+        output = heat_client.stacks.output_show(stack_id, output_key)
+        return output
+    except Exception as e:
+        logger.error("Error [get_output]: %s" % e)
+        return None
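A short usage sketch for the four wrappers above, as they would be called from
within this module (template_string is a placeholder for a HOT file read into
a string). They follow the module's existing convention: create_stack,
list_stack and get_output return None on failure and delete_stack returns
False, so callers are expected to check the result.

# Sketch only: exercise the new heat wrappers end to end.
heat_client = get_heat_client()

stack_id = create_stack(heat_client,
                        stack_name='stack-1bis',
                        template=template_string)
if stack_id is None:
    raise SystemError("Stack create start failed")

# list_stack returns a generator of stack objects; filter on the stack id
stack = next(list_stack(heat_client, filters={'id': stack_id}))
print(stack.stack_status)                   # e.g. CREATE_IN_PROGRESS

# stack outputs come back wrapped as {'output': {'output_value': ...}}
net_1 = get_output(heat_client, stack_id, 'net_1_o')
print(net_1['output']['output_value'])      # neutron network id of net_1

delete_stack(heat_client, stack_id)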
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 9a5e181..135501c 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -14,6 +14,7 @@ import time
 import requests
 import re
 import subprocess
+import yaml
 from concurrent.futures import ThreadPoolExecutor
 from openstack.exceptions import ResourceNotFound
 from requests.auth import HTTPBasicAuth
@@ -987,3 +988,125 @@ def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
         logger.error('Failed to find ip prefix %s with error %s'
                      % (ip_prefix, e))
     return False
+
+
+def wait_stack_for_status(heat_client, stack_id, stack_status, limit=12):
+    """ Waits until the stack reaches the specified status. To be used with
+    CREATE_COMPLETE and UPDATE_COMPLETE.
+    Will try a limited number of attempts at 10 second intervals
+    (2 minutes by default).
+
+    :param stack_id: the stack id returned by the create_stack api call
+    :param stack_status: the stack status to wait for
+    :param limit: the maximum number of attempts
+    """
+    logger.debug("Stack '%s' create started" % stack_id)
+
+    stack_create_complete = False
+    attempts = 0
+    while attempts < limit:
+        kwargs = {
+            "filters": {
+                "id": stack_id
+            }
+        }
+        stack_st = os_utils.list_stack(
+            heat_client, **kwargs).next().stack_status
+        if stack_st == stack_status:
+            stack_create_complete = True
+            break
+        attempts += 1
+        time.sleep(10)
+
+    logger.debug("Stack status check: %s times" % attempts)
+    if stack_create_complete is False:
+        logger.error("Stack create failed")
+        raise SystemError("Stack create failed")
+        return False
+
+    return True
+
+
+def delete_stack_and_wait(heat_client, stack_id, limit=12):
+    """ Starts a stack delete and waits for its completion
+
+    Will try a limited number of attempts at 10 second intervals
+    (2 minutes by default).
+
+    :param stack_id: the id of the stack to be deleted
+    :param limit: the maximum number of attempts
+    """
+    delete_started = False
+    if stack_id is not None:
+        delete_started = os_utils.delete_stack(heat_client, stack_id)
+
+    if delete_started is True:
+        logger.debug("Stack delete successfully started")
+    else:
+        logger.error("Stack delete start failed")
+
+    stack_delete_complete = False
+    attempts = 0
+    while attempts < limit:
+        kwargs = {
+            "filters": {
+                "id": stack_id
+            }
+        }
+        try:
+            stack_st = os_utils.list_stack(
+                heat_client, **kwargs).next().stack_status
+            if stack_st == 'DELETE_COMPLETE':
+                stack_delete_complete = True
+                break
+            attempts += 1
+            time.sleep(10)
+        except StopIteration:
+            stack_delete_complete = True
+            break
+
+    logger.debug("Stack status check: %s times" % attempts)
+    if not stack_delete_complete:
+        logger.error("Stack delete failed")
+        raise SystemError("Stack delete failed")
+        return False
+
+    return True
+
+
+def get_heat_environment(testcase, common_config):
+    """ Reads the heat parameters of a testcase into a yaml object
+
+    Each testcase in which a Heat Orchestration Template (HOT) is introduced
+    has an associated heat parameters section.
+    This reads the testcase.heat_parameters section, adds the common config
+    default flavor and places everything under a 'parameters' tree.
+
+    :param testcase: the testcase for which the HOT file is fetched
+    :param common_config: the common config section
+    :return environment: a yaml-encoded string to be used as the environment
+    """
+    fl = common_config.default_flavor
+    param_dict = testcase.heat_parameters
+    param_dict['flavor'] = fl
+    env_dict = {'parameters': param_dict}
+    environment = yaml.safe_dump(env_dict, default_flow_style=False)
+    return environment
+
+
+def get_vms_from_stack_outputs(heat_client, conn,
+                               stack_id, vm_stack_output_keys):
+    """ Converts the vm names found in heat stack outputs to nova vm objects
+
+    :param stack_id: the id of the stack to fetch the vms from
+    :param vm_stack_output_keys: a list of stack outputs with the vm names
+    :return vms: a list of vm objects corresponding to the outputs
+    """
+    vms = []
+    for vmk in vm_stack_output_keys:
+        vm_output = os_utils.get_output(heat_client, stack_id, vmk)
+        vm_name = vm_output['output']['output_value']
+        logger.debug("vm '%s' read from heat output" % vm_name)
+        vm = os_utils.get_instance_by_name(conn, vm_name)
+        vms.append(vm)
+    return vms
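One portability note on the two polling helpers above (an observation, not a
change): list_stack() hands back a generator and the code advances it with the
Python 2-only .next() method. Under Python 3 the same step would use the
next() built-in, roughly as sketched here inside the helper's loop body.

# Python 3 equivalent of the polling step used above (sketch only; the
# committed code targets Python 2 and keeps .next()).
stacks = os_utils.list_stack(heat_client, filters={"id": stack_id})
stack_st = next(stacks).stack_status
stack_create_complete = (stack_st == stack_status)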
diff --git a/sdnvpn/test/functest/config.yaml b/sdnvpn/test/functest/config.yaml
index 31dce67..cd398ee 100644
--- a/sdnvpn/test/functest/config.yaml
+++ b/sdnvpn/test/functest/config.yaml
@@ -26,6 +26,31 @@ testcases:
         targets2: '55:55'
         route_distinguishers: '11:11'

+    sdnvpn.test.functest.testcase_1bis:
+        enabled: true
+        order: 14
+        description: Test bed for HOT introduction - same tests as case 1
+        image_name: sdnvpn-image
+        stack_name: stack-1bis
+        hot_file_name: artifacts/testcase_1bis.yaml
+        heat_parameters:
+            instance_1_name: sdnvpn-1-1
+            instance_2_name: sdnvpn-1-2
+            instance_3_name: sdnvpn-1-3
+            instance_4_name: sdnvpn-1-4
+            instance_5_name: sdnvpn-1-5
+            net_1_name: sdnvpn-1-1-net
+            subnet_1_name: sdnvpn-1-1-subnet
+            subnet_1_cidr: 10.10.10.0/24
+            net_2_name: sdnvpn-1-2-net
+            subnet_2_name: sdnvpn-1-2-subnet
+            subnet_2_cidr: 10.10.11.0/24
+            secgroup_name: sdnvpn-sg
+            secgroup_descr: Security group for SDNVPN test cases
+        targets1: '88:88'
+        targets2: '55:55'
+        route_distinguishers: '11:11'
+
     sdnvpn.test.functest.testcase_2:
         enabled: true
         order: 2
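To make the wiring of the new block explicit: the keys under heat_parameters
are the ones get_heat_environment() folds into the stack environment;
image_name, stack_name and hot_file_name are read directly by the test module;
and targets1, targets2 and route_distinguishers feed the BGPVPN API calls
rather than Heat. A condensed sketch (values as configured above; the VPN name
is an example):

# Sketch: how the new config entry is consumed (mirrors testcase_1bis.py below).
import pkg_resources

from sdnvpn.lib import config as sdnvpn_config
from sdnvpn.lib import utils as test_utils

COMMON_CONFIG = sdnvpn_config.CommonConfig()
TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
    'sdnvpn.test.functest.testcase_1bis')

env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
# -> 'parameters:' holding the flavor plus every heat_parameters key above

hot_path = pkg_resources.resource_filename(
    'sdnvpn', TESTCASE_CONFIG.hot_file_name)    # artifacts/testcase_1bis.yaml

bgpvpn_kwargs = {
    'import_targets': TESTCASE_CONFIG.targets1,                     # '88:88'
    'export_targets': TESTCASE_CONFIG.targets2,                     # '55:55'
    'route_distinguishers': TESTCASE_CONFIG.route_distinguishers,   # '11:11'
    'name': 'sdnvpn-example',
}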
diff --git a/sdnvpn/test/functest/testcase_1bis.py b/sdnvpn/test/functest/testcase_1bis.py
new file mode 100644
index 0000000..f33d247
--- /dev/null
+++ b/sdnvpn/test/functest/testcase_1bis.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2018 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import logging
+import sys
+import pkg_resources
+
+from random import randint
+from sdnvpn.lib import config as sdnvpn_config
+from sdnvpn.lib import openstack_utils as os_utils
+from sdnvpn.lib import utils as test_utils
+from sdnvpn.lib.results import Results
+
+logger = logging.getLogger(__name__)
+
+COMMON_CONFIG = sdnvpn_config.CommonConfig()
+TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(
+    'sdnvpn.test.functest.testcase_1bis')
+
+
+def main():
+    conn = os_utils.get_os_connection()
+    results = Results(COMMON_CONFIG.line_length, conn)
+
+    results.add_to_summary(0, "=")
+    results.add_to_summary(2, "STATUS", "SUBTEST")
+    results.add_to_summary(0, "=")
+
+    heat_client = os_utils.get_heat_client()
+    # neutron client is needed as long as bgpvpn heat module
+    # is not yet installed by default in apex (APEX-618)
+    neutron_client = os_utils.get_neutron_client()
+
+    image_ids = []
+    bgpvpn_ids = []
+
+    try:
+        # image created outside HOT (OS::Glance::Image deprecated since ocata)
+        image_id = os_utils.create_glance_image(
+            conn, TESTCASE_CONFIG.image_name,
+            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
+            container="bare", public='public')
+        image_ids = [image_id]
+
+        compute_nodes = test_utils.assert_and_get_compute_nodes(conn)
+        az_1 = "nova:" + compute_nodes[0]
+        az_2 = "nova:" + compute_nodes[1]
+
+        file_path = pkg_resources.resource_filename(
+            'sdnvpn', TESTCASE_CONFIG.hot_file_name)
+        templ = open(file_path, 'r').read()
+        logger.debug("Template is read: '%s'" % templ)
+        env = test_utils.get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
+        logger.debug("Environment is read: '%s'" % env)
+
+        kwargs = {
+            "stack_name": TESTCASE_CONFIG.stack_name,
+            "template": templ,
+            "environment": env,
+            "parameters": {
+                "image_n": TESTCASE_CONFIG.image_name,
+                "av_zone_1": az_1,
+                "av_zone_2": az_2
+            }
+        }
+        stack_id = os_utils.create_stack(heat_client, **kwargs)
+        if stack_id is None:
+            logger.error("Stack create start failed")
+            raise SystemError("Stack create start failed")
+
+        test_utils.wait_stack_for_status(heat_client,
+                                         stack_id, 'CREATE_COMPLETE')
+
+        net_1_output = os_utils.get_output(heat_client, stack_id, 'net_1_o')
+        network_1_id = net_1_output['output']['output_value']
+        net_2_output = os_utils.get_output(heat_client, stack_id, 'net_2_o')
+        network_2_id = net_2_output['output']['output_value']
+
+        vm_stack_output_keys = ['vm1_o', 'vm2_o', 'vm3_o', 'vm4_o', 'vm5_o']
+        vms = test_utils.get_vms_from_stack_outputs(heat_client,
+                                                    conn,
+                                                    stack_id,
+                                                    vm_stack_output_keys)
+
+        logger.debug("Entering base test case with stack '%s'" % stack_id)
+
+        msg = ("Create VPN with eRT<>iRT")
+        results.record_action(msg)
+        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
+        kwargs = {
+            "import_targets": TESTCASE_CONFIG.targets1,
+            "export_targets": TESTCASE_CONFIG.targets2,
+            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+            "name": vpn_name
+        }
+        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn_id = bgpvpn['bgpvpn']['id']
+        logger.debug("VPN created details: %s" % bgpvpn)
+        bgpvpn_ids.append(bgpvpn_id)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_1_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_1_id)
+
+        # Remember: vms[X] is former vm_X+1
+
+        results.get_ping_status(vms[0], vms[1], expected="PASS", timeout=200)
+        results.get_ping_status(vms[0], vms[2], expected="PASS", timeout=30)
+        results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_2_name'])
+        results.add_to_summary(0, "-")
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_2_id)
+
+        test_utils.wait_for_bgp_net_assocs(neutron_client,
+                                           bgpvpn_id,
+                                           network_1_id,
+                                           network_2_id)
+
+        logger.info("Waiting for the VMs to connect to each other using the"
+                    " updated network configuration")
+        test_utils.wait_before_subtest()
+
+        results.get_ping_status(vms[3], vms[4], expected="PASS", timeout=30)
+        # TODO enable again when isolation in VPN with iRT != eRT works
+        # results.get_ping_status(vms[0], vms[3], expected="FAIL", timeout=30)
+        # results.get_ping_status(vms[0], vms[4], expected="FAIL", timeout=30)
+
+        msg = ("Update VPN with eRT=iRT ...")
+        results.add_to_summary(0, "-")
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed
+        # kwargs = {"import_targets": TESTCASE_CONFIG.targets1,
+        #           "export_targets": TESTCASE_CONFIG.targets1,
+        #           "name": vpn_name}
+        # bgpvpn = test_utils.update_bgpvpn(neutron_client,
+        #                                   bgpvpn_id, **kwargs)
+
+        test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)
+        bgpvpn_ids.remove(bgpvpn_id)
+        kwargs = {
+            "import_targets": TESTCASE_CONFIG.targets1,
+            "export_targets": TESTCASE_CONFIG.targets1,
+            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
+            "name": vpn_name
+        }
+
+        test_utils.wait_before_subtest()
+
+        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
+        bgpvpn_id = bgpvpn['bgpvpn']['id']
+        logger.debug("VPN re-created details: %s" % bgpvpn)
+        bgpvpn_ids.append(bgpvpn_id)
+
+        msg = ("Associate network '%s' to the VPN." %
+               TESTCASE_CONFIG.heat_parameters['net_1_name'])
+        results.record_action(msg)
+        results.add_to_summary(0, "-")
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_1_id)
+
+        test_utils.create_network_association(
+            neutron_client, bgpvpn_id, network_2_id)
+
+        test_utils.wait_for_bgp_net_assocs(neutron_client,
+                                           bgpvpn_id,
+                                           network_1_id,
+                                           network_2_id)
+        # The above code has to be removed after re-enabling bgpvpn-update
+
+        logger.info("Waiting for the VMs to connect to each other using the"
+                    " updated network configuration")
+        test_utils.wait_before_subtest()
+
+        results.get_ping_status(vms[0], vms[3], expected="PASS", timeout=30)
+        results.get_ping_status(vms[0], vms[4], expected="PASS", timeout=30)
+
+    except Exception as e:
+        logger.error("exception occurred while executing testcase_1bis: %s", e)
+        raise
+    finally:
+        test_utils.cleanup_glance(conn, image_ids)
+        test_utils.cleanup_neutron(conn, neutron_client, [], bgpvpn_ids,
+                                   [], [], [], [])
+
+        try:
+            test_utils.delete_stack_and_wait(heat_client, stack_id)
+        except Exception as e:
+            logger.error(
+                "exception occurred while executing testcase_1bis: %s", e)
+
+    return results.compile_summary()
+
+
+if __name__ == '__main__':
+    sys.exit(main())