From 56cc23958f869cb968f40a02ce22c92057ff6764 Mon Sep 17 00:00:00 2001 From: panageo2 Date: Tue, 1 Aug 2017 08:57:05 +0000 Subject: Add/Remove testcase This testcase investigates if communication between a group of VMs is interrupted upon deletion and creation of VMs inside this group. Test case flow: 3 vms: [1] on compute 1, [2] on compute 1, [3] on compute 2, all vms ping each other. vm [2] is deleted. Traffic is still flying between [1] and [3]. A new [4] vm is added to compute 1. Traffic is not interrupted and [4] can be reached as well. JIRA: SDNVPN-105 Change-Id: I93a65de2a5df83551e3115fd20b60241cd460e97 Signed-off-by: panageo2 --- sdnvpn/lib/utils.py | 6 +- sdnvpn/test/functest/config.yaml | 16 +++ sdnvpn/test/functest/testcase_10.py | 274 ++++++++++++++++++++++++++++++++++++ 3 files changed, 293 insertions(+), 3 deletions(-) create mode 100644 sdnvpn/test/functest/testcase_10.py diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py index a7aa991..994c3bb 100644 --- a/sdnvpn/lib/utils.py +++ b/sdnvpn/lib/utils.py @@ -143,7 +143,7 @@ def create_instance(nova_client, return instance -def generate_ping_userdata(ips_array): +def generate_ping_userdata(ips_array, ping_count=10): ips = "" for ip in ips_array: ips = ("%s %s" % (ips, ip)) @@ -154,7 +154,7 @@ "while true; do\n" " for i do\n" " ip=$i\n" - " ping -c 10 $ip 2>&1 >/dev/null\n" + " ping -c %s $ip 2>&1 >/dev/null\n" " RES=$?\n" " if [ \"Z$RES\" = \"Z0\" ] ; then\n" " echo ping $ip OK\n" @@ -163,7 +163,7 @@ " done\n" " sleep 1\n" "done\n" - % ips) + % (ips, ping_count)) def generate_userdata_common(): diff --git a/sdnvpn/test/functest/config.yaml b/sdnvpn/test/functest/config.yaml index 3ffd215..45ce0d3 100644 --- a/sdnvpn/test/functest/config.yaml +++ b/sdnvpn/test/functest/config.yaml @@ -158,3 +158,19 @@ testcases: enabled: true description: Verify that all OpenStack nodes OVS br-int have fail_mode set to secure. 
testname_db: functest_testcase_9 + + testcase_10: + enabled: true + description: Test if interruptions occur during ping, when removing and adding instances + testname_db: functest_testcase_10 + instance_1_name: sdnvpn-10-1 + instance_2_name: sdnvpn-10-2 + instance_3_name: sdnvpn-10-3 + instance_4_name: sdnvpn-10-4 + image_name: sdnvpn-image + net_1_name: sdnvpn-10-1-net + subnet_1_name: sdnvpn-10-1-subnet + subnet_1_cidr: 10.10.10.0/24 + router_1_name: sdnvpn-10-1-router + secgroup_name: sdnvpn-sg + secgroup_descr: Security group for SDNVPN test cases diff --git a/sdnvpn/test/functest/testcase_10.py b/sdnvpn/test/functest/testcase_10.py new file mode 100644 index 0000000..15346e6 --- /dev/null +++ b/sdnvpn/test/functest/testcase_10.py @@ -0,0 +1,274 @@ +#!/usr/bin/python +# +# Copyright (c) 2017 All rights reserved +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# + +import argparse +import logging +import re +import sys +import time +import traceback + +from functest.utils import openstack_utils as os_utils +from multiprocessing import Process, Manager, Lock +from sdnvpn.lib import config as sdnvpn_config +from sdnvpn.lib import utils as test_utils +from sdnvpn.lib.results import Results + +parser = argparse.ArgumentParser() + +parser.add_argument("-r", "--report", + help="Create json result file", + action="store_true") + +args = parser.parse_args() + +logger = logging.getLogger('sdnvpn-testcase-10') + +std_out_lock = Lock() + +COMMON_CONFIG = sdnvpn_config.CommonConfig() +TESTCASE_CONFIG = sdnvpn_config.TestcaseConfig('testcase_10') + + +def monitor(in_data, out_data, vm): + # At the beginning of ping we might have some + # failures, so we ignore the first 10 pings + lines_offset = 10 + while in_data["stop_thread"] is False: + try: + time.sleep(1) + vm_console_out_lines = 
vm.get_console_output().split('\n') + if lines_offset < len(vm_console_out_lines): + for console_line in vm_console_out_lines[lines_offset:-1]: + is_ping_error = re.match(r'ping.*KO', console_line) + if is_ping_error and out_data["error_msg"] == "": + out_data["error_msg"] += ("Ping failure from " + "instance {}". + format(vm.name)) + # Atomic write to std out + with std_out_lock: + logging.error("Failure during ping from " + "instance {}: {}". + format(vm.name, console_line)) + elif re.match(r'ping.*OK', console_line): + # Atomic write to std out + with std_out_lock: + logging.info("Ping from instance {}: {}". + format(vm.name, console_line)) + lines_offset = len(vm_console_out_lines) + except: + # Atomic write to std out + with std_out_lock: + logging.error("Failure in monitor_thread of instance {}". + format(vm.name)) + # Return to main process + return + + +def main(): + results = Results(COMMON_CONFIG.line_length) + + results.add_to_summary(0, "=") + results.add_to_summary(2, "STATUS", "SUBTEST") + results.add_to_summary(0, "=") + + nova_client = os_utils.get_nova_client() + neutron_client = os_utils.get_neutron_client() + glance_client = os_utils.get_glance_client() + + (floatingip_ids, instance_ids, router_ids, network_ids, image_ids, + subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8)) + image_id = os_utils.create_glance_image(glance_client, + TESTCASE_CONFIG.image_name, + COMMON_CONFIG.image_path, + disk=COMMON_CONFIG.image_format, + container="bare", + public='public') + image_ids.append(image_id) + + network_1_id = test_utils.create_net(neutron_client, + TESTCASE_CONFIG.net_1_name) + subnet_1_id = test_utils.create_subnet(neutron_client, + TESTCASE_CONFIG.subnet_1_name, + TESTCASE_CONFIG.subnet_1_cidr, + network_1_id) + + network_ids.append(network_1_id) + subnet_ids.append(subnet_1_id) + + sg_id = os_utils.create_security_group_full(neutron_client, + TESTCASE_CONFIG.secgroup_name, + TESTCASE_CONFIG.secgroup_descr) + + compute_nodes = 
test_utils.assert_and_get_compute_nodes(nova_client) + av_zone_1 = "nova:" + compute_nodes[0] + av_zone_2 = "nova:" + compute_nodes[1] + + # boot INSTANCES + vm_2 = test_utils.create_instance( + nova_client, + TESTCASE_CONFIG.instance_2_name, + image_id, + network_1_id, + sg_id, + secgroup_name=TESTCASE_CONFIG.secgroup_name, + compute_node=av_zone_1) + vm2_ip = test_utils.get_instance_ip(vm_2) + + u1 = test_utils.generate_ping_userdata([vm2_ip], 1) + vm_1 = test_utils.create_instance( + nova_client, + TESTCASE_CONFIG.instance_1_name, + image_id, + network_1_id, + sg_id, + secgroup_name=TESTCASE_CONFIG.secgroup_name, + compute_node=av_zone_1, + userdata=u1) + vm1_ip = test_utils.get_instance_ip(vm_1) + + u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1) + vm_3 = test_utils.create_instance( + nova_client, + TESTCASE_CONFIG.instance_3_name, + image_id, + network_1_id, + sg_id, + secgroup_name=TESTCASE_CONFIG.secgroup_name, + compute_node=av_zone_2, + userdata=u3) + vm3_ip = test_utils.get_instance_ip(vm_3) + # We do not put vm_2 id in instance_ids table because we will + # delete the current instance during the testing process + instance_ids.extend([vm_1.id, vm_3.id]) + + # Wait for VMs to get ips. 
+ instances_up = test_utils.wait_for_instances_up(vm_1, vm_2, + vm_3) + + if not instances_up: + logger.error("One or more instances is down") + # TODO: Handle this appropriately + # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3 + m = Manager() + monitor_input1 = m.dict() + monitor_output1 = m.dict() + monitor_input1["stop_thread"] = False + monitor_output1["error_msg"] = "" + monitor_thread1 = Process(target=monitor, args=(monitor_input1, + monitor_output1, vm_1,)) + monitor_input2 = m.dict() + monitor_output2 = m.dict() + monitor_input2["stop_thread"] = False + monitor_output2["error_msg"] = "" + monitor_thread2 = Process(target=monitor, args=(monitor_input2, + monitor_output2, vm_2,)) + monitor_input3 = m.dict() + monitor_output3 = m.dict() + monitor_input3["stop_thread"] = False + monitor_output3["error_msg"] = "" + monitor_thread3 = Process(target=monitor, args=(monitor_input3, + monitor_output3, vm_3,)) + # Lists of all monitor threads and their inputs and outputs. 
+ threads = [monitor_thread1, monitor_thread2, monitor_thread3] + thread_inputs = [monitor_input1, monitor_input2, monitor_input3] + thread_outputs = [monitor_output1, monitor_output2, monitor_output3] + try: + logging.info("Starting all monitor threads") + # Start all monitor threads + for thread in threads: + thread.start() + logging.info("Wait before subtest") + test_utils.wait_before_subtest() + monitor_err_msg = "" + for thread_output in thread_outputs: + if thread_output["error_msg"] != "": + monitor_err_msg += " ,{}".format(thread_output["error_msg"]) + thread_output["error_msg"] = "" + results.record_action("Check ping status of vm_1, vm_2, and vm_3") + results.add_to_summary(0, "-") + if len(monitor_err_msg) == 0: + results.add_success("Ping succeeds") + else: + results.add_failure(monitor_err_msg) + # Stop monitor thread 2 and delete instance vm_2 + thread_inputs[1]["stop_thread"] = True + if not os_utils.delete_instance(nova_client, vm_2.id): + logging.error("Fail to delete vm_2 instance during " + "testing process") + raise Exception("Fail to delete instance vm_2.") + # Create a new vm (vm_4) on compute 1 node + u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1) + vm_4 = test_utils.create_instance( + nova_client, + TESTCASE_CONFIG.instance_4_name, + image_id, + network_1_id, + sg_id, + secgroup_name=TESTCASE_CONFIG.secgroup_name, + compute_node=av_zone_1, + userdata=u4) + instance_ids.append(vm_4.id) + # Wait for VMs to get ips. 
+ instances_up = test_utils.wait_for_instances_up(vm_4) + if not instances_up: + logger.error("Instance vm_4 failed to start.") + # TODO: Handle this appropriately + # Create and start a new monitor thread for vm_4 + monitor_input4 = m.dict() + monitor_output4 = m.dict() + monitor_input4["stop_thread"] = False + monitor_output4["error_msg"] = "" + monitor_thread4 = Process(target=monitor, args=(monitor_input4, + monitor_output4, + vm_4,)) + threads.append(monitor_thread4) + thread_inputs.append(monitor_input4) + thread_outputs.append(monitor_output4) + logging.info("Starting monitor thread of vm_4") + threads[3].start() + test_utils.wait_before_subtest() + monitor_err_msg = "" + for thread_output in thread_outputs: + if thread_output["error_msg"] != "": + monitor_err_msg += " ,{}".format(thread_output["error_msg"]) + thread_output["error_msg"] = "" + results.record_action("Check ping status of vm_1, vm_3 and vm_4. " + "Instance vm_2 is deleted") + results.add_to_summary(0, "-") + if len(monitor_err_msg) == 0: + results.add_success("Ping succeeds") + else: + results.add_failure(monitor_err_msg) + + except: + logging.exception("======== EXCEPTION =========") + exc_type, exc_value, exc_tb = sys.exc_info() + traceback.print_exception(exc_type, exc_value, exc_tb) + finally: + # Give a stop signal to all threads + logging.info("Sending stop signal to monitor thread") + for thread_input in thread_inputs: + thread_input["stop_thread"] = True + # Wait for all threads to stop and return to the main process + for thread in threads: + thread.join() + + test_utils.cleanup_nova(nova_client, instance_ids, image_ids) + test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, + interfaces, subnet_ids, router_ids, + network_ids) + + return results.compile_summary() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + sys.exit(main()) -- cgit 1.2.3-korg