summaryrefslogtreecommitdiffstats
path: root/sdnvpn/lib
diff options
context:
space:
mode:
authorRomanos Skiadas <rski@intracom-telecom.com>2016-12-15 14:57:08 +0200
committerRomanos Skiadas <rski@intracom-telecom.com>2016-12-15 16:59:31 +0200
commitc9356c8ef4a056f47e25cb0f07796e0f6e7ff574 (patch)
tree43b657e852520d332c259b81b47e12bef1c1cc0f /sdnvpn/lib
parent033ddcc028b083df3ec9c077c6cb4bc53f4dc5d3 (diff)
Make sdnvpn a package
- Clean up the test/functest folder and move things to lib/ & artifacts/ - Add a new top level folder for the sdnvpn python code Change-Id: I5fdc7fa5475fb800f488a17d3481158c9c4f84e1 Signed-off-by: Romanos Skiadas <rski@intracom-telecom.com>
Diffstat (limited to 'sdnvpn/lib')
-rw-r--r--sdnvpn/lib/__init__.py0
-rw-r--r--sdnvpn/lib/config.py53
-rw-r--r--sdnvpn/lib/results.py192
-rw-r--r--sdnvpn/lib/utils.py303
4 files changed, 548 insertions, 0 deletions
diff --git a/sdnvpn/lib/__init__.py b/sdnvpn/lib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/sdnvpn/lib/__init__.py
diff --git a/sdnvpn/lib/config.py b/sdnvpn/lib/config.py
new file mode 100644
index 0000000..ac9bbe2
--- /dev/null
+++ b/sdnvpn/lib/config.py
@@ -0,0 +1,53 @@
+import yaml
+import os
+
+import functest.utils.functest_constants as ft_constants
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+
+logger = ft_logger.Logger("sndvpn_test_config").getLogger()
+
+
class CommonConfig(object):
    """Configuration shared by every SDNVPN functest testcase."""

    def __init__(self):
        # Locations inside the sdnvpn repository checkout.
        self.repo_path = ft_constants.SDNVPN_REPO_DIR
        self.config_file = os.path.join(
            self.repo_path, 'sdnvpn/test/functest/config.yaml')
        self.keyfile_path = os.path.join(
            self.repo_path, 'sdnvpn/artifacts/id_rsa')

        # Reporting and timing knobs.
        self.test_db = ft_utils.get_functest_config("results.test_db_url")
        self.line_length = 90  # length for the summary table
        self.vm_boot_timeout = 180

        # Flavor and image settings pulled from the functest configuration.
        self.default_flavor = ft_utils.get_parameter_from_yaml(
            "defaults.flavor", self.config_file)
        self.image_filename = ft_utils.get_functest_config(
            "general.openstack.image_file_name")
        self.image_format = ft_utils.get_functest_config(
            "general.openstack.image_disk_format")
        image_dir = ft_utils.get_functest_config(
            "general.directories.dir_functest_data")
        self.image_path = '{0}/{1}'.format(image_dir, self.image_filename)
+
+
class TestcaseConfig(object):
    """
    Configuration for a testcase.
    Parse config.yaml into a dict and create an object out of it.
    """

    def __init__(self, testcase):
        """Load the configuration section named *testcase*.

        Raises ValueError when the testcase has no section in
        config.yaml.  (The original code only logged an error and then
        crashed with an unhelpful TypeError on the dict update.)
        """
        common_config = CommonConfig()
        with open(common_config.config_file) as f:
            testcases_yaml = yaml.safe_load(f)
        test_config = testcases_yaml['testcases'].get(testcase, None)
        if test_config is None:
            logger.error('Test {0} configuration is not present in {1}'
                         .format(testcase, common_config.config_file))
            raise ValueError('Missing configuration for testcase %s'
                             % testcase)
        # Update class fields with configuration variables dynamically
        self.__dict__.update(**test_config)
diff --git a/sdnvpn/lib/results.py b/sdnvpn/lib/results.py
new file mode 100644
index 0000000..66e399e
--- /dev/null
+++ b/sdnvpn/lib/results.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import time
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+
+logger = ft_logger.Logger("sdnvpn-results").getLogger()
+
+
class Results(object):
    """Accumulate sub-test verdicts and render them as an ASCII summary.

    test_result holds the overall verdict ("PASS"/"FAIL"), summary the
    printable table built row by row, details a list of
    {test_name: verdict} dicts that is ultimately reported.
    """

    def __init__(self, line_length):
        # line_length: width in characters of the summary table rows.
        self.line_length = line_length
        self.test_result = "PASS"
        self.summary = ""
        self.details = []
        self.num_tests = 0
        self.num_tests_failed = 0

    def get_ping_status(self,
                        vm_source,
                        vm_target,
                        expected="PASS", timeout=30):
        """Watch vm_source's console for 'ping <ip> OK/KO' lines.

        The VMs are expected to run the ping userdata script, which
        echoes those lines for vm_target's first IP.  Records a PASS
        sub-test when the observed result matches *expected*
        ("PASS" = should ping, anything else = should not), a FAIL
        otherwise or on timeout (seconds).
        """
        console_log = vm_source.get_console_output()

        # NOTE: networks.itervalues() is Python 2 only; the surrounding
        # project still targets Python 2.
        ip_source = vm_source.networks.itervalues().next()[0]
        ip_target = vm_target.networks.itervalues().next()[0]

        if "request failed" in console_log:
            # Normally, cirros displays this message when userdata fails
            logger.debug("It seems userdata is not supported in "
                         "nova boot...")
            return False
        else:
            tab = ("%s" % (" " * 53))
            expected_result = 'can ping' if expected == 'PASS' \
                else 'cannot ping'
            test_case_name = ("'%s' %s '%s'" %
                              (vm_source.name,
                               expected_result,
                               vm_target.name))
            logger.debug("%sPing\n%sfrom '%s' (%s)\n%sto '%s' (%s).\n"
                         "%s-->Expected result: %s.\n"
                         % (tab, tab, vm_source.name, ip_source,
                            tab, vm_target.name, ip_target,
                            tab, expected_result))
            while True:
                console_log = vm_source.get_console_output()
                # the console_log is a long string, we want to take
                # the last 4 lines (for example)
                lines = console_log.split('\n')
                last_n_lines = lines[-5:]
                if ("ping %s OK" % ip_target) in last_n_lines:
                    msg = ("'%s' can ping '%s'"
                           % (vm_source.name, vm_target.name))
                    if expected == "PASS":
                        logger.debug("[PASS] %s" % msg)
                        self.add_success(test_case_name)
                    else:
                        logger.debug("[FAIL] %s" % msg)
                        self.test_result = "FAIL"
                        self.add_failure(test_case_name)
                        logger.debug("\n%s" % last_n_lines)
                    break
                elif ("ping %s KO" % ip_target) in last_n_lines:
                    msg = ("'%s' cannot ping '%s'" %
                           (vm_source.name, vm_target.name))
                    if expected == "FAIL":
                        logger.debug("[PASS] %s" % msg)
                        self.add_success(test_case_name)
                    else:
                        logger.debug("[FAIL] %s" % msg)
                        self.test_result = "FAIL"
                        self.add_failure(test_case_name)
                    break
                time.sleep(1)
                timeout -= 1
                if timeout == 0:
                    self.test_result = "FAIL"
                    logger.debug("[FAIL] Timeout reached for '%s'. "
                                 "No ping output captured in the console log"
                                 % vm_source.name)
                    self.add_failure(test_case_name)
                    break

    def add_to_summary(self, num_cols, col1, col2=""):
        """Append one row to the summary table.

        num_cols 0: separator row drawn with col1 repeated;
        num_cols 1: single full-width cell containing col1;
        num_cols 2: verdict (col1) + test name (col2) row, which also
        records the result in self.details and updates the counters.
        """
        if num_cols == 0:
            self.summary += ("+%s+\n" % (col1 * (self.line_length - 2)))
        elif num_cols == 1:
            self.summary += ("| " + col1.ljust(self.line_length - 3) + "|\n")
        elif num_cols == 2:
            self.summary += ("| %s" % col1.ljust(7) + "| ")
            self.summary += (col2.ljust(self.line_length - 12) + "|\n")
            if col1 in ("FAIL", "PASS"):
                self.details.append({col2: col1})
                self.num_tests += 1
                if col1 == "FAIL":
                    self.num_tests_failed += 1

    def record_action(self, msg):
        """Record and log an action and display it in the summary."""
        logger.info(msg)
        self.add_to_summary(1, msg)

    def add_failure(self, test):
        """Record *test* as a failed sub-test."""
        self.add_to_summary(2, "FAIL", test)

    def add_success(self, test):
        """Record *test* as a passed sub-test."""
        self.add_to_summary(2, "PASS", test)

    def check_ssh_output(self, vm_source, vm_target,
                         expected, timeout=30):
        """Watch vm_source's console for the SSH-userdata output.

        The SSH userdata script echoes '<ip> <hostname>' on success or
        '<ip> not reachable' on failure for vm_target's first IP;
        records a PASS sub-test when '<ip> <expected>' shows up, a FAIL
        on 'not reachable' or after *timeout* seconds.
        """
        console_log = vm_source.get_console_output()
        ip_source = vm_source.networks.itervalues().next()[0]
        ip_target = vm_target.networks.itervalues().next()[0]

        if "request failed" in console_log:
            # Normally, cirros displays this message when userdata fails
            logger.debug("It seems userdata is not supported in "
                         "nova boot...")
            return False
        else:
            tab = ("%s" % (" " * 53))
            test_case_name = ("[%s] returns 'I am %s' to '%s'[%s]" %
                              (ip_target, expected,
                               vm_source.name, ip_source))
            logger.debug("%sSSH\n%sfrom '%s' (%s)\n%sto '%s' (%s).\n"
                         "%s-->Expected result: %s.\n"
                         % (tab, tab, vm_source.name, ip_source,
                            tab, vm_target.name, ip_target,
                            tab, expected))
            while True:
                console_log = vm_source.get_console_output()
                # the console_log is a long string, we want to take
                # the last 4 lines (for example)
                lines = console_log.split('\n')
                last_n_lines = lines[-5:]
                if ("%s %s" % (ip_target, expected)) in last_n_lines:
                    logger.debug("[PASS] %s" % test_case_name)
                    self.add_success(test_case_name)
                    break
                elif ("%s not reachable" % ip_target) in last_n_lines:
                    logger.debug("[FAIL] %s" % test_case_name)
                    self.add_failure(test_case_name)
                    self.test_result = "FAIL"
                    break
                time.sleep(1)
                timeout -= 1
                if timeout == 0:
                    self.test_result = "FAIL"
                    logger.debug("[FAIL] Timeout reached for '%s'."
                                 " No ping output captured in the console log"
                                 % vm_source.name)
                    self.add_failure(test_case_name)
                    break

    def ping_ip_test(self, address):
        """Ping *address* from this host and record PASS/FAIL."""
        ping = "ping %s -c 3" % address
        testcase_name = "Ping IP %s" % address
        exit_code = ft_utils.execute_command(ping)

        # NOTE(review): unlike the console checks above, a failure here
        # does not flip self.test_result -- confirm that is intended.
        if exit_code != 0:
            self.add_failure(testcase_name)
        else:
            self.add_success(testcase_name)

    def compile_summary(self, SUCCESS_CRITERIA):
        """Close the summary table, log it, and compute the verdict.

        Returns {"status": "PASS"/"FAILED", "details": [...]} where
        status is PASS when the percentage of passed sub-tests reaches
        SUCCESS_CRITERIA.
        """
        success_message = "All the subtests have passed."
        failure_message = "One or more subtests have failed."

        self.add_to_summary(0, "=")
        logger.info("\n%s" % self.summary)
        if self.test_result == "PASS":
            logger.info(success_message)
        else:
            logger.info(failure_message)

        status = "PASS"
        if self.num_tests == 0:
            # Fixed: no sub-test ran -- nothing succeeded, so fail the
            # criteria check instead of raising ZeroDivisionError.
            success = 0
        else:
            # '//' keeps the original Python 2 floor-division behaviour
            # under Python 3 as well.
            success = 100 - \
                (100 * self.num_tests_failed // self.num_tests)
        if success < int(SUCCESS_CRITERIA):
            status = "FAILED"

        return {"status": status, "details": self.details}
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
new file mode 100644
index 0000000..b551954
--- /dev/null
+++ b/sdnvpn/lib/utils.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import sys
+import time
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.openstack_utils as os_utils
+import re
+
+from sdnvpn.lib import config as sdnvpn_config
+
+logger = ft_logger.Logger("sndvpn_test_utils").getLogger()
+
+common_config = sdnvpn_config.CommonConfig()
+
+
def create_net(neutron_client, name):
    """Create a neutron network called *name* and return its id.

    Exits the process on failure, like the other fail-fast helpers in
    this module.
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if net_id:
        return net_id
    logger.error(
        "There has been a problem when creating the neutron network")
    sys.exit(-1)
+
+
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet *name* with *cidr* inside *net_id*.

    Returns the subnet id; exits the process on failure.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client, name,
                                               cidr, net_id)
    if subnet_id:
        return subnet_id
    logger.error(
        "There has been a problem when creating the neutron subnet")
    sys.exit(-1)
+
+
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Network assoc will not work for networks/subnets created by this function.

    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
    network_dic = os_utils.create_network_full(
        neutron_client, net, subnet1, router, cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)

    net_id, subnet_id, router_id = (
        network_dic[key] for key in ("net_id", "subnet_id", "router_id"))

    if subnet2 is not None:
        # Attach an extra subnet to the same network; the subnet id
        # returned to the caller is then the second subnet's id.
        logger.debug("Creating and attaching a second subnet...")
        subnet_id = os_utils.create_neutron_subnet(
            neutron_client, subnet2, cidr2, net_id)
        if not subnet_id:
            logger.error(
                "There has been a problem when creating the second subnet")
            sys.exit(-1)
        logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
+
+
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot a nova instance, wait for ACTIVE and attach the security group.

    Exits the process if the boot fails; returns the nova server object
    otherwise.  secgroup_name is only used for logging -- the group
    actually attached is always sg_id.
    """
    kwargs.setdefault('flavor', common_config.default_flavor)
    flavor = kwargs['flavor']

    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, flavor, image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        flavor,
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Log the first IP on the instance's first network
    # (networks.itervalues() is Python 2 only).
    logger.debug("Instance '%s' booted successfully. IP='%s'." %
                 (name, instance.networks.itervalues().next()[0]))

    secgroup_label = secgroup_name if secgroup_name else sg_id
    logger.debug("Adding '%s' to security group '%s'..."
                 % (name, secgroup_label))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    return instance
+
+
def generate_ping_userdata(ips_array):
    """Return a cloud-init shell script that endlessly pings *ips_array*.

    Each target is pinged once per loop iteration; the script echoes
    'ping <ip> OK' or 'ping <ip> KO' lines, which the Results helpers
    look for in the instance console log.
    """
    # Build ' ip1 ip2 ...' directly.  The original accumulated the list
    # in a loop and then ran a no-op str.replace(' ', ' ') on it; the
    # join below produces the identical string without the dead step.
    ips = "".join(" %s" % ip for ip in ips_array)
    return ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            " ip=$i\n"
            " ping -c 1 $ip 2>&1 >/dev/null\n"
            " RES=$?\n"
            " if [ \"Z$RES\" = \"Z0\" ] ; then\n"
            " echo ping $ip OK\n"
            " else echo ping $ip KO\n"
            " fi\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % ips)
+
+
def generate_userdata_common():
    """Return the shell userdata that seeds cirros SSH access.

    Installs the test public key into authorized_keys, moves the
    injected private key into ~/.ssh and fixes ownership/permissions.
    """
    key_line = (
        "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
        "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
        "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
        "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
        "cirros@test1>/home/cirros/.ssh/authorized_keys")
    commands = (
        "#!/bin/sh",
        "sudo mkdir -p /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/.ssh/",
        "sudo chown cirros:cirros /home/cirros/id_rsa",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/",
        key_line,
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys",
        "chmod 700 /home/cirros/.ssh",
        "chmod 644 /home/cirros/.ssh/authorized_keys",
        "chmod 600 /home/cirros/.ssh/id_rsa",
    )
    return "\n".join(commands) + "\n"
+
+
def generate_userdata_with_ssh(ips_array):
    """Return userdata that repeatedly SSHes to every IP in *ips_array*.

    Prepends the common key-setup script, then loops running 'hostname'
    over SSH on each target, echoing '<ip> <hostname>' on success or
    "<ip> 'not reachable'" on failure; the Results helpers grep the
    console log for those lines.
    """
    u1 = generate_userdata_common()

    # ' ip1 ip2 ...' built directly; the original used a loop followed
    # by a no-op str.replace(' ', ' ') -- output is identical.
    ips = "".join(" %s" % ip for ip in ips_array)
    u2 = ("#!/bin/sh\n"
          "set%s\n"
          "while true; do\n"
          " for i do\n"
          " ip=$i\n"
          " hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
          "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
          " RES=$?\n"
          " if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
          " else echo $ip 'not reachable';fi;\n"
          " done\n"
          " sleep 1\n"
          "done\n"
          % ips)
    return (u1 + u2)
+
+
def wait_for_instance(instance):
    """Poll *instance*'s console until DHCP reports a lease.

    Returns True once 'Lease of ... obtained' appears in the console
    log, False after 40 polls at 2s intervals (~80s) without it.
    """
    logger.info("Waiting for instance %s to get a DHCP lease..." % instance.id)
    # The sleep this function replaced waited for 80s
    tries = 40
    sleep_time = 2
    pattern = "Lease of .* obtained, lease time"
    expected_regex = re.compile(pattern)
    while tries > 0:
        console_log = instance.get_console_output()
        if expected_regex.search(console_log):
            # Fixed: return as soon as the lease shows up instead of
            # sleeping one extra interval before re-checking.
            return True
        time.sleep(sleep_time)
        tries -= 1

    logger.error("Instance %s seems to have failed leasing an IP."
                 % instance.id)
    return False
+
+
def wait_for_instances_up(*args):
    """Wait on every instance; True only if all of them got a lease.

    Deliberately waits for each instance even after one has already
    failed (no short-circuit), matching the original list-based check.
    """
    statuses = []
    for instance in args:
        statuses.append(wait_for_instance(instance))
    return all(statuses)
+
+
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until *net_id* appears in *bgpvpn_id*'s network list.

    Returns True on success, False after 30 one-second polls.
    """
    tries = 30
    sleep_time = 1
    nets = []
    # Fixed: the format arguments were swapped, printing the BGPVPN id
    # as the network and vice versa.
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))

    while tries > 0 and net_id not in nets:
        nets = os_utils.get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
+
+
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every network id in *args* to associate with the BGPVPN.

    Every association is checked even when an earlier one failed.
    """
    # Loop variable renamed from 'id', which shadowed the builtin.
    check = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
             for net_id in args]
    # Return True if all associations succeeded
    return all(check)
+
+
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until *router_id* appears in *bgpvpn_id*'s router list.

    Returns True on success, False after 30 one-second polls.
    """
    tries = 30
    sleep_time = 1
    routers = []
    # Fixed: the format arguments were swapped, printing the BGPVPN id
    # as the router and vice versa.
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = os_utils.get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
+
+
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Wait for every router id in *args* to associate with the BGPVPN.

    Every association is checked even when an earlier one failed.
    """
    # Loop variable renamed from 'id', which shadowed the builtin.
    check = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id)
             for router_id in args]
    # Return True if all associations succeeded
    return all(check)
+
+
def wait_before_subtest(*args, **kwargs):
    """Give the previous subtest's resources time to settle.

    This is a placeholder.
    TODO: Replace delay with polling logic.
    """
    time.sleep(30)
+
+
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment

    Exit if the deployment doesn't have enough compute nodes"""
    compute_nodes = os_utils.get_hypervisors(nova_client)

    num_compute_nodes = len(compute_nodes)
    # Fixed: compare against the required_node_number parameter instead
    # of a hard-coded 2, and report the actual requirement.
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        sys.exit(-1)

    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
+
+
def open_icmp_ssh(neutron_client, security_group_id):
    """Allow ingress ICMP and SSH (TCP/22) traffic on the security group.

    Fixed: the original second rule passed 'tcp' in the direction slot
    of create_secgroup_rule (the first rule passes 'ingress' there) and
    opened port 80 (HTTP) although this helper is meant to open SSH.
    """
    os_utils.create_secgroup_rule(neutron_client,
                                  security_group_id,
                                  'ingress',
                                  'icmp')
    os_utils.create_secgroup_rule(neutron_client,
                                  security_group_id,
                                  'ingress',
                                  'tcp',
                                  22, 22)