path: root/sfc/lib
author     jose.lausuch <jose.lausuch@ericsson.com>   2016-12-13 12:31:39 +0100
committer  jose.lausuch <jose.lausuch@ericsson.com>   2016-12-15 12:37:52 +0100
commit     acf339f2840d0fe7a46187a0597704cf5b486214 (patch)
tree       de114d6482bcd7cfd9d057f756d8b6f8864911cb /sfc/lib
parent     1350624dbcf49c27ae0333719c2d7dda86ca7b0e (diff)
Make SFC installable as a python module
New directory structure:
    <root>/sfc
    <root>/sfc/lib/
    <root>/sfc/tests
    <root>/sfc/tests/functest

JIRA: SFC-60

After installing sfc, the imports would be:
    import sfc.tests.functest.x
    import sfc.lib.x

Change-Id: Ib15172239aefdef65056d6598210a1b28a4b2eff
Signed-off-by: jose.lausuch <jose.lausuch@ericsson.com>
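For illustration only, a minimal setuptools configuration that would make the
new layout importable as described above. This is a sketch under the
assumption of a standard setuptools workflow; the actual packaging files are
not part of this change.

    # setup.py (hypothetical example, not included in this commit)
    from setuptools import setup, find_packages

    setup(
        name='sfc',
        version='0.1',
        # find_packages() discovers sfc, sfc.lib, sfc.tests and
        # sfc.tests.functest through their __init__.py files
        packages=find_packages(),
    )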
Diffstat (limited to 'sfc/lib')
-rw-r--r--   sfc/lib/__init__.py      0
-rw-r--r--   sfc/lib/config.py       84
-rw-r--r--   sfc/lib/results.py      52
-rw-r--r--   sfc/lib/utils.py       413
4 files changed, 549 insertions, 0 deletions
diff --git a/sfc/lib/__init__.py b/sfc/lib/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/sfc/lib/__init__.py
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
new file mode 100644
index 00000000..97fa9122
--- /dev/null
+++ b/sfc/lib/config.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import yaml
+import os
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.functest_constants as ft_constants
+
+logger = ft_logger.Logger("sfc_test_config").getLogger()
+
+
+class CommonConfig(object):
+ """
+ Common configuration parameters across testcases
+ """
+
+ def __init__(self):
+ self.line_length = 30
+ self.test_db = ft_utils.get_functest_config("results.test_db_url")
+ self.repo_path = ft_constants.SFC_REPO_DIR
+ self.sfc_test_dir = os.path.join(self.repo_path, "tests", "functest")
+ self.vnfd_dir = os.path.join(self.sfc_test_dir, "vnfd-templates")
+ self.functest_results_dir = os.path.join(
+ ft_constants.FUNCTEST_RESULTS_DIR, "odl-sfc")
+ self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
+ self.fuel_master_ip = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_ip", self.config_file)
+ self.fuel_master_uname = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_uname", self.config_file)
+ self.fuel_master_passwd = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_passwd", self.config_file)
+ self.fuel_proxy = {
+ 'ip': self.fuel_master_ip,
+ 'username': self.fuel_master_uname,
+ 'password': self.fuel_master_passwd
+ }
+ self.flavor = ft_utils.get_parameter_from_yaml(
+ "defaults.flavor", self.config_file)
+ self.ram_size_in_mb = ft_utils.get_parameter_from_yaml(
+ "defaults.ram_size_in_mb", self.config_file)
+ self.disk_size_in_gb = ft_utils.get_parameter_from_yaml(
+ "defaults.disk_size_in_gb", self.config_file)
+ self.vcpu_count = ft_utils.get_parameter_from_yaml(
+ "defaults.vcpu_count", self.config_file)
+ self.image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.image_name", self.config_file)
+ self.image_file_name = ft_utils.get_parameter_from_yaml(
+ "defaults.image_file_name", self.config_file)
+ self.image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.image_format", self.config_file)
+ self.url = ft_utils.get_parameter_from_yaml(
+ "defaults.url", self.config_file)
+ self.dir_functest_data = ft_utils.get_functest_config(
+ "general.directories.dir_functest_data")
+ self.image_path = os.path.join(
+ self.dir_functest_data, self.image_file_name)
+
+
+class TestcaseConfig(object):
+ """
+ Configuration for a testcase.
+ Parse config.yaml into a dict and create an object out of it.
+ """
+
+ def __init__(self, testcase):
+ common_config = CommonConfig()
+ test_config = None
+ with open(common_config.config_file) as f:
+ testcases_yaml = yaml.safe_load(f)
+ test_config = testcases_yaml['testcases'].get(testcase, None)
+ if test_config is None:
+ logger.error('Test {0} configuration is not present in {1}'
+ .format(testcase, common_config.config_file))
+ # Update class fields with configuration variables dynamically
+ self.__dict__.update(**test_config)
diff --git a/sfc/lib/results.py b/sfc/lib/results.py
new file mode 100644
index 00000000..5fa9aa05
--- /dev/null
+++ b/sfc/lib/results.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import functest.utils.functest_logger as ft_logger
+
+logger = ft_logger.Logger("sfc-results").getLogger()
+
+
+class Results(object):
+
+    def __init__(self, line_length):
+        self.line_length = line_length
+        self.test_result = "FAIL"
+        self.summary = ""
+        self.details = []
+        self.num_tests = 0
+        self.num_tests_failed = 0
+
+    def add_to_summary(self, num_cols, col1, col2=""):
+        if num_cols == 0:
+            self.summary += ("+%s+\n" % (col1 * (self.line_length - 2)))
+        elif num_cols == 1:
+            self.summary += ("| " + col1.ljust(self.line_length - 3) + "|\n")
+        elif num_cols == 2:
+            self.summary += ("| %s" % col1.ljust(7) + "| ")
+            self.summary += (col2.ljust(self.line_length - 12) + "|\n")
+            if col1 in ("FAIL", "PASS"):
+                self.details.append({col2: col1})
+                self.num_tests += 1
+                if col1 == "FAIL":
+                    self.num_tests_failed += 1
+
+    def compile_summary(self):
+        success_message = "All the subtests have passed."
+        failure_message = "One or more subtests have failed."
+
+        self.add_to_summary(0, "=")
+        logger.info("\n%s" % self.summary)
+        if self.num_tests_failed == 0:
+            self.test_result = "PASS"
+            logger.info(success_message)
+        else:
+            logger.info(failure_message)
+
+        return {"status": self.test_result, "details": self.details}
diff --git a/sfc/lib/utils.py b/sfc/lib/utils.py
new file mode 100644
index 00000000..00f98985
--- /dev/null
+++ b/sfc/lib/utils.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import json
+import os
+import re
+import subprocess
+import time
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import opnfv.utils.SSHUtils as ssh_utils
+
+
+logger = ft_logger.Logger("sfc_test_utils").getLogger()
+SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+FUNCTEST_RESULTS_DIR = os.path.join("/home", "opnfv",
+                                    "functest", "results", "odl-sfc")
+
+
+def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
+ """run given command locally and return commands output if success"""
+ pipe = subprocess.Popen(cmd, shell=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, cwd=wdir)
+
+ (output, errors) = pipe.communicate()
+ if output:
+ output = output.strip()
+ if pipe.returncode < 0:
+ logger.error(errors)
+ return False
+ if errors:
+ logger.error(errors)
+ return ignore_stderr
+
+ if ignore_no_output and not output:
+ return True
+
+ return output
+
+
+def run_cmd_on_controller(cmd):
+ """run given command on OpenStack controller"""
+ ip_controllers = get_openstack_node_ips("controller")
+ if not ip_controllers:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_controllers[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+
+def run_cmd_on_compute(cmd):
+ """run given command on OpenStack Compute node"""
+ ip_computes = get_openstack_node_ips("compute")
+ if not ip_computes:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_computes[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+
+def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
+ """run given command on Fuel Master"""
+ ip = os.environ.get("INSTALLER_IP")
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, SSH_OPTIONS, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+
+def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
+ """run given command on Remote Machine, Can be VM"""
+ ssh_opt_append = "%s -o ConnectTimeout=50 " % SSH_OPTIONS
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, ssh_opt_append, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+
+def get_openstack_node_ips(role):
+ """Get OpenStack Nodes IP Address"""
+ fuel_env = os.environ.get("FUEL_ENV")
+ if fuel_env is not None:
+ cmd = "fuel2 node list -f json -e %s" % fuel_env
+ else:
+ cmd = "fuel2 node list -f json"
+
+ nodes = run_cmd_on_fm(cmd)
+ ips = []
+ nodes = json.loads(nodes)
+ for node in nodes:
+ if role in node["roles"]:
+ ips.append(node["ip"])
+
+ return ips
+
+
+def configure_iptables():
+ """Configures IPTABLES on OpenStack Controller"""
+ iptable_cmds = ["iptables -P INPUT ACCEPT",
+ "iptables -t nat -P INPUT ACCEPT",
+ "iptables -A INPUT -m state \
+ --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
+
+ for cmd in iptable_cmds:
+ logger.info("Configuring %s on contoller" % cmd)
+ run_cmd_on_controller(cmd)
+
+
+def download_image(url, image_path):
+    image_filename = os.path.basename(image_path)
+    image_url = "%s/%s" % (url, image_filename)
+    image_dir = os.path.dirname(image_path)
+    if not os.path.isfile(image_path):
+        logger.info("Downloading image")
+        ft_utils.download_url(image_url, image_dir)
+    else:
+        logger.info("Using old image")
+
+
+def setup_neutron(neutron_client, net, subnet, router, subnet_cidr):
+    n_dict = os_utils.create_network_full(neutron_client,
+                                          net,
+                                          subnet,
+                                          router,
+                                          subnet_cidr)
+    if not n_dict:
+        logger.error("failed to create neutron network")
+        return False
+
+    return n_dict["net_id"]
+
+
+def setup_ingress_egress_secgroup(neutron_client, protocol,
+                                  min_port=None, max_port=None):
+    secgroups = os_utils.get_security_groups(neutron_client)
+    for sg in secgroups:
+        os_utils.create_secgroup_rule(neutron_client, sg['id'],
+                                      'ingress', protocol,
+                                      port_range_min=min_port,
+                                      port_range_max=max_port)
+        os_utils.create_secgroup_rule(neutron_client, sg['id'],
+                                      'egress', protocol,
+                                      port_range_min=min_port,
+                                      port_range_max=max_port)
+
+
+def create_security_groups(neutron_client, secgroup_name, secgroup_descr):
+    sg_id = os_utils.create_security_group_full(neutron_client,
+                                                secgroup_name, secgroup_descr)
+    setup_ingress_egress_secgroup(neutron_client, "icmp")
+    setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
+    setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
+    setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
+    return sg_id
+
+
+def create_instance(nova_client, name, flavor, image_id, network_id, sg_id,
+                    secgroup_name=None, fixed_ip=None,
+                    compute_node='', userdata=None, files=None):
+    logger.info("Creating instance '%s'..." % name)
+    logger.debug(
+        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
+        " network=%s\n secgroup=%s \n hypervisor=%s \n"
+        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
+        % (name, flavor, image_id, network_id, sg_id,
+           compute_node, fixed_ip, files, userdata))
+    instance = os_utils.create_instance_and_wait_for_active(
+        flavor,
+        image_id,
+        network_id,
+        name,
+        config_drive=True,
+        userdata=userdata,
+        av_zone=compute_node,
+        fixed_ip=fixed_ip,
+        files=files)
+
+    if instance is None:
+        logger.error("Error while booting instance.")
+        return None
+
+    if secgroup_name:
+        logger.debug("Adding '%s' to security group '%s'..."
+                     % (name, secgroup_name))
+    else:
+        logger.debug("Adding '%s' to security group '%s'..."
+                     % (name, sg_id))
+    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+
+    return instance
+
+
+def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
+    ping_cmd = 'ping'
+
+    if timeout:
+        ping_cmd = ping_cmd + ' -w %s' % timeout
+
+    grep_cmd = "grep -e 'packet loss' -e rtt"
+
+    if iface is not None:
+        ping_cmd = ping_cmd + ' -I %s' % iface
+
+    ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
+    cmd = ping_cmd + '|' + grep_cmd
+
+    while retries > 0:
+        output = run_cmd(cmd)
+        if not output:
+            return False
+
+        match = re.search(r'(\d*)% packet loss', output)
+        if not match:
+            return False
+
+        packet_loss = int(match.group(1))
+        if packet_loss == 0:
+            return True
+
+        retries -= 1
+
+    return False
+
+
+def get_floating_ips(nova_client, neutron_client):
+    ips = []
+    instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+    for instance in instances:
+        floatip_dic = os_utils.create_floating_ip(neutron_client)
+        floatip = floatip_dic['fip_addr']
+        instance.add_floating_ip(floatip)
+        logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
+        logger.info("Waiting for instance %s:%s to come up" %
+                    (instance.name, floatip))
+        if not ping(floatip):
+            logger.info("Instance %s:%s didn't come up" %
+                        (instance.name, floatip))
+            return None
+
+        if instance.name == "server":
+            logger.info("Server:%s is reachable" % floatip)
+            server_ip = floatip
+        elif instance.name == "client":
+            logger.info("Client:%s is reachable" % floatip)
+            client_ip = floatip
+        else:
+            logger.info("SF:%s is reachable" % floatip)
+        ips.append(floatip)
+
+    return server_ip, client_ip, ips[1], ips[0]
+
+
+def start_http_server(ip):
+ """Start http server on a given machine, Can be VM"""
+ cmd = "\'python -m SimpleHTTPServer 80"
+ cmd = cmd + " > /dev/null 2>&1 &\'"
+ run_cmd_remote(ip, cmd)
+ output = run_cmd_remote(ip, "ps aux|grep SimpleHTTPServer")
+ if not output:
+ logger.error("Failed to start http server")
+ return False
+
+ logger.info(output)
+ return True
+
+
+def vxlan_firewall(sf, iface="eth0", port="22", block=True):
+ """Set firewall using vxlan_tool.py on a given machine, Can be VM"""
+ cmd = "python vxlan_tool.py -i %s -d forward -v off" % iface
+ if block:
+ cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b %s" % port
+
+ cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
+ run_cmd_remote(sf, cmd)
+
+
+def vxlan_tool_stop(sf):
+ cmd = "pkill -f vxlan_tool.py"
+ run_cmd_remote(sf, cmd)
+
+
+def netcat(s_ip, c_ip, port="80", timeout=5):
+ """Run netcat on a give machine, Can be VM"""
+ cmd = "nc -zv "
+ cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
+ cmd = cmd + " 2>&1"
+ output = run_cmd_remote(c_ip, cmd)
+ logger.info("%s" % output)
+ return output
+
+
+def is_ssh_blocked(srv_prv_ip, client_ip):
+    res = netcat(srv_prv_ip, client_ip, port="22")
+    match = re.search("nc:.*timed out:.*", res, re.M)
+    if match:
+        return True
+
+    return False
+
+
+def is_http_blocked(srv_prv_ip, client_ip):
+    res = netcat(srv_prv_ip, client_ip, port="80")
+    match = re.search(".* 80 port.* succeeded!", res, re.M)
+    if match:
+        return False
+
+    return True
+
+
+def capture_err_logs(ovs_logger, controller_clients, compute_clients, error):
+    timestamp = time.strftime("%Y%m%d-%H%M%S")
+    ovs_logger.dump_ovs_logs(controller_clients,
+                             compute_clients,
+                             related_error=error,
+                             timestamp=timestamp)
+
+
+def get_ssh_clients(role, proxy):
+    clients = []
+    for ip in get_openstack_node_ips(role):
+        s_client = ssh_utils.get_ssh_client(ip, 'root', proxy=proxy)
+        clients.append(s_client)
+
+    return clients
+
+
+def check_ssh(ips, retries=100):
+ """Check SSH connectivity to VNFs"""
+ check = [False, False]
+ logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
+ while retries and not all(check):
+ for index, ip in enumerate(ips):
+ check[index] = run_cmd_remote(ip, "exit")
+
+ if all(check):
+ logger.info("SSH connectivity to the SFs established")
+ return True
+
+ time.sleep(3)
+ retries -= 1
+
+ return False
+
+
+def ofctl_time_counter(ovs_logger, ssh_conn):
+    try:
+        # We get the flows from table 11
+        table = 11
+        br = "br-int"
+        output = ovs_logger.ofctl_dump_flows(ssh_conn, br, table)
+        pattern = "NXM_NX_NSP"
+        rsps = []
+        lines = output.split(",")
+        for line in lines:
+            is_there = re.findall(pattern, line)
+            if is_there:
+                value = line.split(":")[1].split("-")[0]
+                rsps.append(value)
+        return rsps
+    except Exception, e:
+        logger.error('Error when counting the RSPs: %s' % e)
+        return None
+
+
+@ft_utils.timethis
+def capture_time_log(ovs_logger, compute_clients, timeout=200):
+    rsps = ofctl_time_counter(ovs_logger, compute_clients[0])
+    first_RSP = rsps[0] if len(rsps) > 0 else ''
+    while not ((len(rsps) > 1) and
+               (first_RSP != rsps[0]) and
+               (rsps[0] == rsps[1])):
+        rsps = ofctl_time_counter(ovs_logger, compute_clients[0])
+        timeout -= 1
+        if timeout == 0:
+            logger.error(
+                "Timeout but classification rules are not updated")
+            return
+        time.sleep(1)
+    logger.info("Classification rules updated")
+
+
+def get_compute_nodes(nova_client, required_node_number=2):
+ """Get the compute nodes in the deployment"""
+ compute_nodes = os_utils.get_hypervisors(nova_client)
+
+ num_compute_nodes = len(compute_nodes)
+ if num_compute_nodes < 2:
+ logger.error("There are %s compute nodes in the deployment. "
+ "Minimum number of nodes to complete the test is 2."
+ % num_compute_nodes)
+ return None
+
+ logger.debug("Compute nodes: %s" % compute_nodes)
+ return compute_nodes
+
+
+def setup_compute_node(cidr):
+ logger.info("bringing up br-int iface")
+ run_cmd_on_compute("ifconfig br-int up")
+ if not run_cmd_on_compute("ip route|grep -o %s" % cidr):
+ logger.info("adding route %s" % cidr)
+ return run_cmd_on_compute("ip route add %s" % cidr)
+ else:
+ logger.info("route %s exists" % cidr)