Diffstat (limited to 'tests')
-rw-r--r--   tests/functest/odl-sfc/README.sfc-test-2                          11
-rw-r--r--   tests/functest/odl-sfc/config.py                                  63
-rw-r--r--   tests/functest/odl-sfc/config.yaml                                23
-rw-r--r--   tests/functest/odl-sfc/ovs_utils.py                                1
-rw-r--r--   tests/functest/odl-sfc/results.py                                 53
-rwxr-xr-x   tests/functest/odl-sfc/sfc-test2.py                              586
-rwxr-xr-x   tests/functest/odl-sfc/sfc.py                                    611
-rwxr-xr-x   tests/functest/odl-sfc/sfc_change_classi.bash                      7
-rwxr-xr-x   tests/functest/odl-sfc/sfc_tacker_test2.bash (renamed from tests/functest/odl-sfc/sfc_tacker.bash)    8
-rw-r--r--   tests/functest/odl-sfc/test2-vnfd1.yaml                           31
-rw-r--r--   tests/functest/odl-sfc/test2-vnfd2.yaml                           31
-rw-r--r--   tests/functest/odl-sfc/utils.py                                  388
12 files changed, 1346 insertions, 467 deletions
diff --git a/tests/functest/odl-sfc/README.sfc-test-2 b/tests/functest/odl-sfc/README.sfc-test-2
new file mode 100644
index 00000000..5a9f2ebe
--- /dev/null
+++ b/tests/functest/odl-sfc/README.sfc-test-2
@@ -0,0 +1,11 @@
+### ODL-SFC TEST2 DESCRIPTION ###
+
+This is a short description of the test case.
+
+We create one client and one server using nova. Then, two SFs are created
+using tacker and deployed on two different compute nodes. A chain is created
+that includes both SFs.
+
+vxlan_tool is started on both SFs and HTTP traffic is sent from the client to
+the server. If that works, vxlan_tool is reconfigured to block HTTP traffic.
+The HTTP test is run again and should fail because the packets are dropped.
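The helpers used for this flow are added in sfc-test2.py later in this change; a minimal sketch of the two HTTP checks described above, assuming the SF floating IPs (sf1, sf2), the client floating IP and the server private IP have already been resolved:

    # Test 1: vxlan_tool forwards traffic, HTTP must work
    vxlan_firewall(sf1, block=False)
    vxlan_firewall(sf2, block=False)
    assert not is_http_blocked(srv_prv_ip, client_ip)

    # Test 2: restart vxlan_tool on SF1 blocking port 80, HTTP must fail
    vxlan_tool_stop(sf1)
    vxlan_firewall(sf1, port="80")
    assert is_http_blocked(srv_prv_ip, client_ip)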
diff --git a/tests/functest/odl-sfc/config.py b/tests/functest/odl-sfc/config.py
new file mode 100644
index 00000000..556302e6
--- /dev/null
+++ b/tests/functest/odl-sfc/config.py
@@ -0,0 +1,63 @@
+import yaml
+import os
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.functest_constants as ft_constants
+
+logger = ft_logger.Logger("sfc_test_config").getLogger()
+
+
+class CommonConfig(object):
+ """
+ Common configuration parameters across testcases
+ """
+
+ def __init__(self):
+ self.line_length = 30
+ self.test_db = ft_utils.get_functest_config("results.test_db_url")
+ self.repo_path = ft_constants.SFC_REPO_DIR
+ self.sfc_test_dir = os.path.join(self.repo_path, "tests",
+ "functest", "odl-sfc")
+ self.functest_results_dir = os.path.join(
+ ft_constants.FUNCTEST_RESULTS_DIR, "odl-sfc")
+ self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
+ self.fuel_master_ip = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_ip", self.config_file)
+ self.fuel_master_uname = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_uname", self.config_file)
+ self.fuel_master_passwd = ft_utils.get_parameter_from_yaml(
+ "defaults.fuel_master_passwd", self.config_file)
+ self.flavor = ft_utils.get_parameter_from_yaml(
+ "defaults.flavor", self.config_file)
+ self.image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.image_name", self.config_file)
+ self.image_file_name = ft_utils.get_parameter_from_yaml(
+ "defaults.image_file_name", self.config_file)
+ self.image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.image_format", self.config_file)
+ self.url = ft_utils.get_parameter_from_yaml(
+ "defaults.url", self.config_file)
+ self.dir_functest_data = ft_utils.get_functest_config(
+ "general.directories.dir_functest_data")
+ self.image_path = os.path.join(
+ self.dir_functest_data, self.image_file_name)
+
+
+class TestcaseConfig(object):
+ """
+ Configuration for a testcase.
+ Parse config.yaml into a dict and create an object out of it.
+ """
+
+ def __init__(self, testcase):
+ common_config = CommonConfig()
+ test_config = None
+ with open(common_config.config_file) as f:
+ testcases_yaml = yaml.safe_load(f)
+ test_config = testcases_yaml['testcases'].get(testcase, None)
+ if test_config is None:
+ logger.error('Test {0} configuration is not present in {1}'
+ .format(testcase, common_config.config_file))
+ # Update class fields with configuration variables dynamically
+ self.__dict__.update(**test_config)
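A minimal usage sketch, mirroring how sfc.py consumes these classes: every key under testcases.<name> in config.yaml becomes an attribute of the TestcaseConfig instance, while CommonConfig exposes the defaults block and derived paths.

    import config as sfc_config

    common_config = sfc_config.CommonConfig()
    testcase_config = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')

    flavor = common_config.flavor          # "custom", from defaults.flavor
    net_name = testcase_config.net_name    # "example-net"
    vnfd = testcase_config.test_vnfd_red   # "test-vnfd1.yaml"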
diff --git a/tests/functest/odl-sfc/config.yaml b/tests/functest/odl-sfc/config.yaml
new file mode 100644
index 00000000..c6624af6
--- /dev/null
+++ b/tests/functest/odl-sfc/config.yaml
@@ -0,0 +1,23 @@
+defaults:
+ flavor: custom #odl-sfc uses custom flavor
+ image_name: sf_nsh_colorado
+ image_file_name: sf_nsh_colorado.qcow2
+ fuel_master_ip: 10.20.0.2
+ fuel_master_uname: root
+ fuel_master_passwd: r00tme
+ image_format: qcow2
+ url: "http://artifacts.opnfv.org/sfc/demo"
+
+testcases:
+ sfc_two_chains_SSH_and_HTTP:
+ enabled: true
+ description: "ODL-SFC tests"
+ testname_db: "sfc_two_chains_SSH_and_HTTP"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-vnfd1.yaml"
+ test_vnfd_blue: "test-vnfd2.yaml"
diff --git a/tests/functest/odl-sfc/ovs_utils.py b/tests/functest/odl-sfc/ovs_utils.py
index af1f232c..48dfd620 100644
--- a/tests/functest/odl-sfc/ovs_utils.py
+++ b/tests/functest/odl-sfc/ovs_utils.py
@@ -21,6 +21,7 @@ class OVSLogger(object):
self.ovs_dir = basedir
self.ft_resdir = ft_resdir
self.__mkdir_p(self.ovs_dir)
+ self.__mkdir_p(self.ft_resdir)
def __mkdir_p(self, dirpath):
if not os.path.exists(dirpath):
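With the extra __mkdir_p call, both the per-run OVS log directory and the functest results directory are created when OVSLogger is instantiated. A minimal sketch of how sfc.py drives the logger in this change; the SSH clients are assumed to come from get_ssh_clients:

    import os
    import time
    import ovs_utils

    ovs_logger = ovs_utils.OVSLogger(
        os.path.join(os.getcwd(), 'ovs-logs'),       # dump directory
        '/home/opnfv/functest/results/odl-sfc')      # results dir, now created too

    rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
    ovs_logger.dump_ovs_logs(controller_clients, compute_clients,
                             related_error="TEST 1 [FAILED]",
                             timestamp=time.strftime("%Y%m%d-%H%M%S"))
    ovs_logger.create_artifact_archive()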
diff --git a/tests/functest/odl-sfc/results.py b/tests/functest/odl-sfc/results.py
new file mode 100644
index 00000000..69e5523b
--- /dev/null
+++ b/tests/functest/odl-sfc/results.py
@@ -0,0 +1,53 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import functest.utils.functest_logger as ft_logger
+
+logger = ft_logger.Logger("sfc-results").getLogger()
+
+
+class Results(object):
+
+ def __init__(self, line_length):
+ self.line_length = line_length
+ self.test_result = "FAIL"
+ self.summary = ""
+ self.details = []
+ self.num_tests = 0
+ self.num_tests_failed = 0
+
+ def add_to_summary(self, num_cols, col1, col2=""):
+ if num_cols == 0:
+ self.summary += ("+%s+\n" % (col1 * (self.line_length - 2)))
+ elif num_cols == 1:
+ self.summary += ("| " + col1.ljust(self.line_length - 3) + "|\n")
+ elif num_cols == 2:
+ self.summary += ("| %s" % col1.ljust(7) + "| ")
+ self.summary += (col2.ljust(self.line_length - 12) + "|\n")
+ if col1 in ("FAIL", "PASS"):
+ self.details.append({col2: col1})
+ self.num_tests += 1
+ if col1 == "FAIL":
+ self.num_tests_failed += 1
+
+ def compile_summary(self):
+ success_message = "All the subtests have passed."
+ failure_message = "One or more subtests have failed."
+
+ self.add_to_summary(0, "=")
+ logger.info("\n%s" % self.summary)
+ status = "FAILED"
+ if self.test_result == "PASS":
+ status = "PASS"
+ logger.info(success_message)
+ else:
+ logger.info(failure_message)
+
+ return {"status": status, "details": self.details}
diff --git a/tests/functest/odl-sfc/sfc-test2.py b/tests/functest/odl-sfc/sfc-test2.py
new file mode 100755
index 00000000..b954d0d4
--- /dev/null
+++ b/tests/functest/odl-sfc/sfc-test2.py
@@ -0,0 +1,586 @@
+import argparse
+import os
+import subprocess
+import sys
+import time
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import re
+import json
+import SSHUtils as ssh_utils
+import ovs_utils
+
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+
+args = parser.parse_args()
+
+""" logging configuration """
+logger = ft_logger.Logger("ODL_SFC").getLogger()
+
+FUNCTEST_RESULTS_DIR = '/home/opnfv/functest/results/odl-sfc'
+FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
+REPO_PATH = os.path.join(os.environ['repos_dir'], 'sfc/')
+CLIENT = "client"
+SERVER = "server"
+FLAVOR = "custom"
+IMAGE_NAME = "sf_nsh_colorado"
+IMAGE_FILENAME = "sf_nsh_colorado.qcow2"
+IMAGE_FORMAT = "qcow2"
+IMAGE_DIR = "/home/opnfv/functest/data"
+IMAGE_PATH = os.path.join(IMAGE_DIR, IMAGE_FILENAME)
+IMAGE_URL = "http://artifacts.opnfv.org/sfc/demo/" + IMAGE_FILENAME
+
+# NEUTRON Private Network parameters
+NET_NAME = "example-net"
+SUBNET_NAME = "example-subnet"
+SUBNET_CIDR = "11.0.0.0/24"
+ROUTER_NAME = "example-router"
+SECGROUP_NAME = "example-sg"
+SECGROUP_DESCR = "Example Security group"
+SFC_TEST_DIR = os.path.join(REPO_PATH, "tests/functest/odl-sfc/")
+TACKER_SCRIPT = os.path.join(SFC_TEST_DIR, "sfc_tacker_test2.bash")
+TACKER_VNFD1 = os.path.join(SFC_TEST_DIR, "test2-vnfd1.yaml")
+TACKER_VNFD2 = os.path.join(SFC_TEST_DIR, "test2-vnfd2.yaml")
+ssh_options = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+json_results = {"tests": 2, "failures": 0}
+
+PROXY = {
+ 'ip': '10.20.0.2',
+ 'username': 'root',
+ 'password': 'r00tme'
+}
+
+# run given command locally and return the command's output on success
+
+
+def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
+ pipe = subprocess.Popen(cmd, shell=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, cwd=wdir)
+
+ (output, errors) = pipe.communicate()
+ if output:
+ output = output.strip()
+ if pipe.returncode < 0:
+ logger.error(errors)
+ return False
+ if errors:
+ logger.error(errors)
+ if ignore_stderr:
+ return True
+ else:
+ return False
+
+ if ignore_no_output:
+ if not output:
+ return True
+
+ return output
+
+# run given command on OpenStack controller
+
+
+def run_cmd_on_cntlr(cmd):
+ ip_cntlrs = get_openstack_node_ips("controller")
+ if not ip_cntlrs:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_cntlrs[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+# run given command on OpenStack Compute node
+
+
+def run_cmd_on_compute(cmd):
+ ip_computes = get_openstack_node_ips("compute")
+ if not ip_computes:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_computes[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+# run given command on Fuel Master
+
+
+def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
+ ip = os.environ.get("INSTALLER_IP")
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, ssh_options, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+# run given command on a remote machine (can be a VM)
+
+
+def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
+ ssh_opt_append = "%s -o ConnectTimeout=50 " % ssh_options
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, ssh_opt_append, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+# Get OpenStack Nodes IP Address
+
+
+def get_openstack_node_ips(role):
+ fuel_env = os.environ.get("FUEL_ENV")
+ if fuel_env is not None:
+ cmd = "fuel2 node list -f json -e %s" % fuel_env
+ else:
+ cmd = "fuel2 node list -f json"
+
+ nodes = run_cmd_on_fm(cmd)
+ ips = []
+ nodes = json.loads(nodes)
+ for node in nodes:
+ if role in node["roles"]:
+ ips.append(node["ip"])
+
+ return ips
+
+# Configures IPTABLES on OpenStack Controller
+
+
+def configure_iptables():
+ iptable_cmds = ["iptables -P INPUT ACCEPT",
+ "iptables -t nat -P INPUT ACCEPT",
+ "iptables -A INPUT -m state \
+ --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
+
+ for cmd in iptable_cmds:
+ logger.info("Configuring %s on contoller" % cmd)
+ run_cmd_on_cntlr(cmd)
+
+ return
+
+
+def download_image():
+ if not os.path.isfile(IMAGE_PATH):
+ logger.info("Downloading image")
+ ft_utils.download_url(IMAGE_URL, IMAGE_DIR)
+
+ logger.info("Using old image")
+ return
+
+
+def setup_glance(glance_client):
+ image_id = os_utils.create_glance_image(glance_client,
+ IMAGE_NAME,
+ IMAGE_PATH,
+ disk=IMAGE_FORMAT,
+ container="bare",
+ public=True)
+
+ return image_id
+
+
+def setup_neutron(neutron_client):
+ n_dict = os_utils.create_network_full(neutron_client,
+ NET_NAME,
+ SUBNET_NAME,
+ ROUTER_NAME,
+ SUBNET_CIDR)
+ if not n_dict:
+ logger.error("failed to create neutron network")
+ sys.exit(-1)
+
+ network_id = n_dict["net_id"]
+ return network_id
+
+
+def setup_ingress_egress_secgroup(neutron_client, protocol,
+ min_port=None, max_port=None):
+ secgroups = os_utils.get_security_groups(neutron_client)
+ for sg in secgroups:
+ os_utils.create_secgroup_rule(neutron_client, sg['id'],
+ 'ingress', protocol,
+ port_range_min=min_port,
+ port_range_max=max_port)
+ os_utils.create_secgroup_rule(neutron_client, sg['id'],
+ 'egress', protocol,
+ port_range_min=min_port,
+ port_range_max=max_port)
+ return
+
+
+def setup_security_groups(neutron_client):
+ sg_id = os_utils.create_security_group_full(neutron_client,
+ SECGROUP_NAME, SECGROUP_DESCR)
+ setup_ingress_egress_secgroup(neutron_client, "icmp")
+ setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
+ setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
+ setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
+ return sg_id
+
+
+# JIRA: SFC-52 new function
+def setup_availability_zones(nova_client):
+ computes = os_utils.get_hypervisors(nova_client)
+ az = ["nova::" + computes[0], "nova::" + computes[1]]
+ logger.debug("These are the availability zones %s" % az)
+ return az
+
+
+# JIRA: SFC-52 new function
+def modify_vnfd(tacker_vnfd, az):
+ try:
+ with open(tacker_vnfd, 'r') as stream:
+ lines = stream.readlines()
+ with open(tacker_vnfd, 'w') as stream:
+ for line in lines:
+ stream.write(re.sub('nova$', az, line))
+
+ except Exception, e:
+ logger.error("Problem when changing vnfd %s" % e)
+
+
+# JIRA: SFC-52 new function
+def prepare_tacker_vnfd(nova_client):
+ azs = setup_availability_zones(nova_client)
+ modify_vnfd(TACKER_VNFD1, azs[0])
+ modify_vnfd(TACKER_VNFD2, azs[1])
+
+
+def boot_instance(nova_client, name, flavor, image_id, network_id, sg_id,):
+ logger.info("Creating instance '%s'..." % name)
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
+ "network=%s \n" % (name, flavor, image_id, network_id))
+
+ instance = os_utils.create_instance_and_wait_for_active(flavor,
+ image_id,
+ network_id,
+ name)
+
+ if instance is None:
+ logger.error("Error while booting instance.")
+ sys.exit(-1)
+
+ instance_ip = instance.networks.get(NET_NAME)[0]
+ logger.debug("Instance '%s' got private ip '%s'." %
+ (name, instance_ip))
+
+ logger.info("Adding '%s' to security group %s" % (name, SECGROUP_NAME))
+ os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+
+ return instance_ip
+
+
+def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
+ ping_cmd = 'ping'
+
+ if timeout:
+ ping_cmd = ping_cmd + ' -w %s' % timeout
+
+ grep_cmd = "grep -e 'packet loss' -e rtt"
+
+ if iface is not None:
+ ping_cmd = ping_cmd + ' -I %s' % iface
+
+ ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
+ cmd = ping_cmd + '|' + grep_cmd
+
+ while retries > 0:
+ output = run_cmd(cmd)
+ if not output:
+ return False
+
+ match = re.search('(\d*)% packet loss', output)
+ if not match:
+ return False
+
+ packet_loss = int(match.group(1))
+ if packet_loss == 0:
+ return True
+
+ retries = retries - 1
+
+ return False
+
+
+def get_floating_ips(nova_client, neutron_client):
+ ips = []
+ instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+ for instance in instances:
+ floatip_dic = os_utils.create_floating_ip(neutron_client)
+ floatip = floatip_dic['fip_addr']
+ instance.add_floating_ip(floatip)
+ logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
+ logger.info("Waiting for instance %s:%s to come up" %
+ (instance.name, floatip))
+ if not ping(floatip):
+ logger.info("Instance %s:%s didn't come up" %
+ (instance.name, floatip))
+ sys.exit(1)
+
+ if instance.name == "server":
+ logger.info("Server:%s is reachable" % floatip)
+ server_ip = floatip
+ elif instance.name == "client":
+ logger.info("Client:%s is reachable" % floatip)
+ client_ip = floatip
+ else:
+ logger.info("SF:%s is reachable" % floatip)
+ ips.append(floatip)
+
+ return server_ip, client_ip, ips[1], ips[0]
+
+# Start an HTTP server on a given machine (can be a VM)
+
+
+def start_http_server(ip):
+ cmd = "\'python -m SimpleHTTPServer 80"
+ cmd = cmd + " > /dev/null 2>&1 &\'"
+ return run_cmd_remote(ip, cmd)
+
+# Set a firewall using vxlan_tool.py on a given machine (can be a VM)
+
+
+def vxlan_firewall(sf, iface="eth0", port="22", block=True):
+ cmd = "python vxlan_tool.py"
+ cmd = cmd + " -i " + iface + " -d forward -v off"
+ if block:
+ cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b " + port
+
+ cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
+ run_cmd_remote(sf, cmd)
+
+# Stop the vxlan_tool process if it is running
+
+
+# JIRA: SFC-52 added function
+def vxlan_tool_stop(sf):
+ cmd = "pkill -f vxlan_tool.py"
+ run_cmd_remote(sf, cmd)
+
+# Run netcat on a given machine (can be a VM)
+
+
+def netcat(s_ip, c_ip, port="80", timeout=5):
+ cmd = "nc -zv "
+ cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
+ cmd = cmd + " 2>&1"
+ output = run_cmd_remote(c_ip, cmd)
+ logger.info("%s" % output)
+ return output
+
+
+def is_ssh_blocked(srv_prv_ip, client_ip):
+ res = netcat(srv_prv_ip, client_ip, port="22")
+ match = re.search("nc:.*timed out:.*", res, re.M)
+ if match:
+ return True
+
+ return False
+
+
+def is_http_blocked(srv_prv_ip, client_ip):
+ res = netcat(srv_prv_ip, client_ip, port="80")
+ match = re.search(".* 80 port.* succeeded!", res, re.M)
+ if match:
+ return False
+
+ return True
+
+
+def capture_err_logs(controller_clients, compute_clients, error):
+ ovs_logger = ovs_utils.OVSLogger(
+ os.path.join(os.getcwd(), 'ovs-logs'),
+ FUNCTEST_RESULTS_DIR)
+
+ timestamp = time.strftime("%Y%m%d-%H%M%S")
+ ovs_logger.dump_ovs_logs(controller_clients,
+ compute_clients,
+ related_error=error,
+ timestamp=timestamp)
+ return
+
+
+def update_json_results(name, result):
+ json_results.update({name: result})
+    if result != "Passed":
+ json_results["failures"] += 1
+
+ return
+
+
+def get_ssh_clients(role):
+ clients = []
+ for ip in get_openstack_node_ips(role):
+ s_client = ssh_utils.get_ssh_client(ip,
+ 'root',
+ proxy=PROXY)
+ clients.append(s_client)
+
+ return clients
+
+# Check SSH connectivity to VNFs
+
+
+def check_ssh(ips, retries=100):
+ check = [False, False]
+ logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
+ while retries and not all(check):
+ for index, ip in enumerate(ips):
+ check[index] = run_cmd_remote(ip, "exit")
+
+ if all(check):
+ logger.info("SSH connectivity to the SFs established")
+ return True
+
+ time.sleep(3)
+ retries -= 1
+
+ return False
+
+# Measure the time it takes to update the classification rules
+
+
+def capture_time_log(compute_clients):
+ ovs_logger = ovs_utils.OVSLogger(
+ os.path.join(os.getcwd(), 'ovs-logs'),
+ "test")
+ i = 0
+ first_RSP = ""
+ start_time = time.time()
+ while True:
+ rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
+ if not i:
+ if len(rsps) > 0:
+ first_RSP = rsps[0]
+ i = i + 1
+ else:
+ first_RSP = 0
+ i = i + 1
+ if (len(rsps) > 1):
+ if(first_RSP != rsps[0]):
+ if (rsps[0] == rsps[1]):
+ stop_time = time.time()
+ logger.info("classification rules updated")
+ difference = stop_time - start_time
+ logger.info("It took %s seconds" % difference)
+ break
+ time.sleep(1)
+ return
+
+
+def main():
+ installer_type = os.environ.get("INSTALLER_TYPE")
+ if installer_type != "fuel":
+ logger.error(
+            '\033[91mOnly the Fuel installer type is currently supported\033[0m')
+ sys.exit(1)
+
+ installer_ip = os.environ.get("INSTALLER_IP")
+ if not installer_ip:
+ logger.error(
+ '\033[91minstaller ip is not set\033[0m')
+ logger.error(
+ '\033[91mexport INSTALLER_IP=<ip>\033[0m')
+ sys.exit(1)
+
+ start_time = time.time()
+ status = "PASS"
+ configure_iptables()
+ download_image()
+ _, custom_flv_id = os_utils.get_or_create_flavor(
+ FLAVOR, 1500, 10, 1, public=True)
+ if not custom_flv_id:
+ logger.error("Failed to create custom flavor")
+ sys.exit(1)
+
+ glance_client = os_utils.get_glance_client()
+ neutron_client = os_utils.get_neutron_client()
+ nova_client = os_utils.get_nova_client()
+
+ controller_clients = get_ssh_clients("controller")
+ compute_clients = get_ssh_clients("compute")
+
+ image_id = setup_glance(glance_client)
+ network_id = setup_neutron(neutron_client)
+ sg_id = setup_security_groups(neutron_client)
+ prepare_tacker_vnfd(nova_client)
+
+ boot_instance(
+ nova_client, CLIENT, FLAVOR, image_id, network_id, sg_id,)
+ srv_prv_ip = boot_instance(
+ nova_client, SERVER, FLAVOR, image_id, network_id, sg_id,)
+
+ subprocess.call(TACKER_SCRIPT, shell=True)
+
+ server_ip, client_ip, sf1, sf2 = get_floating_ips(
+ nova_client, neutron_client)
+
+ if not check_ssh([sf1, sf2]):
+ logger.error("Cannot establish SSH connection to the SFs")
+ sys.exit(1)
+
+ logger.info("Starting HTTP server on %s" % server_ip)
+ if not start_http_server(server_ip):
+ logger.error(
+ '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
+ sys.exit(1)
+
+ logger.info("Starting vxlan_tool on %s" % sf2)
+ vxlan_firewall(sf2, block=False)
+ logger.info("Starting vxlan_tool on %s" % sf1)
+ vxlan_firewall(sf1, block=False)
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ time.sleep(100)
+
+ logger.info("Test HTTP")
+ if not is_http_blocked(srv_prv_ip, client_ip):
+ logger.info('\033[92mTEST 1 [PASSED] ==> HTTP WORKS\033[0m')
+ update_json_results("Test 1: HTTP works", "Passed")
+ else:
+ error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ capture_err_logs(controller_clients, compute_clients, error)
+ update_json_results("Test 1: HTTP works", "Failed")
+
+ logger.info("Changing the vxlan_tool to block HTTP traffic")
+
+ # Make SF1 block now http traffic
+ vxlan_tool_stop(sf1)
+ vxlan_firewall(sf1, port="80")
+
+ logger.info("Test HTTP")
+ if is_http_blocked(srv_prv_ip, client_ip):
+ logger.info('\033[92mTEST 2 [PASSED] ==> HTTP Blocked\033[0m')
+ update_json_results("Test 2: HTTP Blocked", "Passed")
+ else:
+ error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ capture_err_logs(controller_clients, compute_clients, error)
+ update_json_results("Test 2: HTTP Blocked", "Failed")
+
+ if json_results["failures"]:
+ status = "FAIL"
+ logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
+ status, json_results["failures"]))
+
+ if args.report:
+ stop_time = time.time()
+ logger.debug("Promise Results json: " + str(json_results))
+ ft_utils.push_results_to_db("sfc",
+ "sfc_one_chain_two_service_functions"
+ "_different_computes",
+ start_time,
+ stop_time,
+ status,
+ json_results)
+
+ if status == "PASS":
+ logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
+ sys.exit(0)
+
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/functest/odl-sfc/sfc.py b/tests/functest/odl-sfc/sfc.py
index 2aaa4eb4..de233869 100755
--- a/tests/functest/odl-sfc/sfc.py
+++ b/tests/functest/odl-sfc/sfc.py
@@ -1,16 +1,17 @@
import argparse
import os
-import subprocess
import sys
import time
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
-import re
-import json
-import SSHUtils as ssh_utils
+import functest.utils.openstack_tacker as os_tacker
+import threading
import ovs_utils
-import thread
+import utils as test_utils
+import config as sfc_config
+from results import Results
+
parser = argparse.ArgumentParser()
@@ -23,416 +24,24 @@ args = parser.parse_args()
""" logging configuration """
logger = ft_logger.Logger("ODL_SFC").getLogger()
-FUNCTEST_RESULTS_DIR = '/home/opnfv/functest/results/odl-sfc'
-FUNCTEST_REPO = ft_utils.FUNCTEST_REPO
-REPO_PATH = os.path.join(os.environ['repos_dir'], 'sfc/')
CLIENT = "client"
SERVER = "server"
-FLAVOR = "custom"
-IMAGE_NAME = "sf_nsh_colorado"
-IMAGE_FILENAME = "sf_nsh_colorado.qcow2"
-IMAGE_FORMAT = "qcow2"
-IMAGE_DIR = "/home/opnfv/functest/data"
-IMAGE_PATH = os.path.join(IMAGE_DIR, IMAGE_FILENAME)
-IMAGE_URL = "http://artifacts.opnfv.org/sfc/demo/" + IMAGE_FILENAME
-
-# NEUTRON Private Network parameters
-NET_NAME = "example-net"
-SUBNET_NAME = "example-subnet"
-SUBNET_CIDR = "11.0.0.0/24"
-ROUTER_NAME = "example-router"
-SECGROUP_NAME = "example-sg"
-SECGROUP_DESCR = "Example Security group"
-SFC_TEST_DIR = os.path.join(REPO_PATH, "tests/functest/odl-sfc/")
-TACKER_SCRIPT = os.path.join(SFC_TEST_DIR, "sfc_tacker.bash")
-TACKER_CHANGECLASSI = os.path.join(SFC_TEST_DIR, "sfc_change_classi.bash")
-ssh_options = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-json_results = {"tests": 4, "failures": 0}
+COMMON_CONFIG = sfc_config.CommonConfig()
+TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
PROXY = {
- 'ip': '10.20.0.2',
- 'username': 'root',
- 'password': 'r00tme'
+ 'ip': COMMON_CONFIG.fuel_master_ip,
+ 'username': COMMON_CONFIG.fuel_master_uname,
+ 'password': COMMON_CONFIG.fuel_master_passwd
}
-# run given command locally and return commands output if success
-
-
-def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
- pipe = subprocess.Popen(cmd, shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, cwd=wdir)
-
- (output, errors) = pipe.communicate()
- if output:
- output = output.strip()
- if pipe.returncode < 0:
- logger.error(errors)
- return False
- if errors:
- logger.error(errors)
- if ignore_stderr:
- return True
- else:
- return False
-
- if ignore_no_output:
- if not output:
- return True
-
- return output
-
-# run given command on OpenStack controller
-
-
-def run_cmd_on_cntlr(cmd):
- ip_cntlrs = get_openstack_node_ips("controller")
- if not ip_cntlrs:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_cntlrs[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-# run given command on OpenStack Compute node
-
-
-def run_cmd_on_compute(cmd):
- ip_computes = get_openstack_node_ips("compute")
- if not ip_computes:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (ssh_options, ip_computes[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-# run given command on Fuel Master
-
-
-def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
- ip = os.environ.get("INSTALLER_IP")
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, ssh_options, username, ip, cmd)
- return run_cmd(ssh_cmd)
-
-# run given command on Remote Machine, Can be VM
-
-
-def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
- ssh_opt_append = "%s -o ConnectTimeout=50 " % ssh_options
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, ssh_opt_append, username, ip, cmd)
- return run_cmd(ssh_cmd)
-
-# Get OpenStack Nodes IP Address
-
-
-def get_openstack_node_ips(role):
- fuel_env = os.environ.get("FUEL_ENV")
- if fuel_env is not None:
- cmd = "fuel2 node list -f json -e %s" % fuel_env
- else:
- cmd = "fuel2 node list -f json"
-
- nodes = run_cmd_on_fm(cmd)
- ips = []
- nodes = json.loads(nodes)
- for node in nodes:
- if role in node["roles"]:
- ips.append(node["ip"])
-
- return ips
-
-# Configures IPTABLES on OpenStack Controller
-
-
-def configure_iptables():
- iptable_cmds = ["iptables -P INPUT ACCEPT",
- "iptables -t nat -P INPUT ACCEPT",
- "iptables -A INPUT -m state \
- --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
-
- for cmd in iptable_cmds:
- logger.info("Configuring %s on contoller" % cmd)
- run_cmd_on_cntlr(cmd)
-
- return
-
-
-def download_image():
- if not os.path.isfile(IMAGE_PATH):
- logger.info("Downloading image")
- ft_utils.download_url(IMAGE_URL, IMAGE_DIR)
-
- logger.info("Using old image")
- return
-
-
-def setup_glance(glance_client):
- image_id = os_utils.create_glance_image(glance_client,
- IMAGE_NAME,
- IMAGE_PATH,
- disk=IMAGE_FORMAT,
- container="bare",
- public=True)
-
- return image_id
-
-
-def setup_neutron(neutron_client):
- n_dict = os_utils.create_network_full(neutron_client,
- NET_NAME,
- SUBNET_NAME,
- ROUTER_NAME,
- SUBNET_CIDR)
- if not n_dict:
- logger.error("failed to create neutron network")
- sys.exit(-1)
-
- network_id = n_dict["net_id"]
- return network_id
-
-
-def setup_ingress_egress_secgroup(neutron_client, protocol,
- min_port=None, max_port=None):
- secgroups = os_utils.get_security_groups(neutron_client)
- for sg in secgroups:
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
- return
-
-
-def setup_security_groups(neutron_client):
- sg_id = os_utils.create_security_group_full(neutron_client,
- SECGROUP_NAME, SECGROUP_DESCR)
- setup_ingress_egress_secgroup(neutron_client, "icmp")
- setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
- setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
- setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
- return sg_id
-
-
-def boot_instance(nova_client, name, flavor, image_id, network_id, sg_id):
- logger.info("Creating instance '%s'..." % name)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
- "network=%s \n" % (name, flavor, image_id, network_id))
-
- instance = os_utils.create_instance_and_wait_for_active(flavor,
- image_id,
- network_id,
- name)
-
- if instance is None:
- logger.error("Error while booting instance.")
- sys.exit(-1)
-
- instance_ip = instance.networks.get(NET_NAME)[0]
- logger.debug("Instance '%s' got private ip '%s'." %
- (name, instance_ip))
-
- logger.info("Adding '%s' to security group %s" % (name, SECGROUP_NAME))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
- return instance_ip
-
-
-def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
- ping_cmd = 'ping'
-
- if timeout:
- ping_cmd = ping_cmd + ' -w %s' % timeout
-
- grep_cmd = "grep -e 'packet loss' -e rtt"
-
- if iface is not None:
- ping_cmd = ping_cmd + ' -I %s' % iface
-
- ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
- cmd = ping_cmd + '|' + grep_cmd
-
- while retries > 0:
- output = run_cmd(cmd)
- if not output:
- return False
-
- match = re.search('(\d*)% packet loss', output)
- if not match:
- return False
-
- packet_loss = int(match.group(1))
- if packet_loss == 0:
- return True
-
- retries = retries - 1
-
- return False
-
-
-def get_floating_ips(nova_client, neutron_client):
- ips = []
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
- for instance in instances:
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
- instance.add_floating_ip(floatip)
- logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
- logger.info("Waiting for instance %s:%s to come up" %
- (instance.name, floatip))
- if not ping(floatip):
- logger.info("Instance %s:%s didn't come up" %
- (instance.name, floatip))
- sys.exit(1)
-
- if instance.name == "server":
- logger.info("Server:%s is reachable" % floatip)
- server_ip = floatip
- elif instance.name == "client":
- logger.info("Client:%s is reachable" % floatip)
- client_ip = floatip
- else:
- logger.info("SF:%s is reachable" % floatip)
- ips.append(floatip)
-
- return server_ip, client_ip, ips[1], ips[0]
-
-# Start http server on a give machine, Can be VM
-
-
-def start_http_server(ip):
- cmd = "\'python -m SimpleHTTPServer 80"
- cmd = cmd + " > /dev/null 2>&1 &\'"
- return run_cmd_remote(ip, cmd)
-
-# Set firewall using vxlan_tool.py on a give machine, Can be VM
-
-
-def vxlan_firewall(sf, iface="eth0", port="22", block=True):
- cmd = "python vxlan_tool.py"
- cmd = cmd + " -i " + iface + " -d forward -v off"
- if block:
- cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b " + port
-
- cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
- run_cmd_remote(sf, cmd)
-
-# Run netcat on a give machine, Can be VM
-
-
-def netcat(s_ip, c_ip, port="80", timeout=5):
- cmd = "nc -zv "
- cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
- cmd = cmd + " 2>&1"
- output = run_cmd_remote(c_ip, cmd)
- logger.info("%s" % output)
- return output
-
-
-def is_ssh_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="22")
- match = re.search("nc:.*timed out:.*", res, re.M)
- if match:
- return True
-
- return False
-
-
-def is_http_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="80")
- match = re.search(".* 80 port.* succeeded!", res, re.M)
- if match:
- return False
-
- return True
-
-
-def capture_err_logs(controller_clients, compute_clients, error):
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- FUNCTEST_RESULTS_DIR)
-
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
- return
-
-
-def update_json_results(name, result):
- json_results.update({name: result})
- if result is not "Passed":
- json_results["failures"] += 1
-
- return
-
-
-def get_ssh_clients(role):
- clients = []
- for ip in get_openstack_node_ips(role):
- s_client = ssh_utils.get_ssh_client(ip,
- 'root',
- proxy=PROXY)
- clients.append(s_client)
-
- return clients
-
-# Check SSH connectivity to VNFs
-
-
-def check_ssh(ips, retries=100):
- check = [False, False]
- logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
- while retries and not all(check):
- for index, ip in enumerate(ips):
- check[index] = run_cmd_remote(ip, "exit")
-
- if all(check):
- logger.info("SSH connectivity to the SFs established")
- return True
-
- time.sleep(3)
- retries -= 1
-
- return False
-
-# Measure the time it takes to update the classification rules
-
-
-def capture_time_log(compute_clients):
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- "test")
- i = 0
- first_RSP = ""
- start_time = time.time()
- while True:
- rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
- if not i:
- if len(rsps) > 0:
- first_RSP = rsps[0]
- i = i + 1
- else:
- first_RSP = 0
- i = i + 1
- if (len(rsps) > 1):
- if(first_RSP != rsps[0]):
- if (rsps[0] == rsps[1]):
- stop_time = time.time()
- logger.info("classification rules updated")
- difference = stop_time - start_time
- logger.info("It took %s seconds" % difference)
- break
- time.sleep(1)
- return
-
def main():
+ results = Results(COMMON_CONFIG.line_length)
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
installer_type = os.environ.get("INSTALLER_TYPE")
if installer_type != "fuel":
logger.error(
@@ -449,10 +58,11 @@ def main():
start_time = time.time()
status = "PASS"
- configure_iptables()
- download_image()
+ test_utils.configure_iptables()
+ test_utils.download_image(COMMON_CONFIG.url,
+ COMMON_CONFIG.image_path)
_, custom_flv_id = os_utils.get_or_create_flavor(
- FLAVOR, 1500, 10, 1, public=True)
+ COMMON_CONFIG.flavor, 1500, 10, 1, public=True)
if not custom_flv_id:
logger.error("Failed to create custom flavor")
sys.exit(1)
@@ -460,114 +70,204 @@ def main():
glance_client = os_utils.get_glance_client()
neutron_client = os_utils.get_neutron_client()
nova_client = os_utils.get_nova_client()
+ tacker_client = os_tacker.get_tacker_client()
+
+ controller_clients = test_utils.get_ssh_clients("controller", PROXY)
+ compute_clients = test_utils.get_ssh_clients("compute", PROXY)
+
+ ovs_logger = ovs_utils.OVSLogger(
+ os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
+ COMMON_CONFIG.functest_results_dir)
+
+ image_id = os_utils.create_glance_image(glance_client,
+ COMMON_CONFIG.image_name,
+ COMMON_CONFIG.image_path,
+ COMMON_CONFIG.image_format,
+ public=True)
- controller_clients = get_ssh_clients("controller")
- compute_clients = get_ssh_clients("compute")
+ network_id = test_utils.setup_neutron(neutron_client,
+ TESTCASE_CONFIG.net_name,
+ TESTCASE_CONFIG.subnet_name,
+ TESTCASE_CONFIG.router_name,
+ TESTCASE_CONFIG.subnet_cidr)
+
+ sg_id = test_utils.create_security_groups(neutron_client,
+ TESTCASE_CONFIG.secgroup_name,
+ TESTCASE_CONFIG.secgroup_descr)
+
+ test_utils.create_instance(
+ nova_client, CLIENT, COMMON_CONFIG.flavor, image_id,
+ network_id, sg_id)
+ srv_instance = test_utils.create_instance(
+ nova_client, SERVER, COMMON_CONFIG.flavor, image_id,
+ network_id, sg_id)
+
+ srv_prv_ip = srv_instance.networks.get(TESTCASE_CONFIG.net_name)[0]
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ TESTCASE_CONFIG.test_vnfd_red)
+ os_tacker.create_vnfd(
+ tacker_client,
+ tosca_file=tosca_file)
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ TESTCASE_CONFIG.test_vnfd_blue)
+ os_tacker.create_vnfd(
+ tacker_client,
+ tosca_file=tosca_file)
+
+ os_tacker.create_vnf(
+ tacker_client, 'testVNF1', vnfd_name='test-vnfd1')
+ os_tacker.create_vnf(
+ tacker_client, 'testVNF2', vnfd_name='test-vnfd2')
- image_id = setup_glance(glance_client)
- network_id = setup_neutron(neutron_client)
- sg_id = setup_security_groups(neutron_client)
+ try:
+ os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
+ os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF2')
+ except:
+ logger.error('ERROR while booting vnfs')
+ sys.exit(1)
- boot_instance(
- nova_client, CLIENT, FLAVOR, image_id, network_id, sg_id)
- srv_prv_ip = boot_instance(
- nova_client, SERVER, FLAVOR, image_id, network_id, sg_id)
+ os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'])
+ os_tacker.create_sfc(tacker_client, 'blue', chain_vnf_names=['testVNF2'])
- subprocess.call(TACKER_SCRIPT, shell=True)
+ os_tacker.create_sfc_classifier(
+ tacker_client, 'red_http', sfc_name='red',
+ match={
+ 'source_port': 0,
+ 'dest_port': 80,
+ 'protocol': 6
+ })
+
+ os_tacker.create_sfc_classifier(
+ tacker_client, 'red_ssh', sfc_name='red',
+ match={
+ 'source_port': 0,
+ 'dest_port': 22,
+ 'protocol': 6
+ })
+
+ logger.info(test_utils.run_cmd('tacker sfc-list'))
+ logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
# Start measuring the time it takes to implement the classification rules
+ t1 = threading.Thread(target=test_utils.capture_time_log,
+ args=(ovs_logger, compute_clients,))
try:
- thread.start_new_thread(capture_time_log, (compute_clients,))
+ t1.start()
except Exception, e:
logger.error("Unable to start the thread that counts time %s" % e)
- server_ip, client_ip, sf1, sf2 = get_floating_ips(
+ server_ip, client_ip, sf1, sf2 = test_utils.get_floating_ips(
nova_client, neutron_client)
- if not check_ssh([sf1, sf2]):
+ if not test_utils.check_ssh([sf1, sf2]):
logger.error("Cannot establish SSH connection to the SFs")
sys.exit(1)
logger.info("Starting HTTP server on %s" % server_ip)
- if not start_http_server(server_ip):
+ if not test_utils.start_http_server(server_ip):
logger.error(
'\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
sys.exit(1)
logger.info("Starting HTTP firewall on %s" % sf2)
- vxlan_firewall(sf2, port="80")
+ test_utils.vxlan_firewall(sf2, port="80")
logger.info("Starting SSH firewall on %s" % sf1)
- vxlan_firewall(sf1, port="22")
+ test_utils.vxlan_firewall(sf1, port="22")
logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(120)
+ t1.join()
logger.info("Test SSH")
- if is_ssh_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 1 [PASSED] ==> SSH BLOCKED\033[0m')
- update_json_results("Test 1: SSH Blocked", "Passed")
+ if test_utils.is_ssh_blocked(srv_prv_ip, client_ip):
+ results.add_to_summary(2, "PASS", "SSH Blocked")
else:
error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 1: SSH Blocked", "Failed")
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "SSH Blocked")
logger.info("Test HTTP")
- if not is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 2 [PASSED] ==> HTTP WORKS\033[0m')
- update_json_results("Test 2: HTTP works", "Passed")
+ if not test_utils.is_http_blocked(srv_prv_ip, client_ip):
+ results.add_to_summary(2, "PASS", "HTTP works")
else:
error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 2: HTTP works", "Failed")
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "HTTP works")
logger.info("Changing the classification")
- subprocess.call(TACKER_CHANGECLASSI, shell=True)
+ os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_http')
+ os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_ssh')
+
+ os_tacker.create_sfc_classifier(
+ tacker_client, 'blue_http', sfc_name='blue',
+ match={
+ 'source_port': 0,
+ 'dest_port': 80,
+ 'protocol': 6
+ })
+
+ os_tacker.create_sfc_classifier(
+ tacker_client, 'blue_ssh', sfc_name='blue',
+ match={
+ 'source_port': 0,
+ 'dest_port': 22,
+ 'protocol': 6
+ })
+
+ logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
# Start measuring the time it takes to implement the classification rules
+ t2 = threading.Thread(target=test_utils.capture_time_log,
+ args=(ovs_logger, compute_clients,))
try:
- thread.start_new_thread(capture_time_log, (compute_clients,))
+ t2.start()
except Exception, e:
logger.error("Unable to start the thread that counts time %s" % e)
logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(100)
+ t2.join()
logger.info("Test HTTP")
- if is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 3 [PASSED] ==> HTTP Blocked\033[0m')
- update_json_results("Test 3: HTTP Blocked", "Passed")
+ if test_utils.is_http_blocked(srv_prv_ip, client_ip):
+ results.add_to_summary(2, "PASS", "HTTP Blocked")
else:
error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 3: HTTP Blocked", "Failed")
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "HTTP Blocked")
logger.info("Test SSH")
- if not is_ssh_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 4 [PASSED] ==> SSH Works\033[0m')
- update_json_results("Test 4: SSH Works", "Passed")
+ if not test_utils.is_ssh_blocked(srv_prv_ip, client_ip):
+ results.add_to_summary(2, "PASS", "SSH works")
else:
error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
logger.error(error)
- capture_err_logs(controller_clients, compute_clients, error)
- update_json_results("Test 4: SSH Works", "Failed")
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "SSH works")
- if json_results["failures"]:
+ if results.num_tests_failed > 0:
status = "FAIL"
logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
- status, json_results["failures"]))
+ status, results.num_tests_failed))
if args.report:
+        details = results.details
stop_time = time.time()
- logger.debug("Promise Results json: " + str(json_results))
+ logger.debug("Promise Results json: " + str(details))
ft_utils.push_results_to_db("sfc",
- "functest-odl-sfc",
+ "sfc_two_chains_SSH_and_HTTP",
start_time,
stop_time,
status,
- json_results)
+ details)
+ ovs_logger.create_artifact_archive()
if status == "PASS":
logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
@@ -575,5 +275,6 @@ def main():
sys.exit(1)
+
if __name__ == '__main__':
main()
diff --git a/tests/functest/odl-sfc/sfc_change_classi.bash b/tests/functest/odl-sfc/sfc_change_classi.bash
deleted file mode 100755
index 70375ab3..00000000
--- a/tests/functest/odl-sfc/sfc_change_classi.bash
+++ /dev/null
@@ -1,7 +0,0 @@
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_ssh
-
-tacker sfc-classifier-create --name blue_http --chain blue --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name blue_ssh --chain blue --match source_port=0,dest_port=22,protocol=6
-
-tacker sfc-classifier-list
diff --git a/tests/functest/odl-sfc/sfc_tacker.bash b/tests/functest/odl-sfc/sfc_tacker_test2.bash
index 690d5f52..04e6506e 100755
--- a/tests/functest/odl-sfc/sfc_tacker.bash
+++ b/tests/functest/odl-sfc/sfc_tacker_test2.bash
@@ -2,8 +2,8 @@
BASEDIR=`dirname $0`
#import VNF descriptor
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd1.yaml
-tacker vnfd-create --vnfd-file ${BASEDIR}/test-vnfd2.yaml
+tacker vnfd-create --vnfd-file ${BASEDIR}/test2-vnfd1.yaml
+tacker vnfd-create --vnfd-file ${BASEDIR}/test2-vnfd2.yaml
#create instances of the imported VNF
tacker vnf-create --name testVNF1 --vnfd-name test-vnfd1
@@ -20,12 +20,10 @@ while $key;do
done
#create service chain
-tacker sfc-create --name red --chain testVNF1
-tacker sfc-create --name blue --chain testVNF2
+tacker sfc-create --name red --chain testVNF1,testVNF2
#create classifier
tacker sfc-classifier-create --name red_http --chain red --match source_port=0,dest_port=80,protocol=6
-tacker sfc-classifier-create --name red_ssh --chain red --match source_port=0,dest_port=22,protocol=6
tacker sfc-list
tacker sfc-classifier-list
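This script is still invoked directly by sfc-test2.py, while sfc.py now performs the equivalent setup through the tacker client wrappers in functest.utils.openstack_tacker (see the sfc.py hunk above). A condensed sketch of the single-chain setup this script creates, using those same wrappers:

    import functest.utils.openstack_tacker as os_tacker

    tacker_client = os_tacker.get_tacker_client()
    os_tacker.create_vnfd(tacker_client, tosca_file='test2-vnfd1.yaml')
    os_tacker.create_vnfd(tacker_client, tosca_file='test2-vnfd2.yaml')
    os_tacker.create_vnf(tacker_client, 'testVNF1', vnfd_name='test-vnfd1')
    os_tacker.create_vnf(tacker_client, 'testVNF2', vnfd_name='test-vnfd2')
    os_tacker.create_sfc(tacker_client, 'red',
                         chain_vnf_names=['testVNF1', 'testVNF2'])
    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={'source_port': 0, 'dest_port': 80, 'protocol': 6})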
diff --git a/tests/functest/odl-sfc/test2-vnfd1.yaml b/tests/functest/odl-sfc/test2-vnfd1.yaml
new file mode 100644
index 00000000..5c672e38
--- /dev/null
+++ b/tests/functest/odl-sfc/test2-vnfd1.yaml
@@ -0,0 +1,31 @@
+template_name: test-vnfd1
+description: firewall1-example
+
+service_properties:
+ Id: firewall1-vnfd
+ vendor: tacker
+ version: 1
+ type:
+ - firewall1
+vdus:
+ vdu1:
+ id: vdu1
+ vm_image: sf_nsh_colorado
+ instance_type: custom
+ service_type: firewall1
+
+ network_interfaces:
+ management:
+ network: example-net
+ management: true
+
+ placement_policy:
+ availability_zone: nova
+
+ auto-scaling: noop
+ monitoring_policy: noop
+ failure_policy: respawn
+
+ config:
+ param0: key0
+ param1: key1
diff --git a/tests/functest/odl-sfc/test2-vnfd2.yaml b/tests/functest/odl-sfc/test2-vnfd2.yaml
new file mode 100644
index 00000000..8a570ab9
--- /dev/null
+++ b/tests/functest/odl-sfc/test2-vnfd2.yaml
@@ -0,0 +1,31 @@
+template_name: test-vnfd2
+description: firewall2-example
+
+service_properties:
+ Id: firewall2-vnfd
+ vendor: tacker
+ version: 1
+ type:
+ - firewall2
+vdus:
+ vdu1:
+ id: vdu1
+ vm_image: sf_nsh_colorado
+ instance_type: custom
+ service_type: firewall2
+
+ network_interfaces:
+ management:
+ network: example-net
+ management: true
+
+ placement_policy:
+ availability_zone: nova
+
+ auto-scaling: noop
+ monitoring_policy: noop
+ failure_policy: respawn
+
+ config:
+ param0: key0
+ param1: key1
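Both VNFDs are committed with placement_policy.availability_zone set to plain nova; sfc-test2.py rewrites that value at run time (prepare_tacker_vnfd/modify_vnfd above) so each SF lands on a different compute node. A minimal sketch of the substitution, with the hypervisor names shown only as an example:

    import re
    import functest.utils.openstack_utils as os_utils

    nova_client = os_utils.get_nova_client()
    computes = os_utils.get_hypervisors(nova_client)   # e.g. ['node-2', 'node-3']
    az = "nova::" + computes[0]

    with open("test2-vnfd1.yaml") as stream:
        lines = stream.readlines()
    with open("test2-vnfd1.yaml", "w") as stream:
        for line in lines:
            # 'availability_zone: nova' becomes 'availability_zone: nova::<compute>'
            stream.write(re.sub('nova$', az, line))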
diff --git a/tests/functest/odl-sfc/utils.py b/tests/functest/odl-sfc/utils.py
new file mode 100644
index 00000000..f0b81760
--- /dev/null
+++ b/tests/functest/odl-sfc/utils.py
@@ -0,0 +1,388 @@
+import os
+import subprocess
+import time
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import re
+import json
+import SSHUtils as ssh_utils
+import functools
+
+
+logger = ft_logger.Logger("sfc_test_utils").getLogger()
+SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+FUNCTEST_RESULTS_DIR = os.path.join("home", "opnfv",
+ "functest", "results", "odl-sfc")
+
+
+def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
+ """run given command locally and return commands output if success"""
+ pipe = subprocess.Popen(cmd, shell=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, cwd=wdir)
+
+ (output, errors) = pipe.communicate()
+ if output:
+ output = output.strip()
+ if pipe.returncode < 0:
+ logger.error(errors)
+ return False
+ if errors:
+ logger.error(errors)
+ return ignore_stderr
+
+ if ignore_no_output and not output:
+ return True
+
+ return output
+
+
+def run_cmd_on_controller(cmd):
+ """run given command on OpenStack controller"""
+ ip_controllers = get_openstack_node_ips("controller")
+ if not ip_controllers:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_controllers[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+
+def run_cmd_on_compute(cmd):
+ """run given command on OpenStack Compute node"""
+ ip_computes = get_openstack_node_ips("compute")
+ if not ip_computes:
+ return None
+
+ ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_computes[0], cmd)
+ return run_cmd_on_fm(ssh_cmd)
+
+
+def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
+ """run given command on Fuel Master"""
+ ip = os.environ.get("INSTALLER_IP")
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, SSH_OPTIONS, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+
+def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
+ """run given command on Remote Machine, Can be VM"""
+ ssh_opt_append = "%s -o ConnectTimeout=50 " % SSH_OPTIONS
+ ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
+ passwd, ssh_opt_append, username, ip, cmd)
+ return run_cmd(ssh_cmd)
+
+
+def get_openstack_node_ips(role):
+ """Get OpenStack Nodes IP Address"""
+ fuel_env = os.environ.get("FUEL_ENV")
+ if fuel_env is not None:
+ cmd = "fuel2 node list -f json -e %s" % fuel_env
+ else:
+ cmd = "fuel2 node list -f json"
+
+ nodes = run_cmd_on_fm(cmd)
+ ips = []
+ nodes = json.loads(nodes)
+ for node in nodes:
+ if role in node["roles"]:
+ ips.append(node["ip"])
+
+ return ips
+
+
+def configure_iptables():
+ """Configures IPTABLES on OpenStack Controller"""
+ iptable_cmds = ["iptables -P INPUT ACCEPT",
+ "iptables -t nat -P INPUT ACCEPT",
+ "iptables -A INPUT -m state \
+ --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
+
+ for cmd in iptable_cmds:
+ logger.info("Configuring %s on contoller" % cmd)
+ run_cmd_on_controller(cmd)
+
+ return
+
+
+def download_image(url, image_path):
+ image_filename = os.path.basename(image_path)
+ image_url = "%s/%s" % (url, image_filename)
+ image_dir = os.path.dirname(image_path)
+ if not os.path.isfile(image_path):
+ logger.info("Downloading image")
+ ft_utils.download_url(image_url, image_dir)
+ return None
+
+ logger.info("Using old image")
+ return
+
+
+def setup_neutron(neutron_client, net, subnet, router, subnet_cidr):
+ n_dict = os_utils.create_network_full(neutron_client,
+ net,
+ subnet,
+ router,
+ subnet_cidr)
+ if not n_dict:
+ logger.error("failed to create neutron network")
+ return False
+
+ return n_dict["net_id"]
+
+
+def setup_ingress_egress_secgroup(neutron_client, protocol,
+ min_port=None, max_port=None):
+ secgroups = os_utils.get_security_groups(neutron_client)
+ for sg in secgroups:
+ os_utils.create_secgroup_rule(neutron_client, sg['id'],
+ 'ingress', protocol,
+ port_range_min=min_port,
+ port_range_max=max_port)
+ os_utils.create_secgroup_rule(neutron_client, sg['id'],
+ 'egress', protocol,
+ port_range_min=min_port,
+ port_range_max=max_port)
+ return
+
+
+def create_security_groups(neutron_client, secgroup_name, secgroup_descr):
+ sg_id = os_utils.create_security_group_full(neutron_client,
+ secgroup_name, secgroup_descr)
+ setup_ingress_egress_secgroup(neutron_client, "icmp")
+ setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
+ setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
+
+ return sg_id
+
+
+def create_instance(nova_client, name, flavor, image_id, network_id, sg_id,
+ secgroup_name=None, fixed_ip=None,
+ compute_node='', userdata=None, files=None):
+ logger.info("Creating instance '%s'..." % name)
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
+ " network=%s\n secgroup=%s \n hypervisor=%s \n"
+ " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
+ % (name, flavor, image_id, network_id, sg_id,
+ compute_node, fixed_ip, files, userdata))
+ instance = os_utils.create_instance_and_wait_for_active(
+ flavor,
+ image_id,
+ network_id,
+ name,
+ config_drive=True,
+ userdata=userdata,
+ av_zone=compute_node,
+ fixed_ip=fixed_ip,
+ files=files)
+
+ if instance is None:
+ logger.error("Error while booting instance.")
+ return None
+
+ if secgroup_name:
+ logger.debug("Adding '%s' to security group '%s'..."
+ % (name, secgroup_name))
+ else:
+ logger.debug("Adding '%s' to security group '%s'..."
+ % (name, sg_id))
+ os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+
+ return instance
+
+
+def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
+ ping_cmd = 'ping'
+
+ if timeout:
+ ping_cmd = ping_cmd + ' -w %s' % timeout
+
+ grep_cmd = "grep -e 'packet loss' -e rtt"
+
+ if iface is not None:
+ ping_cmd = ping_cmd + ' -I %s' % iface
+
+ ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
+ cmd = ping_cmd + '|' + grep_cmd
+
+ while retries > 0:
+ output = run_cmd(cmd)
+ if not output:
+ return False
+
+ match = re.search('(\d*)% packet loss', output)
+ if not match:
+ return False
+
+ packet_loss = int(match.group(1))
+ if packet_loss == 0:
+ return True
+
+ retries -= 1
+
+ return False
+
+
+def get_floating_ips(nova_client, neutron_client):
+ ips = []
+ instances = nova_client.servers.list(search_opts={'all_tenants': 1})
+ for instance in instances:
+ floatip_dic = os_utils.create_floating_ip(neutron_client)
+ floatip = floatip_dic['fip_addr']
+ instance.add_floating_ip(floatip)
+ logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
+ logger.info("Waiting for instance %s:%s to come up" %
+ (instance.name, floatip))
+ if not ping(floatip):
+ logger.info("Instance %s:%s didn't come up" %
+ (instance.name, floatip))
+ return None
+
+ if instance.name == "server":
+ logger.info("Server:%s is reachable" % floatip)
+ server_ip = floatip
+ elif instance.name == "client":
+ logger.info("Client:%s is reachable" % floatip)
+ client_ip = floatip
+ else:
+ logger.info("SF:%s is reachable" % floatip)
+ ips.append(floatip)
+
+ return server_ip, client_ip, ips[1], ips[0]
+
+
+def start_http_server(ip):
+ """Start http server on a given machine, Can be VM"""
+ cmd = "\'python -m SimpleHTTPServer 80"
+ cmd = cmd + " > /dev/null 2>&1 &\'"
+ run_cmd_remote(ip, cmd)
+ output = run_cmd_remote(ip, "ps aux|grep SimpleHTTPServer")
+ if not output:
+ logger.error("Failed to start http server")
+ return False
+
+ logger.info(output)
+ return True
+
+
+def vxlan_firewall(sf, iface="eth0", port="22", block=True):
+ """Set firewall using vxlan_tool.py on a given machine, Can be VM"""
+ cmd = "python vxlan_tool.py -i %s -d forward -v off" % iface
+ if block:
+ cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b %s" % port
+
+ cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
+ run_cmd_remote(sf, cmd)
+
+
+def netcat(s_ip, c_ip, port="80", timeout=5):
+ """Run netcat on a give machine, Can be VM"""
+ cmd = "nc -zv "
+ cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
+ cmd = cmd + " 2>&1"
+ output = run_cmd_remote(c_ip, cmd)
+ logger.info("%s" % output)
+ return output
+
+
+def is_ssh_blocked(srv_prv_ip, client_ip):
+ res = netcat(srv_prv_ip, client_ip, port="22")
+ match = re.search("nc:.*timed out:.*", res, re.M)
+ if match:
+ return True
+
+ return False
+
+
+def is_http_blocked(srv_prv_ip, client_ip):
+ res = netcat(srv_prv_ip, client_ip, port="80")
+ match = re.search(".* 80 port.* succeeded!", res, re.M)
+ if match:
+ return False
+
+ return True
+
+
+def capture_err_logs(ovs_logger, controller_clients, compute_clients, error):
+ timestamp = time.strftime("%Y%m%d-%H%M%S")
+ ovs_logger.dump_ovs_logs(controller_clients,
+ compute_clients,
+ related_error=error,
+ timestamp=timestamp)
+ return
+
+
+def get_ssh_clients(role, proxy):
+ clients = []
+ for ip in get_openstack_node_ips(role):
+ s_client = ssh_utils.get_ssh_client(ip, 'root', proxy=proxy)
+ clients.append(s_client)
+
+ return clients
+
+
+def check_ssh(ips, retries=100):
+ """Check SSH connectivity to VNFs"""
+ check = [False, False]
+ logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
+ while retries and not all(check):
+ for index, ip in enumerate(ips):
+ check[index] = run_cmd_remote(ip, "exit")
+
+ if all(check):
+ logger.info("SSH connectivity to the SFs established")
+ return True
+
+ time.sleep(3)
+ retries -= 1
+
+ return False
+
+
+# Measure the time it takes to update the classification rules
+def timethis(func):
+ @functools.wraps(func)
+ def timed(*args, **kwargs):
+ ts = time.time()
+ result = func(*args, **kwargs)
+ te = time.time()
+ elapsed = '{0}'.format(te - ts)
+ logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
+ f=func.__name__, a=args, kw=kwargs, t=elapsed))
+ return result
+ return timed
+
+
+@timethis
+def capture_time_log(ovs_logger, compute_clients, timeout=200):
+ rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
+ first_RSP = rsps[0] if len(rsps) > 0 else ''
+ while not ((len(rsps) > 1) and
+ (first_RSP != rsps[0]) and
+ (rsps[0] == rsps[1])):
+ rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
+ timeout -= 1
+ if timeout == 0:
+ logger.error(
+ "Timeout but classification rules are not updated")
+ return
+ time.sleep(1)
+ logger.info("classification rules updated")
+
+
+def get_compute_nodes(nova_client, required_node_number=2):
+ """Get the compute nodes in the deployment"""
+ compute_nodes = os_utils.get_hypervisors(nova_client)
+
+ num_compute_nodes = len(compute_nodes)
+    if num_compute_nodes < required_node_number:
+        logger.error("There are %s compute nodes in the deployment. "
+                     "Minimum number of nodes to complete the test is %s."
+                     % (num_compute_nodes, required_node_number))
+ return None
+
+ logger.debug("Compute nodes: %s" % compute_nodes)
+ return compute_nodes
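timethis is a generic timing decorator: it logs the wall-clock duration of every call to the wrapped function, and capture_time_log above is its only user in this change. A minimal sketch with a hypothetical helper, used purely to show the logged format:

    import time

    @timethis
    def deploy_chain(name):
        # hypothetical helper, not part of this change
        time.sleep(1)
        return name

    deploy_chain('red')
    # logs: deploy_chain(*('red',), **{}) took: 1.00... sec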