-rw-r--r--  sfc/lib/cleanup.py                                 | 120
-rw-r--r--  sfc/lib/topology_shuffler.py                       |  19
-rw-r--r--  sfc/lib/utils.py                                   | 121
-rw-r--r--  sfc/tests/functest/config.yaml                     |  11
-rw-r--r--  sfc/tests/functest/run_tests.py                    | 119
-rw-r--r--  sfc/tests/functest/sfc_one_chain_two_service_functions.py (renamed from sfc/tests/functest/sfc_one_chain_two_service_functions_different_computes.py) | 18
-rw-r--r--  sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py  |  23
7 files changed, 360 insertions(+), 71 deletions(-)
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
new file mode 100644
index 00000000..83eac3e6
--- /dev/null
+++ b/sfc/lib/cleanup.py
@@ -0,0 +1,120 @@
+import sys
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.openstack_utils as os_utils
+import functest.utils.openstack_tacker as os_tacker
+import sfc.lib.utils as utils
+
+
+logger = ft_logger.Logger(__name__).getLogger()
+
+
+def delete_odl_resources(odl_ip, odl_port, resource):
+ rsrc_list = utils.get_odl_resource_list(odl_ip, odl_port, resource)
+ elem_names = utils.odl_resource_list_names(resource, rsrc_list)
+ for elem in elem_names:
+ logger.info("Removing ODL resource: {0}/{1}".format(resource, elem))
+ utils.delete_odl_resource_elem(odl_ip, odl_port, resource, elem)
+
+
+def delete_odl_ietf_access_lists(odl_ip, odl_port):
+ acl_list = utils.get_odl_acl_list(odl_ip, odl_port)
+ acl_types_names = utils.odl_acl_types_names(acl_list)
+ for acl_type, acl_name in acl_types_names:
+ utils.delete_odl_acl(odl_ip, odl_port, acl_type, acl_name)
+
+
+def delete_vnfds():
+ t = os_tacker.get_tacker_client()
+ vnfds = os_tacker.list_vnfds(t)
+ if vnfds is None:
+ return
+ for vnfd in vnfds:
+ logger.info("Removing vnfd: {0}".format(vnfd))
+ os_tacker.delete_vnfd(t, vnfd_id=vnfd)
+
+
+def delete_vnfs():
+ t = os_tacker.get_tacker_client()
+ vnfs = os_tacker.list_vnfs(t)
+ if vnfs is None:
+ return
+ for vnf in vnfs:
+ logger.info("Removing vnf: {0}".format(vnf))
+ os_tacker.delete_vnf(t, vnf_id=vnf)
+
+
+def delete_sfcs():
+ t = os_tacker.get_tacker_client()
+ sfcs = os_tacker.list_sfcs(t)
+ if sfcs is None:
+ return
+ for sfc in sfcs:
+ logger.info("Removing sfc: {0}".format(sfc))
+ os_tacker.delete_sfc(t, sfc_id=sfc)
+
+
+def delete_sfc_clfs():
+ t = os_tacker.get_tacker_client()
+ sfc_clfs = os_tacker.list_sfc_classifiers(t)
+ if sfc_clfs is None:
+ return
+ for sfc_clf in sfc_clfs:
+ logger.info("Removing sfc classifier: {0}".format(sfc_clf))
+ os_tacker.delete_sfc_classifier(t, sfc_clf_id=sfc_clf)
+
+
+def delete_floating_ips():
+ n = os_utils.get_nova_client()
+ fips = os_utils.get_floating_ips(n)
+ if fips is None:
+ return
+ for fip in fips:
+ logger.info("Removing floating ip: {0}".format(fip.ip))
+ os_utils.delete_floating_ip(n, fip.id)
+
+
+def delete_stacks():
+ logger.info("Removing stack: sfc")
+ utils.run_cmd('openstack stack delete sfc --y')
+ logger.info("Removing stack: sfc_test1")
+ utils.run_cmd('openstack stack delete sfc_test1 --y')
+ logger.info("Removing stack: sfc_test2")
+ utils.run_cmd('openstack stack delete sfc_test2 --y')
+
+
+def delete_instances():
+ n = os_utils.get_nova_client()
+ instances = os_utils.get_instances(n)
+ if instances is None:
+ return
+ for inst in instances:
+ logger.info("Removing instance: {0}".format(inst.id))
+ os_utils.delete_instance(n, inst.id)
+
+
+def cleanup_odl(odl_ip, odl_port):
+ delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
+ delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
+ delete_odl_resources(odl_ip, odl_port, 'service-function-path')
+ delete_odl_resources(odl_ip, odl_port, 'service-function')
+ delete_odl_ietf_access_lists(odl_ip, odl_port)
+
+
+def cleanup(odl_ip=None, odl_port=None):
+ delete_sfc_clfs()
+ delete_sfcs()
+ delete_vnfs()
+ delete_vnfds()
+ delete_stacks()
+ delete_floating_ips()
+ delete_instances()
+ if odl_ip is not None and odl_port is not None:
+ cleanup_odl(odl_ip, odl_port)
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 2:
+ cleanup(sys.argv[1], sys.argv[2])
+ else:
+ cleanup()
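
A quick sketch of how the new module is meant to be driven: both arguments are optional, and without them the ODL cleanup is skipped (the address below is hypothetical). All of the Tacker delete_* helpers above also share one list-then-delete shape, rendered generically here as a sketch, not as code from the patch:

    # python sfc/lib/cleanup.py 192.0.2.10 8181   (hypothetical ODL host/port)

    def delete_all(list_fn, delete_fn, client):
        # Generic form of the pattern in delete_vnfds/delete_vnfs/delete_sfcs
        elems = list_fn(client)
        if elems is None:  # a Tacker listing may come back empty-handed
            return
        for elem in elems:
            delete_fn(client, elem)
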
diff --git a/sfc/lib/topology_shuffler.py b/sfc/lib/topology_shuffler.py
index 79825fcf..4e027d9e 100644
--- a/sfc/lib/topology_shuffler.py
+++ b/sfc/lib/topology_shuffler.py
@@ -16,9 +16,16 @@ TOPOLOGIES = [
'''
},
{
+ 'id': 'CLIENT_SERVER_SAME_HOST_SPLIT_VNF',
+ 'description': '''
+ Client and server are on the same host.
+ The VNFs are split between hosts Round Robin.
+ '''
+ },
+ {
'id': 'CLIENT_VNF_SAME_HOST',
'description': '''
- Client instance and vnfs are are on the same
+ Client instance and vnfs are on the same
compute host. Server instance is on a different host
'''
},
@@ -32,18 +39,11 @@ TOPOLOGIES = [
{
'id': 'SERVER_VNF_SAME_HOST',
'description': '''
- Server instance and vnfs are are on the same
+ Server instance and vnfs are on the same
compute host. Client instance is on a different host
'''
},
{
- 'id': 'CLIENT_SERVER_SAME_HOST_SPLIT_VNF',
- 'description': '''
- Client and server are on the same host.
- The VNFs are split between hosts Round Robin.
- '''
- },
- {
'id': 'CLIENT_SERVER_DIFFERENT_HOST_SPLIT_VNF',
'description': '''
Client and server are on different hosts.
@@ -68,6 +68,7 @@ def get_seed():
Probably with the Jenkins job id
'''
cutoff = len(TOPOLOGIES) - 1
+ # We only add the topologies that are working
seed = datetime.datetime.today().weekday()
if seed > cutoff:
seed = random.randrange(cutoff)
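
For reference, the seeding scheme that the new comment refers to, reduced to a standalone sketch (names local to this example, same logic as get_seed()):

    import datetime
    import random

    def pick_topology(topologies):
        # cutoff marks the last list index; a weekday past it is re-drawn
        # uniformly from the indexes below the cutoff.
        cutoff = len(topologies) - 1
        seed = datetime.datetime.today().weekday()
        if seed > cutoff:
            seed = random.randrange(cutoff)
        return topologies[seed]
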
diff --git a/sfc/lib/utils.py b/sfc/lib/utils.py
index 8623f859..a480f2c2 100644
--- a/sfc/lib/utils.py
+++ b/sfc/lib/utils.py
@@ -11,7 +11,9 @@
import os
import re
import subprocess
+import requests
import time
+import xmltodict
import yaml
import functest.utils.functest_logger as ft_logger
@@ -302,6 +304,7 @@ def vxlan_firewall(sf, iface="eth0", port="22", block=True):
cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
run_cmd_remote(sf, cmd)
+ time.sleep(7)
def vxlan_tool_stop(sf):
@@ -417,13 +420,25 @@ def wait_for_classification_rules(ovs_logger, compute_clients,
# first_RSP saves a potential RSP from an old deployment.
# ODL may take quite some time to implement the new flow
# and an old flow may be there
- first_RSP = rsps[0] if len(rsps) > 0 else ''
- logger.debug("This is the first_RSP: %s" % first_RSP)
+ if compute_client == compute_clients[0]:
+ first_RSP = rsps[0] if len(rsps) > 0 else ''
+ else:
+ first_RSP = ''
+ rsps = ''
+ logger.info("This is the first_RSP: %s" % first_RSP)
if num_chains == 1:
while not ((len(rsps) == 1) and (first_RSP != rsps[0])):
rsps = ofctl_time_counter(ovs_logger, compute_client)
- logger.debug("These are the rsps: %s" % rsps)
+ logger.info("These are the rsps: %s" % rsps)
timeout -= 1
+ if timeout == 10:
+ output = ovs_logger.ofctl_dump_flows(compute_client)
+ logger.info("output ofctl: %s" % output)
+ output2 = ovs_logger.vsctl_show(compute_client)
+ logger.info("output vsctl: %s" % output2)
+ _, stdout, _ = compute_client.exec_command('ip a')
+ output3 = ''.join(stdout.readlines())
+ logger.info("The interfaces: %s" % output3)
if timeout == 0:
logger.error(
"Timeout but classification rules are not updated")
@@ -435,6 +450,14 @@ def wait_for_classification_rules(ovs_logger, compute_clients,
rsps = ofctl_time_counter(ovs_logger, compute_client)
logger.info("This is the rsps: %s" % rsps)
timeout -= 1
+ if timeout == 10:
+ output = ovs_logger.ofctl_dump_flows(compute_client)
+ logger.info("output ofctl: %s" % output)
+ output2 = ovs_logger.vsctl_show(compute_client)
+ logger.info("output vsctl: %s" % output2)
+ _, stdout, _ = compute_client.exec_command('ip a')
+ output3 = ''.join(stdout.readlines())
+ logger.info("The interfaces: %s" % output3)
if timeout == 0:
logger.error(
"Timeout but classification rules are not updated")
@@ -444,10 +467,11 @@ def wait_for_classification_rules(ovs_logger, compute_clients,
def setup_compute_node(cidr, compute_nodes):
- logger.info("bringing up br-int iface")
+ logger.info("bringing up br-int iface and flushing arp tables")
grep_cidr_routes = ("ip route | grep -o {0} || true".format(cidr)).strip()
add_cidr = "ip route add {0} dev br-int".format(cidr)
for compute in compute_nodes:
+ compute.run_cmd("ip -s -s neigh flush all")
compute.run_cmd("ifconfig br-int up")
if not compute.run_cmd(grep_cidr_routes):
logger.info("adding route %s in %s" % (cidr, compute.ip))
@@ -470,16 +494,83 @@ def get_nova_id(tacker_client, resource, vnf_id=None, vnf_name=None):
return None
-def filter_sffs(compute_nodes, testTopology, vnfs):
- if 'nova' in testTopology.values():
- computes_to_check = [node.id for node in compute_nodes]
- else:
- # Get the number of the compute (e.g.node-7.domain.tld ==> 7)
- computes_to_check = [
- testTopology[vnf].split('.')[0].split('-')[1] for vnf in vnfs]
+def get_odl_ip_port(nodes):
+ local_jetty = os.path.join(os.getcwd(), 'jetty.xml')
+ odl_node = next(n for n in nodes if n.is_odl())
+ odl_node.get_file('/opt/opendaylight/etc/jetty.xml', local_jetty)
+ with open(local_jetty) as fd:
+ parsed = xmltodict.parse(fd.read(), dict_constructor=dict)
+
+ ip = (parsed['Configure']['Call'][0]['Arg']['New']
+ ['Set'][0]['Property']['@default'])
+ port = (parsed['Configure']['Call'][0]['Arg']['New']
+ ['Set'][1]['Property']['@default'])
+ return ip, port
+
+
+def pluralize(s):
+ return '{0}s'.format(s)
+
+
+def format_odl_resource_list_url(odl_ip, odl_port, resource,
+ odl_user='admin', odl_pwd='admin'):
+ return ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/{rsrc}:{rsrcs}'
+ .format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
+ rsrc=resource, rsrcs=pluralize(resource)))
+
+
+def format_odl_resource_elem_url(odl_ip, odl_port, resource, elem_name):
+ list_url = format_odl_resource_list_url(odl_ip, odl_port, resource)
+ return ('{0}/{1}/{2}'.format(list_url, resource, elem_name))
+
+
+def odl_resource_list_names(resource, resource_json):
+ if len(resource_json[pluralize(resource)].items()) == 0:
+ return []
+ return [r['name'] for r in resource_json[pluralize(resource)][resource]]
+
+
+def get_odl_resource_list(odl_ip, odl_port, resource):
+ url = format_odl_resource_list_url(odl_ip, odl_port, resource)
+ return requests.get(url).json()
+
+
+def delete_odl_resource_elem(odl_ip, odl_port, resource, elem_name):
+ url = format_odl_resource_elem_url(odl_ip, odl_port, resource, elem_name)
+ requests.delete(url)
+
+
+def odl_acl_types_names(acl_json):
+ if len(acl_json['access-lists'].items()) == 0:
+ return []
+ return [(acl['acl-type'], acl['acl-name'])
+ for acl in acl_json['access-lists']['acl']]
+
+
+def format_odl_acl_list_url(odl_ip, odl_port,
+ odl_user='admin', odl_pwd='admin'):
+ acl_list_url = ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/'
+ 'ietf-access-control-list:access-lists'
+ .format(usr=odl_user, pwd=odl_pwd,
+ ip=odl_ip, port=odl_port))
+ return acl_list_url
+
+
+def get_odl_acl_list(odl_ip, odl_port):
+ acl_list_url = format_odl_acl_list_url(odl_ip, odl_port)
+ r = requests.get(acl_list_url)
+ return r.json()
+
+
+def delete_odl_acl(odl_ip, odl_port, acl_type, acl_name):
+ acl_list_url = format_odl_acl_list_url(odl_ip, odl_port)
+ acl_url = '{0}/acl/{1}/{2}'.format(acl_list_url, acl_type, acl_name)
+ requests.delete(acl_url)
- computes_sff = [
- node.ssh_client for node in compute_nodes
- if node.id in computes_to_check]
- return computes_sff
+def delete_classifier_and_acl(tacker_client, clf_name, odl_ip, odl_port):
+ os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name=clf_name)
+ delete_odl_acl(odl_ip,
+ odl_port,
+ 'ietf-access-control-list:ipv4-acl',
+ clf_name)
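
To illustrate the RESTCONF plumbing added above: the list URL for a resource is built from its pluralized name, and odl_resource_list_names() pulls the names out of the returned JSON. A self-contained rendering, with a sample payload invented for the example:

    def list_url(odl_ip, odl_port, resource, user='admin', pwd='admin'):
        # Mirrors format_odl_resource_list_url(): resource:resources path
        return ('http://{0}:{1}@{2}:{3}/restconf/config/{4}:{4}s'
                .format(user, pwd, odl_ip, odl_port, resource))

    sample = {'service-functions':
              {'service-function': [{'name': 'SF1'}, {'name': 'SF2'}]}}

    def names(resource, body):
        plural = '{0}s'.format(resource)
        if len(body[plural].items()) == 0:
            return []
        return [r['name'] for r in body[plural][resource]]

    assert names('service-function', sample) == ['SF1', 'SF2']
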
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index 1e241cdf..bc6e1726 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -20,6 +20,7 @@ defaults:
testcases:
sfc_two_chains_SSH_and_HTTP:
enabled: true
+ order: 1
description: "ODL-SFC tests"
testname_db: "sfc_two_chains_SSH_and_HTTP"
net_name: example-net
@@ -31,10 +32,11 @@ testcases:
test_vnfd_red: "test-vnfd1.yaml"
test_vnfd_blue: "test-vnfd2.yaml"
- sfc_one_chain_two_service_functions_different_computes:
+ sfc_one_chain_two_service_functions:
enabled: true
- description: "ODL-SFC Testing SFs in different computes"
- testname_db: "sfc_one_chain_two_service_functions_different_computes"
+ order: 0
+ description: "ODL-SFC Testing SFs when they are located on the same chain"
+ testname_db: "sfc_one_chain_two_service_functions"
net_name: example-net
subnet_name: example-subnet
router_name: example-router
@@ -45,7 +47,8 @@ testcases:
test_vnfd_blue: "test2-vnfd2.yaml"
sfc_symmetric_chain:
- enabled: false
+ enabled: false
+ order: 2
description: "Verify the behavior of a symmetric service chain"
testname_db: "sfc_symmetric_chain"
net_name: example-net
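
The new 'order' keys exist so that run_tests.py (next file) can sort the enabled testcases deterministically. In miniature, with the values from this file:

    from collections import OrderedDict

    testcases = {
        'sfc_two_chains_SSH_and_HTTP':         {'enabled': True,  'order': 1},
        'sfc_one_chain_two_service_functions': {'enabled': True,  'order': 0},
        'sfc_symmetric_chain':                 {'enabled': False, 'order': 2},
    }
    ordered = OrderedDict(sorted(testcases.items(),
                                 key=lambda x: x[1]['order']))
    print(list(ordered))  # one-chain first, two-chains next, symmetric last
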
diff --git a/sfc/tests/functest/run_tests.py b/sfc/tests/functest/run_tests.py
index 762e6b3d..a6c218ab 100644
--- a/sfc/tests/functest/run_tests.py
+++ b/sfc/tests/functest/run_tests.py
@@ -19,15 +19,13 @@ import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
import opnfv.utils.ovs_logger as ovs_log
+import sfc.lib.cleanup as sfc_cleanup
import sfc.lib.config as sfc_config
-from opnfv.deployment.factory import Factory as DeploymentFactory
+import sfc.lib.utils as sfc_utils
+from collections import OrderedDict
+from opnfv.deployment.factory import Factory as DeploymentFactory
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
logger = ft_logger.Logger(__name__).getLogger()
COMMON_CONFIG = sfc_config.CommonConfig()
@@ -53,7 +51,29 @@ def fetch_tackerc_file(controller_node):
return rc_file
-def main():
+def disable_heat_resource_finder_cache(nodes):
+ controllers = [node for node in nodes if node.is_controller()]
+ remote_heat_conf = '/etc/heat/heat.conf'
+ local_heat_conf = '/tmp/heat.conf'
+ for controller in controllers:
+ logger.info("Fetch {0} from controller {1}"
+ .format(remote_heat_conf, controller.ip))
+ controller.get_file(remote_heat_conf, local_heat_conf)
+ with open(local_heat_conf, 'a') as cfg:
+ cfg.write('\n[resource_finder_cache]\n')
+ cfg.write('caching=False\n')
+ logger.info("Replace {0} with {1} in controller {2}"
+ .format(remote_heat_conf, local_heat_conf, controller.ip))
+ controller.run_cmd('rm -f {0}'.format(remote_heat_conf))
+ controller.put_file(local_heat_conf, remote_heat_conf)
+ logger.info("Restart heat-engine in {0}".format(controller.ip))
+ controller.run_cmd('service heat-engine restart')
+ os.remove(local_heat_conf)
+ logger.info("Waiting for heat-engine to restart in controllers")
+ time.sleep(10)
+
+
+def main(report=False):
deploymentHandler = DeploymentFactory.get_handler(
COMMON_CONFIG.installer_type,
COMMON_CONFIG.installer_ip,
@@ -67,10 +87,20 @@ def main():
a_controller = [node for node in nodes
if node.is_controller()][0]
+
+ disable_heat_resource_finder_cache(nodes)
+
rc_file = fetch_tackerc_file(a_controller)
+ os_utils.source_credentials(rc_file)
+
+ logger.info("Updating env with {0}".format(rc_file))
+ logger.info("OS credentials:")
+ for var, value in os.environ.items():
+ if var.startswith("OS_"):
+ logger.info("\t{0}={1}".format(var, value))
+
+ odl_ip, odl_port = sfc_utils.get_odl_ip_port(nodes)
- creds = os_utils.source_credentials(rc_file)
- logger.info("Updating env with {0}".format(creds))
ovs_logger = ovs_log.OVSLogger(
os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
COMMON_CONFIG.functest_results_dir)
@@ -80,44 +110,66 @@ def main():
config_yaml = yaml.safe_load(f)
testcases = config_yaml.get("testcases")
+ testcases_ordered = OrderedDict(sorted(testcases.items(),
+ key=lambda x: x[1]['order']))
overall_details = {}
- overall_status = "FAIL"
+ overall_status = "NOT TESTED"
overall_start_time = time.time()
- for testcase in testcases:
- if testcases[testcase]['enabled']:
+ for testcase, test_cfg in testcases_ordered.items():
+ if test_cfg['enabled']:
test_name = testcase
- test_descr = testcases[testcase]['description']
- test_name_db = testcases[testcase]['testname_db']
+ test_descr = test_cfg['description']
+ test_name_db = test_cfg['testname_db']
title = ("Running '%s - %s'" %
(test_name, test_descr))
logger.info(title)
logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(testcase, package=None)
+ t = importlib.import_module(
+ "sfc.tests.functest.{0}".format(testcase),
+ package=None)
start_time = time.time()
- result = t.main()
+ try:
+ result = t.main()
+ except Exception, e:
+ logger.error("Exception when executing: %s" % testcase)
+ logger.error(e)
+ result = {'status': 'FAILED'}
+ for node in nodes:
+ if node.get_file("/usr/lib/python2.7/dist-packages/tacker/"
+ "sfc/plugin.py", "/tmp/plugin.py"):
+ node.get_file("/var/log/tacker/tacker-server.log",
+ "/tmp/tacker-server.log")
+ break
+ with open("/tmp/plugin.py") as fd:
+ logger.info(fd.read())
+ with open("/tmp/tacker-server.log") as fd1:
+ logger.info(fd1.read())
end_time = time.time()
duration = end_time - start_time
- status = "FAIL"
- if result != 0:
- overall_details.update({test_name_db: "execution error."})
+ logger.info("Results of test case '%s - %s':\n%s\n" %
+ (test_name, test_descr, result))
+ if result['status'] == 'PASS':
+ status = 'PASS'
+ overall_details.update({test_name_db: "worked"})
+ if overall_status != "FAIL":
+ overall_status = "PASS"
else:
- status = result.get("status")
- if status == "FAIL":
- overall_status = "FAIL"
- ovs_logger.create_artifact_archive()
-
- logger.info("Results of test case '%s - %s':\n%s\n" %
- (test_name, test_descr, result))
+ status = 'FAIL'
+ overall_status = "FAIL"
+ overall_details.update({test_name_db: "execution error."})
+ ovs_logger.create_artifact_archive()
- dic = {"duration": duration, "status": overall_status}
- overall_details.update({test_name_db: dic})
- if args.report:
+ if report:
details = result.get("details")
push_results(
test_name_db, start_time, end_time, status, details)
+ dic = {"duration": duration, "status": status}
+ overall_details.update({test_name_db: dic})
+ sfc_cleanup.cleanup(odl_ip=odl_ip, odl_port=odl_port)
+
overall_end_time = time.time()
- if args.report:
+ if report:
push_results(
"odl-sfc", overall_start_time, overall_end_time,
overall_status, overall_details)
@@ -129,4 +181,9 @@ def main():
if __name__ == '__main__':
- main()
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
+ args = parser.parse_args()
+ main(report=args.report)
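
The heat tweak above boils down to appending one section to heat.conf on every controller before restarting heat-engine. A minimal local sketch of just the file edit (the fetch, push-back and service restart steps are elided):

    def disable_resource_finder_cache(conf_path):
        # Appended verbatim, as in disable_heat_resource_finder_cache()
        with open(conf_path, 'a') as cfg:
            cfg.write('\n[resource_finder_cache]\n')
            cfg.write('caching=False\n')
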
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions_different_computes.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index 69d86f3f..e55af011 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions_different_computes.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -29,8 +29,9 @@ logger = ft_logger.Logger(__name__).getLogger()
CLIENT = "client"
SERVER = "server"
COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_one_chain_two_service'
- '_functions_different_computes')
+TESTCASE_CONFIG = sfc_config.TestcaseConfig(
+ 'sfc_one_chain_two_service'
+ '_functions')
def main():
@@ -50,6 +51,9 @@ def main():
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
+ for compute in compute_nodes:
+ logger.info("This is a compute: %s" % compute.info)
+
results = Results(COMMON_CONFIG.line_length)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
@@ -151,6 +155,11 @@ def main():
logger.error('ERROR while booting vnfs')
sys.exit(1)
+ instances = os_utils.get_instances(nova_client)
+ for instance in instances:
+ if ('client' not in instance.name) and ('server' not in instance.name):
+ os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+
os_tacker.create_sfc(tacker_client, 'red',
chain_vnf_names=[vnfs[0], vnfs[1]])
@@ -167,12 +176,9 @@ def main():
num_chains = 1
- # We want to check the classif. only in the SFFs (computes with a SF)
- compute_sffs = test_utils.filter_sffs(compute_nodes, testTopology, vnfs)
-
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_sffs, num_chains,))
+ args=(ovs_logger, compute_clients, num_chains,))
try:
t1.start()
except Exception, e:
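
Both testcases now apply the security group with the same heuristic: any instance whose name contains neither 'client' nor 'server' is taken to be a VNF. Isolated for clarity, with a namedtuple standing in for a nova instance:

    from collections import namedtuple

    Inst = namedtuple('Inst', 'id name')

    def vnf_instances(instances):
        # neither the client nor the server => assume it is a VNF
        return [i for i in instances
                if 'client' not in i.name and 'server' not in i.name]

    demo = [Inst(1, 'client'), Inst(2, 'server'), Inst(3, 'testVNF1')]
    assert [i.id for i in vnf_instances(demo)] == [3]
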
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index f7527d48..54cacbc7 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -49,6 +49,11 @@ def main():
compute_nodes = [node for node in openstack_nodes
if node.is_compute()]
+ odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)
+
+ for compute in compute_nodes:
+ logger.info("This is a compute: %s" % compute.info)
+
results = Results(COMMON_CONFIG.line_length)
results.add_to_summary(0, "=")
results.add_to_summary(2, "STATUS", "SUBTEST")
@@ -158,6 +163,11 @@ def main():
logger.error('ERROR while booting vnfs')
sys.exit(1)
+ instances = os_utils.get_instances(nova_client)
+ for instance in instances:
+ if ('client' not in instance.name) and ('server' not in instance.name):
+ os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+
os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'])
os_tacker.create_sfc(tacker_client, 'blue', chain_vnf_names=['testVNF2'])
@@ -181,12 +191,10 @@ def main():
logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])
num_chains = 2
- # We want to check the classif. only in the SFFs (computes with a SF)
- compute_sffs = test_utils.filter_sffs(compute_nodes, testTopology, vnfs)
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_sffs, num_chains,))
+ args=(ovs_logger, compute_clients, num_chains,))
try:
t1.start()
@@ -235,8 +243,11 @@ def main():
results.add_to_summary(2, "FAIL", "HTTP works")
logger.info("Changing the classification")
- os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_http')
- os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_ssh')
+ test_utils.delete_classifier_and_acl(
+ tacker_client, 'red_http', odl_ip, odl_port)
+
+ test_utils.delete_classifier_and_acl(
+ tacker_client, 'red_ssh', odl_ip, odl_port)
os_tacker.create_sfc_classifier(
tacker_client, 'blue_http', sfc_name='blue',
@@ -258,7 +269,7 @@ def main():
# Start measuring the time it takes to implement the classification rules
t2 = threading.Thread(target=test_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_sffs, num_chains,))
+ args=(ovs_logger, compute_clients, num_chains,))
try:
t2.start()
except Exception, e:
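
delete_classifier_and_acl() removes the Tacker classifier and then issues a RESTCONF DELETE against ODL so the stale ACL does not linger. A sketch of the URL it ends up targeting for, say, 'red_http' (host and port hypothetical):

    def acl_delete_url(odl_ip, odl_port, acl_type, acl_name):
        # Mirrors format_odl_acl_list_url() + the /acl/<type>/<name> suffix
        base = ('http://admin:admin@{0}:{1}/restconf/config/'
                'ietf-access-control-list:access-lists'
                .format(odl_ip, odl_port))
        return '{0}/acl/{1}/{2}'.format(base, acl_type, acl_name)

    print(acl_delete_url('192.0.2.10', '8181',
                         'ietf-access-control-list:ipv4-acl', 'red_http'))
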