-rw-r--r--  tests/functest/odl-sfc/ovs_utils.py |  1
-rwxr-xr-x  tests/functest/odl-sfc/sfc.py       | 23
-rw-r--r--  tests/functest/odl-sfc/utils.py     | 59
3 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/tests/functest/odl-sfc/ovs_utils.py b/tests/functest/odl-sfc/ovs_utils.py
index af1f232c..48dfd620 100644
--- a/tests/functest/odl-sfc/ovs_utils.py
+++ b/tests/functest/odl-sfc/ovs_utils.py
@@ -21,6 +21,7 @@ class OVSLogger(object):
self.ovs_dir = basedir
self.ft_resdir = ft_resdir
self.__mkdir_p(self.ovs_dir)
+ self.__mkdir_p(self.ft_resdir)
def __mkdir_p(self, dirpath):
if not os.path.exists(dirpath):
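The constructor change simply runs the existing directory-creation helper against ft_resdir as well, so the functest results directory is guaranteed to exist before anything is written into it. A minimal sketch of the mkdir-p idiom the helper appears to use (only the guard is visible in the hunk; the os.makedirs body is an assumption):

    import os

    def mkdir_p(dirpath):
        # Create the directory only if it is not already present;
        # repeated calls are therefore harmless.
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
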
diff --git a/tests/functest/odl-sfc/sfc.py b/tests/functest/odl-sfc/sfc.py
index 3dba62f9..fe9d3e5f 100755
--- a/tests/functest/odl-sfc/sfc.py
+++ b/tests/functest/odl-sfc/sfc.py
@@ -7,8 +7,10 @@ import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
import functest.utils.openstack_tacker as os_tacker
import threading
+import ovs_utils
import utils as test_utils
+
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--report",
@@ -91,6 +93,10 @@ def main():
controller_clients = test_utils.get_ssh_clients("controller", PROXY)
compute_clients = test_utils.get_ssh_clients("compute", PROXY)
+ ovs_logger = ovs_utils.OVSLogger(
+ os.path.join(SFC_TEST_DIR, 'ovs-logs'),
+ FUNCTEST_RESULTS_DIR)
+
image_id = os_utils.create_glance_image(glance_client,
IMAGE_NAME,
IMAGE_PATH,
@@ -159,7 +165,7 @@ def main():
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=test_utils.capture_time_log,
- args=(compute_clients,))
+ args=(ovs_logger, compute_clients,))
try:
t1.start()
except Exception, e:
@@ -193,7 +199,8 @@ def main():
else:
error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
logger.error(error)
- test_utils.capture_err_logs(controller_clients, compute_clients, error)
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
update_json_results("Test 1: SSH Blocked", "Failed")
logger.info("Test HTTP")
@@ -203,7 +210,8 @@ def main():
else:
error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
logger.error(error)
- test_utils.capture_err_logs(controller_clients, compute_clients, error)
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
update_json_results("Test 2: HTTP works", "Failed")
logger.info("Changing the classification")
@@ -230,7 +238,7 @@ def main():
# Start measuring the time it takes to implement the classification rules
t2 = threading.Thread(target=test_utils.capture_time_log,
- args=(compute_clients,))
+ args=(ovs_logger, compute_clients,))
try:
t2.start()
except Exception, e:
@@ -246,7 +254,8 @@ def main():
else:
error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
logger.error(error)
- test_utils.capture_err_logs(controller_clients, compute_clients, error)
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
update_json_results("Test 3: HTTP Blocked", "Failed")
logger.info("Test SSH")
@@ -256,7 +265,8 @@ def main():
else:
error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
logger.error(error)
- test_utils.capture_err_logs(controller_clients, compute_clients, error)
+ test_utils.capture_err_logs(
+ ovs_logger, controller_clients, compute_clients, error)
update_json_results("Test 4: SSH Works", "Failed")
if json_results["failures"]:
@@ -273,6 +283,7 @@ def main():
stop_time,
status,
json_results)
+ ovs_logger.create_artifact_archive()
if status == "PASS":
logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
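Taken together, the sfc.py changes create one OVSLogger up front and thread it through every helper that previously built its own. A condensed sketch of the resulting wiring, based on the hunks above (SFC_TEST_DIR, FUNCTEST_RESULTS_DIR and compute_clients are defined elsewhere in sfc.py):

    import os
    import threading

    import ovs_utils
    import utils as test_utils

    # Single logger instance shared by the whole test run.
    ovs_logger = ovs_utils.OVSLogger(
        os.path.join(SFC_TEST_DIR, 'ovs-logs'),
        FUNCTEST_RESULTS_DIR)

    # The logger is now the first positional argument of capture_time_log.
    t1 = threading.Thread(target=test_utils.capture_time_log,
                          args=(ovs_logger, compute_clients,))
    t1.start()

    # ... tests run, failures call test_utils.capture_err_logs(ovs_logger, ...) ...

    # Collect everything that was dumped during the run into one archive.
    ovs_logger.create_artifact_archive()
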
diff --git a/tests/functest/odl-sfc/utils.py b/tests/functest/odl-sfc/utils.py
index f24f76c4..f0b81760 100644
--- a/tests/functest/odl-sfc/utils.py
+++ b/tests/functest/odl-sfc/utils.py
@@ -7,7 +7,7 @@ import functest.utils.openstack_utils as os_utils
import re
import json
import SSHUtils as ssh_utils
-import ovs_utils
+import functools
logger = ft_logger.Logger("sfc_test_utils").getLogger()
@@ -306,11 +306,7 @@ def is_http_blocked(srv_prv_ip, client_ip):
return True
-def capture_err_logs(controller_clients, compute_clients, error):
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- FUNCTEST_RESULTS_DIR)
-
+def capture_err_logs(ovs_logger, controller_clients, compute_clients, error):
timestamp = time.strftime("%Y%m%d-%H%M%S")
ovs_logger.dump_ovs_logs(controller_clients,
compute_clients,
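For orientation, the reshaped helper in one piece; the trailing arguments of dump_ovs_logs are cut off by the hunk boundary, so the keyword names used to complete the call below are assumptions:

    import time

    def capture_err_logs(ovs_logger, controller_clients, compute_clients, error):
        # The OVSLogger is injected by the caller (sfc.py) instead of being
        # instantiated here on every failure.
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        ovs_logger.dump_ovs_logs(controller_clients,
                                 compute_clients,
                                 related_error=error,    # assumed keyword name
                                 timestamp=timestamp)    # assumed keyword name
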
@@ -346,38 +342,35 @@ def check_ssh(ips, retries=100):
return False
-def capture_time_log(compute_clients, timeout=200):
- """Measure the time it takes to update the classification rules"""
- ovs_logger = ovs_utils.OVSLogger(
- os.path.join(os.getcwd(), 'ovs-logs'),
- "test")
- i = 0
- first_RSP = ""
- start_time = time.time()
- while True:
+# Measure the time it takes to update the classification rules
+def timethis(func):
+ @functools.wraps(func)
+ def timed(*args, **kwargs):
+ ts = time.time()
+ result = func(*args, **kwargs)
+ te = time.time()
+ elapsed = '{0}'.format(te - ts)
+ logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
+ f=func.__name__, a=args, kw=kwargs, t=elapsed))
+ return result
+ return timed
+
+
+@timethis
+def capture_time_log(ovs_logger, compute_clients, timeout=200):
+ rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
+ first_RSP = rsps[0] if len(rsps) > 0 else ''
+ while not ((len(rsps) > 1) and
+ (first_RSP != rsps[0]) and
+ (rsps[0] == rsps[1])):
rsps = ovs_logger.ofctl_time_counter(compute_clients[0])
- if not i:
- if len(rsps) > 0:
- first_RSP = rsps[0]
- i = i + 1
- else:
- first_RSP = 0
- i = i + 1
- if (len(rsps) > 1):
- if(first_RSP != rsps[0]):
- if (rsps[0] == rsps[1]):
- stop_time = time.time()
- logger.info("classification rules updated")
- difference = stop_time - start_time
- logger.info("It took %s seconds" % difference)
- break
timeout -= 1
- if not timeout:
+ if timeout == 0:
logger.error(
"Timeout but classification rules are not updated")
- break
+ return
time.sleep(1)
- return
+ logger.info("classification rules updated")
def get_compute_nodes(nova_client, required_node_number=2):
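The rewritten capture_time_log keeps only the polling loop and delegates all timing and reporting to the new timethis decorator, replacing the manual start/stop bookkeeping of the old version. A self-contained sketch of the decorator applied to a dummy function, so its log output can be seen without OVS or SSH clients (the function name below is illustrative only):

    import functools
    import logging
    import time

    logger = logging.getLogger("sfc_test_utils")

    def timethis(func):
        @functools.wraps(func)
        def timed(*args, **kwargs):
            ts = time.time()
            result = func(*args, **kwargs)
            te = time.time()
            # Log the wrapped call and its wall-clock duration.
            logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
                f=func.__name__, a=args, kw=kwargs, t=te - ts))
            return result
        return timed

    @timethis
    def wait_a_bit(seconds):
        time.sleep(seconds)

    if __name__ == '__main__':
        logging.basicConfig(level=logging.INFO)
        wait_a_bit(0.5)  # logs roughly: wait_a_bit(*(0.5,), **{}) took: 0.5 sec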