path: root/tests/functest/odl-sfc
Diffstat (limited to 'tests/functest/odl-sfc')
-rw-r--r--  tests/functest/odl-sfc/README.sfc-test-2                                        11
-rw-r--r--  tests/functest/odl-sfc/config.py                                                75
-rw-r--r--  tests/functest/odl-sfc/config.yaml                                              45
-rwxr-xr-x  tests/functest/odl-sfc/prepare_odl_sfc.py                                       90
-rw-r--r--  tests/functest/odl-sfc/results.py                                               52
-rw-r--r--  tests/functest/odl-sfc/run_tests.py                                            130
-rwxr-xr-x  tests/functest/odl-sfc/setup_scripts/compute_presetup_CI.bash                   27
-rwxr-xr-x  tests/functest/odl-sfc/setup_scripts/delete.sh                                  16
-rwxr-xr-x  tests/functest/odl-sfc/setup_scripts/prepare_odl_sfc.py                         90
-rwxr-xr-x  tests/functest/odl-sfc/setup_scripts/server_presetup_CI.bash                    13
-rwxr-xr-x  tests/functest/odl-sfc/setup_scripts/tacker_client_install.sh                   43
-rwxr-xr-x  tests/functest/odl-sfc/sfc.py                                                  259
-rwxr-xr-x  tests/functest/odl-sfc/sfc_one_chain_two_service_functions_different_computes.py  260
-rwxr-xr-x  tests/functest/odl-sfc/sfc_tacker_test2.bash                                    29
-rw-r--r--  tests/functest/odl-sfc/utils.py                                                402
-rw-r--r--  tests/functest/odl-sfc/vnfd-templates/test-vnfd1.yaml                           31
-rw-r--r--  tests/functest/odl-sfc/vnfd-templates/test-vnfd2.yaml                           31
-rw-r--r--  tests/functest/odl-sfc/vnfd-templates/test2-vnfd1.yaml                          31
-rw-r--r--  tests/functest/odl-sfc/vnfd-templates/test2-vnfd2.yaml                          31
19 files changed, 0 insertions, 1666 deletions
diff --git a/tests/functest/odl-sfc/README.sfc-test-2 b/tests/functest/odl-sfc/README.sfc-test-2
deleted file mode 100644
index 5a9f2ebe..00000000
--- a/tests/functest/odl-sfc/README.sfc-test-2
+++ /dev/null
@@ -1,11 +0,0 @@
-### ODL-SFC TEST2 DESCRIPTION ###
-
-This is a simple description of the test case
-
-We create one client and one server using nova. Then, two SFs are created using
-tacker and deployed on two different compute nodes. A chain is created that
-includes both SFs.
-
-vxlan_tool is started on both SFs and HTTP traffic is sent from the client to
-the server. If this works, vxlan_tool is reconfigured to block HTTP traffic.
-The traffic test is repeated and should now fail because the packets are dropped.
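The blocking step described above is driven by the vxlan_firewall helper kept in utils.py further down in this diff. A minimal sketch of how the test toggles an SF from pass-through to blocking HTTP, assuming the SF is reachable at a floating IP (the address below is illustrative):

    import utils as test_utils  # tests/functest/odl-sfc/utils.py

    sf_ip = "172.16.0.5"  # illustrative floating IP of one SF

    # Pass-through mode first, so HTTP initially works end to end.
    test_utils.vxlan_firewall(sf_ip, block=False)

    # Then restart vxlan_tool so TCP port 80 is dropped; the helper runs:
    #   python vxlan_tool.py -i eth0 -d forward -v off -b 80
    test_utils.vxlan_tool_stop(sf_ip)
    test_utils.vxlan_firewall(sf_ip, port="80")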
diff --git a/tests/functest/odl-sfc/config.py b/tests/functest/odl-sfc/config.py
deleted file mode 100644
index 3b76e3bc..00000000
--- a/tests/functest/odl-sfc/config.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import yaml
-import os
-
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.functest_constants as ft_constants
-
-logger = ft_logger.Logger("sfc_test_config").getLogger()
-
-
-class CommonConfig(object):
- """
- Common configuration parameters across testcases
- """
-
- def __init__(self):
- self.line_length = 30
- self.test_db = ft_utils.get_functest_config("results.test_db_url")
- self.repo_path = ft_constants.SFC_REPO_DIR
- self.sfc_test_dir = os.path.join(self.repo_path, "tests",
- "functest", "odl-sfc")
- self.vnfd_dir = os.path.join(self.sfc_test_dir, "vnfd-templates")
- self.functest_results_dir = os.path.join(
- ft_constants.FUNCTEST_RESULTS_DIR, "odl-sfc")
- self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
- self.fuel_master_ip = ft_utils.get_parameter_from_yaml(
- "defaults.fuel_master_ip", self.config_file)
- self.fuel_master_uname = ft_utils.get_parameter_from_yaml(
- "defaults.fuel_master_uname", self.config_file)
- self.fuel_master_passwd = ft_utils.get_parameter_from_yaml(
- "defaults.fuel_master_passwd", self.config_file)
- self.fuel_proxy = {
- 'ip': self.fuel_master_ip,
- 'username': self.fuel_master_uname,
- 'password': self.fuel_master_passwd
- }
- self.flavor = ft_utils.get_parameter_from_yaml(
- "defaults.flavor", self.config_file)
- self.ram_size_in_mb = ft_utils.get_parameter_from_yaml(
- "defaults.ram_size_in_mb", self.config_file)
- self.disk_size_in_gb = ft_utils.get_parameter_from_yaml(
- "defaults.disk_size_in_gb", self.config_file)
- self.vcpu_count = ft_utils.get_parameter_from_yaml(
- "defaults.vcpu_count", self.config_file)
- self.image_name = ft_utils.get_parameter_from_yaml(
- "defaults.image_name", self.config_file)
- self.image_file_name = ft_utils.get_parameter_from_yaml(
- "defaults.image_file_name", self.config_file)
- self.image_format = ft_utils.get_parameter_from_yaml(
- "defaults.image_format", self.config_file)
- self.url = ft_utils.get_parameter_from_yaml(
- "defaults.url", self.config_file)
- self.dir_functest_data = ft_utils.get_functest_config(
- "general.directories.dir_functest_data")
- self.image_path = os.path.join(
- self.dir_functest_data, self.image_file_name)
-
-
-class TestcaseConfig(object):
- """
- Configuration for a testcase.
- Parse config.yaml into a dict and create an object out of it.
- """
-
- def __init__(self, testcase):
- common_config = CommonConfig()
- test_config = None
- with open(common_config.config_file) as f:
- testcases_yaml = yaml.safe_load(f)
- test_config = testcases_yaml['testcases'].get(testcase, None)
- if test_config is None:
- logger.error('Test {0} configuration is not present in {1}'
- .format(testcase, common_config.config_file))
- # Update class fields with configuration variables dynamically
- self.__dict__.update(**test_config)
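A short usage sketch of these two classes, mirroring how sfc.py (later in this diff) consumes them; 'sfc' is one of the testcase keys defined in config.yaml:

    import config as sfc_config

    COMMON_CONFIG = sfc_config.CommonConfig()
    TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc')

    # CommonConfig exposes the deployment-wide 'defaults' values...
    print(COMMON_CONFIG.image_name)     # sf_nsh_colorado
    # ...while TestcaseConfig turns the per-testcase dict into attributes.
    print(TESTCASE_CONFIG.subnet_cidr)  # 11.0.0.0/24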
diff --git a/tests/functest/odl-sfc/config.yaml b/tests/functest/odl-sfc/config.yaml
deleted file mode 100644
index 78cf42fb..00000000
--- a/tests/functest/odl-sfc/config.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-defaults:
- #odl-sfc uses custom flavors as per below params
- flavor: custom
- ram_size_in_mb: 1500
- disk_size_in_gb: 10
- vcpu_count: 1
- image_name: sf_nsh_colorado
- image_file_name: sf_nsh_colorado.qcow2
- fuel_master_ip: 10.20.0.2
- fuel_master_uname: root
- fuel_master_passwd: r00tme
- image_format: qcow2
- url: "http://artifacts.opnfv.org/sfc/demo"
- vnfd-dir: "vnfd-templates"
-
-testcases:
-# run_tests.py imports each testcase module by this key, so the key must match
-# the module file name. sfc.py will be renamed in a separate patch for better
-# review clarity, and this key will then be changed back to
-# sfc_two_chains_SSH_and_HTTP to match it.
- sfc:
- enabled: true
- description: "ODL-SFC tests"
- testname_db: "sfc_two_chains_SSH_and_HTTP"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd_red: "test-vnfd1.yaml"
- test_vnfd_blue: "test-vnfd2.yaml"
-
- sfc_one_chain_two_service_functions_different_computes:
- enabled: true
- description: "ODL-SFC Testing SFs in different computes"
- testname_db: "sfc_one_chain_two_service_functions_different_computes"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd_red: "test2-vnfd1.yaml"
- test_vnfd_blue: "test2-vnfd2.yaml"
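config.py reads the 'defaults' section through ft_utils.get_parameter_from_yaml with dotted keys such as 'defaults.flavor'. The snippet below only illustrates how such a dotted key resolves against this file; it is not the functest implementation:

    import yaml

    def lookup(dotted_key, config_file):
        # Walk a dotted key ("defaults.flavor") through the parsed YAML tree.
        with open(config_file) as f:
            node = yaml.safe_load(f)
        for key in dotted_key.split("."):
            node = node[key]
        return node

    print(lookup("defaults.flavor", "config.yaml"))            # custom
    print(lookup("testcases.sfc.subnet_cidr", "config.yaml"))  # 11.0.0.0/24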
diff --git a/tests/functest/odl-sfc/prepare_odl_sfc.py b/tests/functest/odl-sfc/prepare_odl_sfc.py
deleted file mode 100755
index c3162cba..00000000
--- a/tests/functest/odl-sfc/prepare_odl_sfc.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import sys
-import subprocess
-import paramiko
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-SFC_REPO_DIR = "/home/opnfv/repos/sfc"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
-    logger.debug("INSTALLER_IP is not set. Defaulting to 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/odl-sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,
- "tests/functest/odl-sfc")
-SETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')
-
-command = SETUP_SCRIPTS_DIR + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
-    logger.debug("Failed to collect the compute IPs over SSH")
-
-output = stdout.readlines()
-for ip in output:
- command = SETUP_SCRIPTS_DIR + ("/compute_presetup_CI.bash " + ip.rstrip() +
- "| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/tests/functest/odl-sfc/results.py b/tests/functest/odl-sfc/results.py
deleted file mode 100644
index 5fa9aa05..00000000
--- a/tests/functest/odl-sfc/results.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("sfc-results").getLogger()
-
-
-class Results(object):
-
- def __init__(self, line_length):
- self.line_length = line_length
- self.test_result = "FAIL"
- self.summary = ""
- self.details = []
- self.num_tests = 0
- self.num_tests_failed = 0
-
- def add_to_summary(self, num_cols, col1, col2=""):
- if num_cols == 0:
- self.summary += ("+%s+\n" % (col1 * (self.line_length - 2)))
- elif num_cols == 1:
- self.summary += ("| " + col1.ljust(self.line_length - 3) + "|\n")
- elif num_cols == 2:
- self.summary += ("| %s" % col1.ljust(7) + "| ")
- self.summary += (col2.ljust(self.line_length - 12) + "|\n")
- if col1 in ("FAIL", "PASS"):
- self.details.append({col2: col1})
- self.num_tests += 1
- if col1 == "FAIL":
- self.num_tests_failed += 1
-
- def compile_summary(self):
- success_message = "All the subtests have passed."
- failure_message = "One or more subtests have failed."
-
- self.add_to_summary(0, "=")
- logger.info("\n%s" % self.summary)
- if self.num_tests_failed == 0:
- self.test_result = "PASS"
- logger.info(success_message)
- else:
- logger.info(failure_message)
-
- return {"status": self.test_result, "details": self.details}
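For context, sfc.py below drives this class as follows; the summary table is 30 characters wide (COMMON_CONFIG.line_length), and compile_summary() returns the status/details dict that sfc.py hands back to run_tests.py:

    from results import Results

    results = Results(30)
    results.add_to_summary(0, "=")                  # separator row
    results.add_to_summary(2, "STATUS", "SUBTEST")  # header row
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "PASS", "SSH Blocked")
    results.add_to_summary(2, "FAIL", "HTTP works")
    summary = results.compile_summary()
    # summary == {"status": "FAIL",
    #             "details": [{"SSH Blocked": "PASS"}, {"HTTP works": "FAIL"}]}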
diff --git a/tests/functest/odl-sfc/run_tests.py b/tests/functest/odl-sfc/run_tests.py
deleted file mode 100644
index cb96c206..00000000
--- a/tests/functest/odl-sfc/run_tests.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import argparse
-import config as sfc_config
-import importlib
-import os
-import sys
-import time
-import opnfv.utils.ovs_logger as ovs_log
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import yaml
-import utils
-import opnfv.utils.SSHUtils as ssh_utils
-
-
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-logger = ft_logger.Logger("sfc-run-tests").getLogger()
-COMMON_CONFIG = sfc_config.CommonConfig()
-
-
-def push_results(testname, start_time, end_time, criteria, details):
- logger.info("Push testcase '%s' results into the DB...\n" % testname)
- ft_utils.push_results_to_db("sfc",
- testname,
- start_time,
- end_time,
- criteria,
- details)
-
-
-def get_tackerc_file():
- rc_file = os.path.join(COMMON_CONFIG.sfc_test_dir, 'tackerc')
- if not os.path.exists(rc_file):
- logger.info("tackerc file not found, getting it from controller")
- ip = utils.get_openstack_node_ips("controller")
- ssh_conn = ssh_utils.get_ssh_client(ip[0], 'root',
- proxy=COMMON_CONFIG.fuel_proxy)
- ssh_utils.get_file(ssh_conn, "tackerc", rc_file)
- else:
- logger.info("found tackerc file")
-
- return rc_file
-
-
-def set_tacker_rc_file_env():
- rc_file = get_tackerc_file()
- with open(rc_file) as f:
- for line in f.readlines():
- if not (line.startswith('#') or len(line) == 1):
- filtered = line.strip().split(' ')
- kv = filtered[1].split('=')
- logger.info("Set shell env %s=%s" % (kv[0], kv[1]))
- os.environ[kv[0]] = kv[1].strip("'")
-
-
-def main():
- set_tacker_rc_file_env()
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- config_file = os.path.join(COMMON_CONFIG.config_file)
- with open(config_file) as f:
- config_yaml = yaml.safe_load(f)
-
- testcases = config_yaml.get("testcases")
- overall_details = {}
- overall_status = "FAIL"
- overall_start_time = time.time()
- for testcase in testcases:
- if testcases[testcase]['enabled']:
- test_name = testcase
- test_descr = testcases[testcase]['description']
- test_name_db = testcases[testcase]['testname_db']
- title = ("Running '%s - %s'" %
- (test_name, test_descr))
- logger.info(title)
- logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(testcase, package=None)
- start_time = time.time()
- result = t.main()
- end_time = time.time()
- duration = end_time - start_time
- status = "FAIL"
- if result != 0:
- overall_details.update({test_name_db: "execution error."})
- else:
- status = result.get("status")
- if status == "FAIL":
- overall_status = "FAIL"
- ovs_logger.create_artifact_archive()
-
- logger.info("Results of test case '%s - %s':\n%s\n" %
- (test_name, test_descr, result))
-
- dic = {"duration": duration, "status": overall_status}
- overall_details.update({test_name_db: dic})
- if args.report:
- details = result.get("details")
- push_results(
- test_name_db, start_time, end_time, status, details)
-
- overall_end_time = time.time()
- if args.report:
- push_results(
- "odl-sfc", overall_start_time, overall_end_time,
- overall_status, overall_details)
-
- if overall_status == "FAIL":
- sys.exit(-1)
-
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
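set_tacker_rc_file_env above assumes the tackerc file consists of lines of the form export KEY='value'; splitting on the first space and then on '=' is what extracts each pair. An illustration with a made-up credentials line (the real tackerc content is fetched from a controller and is not part of this diff):

    line = "export OS_USERNAME='admin'\n"   # hypothetical tackerc line
    filtered = line.strip().split(' ')      # ['export', "OS_USERNAME='admin'"]
    kv = filtered[1].split('=')             # ['OS_USERNAME', "'admin'"]
    key, value = kv[0], kv[1].strip("'")    # ('OS_USERNAME', 'admin')
    # os.environ[key] = value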
diff --git a/tests/functest/odl-sfc/setup_scripts/compute_presetup_CI.bash b/tests/functest/odl-sfc/setup_scripts/compute_presetup_CI.bash
deleted file mode 100755
index 36148aa1..00000000
--- a/tests/functest/odl-sfc/setup_scripts/compute_presetup_CI.bash
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be used with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore.
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/tests/functest/odl-sfc/setup_scripts/delete.sh b/tests/functest/odl-sfc/setup_scripts/delete.sh
deleted file mode 100755
index 3da36148..00000000
--- a/tests/functest/odl-sfc/setup_scripts/delete.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-source ${repos_dir}/sfc/tests/functest/odl-sfc/tackerc
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete blue_ssh
-tacker sfc-classifier-delete red_ssh
-tacker sfc-classifier-delete blue_http
-tacker sfc-delete red
-tacker sfc-delete blue
-tacker vnf-delete testVNF1
-tacker vnf-delete testVNF2
-tacker vnfd-delete test-vnfd1
-tacker vnfd-delete test-vnfd2
-openstack stack delete sfc --y
-openstack stack delete sfc_test1 --y
-openstack stack delete sfc_test2 --y
-nova delete client
-nova delete server
diff --git a/tests/functest/odl-sfc/setup_scripts/prepare_odl_sfc.py b/tests/functest/odl-sfc/setup_scripts/prepare_odl_sfc.py
deleted file mode 100755
index c3162cba..00000000
--- a/tests/functest/odl-sfc/setup_scripts/prepare_odl_sfc.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import sys
-import subprocess
-import paramiko
-import functest.utils.functest_logger as ft_logger
-
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-SFC_REPO_DIR = "/home/opnfv/repos/sfc"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
-    logger.debug("INSTALLER_IP is not set. Defaulting to 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/odl-sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,
- "tests/functest/odl-sfc")
-SETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')
-
-command = SETUP_SCRIPTS_DIR + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
-    logger.debug("Failed to collect the compute IPs over SSH")
-
-output = stdout.readlines()
-for ip in output:
- command = SETUP_SCRIPTS_DIR + ("/compute_presetup_CI.bash " + ip.rstrip() +
- "| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/tests/functest/odl-sfc/setup_scripts/server_presetup_CI.bash b/tests/functest/odl-sfc/setup_scripts/server_presetup_CI.bash
deleted file mode 100755
index 240353f5..00000000
--- a/tests/functest/odl-sfc/setup_scripts/server_presetup_CI.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/tests/functest/odl-sfc/setup_scripts/tacker_client_install.sh b/tests/functest/odl-sfc/setup_scripts/tacker_client_install.sh
deleted file mode 100755
index adb9a44b..00000000
--- a/tests/functest/odl-sfc/setup_scripts/tacker_client_install.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-MYDIR=$(dirname $(readlink -f "$0"))
-CLIENT=$(echo python-python-tackerclient_*_all.deb)
-CLIREPO="tacker-client"
-
-# Function checks whether a python egg is available, if not, installs
-function chkPPkg() {
- PKG="$1"
- IPPACK=$(python - <<'____EOF'
-import pip
-from os.path import join
-for package in pip.get_installed_distributions():
- print(package.location)
- print(join(package.location, *package._get_metadata("top_level.txt")))
-____EOF
-)
- echo "$IPPACK" | grep -q "$PKG"
- if [ $? -ne 0 ];then
- pip install "$PKG"
- fi
-}
-
-function envSetup() {
- apt-get install -y python-all debhelper fakeroot
- #pip install --upgrade python-keystoneclient==1.7.4
- chkPPkg stdeb
-}
-
-# Function installs python-tackerclient from github
-function deployTackerClient() {
- cd $MYDIR
- git clone -b 'SFC_refactor' https://github.com/trozet/python-tackerclient.git $CLIREPO
- cd $CLIREPO
- python setup.py --command-packages=stdeb.command bdist_deb
- cd "deb_dist"
- CLIENT=$(echo python-python-tackerclient_*_all.deb)
- cp $CLIENT $MYDIR
- dpkg -i "${MYDIR}/${CLIENT}"
- apt-get -f -y install
- dpkg -i "${MYDIR}/${CLIENT}"
-}
-
-envSetup
-deployTackerClient
diff --git a/tests/functest/odl-sfc/sfc.py b/tests/functest/odl-sfc/sfc.py
deleted file mode 100755
index 5d1024db..00000000
--- a/tests/functest/odl-sfc/sfc.py
+++ /dev/null
@@ -1,259 +0,0 @@
-import argparse
-import os
-import sys
-import functest.utils.functest_logger as ft_logger
-import functest.utils.openstack_utils as os_utils
-import functest.utils.openstack_tacker as os_tacker
-import threading
-import opnfv.utils.ovs_logger as ovs_log
-import utils as test_utils
-import config as sfc_config
-from results import Results
-
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-# The TestcaseConfig name 'sfc' will be changed once
-# sfc.py is renamed to a more descriptive module name
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc')
-
-
-def main():
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- installer_type = os.environ.get("INSTALLER_TYPE")
- if installer_type != "fuel":
- logger.error(
- '\033[91mCurrently supported only Fuel Installer type\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- test_utils.setup_compute_node(TESTCASE_CONFIG.subnet_cidr)
- test_utils.configure_iptables()
- test_utils.download_image(COMMON_CONFIG.url,
- COMMON_CONFIG.image_path)
- _, custom_flv_id = os_utils.get_or_create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count, public=True)
- if not custom_flv_id:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- glance_client = os_utils.get_glance_client()
- neutron_client = os_utils.get_neutron_client()
- nova_client = os_utils.get_nova_client()
- tacker_client = os_tacker.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients("controller",
- COMMON_CONFIG.fuel_proxy)
- compute_clients = test_utils.get_ssh_clients("compute",
- COMMON_CONFIG.fuel_proxy)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_id = os_utils.create_glance_image(glance_client,
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_path,
- COMMON_CONFIG.image_format,
- public=True)
-
- network_id = test_utils.setup_neutron(neutron_client,
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.router_name,
- TESTCASE_CONFIG.subnet_cidr)
-
- sg_id = test_utils.create_security_groups(neutron_client,
- TESTCASE_CONFIG.secgroup_name,
- TESTCASE_CONFIG.secgroup_descr)
-
- test_utils.create_instance(
- nova_client, CLIENT, COMMON_CONFIG.flavor, image_id,
- network_id, sg_id)
- srv_instance = test_utils.create_instance(
- nova_client, SERVER, COMMON_CONFIG.flavor, image_id,
- network_id, sg_id)
-
- srv_prv_ip = srv_instance.networks.get(TESTCASE_CONFIG.net_name)[0]
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
- os_tacker.create_vnfd(
- tacker_client,
- tosca_file=tosca_file)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
- os_tacker.create_vnfd(
- tacker_client,
- tosca_file=tosca_file)
-
- os_tacker.create_vnf(
- tacker_client, 'testVNF1', vnfd_name='test-vnfd1')
- os_tacker.create_vnf(
- tacker_client, 'testVNF2', vnfd_name='test-vnfd2')
-
- try:
- os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
- os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF2')
- except:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
-
- os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'])
- os_tacker.create_sfc(tacker_client, 'blue', chain_vnf_names=['testVNF2'])
-
- os_tacker.create_sfc_classifier(
- tacker_client, 'red_http', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- os_tacker.create_sfc_classifier(
- tacker_client, 'red_ssh', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 22,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-list'))
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=test_utils.capture_time_log,
- args=(ovs_logger, compute_clients,))
- try:
- t1.start()
- except Exception, e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- server_ip, client_ip, sf1, sf2 = test_utils.get_floating_ips(
- nova_client, neutron_client)
-
- if not test_utils.check_ssh([sf1, sf2]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_ip)
- if not test_utils.start_http_server(server_ip):
- logger.error(
- '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
- sys.exit(1)
-
- logger.info("Starting HTTP firewall on %s" % sf2)
- test_utils.vxlan_firewall(sf2, port="80")
- logger.info("Starting SSH firewall on %s" % sf1)
- test_utils.vxlan_firewall(sf1, port="22")
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test SSH")
- if test_utils.is_ssh_blocked(srv_prv_ip, client_ip):
- results.add_to_summary(2, "PASS", "SSH Blocked")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH Blocked")
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(srv_prv_ip, client_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Changing the classification")
- os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_http')
- os_tacker.delete_sfc_classifier(tacker_client, sfc_clf_name='red_ssh')
-
- os_tacker.create_sfc_classifier(
- tacker_client, 'blue_http', sfc_name='blue',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- os_tacker.create_sfc_classifier(
- tacker_client, 'blue_ssh', sfc_name='blue',
- match={
- 'source_port': 0,
- 'dest_port': 22,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
-
- # Start measuring the time it takes to implement the classification rules
- t2 = threading.Thread(target=test_utils.capture_time_log,
- args=(ovs_logger, compute_clients,))
- try:
- t2.start()
- except Exception, e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t2.join()
-
- logger.info("Test HTTP")
- if test_utils.is_http_blocked(srv_prv_ip, client_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
-
- logger.info("Test SSH")
- if not test_utils.is_ssh_blocked(srv_prv_ip, client_ip):
- results.add_to_summary(2, "PASS", "SSH works")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH works")
-
- return results.compile_summary()
-
-
-if __name__ == '__main__':
- main()
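The os_tacker calls above correspond to plain tacker CLI commands; sfc_tacker_test2.bash later in this diff issues the same kind of commands directly. For example, the 'red' chain and its 'red_http' classifier could equally be created through the run_cmd helper already used above for the list commands:

    test_utils.run_cmd('tacker sfc-create --name red --chain testVNF1')
    test_utils.run_cmd('tacker sfc-classifier-create --name red_http --chain red '
                       '--match source_port=0,dest_port=80,protocol=6')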
diff --git a/tests/functest/odl-sfc/sfc_one_chain_two_service_functions_different_computes.py b/tests/functest/odl-sfc/sfc_one_chain_two_service_functions_different_computes.py
deleted file mode 100755
index 8f289670..00000000
--- a/tests/functest/odl-sfc/sfc_one_chain_two_service_functions_different_computes.py
+++ /dev/null
@@ -1,260 +0,0 @@
-import argparse
-import os
-import sys
-import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import functest.utils.openstack_tacker as os_tacker
-import re
-import opnfv.utils.ovs_logger as ovs_log
-import utils as test_utils
-import config as sfc_config
-
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-
-args = parser.parse_args()
-
-""" logging configuration """
-logger = ft_logger.Logger("ODL_SFC").getLogger()
-
-REPO_PATH = os.path.join(os.environ['REPOS_DIR'], 'sfc/')
-SFC_TEST_DIR = os.path.join(REPO_PATH, "tests/functest/odl-sfc/")
-TACKER_SCRIPT = os.path.join(SFC_TEST_DIR, "sfc_tacker_test2.bash")
-TACKER_VNFD1 = os.path.join(SFC_TEST_DIR, "vnfd-templates", "test2-vnfd1.yaml")
-TACKER_VNFD2 = os.path.join(SFC_TEST_DIR, "vnfd-templates", "test2-vnfd2.yaml")
-CLIENT = "client"
-SERVER = "server"
-ssh_options = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-json_results = {"tests": 2, "failures": 0}
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_one_chain_two_service'
- '_functions_different_computes')
-
-PROXY = {
- 'ip': COMMON_CONFIG.fuel_master_ip,
- 'username': COMMON_CONFIG.fuel_master_uname,
- 'password': COMMON_CONFIG.fuel_master_passwd
-}
-
-
-def update_json_results(name, result):
- json_results.update({name: result})
-    if result != "Passed":
- json_results["failures"] += 1
- return
-
-
-# JIRA: SFC-52 new function
-def setup_availability_zones(nova_client):
- computes = os_utils.get_hypervisors(nova_client)
- az = ["nova::" + computes[0], "nova::" + computes[1]]
- logger.debug("These are the availability zones %s" % az)
- return az
-
-
-# JIRA: SFC-52 new function
-def modify_vnfd(tacker_vnfd, az):
- try:
- with open(tacker_vnfd, 'r') as stream:
- lines = stream.readlines()
- with open(tacker_vnfd, 'w') as stream:
- for line in lines:
- stream.write(re.sub('nova$', az, line))
-
- except Exception, e:
- logger.error("Problem when changing vnfd %s" % e)
-
-
-# JIRA: SFC-52 new function
-def prepare_tacker_vnfd(nova_client):
- azs = setup_availability_zones(nova_client)
- modify_vnfd(TACKER_VNFD1, azs[0])
- modify_vnfd(TACKER_VNFD2, azs[1])
-
-
-def main():
- installer_type = os.environ.get("INSTALLER_TYPE")
- if installer_type != "fuel":
- logger.error(
- '\033[91mCurrently supported only Fuel Installer type\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- start_time = time.time()
- status = "PASS"
- test_utils.configure_iptables()
- test_utils.download_image(COMMON_CONFIG.url,
- COMMON_CONFIG.image_path)
- _, custom_flv_id = os_utils.get_or_create_flavor(
- COMMON_CONFIG.flavor, 1500, 10, 1, public=True)
- if not custom_flv_id:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- glance_client = os_utils.get_glance_client()
- neutron_client = os_utils.get_neutron_client()
- nova_client = os_utils.get_nova_client()
- tacker_client = os_tacker.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients("controller", PROXY)
- compute_clients = test_utils.get_ssh_clients("compute", PROXY)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_id = os_utils.create_glance_image(glance_client,
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_path,
- COMMON_CONFIG.image_format,
- public=True)
-
- network_id = test_utils.setup_neutron(neutron_client,
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.router_name,
- TESTCASE_CONFIG.subnet_cidr)
-
- sg_id = test_utils.create_security_groups(neutron_client,
- TESTCASE_CONFIG.secgroup_name,
- TESTCASE_CONFIG.secgroup_descr)
-
- prepare_tacker_vnfd(nova_client)
-
- test_utils.create_instance(
- nova_client, CLIENT, COMMON_CONFIG.flavor,
- image_id, network_id, sg_id)
-
- srv_instance = test_utils.create_instance(
- nova_client, SERVER, COMMON_CONFIG.flavor, image_id,
- network_id, sg_id)
-
- srv_prv_ip = srv_instance.networks.get(TESTCASE_CONFIG.net_name)[0]
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
-
- os_tacker.create_vnfd(
- tacker_client,
- tosca_file=tosca_file)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
- os_tacker.create_vnfd(
- tacker_client,
- tosca_file=tosca_file)
-
- os_tacker.create_vnf(
- tacker_client, 'testVNF1', vnfd_name='test-vnfd1')
- os_tacker.create_vnf(
- tacker_client, 'testVNF2', vnfd_name='test-vnfd2')
-
- try:
- os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF1')
- os_tacker.wait_for_vnf(tacker_client, vnf_name='testVNF2')
- except:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
-
- os_tacker.create_sfc(tacker_client, 'red',
- chain_vnf_names=['testVNF1', 'testVNF2'])
-
- os_tacker.create_sfc_classifier(
- tacker_client, 'red_http', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-list'))
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
-
- server_ip, client_ip, sf1, sf2 = test_utils.get_floating_ips(
- nova_client, neutron_client)
-
- if not test_utils.check_ssh([sf1, sf2]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_ip)
- if not test_utils.start_http_server(server_ip):
- logger.error(
- '\033[91mFailed to start HTTP server on %s\033[0m' % server_ip)
- sys.exit(1)
-
- logger.info("Starting vxlan_tool on %s" % sf2)
- test_utils.vxlan_firewall(sf2, block=False)
- logger.info("Starting vxlan_tool on %s" % sf1)
- test_utils.vxlan_firewall(sf1, block=False)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- time.sleep(100)
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 1 [PASSED] ==> HTTP WORKS\033[0m')
- update_json_results("Test 1: HTTP works", "Passed")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- update_json_results("Test 1: HTTP works", "Failed")
-
- logger.info("Changing the vxlan_tool to block HTTP traffic")
-
-    # Make SF1 block HTTP traffic now
- test_utils.vxlan_tool_stop(sf1)
- test_utils.vxlan_firewall(sf1, port="80")
-
- logger.info("Test HTTP again")
- if test_utils.is_http_blocked(srv_prv_ip, client_ip):
- logger.info('\033[92mTEST 2 [PASSED] ==> HTTP Blocked\033[0m')
- update_json_results("Test 2: HTTP Blocked", "Passed")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_err_logs(
- ovs_logger, controller_clients, compute_clients, error)
- update_json_results("Test 2: HTTP Blocked", "Failed")
-
- if json_results["failures"]:
- status = "FAIL"
- logger.error('\033[91mSFC TESTS: %s :( FOUND %s FAIL \033[0m' % (
- status, json_results["failures"]))
-
- if args.report:
- stop_time = time.time()
-        logger.debug("SFC results json: " + str(json_results))
- ft_utils.push_results_to_db("sfc",
- "sfc_one_chain_two_service_functions"
- "_different_computes",
- start_time,
- stop_time,
- status,
- json_results)
-
- if status == "PASS":
- logger.info('\033[92mSFC ALL TESTS: %s :)\033[0m' % status)
- sys.exit(0)
-
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
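What prepare_tacker_vnfd above does to the templates: setup_availability_zones builds strings of the form 'nova::<hypervisor>' and modify_vnfd rewrites the 'availability_zone: nova' line of test2-vnfd1.yaml and test2-vnfd2.yaml (shown at the end of this diff), which pins each SF to a different compute node. A sketch of the substitution on a single template line, with an illustrative hypervisor name:

    import re

    az = "nova::node-2.domain.tld"          # e.g. a value built by setup_availability_zones()
    line = "    availability_zone: nova\n"  # line from test2-vnfd1.yaml
    new_line = re.sub('nova$', az, line)
    # new_line == "    availability_zone: nova::node-2.domain.tld\n"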
diff --git a/tests/functest/odl-sfc/sfc_tacker_test2.bash b/tests/functest/odl-sfc/sfc_tacker_test2.bash
deleted file mode 100755
index 156b19cb..00000000
--- a/tests/functest/odl-sfc/sfc_tacker_test2.bash
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-BASEDIR=`dirname $0`
-
-#import VNF descriptor
-tacker vnfd-create --vnfd-file ${BASEDIR}/vnfd-templates/test2-vnfd1.yaml
-tacker vnfd-create --vnfd-file ${BASEDIR}/vnfd-templates/test2-vnfd2.yaml
-
-#create instances of the imported VNF
-tacker vnf-create --name testVNF1 --vnfd-name test-vnfd1
-tacker vnf-create --name testVNF2 --vnfd-name test-vnfd2
-
-key=true
-while $key;do
- sleep 3
- active=`tacker vnf-list | grep -E 'PENDING|ERROR'`
- echo -e "checking if SFs are up: $active"
- if [ -z "$active" ]; then
- key=false
- fi
-done
-
-#create service chain
-tacker sfc-create --name red --chain testVNF1,testVNF2
-
-#create classifier
-tacker sfc-classifier-create --name red_http --chain red --match source_port=0,dest_port=80,protocol=6
-
-tacker sfc-list
-tacker sfc-classifier-list
diff --git a/tests/functest/odl-sfc/utils.py b/tests/functest/odl-sfc/utils.py
deleted file mode 100644
index b4e40622..00000000
--- a/tests/functest/odl-sfc/utils.py
+++ /dev/null
@@ -1,402 +0,0 @@
-import os
-import subprocess
-import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-import re
-import json
-import opnfv.utils.SSHUtils as ssh_utils
-
-
-logger = ft_logger.Logger("sfc_test_utils").getLogger()
-SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-FUNCTEST_RESULTS_DIR = os.path.join("/home", "opnfv",
- "functest", "results", "odl-sfc")
-
-
-def run_cmd(cmd, wdir=None, ignore_stderr=False, ignore_no_output=True):
-    """Run the given command locally and return its output on success"""
- pipe = subprocess.Popen(cmd, shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, cwd=wdir)
-
- (output, errors) = pipe.communicate()
- if output:
- output = output.strip()
- if pipe.returncode < 0:
- logger.error(errors)
- return False
- if errors:
- logger.error(errors)
- return ignore_stderr
-
- if ignore_no_output and not output:
- return True
-
- return output
-
-
-def run_cmd_on_controller(cmd):
- """run given command on OpenStack controller"""
- ip_controllers = get_openstack_node_ips("controller")
- if not ip_controllers:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_controllers[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-
-def run_cmd_on_compute(cmd):
- """run given command on OpenStack Compute node"""
- ip_computes = get_openstack_node_ips("compute")
- if not ip_computes:
- return None
-
- ssh_cmd = "ssh %s %s %s" % (SSH_OPTIONS, ip_computes[0], cmd)
- return run_cmd_on_fm(ssh_cmd)
-
-
-def run_cmd_on_fm(cmd, username="root", passwd="r00tme"):
- """run given command on Fuel Master"""
- ip = os.environ.get("INSTALLER_IP")
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, SSH_OPTIONS, username, ip, cmd)
- return run_cmd(ssh_cmd)
-
-
-def run_cmd_remote(ip, cmd, username="root", passwd="opnfv"):
-    """Run the given command on a remote machine (can be a VM)"""
- ssh_opt_append = "%s -o ConnectTimeout=50 " % SSH_OPTIONS
- ssh_cmd = "sshpass -p %s ssh %s %s@%s %s" % (
- passwd, ssh_opt_append, username, ip, cmd)
- return run_cmd(ssh_cmd)
-
-
-def get_openstack_node_ips(role):
- """Get OpenStack Nodes IP Address"""
- fuel_env = os.environ.get("FUEL_ENV")
- if fuel_env is not None:
- cmd = "fuel2 node list -f json -e %s" % fuel_env
- else:
- cmd = "fuel2 node list -f json"
-
- nodes = run_cmd_on_fm(cmd)
- ips = []
- nodes = json.loads(nodes)
- for node in nodes:
- if role in node["roles"]:
- ips.append(node["ip"])
-
- return ips
-
-
-def configure_iptables():
- """Configures IPTABLES on OpenStack Controller"""
- iptable_cmds = ["iptables -P INPUT ACCEPT",
- "iptables -t nat -P INPUT ACCEPT",
- "iptables -A INPUT -m state \
- --state NEW,ESTABLISHED,RELATED -j ACCEPT"]
-
- for cmd in iptable_cmds:
-        logger.info("Configuring %s on controller" % cmd)
- run_cmd_on_controller(cmd)
-
-
-def download_image(url, image_path):
- image_filename = os.path.basename(image_path)
- image_url = "%s/%s" % (url, image_filename)
- image_dir = os.path.dirname(image_path)
- if not os.path.isfile(image_path):
- logger.info("Downloading image")
- ft_utils.download_url(image_url, image_dir)
- else:
- logger.info("Using old image")
-
-
-def setup_neutron(neutron_client, net, subnet, router, subnet_cidr):
- n_dict = os_utils.create_network_full(neutron_client,
- net,
- subnet,
- router,
- subnet_cidr)
- if not n_dict:
- logger.error("failed to create neutron network")
- return False
-
- return n_dict["net_id"]
-
-
-def setup_ingress_egress_secgroup(neutron_client, protocol,
- min_port=None, max_port=None):
- secgroups = os_utils.get_security_groups(neutron_client)
- for sg in secgroups:
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'ingress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
- os_utils.create_secgroup_rule(neutron_client, sg['id'],
- 'egress', protocol,
- port_range_min=min_port,
- port_range_max=max_port)
-
-
-def create_security_groups(neutron_client, secgroup_name, secgroup_descr):
- sg_id = os_utils.create_security_group_full(neutron_client,
- secgroup_name, secgroup_descr)
- setup_ingress_egress_secgroup(neutron_client, "icmp")
- setup_ingress_egress_secgroup(neutron_client, "tcp", 22, 22)
- setup_ingress_egress_secgroup(neutron_client, "tcp", 80, 80)
- setup_ingress_egress_secgroup(neutron_client, "udp", 67, 68)
- return sg_id
-
-
-def create_instance(nova_client, name, flavor, image_id, network_id, sg_id,
- secgroup_name=None, fixed_ip=None,
- compute_node='', userdata=None, files=None):
- logger.info("Creating instance '%s'..." % name)
- logger.debug(
- "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
- " network=%s\n secgroup=%s \n hypervisor=%s \n"
- " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
- % (name, flavor, image_id, network_id, sg_id,
- compute_node, fixed_ip, files, userdata))
- instance = os_utils.create_instance_and_wait_for_active(
- flavor,
- image_id,
- network_id,
- name,
- config_drive=True,
- userdata=userdata,
- av_zone=compute_node,
- fixed_ip=fixed_ip,
- files=files)
-
- if instance is None:
- logger.error("Error while booting instance.")
- return None
-
- if secgroup_name:
- logger.debug("Adding '%s' to security group '%s'..."
- % (name, secgroup_name))
- else:
- logger.debug("Adding '%s' to security group '%s'..."
- % (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
-
- return instance
-
-
-def ping(remote, pkt_cnt=1, iface=None, retries=100, timeout=None):
- ping_cmd = 'ping'
-
- if timeout:
- ping_cmd = ping_cmd + ' -w %s' % timeout
-
- grep_cmd = "grep -e 'packet loss' -e rtt"
-
- if iface is not None:
- ping_cmd = ping_cmd + ' -I %s' % iface
-
- ping_cmd = ping_cmd + ' -i 0 -c %d %s' % (pkt_cnt, remote)
- cmd = ping_cmd + '|' + grep_cmd
-
- while retries > 0:
- output = run_cmd(cmd)
- if not output:
- return False
-
- match = re.search('(\d*)% packet loss', output)
- if not match:
- return False
-
- packet_loss = int(match.group(1))
- if packet_loss == 0:
- return True
-
- retries -= 1
-
- return False
-
-
-def get_floating_ips(nova_client, neutron_client):
- ips = []
- instances = nova_client.servers.list(search_opts={'all_tenants': 1})
- for instance in instances:
- floatip_dic = os_utils.create_floating_ip(neutron_client)
- floatip = floatip_dic['fip_addr']
- instance.add_floating_ip(floatip)
- logger.info("Instance name and ip %s:%s " % (instance.name, floatip))
- logger.info("Waiting for instance %s:%s to come up" %
- (instance.name, floatip))
- if not ping(floatip):
- logger.info("Instance %s:%s didn't come up" %
- (instance.name, floatip))
- return None
-
- if instance.name == "server":
- logger.info("Server:%s is reachable" % floatip)
- server_ip = floatip
- elif instance.name == "client":
- logger.info("Client:%s is reachable" % floatip)
- client_ip = floatip
- else:
- logger.info("SF:%s is reachable" % floatip)
- ips.append(floatip)
-
- return server_ip, client_ip, ips[1], ips[0]
-
-
-def start_http_server(ip):
-    """Start an HTTP server on a given machine (can be a VM)"""
- cmd = "\'python -m SimpleHTTPServer 80"
- cmd = cmd + " > /dev/null 2>&1 &\'"
- run_cmd_remote(ip, cmd)
- output = run_cmd_remote(ip, "ps aux|grep SimpleHTTPServer")
- if not output:
- logger.error("Failed to start http server")
- return False
-
- logger.info(output)
- return True
-
-
-def vxlan_firewall(sf, iface="eth0", port="22", block=True):
-    """Set a firewall using vxlan_tool.py on a given machine (can be a VM)"""
- cmd = "python vxlan_tool.py -i %s -d forward -v off" % iface
- if block:
- cmd = "python vxlan_tool.py -i eth0 -d forward -v off -b %s" % port
-
- cmd = "sh -c 'cd /root;nohup " + cmd + " > /dev/null 2>&1 &'"
- run_cmd_remote(sf, cmd)
-
-
-def vxlan_tool_stop(sf):
- cmd = "pkill -f vxlan_tool.py"
- run_cmd_remote(sf, cmd)
-
-
-def netcat(s_ip, c_ip, port="80", timeout=5):
-    """Run netcat on a given machine (can be a VM)"""
- cmd = "nc -zv "
- cmd = cmd + " -w %s %s %s" % (timeout, s_ip, port)
- cmd = cmd + " 2>&1"
- output = run_cmd_remote(c_ip, cmd)
- logger.info("%s" % output)
- return output
-
-
-def is_ssh_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="22")
- match = re.search("nc:.*timed out:.*", res, re.M)
- if match:
- return True
-
- return False
-
-
-def is_http_blocked(srv_prv_ip, client_ip):
- res = netcat(srv_prv_ip, client_ip, port="80")
- match = re.search(".* 80 port.* succeeded!", res, re.M)
- if match:
- return False
-
- return True
-
-
-def capture_err_logs(ovs_logger, controller_clients, compute_clients, error):
- timestamp = time.strftime("%Y%m%d-%H%M%S")
- ovs_logger.dump_ovs_logs(controller_clients,
- compute_clients,
- related_error=error,
- timestamp=timestamp)
-
-
-def get_ssh_clients(role, proxy):
- clients = []
- for ip in get_openstack_node_ips(role):
- s_client = ssh_utils.get_ssh_client(ip, 'root', proxy=proxy)
- clients.append(s_client)
-
- return clients
-
-
-def check_ssh(ips, retries=100):
- """Check SSH connectivity to VNFs"""
- check = [False, False]
- logger.info("Checking SSH connectivity to the SFs with ips %s" % str(ips))
- while retries and not all(check):
- for index, ip in enumerate(ips):
- check[index] = run_cmd_remote(ip, "exit")
-
- if all(check):
- logger.info("SSH connectivity to the SFs established")
- return True
-
- time.sleep(3)
- retries -= 1
-
- return False
-
-
-def ofctl_time_counter(ovs_logger, ssh_conn):
- try:
- # We get the flows from table 11
- table = 11
- br = "br-int"
- output = ovs_logger.ofctl_dump_flows(ssh_conn, br, table)
- pattern = "NXM_NX_NSP"
- rsps = []
- lines = output.split(",")
- for line in lines:
- is_there = re.findall(pattern, line)
- if is_there:
- value = line.split(":")[1].split("-")[0]
- rsps.append(value)
- return rsps
- except Exception, e:
-        logger.error('Error when counting RSPs: %s' % e)
- return None
-
-
-@ft_utils.timethis
-def capture_time_log(ovs_logger, compute_clients, timeout=200):
- rsps = ofctl_time_counter(ovs_logger, compute_clients[0])
- first_RSP = rsps[0] if len(rsps) > 0 else ''
- while not ((len(rsps) > 1) and
- (first_RSP != rsps[0]) and
- (rsps[0] == rsps[1])):
- rsps = ofctl_time_counter(ovs_logger, compute_clients[0])
- timeout -= 1
- if timeout == 0:
- logger.error(
- "Timeout but classification rules are not updated")
- return
- time.sleep(1)
- logger.info("classification rules updated")
-
-
-def get_compute_nodes(nova_client, required_node_number=2):
- """Get the compute nodes in the deployment"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
-
- num_compute_nodes = len(compute_nodes)
-    if num_compute_nodes < required_node_number:
-        logger.error("There are %s compute nodes in the deployment. "
-                     "Minimum number of nodes to complete the test is %s."
-                     % (num_compute_nodes, required_node_number))
- return None
-
- logger.debug("Compute nodes: %s" % compute_nodes)
- return compute_nodes
-
-
-def setup_compute_node(cidr):
- logger.info("bringing up br-int iface")
- run_cmd_on_compute("ifconfig br-int up")
- if not run_cmd_on_compute("ip route|grep -o %s" % cidr):
- logger.info("adding route %s" % cidr)
- return run_cmd_on_compute("ip route add %s" % cidr)
- else:
- logger.info("route %s exists" % cidr)
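is_ssh_blocked and is_http_blocked above classify traffic by pattern-matching the nc -zv output that netcat() collects through run_cmd_remote. An illustration with assumed netcat messages (the exact wording depends on the netcat build inside the sf_nsh_colorado image):

    import re

    ok = "Connection to 11.0.0.5 80 port [tcp/http] succeeded!"        # assumed output
    blocked = "nc: connect to 11.0.0.5 port 22 (tcp) timed out: ..."   # assumed output

    print(re.search(".* 80 port.* succeeded!", ok, re.M) is not None)   # True -> HTTP not blocked
    print(re.search("nc:.*timed out:.*", blocked, re.M) is not None)    # True -> SSH blocked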
diff --git a/tests/functest/odl-sfc/vnfd-templates/test-vnfd1.yaml b/tests/functest/odl-sfc/vnfd-templates/test-vnfd1.yaml
deleted file mode 100644
index 5c672e38..00000000
--- a/tests/functest/odl-sfc/vnfd-templates/test-vnfd1.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd1
-description: firewall1-example
-
-service_properties:
- Id: firewall1-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall1
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall1
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1
diff --git a/tests/functest/odl-sfc/vnfd-templates/test-vnfd2.yaml b/tests/functest/odl-sfc/vnfd-templates/test-vnfd2.yaml
deleted file mode 100644
index 8a570ab9..00000000
--- a/tests/functest/odl-sfc/vnfd-templates/test-vnfd2.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd2
-description: firewall2-example
-
-service_properties:
- Id: firewall2-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall2
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall2
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1
diff --git a/tests/functest/odl-sfc/vnfd-templates/test2-vnfd1.yaml b/tests/functest/odl-sfc/vnfd-templates/test2-vnfd1.yaml
deleted file mode 100644
index 5c672e38..00000000
--- a/tests/functest/odl-sfc/vnfd-templates/test2-vnfd1.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd1
-description: firewall1-example
-
-service_properties:
- Id: firewall1-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall1
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall1
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1
diff --git a/tests/functest/odl-sfc/vnfd-templates/test2-vnfd2.yaml b/tests/functest/odl-sfc/vnfd-templates/test2-vnfd2.yaml
deleted file mode 100644
index 8a570ab9..00000000
--- a/tests/functest/odl-sfc/vnfd-templates/test2-vnfd2.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-template_name: test-vnfd2
-description: firewall2-example
-
-service_properties:
- Id: firewall2-vnfd
- vendor: tacker
- version: 1
- type:
- - firewall2
-vdus:
- vdu1:
- id: vdu1
- vm_image: sf_nsh_colorado
- instance_type: custom
- service_type: firewall2
-
- network_interfaces:
- management:
- network: example-net
- management: true
-
- placement_policy:
- availability_zone: nova
-
- auto-scaling: noop
- monitoring_policy: noop
- failure_policy: respawn
-
- config:
- param0: key0
- param1: key1