Diffstat (limited to 'sfc')
-rw-r--r--  sfc/lib/cleanup.py  |  83
-rw-r--r--  sfc/lib/config.py  |  111
-rw-r--r--  sfc/lib/odl_utils.py  |  93
-rw-r--r--  sfc/lib/openstack_utils.py  |  517
-rw-r--r--  sfc/lib/results.py  |  1
-rw-r--r--  sfc/lib/test_utils.py  |  18
-rw-r--r--  sfc/tests/NAME_tests.py  |  11
-rw-r--r--  sfc/tests/functest/README.tests  |  37
-rw-r--r--  sfc/tests/functest/config-pike.yaml  |  84
-rw-r--r--  sfc/tests/functest/config.yaml  |  48
-rw-r--r--  sfc/tests/functest/pod.yaml.sample  |  58
-rw-r--r--  sfc/tests/functest/run_sfc_tests.py  |  91
-rw-r--r--  sfc/tests/functest/setup_scripts/compute_presetup_CI.bash  |  27
-rw-r--r--  sfc/tests/functest/setup_scripts/delete.sh  |  8
-rw-r--r--  sfc/tests/functest/setup_scripts/delete_symmetric.sh  |  9
-rw-r--r--  sfc/tests/functest/setup_scripts/prepare_odl_sfc.py  |  92
-rw-r--r--  sfc/tests/functest/setup_scripts/server_presetup_CI.bash  |  13
-rw-r--r--  sfc/tests/functest/sfc_chain_deletion.py  |  300
-rw-r--r--  sfc/tests/functest/sfc_one_chain_two_service_functions.py  |  307
-rw-r--r--  sfc/tests/functest/sfc_parent_function.py  |  768
-rw-r--r--  sfc/tests/functest/sfc_symmetric_chain.py  |  393
-rw-r--r--  sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py  |  351
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml  |  38
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml  |  10
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml  |  40
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml  |  10
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml  |  14
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens  |  46
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml  |  38
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml  |  39
-rw-r--r--  sfc/unit_tests/__init__.py (renamed from sfc/tests/functest/setup_scripts/__init__.py)  |  0
-rw-r--r--  sfc/unit_tests/unit/__init__.py  |  0
-rw-r--r--  sfc/unit_tests/unit/lib/test_cleanup.py  |  469
-rw-r--r--  sfc/unit_tests/unit/lib/test_odl_utils.py  |  817
-rw-r--r--  sfc/unit_tests/unit/lib/test_openstack_utils.py  |  2504
-rw-r--r--  sfc/unit_tests/unit/lib/test_test_utils.py  |  543
36 files changed, 6241 insertions(+), 1747 deletions(-)
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
index 7a2f4053..e97034ad 100644
--- a/sfc/lib/cleanup.py
+++ b/sfc/lib/cleanup.py
@@ -1,8 +1,9 @@
+import logging
import sys
import time
-import logging
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
+from openstack import connection
logger = logging.getLogger(__name__)
@@ -73,15 +74,6 @@ def delete_vims():
os_sfc_utils.delete_vim(t, vim_id=vim)
-# Creators is a list full of SNAPs objects
-def delete_openstack_objects(creators):
- for creator in reversed(creators):
- try:
- creator.clean()
- except Exception as e:
- logger.error('Unexpected error cleaning - %s', e)
-
-
# Networking-odl generates a new security group when creating a router
# which is not tracked by SNAPs
def delete_untracked_security_groups():
@@ -91,32 +83,79 @@ def delete_untracked_security_groups():
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
- delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
- delete_odl_resources(odl_ip, odl_port, 'service-function-path')
- delete_odl_resources(odl_ip, odl_port, 'service-function')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-path')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function')
delete_odl_ietf_access_lists(odl_ip, odl_port)
-def cleanup(creators, odl_ip=None, odl_port=None):
+def cleanup_nsfc_objects():
+ '''
+ cleanup the networking-sfc objects created for the test
+ '''
+ # TODO Add n-sfc to snaps so that it can be removed through
+ # delete_openstack_objects
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+
+def cleanup_tacker_objects():
+ '''
+ cleanup the tacker objects created for the test
+ '''
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
- delete_openstack_objects(creators)
+
+
+def cleanup_mano_objects(mano):
+ '''
+ Cleanup the mano objects (chains, classifiers, etc)
+ '''
+ if mano == 'tacker':
+ cleanup_tacker_objects()
+ elif mano == 'no-mano':
+ cleanup_nsfc_objects()
+
+
+def delete_openstack_objects(testcase_config, creators):
+ conn = connection.from_config(verify=False)
+ for creator in creators:
+ if creator.name == testcase_config.subnet_name:
+ subnet_obj = creator
+
+ for creator in reversed(creators):
+ try:
+ logger.info("Deleting " + creator.name)
+ if creator.name == testcase_config.router_name:
+ logger.info("Removing subnet from router")
+ conn.network.remove_interface_from_router(
+ creator.id, subnet_obj.id)
+ time.sleep(2)
+ logger.info("Deleting router")
+ conn.network.delete_router(creator)
+ else:
+ creator.delete(conn.session)
+ time.sleep(2)
+ creators.remove(creator)
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
+
+
+def cleanup(testcase_config, creators, mano, odl_ip=None, odl_port=None):
+ cleanup_mano_objects(mano)
+ delete_openstack_objects(testcase_config, creators)
delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
-def cleanup_from_bash(odl_ip=None, odl_port=None):
- delete_vnffgs()
- delete_vnffgds()
- delete_vnfs()
- time.sleep(20)
- delete_vnfds()
- delete_vims()
+def cleanup_from_bash(odl_ip=None, odl_port=None, mano='no-mano'):
+ cleanup_mano_objects(mano=mano)
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
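
Note on usage: the cleanup entry points above now take the MANO component into account; 'tacker' tears down VNFFGs, VNFs, VNFDs and VIMs, while 'no-mano' removes the networking-sfc chains and port groups. A minimal sketch, assuming an OpenStack environment is configured (and optionally a reachable ODL instance), of calling the standalone variant:

    import sfc.lib.cleanup as sfc_cleanup

    # 'no-mano' selects the networking-sfc teardown path, 'tacker' the tacker one.
    # Without odl_ip/odl_port no ODL resources are touched.
    sfc_cleanup.cleanup_from_bash(mano='no-mano')
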
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index a4f5d67b..bf9864a5 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -8,19 +8,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+
import logging
import os
+import sfc
import yaml
import functest
-
-from functest.utils import config
-from functest.utils import env
import functest.utils.functest_utils as ft_utils
-
-import sfc
import sfc.lib.test_utils as test_utils
+from functest.utils import config
+from functest.utils import env
+
logger = logging.getLogger(__name__)
@@ -43,50 +43,58 @@ class CommonConfig(object):
self.vnffgd_dir = os.path.join(self.sfc_test_dir, "vnffgd-templates")
self.functest_results_dir = os.path.join(
getattr(config.CONF, 'dir_results'), "odl-sfc")
-
- # We need to know the openstack version in order to use one config or
- # another. For Pike we will use config-pike.yaml. Queens and Rocky
- # will use config.yaml
- if 'OPENSTACK_OSA_VERSION' in os.environ:
- if os.environ['OPENSTACK_OSA_VERSION'] == 'stable/pike':
- self.config_file = os.path.join(self.sfc_test_dir,
- "config-pike.yaml")
- else:
- self.config_file = os.path.join(self.sfc_test_dir,
- "config.yaml")
- else:
- self.config_file = os.path.join(self.sfc_test_dir,
- "config-pike.yaml")
-
+ self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
- self.installer_type = env.get('INSTALLER_TYPE')
-
- self.installer_fields = test_utils.fill_installer_dict(
- self.installer_type)
-
- self.installer_ip = env.get('INSTALLER_IP')
+ pod_yaml_exists = os.path.isfile(self.sfc_test_dir + "/pod.yaml")
- self.installer_user = ft_utils.get_parameter_from_yaml(
- self.installer_fields['user'], self.config_file)
+ if pod_yaml_exists:
+ self.pod_file = os.path.join(self.sfc_test_dir, "pod.yaml")
+ self.nodes_pod = ft_utils.get_parameter_from_yaml(
+ "nodes", self.pod_file)
+ self.host_ip = self.nodes_pod[0]['ip']
+ self.host_user = self.nodes_pod[0]['user']
- try:
- self.installer_password = ft_utils.get_parameter_from_yaml(
- self.installer_fields['password'], self.config_file)
- except Exception:
- self.installer_password = None
-
- try:
- self.installer_key_file = ft_utils.get_parameter_from_yaml(
- self.installer_fields['pkey_file'], self.config_file)
- except Exception:
- self.installer_key_file = None
-
- try:
- self.installer_cluster = ft_utils.get_parameter_from_yaml(
- self.installer_fields['cluster'], self.config_file)
- except Exception:
+ self.installer_type = 'configByUser'
+ self.installer_ip = self.host_ip
+ self.installer_user = self.host_user
self.installer_cluster = None
+ try:
+ self.installer_password = self.nodes_pod[0]['password']
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = self.nodes_pod[0]['key_filename']
+ except Exception:
+ self.installer_key_file = None
+ else:
+ self.nodes_pod = None
+ self.host_ip = None
+ self.installer_type = env.get('INSTALLER_TYPE')
+ self.installer_fields = test_utils.fill_installer_dict(
+ self.installer_type)
+ self.installer_ip = env.get('INSTALLER_IP')
+ self.installer_user = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['user'], self.config_file)
+
+ try:
+ self.installer_password = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['password'], self.config_file)
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['pkey_file'], self.config_file)
+ except Exception:
+ self.installer_key_file = None
+
+ try:
+ self.installer_cluster = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['cluster'], self.config_file)
+ except Exception:
+ self.installer_cluster = None
self.flavor = ft_utils.get_parameter_from_yaml(
"defaults.flavor", self.config_file)
@@ -102,6 +110,21 @@ class CommonConfig(object):
"defaults.image_format", self.config_file)
self.image_url = ft_utils.get_parameter_from_yaml(
"defaults.image_url", self.config_file)
+ self.mano_component = ft_utils.get_parameter_from_yaml(
+ "defaults.mano_component", self.config_file)
+ try:
+ self.vnf_image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_name", self.config_file)
+ self.vnf_image_url = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_url", self.config_file)
+ self.vnf_image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_format", self.config_file)
+ except ValueError:
+ # If the parameter does not exist we use the default
+ self.vnf_image_name = self.image_name
+ self.vnf_image_url = self.image_url
+ self.vnf_image_format = self.image_format
+
self.dir_functest_data = getattr(config.CONF, 'dir_functest_data')
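
CommonConfig now prefers a user-provided pod.yaml over the INSTALLER_* environment variables: when sfc/tests/functest/pod.yaml exists, the installer type becomes 'configByUser' and the host details come from the first node in that file. A minimal sketch of that lookup, assuming a pod.yaml shaped like the pod.yaml.sample added further below:

    import yaml

    # The real code builds the path from self.sfc_test_dir; 'pod.yaml' here is
    # just an illustrative relative path.
    with open('pod.yaml') as f:
        nodes_pod = yaml.safe_load(f)['nodes']

    host = nodes_pod[0]                            # node0, role 'Host'
    installer_ip = host['ip']
    installer_user = host['user']
    installer_key_file = host.get('key_filename')  # optional
    installer_password = host.get('password')      # optional alternative
    print(installer_ip, installer_user)
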
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index e1980423..2c657a13 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -1,16 +1,17 @@
import ConfigParser
+import functools
+import json
+import logging
import os
+import re
import requests
import time
-import json
-import re
-import logging
-import functools
-import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
-
+odl_username = 'admin'
+odl_password = 'admin'
ODL_MODULE_EXCEPTIONS = {
"service-function-path-state": "service-function-path"
@@ -24,15 +25,16 @@ ODL_PLURAL_EXCEPTIONS = {
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
- table=101, n_packets=7, n_bytes=595, priority=500,tcp,in_port=2,tp_dst=80
- actions=push_nsh,load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],
- load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
- load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
+ cookie=0xf005ba1100000002, duration=5.843s, table=101, n_packets=0,
+ n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80
+ actions=load:0x169->NXM_NX_REG2[8..31],load:0xff->NXM_NX_REG2[0..7],
+ resubmit(,17)', u' cookie=0xf005ba1100000002, duration=5.825s, table=101,
+ n_packets=2, n_bytes=684, priority=10 actions=resubmit(,17)
'''
match_rsp = re.compile(r'.+'
r'(tp_(?:src|dst)=[0-9]+)'
r'.+'
- r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
+ r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
@@ -64,7 +66,7 @@ def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
# We get the first ace. ODL creates a new ACL
# with one ace for each classifier
ace = acl['access-list-entries']['ace'][0]
- except:
+ except Exception:
logger.warn('ACL {0} does not have an ACE'.format(
acl['acl-name']))
continue
@@ -200,6 +202,11 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
+ # When swapping classifiers promised_rsps update takes time to
+ # get updated
+ # TODO: Need to optimise this code
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
logger.info("RSPs in ODL Operational DataStore"
"for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
@@ -245,6 +252,36 @@ def get_odl_ip_port(nodes):
return ip, port
+def get_odl_ip_port_no_installer(nodes_pod):
+ node_index = 0
+ for n in nodes_pod:
+ if n['role'] == 'Controller':
+ break
+ node_index += 1
+ remote_ml2_conf_etc = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+ os.system('scp {0}@{1}:{2} .'.
+ format(nodes_pod[node_index]['user'],
+ nodes_pod[node_index]['ip'],
+ remote_ml2_conf_etc))
+ file = open('ml2_conf.ini', 'r')
+ string = re.findall(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+', file.read())
+ file.close()
+ ip = string[0].split(':')[0]
+ port = string[0].split(':')[1]
+ return ip, port
+
+
+def get_odl_username_password():
+ local_ml2_conf_file = os.path.join(os.getcwd(), 'ml2_conf.ini')
+ con_par = ConfigParser.RawConfigParser()
+ con_par.read(local_ml2_conf_file)
+ global odl_username
+ odl_username = con_par.get('ml2_odl', 'username')
+ global odl_password
+ odl_password = con_par.get('ml2_odl', 'password')
+ return odl_username, odl_password
+
+
def pluralize(resource):
plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
if not plural:
@@ -260,11 +297,11 @@ def get_module(resource):
def format_odl_resource_list_url(odl_ip, odl_port, resource,
- datastore='config', odl_user='admin',
- odl_pwd='admin'):
+ datastore='config', odl_user=odl_username,
+ odl_pwd=odl_password):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
- .format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=get_module(resource),
+ .format(usr=odl_username, pwd=odl_password, ip=odl_ip,
+ port=odl_port, ds=datastore, rsrc=get_module(resource),
rsrcs=pluralize(resource)))
@@ -314,10 +351,10 @@ def odl_acl_types_names(acl_json):
def format_odl_acl_list_url(odl_ip, odl_port,
- odl_user='admin', odl_pwd='admin'):
+ odl_user=odl_username, odl_pwd=odl_password):
acl_list_url = ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/'
'ietf-access-control-list:access-lists'
- .format(usr=odl_user, pwd=odl_pwd,
+ .format(usr=odl_username, pwd=odl_password,
ip=odl_ip, port=odl_port))
return acl_list_url
@@ -410,7 +447,8 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
try:
compute = find_compute(compute_client_name, compute_nodes)
except Exception as e:
- logger.debug("There was an error getting the compute: e" % e)
+ logger.debug("There was an error getting the compute: %s" % e)
+ return False
retries_counter = retries
@@ -427,20 +465,3 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
return False
return True
-
-
-def create_chain(tacker_client, default_param_file, neutron_port,
- COMMON_CONFIG, TESTCASE_CONFIG):
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
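
With no installer handler available, the ODL endpoint and credentials are now derived from the controller's ml2_conf.ini (fetched via scp by get_odl_ip_port_no_installer). A minimal sketch of that parsing, assuming a local copy of ml2_conf.ini containing an [ml2_odl] section:

    import re
    import ConfigParser  # Python 2, matching the module above

    with open('ml2_conf.ini') as f:
        # The first ip:port found in the file is taken as the ODL REST endpoint.
        endpoint = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+', f.read())[0]
    odl_ip, odl_port = endpoint.split(':')

    parser = ConfigParser.RawConfigParser()
    parser.read('ml2_conf.ini')
    odl_username = parser.get('ml2_odl', 'username')
    odl_password = parser.get('ml2_odl', 'password')
    print(odl_ip, odl_port, odl_username)
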
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index 0b343f37..c46ff123 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -1,44 +1,31 @@
-import logging
import os
import time
import json
+import logging
import yaml
+import urllib2
+
+
from tackerclient.tacker import client as tackerclient
from functest.utils import constants
from functest.utils import env
-
from snaps.openstack.tests import openstack_tests
-
-from snaps.openstack.create_image import OpenStackImage
-from snaps.config.image import ImageConfig
-
-from snaps.config.flavor import FlavorConfig
-from snaps.openstack.create_flavor import OpenStackFlavor
-
-from snaps.config.network import NetworkConfig, SubnetConfig, PortConfig
-from snaps.openstack.create_network import OpenStackNetwork
-
-from snaps.config.router import RouterConfig
-from snaps.openstack.create_router import OpenStackRouter
-
-from snaps.config.security_group import (
- Protocol, SecurityGroupRuleConfig, Direction, SecurityGroupConfig)
-
-from snaps.openstack.create_security_group import OpenStackSecurityGroup
-
+from snaps.config.vm_inst import FloatingIpConfig
import snaps.openstack.create_instance as cr_inst
-from snaps.config.vm_inst import VmInstanceConfig, FloatingIpConfig
-
from snaps.openstack.utils import (
nova_utils, neutron_utils, heat_utils, keystone_utils)
+from openstack import connection
+from neutronclient.neutron import client as neutronclient
logger = logging.getLogger(__name__)
DEFAULT_TACKER_API_VERSION = '1.0'
+DEFAULT_API_VERSION = '2'
class OpenStackSFC:
def __init__(self):
+ self.conn = self.get_os_connection()
self.os_creds = openstack_tests.get_credentials(
os_env_file=constants.ENV_FILE)
self.creators = []
@@ -46,113 +33,191 @@ class OpenStackSFC:
self.neutron = neutron_utils.neutron_client(self.os_creds)
self.heat = heat_utils.heat_client(self.os_creds)
self.keystone = keystone_utils.keystone_client(self.os_creds)
+ self.neutron_client = neutronclient.\
+ Client(self.get_neutron_client_version(),
+ session=self.conn.session)
+
+ def get_os_connection(self):
+ return connection.from_config(verify=False)
+
+ def get_neutron_client_version(self):
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is %s" % api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def register_glance_image(self, name, url, img_format, public):
- image_settings = ImageConfig(name=name, img_format=img_format, url=url,
- public=public, image_user='admin')
+ logger.info("Registering the image...")
+ image = self.conn.image.find_image(name)
+ if image:
+ logger.info("Image %s already exists." % image.name)
+ else:
+ if 'http' in url:
+ logger.info("Downloading image")
+ response = urllib2.urlopen(url)
+ image_data = response.read()
+ else:
+ with open(url) as f:
+ image_data = f.read()
- # TODO Remove this when tacker is part of SNAPS
- self.image_settings = image_settings
+ image_settings = {'name': name,
+ 'disk_format': img_format,
+ 'data': image_data,
+ 'is_public': public,
+ 'container_format': 'bare'}
+ image = self.conn.image.upload_image(**image_settings)
+ self.creators.append(image)
+ logger.info("Image created")
- image_creator = OpenStackImage(self.os_creds, image_settings)
- image_creator.create()
+ self.image_settings = image_settings
- self.creators.append(image_creator)
- return image_creator
+ return image
def create_flavor(self, name, ram, disk, vcpus):
- flavor_settings = FlavorConfig(name=name, ram=ram, disk=disk,
- vcpus=vcpus)
- flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)
- flavor = flavor_creator.create()
+ logger.info("Creating flavor...")
+ flavor_settings = {"name": name, "ram": ram, "disk": disk,
+ "vcpus": vcpus}
+
+ flavor = self.conn.compute.create_flavor(**flavor_settings)
- self.creators.append(flavor_creator)
+ self.creators.append(flavor)
return flavor
def create_network_infrastructure(self, net_name, subnet_name, subnet_cidr,
router_name):
+ logger.info("Creating Networks...")
# Network and subnet
- subnet_settings = SubnetConfig(name=subnet_name, cidr=subnet_cidr)
- network_settings = NetworkConfig(name=net_name,
- subnet_settings=[subnet_settings])
- network_creator = OpenStackNetwork(self.os_creds, network_settings)
- network = network_creator.create()
+ network = self.conn.network.create_network(name=net_name)
+ self.creators.append(network)
- self.creators.append(network_creator)
+ subnet_settings = {"name": subnet_name, "cidr": subnet_cidr,
+ "network_id": network.id, 'ip_version': '4'}
+ subnet = self.conn.network.create_subnet(**subnet_settings)
+ self.creators.append(subnet)
# Router
ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+ router_dict = {'network_id': ext_net.id}
- router_settings = RouterConfig(name=router_name,
- external_gateway=ext_network_name,
- internal_subnets=[subnet_name])
+ logger.info("Creating Router...")
+ router = self.conn.network.create_router(name=router_name)
- router_creator = OpenStackRouter(self.os_creds, router_settings)
- router = router_creator.create()
+ self.conn.network.add_interface_to_router(router.id,
+ subnet_id=subnet.id)
- self.creators.append(router_creator)
+ self.conn.network.update_router(router.id,
+ external_gateway_info=router_dict)
+ router_obj = self.conn.network.get_router(router.id)
+ self.creators.append(router_obj)
- return network, router
+ return network, router_obj
def create_security_group(self, sec_grp_name):
- rule_ping = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.icmp)
-
- rule_ssh = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=22,
- port_range_max=22)
-
- rule_http = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=80,
- port_range_max=80)
+ logger.info("Creating the security groups...")
+ sec_group = self.conn.network.create_security_group(name=sec_grp_name)
- rules = [rule_ping, rule_ssh, rule_http]
+ rule_ping = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "icmp"}
+
+ rule_ssh = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 22,
+ "port_range_max": 22}
- secgroup_settings = SecurityGroupConfig(name=sec_grp_name,
- rule_settings=rules)
+ rule_http = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 80,
+ "port_range_max": 80}
- sec_group_creator = OpenStackSecurityGroup(self.os_creds,
- secgroup_settings)
- sec_group = sec_group_creator.create()
+ rules = [rule_ping, rule_ssh, rule_http]
+
+ for rule in rules:
+ self.conn.network.create_security_group_rule(**rule)
- self.creators.append(sec_group_creator)
+ self.creators.append(sec_group)
return sec_group
- def create_instance(self, vm_name, flavor_name, image_creator, network,
- secgrp, av_zone):
+ def create_instance(self, vm_name, flavor, image, network,
+ sec_group, av_zone, ports, port_security=True):
+ logger.info("Creating Key Pair {}...".format(vm_name))
+
+ keypair = self.conn.compute.\
+ create_keypair(name="{}_keypair".format(vm_name))
+ self.creators.append(keypair)
+ flavor_obj = self.conn.compute.find_flavor(flavor)
+
+ logger.info("Creating Port {}...".format(ports))
+ port_list = []
+ for port in ports:
+ if port_security:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id, security_group_ids=[sec_group.id])
+ else:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id)
+ port_list.append(port_obj)
+ self.creators.append(port_obj)
+ logger.info("Creating the instance {}...".format(vm_name))
+
+ if len(port_list) > 1:
+ network_list = [{"port": port_list[0].id},
+ {"port": port_list[1].id}]
+ else:
+ network_list = [{"port": port_obj.id}]
- port_settings = PortConfig(name=vm_name + '-port',
- network_name=network.name)
+ instance = self.conn.compute.create_server(name=vm_name,
+ image_id=image.id,
+ flavor_id=flavor_obj.id,
+ networks=network_list,
+ key_name=keypair.name,
+ availability_zone=av_zone)
- instance_settings = VmInstanceConfig(
- name=vm_name, flavor=flavor_name,
- security_group_names=str(secgrp.name),
- port_settings=[port_settings],
- availability_zone=av_zone)
+ logger.info("Waiting for {} to become Active".format(instance.name))
+ self.conn.compute.wait_for_server(instance)
+ logger.info("{} is active".format(instance.name))
- instance_creator = cr_inst.OpenStackVmInstance(
- self.os_creds,
- instance_settings,
- image_creator.image_settings)
+ self.creators.append(instance)
- instance = instance_creator.create()
+ return instance, port_list
- self.creators.append(instance_creator)
- return instance, instance_creator
+ def get_instance(self, instance_id):
+ """
+ Return a dictionary of metadata for a server instance
+ """
+ return self.conn.compute.get_server_metadata(instance_id)
def get_av_zones(self):
'''
Return the availability zone each host belongs to
'''
- hosts = nova_utils.get_hypervisor_hosts(self.nova)
+ hosts = self.get_hypervisor_hosts()
return ['nova::{0}'.format(host) for host in hosts]
+ def get_hypervisor_hosts(self):
+ """
+ Returns the host names of all nova nodes with active hypervisors
+ :param nova: the Nova client
+ :return: a list of hypervisor host names
+ """
+ try:
+ nodes = []
+ hypervisors = self.conn.compute.hypervisors()
+ for hypervisor in hypervisors:
+ if hypervisor.state == "up":
+ nodes.append(hypervisor.name)
+ return nodes
+ except Exception as e:
+ logger.error("Error [get_hypervisors(compute)]: %s" % e)
+ return None
+
def get_compute_client(self):
'''
Return the compute where the client sits
@@ -178,18 +243,37 @@ class OpenStackSFC:
raise Exception("There is no VM with name '{}'!!".format(vm_name))
- def assign_floating_ip(self, router, vm, vm_creator):
+ def get_port_by_ip(self, ip_address):
+ """
+ Return the port object whose fixed IP matches the given
+ ip_address
+ """
+
+ ports = self.conn.network.ports()
+ for port in ports:
+ if port.fixed_ips[0]['ip_address'] == ip_address:
+ return self.conn.network.get_port(port.id)
+
+ def assign_floating_ip(self, vm, vm_port):
'''
Assign floating ips to all the VMs
'''
- name = vm.name + "-float"
- port_name = vm.ports[0].name
- float_ip = FloatingIpConfig(name=name,
- port_name=port_name,
- router_name=router.name)
- ip = vm_creator.add_floating_ip(float_ip)
+ logger.info(" Creating floating ips ")
+
+ ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+
+ fip = self.conn.network.create_ip(floating_network_id=ext_net.id,
+ port_id=vm_port.id)
+ logger.info(
+ " FLoating IP address {} created".format(fip.floating_ip_address))
- return ip.ip
+ logger.info(" Adding Floating IPs to instances ")
+ self.conn.compute.add_floating_ip_to_server(
+ vm.id, fip.floating_ip_address)
+
+ self.creators.append(fip)
+ return fip.floating_ip_address
# We need this function because tacker VMs cannot be created through SNAPs
def assign_floating_ip_vnfs(self, router, ips=None):
@@ -204,7 +288,7 @@ class OpenStackSFC:
for stack in stacks:
servers = heat_utils.get_stack_servers(self.heat,
self.nova,
- self.neutron,
+ self.neutron_client,
self.keystone,
stack,
project_name)
@@ -224,10 +308,11 @@ class OpenStackSFC:
break
if port_name is None:
- err_msg = "The VNF {} does not have any suitable port {} " \
- "for floating IP assignment".format(
- name,
- 'with ip any of ' + str(ips) if ips else '')
+ err_msg = ("The VNF {} does not have any suitable port {} "
+ "for floating IP assignment"
+ .format(name,
+ 'with ip any of ' +
+ str(ips) if ips else ''))
logger.error(err_msg)
raise Exception(err_msg)
@@ -240,11 +325,12 @@ class OpenStackSFC:
return fips
- def get_client_port(self, vm, vm_creator):
+ def get_instance_port(self, vm, vm_creator, port_name=None):
'''
Get the neutron port id of the client
'''
- port_name = vm.name + "-port"
+ if not port_name:
+ port_name = vm.name + "-port"
port = vm_creator.get_port_by_name(port_name)
if port is not None:
return port
@@ -256,13 +342,209 @@ class OpenStackSFC:
def delete_all_security_groups(self):
'''
Deletes all the available security groups
-
Needed until this bug is fixed:
https://bugs.launchpad.net/networking-odl/+bug/1763705
'''
- sec_groups = neutron_utils.list_security_groups(self.neutron)
+ logger.info("Deleting remaining security groups...")
+ sec_groups = self.conn.network.security_groups()
for sg in sec_groups:
- neutron_utils.delete_security_group(self.neutron, sg)
+ self.conn.network.delete_security_group(sg)
+
+ def wait_for_vnf(self, vnf_creator):
+ '''
+ Waits for VNF to become active
+ '''
+ return vnf_creator.vm_active(block=True, poll_interval=5)
+
+ def create_port_groups(self, vnf_ports, vm_instance):
+ '''
+ Creates a networking-sfc port pair and group
+ '''
+ logger.info("Creating the port pairs...")
+ port_pair = dict()
+ port_pair['name'] = vm_instance.name + '-connection-points'
+ port_pair['description'] = 'port pair for ' + vm_instance.name
+
+ # In the symmetric testcase ingress != egress (VNF has 2 interfaces)
+ if len(vnf_ports) == 1:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[0].id
+ elif len(vnf_ports) == 2:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[1].id
+ else:
+ logger.error("Only SFs with one or two ports are supported")
+ raise Exception("Failed to create port pairs")
+ port_pair_info = \
+ self.neutron_client.create_sfc_port_pair({'port_pair': port_pair})
+ if not port_pair_info:
+ logger.warning("Chain creation failed due to port pair "
+ "creation failed for vnf %(vnf)s",
+ {'vnf': vm_instance.name})
+ return None
+
+ # Avoid race conditions by checking the port pair is already committed
+ iterations = 5
+ found_it = False
+ for _ in range(iterations):
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ if pp['id'] == port_pair_info['port_pair']['id']:
+ found_it = True
+ break
+ if found_it:
+ break
+ else:
+ time.sleep(3)
+
+ if not found_it:
+ raise Exception("Port pair was not committed in openstack")
+
+ logger.info("Creating the port pair groups for %s" % vm_instance.name)
+
+ port_pair_group = {}
+ port_pair_group['name'] = vm_instance.name + '-port-pair-group'
+ port_pair_group['description'] = \
+ 'port pair group for ' + vm_instance.name
+ port_pair_group['port_pairs'] = []
+ port_pair_group['port_pairs'].append(port_pair_info['port_pair']['id'])
+ ppg_config = {'port_pair_group': port_pair_group}
+ port_pair_group_info = \
+ self.neutron_client.create_sfc_port_pair_group(ppg_config)
+ if not port_pair_group_info:
+ logger.warning("Chain creation failed due to port pair group "
+ "creation failed for vnf "
+ "{}".format(vm_instance.name))
+ return None
+
+ return port_pair_group_info['port_pair_group']['id']
+
+ def create_classifier(self, neutron_port, port, protocol, fc_name,
+ symmetrical, server_port=None, server_ip=None):
+ '''
+ Create the classifier
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': fc_name,
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ def create_chain(self, port_groups, neutron_port, port, protocol,
+ vnffg_name, symmetrical, server_port=None,
+ server_ip=None):
+ '''
+ Create the flow classifier and the port chain
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ fc_info = \
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ logger.info("Creating the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['description'] = 'port-chain for SFC'
+ port_chain['port_pair_groups'] = port_groups
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_info['flow_classifier']['id'])
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ return self.neutron_client.create_sfc_port_chain(chain_config)
+
+ def update_chain(self, vnffg_name, fc_name, symmetrical):
+ '''
+ Update the new Flow Classifier ID
+ '''
+ fc_id = self.neutron_client.find_resource('flow_classifier',
+ fc_name)['id']
+ logger.info("Update the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_id)
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ pc_id = self.neutron_client.find_resource('port_chain',
+ port_chain['name'])['id']
+ return self.neutron_client.update_sfc_port_chain(pc_id, chain_config)
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+
+ '''
+ Swap Classifiers
+ '''
+ logger.info("Swap classifiers...")
+
+ self.update_chain(vnffg_1_name, 'dummy', symmetric)
+ vnffg_1_classifier_name = vnffg_1_name + '-classifier'
+ self.update_chain(vnffg_2_name, vnffg_1_classifier_name, symmetric)
+ vnffg_2_classifier_name = vnffg_2_name + '-classifier'
+ self.update_chain(vnffg_1_name, vnffg_2_classifier_name, symmetric)
+
+ def delete_port_groups(self):
+ '''
+ Delete all port groups and port pairs
+ '''
+ logger.info("Deleting the port groups...")
+ ppg_list = self.neutron_client.\
+ list_sfc_port_pair_groups()['port_pair_groups']
+ for ppg in ppg_list:
+ self.neutron_client.delete_sfc_port_pair_group(ppg['id'])
+
+ logger.info("Deleting the port pairs...")
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ self.neutron_client.delete_sfc_port_pair(pp['id'])
+
+ def delete_chain(self):
+ '''
+ Delete the classifiers and the chains
+ '''
+ logger.info("Deleting the chain...")
+ pc_list = self.neutron_client.list_sfc_port_chains()['port_chains']
+ for pc in pc_list:
+ self.neutron_client.delete_sfc_port_chain(pc['id'])
+
+ logger.info("Deleting the classifiers...")
+ fc_list = self.neutron_client.\
+ list_sfc_flow_classifiers()['flow_classifiers']
+ for fc in fc_list:
+ self.neutron_client.delete_sfc_flow_classifier(fc['id'])
# TACKER SECTION #
@@ -344,12 +626,14 @@ def list_vnfds(tacker_client, verbose=False):
def create_vnfd(tacker_client, tosca_file=None, vnfd_name=None):
+ logger.info("Creating the vnfd...")
try:
vnfd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnfd_body = tosca_fd.read()
- logger.info('VNFD template:\n{0}'.format(vnfd_body))
+ vnfd = tosca_fd.read()
+ vnfd_body = yaml.safe_load(vnfd)
+ logger.info('VNFD template:\n{0}'.format(vnfd))
return tacker_client.create_vnfd(
body={"vnfd": {"attributes": {"vnfd": vnfd_body},
"name": vnfd_name}})
@@ -386,6 +670,7 @@ def list_vnfs(tacker_client, verbose=False):
def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnfd_name=None, vim_id=None, vim_name=None, param_file=None):
+ logger.info("Creating the vnf...")
try:
vnf_body = {
'vnf': {
@@ -444,7 +729,6 @@ def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
"""
Get the management ip of the first VNF component as obtained from the
tacker REST API:
-
{
"vnf": {
...
@@ -461,7 +745,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
if vnf is None:
raise Exception("Could not retrieve VNF - id='%s', name='%s'"
- % vnf_id, vnf_name)
+ % (vnf_id, vnf_name))
logger.info('Waiting for vnf {0}'.format(str(vnf)))
while vnf['status'] != 'ACTIVE' and timeout >= 0:
if vnf['status'] == 'ERROR':
@@ -496,6 +780,7 @@ def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
def create_vim(tacker_client, vim_file=None):
+ logger.info("Creating the vim...")
try:
vim_body = {}
if vim_file is not None:
@@ -510,12 +795,14 @@ def create_vim(tacker_client, vim_file=None):
def create_vnffgd(tacker_client, tosca_file=None, vnffgd_name=None):
+ logger.info("Creating the vnffgd...")
try:
vnffgd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnffgd_body = yaml.safe_load(tosca_fd)
- logger.info('VNFFGD template:\n{0}'.format(vnffgd_body))
+ vnffgd = tosca_fd.read()
+ vnffgd_body = yaml.safe_load(vnffgd)
+ logger.info('VNFFGD template:\n{0}'.format(vnffgd))
return tacker_client.create_vnffgd(
body={'vnffgd': {'name': vnffgd_name,
'template': {'vnffgd': vnffgd_body}}})
@@ -530,6 +817,7 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
'''
Creates the vnffg which will provide the RSP and the classifier
'''
+ logger.info("Creating the vnffg...")
try:
vnffg_body = {
'vnffg': {
@@ -657,8 +945,7 @@ def register_vim(tacker_client, vim_file=None):
create_vim(tacker_client, vim_file=tmp_file)
-def create_vnf_in_av_zone(
- tacker_client,
+def create_vnf_in_av_zone(tacker_client,
vnf_name,
vnfd_name,
vim_name,
@@ -670,9 +957,7 @@ def create_vnf_in_av_zone(
param_file = os.path.join(
'/tmp',
'param_{0}.json'.format(av_zone.replace('::', '_')))
- data = {
- 'zone': av_zone
- }
+ data = {'zone': av_zone}
with open(param_file, 'w+') as f:
json.dump(data, f)
create_vnf(tacker_client,
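
When mano_component is 'no-mano', OpenStackSFC drives networking-sfc directly through neutronclient. A minimal sketch of the chain-building sequence implemented above (port pair -> port pair group -> flow classifier -> port chain), assuming an openstacksdk session and pre-created Neutron ports; the UUID placeholders are not real IDs:

    from openstack import connection
    from neutronclient.neutron import client as neutronclient

    conn = connection.from_config(verify=False)
    neutron = neutronclient.Client('2', session=conn.session)

    ingress_id = egress_id = '<vnf-port-uuid>'   # placeholder
    client_port_id = '<client-port-uuid>'        # placeholder

    pp = neutron.create_sfc_port_pair(
        {'port_pair': {'name': 'sf1-connection-points',
                       'ingress': ingress_id, 'egress': egress_id}})
    ppg = neutron.create_sfc_port_pair_group(
        {'port_pair_group': {'name': 'sf1-port-pair-group',
                             'port_pairs': [pp['port_pair']['id']]}})
    fc = neutron.create_sfc_flow_classifier(
        {'flow_classifier': {'name': 'red-classifier',
                             'logical_source_port': client_port_id,
                             'protocol': 'tcp',
                             'destination_port_range_min': 80,
                             'destination_port_range_max': 80}})
    neutron.create_sfc_port_chain(
        {'port_chain': {'name': 'red-port-chain',
                        'port_pair_groups': [ppg['port_pair_group']['id']],
                        'flow_classifiers': [fc['flow_classifier']['id']]}})
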
diff --git a/sfc/lib/results.py b/sfc/lib/results.py
index 15d82e02..2f2edfc0 100644
--- a/sfc/lib/results.py
+++ b/sfc/lib/results.py
@@ -7,7 +7,6 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import logging
logger = logging.getLogger(__name__)
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index 18c55dc1..ed50c390 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -12,10 +12,8 @@ import subprocess
import time
import shutil
import urllib
-
import logging
-
logger = logging.getLogger(__name__)
SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
FUNCTEST_RESULTS_DIR = os.path.join("home", "opnfv",
@@ -232,11 +230,11 @@ def check_ssh(ips, retries=100):
def fill_installer_dict(installer_type):
- default_string = "defaults.installer.{}.".format(installer_type)
- installer_yaml_fields = {
- "user": default_string+"user",
- "password": default_string+"password",
- "cluster": default_string+"cluster",
- "pkey_file": default_string+"pkey_file"
- }
- return installer_yaml_fields
+ default_string = "defaults.installer.{}.".format(installer_type)
+ installer_yaml_fields = {
+ "user": default_string+"user",
+ "password": default_string+"password",
+ "cluster": default_string+"cluster",
+ "pkey_file": default_string+"pkey_file"
+ }
+ return installer_yaml_fields
diff --git a/sfc/tests/NAME_tests.py b/sfc/tests/NAME_tests.py
deleted file mode 100644
index e95004bc..00000000
--- a/sfc/tests/NAME_tests.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-def setup():
- print "SETUP!"
-
-
-def teardown():
- print "TEAR DOWN!"
-
-
-def test_basic():
- print "I RAN!"
diff --git a/sfc/tests/functest/README.tests b/sfc/tests/functest/README.tests
index d4e3df3e..f39d8888 100644
--- a/sfc/tests/functest/README.tests
+++ b/sfc/tests/functest/README.tests
@@ -34,15 +34,36 @@ will be running a firewall that blocks the traffic in a specific port (e.g.
33333). A symmetric service chain routing the traffic through this SF will be
created as well.
-1st check: The client is able to reach the server using a source port different
-from the one that the firewall blocks (e.g 22222), and the response gets back
-to the client.
+1st check: The client is able to reach the server and the response gets back
+to the client. Here the firewall is running without blocking any port.
-2nd check: The client is able to reach the server using the source port that
-the firewall blocks, but responses back from the server are blocked, as the
-symmetric service chain makes them go through the firewall that blocks on the
-destination port initially used as source port by the client (e.g. 33333).
+2nd check: The client is not able to reach the server, as the firewall
+is configured to block traffic on port 80 and the request from the client
+is blocked, as the symmetric service chain makes it go through the firewall.
-If the client is able to receive the response, it would be a symptom of the
+If the client is able to reach the server, it would be a symptom of the
+symmetric chain not working, as traffic would be flowing from client to server
+directly without traversing the SF.
+
+3rd check: The client is able to reach the server, but the firewall
+is configured to block traffic on port 22222 (the client source port), so the
+response from the server is blocked on its way back.
+
+If the server is able to reach the client, it would be a symptom of the
symmetric chain not working, as traffic would be flowing from server to client
directly without traversing the SF.
+
+4th check: The client is able to reach the server and the response gets back
+to the client, like in the 1st check, to verify that the test ends with the
+same configuration as at the beginning.
+
+
+## TEST DELETION - sfc_chain_deletion ##
+
+One client and one server are created using nova. Then an SF is created using tacker.
+A service chain which routes the traffic through this SF will be created as well.
+After that the chain is deleted and re-created.
+
+vxlan_tool is started in the SF and HTTP traffic is sent from the client to the server.
+If it works, the vxlan_tool is modified to block HTTP traffic.
+The traffic is sent again and it should now fail because packets are dropped.
\ No newline at end of file
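
The symmetric checks above hinge on the client's source port (source_port: 22222 in config.yaml) versus the port the SF firewall blocks. A minimal sketch, assuming the server's floating IP is reachable, of issuing the HTTP request from that fixed source port:

    import socket

    SERVER_IP = '192.168.0.2'   # placeholder for the server floating IP
    SOURCE_PORT = 22222         # source_port from config.yaml

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', SOURCE_PORT))   # fix the source port the firewall may block
    sock.settimeout(10)
    sock.connect((SERVER_IP, 80))
    sock.sendall(b'GET / HTTP/1.0\r\n\r\n')
    print(sock.recv(1024))         # blocked in the 3rd check, answered otherwise
    sock.close()
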
diff --git a/sfc/tests/functest/config-pike.yaml b/sfc/tests/functest/config-pike.yaml
deleted file mode 100644
index eff95c08..00000000
--- a/sfc/tests/functest/config-pike.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-defaults:
- # odl-sfc uses custom flavors as per below params
- flavor: custom
- ram_size_in_mb: 500
- disk_size_in_gb: 1
- vcpu_count: 1
- image_name: sfc_nsh_fraser
- installer:
- fuel:
- user: root
- password: r00tme
- cluster: 1 # Change this to the id of the desired fuel env (1, 2, 3...)
- apex:
- user: stack
- pkey_file: "/root/.ssh/id_rsa"
- osa:
- user: root
- pkey_file: "/root/.ssh/id_rsa"
- compass:
- user: root
- pkey_file: "/root/.ssh/id_rsa"
- image_format: qcow2
- image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2"
- vnfd-dir: "vnfd-templates"
- vnfd-default-params-file: "test-vnfd-default-params.yaml"
-
-
-testcases:
- sfc_one_chain_two_service_functions:
- enabled: true
- order: 0
- description: "ODL-SFC Testing SFs when they are located on the same chain"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd_red: "test-one-chain-vnfd1.yaml"
- test_vnfd_blue: "test-one-chain-vnfd2.yaml"
- test_vnffgd_red: "test-one-chain-vnffgd-pike.yaml"
-
- sfc_two_chains_SSH_and_HTTP:
- enabled: false
- order: 1
- description: "ODL-SFC tests with two chains and one SF per chain"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd_red: "test-two-chains-vnfd1.yaml"
- test_vnfd_blue: "test-two-chains-vnfd2.yaml"
- test_vnffgd_red: "test-two-chains-vnffgd1-pike.yaml"
- test_vnffgd_blue: "test-two-chains-vnffgd2-pike.yaml"
-
- sfc_symmetric_chain:
- enabled: false
- order: 2
- description: "Verify the behavior of a symmetric service chain"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd: "test-symmetric-vnfd.yaml"
- test_vnffgd: "test-symmetric-vnffgd.yaml"
- source_port: 22222
-
- sfc_chain_deletion:
- enabled: false
- order: 3
- description: "Verify if chains work correctly after deleting one"
- net_name: example-net
- subnet_name: example-subnet
- router_name: example-router
- subnet_cidr: "11.0.0.0/24"
- secgroup_name: "example-sg"
- secgroup_descr: "Example Security group"
- test_vnfd_red: "test-one-chain-vnfd1.yaml"
- test_vnffgd_red: "test-deletion-vnffgd-pike.yaml"
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index cad3cf72..021b4c39 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -25,9 +25,18 @@ defaults:
vnfd-dir: "vnfd-templates"
vnfd-default-params-file: "test-vnfd-default-params.yaml"
+ # mano_component can be [tacker, no-mano]. When set to no-mano,
+ # networking-sfc is used
+ mano_component: "no-mano"
+
+ # [OPTIONAL] Only when deploying VNFs without the default image (vxlan_tool)
+ # vnf_image_name: xxx
+ # vnf_image_format: yyy
+ # vnf_image_url: zzz
testcases:
sfc_one_chain_two_service_functions:
+ class_name: "SfcOneChainTwoServiceTC"
enabled: true
order: 0
description: "ODL-SFC Testing SFs when they are located on the same chain"
@@ -37,12 +46,21 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-one-chain-vnfd1.yaml"
test_vnfd_blue: "test-one-chain-vnfd2.yaml"
test_vnffgd_red: "test-one-chain-vnffgd.yaml"
sfc_two_chains_SSH_and_HTTP:
- enabled: false
+ class_name: "SfcTwoChainsSSHandHTTP"
+ enabled: true
order: 1
description: "ODL-SFC tests with two chains and one SF per chain"
net_name: example-net
@@ -51,13 +69,22 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ - 'testVNF2'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-two-chains-vnfd1.yaml"
test_vnfd_blue: "test-two-chains-vnfd2.yaml"
test_vnffgd_red: "test-two-chains-vnffgd1.yaml"
test_vnffgd_blue: "test-two-chains-vnffgd2.yaml"
sfc_symmetric_chain:
- enabled: false
+ class_name: "SfcSymmetricChain"
+ enabled: true
order: 2
description: "Verify the behavior of a symmetric service chain"
net_name: example-net
@@ -66,12 +93,20 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd: "test-symmetric-vnfd.yaml"
test_vnffgd: "test-symmetric-vnffgd.yaml"
source_port: 22222
sfc_chain_deletion:
- enabled: false
+ class_name: "SfcChainDeletion"
+ enabled: true
order: 3
description: "Verify if chains work correctly after deleting one"
net_name: example-net
@@ -80,5 +115,12 @@ testcases:
subnet_cidr: "11.0.0.0/24"
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
+ vnf_names:
+ - 'testVNF1'
+ supported_installers:
+ - 'fuel'
+ - 'apex'
+ - 'osa'
+ - 'compass'
test_vnfd_red: "test-one-chain-vnfd1.yaml"
test_vnffgd_red: "test-deletion-vnffgd.yaml"
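
Each testcase entry now also carries class_name, vnf_names and supported_installers, which run_sfc_tests.py uses to instantiate the testcase class. A minimal sketch of reading those fields back out of config.yaml (key names taken from the hunks above):

    import yaml

    with open('config.yaml') as f:
        cfg = yaml.safe_load(f)

    test_cfg = cfg['testcases']['sfc_symmetric_chain']
    print(test_cfg['class_name'])            # SfcSymmetricChain
    print(test_cfg['vnf_names'])             # ['testVNF1']
    print(test_cfg['supported_installers'])  # ['fuel', 'apex', 'osa', 'compass']
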
diff --git a/sfc/tests/functest/pod.yaml.sample b/sfc/tests/functest/pod.yaml.sample
new file mode 100644
index 00000000..aa5fddad
--- /dev/null
+++ b/sfc/tests/functest/pod.yaml.sample
@@ -0,0 +1,58 @@
+# A sample config file with the POD information is located in the dovetail project:
+# https://github.com/opnfv/dovetail/blob/master/etc/userconfig/pod.yaml.sample
+# On top of that template, node0 is used here to define the Host role.
+# After that, the proper number of controller nodes should be defined and,
+# at the end, the respective compute nodes.
+
+nodes:
+-
+ # This can not be changed and must be node0.
+ name: node0
+
+ # This must be Host.
+ role: Host
+
+ # This is the instance IP of the host node.
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This can not be changed and must be node1.
+ name: node1
+
+ # This must be Controller.
+ role: Controller
+
+ # This is the instance IP of a controller node
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+-
+ # This can not be changed and must be node2.
+ name: node2
+
+ # This must be Compute.
+ role: Compute
+
+ # This is the instance IP of a compute node
+ ip: xx.xx.xx.xx
+
+ # User name of the user of this node. This user **must** have sudo privileges.
+ user: root
+
+ # keyfile of the user.
+ key_filename: /root/.ssh/id_rsa
+
+ # Private ssh key for accessing the controller nodes. If there is no
+ # keyfile for that user, the password of the user can be used instead.
+ # password: root
\ No newline at end of file
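
get_odl_ip_port_no_installer walks this nodes list for the entry whose role is Controller before fetching ml2_conf.ini from it. A minimal sketch of that lookup over a parsed pod.yaml:

    import yaml

    with open('pod.yaml') as f:   # illustrative path
        nodes = yaml.safe_load(f)['nodes']

    controller = next(n for n in nodes if n['role'] == 'Controller')
    print(controller['user'], controller['ip'])
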
diff --git a/sfc/tests/functest/run_sfc_tests.py b/sfc/tests/functest/run_sfc_tests.py
index 64c5b385..7f0eaa8a 100644
--- a/sfc/tests/functest/run_sfc_tests.py
+++ b/sfc/tests/functest/run_sfc_tests.py
@@ -11,18 +11,17 @@
import importlib
import os
import time
-import yaml
+import logging
import sys
+import yaml
-from xtesting.core import testcase
+from collections import OrderedDict
from opnfv.utils import ovs_logger as ovs_log
from opnfv.deployment.factory import Factory as DeploymentFactory
from sfc.lib import cleanup as sfc_cleanup
from sfc.lib import config as sfc_config
from sfc.lib import odl_utils as odl_utils
-
-from collections import OrderedDict
-import logging
+from xtesting.core import testcase
logger = logging.getLogger(__name__)
COMMON_CONFIG = sfc_config.CommonConfig()
@@ -30,6 +29,13 @@ COMMON_CONFIG = sfc_config.CommonConfig()
class SfcFunctest(testcase.TestCase):
+ def __init__(self, **kwargs):
+ super(SfcFunctest, self).__init__(**kwargs)
+
+ self.cleanup_flag = True
+ if '--nocleanup' in sys.argv:
+ self.cleanup_flag = False
+
def __fetch_tackerc_file(self, controller_node):
rc_file = os.path.join(COMMON_CONFIG.sfc_test_dir, 'tackerc')
if not os.path.exists(rc_file):
@@ -98,35 +104,49 @@ class SfcFunctest(testcase.TestCase):
time.sleep(10)
def __disable_heat_resource_finder_cache(self, nodes, installer_type):
- controllers = [node for node in nodes if node.is_controller()]
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ controllers = [node for node in nodes if node.is_controller()]
+ else:
+ controllers = []
+ for n in COMMON_CONFIG.nodes_pod:
+ if n['role'] == 'Controller':
+ controllers.append(n)
+ logger.info("CONTROLLER : %s", controllers)
if installer_type == 'apex':
self.__disable_heat_resource_finder_cache_apex(controllers)
elif installer_type == "fuel":
self.__disable_heat_resource_finder_cache_fuel(controllers)
- elif installer_type == "osa" or "compass":
+ elif installer_type == "osa" or "compass" or "configByUser":
pass
else:
raise Exception('Unsupported installer')
def run(self):
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
cluster = COMMON_CONFIG.installer_cluster
- nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- self.__disable_heat_resource_finder_cache(nodes,
- COMMON_CONFIG.installer_type)
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deploymentHandler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ nodes = (deploymentHandler.get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deploymentHandler.get_nodes())
+ self.__disable_heat_resource_finder_cache(nodes,
+ COMMON_CONFIG.
+ installer_type)
+ odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
- odl_ip, odl_port = odl_utils.get_odl_ip_port(nodes)
+ else:
+ nodes = COMMON_CONFIG.nodes_pod
+ self.__disable_heat_resource_finder_cache(nodes, "configByUser")
+ odl_ip, odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(COMMON_CONFIG.nodes_pod)
ovs_logger = ovs_log.OVSLogger(
os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
@@ -149,16 +169,33 @@ class SfcFunctest(testcase.TestCase):
(test_name, test_descr))
logger.info(title)
logger.info("%s\n" % ("=" * len(title)))
- t = importlib.import_module(
+ module = importlib.import_module(
"sfc.tests.functest.{0}".format(test_name),
package=None)
+
+ testcase_config = sfc_config.TestcaseConfig(test_name)
+ supported_installers = test_cfg['supported_installers']
+ vnf_names = test_cfg['vnf_names']
+
+ tc_class = getattr(module, test_cfg['class_name'])
+ tc_instance = tc_class(testcase_config, supported_installers,
+ vnf_names)
+ cleanup_run_flag = False
start_time = time.time()
try:
- result, creators = t.main()
+ result, creators = tc_instance.run()
except Exception as e:
logger.error("Exception when executing: %s" % test_name)
logger.error(e)
result = {'status': 'FAILED'}
+ creators = tc_instance.get_creators()
+ if self.cleanup_flag is True:
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
+ odl_port=odl_port)
+ cleanup_run_flag = True
end_time = time.time()
duration = end_time - start_time
logger.info("Results of test case '%s - %s':\n%s\n" %
@@ -176,7 +213,13 @@ class SfcFunctest(testcase.TestCase):
dic = {"duration": duration, "status": status}
self.details.update({test_name: dic})
- sfc_cleanup.cleanup(creators, odl_ip=odl_ip, odl_port=odl_port)
+
+ if cleanup_run_flag is not True and self.cleanup_flag is True:
+ sfc_cleanup.cleanup(testcase_config,
+ creators,
+ COMMON_CONFIG.mano_component,
+ odl_ip=odl_ip,
+ odl_port=odl_port)
self.stop_time = time.time()
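
The runner no longer calls a module-level main(); it imports the testcase module and instantiates the class named in config.yaml, then cleans up per test. A minimal sketch of that dispatch, assuming the sfc package and a functest environment are installed (values taken from the config.yaml hunks above):

    import importlib
    import sfc.lib.config as sfc_config

    test_name = 'sfc_chain_deletion'
    class_name = 'SfcChainDeletion'                       # from config.yaml
    supported_installers = ['fuel', 'apex', 'osa', 'compass']
    vnf_names = ['testVNF1']

    testcase_config = sfc_config.TestcaseConfig(test_name)
    module = importlib.import_module('sfc.tests.functest.{}'.format(test_name))
    tc_class = getattr(module, class_name)
    tc_instance = tc_class(testcase_config, supported_installers, vnf_names)
    result, creators = tc_instance.run()
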
diff --git a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash b/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
deleted file mode 100644
index 36148aa1..00000000
--- a/sfc/tests/functest/setup_scripts/compute_presetup_CI.bash
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# This script must be use with vxlan-gpe + nsh. Once we have eth + nsh support
-# in ODL, we will not need it anymore
-
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-#ip=`sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep compute|\
-#awk '{print $10}' | head -1`
-
-ip=$1
-echo $ip
-#sshpass -p r00tme scp $ssh_options correct_classifier.bash ${INSTALLER_IP}:/root
-#sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp correct_classifier.bash '"$ip"':/root'
-
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ifconfig br-int up'
-output=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route | \
-cut -d" " -f1 | grep 11.0.0.0' ; exit 0)
-
-if [ -z "$output" ]; then
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'ssh root@'"$ip"' ip route add 11.0.0.0/24 \
-dev br-int'
-fi
diff --git a/sfc/tests/functest/setup_scripts/delete.sh b/sfc/tests/functest/setup_scripts/delete.sh
deleted file mode 100644
index 3333c52b..00000000
--- a/sfc/tests/functest/setup_scripts/delete.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# Remember to source the env variables $creds before
-FILE=$(readlink -f $0)
-FILE_PATH=$(dirname $FILE)
-cd $FILE_PATH
-python ../../../lib/cleanup.py $1 $2
-openstack server delete client
-openstack server delete server
-for line in $(openstack floating ip list);do openstack floating ip delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/delete_symmetric.sh b/sfc/tests/functest/setup_scripts/delete_symmetric.sh
deleted file mode 100644
index b0aa4d81..00000000
--- a/sfc/tests/functest/setup_scripts/delete_symmetric.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-# Remember to source the env variables $creds before
-tacker sfc-classifier-delete red_http
-tacker sfc-classifier-delete red_http_reverse
-tacker sfc-delete red
-tacker vnf-delete testVNF1
-tacker vnfd-delete test-vnfd1
-nova delete client
-nova delete server
-for line in $(neutron floatingip-list | cut -d" " -f2);do neutron floatingip-delete $line;done
diff --git a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py b/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
deleted file mode 100644
index 1ddf36a6..00000000
--- a/sfc/tests/functest/setup_scripts/prepare_odl_sfc.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# Author: George Paraskevopoulos (geopar@intracom-telecom.com)
-# Manuel Buil (manuel.buil@ericsson.com)
-# Prepares the controller and the compute nodes for the odl-sfc testcase
-#
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import os
-import paramiko
-import subprocess
-import sys
-
-import functest.utils.functest_logger as ft_logger
-
-
-logger = ft_logger.Logger(__name__).getLogger()
-
-SFC_REPO_DIR = "/home/opnfv/repos/sfc"
-
-try:
- INSTALLER_IP = os.environ['INSTALLER_IP']
-except:
- logger.debug("INSTALLER_IP does not exist. We create 10.20.0.2")
- INSTALLER_IP = "10.20.0.2"
-
-os.environ['ODL_SFC_LOG'] = "/home/opnfv/functest/results/sfc.log"
-os.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,
- "sfc/tests/functest")
-SETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')
-
-command = SETUP_SCRIPTS_DIR + ("/server_presetup_CI.bash | "
- "tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
-output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# i = line.rstrip()
-# print(i)
-
-# Make sure the process is finished before checking the returncode
-if not output.poll():
- output.wait()
-
-# Get return value
-if output.returncode:
- print("The presetup of the server did not work")
- sys.exit(output.returncode)
-
-logger.info("The presetup of the server worked ")
-
-ssh_options = "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-ssh = paramiko.SSHClient()
-ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-try:
- ssh.connect(INSTALLER_IP, username="root",
- password="r00tme", timeout=2)
- command = "fuel node | grep compute | awk '{print $10}'"
- logger.info("Executing ssh to collect the compute IPs")
- (stdin, stdout, stderr) = ssh.exec_command(command)
-except:
- logger.debug("Something went wrong in the ssh to collect the computes IP")
-
-output = stdout.readlines()
-for ip in output:
- command = SETUP_SCRIPTS_DIR + ("/compute_presetup_CI.bash " + ip.rstrip() +
- "| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1")
-
- output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
-
-# This code is for debugging purposes
-# for line in iter(output.stdout.readline, ''):
-# print(line)
-# sys.stdout.flush()
-
- output.stdout.close()
-
- if not (output.poll()):
- output.wait()
-
- # Get return value
- if output.returncode:
- print("The compute config did not work on compute %s" % ip)
- sys.exit(output.returncode)
-
-sys.exit(0)
diff --git a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash b/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
deleted file mode 100644
index 240353f5..00000000
--- a/sfc/tests/functest/setup_scripts/server_presetup_CI.bash
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-set -e
-ssh_options='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
-BASEDIR=`dirname $0`
-INSTALLER_IP=${INSTALLER_IP:-10.20.0.2}
-
-pushd $BASEDIR
-ip=$(sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'fuel node'|grep controller|awk '{print $10}' | head -1)
-echo $ip
-
-sshpass -p r00tme scp $ssh_options delete.sh ${INSTALLER_IP}:/root
-sshpass -p r00tme ssh $ssh_options root@${INSTALLER_IP} 'scp '"$ip"':/root/tackerc .'
-sshpass -p r00tme scp $ssh_options ${INSTALLER_IP}:/root/tackerc $BASEDIR
diff --git a/sfc/tests/functest/sfc_chain_deletion.py b/sfc/tests/functest/sfc_chain_deletion.py
index 9fde460f..5f73d0c7 100644
--- a/sfc/tests/functest/sfc_chain_deletion.py
+++ b/sfc/tests/functest/sfc_chain_deletion.py
@@ -8,261 +8,113 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_chain_deletion')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- installer_type = os.environ.get("INSTALLER_TYPE")
-
- supported_installers = ['fuel', 'apex', 'osa', 'compass']
-
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
- cluster = COMMON_CONFIG.installer_cluster
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+class SfcChainDeletion(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, an SF is created using tacker.
+ A service chain routing the traffic
+ through this SF will be created as well.
+ After that, the chain is deleted and re-created.
+ Finally, the vxlan tool is used in order to check a single
+ HTTP traffic scenario.
+ """
+ def run(self):
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
+ logger.info("The test scenario %s is starting", __name__)
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp', symmetric=False)
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
+ logger.info("Assigning floating IPs to instances")
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
-
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
-
- vnf_names = ['testVNF1', 'testVNF2']
+ self.assign_floating_ip_client_server()
+ self.assign_floating_ip_sfs()
- topo_seed = topo_shuffler.get_seed() # change to None for nova av zone
- testTopology = topo_shuffler.topology(vnf_names, openstack_sfc,
- seed=topo_seed)
+ self.check_floating_ips()
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
+ self.start_services_in_vm()
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
+ self.remove_vnffg('red_http', 'red')
+ self.check_deletion()
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'blue',
+ 'blue_http', port=80, protocol='tcp',
+ symmetric=False)
- server_ip = server_instance.ports[0].ips[0]['ip_address']
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
+ logger.info("Starting SSH firewall on %s" % self.fips_sfs[0])
+ test_utils.start_vxlan_tool(self.fips_sfs[0])
- tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_red,
- vnfd_name='test-vnfd1')
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
+ logger.info("Test HTTP")
+ results = self.present_results_allowed_http()
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnf_names[0]])
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
- if vnf1_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
+ logger.info("Test HTTP again")
+ results = self.present_results_http()
- neutron_port = openstack_sfc.get_client_port(client_instance,
- client_creator)
- odl_utils.create_chain(tacker_client, default_param_file, neutron_port,
- COMMON_CONFIG, TESTCASE_CONFIG)
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),
- [neutron_port],))
+ if __name__ == 'sfc.tests.functest.sfc_chain_deletion':
+ return results.compile_summary(), self.creators
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
+ :return: creators
+ """
+ return self.creators
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip]
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf1_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
- % server_floating_ip)
- sys.exit(1)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- os_sfc_utils.delete_vnffg(tacker_client, vnffg_name='red_http')
-
- os_sfc_utils.delete_vnffgd(tacker_client, vnffgd_name='red')
-
- if not odl_utils.check_vnffg_deletion(odl_ip, odl_port, ovs_logger,
- [neutron_port],
- openstack_sfc.get_compute_client(),
- compute_nodes):
- logger.debug("The chains were not correctly removed")
- raise Exception("Chains not correctly removed, test failed")
-
- odl_utils.create_chain(tacker_client, default_param_file, neutron_port,
- COMMON_CONFIG, TESTCASE_CONFIG)
-
- # Start measuring the time it takes to implement the classification rules
- t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),
- [neutron_port],))
- try:
- t2.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t2.join()
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
-
- logger.info("Stopping HTTP firewall on %s" % sf1_floating_ip)
- test_utils.stop_vxlan_tool(sf1_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="80")
-
- logger.info("Test HTTP again")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP works\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
+if __name__ == '__main__':
- return results.compile_summary(), openstack_sfc.creators
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_chain_deletion')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcChainDeletion(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
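With the refactor above, the deletion scenario is a class whose run() method returns the compiled results plus the creators list, both when the module is executed directly and when it is imported by the functest runner. A minimal standalone usage sketch, mirroring the __main__ block in the diff (the config name and lists are the ones used above):

    import urllib3

    import sfc.lib.config as sfc_config
    from sfc.tests.functest.sfc_chain_deletion import SfcChainDeletion

    # Disable InsecureRequestWarning errors, as the test modules do
    urllib3.disable_warnings()
    testcase_config = sfc_config.TestcaseConfig('sfc_chain_deletion')
    test_run = SfcChainDeletion(testcase_config,
                                ['fuel', 'apex', 'osa', 'compass'],
                                ['testVNF1', 'testVNF2'])
    # When imported, run() returns (results.compile_summary(), creators)
    status, creators = test_run.run()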
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index 07f7814c..38fa3fef 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -7,280 +7,97 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
+from sfc.tests.functest import sfc_parent_function
""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig(
- 'sfc_one_chain_two_service'
- '_functions')
+class SfcOneChainTwoServiceTC(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, 2 SFs are created using tacker.
+ A chain is created where both SFs are included.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP traffic combinations.
+ """
+ def run(self):
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
+ logger.info("The test scenario %s is starting", __name__)
- installer_type = os.environ.get("INSTALLER_TYPE")
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp', symmetric=False)
+ # Start measuring the time it takes to implement the
+ # classification rules
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
+ self.assign_floating_ip_client_server()
- cluster = COMMON_CONFIG.installer_cluster
+ self.assign_floating_ip_sfs()
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
+ self.check_floating_ips()
+ self.start_services_in_vm()
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
+ t1.join()
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
+ logger.info("Allowed HTTP scenario")
+ results = self.present_results_allowed_http()
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ results = self.present_results_http()
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
+ self.vxlan_blocking_start(self.fips_sfs[1], "80")
+ self.vxlan_blocking_stop(self.fips_sfs[0])
- openstack_sfc = os_sfc_utils.OpenStackSFC()
+ results = self.present_results_http()
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- tacker_client = os_sfc_utils.get_tacker_client()
+ if __name__ == \
+ 'sfc.tests.functest.sfc_one_chain_two_service_functions':
+ return results.compile_summary(), self.creators
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
+ def get_creators(self):
+ """Return the creators info, specially in case the info is not
+ returned due to an exception.
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
+ :return: creators
+ """
+ return self.creators
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
+if __name__ == '__main__':
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_one_chain_two_service'
+ '_functions')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
vnfs = ['testVNF1', 'testVNF2']
- topo_seed = topo_shuffler.get_seed()
- testTopology = topo_shuffler.topology(vnfs, openstack_sfc, seed=topo_seed)
-
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
-
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd1')
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file, vnfd_name='test-vnfd2')
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnfs[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnfs[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnfs[1]])
-
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnfs[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- neutron_port = openstack_sfc.get_client_port(client_instance,
- client_creator)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),
- [neutron_port],))
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
-
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error(
- 'Failed to start HTTP server on %s' % server_floating_ip)
- sys.exit(1)
-
- for sf_floating_ip in (sf1_floating_ip, sf2_floating_ip):
- logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP blocked")
-
- logger.info("Changing the vxlan_tool to block HTTP traffic")
-
- # Make SF1 block http traffic
- test_utils.stop_vxlan_tool(sf1_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="80")
-
- logger.info("Test HTTP again blocking SF1")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- # Make SF2 block http traffic
- test_utils.stop_vxlan_tool(sf2_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
- logger.info("Stopping HTTP firewall on %s" % sf1_floating_ip)
- test_utils.stop_vxlan_tool(sf1_floating_ip)
-
- logger.info("Test HTTP again blocking SF2")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP not blocked")
-
- return results.compile_summary(), openstack_sfc.creators
-
-
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcOneChainTwoServiceTC(TESTCASE_CONFIG, supported_installers,
+ vnfs)
+ test_run.run()
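The scenario above checks that HTTP flows through the chain only while no SF blocks it: traffic works with both vxlan_tool instances forwarding, and is reported as blocked whenever either SF drops port 80. A compressed sketch of those three checks using the helpers from sfc_parent_function.py; tc, client_fip and server_ip are placeholders for the test case instance and the addresses it holds:

    import sfc.lib.test_utils as test_utils

    # 1) No SF blocks port 80: HTTP must work end to end
    assert not test_utils.is_http_blocked(client_fip, server_ip)
    # 2) SF1 blocks port 80: HTTP must be dropped by the chain
    tc.vxlan_blocking_start(tc.fips_sfs[0], "80")
    assert test_utils.is_http_blocked(client_fip, server_ip)
    # 3) SF2 blocks port 80 while SF1 forwards again: still dropped
    tc.vxlan_blocking_start(tc.fips_sfs[1], "80")
    tc.vxlan_blocking_stop(tc.fips_sfs[0])
    assert test_utils.is_http_blocked(client_fip, server_ip)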
diff --git a/sfc/tests/functest/sfc_parent_function.py b/sfc/tests/functest/sfc_parent_function.py
new file mode 100644
index 00000000..410c0e71
--- /dev/null
+++ b/sfc/tests/functest/sfc_parent_function.py
@@ -0,0 +1,768 @@
+import logging
+import os
+import urllib3
+
+import sfc.lib.test_utils as test_utils
+import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.topology_shuffler as topo_shuffler
+
+from opnfv.utils import ovs_logger as ovs_log
+from opnfv.deployment.factory import Factory as DeploymentFactory
+from sfc.lib import config as sfc_config
+from sfc.lib import odl_utils as odl_utils
+from sfc.lib.results import Results
+
+# Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+urllib3.disable_warnings()
+
+logger = logging.getLogger(__name__)
+CLIENT = "client"
+SERVER = "server"
+openstack_sfc = os_sfc_utils.OpenStackSFC()
+COMMON_CONFIG = sfc_config.CommonConfig()
+results = Results(COMMON_CONFIG.line_length)
+
+
+class SfcCommonTestCase(object):
+
+ def __init__(self, testcase_config, supported_installers, vnfs):
+
+ self.compute_nodes = None
+ self.controller_clients = None
+ self.compute_clients = None
+ self.tacker_client = None
+ self.ovs_logger = None
+ self.network = None
+ self.router = None
+ self.sg = None
+ self.image_creator = None
+ self.vnf_image_creator = None
+ self.creators = None
+ self.odl_ip = None
+ self.odl_port = None
+ self.default_param_file = None
+ self.topo_seed = None
+ self.test_topology = None
+ self.server_instance = None
+ self.server_creator = None
+ self.client_instance = None
+ self.client_creator = None
+ self.vnf_id = None
+ self.client_floating_ip = None
+ self.server_floating_ip = None
+ self.fips_sfs = []
+ self.vnf_objects = dict()
+ self.testcase_config = testcase_config
+ self.vnfs = vnfs
+ self.port_server = None
+ self.server_ip = None
+ self.port_client = None
+
+ self.prepare_env(testcase_config, supported_installers, vnfs)
+
+ def prepare_env(self, testcase_config, supported_installers, vnfs):
+ """Prepare the testcase environment and the components
+ that the test scenario is going to use later on.
+
+ :param testcase_config: the input test config file
+ :param supported_installers: the supported installers for this tc
+ :param vnfs: the names of vnfs
+ :return: Environment preparation
+ """
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ deployment_handler = DeploymentFactory.get_handler(
+ COMMON_CONFIG.installer_type,
+ COMMON_CONFIG.installer_ip,
+ COMMON_CONFIG.installer_user,
+ COMMON_CONFIG.installer_password,
+ COMMON_CONFIG.installer_key_file)
+
+ installer_type = os.environ.get("INSTALLER_TYPE")
+ installer_ip = os.environ.get("INSTALLER_IP")
+ cluster = COMMON_CONFIG.installer_cluster
+ openstack_nodes = (deployment_handler.
+ get_nodes({'cluster': cluster})
+ if cluster is not None
+ else deployment_handler.get_nodes())
+
+ self.compute_nodes = [node for node in openstack_nodes
+ if node.is_compute()]
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute.ip)
+
+ controller_nodes = [node for node in openstack_nodes
+ if node.is_controller()]
+ self.controller_clients = test_utils. \
+ get_ssh_clients(controller_nodes)
+ self.compute_clients = test_utils. \
+ get_ssh_clients(self.compute_nodes)
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port(openstack_nodes)
+
+ else:
+ installer_type = 'configByUser'
+ installer_ip = COMMON_CONFIG.installer_ip
+ openstack_nodes = COMMON_CONFIG.nodes_pod
+ self.compute_nodes = [node for node in
+ COMMON_CONFIG.nodes_pod
+ if node['role'] == 'Compute']
+
+ for compute in self.compute_nodes:
+ logger.info("This is a compute: %s" % compute['ip'])
+
+ controller_nodes = [node for node in openstack_nodes
+ if node['role'] == 'Controller']
+
+ self.odl_ip, self.odl_port = odl_utils. \
+ get_odl_ip_port_no_installer(openstack_nodes)
+
+ if installer_type not in supported_installers:
+ if installer_type != 'configByUser':
+ raise Exception(
+ '\033[91mYour installer is not supported yet\033[0m')
+
+ if not installer_ip:
+ logger.error(
+ '\033[91minstaller ip is not set\033[0m')
+ raise Exception(
+ '\033[91mexport INSTALLER_IP=<ip>\033[0m')
+
+ results.add_to_summary(0, "=")
+ results.add_to_summary(2, "STATUS", "SUBTEST")
+ results.add_to_summary(0, "=")
+
+ custom_flv = openstack_sfc.create_flavor(
+ COMMON_CONFIG.flavor,
+ COMMON_CONFIG.ram_size_in_mb,
+ COMMON_CONFIG.disk_size_in_gb,
+ COMMON_CONFIG.vcpu_count)
+ if not custom_flv:
+ raise Exception("Failed to create custom flavor")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.tacker_client = os_sfc_utils.get_tacker_client()
+ os_sfc_utils.register_vim(self.tacker_client,
+ vim_file=COMMON_CONFIG.vim_file)
+
+ self.ovs_logger = ovs_log.OVSLogger(
+ os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
+ COMMON_CONFIG.functest_results_dir)
+
+ self.network, self.router = openstack_sfc.\
+ create_network_infrastructure(testcase_config.net_name,
+ testcase_config.subnet_name,
+ testcase_config.subnet_cidr,
+ testcase_config.router_name)
+
+ self.sg = openstack_sfc.create_security_group(
+ testcase_config.secgroup_name)
+
+ # Image for the vnf is registered
+ self.vnf_image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.vnf_image_name,
+ COMMON_CONFIG.vnf_image_url,
+ COMMON_CONFIG.vnf_image_format,
+ 'public')
+
+ # Image for the client/server is registered
+ self.image_creator = openstack_sfc.register_glance_image(
+ COMMON_CONFIG.image_name,
+ COMMON_CONFIG.image_url,
+ COMMON_CONFIG.image_format,
+ 'public')
+
+ self.creators = openstack_sfc.creators
+
+ odl_utils.get_odl_username_password()
+
+ self.default_param_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir,
+ COMMON_CONFIG.vnfd_default_params_file)
+
+ self.topo_seed = topo_shuffler.get_seed()
+ self.test_topology = topo_shuffler.topology(vnfs, openstack_sfc,
+ seed=self.topo_seed)
+
+ logger.info('This test is run with the topology {0}'
+ .format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ self.server_instance, port_server = \
+ openstack_sfc.create_instance(SERVER, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ self.test_topology['server'],
+ [SERVER + '-port'])
+
+ self.client_instance, port_client = \
+ openstack_sfc.create_instance(CLIENT, COMMON_CONFIG.flavor,
+ self.image_creator, self.network,
+ self.sg,
+ self.test_topology['client'],
+ [CLIENT + '-port'])
+
+ logger.info('This test is run with the topology {0}'.format(
+ self.test_topology['id']))
+ logger.info('Topology description: {0}'.format(
+ self.test_topology['description']))
+
+ if COMMON_CONFIG.installer_type != 'configByUser':
+ self.port_server = port_server[0]
+ self.port_client = port_client[0]
+ port_fixed_ips = self.port_server
+ for ip in port_fixed_ips:
+ self.server_ip = ip.get('ip_address')
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+ else:
+ self.port_server = port_server
+ self.port_client = port_client
+ self.server_ip = self.server_instance.ports[0].ips[0]['ip_address']
+ logger.info("Server instance received private ip [{}]".format(
+ self.server_ip))
+
+ def register_vnf_template(self, test_case_name, template_name):
+ """ Register the template which defines the VNF
+
+ :param test_case_name: the name of the test case
+ :param template_name: name of the template
+ """
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ self.create_custom_vnfd(test_case_name, template_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # networking-sfc does not have the template concept
+ pass
+
+ def create_custom_vnfd(self, test_case_name, vnfd_name):
+ """Create VNF Descriptor (VNFD)
+
+ :param test_case_name: the name of test case
+ :param vnfd_name: the name of vnfd
+ :return: vnfd
+ """
+
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnfd_dir, test_case_name)
+
+ os_sfc_utils.create_vnfd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnfd_name=vnfd_name)
+
+ def create_vnf(self, vnf_name, vnfd_name=None, vim_name=None,
+ symmetric=False):
+ """Create custom vnf
+
+ :param vnf_name: name of the vnf
+ :param vnfd_name: name of the vnfd template (tacker)
+ :param vim_name: name of the vim (tacker)
+ :param symmetric: specifies whether this is part of the symmetric test
+ :return: av zone
+ """
+
+ logger.info('This test is run with the topology {0}'.
+ format(self.test_topology['id']))
+ logger.info('Topology description: {0}'
+ .format(self.test_topology['description']))
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.create_vnf_in_av_zone(
+ self.tacker_client, vnf_name, vnfd_name, vim_name,
+ self.default_param_file, self.test_topology[vnf_name])
+
+ self.vnf_id = os_sfc_utils.wait_for_vnf(self.tacker_client,
+ vnf_name=vnf_name)
+ if self.vnf_id is None:
+ raise Exception('ERROR while booting vnfs')
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ av_zone = self.test_topology[vnf_name]
+ if symmetric:
+ ports = [vnf_name + '-port1', vnf_name + '-port2']
+ else:
+ ports = [vnf_name + '-port']
+ vnf_instance, vnf_port = \
+ openstack_sfc.create_instance(vnf_name, COMMON_CONFIG.flavor,
+ self.vnf_image_creator,
+ self.network,
+ self.sg,
+ av_zone,
+ ports,
+ port_security=False)
+
+ self.vnf_objects[vnf_name] = [vnf_instance, vnf_port]
+ logger.info("Creating VNF with name...%s", vnf_name)
+ logger.info("Port associated with VNF...%s",
+ self.vnf_objects[vnf_name][1])
+
+ def assign_floating_ip_client_server(self):
+ """Assign floating IPs on the router about server and the client
+ instances
+ :return: Floating IPs for client and server
+ """
+ logger.info("Assigning floating IPs to client and server instances")
+
+ self.client_floating_ip = openstack_sfc.assign_floating_ip(
+ self.client_instance, self.port_client)
+ self.server_floating_ip = openstack_sfc.assign_floating_ip(
+ self.server_instance, self.port_server)
+
+ def assign_floating_ip_sfs(self):
+ """Assign floating IPs to service function
+
+ :return: The list fips_sfs consists of the available IPs for service
+ functions
+ """
+
+ logger.info("Assigning floating IPs to service functions")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ vnf_ip = os_sfc_utils.get_vnf_ip(self.tacker_client,
+ vnf_id=self.vnf_id)
+ self.fips_sfs = openstack_sfc.assign_floating_ip_vnfs(self.router,
+ vnf_ip)
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ for vnf in self.vnfs:
+ # instance object is in [0] and port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ sf_floating_ip = openstack_sfc.\
+ assign_floating_ip(vnf_instance, vnf_port[0])
+ self.fips_sfs.append(sf_floating_ip)
+
+ def check_floating_ips(self):
+ """Check the responsivness of the floating IPs
+
+ :return: The responsivness of IPs in the fips_sfs list is checked
+ """
+
+ fips = [self.client_floating_ip, self.server_floating_ip]
+
+ for sf in self.fips_sfs:
+ fips.append(sf)
+
+ for ip in fips:
+ logger.info("Checking connectivity towards floating IP [%s]" % ip)
+ if not test_utils.ping(ip, retries=50, retry_timeout=3):
+ os_sfc_utils.get_tacker_items()
+ odl_utils.get_odl_items(self.odl_ip, self.odl_port)
+ raise Exception("Cannot ping floating IP [%s]" % ip)
+ logger.info("Successful ping to floating IP [%s]" % ip)
+
+ if not test_utils.check_ssh(self.fips_sfs):
+ raise Exception("Cannot establish SSH connection to the SFs")
+
+ def start_services_in_vm(self):
+ """Start the HTTP server in the server VM as well as the vxlan tool for
+ the SF IPs included in the fips_sfs list
+
+ :return: HTTP server and vxlan tools are started
+ """
+
+ logger.info("Starting HTTP server on %s" % self.server_floating_ip)
+ if not test_utils.start_http_server(self.server_floating_ip):
+ raise Exception('\033[91mFailed to start HTTP server on %s\033[0m'
+ % self.server_floating_ip)
+
+ for sf_floating_ip in self.fips_sfs:
+ logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip)
+
+ def present_results_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if test_utils.is_ssh_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH NOT BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH Works")
+
+ return results
+
+ def present_results_allowed_ssh(self):
+ """Check whether the connection between server and client using
+ SSH protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test SSH")
+ if not test_utils.is_ssh_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "SSH works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> SSH BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "SSH is blocked")
+
+ return results
+
+ def remove_vnffg(self, par_vnffg_name, par_vnffgd_name):
+ """Delete the vnffg and the vnffgd items that have been created
+ during the test scenario.
+
+ :param par_vnffg_name: The name of the vnffg to delete
+ :param par_vnffgd_name: The name of the vnffgd to delete
+ :return: Remove the vnffg and vnffgd components
+ """
+ if COMMON_CONFIG.mano_component == 'tacker':
+ os_sfc_utils.delete_vnffg(self.tacker_client,
+ vnffg_name=par_vnffg_name)
+
+ os_sfc_utils.delete_vnffgd(self.tacker_client,
+ vnffgd_name=par_vnffgd_name)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ # TODO: If we had a testcase where only one chain must be removed
+ # we would need to add the logic. Now it removes all of them
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+ def create_classifier(self, fc_name, port=85,
+ protocol='tcp', symmetric=False):
+ """Create the classifier component following the instructions from
+ relevant templates.
+
+ :param fc_name: The name of the classifier
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :return: Create the classifier component
+ """
+
+ logger.info("Creating the classifier...")
+
+ self.neutron_port = self.port_client
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.create_classifier(self.neutron_port.id,
+ port,
+ protocol,
+ fc_name,
+ symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("Creating classifier with tacker is not supported")
+
+ def create_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False, vnf_index=-1):
+ """Create the vnffg components following the instructions from
+ relevant templates.
+
+ :param testcase_config_name: The config input of the test case
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to specify vnf
+ :return: Create the vnffg component
+ """
+
+ logger.info("Creating the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'tacker':
+ tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ testcase_config_name)
+
+ os_sfc_utils.create_vnffgd(self.tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name=vnffgd_name)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ server_ip_prefix = self.server_ip + '/32'
+
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ os_sfc_utils.create_vnffg_with_param_file(
+ self.tacker_client,
+ vnffgd_name,
+ vnffg_name,
+ self.default_param_file,
+ self.neutron_port.id)
+
+ elif COMMON_CONFIG.mano_component == 'no-mano':
+ logger.info("Creating the vnffg without any mano component...")
+ port_groups = []
+ if vnf_index == -1:
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ else:
+ vnf = self.vnfs[vnf_index]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = openstack_sfc.create_port_groups(
+ neutron_ports, vnf_instance)
+ port_groups.append(port_group)
+
+ self.neutron_port = self.port_client
+
+ if symmetric:
+ # We must pass the server_port and server_ip in the symmetric
+ # case. Otherwise ODL does not work well
+ server_ip_prefix = self.server_ip + '/32'
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric,
+ server_port=self.port_server.id,
+ server_ip=server_ip_prefix)
+
+ else:
+ openstack_sfc.create_chain(port_groups,
+ self.neutron_port.id,
+ port, protocol, vnffg_name,
+ symmetric)
+
+ def update_vnffg(self, testcase_config_name, vnffgd_name, vnffg_name,
+ port=80, protocol='tcp', symmetric=False,
+ vnf_index=0, fc_name='red'):
+ """Update the vnffg components following the instructions from
+ relevant templates.
+
+ :param testcase_config_name: The config input of the test case
+ :param vnffgd_name: The name of the vnffgd template
+ :param vnffg_name: The name for the vnffg
+ :param port: Input port number
+ :param protocol: Input protocol
+ :param symmetric: Check symmetric
+ :param vnf_index: Index to identify vnf
+ :param fc_name: The name of the flow classifier
+ :return: Update the vnffg component
+ """
+
+ logger.info("Update the vnffg...")
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ port_groups = []
+ for vnf in self.vnfs:
+ # vnf_instance is in [0] and vnf_port in [1]
+ vnf_instance = self.vnf_objects[vnf][0]
+ vnf_port = self.vnf_objects[vnf][1]
+ if symmetric:
+ # VNFs have two ports
+ neutron_port1 = vnf_port[0]
+ neutron_port2 = vnf_port[1]
+ neutron_ports = [neutron_port1, neutron_port2]
+ else:
+ neutron_port1 = vnf_port[0]
+ neutron_ports = [neutron_port1]
+
+ port_group = \
+ openstack_sfc.create_port_groups(neutron_ports,
+ vnf_instance)
+ port_groups.append(port_group)
+
+ openstack_sfc.update_chain(vnffg_name, fc_name, symmetric)
+
+ elif COMMON_CONFIG.mano_component == 'tacker':
+ logger.info("update for tacker is not supported")
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+ """Interchange classifiers between port chains
+
+ :param vnffg_1_name: Reference to port_chain_1
+ :param vnffg_2_name: Reference to port_chain_2
+ :param symmetric: To check symmetric
+ :return: Interchange the classifiers
+ """
+
+ if COMMON_CONFIG.mano_component == 'no-mano':
+ openstack_sfc.swap_classifiers(vnffg_1_name,
+ vnffg_2_name,
+ symmetric=False)
+
+ def present_results_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is blocked or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ logger.info("Test HTTP")
+ if test_utils.is_http_blocked(self.client_floating_ip, self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP Blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP works")
+
+ return results
+
+ def present_results_allowed_port_http(self, testcase_config):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is available or not.
+
+ :param testcase_config: The config input of the test case
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s works" % allowed_port)
+ if not test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def present_results_blocked_port_http(self, testcase_config,
+ test='HTTP'):
+ """Check whether the connection between server and client using
+ HTTP protocol and for a specific port is blocked or not.
+
+ :param testcase_config: The config input of the test case
+ :param test: custom test string to print on result summary
+ :return: The results for the specific action of the scenario
+ """
+
+ allowed_port = testcase_config.source_port
+ logger.info("Test if HTTP from port %s doesn't work" % allowed_port)
+ if test_utils.is_http_blocked(
+ self.client_floating_ip, self.server_ip, allowed_port):
+ results.add_to_summary(2, "PASS", test + " blocked")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", test + " works")
+
+ return results
+
+ def check_deletion(self):
+ """Check that the deletion of the chain has been completed sucessfully.
+
+ :return: Check that the chain has been completed deleted without
+ leftovers.
+ """
+
+ if not odl_utils.\
+ check_vnffg_deletion(self.odl_ip, self.odl_port,
+ self.ovs_logger,
+ [self.neutron_port],
+ self.client_instance.hypervisor_hostname,
+ self.compute_nodes):
+ logger.debug("The chains were not correctly removed")
+ raise Exception("Chains not correctly removed, test failed")
+
+ def present_results_allowed_http(self):
+ """Check whether the connection between server and client using
+ HTTP protocol is available or not.
+
+ :return: The results for the specific action of the scenario
+ """
+
+ if not test_utils.is_http_blocked(self.client_floating_ip,
+ self.server_ip):
+ results.add_to_summary(2, "PASS", "HTTP works")
+ else:
+ error = ('\033[91mTEST [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ self.ovs_logger, self.controller_clients, self.compute_clients,
+ error)
+ results.add_to_summary(2, "FAIL", "HTTP is blocked")
+
+ return results
+
+ def vxlan_blocking_start(self, floating_ip, port_blocked):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific port.
+
+ :param floating_ip: Floating IP
+ :param port_blocked: Port
+ :return: The port for the floating IP is blocked
+ """
+
+ test_utils.stop_vxlan_tool(floating_ip)
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, block=port_blocked)
+
+ def vxlan_blocking_stop(self, floating_ip):
+ """Stop the vxlan tool for a specific IP
+
+ :param floating_ip: Floating IP
+ :return: The vxlan tool for the specific floating IP is stopped
+ """
+
+ logger.info("Starting HTTP firewall on %s" % floating_ip)
+ test_utils.stop_vxlan_tool(floating_ip)
+
+ def vxlan_start_interface(self, floating_ip, interface, output, block):
+ """Start the vxlan tool for one floating IP and blocking
+ a specific interface.
+
+ :param floating_ip: Floating IP
+ :param interface: Interface
+ :param output: output interface
+ :param block: port
+ :return: The interface and/or port for the specific floating IP is blocked
+ """
+
+ logger.info("Starting vxlan_tool on %s" % floating_ip)
+ test_utils.start_vxlan_tool(floating_ip, interface=interface,
+ output=output, block=block)
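sfc_parent_function.py concentrates the environment boilerplate (deployment discovery, flavor, images, network, client/server instances, floating IPs and result bookkeeping), so a scenario only subclasses SfcCommonTestCase and composes the helpers defined above. A minimal hypothetical subclass, with the class name chosen purely for illustration and the template names taken from the existing test cases:

    from sfc.tests.functest import sfc_parent_function

    class SfcMinimalExample(sfc_parent_function.SfcCommonTestCase):
        """Hypothetical scenario: one SF, one chain, allowed-HTTP check."""

        def run(self):
            # Register the VNFD template and boot one SF on it
            self.register_vnf_template(self.testcase_config.test_vnfd_red,
                                       'test-vnfd1')
            self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
            # Build a chain steering TCP/80 traffic through the SF
            self.create_vnffg(self.testcase_config.test_vnffgd_red,
                              'red', 'red_http', port=80, protocol='tcp')
            # Floating IPs, connectivity checks and service start-up
            self.assign_floating_ip_client_server()
            self.assign_floating_ip_sfs()
            self.check_floating_ips()
            self.start_services_in_vm()
            # Verify that HTTP traffic is allowed through the chain
            results = self.present_results_allowed_http()
            return results.compile_summary(), self.creators

        def get_creators(self):
            return self.creators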
diff --git a/sfc/tests/functest/sfc_symmetric_chain.py b/sfc/tests/functest/sfc_symmetric_chain.py
index 43599d62..cec45219 100644
--- a/sfc/tests/functest/sfc_symmetric_chain.py
+++ b/sfc/tests/functest/sfc_symmetric_chain.py
@@ -8,302 +8,114 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-from opnfv.deployment.factory import Factory as DeploymentFactory
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-
+COMMON_CONFIG = sfc_config.CommonConfig()
CLIENT = "client"
SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- cluster = COMMON_CONFIG.installer_cluster
- all_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in all_nodes if node.is_controller()]
- compute_nodes = [node for node in all_nodes if node.is_compute()]
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(all_nodes)
-
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- custom_flavor = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if custom_flavor is None:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
-
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
-
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
-
- vnf_name = 'testVNF1'
- topo_seed = topo_shuffler.get_seed()
- testTopology = topo_shuffler.topology([vnf_name], openstack_sfc,
- seed=topo_seed)
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology[CLIENT])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology[SERVER])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
- logger.info("Server instance received private ip [{}]".format(server_ip))
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd)
-
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
-
- os_sfc_utils.create_vnfd(
- tacker_client,
- tosca_file=tosca_file,
- vnfd_name='test-vnfd1')
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client,
- vnf_name,
- 'test-vnfd1',
- 'test-vim',
- default_param_file,
- testTopology[vnf_name])
-
- vnf_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_name)
- if vnf_id is None:
- logger.error('ERROR while booting VNF')
- sys.exit(1)
-
- tosca_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd)
- os_sfc_utils.create_vnffgd(
- tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='test-vnffgd')
-
- client_port = openstack_sfc.get_client_port(
- client_instance,
- client_creator)
- server_port = openstack_sfc.get_client_port(
- server_instance,
- server_creator)
-
- server_ip_prefix = server_ip + '/32'
-
- os_sfc_utils.create_vnffg_with_param_file(
- tacker_client,
- 'test-vnffgd',
- 'test-vnffg',
- default_param_file,
- client_port.id,
- server_port.id,
- server_ip_prefix)
-
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(
- target=wait_for_classification_rules,
- args=(ovs_logger, compute_nodes,
- openstack_sfc.get_compute_server(), server_port,
- openstack_sfc.get_compute_client(), client_port,
- odl_ip, odl_port,))
-
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
-
- vnf_ip = os_sfc_utils.get_vnf_ip(tacker_client, vnf_id=vnf_id)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router, [vnf_ip])
- sf_floating_ip = fips_sfs[0]
-
- fips = [client_floating_ip, server_floating_ip, sf_floating_ip]
-
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
-
- if not test_utils.check_ssh([sf_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
-
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start the HTTP server\033[0m')
- sys.exit(1)
-
- logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
- output='eth1')
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
- output='eth0')
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip,
- server_ip,
- TESTCASE_CONFIG.source_port):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP blocked")
-
- logger.info("Changing the vxlan_tool to block HTTP request traffic")
-
- # Make SF1 block http request traffic
- test_utils.stop_vxlan_tool(sf_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
- output='eth1', block="80")
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
- output='eth0')
-
- logger.info("Test HTTP again blocking request on SF1")
- if test_utils.is_http_blocked(client_floating_ip,
- server_ip,
- TESTCASE_CONFIG.source_port):
- results.add_to_summary(2, "PASS", "HTTP uplink blocked")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Changing the vxlan_tool to block HTTP response traffic")
-
- # Make SF1 block response http traffic
- test_utils.stop_vxlan_tool(sf_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
- output='eth1')
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
- output='eth0',
- block=TESTCASE_CONFIG.source_port)
-
- logger.info("Test HTTP again blocking response on SF1")
- if test_utils.is_http_blocked(client_floating_ip,
- server_ip,
- TESTCASE_CONFIG.source_port):
- results.add_to_summary(2, "PASS", "HTTP downlink blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
-
- logger.info("Changing the vxlan_tool to allow HTTP traffic")
-
- # Make SF1 allow http traffic
- test_utils.stop_vxlan_tool(sf_floating_ip)
- logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
- output='eth1')
- test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
- output='eth0')
-
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP restored")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP blocked")
- return results.compile_summary(), openstack_sfc.creators
+class SfcSymmetricChain(sfc_parent_function.SfcCommonTestCase):
+ """One client and one server are created using nova.
+ The server will be running a web server on port 80.
+ Then one Service Function (SF) is created using Tacker.
+    This service function will be running a firewall that
+    blocks the traffic on a specific port.
+    A symmetric service chain routing the traffic through
+    this SF will be created as well.
+    The purpose is to check different HTTP traffic
+    combinations using the firewall.
+ """
+
+ def run(self):
+
+ logger.info("The test scenario %s is starting", __name__)
+
+ self.register_vnf_template(self.testcase_config.test_vnfd,
+ 'test-vnfd1')
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim', symmetric=True)
+
+ self.create_vnffg(self.testcase_config.test_vnffgd, 'red-symmetric',
+ 'red_http', port=80, protocol='tcp', symmetric=True)
+
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t1 = threading.Thread(target=symmetric_wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.server_instance.hypervisor_hostname,
+ self.port_server,
+ self.client_instance.hypervisor_hostname,
+ self.port_client,
+ self.odl_ip, self.odl_port,))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
+
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
+
+ self.assign_floating_ip_sfs()
+
+ self.check_floating_ips()
-def wait_for_classification_rules(ovs_logger, compute_nodes,
- server_compute, server_port,
- client_compute, client_port,
- odl_ip, odl_port):
+ self.start_services_in_vm()
+
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
+
+ results = self.present_results_allowed_port_http(self.testcase_config)
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', "80")
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP uplink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0',
+ self.testcase_config.source_port)
+
+ results = self.present_results_blocked_port_http(self.testcase_config,
+ 'HTTP downlink')
+
+ self.vxlan_blocking_stop(self.fips_sfs[0])
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth0', 'eth1', None)
+ self.vxlan_start_interface(self.fips_sfs[0], 'eth1', 'eth0', None)
+ results = self.present_results_allowed_http()
+
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
+
+ if __name__ == 'sfc.tests.functest.sfc_symmetric_chain':
+ return results.compile_summary(), self.creators
+
+ def get_creators(self):
+        """Return the creators info, especially in case the info is not
+ returned due to an exception.
+
+ :return: creators
+ """
+ return self.creators
+
+
+def symmetric_wait_for_classification_rules(ovs_logger, compute_nodes,
+ server_compute, server_port,
+ client_compute, client_port,
+ odl_ip, odl_port):
if client_compute == server_compute:
odl_utils.wait_for_classification_rules(
ovs_logger,
@@ -330,5 +142,14 @@ def wait_for_classification_rules(ovs_logger, compute_nodes,
if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_symmetric_chain')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1']
+
+ test_run = SfcSymmetricChain(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
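(The body of symmetric_wait_for_classification_rules is cut off by the hunk boundary above. A minimal sketch of how such a wrapper could dispatch the wait, assuming the odl_utils.wait_for_classification_rules argument order used elsewhere in this patch, is shown below; it is an illustration, not the literal body of the function.)

    import sfc.lib.odl_utils as odl_utils

    def symmetric_wait_for_classification_rules(ovs_logger, compute_nodes,
                                                server_compute, server_port,
                                                client_compute, client_port,
                                                odl_ip, odl_port):
        # Client and server on the same hypervisor: one wait covers both
        # directions of the symmetric chain.
        if client_compute == server_compute:
            odl_utils.wait_for_classification_rules(
                ovs_logger, compute_nodes, odl_ip, odl_port,
                client_compute, [client_port, server_port])
        else:
            # Otherwise each compute is polled for the port it hosts.
            odl_utils.wait_for_classification_rules(
                ovs_logger, compute_nodes, odl_ip, odl_port,
                client_compute, [client_port])
            odl_utils.wait_for_classification_rules(
                ovs_logger, compute_nodes, odl_ip, odl_port,
                server_compute, [server_port])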
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index a5133f00..92c2711e 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -8,307 +8,120 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
-import os
-import sys
import threading
import logging
+import urllib3
-import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
-import opnfv.utils.ovs_logger as ovs_log
-
import sfc.lib.config as sfc_config
-import sfc.lib.test_utils as test_utils
-from sfc.lib.results import Results
-from opnfv.deployment.factory import Factory as DeploymentFactory
-import sfc.lib.topology_shuffler as topo_shuffler
-
+from sfc.tests.functest import sfc_parent_function
+""" logging configuration """
logger = logging.getLogger(__name__)
-CLIENT = "client"
-SERVER = "server"
-COMMON_CONFIG = sfc_config.CommonConfig()
-TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
-
-
-def main():
- deploymentHandler = DeploymentFactory.get_handler(
- COMMON_CONFIG.installer_type,
- COMMON_CONFIG.installer_ip,
- COMMON_CONFIG.installer_user,
- COMMON_CONFIG.installer_password,
- COMMON_CONFIG.installer_key_file)
-
- installer_type = os.environ.get("INSTALLER_TYPE")
-
- supported_installers = ['fuel', 'apex', 'osa', 'compass']
-
- if installer_type not in supported_installers:
- logger.error(
- '\033[91mYour installer is not supported yet\033[0m')
- sys.exit(1)
-
- installer_ip = os.environ.get("INSTALLER_IP")
- if not installer_ip:
- logger.error(
- '\033[91minstaller ip is not set\033[0m')
- logger.error(
- '\033[91mexport INSTALLER_IP=<ip>\033[0m')
- sys.exit(1)
-
- cluster = COMMON_CONFIG.installer_cluster
- openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
- if cluster is not None
- else deploymentHandler.get_nodes())
-
- controller_nodes = [node for node in openstack_nodes
- if node.is_controller()]
- compute_nodes = [node for node in openstack_nodes
- if node.is_compute()]
-
- odl_ip, odl_port = odl_utils.get_odl_ip_port(openstack_nodes)
-
- for compute in compute_nodes:
- logger.info("This is a compute: %s" % compute.ip)
-
- results = Results(COMMON_CONFIG.line_length)
- results.add_to_summary(0, "=")
- results.add_to_summary(2, "STATUS", "SUBTEST")
- results.add_to_summary(0, "=")
-
- openstack_sfc = os_sfc_utils.OpenStackSFC()
-
- custom_flv = openstack_sfc.create_flavor(
- COMMON_CONFIG.flavor,
- COMMON_CONFIG.ram_size_in_mb,
- COMMON_CONFIG.disk_size_in_gb,
- COMMON_CONFIG.vcpu_count)
- if not custom_flv:
- logger.error("Failed to create custom flavor")
- sys.exit(1)
-
- tacker_client = os_sfc_utils.get_tacker_client()
-
- controller_clients = test_utils.get_ssh_clients(controller_nodes)
- compute_clients = test_utils.get_ssh_clients(compute_nodes)
-
- ovs_logger = ovs_log.OVSLogger(
- os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
- COMMON_CONFIG.functest_results_dir)
-
- image_creator = openstack_sfc.register_glance_image(
- COMMON_CONFIG.image_name,
- COMMON_CONFIG.image_url,
- COMMON_CONFIG.image_format,
- 'public')
- network, router = openstack_sfc.create_network_infrastructure(
- TESTCASE_CONFIG.net_name,
- TESTCASE_CONFIG.subnet_name,
- TESTCASE_CONFIG.subnet_cidr,
- TESTCASE_CONFIG.router_name)
+class SfcTwoChainsSSHandHTTP(sfc_parent_function.SfcCommonTestCase):
+ """We create one client and one server using nova.
+ Then, 2 SFs are created using tacker.
+ Two chains are created, having one SF each.
+ The vxlan tool is used on both SFs. The purpose is to
+ check different HTTP and SSH traffic combinations.
+ """
- sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
+ def run(self):
- vnf_names = ['testVNF1', 'testVNF2']
-
- topo_seed = topo_shuffler.get_seed() # change to None for nova av zone
- testTopology = topo_shuffler.topology(vnf_names, openstack_sfc,
- seed=topo_seed)
-
- logger.info('This test is run with the topology {0}'
- .format(testTopology['id']))
- logger.info('Topology description: {0}'
- .format(testTopology['description']))
-
- client_instance, client_creator = openstack_sfc.create_instance(
- CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
-
- server_instance, server_creator = openstack_sfc.create_instance(
- SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
-
- server_ip = server_instance.ports[0].ips[0]['ip_address']
-
- os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
-
- tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_red)
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_red,
- vnfd_name='test-vnfd1')
-
- tosca_blue = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- TESTCASE_CONFIG.test_vnfd_blue)
-
- os_sfc_utils.create_vnfd(tacker_client,
- tosca_file=tosca_blue,
- vnfd_name='test-vnfd2')
+ logger.info("The test scenario %s is starting", __name__)
- default_param_file = os.path.join(
- COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnfd_dir,
- COMMON_CONFIG.vnfd_default_params_file)
+ self.register_vnf_template(self.testcase_config.test_vnfd_red,
+ 'test-vnfd1')
+ self.register_vnf_template(self.testcase_config.test_vnfd_blue,
+ 'test-vnfd2')
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[0], 'test-vnfd1', 'test-vim',
- default_param_file, testTopology[vnf_names[0]])
- os_sfc_utils.create_vnf_in_av_zone(
- tacker_client, vnf_names[1], 'test-vnfd2', 'test-vim',
- default_param_file, testTopology[vnf_names[1]])
+ self.create_vnf(self.vnfs[0], 'test-vnfd1', 'test-vim')
+ self.create_vnf(self.vnfs[1], 'test-vnfd2', 'test-vim')
- vnf1_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
- vnf2_id = os_sfc_utils.wait_for_vnf(tacker_client, vnf_name=vnf_names[1])
- if vnf1_id is None or vnf2_id is None:
- logger.error('ERROR while booting vnfs')
- sys.exit(1)
+ logger.info("Call Parent create_vnffg with index")
+ self.create_vnffg(self.testcase_config.test_vnffgd_red, 'red',
+ 'red_http', port=80, protocol='tcp',
+ symmetric=False, vnf_index=0)
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
+ self.create_vnffg(self.testcase_config.test_vnffgd_blue, 'blue',
+ 'blue_ssh', port=22, protocol='tcp',
+ symmetric=False, vnf_index=1)
+ self.create_classifier('dummy')
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
+ t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t1.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- neutron_port = openstack_sfc.get_client_port(client_instance,
- client_creator)
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
+ logger.info("Assigning floating IPs to instances")
+ self.assign_floating_ip_client_server()
- # Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),
- [neutron_port],))
+ self.assign_floating_ip_sfs()
- try:
- t1.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
+ self.check_floating_ips()
+ self.start_services_in_vm()
+ self.vxlan_blocking_start(self.fips_sfs[0], "80")
+ self.vxlan_blocking_start(self.fips_sfs[1], "22")
- logger.info("Assigning floating IPs to instances")
- client_floating_ip = openstack_sfc.assign_floating_ip(router,
- client_instance,
- client_creator)
- server_floating_ip = openstack_sfc.assign_floating_ip(router,
- server_instance,
- server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
- sf1_floating_ip = fips_sfs[0]
- sf2_floating_ip = fips_sfs[1]
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t1.join()
- fips = [client_floating_ip, server_floating_ip, sf1_floating_ip,
- sf2_floating_ip]
+ results = self.present_results_ssh()
+ results = self.present_results_http()
- for ip in fips:
- logger.info("Checking connectivity towards floating IP [%s]" % ip)
- if not test_utils.ping(ip, retries=50, retry_timeout=3):
- logger.error("Cannot ping floating IP [%s]" % ip)
- os_sfc_utils.get_tacker_items()
- odl_utils.get_odl_items(odl_ip, odl_port)
- sys.exit(1)
- logger.info("Successful ping to floating IP [%s]" % ip)
+ logger.info("Changing the classification")
- if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
- logger.error("Cannot establish SSH connection to the SFs")
- sys.exit(1)
+ self.swap_classifiers('red_http', 'blue_ssh')
- logger.info("Starting HTTP server on %s" % server_floating_ip)
- if not test_utils.start_http_server(server_floating_ip):
- logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
- % server_floating_ip)
- sys.exit(1)
+ # Start measuring the time it takes to implement the classification
+ # rules
+ t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
+ args=(self.ovs_logger, self.compute_nodes,
+ self.odl_ip, self.odl_port,
+ self.client_instance.hypervisor_hostname,
+ [self.neutron_port],))
+ try:
+ t2.start()
+ except Exception as e:
+ logger.error("Unable to start the thread that counts time %s" % e)
- logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
- test_utils.start_vxlan_tool(sf1_floating_ip, block="22")
- logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
- test_utils.start_vxlan_tool(sf2_floating_ip, block="80")
+ logger.info("Wait for ODL to update the classification rules in OVS")
+ t2.join()
- logger.info("Wait for ODL to update the classification rules in OVS")
- t1.join()
+ results = self.present_results_allowed_http()
+ results = self.present_results_allowed_ssh()
- logger.info("Test SSH")
- if test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH Blocked")
- else:
- error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH Blocked")
+ if __name__ == '__main__':
+ return results.compile_summary(), self.creators
- logger.info("Test HTTP")
- if not test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP works")
- else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP works")
+ if __name__ == 'sfc.tests.functest.sfc_two_chains_SSH_and_HTTP':
+ return results.compile_summary(), self.creators
- logger.info("Changing the classification")
+ def get_creators(self):
+        """Return the creators info, especially in case the info is not
+ returned due to an exception.
- os_sfc_utils.delete_vnffg(tacker_client, vnffg_name='red_http_works')
+ :return: creators
+ """
+ return self.creators
- os_sfc_utils.delete_vnffgd(tacker_client, vnffgd_name='red')
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_blue)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='blue')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'blue',
- 'blue_ssh',
- default_param_file,
- neutron_port)
-
- # Start measuring the time it takes to implement the classification rules
- t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),
- [neutron_port],))
- try:
- t2.start()
- except Exception as e:
- logger.error("Unable to start the thread that counts time %s" % e)
-
- logger.info("Wait for ODL to update the classification rules in OVS")
- t2.join()
-
- logger.info("Test HTTP")
- if test_utils.is_http_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
- else:
- error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
-
- logger.info("Test SSH")
- if not test_utils.is_ssh_blocked(client_floating_ip, server_ip):
- results.add_to_summary(2, "PASS", "SSH works")
- else:
- error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
- logger.error(error)
- test_utils.capture_ovs_logs(
- ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "SSH works")
+if __name__ == '__main__':
- return results.compile_summary(), openstack_sfc.creators
+ # Disable InsecureRequestWarning errors when executing the SFC tests in XCI
+ urllib3.disable_warnings()
+ TESTCASE_CONFIG = sfc_config.TestcaseConfig('sfc_two_chains_SSH_and_HTTP')
+ supported_installers = ['fuel', 'apex', 'osa', 'compass']
+ vnf_names = ['testVNF1', 'testVNF2']
-if __name__ == '__main__':
- logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
- main()
+ test_run = SfcTwoChainsSSHandHTTP(TESTCASE_CONFIG, supported_installers,
+ vnf_names)
+ test_run.run()
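(vxlan_blocking_start, vxlan_blocking_stop and vxlan_start_interface come from the SfcCommonTestCase parent class, whose definition is not part of this hunk. Assuming they simply wrap the test_utils calls that the pre-refactor code above invoked directly, a rough sketch could look like the following; the method names come from the calls in run(), everything else is an assumption.)

    import sfc.lib.test_utils as test_utils

    class SfcCommonTestCaseSketch(object):
        # Hypothetical shapes of the parent-class helpers used in run() above.
        def vxlan_blocking_start(self, fip, port):
            # Restart vxlan_tool on the SF so that it drops traffic on `port`.
            test_utils.stop_vxlan_tool(fip)
            test_utils.start_vxlan_tool(fip, block=port)

        def vxlan_blocking_stop(self, fip):
            # Stop the firewall so traffic flows through the SF untouched.
            test_utils.stop_vxlan_tool(fip)

        def vxlan_start_interface(self, fip, interface, output, block):
            test_utils.start_vxlan_tool(fip, interface=interface,
                                        output=output, block=block)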
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
deleted file mode 100644
index 3f10e6b8..00000000
--- a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_HTTP Test
-
-topology_template:
- description: topology-template-test2
- inputs:
- net_src_port_id:
- type: string
-
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path
- properties:
- id: 1
- policy:
- type: ACL
- criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
- path:
- - forwarder: test-vnfd1
- capability: CP1
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: creates chain
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP1]
- constituent_vnfs: [test-vnfd1]
- members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
index 28b78ead..fd549079 100644
--- a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
@@ -10,16 +10,18 @@ topology_template:
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
deleted file mode 100644
index 27c7d545..00000000
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case1
-
-topology_template:
- description: topology-template-test1
- inputs:
- net_src_port_id:
- type: string
-
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path
- properties:
- id: 1
- policy:
- type: ACL
- criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
- path:
- - forwarder: test-vnfd1
- capability: CP1
- - forwarder: test-vnfd2
- capability: CP1
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: creates chain
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL1, VL1]
- connection_point: [CP1, CP1]
- constituent_vnfs: [test-vnfd1, test-vnfd2]
- members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
index 544d6e8e..4dcc0f3c 100644
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
@@ -10,16 +10,18 @@ topology_template:
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
index 6b14df1b..371d25fe 100644
--- a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
@@ -14,18 +14,20 @@ topology_template:
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - network_dst_port_id: {get_input: net_dst_port_id}
- - ip_dst_prefix: {get_input: ip_dst_prefix}
- - destination_port_range: 80-80
- - ip_proto: 6
+ - name: http_classifier
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ network_dst_port_id: {get_input: net_dst_port_id}
+ ip_dst_prefix: {get_input: ip_dst_prefix}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
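(For reference, the reworked criteria above wrap the match fields inside a single named classifier, as the tosca.nodes.nfv.FP.TackerV2 node type expects. A hypothetical Python helper, not part of the patch, that produces the same structure:)

    import yaml

    def http_classifier_criteria():
        # One named classifier carrying the match fields, as TackerV2 expects.
        return [{
            'name': 'http_classifier',
            'classifier': {
                'network_src_port_id': {'get_input': 'net_src_port_id'},
                'network_dst_port_id': {'get_input': 'net_dst_port_id'},
                'ip_dst_prefix': {'get_input': 'ip_dst_prefix'},
                'destination_port_range': '80-80',
                'ip_proto': 6,
            },
        }]

    print(yaml.safe_dump({'criteria': http_classifier_criteria()},
                         default_flow_style=False))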
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
deleted file mode 100644
index c40c447c..00000000
--- a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
+++ /dev/null
@@ -1,46 +0,0 @@
----
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case-symmetric
-
-topology_template:
- description: topology-template-test1
- inputs:
- net_src_port_id:
- type: string
- net_dst_port_id:
- type: string
- ip_dst_prefix:
- type: string
-
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path
- properties:
- id: 1
- policy:
- type: ACL
- criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- network_dst_port_id: {get_input: net_dst_port_id}
- ip_dst_prefix: {get_input: ip_dst_prefix}
- destination_port_range: 80-80
- ip_proto: 6
- path:
- - forwarder: test-vnfd1
- capability: CP1
- - forwarder: test-vnfd1
- capability: CP2
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: creates chain
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 2
- dependent_virtual_link: [VL1, VL1]
- connection_point: [CP1, CP2]
- constituent_vnfs: [test-vnfd1, test-vnfd1]
- members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
deleted file mode 100644
index f0615e4e..00000000
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_HTTP Test
-
-topology_template:
- description: topology-template-test2
- inputs:
- net_src_port_id:
- type: string
-
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path
- properties:
- id: 1
- policy:
- type: ACL
- criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
- path:
- - forwarder: test-vnfd1
- capability: CP1
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: creates chain
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP1]
- constituent_vnfs: [test-vnfd1]
- members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
deleted file mode 100644
index ec18c9d6..00000000
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_SSH Test
-
-topology_template:
- description: topology-template-test2
- inputs:
- net_src_port_id:
- type: string
-
-
- node_templates:
- Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
- description: creates path
- properties:
- id: 2
- policy:
- type: ACL
- criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
- path:
- - forwarder: test-vnfd2
- capability: CP1
-
- groups:
- VNFFG1:
- type: tosca.groups.nfv.VNFFG
- description: creates chain
- properties:
- vendor: tacker
- version: 1.0
- number_of_endpoints: 1
- dependent_virtual_link: [VL1]
- connection_point: [CP1]
- constituent_vnfs: [test-vnfd2]
- members: [Forwarding_path1]
diff --git a/sfc/tests/functest/setup_scripts/__init__.py b/sfc/unit_tests/__init__.py
index e69de29b..e69de29b 100644
--- a/sfc/tests/functest/setup_scripts/__init__.py
+++ b/sfc/unit_tests/__init__.py
diff --git a/sfc/unit_tests/unit/__init__.py b/sfc/unit_tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/sfc/unit_tests/unit/__init__.py
diff --git a/sfc/unit_tests/unit/lib/test_cleanup.py b/sfc/unit_tests/unit/lib/test_cleanup.py
new file mode 100644
index 00000000..e6f59d23
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_cleanup.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+import sfc.lib.cleanup as cleanup
+
+from mock import patch
+from mock import call
+from mock import DEFAULT
+from mock import Mock
+
+
+__author__ = "Dimitrios Markou <mardim@intracom-telecom.com>"
+
+
+class SfcCleanupTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.odl_ip = '10.10.10.10'
+ self.odl_port = '8081'
+ self.patcher = patch('sfc.lib.openstack_utils.get_tacker_client')
+ self.mock_tacker_client = self.patcher.start()
+ self.mock_tacker_client.return_value = 'tacker_client_obj'
+
+ def tearDown(self):
+ self.patcher.stop()
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.odl_utils.delete_odl_resource_elem')
+ @patch('sfc.lib.odl_utils.odl_resource_list_names')
+ @patch('sfc.lib.odl_utils.get_odl_resource_list')
+ def test_delete_odl_resource(self, mock_resource_list,
+ mock_resource_list_name,
+ mock_del_resource_elem,
+ mock_log):
+ """
+ Checks if the functions which belong to the odl_utils
+ library are getting called.
+ """
+
+ resource = 'mock_resource'
+ log_calls = [call("Removing ODL resource: mock_resource/elem_one"),
+ call("Removing ODL resource: mock_resource/elem_two")]
+
+ del_calls = [call(self.odl_ip, self.odl_port, resource, 'elem_one'),
+ call(self.odl_ip, self.odl_port, resource, 'elem_two')]
+
+ mock_resource_list_name.return_value = ['elem_one', 'elem_two']
+ mock_resource_list.return_value = ['rsrc_one',
+ 'rsrc_two',
+ 'rsrc_three']
+
+ cleanup.delete_odl_resources(self.odl_ip, self.odl_port, resource)
+
+ mock_resource_list.assert_called_once_with(self.odl_ip,
+ self.odl_port,
+ resource)
+ mock_resource_list_name.assert_called_once_with(
+ resource, ['rsrc_one', 'rsrc_two', 'rsrc_three'])
+ mock_del_resource_elem.assert_has_calls(del_calls)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.odl_utils.get_odl_acl_list')
+ @patch('sfc.lib.odl_utils.odl_acl_types_names')
+ @patch('sfc.lib.odl_utils.delete_odl_acl')
+ def test_delete_odl_ietf_access_lists(self,
+ mock_del_acl,
+ mock_acl_types,
+ mock_get_acls):
+ """
+        Checks the proper functionality of the delete_odl_ietf_access_lists
+ function
+ """
+
+ mock_acl_type_name_list = [('acl_type_one', 'name_one'),
+ ('acl_type_two', 'name_two')]
+ mock_get_acls.return_value = ['acl_one', 'acl_two']
+ mock_acl_types.return_value = mock_acl_type_name_list
+ del_calls = [call(self.odl_ip, self.odl_port, key, value)
+ for key, value in mock_acl_type_name_list]
+
+ cleanup.delete_odl_ietf_access_lists(self.odl_ip, self.odl_port)
+
+ mock_get_acls.assert_called_once_with(self.odl_ip, self.odl_port)
+ mock_acl_types.assert_called_once_with(['acl_one', 'acl_two'])
+ mock_del_acl.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnfds', return_value=None)
+ def test_delete_vnfds_returned_list_is_none(self, mock_list_vnfds):
+ """
+ Check the proper functionality of the delete_vnfds
+ function when the returned vnfds list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnfds())
+ mock_list_vnfds.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnfd')
+ @patch('sfc.lib.openstack_utils.list_vnfds')
+ def test_delete_vnfds_not_empty_list(self,
+ mock_list_vnfds,
+ mock_del_vnfd,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnfds
+ function when the returned vnfds list is not empty
+ """
+
+ mock_list_vnfds.return_value = ['vnfd_one', 'vnfd_two']
+ log_calls = [call("Removing vnfd: vnfd_one"),
+ call("Removing vnfd: vnfd_two")]
+
+ del_calls = [call('tacker_client_obj', vnfd_id='vnfd_one'),
+ call('tacker_client_obj', vnfd_id='vnfd_two')]
+
+ cleanup.delete_vnfds()
+ mock_list_vnfds.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnfd.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnfs', return_value=None)
+ def test_delete_vnfs_returned_list_is_none(self, mock_list_vnfs):
+ """
+ Check the proper functionality of the delete_vnfs
+ function when the returned vnfs list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnfs())
+ mock_list_vnfs.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnf')
+ @patch('sfc.lib.openstack_utils.list_vnfs')
+ def test_delete_vnfs_not_empty_list(self,
+ mock_list_vnfs,
+ mock_del_vnf,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnfs
+ function when the returned vnfs list is not empty
+ """
+
+ mock_list_vnfs.return_value = ['vnf_one', 'vnf_two']
+ log_calls = [call("Removing vnf: vnf_one"),
+ call("Removing vnf: vnf_two")]
+
+ del_calls = [call('tacker_client_obj', vnf_id='vnf_one'),
+ call('tacker_client_obj', vnf_id='vnf_two')]
+
+ cleanup.delete_vnfs()
+ mock_list_vnfs.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnf.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnffgs', return_value=None)
+ def test_delete_vnffgs_returned_list_is_none(self, mock_list_vnffgs):
+ """
+ Check the proper functionality of the delete_vnffgs
+ function when the returned vnffgs list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnffgs())
+ mock_list_vnffgs.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnffg')
+ @patch('sfc.lib.openstack_utils.list_vnffgs')
+ def test_delete_vnffgs_not_empty_list(self,
+ mock_list_vnffgs,
+ mock_del_vnffg,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnffgs
+ function when the returned vnffgs list is not empty
+ """
+
+ mock_list_vnffgs.return_value = ['vnffg_one', 'vnffg_two']
+ log_calls = [call("Removing vnffg: vnffg_two"),
+ call("Removing vnffg: vnffg_one")]
+
+ del_calls = [call('tacker_client_obj', vnffg_id='vnffg_two'),
+ call('tacker_client_obj', vnffg_id='vnffg_one')]
+
+ cleanup.delete_vnffgs()
+ mock_list_vnffgs.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnffg.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vnffgds', return_value=None)
+ def test_delete_vnffgds_returned_list_is_none(self, mock_list_vnffgds):
+ """
+ Check the proper functionality of the delete_vnffgds
+ function when the returned vnffgds list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vnffgds())
+ mock_list_vnffgds.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vnffgd')
+ @patch('sfc.lib.openstack_utils.list_vnffgds')
+ def test_delete_vnffgds_not_empty_list(self,
+ mock_list_vnffgds,
+ mock_del_vnffgd,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vnffgds
+ function when the returned vnffgds list is not empty
+ """
+
+ mock_list_vnffgds.return_value = ['vnffgd_one', 'vnffgd_two']
+ log_calls = [call("Removing vnffgd: vnffgd_one"),
+ call("Removing vnffgd: vnffgd_two")]
+
+ del_calls = [call('tacker_client_obj', vnffgd_id='vnffgd_one'),
+ call('tacker_client_obj', vnffgd_id='vnffgd_two')]
+
+ cleanup.delete_vnffgds()
+ mock_list_vnffgds.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vnffgd.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.list_vims', return_value=None)
+ def test_delete_vims_returned_list_is_none(self, mock_list_vims):
+ """
+ Check the proper functionality of the delete_vims
+ function when the returned vims list is None
+ """
+
+ self.assertIsNone(cleanup.delete_vims())
+ mock_list_vims.assert_called_once_with('tacker_client_obj')
+
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.openstack_utils.delete_vim')
+ @patch('sfc.lib.openstack_utils.list_vims')
+ def test_delete_vims_not_empty_list(self,
+ mock_list_vims,
+ mock_del_vim,
+ mock_log):
+ """
+ Check the proper functionality of the delete_vims
+ function when the returned vims list is not empty
+ """
+
+ mock_list_vims.return_value = ['vim_one', 'vim_two']
+ log_calls = [call("Removing vim: vim_one"),
+ call("Removing vim: vim_two")]
+
+ del_calls = [call('tacker_client_obj', vim_id='vim_one'),
+ call('tacker_client_obj', vim_id='vim_two')]
+
+ cleanup.delete_vims()
+ mock_list_vims.assert_called_once_with('tacker_client_obj')
+ mock_log.assert_has_calls(log_calls)
+ mock_del_vim.assert_has_calls(del_calls)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
+ def test_delete_untracked_security_groups(self,
+ mock_obj):
+ instance = mock_obj.return_value
+ cleanup.delete_untracked_security_groups()
+ instance.delete_all_security_groups.assert_called_once()
+
+ @patch('sfc.lib.cleanup.delete_odl_resources')
+ @patch('sfc.lib.cleanup.delete_odl_ietf_access_lists')
+ def test_cleanup_odl(self,
+ mock_del_odl_ietf,
+ mock_del_odl_res):
+ resources = ['service-function-forwarder']
+
+ odl_res_calls = [call(self.odl_ip, self.odl_port, item)
+ for item in resources]
+
+ cleanup.cleanup_odl(self.odl_ip, self.odl_port)
+
+ mock_del_odl_res.assert_has_calls(odl_res_calls)
+ mock_del_odl_ietf.assert_called_once_with(self.odl_ip, self.odl_port)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC', autospec=True)
+ def test_cleanup_nsfc_objects(self, mock_os_sfc):
+ mock_os_sfc_ins = mock_os_sfc.return_value
+ cleanup.cleanup_nsfc_objects()
+ mock_os_sfc_ins.delete_chain.assert_called_once()
+ mock_os_sfc_ins.delete_port_groups.assert_called_once()
+
+ @patch('time.sleep')
+ def test_cleanup_tacker_objects(self, mock_time):
+
+ mock_dict = {'delete_vnffgs': DEFAULT,
+ 'delete_vnffgds': DEFAULT,
+ 'delete_vnfs': DEFAULT,
+ 'delete_vnfds': DEFAULT,
+ 'delete_vims': DEFAULT}
+ with patch.multiple('sfc.lib.cleanup',
+ **mock_dict) as mock_values:
+ cleanup.cleanup_tacker_objects()
+
+ for key in mock_values:
+ mock_values[key].assert_called_once()
+
+ mock_time.assert_called_once_with(20)
+
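(The assertions above only pin down which helpers cleanup_tacker_objects calls and that it sleeps once for 20 seconds. A sketch consistent with them, where the delete_* names are the module-level functions mocked in the test and the ordering and position of the pause are assumptions, could be:)

    import time

    def cleanup_tacker_objects():
        # Remove Tacker resources from the most dependent to the least.
        delete_vnffgs()
        delete_vnffgds()
        delete_vnfs()
        time.sleep(20)  # let Tacker release ports before removing descriptors
        delete_vnfds()
        delete_vims()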
+ @patch('sfc.lib.cleanup.cleanup_tacker_objects')
+ def test_cleanup_mano_objects_tacker(self, mock_cleanup_tacker):
+ cleanup.cleanup_mano_objects('tacker')
+ mock_cleanup_tacker.assert_called_once()
+
+ @patch('sfc.lib.cleanup.cleanup_nsfc_objects')
+ def test_cleanup_mano_objects_nsfc(self, mock_cleanup_nsfc):
+ cleanup.cleanup_mano_objects('no-mano')
+ mock_cleanup_nsfc.assert_called_once()
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects(self, mock_log, mock_conn):
+ """
+        Checks the delete_openstack_objects function
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ def test_delete_openstack_objects_router(self, mock_log, mock_conn):
+ """
+        Checks the delete_openstack_objects function with a router creator
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj = Mock()
+ mock_creator_obj.name = 'creator_name'
+ mock_creator_router = Mock()
+ mock_creator_router.name = 'router_name'
+ mock_creator_router.id = '1'
+ mock_creator_subnet = Mock()
+ mock_creator_subnet.name = 'subnet_name'
+ mock_creator_subnet.id = '2'
+ mock_creator_objs_list = [mock_creator_subnet,
+ mock_creator_router,
+ mock_creator_obj]
+
+ mock_conn.from_config.return_value = conn
+ testcase_config.router_name = mock_creator_router.name
+ testcase_config.subnet_name = mock_creator_subnet.name
+
+ conn.network.get_subnet.return_value = mock_creator_subnet
+ log_calls = [call('Deleting ' + mock_creator_obj.name),
+ call('Deleting ' + mock_creator_router.name),
+ call('Removing subnet from router'),
+ call('Deleting router'),
+ call('Deleting ' + mock_creator_subnet.name)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ conn.network.remove_interface_from_router.\
+ assert_called_once_with(mock_creator_router.id,
+ mock_creator_subnet.id)
+ conn.network.delete_router.\
+ assert_called_once_with(mock_creator_router)
+ mock_creator_obj.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_subnet.delete.\
+ assert_called_once_with(conn.session)
+ mock_log.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.cleanup.connection')
+ @patch('sfc.lib.cleanup.logger.info')
+ @patch('sfc.lib.cleanup.logger.error')
+ def test_delete_openstack_objects_exception(self, mock_log_err,
+ mock_log_info, mock_conn):
+ """
+ Check the proper functionality of the delete_openstack_objects
+        function when an exception occurs.
+ """
+ testcase_config = Mock()
+ conn = Mock()
+ mock_creator_obj_one = Mock()
+ mock_creator_obj_one.name = 'subnet_name'
+ mock_creator_obj_two = Mock()
+ mock_creator_obj_two.name = 'creator_name'
+ exception_one = Exception('First Boom!')
+ exception_two = Exception('Second Boom!')
+ attrs_list = [{'delete.side_effect': exception_one},
+ {'delete.side_effect': exception_two}]
+
+ mock_creator_obj_one.configure_mock(**attrs_list[0])
+ mock_creator_obj_two.configure_mock(**attrs_list[1])
+
+ mock_creator_objs_list = [mock_creator_obj_one, mock_creator_obj_two]
+ mock_conn.from_config.return_value = conn
+ testcase_config.subnet_name = mock_creator_obj_one.name
+
+ log_calls = [call('Deleting ' + mock_creator_obj_two.name),
+ call('Deleting ' + mock_creator_obj_one.name),
+ call('Unexpected error cleaning - %s', exception_two),
+ call('Unexpected error cleaning - %s', exception_one)]
+
+ cleanup.delete_openstack_objects(testcase_config,
+ mock_creator_objs_list)
+ mock_creator_obj_one.delete.\
+ assert_called_once_with(conn.session)
+ mock_creator_obj_two.delete.\
+ assert_called_once_with(conn.session)
+
+ mock_log_info.assert_has_calls(log_calls[:2])
+ mock_log_err.assert_has_calls(log_calls[2:])
+
+ @patch('sfc.lib.cleanup.delete_untracked_security_groups')
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
+ @patch('sfc.lib.cleanup.delete_openstack_objects')
+ @patch('sfc.lib.cleanup.cleanup_odl')
+ def test_cleanup(self,
+ mock_cleanup_odl,
+ mock_del_os_obj,
+ mock_cleanup_mano,
+ mock_untr_sec_grps):
+
+ cleanup.cleanup('testcase_config', ['creator_one', 'creator_two'],
+ 'mano',
+ self.odl_ip,
+ self.odl_port)
+
+ mock_cleanup_odl.assert_called_once_with(self.odl_ip,
+ self.odl_port)
+ mock_del_os_obj.assert_called_once_with('testcase_config',
+ ['creator_one', 'creator_two'])
+ mock_cleanup_mano.assert_called_once_with('mano')
+ mock_untr_sec_grps.assert_called_once()
+
+ @patch('sfc.lib.cleanup.cleanup_mano_objects')
+ @patch('sfc.lib.cleanup.cleanup_odl')
+ def test_cleanup_from_bash(self,
+ mock_cleanup_odl,
+ mock_cleanup_mano):
+
+ cleanup.cleanup_from_bash(self.odl_ip,
+ self.odl_port,
+ 'mano')
+
+ mock_cleanup_odl.assert_called_once_with(self.odl_ip,
+ self.odl_port)
+ mock_cleanup_mano.assert_called_once_with(mano='mano')
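(Taken together, the three delete_openstack_objects tests above describe the expected behaviour of the new helper: creators are removed in reverse order, the router named in the testcase config is detached from its subnet and deleted through the openstacksdk connection, and per-creator exceptions are logged instead of propagated. A sketch inferred from those assertions, not the literal code in sfc/lib/cleanup.py:)

    import logging

    from openstack import connection

    logger = logging.getLogger(__name__)

    def delete_openstack_objects(testcase_config, creators):
        conn = connection.from_config()  # connection arguments are a guess
        for creator in reversed(creators):
            try:
                logger.info('Deleting ' + creator.name)
                if creator.name == testcase_config.router_name:
                    logger.info('Removing subnet from router')
                    subnet = conn.network.get_subnet(
                        testcase_config.subnet_name)
                    conn.network.remove_interface_from_router(creator.id,
                                                              subnet.id)
                    logger.info('Deleting router')
                    conn.network.delete_router(creator)
                else:
                    creator.delete(conn.session)
            except Exception as e:
                logger.error('Unexpected error cleaning - %s', e)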
diff --git a/sfc/unit_tests/unit/lib/test_odl_utils.py b/sfc/unit_tests/unit/lib/test_odl_utils.py
new file mode 100644
index 00000000..1dfcf1ed
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_odl_utils.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+
+import sfc.lib.odl_utils as odl_utils
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOdlUtilsTesting(unittest.TestCase):
+
+ @patch('re.compile', autospec=True)
+ @patch('opnfv.utils.ovs_logger.OVSLogger', autospec=True)
+ def test_actual_rsps_in_compute(self, mock_ovs_log, mock_compile):
+ """
+ Checks the proper functionality of actual_rsps_in_compute
+ function
+ """
+
+ match_calls = [call('msg_1'), call('msg_2')]
+
+ mf = Mock()
+ mf.group.side_effect = ['msg_p_1', 'msg_p_2']
+ mock_compile.return_value.match.side_effect = [mf, None]
+ mock_ovs_log.ofctl_dump_flows.return_value = '\nflow_rep\nmsg_1\nmsg_2'
+
+ result = odl_utils.actual_rsps_in_compute(mock_ovs_log, 'compute_ssh')
+
+ self.assertEqual(['msg_p_1|msg_p_2'], result)
+ mock_compile.return_value.match.assert_has_calls(match_calls)
+ mock_ovs_log.ofctl_dump_flows.assert_called_once_with('compute_ssh',
+ 'br-int', '101')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsp', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_acl_list', autospec=True)
+ @patch('sfc.lib.odl_utils.get_rsps_from_netvirt_acl_actions',
+ autospec=True)
+ def test_get_active_rsps_on_ports(self,
+ mock_rsps_from_netvirt_acl_actions,
+ mock_odl_acl_list,
+ mock_get_rsp,
+ mock_log):
+ """
+ Checks the proper functionality of get_active_rsps_on_ports
+ function
+ """
+
+ log_calls = [call('ACL acl_obj_one does not have an ACE')]
+
+ port_one = Mock()
+ port_two = Mock()
+ port_one.id = 's_p'
+ port_two.id = 'd_p'
+ neutron_ports = [port_one, port_two]
+
+ mock_rsps_from_netvirt_acl_actions.return_value = ['rsp_obj_one',
+ 'rsp_obj_two']
+
+ mock_get_rsp.side_effect = [{'of-matches': ['of-match-one'],
+ 'reverse-path': 'r-path-one'},
+ {'of-matches': ['of-match-two']}]
+
+ mock_odl_acl_list.return_value = {'access-lists': {'acl': [
+ {'acl-name': 'acl_obj_one',
+ 'access-list-entries': {'ace': []}},
+ {'acl-name': 'acl_obj_two',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': None}}]}},
+ {'acl-name': 'acl_obj_three',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p_uuid',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p_uuid'}}]}},
+ {'acl-name': 'acl_obj_four',
+ 'access-list-entries': {'ace': [{'matches': {
+ 'destination-port-range': {'lower-port': 22},
+ 'netvirt-sfc-acl:source-port-uuid': 's_p',
+ 'netvirt-sfc-acl:destination-port-uuid': 'd_p'},
+ 'actions': 'm_actions'}]}}]}}
+
+ expected = [{'of-matches': ['of-match-two', 'tp_dst=22']},
+ {'of-matches': ['of-match-one', 'tp_src=22'],
+ 'reverse-path': 'r-path-one'}]
+
+ result = odl_utils.get_active_rsps_on_ports('odl_ip',
+ 'odl_port',
+ neutron_ports)
+
+ self.assertEqual(sorted(expected), sorted(result))
+ mock_log.warn.assert_has_calls(log_calls)
+ mock_rsps_from_netvirt_acl_actions.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'm_actions')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem', autospec=True)
+ def test_get_rsps_from_netvirt_acl_actions(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsps_from_netvirt_acl_actions
+ function
+ """
+
+ netv = {'netvirt-sfc-acl:rsp-name': 'rsp-name',
+ 'netvirt-sfc-acl:sfp-name': 'sfp-name'}
+
+ sfp_state = {'sfp-rendered-service-path': [{'name': 'sfp-rsp-one'},
+ {'name': 'sfp-rsp-two'}]}
+
+ mock_odl_resource_elem.return_value = sfp_state
+ rsp_names = ['rsp-name', 'sfp-rsp-one', 'sfp-rsp-two']
+
+ result = odl_utils.get_rsps_from_netvirt_acl_actions('odl_ip',
+ 'odl_port',
+ netv)
+ self.assertEqual(rsp_names, result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'service-function-path-'
+ 'state', 'sfp-name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_odl_resource_elem',
+ autospec=True, return_value='mocked_rsp')
+ def test_get_rsp(self, mock_odl_resource_elem):
+ """
+ Checks the proper functionality of get_rsp
+ function
+ """
+
+ result = odl_utils.get_rsp('odl_ip', 'odl_port', 'rsp_name')
+ self.assertEqual('mocked_rsp', result)
+ mock_odl_resource_elem.assert_called_once_with('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ 'rsp_name',
+ datastore='operational')
+
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_promised_rsps_in_compute(self, mock_active_rsps_on_ports):
+ """
+        Checks the proper functionality of promised_rsps_in_compute
+ function
+ """
+
+ mock_active_rsps_on_ports.return_value = [
+ {'of-matches': {'one': 'one'}, 'path-id': 1},
+ {'of-matches': {'two': 'two'}, 'path-id': 2}]
+
+ result = odl_utils.promised_rsps_in_compute('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ self.assertEqual(['0x1|one', '0x2|two'], result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'compute_ports')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('time.time', autospec=True, side_effect=[1, 2])
+ def test_timethis(self,
+ mock_time,
+ mock_log):
+ """
+ Checks the proper functionality of timethis
+ function
+ """
+
+ expected = ('mock_this', '1')
+ log_calls = [call("mock_func(*('mock',), **{'name': 'this'}) "
+ "took: 1 sec")]
+
+ @odl_utils.timethis
+ def mock_func(msg, name=''):
+ return msg+'_'+name
+
+ result = mock_func('mock', name='this')
+ self.assertEqual(result, expected)
+ mock_log.info.assert_has_calls(log_calls)
+
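(The expected (result, elapsed) tuple and the "took: N sec" log line above suggest a decorator along the following lines; this is reconstructed from the test, not necessarily the exact code in sfc/lib/odl_utils.py.)

    import functools
    import logging
    import time

    logger = logging.getLogger(__name__)

    def timethis(func):
        @functools.wraps(func)
        def timed(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            elapsed = '{0}'.format(time.time() - start)
            logger.info('{f}(*{a}, **{kw}) took: {t} sec'.format(
                f=func.__name__, a=args, kw=kwargs, t=elapsed))
            return result, elapsed
        return timed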
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_odl_items', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.os_sfc_utils.get_tacker_items', autospec=True)
+ def test_wait_for_classification_rules_rsps_not_configured(
+ self, mock_get_tacker_items, mock_promised_rsps_in_compute,
+ mock_get_odl_items, mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when rsps are not configured in ODL
+ """
+
+ log_calls = [call("Error when waiting for classification rules: "
+ "RSPs not configured in ODL")]
+
+ mock_find_compute.return_value = 'mock_compute'
+ mock_promised_rsps_in_compute.return_value = None
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports')
+ mock_promised_rsps_in_compute.assert_called_with('odl_ip',
+ 'odl_port',
+ 'neutron_ports')
+ assert mock_promised_rsps_in_compute.call_count == 10
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_sleep.assert_called_with(3)
+ assert mock_sleep.call_count == 9
+ mock_get_tacker_items.assert_called_once_with()
+ mock_get_odl_items.assert_called_once_with('odl_ip', 'odl_port')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_timeout_not_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+ function when classification rules are not updated in a given timeout
+ """
+
+ log_calls = [call("Timeout but classification rules are not updated"),
+ call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("[]")]
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = []
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_find_compute.assert_called_once_with('compute_name',
+ 'compute_nodes')
+ mock_log.error.assert_has_calls(log_calls[:1])
+ mock_log.info.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.promised_rsps_in_compute', autospec=True)
+ def test_wait_for_classification_rules_updated(
+ self, mock_promised_rsps_in_compute, mock_actual_rsps_in_compute,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of wait_for_classification_rules
+        function when classification rules are updated within the timeout
+ """
+
+ log_calls = [call("RSPs in ODL Operational DataStore"
+ "for compute 'compute_name':"),
+ call("['compute|rsps']"),
+ call("RSPs in compute nodes:"),
+ call("['compute|rsps']"),
+ call("Classification rules were updated")]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps_in_compute.return_value = ['compute|rsps']
+ mock_promised_rsps_in_compute.return_value = ['compute|rsps']
+
+ odl_utils.wait_for_classification_rules('ovs_logger',
+ 'compute_nodes',
+ 'odl_ip',
+ 'odl_port',
+ 'compute_name',
+ 'neutron_ports',
+ timeout=2)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_ip_port(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+ Checks the proper functionality of get_odl_ip_port
+ function
+ """
+
+ cmd_calls = [call('pwd'),
+ call('sudo cp /etc/neutron/plugins/ml2/ml2_conf.ini '
+ '/etc/'),
+ call('sudo chmod 777 /etc/ml2_conf.ini')]
+
+ n1 = Mock()
+ n2 = Mock()
+ nodes = [n1, n2]
+ mock_rawconfigparser.return_value.get.return_value = 'config'
+ mock_search.return_value.group.return_value = 'odl_ip:odl_port'
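+        # run_cmd answers the 'pwd', 'cp' and 'chmod' calls in order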
+ n1.run_cmd.side_effect = ['/etc', '', '']
+
+ result = odl_utils.get_odl_ip_port(nodes)
+ self.assertEqual(('odl_ip', 'odl_port'), result)
+ n1.run_cmd.assert_has_calls(cmd_calls)
+ n1.is_controller.assert_called_once_with()
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ n1.get_file.assert_called_once_with('/etc/ml2_conf.ini',
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.assert_called_with(
+ 'ml2_odl', 'url')
+ mock_search.assert_called_once_with(r'[0-9]+(?:\.[0-9]+){3}\:[0-9]+',
+ 'config')
+
+ @patch('re.search', autospec=True)
+ @patch('ConfigParser.RawConfigParser', autospec=True)
+ @patch('os.getcwd', autospec=True, return_value='/etc')
+ @patch('os.path.join', autospec=True, return_value='/etc/ml2_conf.ini')
+ def test_get_odl_username_password(self, mock_join,
+ mock_getcwd,
+ mock_rawconfigparser,
+ mock_search):
+ """
+        Checks the proper functionality of get_odl_username_password
+ function
+ """
+
+ mock_rawconfigparser.return_value.get.return_value = 'odl_username'
+ result = odl_utils.get_odl_username_password()
+        self.assertEqual('odl_username', result[0])
+ mock_getcwd.assert_called_once_with()
+ mock_join.assert_called_once_with('/etc', 'ml2_conf.ini')
+ mock_rawconfigparser.return_value.read.assert_called_once_with(
+ '/etc/ml2_conf.ini')
+ mock_rawconfigparser.return_value.get.return_value = 'odl_password'
+ result = odl_utils.get_odl_username_password()
+        self.assertEqual('odl_password', result[1])
+
+ def test_pluralize(self):
+ """
+ Checks the proper functionality of pluralize
+ function
+ """
+
+ result = odl_utils.pluralize('service-function-path')
+ self.assertEqual('service-function-paths', result)
+
+ def test_get_module(self):
+ """
+ Checks the proper functionality of get_module
+ function
+ """
+
+ result = odl_utils.get_module('service-function-path')
+ self.assertEqual('service-function-path', result)
+
+ @patch('sfc.lib.odl_utils.get_module',
+ autospec=True, return_value='mocked_module')
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_format_odl_resource_list_url(self, mock_pluralize,
+ mock_get_module):
+ """
+ Checks the proper functionality of format_odl_resource_list_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_list_url('odl_ip',
+ 'odl_port',
+ 'resource')
+ formatted_url = ('http://admin:admin@odl_ip:'
+ 'odl_port/restconf/config/mocked_module:'
+ 'resources')
+ self.assertEqual(formatted_url, result)
+        mock_pluralize.assert_called_once_with('resource')
+ mock_get_module.assert_called_once_with('resource')
+
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url',
+ autospec=True, return_value='list_u/r/l')
+ def test_format_odl_resource_elem_url(self, mock_odl_resource_list_url):
+ """
+ Checks the proper functionality of format_odl_resource_elem_url
+ function
+ """
+
+ result = odl_utils.format_odl_resource_elem_url('odl_ip', 'odl_port',
+ 'resource',
+ 'elem_name')
+ formatted_url = ('list_u/r/l/resource/elem_name')
+ self.assertEqual(formatted_url, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ 'config')
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_odl_resource_list_names_returns_empty_list(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function when resources are empty
+ """
+
+ resource_json = {'resources': {}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual([], result)
+
+ @patch('sfc.lib.odl_utils.pluralize',
+ autospec=True, return_value='resources')
+    def test_odl_resource_list_names(self, mock_pluralize):
+ """
+ Checks the proper functionality of odl_resource_list_names
+ function
+ """
+
+ resource_json = {'resources': {'resource': [{'name': 'resource_one'},
+ {'name': 'resource_two'}]}}
+ result = odl_utils.odl_resource_list_names('resource', resource_json)
+ self.assertEqual(['resource_one', 'resource_two'], result)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_resource_list(self,
+ mock_odl_resource_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_list
+ function
+ """
+
+ mock_odl_resource_list_url.return_value = 'u/r/l'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+
+ result = odl_utils.get_odl_resource_list('odl_ip',
+ 'odl_port',
+ 'resource')
+
+ self.assertEqual({'key': 'value'}, result)
+ mock_odl_resource_list_url.assert_called_once_with('odl_ip',
+ 'odl_port',
+ 'resource',
+ datastore='config')
+ mock_get.assert_called_once_with('u/r/l')
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url', autospec=True)
+ def test_get_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_resource_elem
+ function
+ """
+
+ mock_response = Mock()
+ mock_response.get.return_value = ['elem_one', 'elem_two']
+ mock_get.return_value.json.return_value = mock_response
+ mock_odl_resource_elem_url.return_value = 'u/r/l'
+
+ result = odl_utils.get_odl_resource_elem(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name')
+
+ self.assertEqual('elem_one', result)
+ mock_odl_resource_elem_url.assert_called_once_with(
+ 'odl_ip', 'odl_port', 'resource', 'elem_name', 'config')
+ mock_get.assert_called_once_with('u/r/l')
+ mock_response.get.assert_called_once_with('resource', [{}])
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_elem_url',
+ autospec=True, return_value='u/r/l')
+ def test_delete_odl_resource_elem(self,
+ mock_odl_resource_elem_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_resource_elem
+ function
+ """
+
+ odl_utils.delete_odl_resource_elem('odl_ip', 'odl_port', 'resource',
+ 'elem_name')
+
+        mock_odl_resource_elem_url.assert_called_once_with(
+            'odl_ip', 'odl_port', 'resource', 'elem_name', 'config')
+ mock_delete.assert_called_once_with('u/r/l')
+
+ def test_odl_acl_types_names_returns_empty_list(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function when access lists are empty
+ """
+
+ acl_json = {'access-lists': {}}
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual([], result)
+
+ def test_odl_acl_types_names(self):
+ """
+ Checks the proper functionality of odl_acl_types_names
+ function
+ """
+
+ acl_json = {'access-lists': {'acl': [{'acl-type': 'type-one',
+ 'acl-name': 'name-one'},
+ {'acl-type': 'type-two',
+ 'acl-name': 'name-two'}]}}
+ acl_types = [('type-one', 'name-one'),
+ ('type-two', 'name-two')]
+
+ result = odl_utils.odl_acl_types_names(acl_json)
+ self.assertEqual(acl_types, result)
+
+ def test_format_odl_acl_list_url(self):
+ """
+ Checks the proper functionality of format_odl_acl_list_url
+ function
+ """
+
+ formatted_url = ('http://admin:admin@odl_ip:odl_port/restconf/config/'
+ 'ietf-access-control-list:access-lists')
+ result = odl_utils.format_odl_acl_list_url('odl_ip', 'odl_port')
+ self.assertEqual(formatted_url, result)
+
+ @patch('json.dumps',
+ autospec=True, return_value='{\n "key": "value"\n}')
+ def test_improve_json_layout(self, mock_dumps):
+ """
+ Checks the proper functionality of improve_json_layout
+ function
+ """
+
+ result = odl_utils.improve_json_layout({'key': 'value'})
+
+ self.assertEqual('{\n "key": "value"\n}', result)
+ mock_dumps.assert_called_once_with({'key': 'value'},
+ indent=4,
+ separators=(',', ': '))
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url',
+ autospec=True, return_value='acl_list_u/r/l')
+ @patch('sfc.lib.odl_utils.improve_json_layout', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_resource_list_url', autospec=True)
+ def test_get_odl_items(self,
+ mock_odl_resource_list_url,
+ mock_json_layout,
+ mock_odl_acl_list_url,
+ mock_log,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_items
+ function
+ """
+
+ log_calls = [call('Configured ACLs in ODL: r_acl_j_s_o_n'),
+ call('Configured SFs in ODL: r_sf_j_s_o_n'),
+ call('Configured SFFs in ODL: r_sff_j_s_o_n'),
+ call('Configured SFCs in ODL: r_sfc_j_s_o_n'),
+ call('Configured RSPs in ODL: r_sp_j_s_o_n')]
+
+ resource_list_url_calls = [call('odl_ip', 'odl_port',
+ 'service-function'),
+ call('odl_ip', 'odl_port',
+ 'service-function-forwarder'),
+ call('odl_ip', 'odl_port',
+ 'service-function-chain'),
+ call('odl_ip', 'odl_port',
+ 'rendered-service-path',
+ datastore='operational')]
+
+ resource_list_urls = ['sf_list_u/r/l', 'sff_list_u/r/l',
+ 'sfc_list_u/r/l', 'rsp_list_u/r/l']
+
+ get_calls = [call(url) for url in resource_list_urls]
+
+ mock_odl_resource_list_url.side_effect = resource_list_urls
+
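+        # json() responses: ACL list first, then the SF, SFF, SFC and RSP lists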
+ mock_get.return_value.json.side_effect = ['r_acl_json', 'r_sf_json',
+ 'r_sff_json', 'r_sfc_json',
+ 'r_rsp_json']
+
+ mock_json_layout.side_effect = ['r_acl_j_s_o_n', 'r_sf_j_s_o_n',
+ 'r_sff_j_s_o_n', 'r_sfc_j_s_o_n',
+ 'r_sp_j_s_o_n']
+
+ odl_utils.get_odl_items('odl_ip', 'odl_port')
+
+ mock_odl_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_odl_resource_list_url.assert_has_calls(resource_list_url_calls)
+ mock_get.assert_has_calls(get_calls, any_order=True)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('requests.get', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_get_odl_acl_list(self,
+ mock_acl_list_url,
+ mock_get):
+ """
+ Checks the proper functionality of get_odl_acl_list
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+ mock_get.return_value.json.return_value = {'key': 'value'}
+ result = odl_utils.get_odl_acl_list('odl_ip', 'odl_port')
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_get.assert_called_once_with('acl_list/url')
+ self.assertEqual({'key': 'value'}, result)
+
+ @patch('requests.delete', autospec=True)
+ @patch('sfc.lib.odl_utils.format_odl_acl_list_url', autospec=True)
+ def test_delete_odl_acl(self,
+ mock_acl_list_url,
+ mock_delete):
+ """
+ Checks the proper functionality of delete_odl_acl
+ function
+ """
+
+ mock_acl_list_url.return_value = 'acl_list/url'
+
+ odl_utils.delete_odl_acl('odl_ip', 'odl_port', 'acl_type', 'acl_name')
+
+ mock_acl_list_url.assert_called_once_with('odl_ip', 'odl_port')
+ mock_delete.assert_called_once_with(
+ 'acl_list/url/acl/acl_type/acl_name')
+
+ @patch('sfc.lib.odl_utils.delete_odl_acl', autospec=True)
+ def test_delete_acl(self, mock_delete_odl_acl):
+ """
+        Checks the proper functionality of delete_acl
+ function
+ """
+
+ odl_utils.delete_acl('clf_name', 'odl_ip', 'odl_port')
+ mock_delete_odl_acl.assert_called_once_with(
+ 'odl_ip',
+ 'odl_port',
+ 'ietf-access-control-list:ipv4-acl',
+ 'clf_name')
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute_raises_exception(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+ function when compute was not found in the client
+ """
+
+ ErrorMSG = 'No compute, where the client is, was found'
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ with self.assertRaises(Exception) as cm:
+ odl_utils.find_compute('compute_client', compute_nodes)
+
+ self.assertEqual(ErrorMSG, cm.exception.message)
+ mock_log.debug.assert_called_once_with(ErrorMSG)
+
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ def test_find_compute(self, mock_log):
+ """
+ Checks the proper functionality of find_compute
+        function when the compute hosting the client is found
+ """
+
+ compute_node_one = Mock()
+ compute_node_two = Mock()
+ compute_nodes = [compute_node_one, compute_node_two]
+ compute_node_one.name = 'compute_one'
+ compute_node_two.name = 'compute_two'
+
+ result = odl_utils.find_compute('compute_two', compute_nodes)
+
+ self.assertEqual(compute_node_two, result)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_rsps_still_active(
+ self, mock_active_rsps_on_ports,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('RSPs are still active in the MD-SAL')]
+ mock_active_rsps_on_ports.return_value = True
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=1)
+ self.assertFalse(result)
+ mock_active_rsps_on_ports.assert_called_once_with('odl_ip', 'odl_port',
+ 'neutron_ports')
+ mock_sleep.assert_called_once_with(3)
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_error_getting_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('There was an error getting the compute: ErrorMSG')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
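+        # find_compute raises here, so the check logs the error and fails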
+ mock_find_compute.side_effect = [Exception('ErrorMSG'), mock_compute]
+ mock_active_rsps_on_ports.side_effect = [True, False]
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_sleep.assert_called_once_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_false_classification_flow_in_compute(
+ self, mock_active_rsps_on_ports, mock_actual_rsps,
+ mock_find_compute, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns false on the given condition
+ """
+
+ log_calls = [call('Classification flows still in the compute')]
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_find_compute.return_value = mock_compute
+ mock_actual_rsps.side_effect = [True, True]
+ mock_active_rsps_on_ports.side_effect = [True, False]
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertFalse(result)
+ mock_actual_rsps.assert_called_with('ovs_logger', 'mock_ssh_client')
+ mock_sleep.assert_called_with(3)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 3
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.odl_utils.logger', autospec=True)
+ @patch('sfc.lib.odl_utils.find_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.actual_rsps_in_compute', autospec=True)
+ @patch('sfc.lib.odl_utils.get_active_rsps_on_ports', autospec=True)
+ def test_check_vnffg_deletion_returns_true(self,
+ mock_active_rsps_on_ports,
+ mock_actual_rsps,
+ mock_find_compute,
+ mock_log, mock_sleep):
+ """
+ Checks the proper functionality of check_vnffg_deletion
+ function to verify that it returns true
+ """
+
+ mock_compute = Mock()
+ mock_compute.ssh_client = 'mock_ssh_client'
+ mock_active_rsps_on_ports.side_effect = [True, False]
+
+ mock_actual_rsps.side_effect = [True, False]
+
+ mock_find_compute.return_value = mock_compute
+
+ result = odl_utils.check_vnffg_deletion('odl_ip', 'odl_port',
+ 'ovs_logger', 'neutron_ports',
+ 'compute_client_name',
+ 'compute_nodes', retries=2)
+ self.assertTrue(result)
+ mock_find_compute.assert_called_once_with('compute_client_name',
+ 'compute_nodes')
+ assert mock_sleep.call_count == 2
+ mock_log.assert_not_called()
diff --git a/sfc/unit_tests/unit/lib/test_openstack_utils.py b/sfc/unit_tests/unit/lib/test_openstack_utils.py
new file mode 100644
index 00000000..bdd53d36
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_openstack_utils.py
@@ -0,0 +1,2504 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import call
+from mock import Mock
+from mock import patch
+from mock import mock_open
+from mock import DEFAULT
+from mock import MagicMock
+
+import sfc.lib.openstack_utils as os_sfc_utils
+from tackerclient.v1_0 import client as tacker_client
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcOpenStackUtilsTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.patcher1 = patch.object(os_sfc_utils.constants,
+ 'ENV_FILE', autospec=True)
+ self.patcher2 = patch.object(os_sfc_utils.openstack_tests,
+ 'get_credentials', autospec=True)
+ self.patcher3 = patch.object(os_sfc_utils.nova_utils,
+ 'nova_client', autospec=True)
+ self.patcher4 = patch.object(os_sfc_utils.neutron_utils,
+ 'neutron_client', autospec=True)
+ self.patcher5 = patch.object(os_sfc_utils.heat_utils,
+ 'heat_client', autospec=True)
+ self.patcher6 = patch.object(os_sfc_utils.keystone_utils,
+ 'keystone_client', autospec=True)
+ self.patcher7 = patch.object(os_sfc_utils.connection,
+ 'from_config', autospec=True,)
+ self.patcher8 = patch.object(os_sfc_utils.neutronclient,
+ 'Client', autospec=True,)
+
+ self.env_file = self.patcher1.start().return_value
+ self.os_creds = self.patcher2.start().return_value
+ self.nova = self.patcher3.start().return_value
+ self.neutron = self.patcher4.start().return_value
+ self.heat = self.patcher5.start().return_value
+ self.keystone = self.patcher6.start().return_value
+ self.conn = self.patcher7.start().return_value
+ self.neutron_client = self.patcher8.start().return_value
+
+ self.os_sfc = os_sfc_utils.OpenStackSFC()
+
+ def tearDown(self):
+ self.patcher1.stop()
+ self.patcher2.stop()
+ self.patcher3.stop()
+ self.patcher4.stop()
+ self.patcher5.stop()
+ self.patcher6.stop()
+ self.patcher7.stop()
+ self.patcher8.stop()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('os.environ', {'OS_NETWORK_API_VERSION': '1'})
+ def test_get_neutron_client_version(self,
+ mock_log):
+ """
+ Checks the proper functionality of get_neutron_client_version
+ """
+ log_calls = [call("OS_NETWORK_API_VERSION is 1")]
+ result = self.os_sfc.get_neutron_client_version()
+ assert result == '1'
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_register_glance_image_already_exists(self,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+        function when the image already exists
+ """
+ image_obj = Mock()
+ image_obj.name = 'name'
+ log_calls = [call('Registering the image...'),
+ call('Image ' + image_obj.name + ' already exists.')]
+
+ self.conn.image.find_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+
+ self.conn.image.find_image.assert_called_once_with(image_obj.name)
+
+ assert result is image_obj
+
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch("__builtin__.open", autospec=True)
+ def test_register_glance_image_is_local(self,
+ mock_open_fn,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mocked_file = mock_open(read_data='url').return_value
+ mock_open_fn.return_value = mocked_file
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+ result = self.os_sfc.register_glance_image('name',
+ 'url',
+ 'img_format',
+ 'public')
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.urllib2.urlopen', autospec=True)
+ def test_register_glance_image_is_not_local(self,
+ mock_urlopen,
+ mock_log):
+ """
+ Checks the proper functionality of register_glance_image
+ function when the image is not local
+ """
+ log_calls = [call('Registering the image...'),
+ call('Downloading image'),
+ call('Image created')]
+
+ image_obj_None = None
+ image_obj_name = 'name'
+ image_obj = Mock()
+ mock_file = Mock()
+ mock_file.read.side_effect = ['http://url']
+ mock_urlopen.return_value = mock_file
+
+ self.conn.image.find_image.return_value = image_obj_None
+ self.conn.image.upload_image.return_value = image_obj
+
+ result = self.os_sfc.register_glance_image('name',
+ 'http://url',
+ 'img_format',
+ 'public')
+
+ assert result is image_obj
+
+ self.conn.image.find_image.assert_called_once_with(image_obj_name)
+
+ self.conn.image.upload_image.\
+ assert_called_once_with(name='name',
+ disk_format='img_format',
+ data='http://url',
+ is_public='public',
+ container_format='bare')
+
+ self.assertEqual([image_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_flavour(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_flavor
+ function
+ """
+
+ mock_openstack_flavor_ins = self.conn.compute.\
+ create_flavor.return_value
+ log_calls = [call('Creating flavor...')]
+
+ result = self.os_sfc.create_flavor('name',
+ 'ram',
+ 'disk',
+ 'vcpus')
+ assert result is mock_openstack_flavor_ins
+ self.assertEqual([mock_openstack_flavor_ins],
+ self.os_sfc.creators)
+ self.conn.compute.create_flavor.\
+ assert_called_once_with(name='name',
+ ram='ram',
+ disk='disk',
+ vcpus='vcpus')
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_create_network_infrastructure(self, mock_env_get, mock_log):
+ log_calls = [call('Creating Networks...'),
+ call('Creating Router...')]
+ network_obj = Mock()
+ network_obj.id = '1'
+ subnet_obj = Mock()
+ subnet_obj.id = '2'
+ ext_network_obj = Mock()
+ ext_network_obj.id = '3'
+ router_obj = Mock()
+ router_obj.id = '4'
+
+ self.conn.network.create_network.return_value = network_obj
+ self.conn.network.create_subnet.return_value = subnet_obj
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_router.return_value = router_obj
+ self.conn.network.get_router.return_value = router_obj
+ mock_env_get.return_value = 'ext_net_name'
+
+ expected = (network_obj, router_obj)
+ result = self.os_sfc.create_network_infrastructure('net_name',
+ 'sn_name',
+ 'subnet_cidr',
+ 'router_name')
+ self.conn.network.create_network.\
+ assert_called_once_with(name='net_name')
+ self.conn.network.create_subnet.\
+ assert_called_once_with(name='sn_name', cidr='subnet_cidr',
+ network_id=network_obj.id, ip_version='4')
+ self.conn.network.find_network.\
+ assert_called_once_with('ext_net_name')
+ self.conn.network.create_router.\
+ assert_called_once_with(name='router_name')
+ self.conn.network.add_interface_to_router.\
+ assert_called_once_with(router_obj.id, subnet_id=subnet_obj.id)
+ self.conn.network.update_router.\
+ assert_called_once_with(
+ router_obj.id,
+ external_gateway_info={'network_id': ext_network_obj.id})
+ self.conn.network.get_router.assert_called_once_with(router_obj.id)
+
+ self.assertEqual(expected, result)
+ self.assertEqual([network_obj, subnet_obj, router_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_security_group(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_security_group
+ function
+ """
+ log_calls = [call('Creating the security groups...')]
+ sec_group_obj = Mock()
+ sec_group_obj.id = '1'
+
+ self.conn.network.create_security_group.return_value = sec_group_obj
+
+ result = self.os_sfc.create_security_group('sec_grp_name')
+ assert result is sec_group_obj
+
+ self.conn.network.create_security_group.\
+ assert_called_once_with(name='sec_grp_name')
+
+ pc_calls = [call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='icmp'),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=22,
+ port_range_max=22),
+ call(security_group_id=sec_group_obj.id,
+ direction='ingress',
+ protocol='tcp',
+ port_range_min=80,
+ port_range_max=80)]
+
+ self.conn.network.create_security_group_rule.\
+ assert_has_calls(pc_calls)
+
+ self.assertEqual([sec_group_obj], self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+        function with port security disabled and two ports
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj1 = Mock()
+ port_obj1.id = '2'
+ port_obj2 = Mock()
+ port_obj2.id = '3'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1', 'port2']
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.side_effect = [port_obj1, port_obj2]
+ self.conn.compute.create_server.return_value = instance_obj
+
+ port_obj_list = [port_obj1, port_obj2]
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id),
+ call(name=ports[1],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj1.id},
+ {"port": port_obj2.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj1, port_obj2, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+        function with port security enabled
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = True
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+ # self.conn.compute.wait_for_server.return_value = wait_ins_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id,
+ security_group_ids=[secgrp.id])]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_instance_port_security_false_one_port(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_instance
+        function with port security disabled and a single port
+ """
+
+ keypair_obj = Mock()
+ keypair_obj.name = 'keypair_name'
+ flavor_obj = Mock()
+ flavor_obj.id = '1'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.name = 'instance_name'
+ secgrp = Mock()
+ secgrp.name = 'sec_grp'
+ secgrp.id = '4'
+ img_cre = Mock()
+ img_cre.id = '5'
+ network = Mock()
+ network.id = '6'
+ ports = ['port1']
+ port_obj_list = [port_obj]
+ port_security = False
+
+ log_calls = [call('Creating Key Pair vm_name...'),
+ call('Creating Port ' + str(ports) + '...'),
+ call('Creating the instance vm_name...'),
+ call('Waiting for instance_name to become Active'),
+ call('instance_name is active')]
+
+ self.conn.compute.create_keypair.return_value = keypair_obj
+ self.conn.compute.find_flavor.return_value = flavor_obj
+ self.conn.network.create_port.return_value = port_obj
+ self.conn.compute.create_server.return_value = instance_obj
+
+ expected = (instance_obj, port_obj_list)
+ result = self.os_sfc.create_instance('vm_name',
+ 'flavor_name',
+ img_cre,
+ network,
+ secgrp,
+ 'av_zone',
+ ports,
+ port_security=port_security)
+ self.assertEqual(expected, result)
+
+ pc_calls = [call(name=ports[0],
+ is_port_security_enabled=port_security,
+ network_id=network.id)]
+
+ self.conn.compute.create_keypair.\
+ assert_called_once_with(name='vm_name' + "_keypair")
+
+ self.conn.compute.find_flavor.assert_called_once_with('flavor_name')
+
+ self.conn.network.create_port.\
+ assert_has_calls(pc_calls)
+
+ self.conn.compute.create_server.\
+ assert_called_once_with(name='vm_name',
+ image_id=img_cre.id,
+ flavor_id=flavor_obj.id,
+ networks=[{"port": port_obj.id}],
+ key_name=keypair_obj.name,
+ availability_zone='av_zone')
+
+ self.conn.compute.wait_for_server.\
+ assert_called_once_with(instance_obj)
+
+ self.assertEqual([keypair_obj, port_obj, instance_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ def test_get_instance(self):
+ """
+ Checks the proper functionality of get_instance function
+ """
+
+ mock_instance_id = 'instance-abyz'
+ mock_instance = Mock()
+ mock_instance.id = mock_instance_id
+ mock_instance.name = 'test-instance'
+ mock_instance.hypervisor_hostname = 'nova-abyz'
+ self.conn.compute.get_server_metadata.return_value = mock_instance
+ result = self.os_sfc.get_instance(mock_instance_id)
+ self.assertEqual(result, mock_instance)
+
+ @patch.object(os_sfc_utils.OpenStackSFC, 'get_hypervisor_hosts')
+ def test_get_av_zones(self, mock_hosts):
+ """
+        Checks the proper functionality of get_av_zones
+ function
+ """
+ mock_hosts.return_value = ['host1', 'host2']
+ result = self.os_sfc.get_av_zones()
+ mock_hosts.assert_called_once()
+ self.assertEqual(['nova::host1', 'nova::host2'], result)
+
+ def test_get_hypervisor_hosts(self):
+ """
+        Checks the proper functionality of get_hypervisor_hosts
+ function
+ """
+ from openstack.compute.v2 import hypervisor
+
+ hypervisor1 = Mock()
+ hypervisor1.state = 'up'
+ hypervisor1.name = 'compute00'
+ hypervisor2 = Mock()
+ hypervisor2.state = 'up'
+ hypervisor2.name = 'compute01'
+ nodes = [hypervisor1.name, hypervisor2.name]
+ hypervisors_list = MagicMock()
+ mock_obj = patch.object(hypervisor, 'Hypervisor')
+ mock_obj.side_effect = [hypervisor1, hypervisor2]
+ self.conn.compute.hypervisors.return_value = hypervisors_list
+ hypervisors_list.__iter__.return_value = [hypervisor1, hypervisor2]
+
+ result = self.os_sfc.get_hypervisor_hosts()
+ self.conn.compute.hypervisors.assert_called_once()
+ self.assertEqual(nodes, result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_hypervisor_hosts_exception(self, mock_log):
+ """
+        Checks the proper functionality of get_hypervisor_hosts
+ function when an exception appears
+ """
+ log_calls = [call('Error [get_hypervisors(compute)]: Error MSG')]
+ self.conn.compute.hypervisors.side_effect = Exception('Error MSG')
+ result = self.os_sfc.get_hypervisor_hosts()
+ mock_log.error.assert_has_calls(log_calls)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_client')
+ def test_compute_client(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_client
+ function
+ """
+
+ result = self.os_sfc.get_compute_client()
+ self.assertEqual('mock_client', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'client')
+
+ @patch('sfc.lib.openstack_utils.OpenStackSFC.get_vm_compute',
+ autospec=True, return_value='mock_server')
+ def test_get_compute_server(self, mock_get_vm_compute):
+ """
+ Checks the proper functionality of get_compute_server
+ function
+ """
+
+ result = self.os_sfc.get_compute_server()
+ self.assertEqual('mock_server', result)
+ mock_get_vm_compute.assert_called_once_with(self.os_sfc, 'server')
+
+ def test_get_vm_compute_raised_exception(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function when no VM with the given name is found
+ """
+
+ ErrorMSG = "There is no VM with name 'mock_vm_name'!!"
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_vm_compute('mock_vm_name')
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+
+ def test_get_vm_compute(self):
+ """
+ Checks the proper functionality of get_vm_compute
+ function
+ """
+
+ mock_cre_obj_1 = Mock()
+ mock_cre_obj_2 = Mock()
+ mock_cre_obj_1.get_vm_inst.return_value.name = 'pro_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.name = 'dev_vm'
+ mock_cre_obj_2.get_vm_inst.return_value.compute_host = 'mock_host'
+ self.os_sfc.creators = [mock_cre_obj_1, mock_cre_obj_2]
+
+ result = self.os_sfc.get_vm_compute('dev_vm')
+ self.assertEqual('mock_host', result)
+
+ def test_get_port_by_ip(self):
+ """
+        Checks the proper functionality of get_port_by_ip function
+ """
+
+ mock_port_ip_address = 'e.f.g.h'
+ mock_port_one, mock_port_two = Mock(), Mock()
+ mock_port_one.id = 'port-abcd'
+ mock_port_two.id = 'port-efgz'
+ mock_port_one.fixed_ips = [{'ip_address': 'a.b.c.d'}]
+ mock_port_two.fixed_ips = [{'ip_address': 'e.f.g.h'}]
+ self.conn.network.ports.return_value = [mock_port_one, mock_port_two]
+ self.conn.network.get_port.return_value = mock_port_two
+ result = self.os_sfc.get_port_by_ip(mock_port_ip_address)
+ self.assertEqual(result, mock_port_two)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+    def test_get_instance_port_raised_exception(self,
+ mock_os_vm,
+ mock_log):
+ """
+        Checks the proper functionality of get_instance_port
+ function when no port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = None
+ ErrorMSG = 'Client VM does not have the desired port'
+ log_calls = [call("The VM mock_vm_name does not have any port"
+ " with name mock_vm_name-port")]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_get_instance_port(self,
+ mock_os_vm,
+ mock_log):
+ """
+        Checks the proper functionality of get_instance_port
+        function when a port is returned
+ """
+
+ mock_os_vm_ins = mock_os_vm.return_value
+ mock_vm = Mock()
+ mock_vm.name = 'mock_vm_name'
+ mock_os_vm_ins.get_port_by_name.return_value = 'mock_port'
+ result = self.os_sfc.get_instance_port(mock_vm, mock_os_vm_ins)
+ self.assertEqual('mock_port', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.env.get', autospec=True)
+ def test_assign_floating_ip(self,
+ mock_env_get,
+ mock_log):
+ """
+        Checks the proper functionality of assign_floating_ip
+ function
+ """
+ ext_network_obj = Mock()
+ ext_network_obj.id = '1'
+ fip_obj = Mock()
+ fip_obj.floating_ip_address = 'floating_ip_address'
+ port_obj = Mock()
+ port_obj.id = '2'
+ instance_obj = Mock()
+ instance_obj.id = '3'
+
+ log_calls = [call(' Creating floating ips '),
+ call(' FLoating IP address '
+ + fip_obj.floating_ip_address
+ + ' created'),
+ call(' Adding Floating IPs to instances ')]
+
+ mock_env_get.return_value = 'ext_net_name'
+ self.conn.network.find_network.return_value = ext_network_obj
+ self.conn.network.create_ip.return_value = fip_obj
+        self.conn.network.get_port.return_value = port_obj
+ self.conn.compute.get_server.return_value = instance_obj
+
+ result = self.os_sfc.assign_floating_ip(instance_obj, port_obj)
+ assert result is fip_obj.floating_ip_address
+
+ self.conn.network.find_network.assert_called_once_with('ext_net_name')
+ self.conn.network.create_ip.\
+ assert_called_once_with(floating_network_id=ext_network_obj.id,
+ port_id=port_obj.id)
+ self.conn.compute.add_floating_ip_to_server.\
+ assert_called_once_with(instance_obj.id,
+ fip_obj.floating_ip_address)
+
+ self.assertEqual([fip_obj],
+ self.os_sfc.creators)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+        function when no port matches the provided IPs
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port with ip any of ['floating_ip', 'other_ip'] for" + \
+ " floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router', mock_ips)
+
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator', autospec=True)
+ def test_assign_floating_ip_vnfs_raised_exception_ips_not_provided(
+ self, mock_generate_creator, mock_get_stack_servers, mock_log):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+        function when no suitable port exists and no IPs are provided
+ """
+
+ ErrorMSG = "The VNF server_name-float does not have any suitable" + \
+ " port for floating IP assignment"
+ log_calls = [call(ErrorMSG)]
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+ mock_server_obj = Mock()
+ mock_port_obj = Mock()
+ mock_server_obj.name = 'server_name'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_port_obj.name = None
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
+ mock_get_stack_servers.return_value = [mock_server_obj]
+
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.assign_floating_ip_vnfs('router')
+
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj',
+ 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ self.assertEqual(cm.exception.message, ErrorMSG)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.FloatingIpConfig', autospec=True)
+ @patch('sfc.lib.openstack_utils.cr_inst.generate_creator',
+ autospec=True)
+ @patch('sfc.lib.openstack_utils.heat_utils.get_stack_servers',
+ autospec=True)
+ def test_assign_floating_ip_vnfs(self,
+ mock_get_stack_servers,
+ mock_generate_creator,
+ mock_floating_ip_config):
+ """
+ Checks the proper functionality of assign_floating_ip_vnfs
+ function
+ """
+
+ self.os_sfc.image_settings = 'image_settings'
+ self.heat.stacks.list.return_value = ['stack_obj']
+
+ mock_router = Mock()
+ mock_server_obj = Mock()
+ mock_ip_obj = Mock()
+ mock_port_obj = Mock()
+ mock_router.name = 'm_router'
+ mock_server_obj.name = 'serv_obj'
+ mock_server_obj.ports = [mock_port_obj]
+ mock_ips = ['floating_ip', 'other_ip']
+ mock_ip_obj.ip = 'mocked_ip'
+ mock_port_obj.name = 'port_obj'
+ mock_port_obj.ips = [{'ip_address': 'floating_ip'}]
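+        # the port's ip matches one of the provided ips, so it is selected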
+ mock_get_stack_servers.return_value = [mock_server_obj]
+ mock_os_vm_ins = mock_generate_creator.return_value
+ float_ip_ins = mock_floating_ip_config.return_value
+ mock_os_vm_ins.add_floating_ip.return_value = mock_ip_obj
+
+ result = self.os_sfc.assign_floating_ip_vnfs(mock_router, mock_ips)
+ self.assertEqual(['mocked_ip'], result)
+ self.assertEqual([mock_os_vm_ins], self.os_sfc.creators)
+ mock_get_stack_servers.assert_called_once_with(self.heat,
+ self.nova,
+ self.neutron_client,
+ self.keystone,
+ 'stack_obj', 'admin')
+ mock_generate_creator.assert_called_once_with(self.os_creds,
+ mock_server_obj,
+ 'image_settings',
+ 'admin')
+ mock_floating_ip_config.assert_called_once_with(name='serv_obj-float',
+ port_name='port_obj',
+ router_name='m_router')
+ mock_os_vm_ins.add_floating_ip.assert_called_once_with(float_ip_ins)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_all_security_groups(self, mock_log):
+ """
+ Checks the proper functionality of delete_all_security_groups
+ function
+ """
+
+ log_calls_info = [call('Deleting remaining security groups...')]
+ secgrp1_obj = Mock()
+ secgrp2_obj = Mock()
+ secgrp_list = MagicMock()
+
+ self.conn.network.create_security_groups.side_effect = [secgrp1_obj,
+ secgrp2_obj]
+ self.conn.network.security_groups.return_value = secgrp_list
+
+ secgrp_list.__iter__.return_value = [secgrp1_obj, secgrp2_obj]
+ del_calls = [call(secgrp1_obj),
+ call(secgrp2_obj)]
+
+ self.os_sfc.delete_all_security_groups()
+ self.conn.network.security_groups.assert_called_once()
+ self.conn.network.delete_security_group.assert_has_calls(del_calls)
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('sfc.lib.openstack_utils.cr_inst.OpenStackVmInstance',
+ autospec=True)
+ def test_wait_for_vnf(self, mock_os_vm):
+ """
+ Checks the proper functionality of wait_for_vnf function
+ """
+
+ mock_os_vm.vm_active.return_value = "x"
+ result = self.os_sfc.wait_for_vnf(mock_os_vm)
+ self.assertEqual('x', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_raises_exception(self, mock_log):
+ """
+        Checks create_port_groups when more than two ports are given
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ self.conn.compute.get_server.return_value = instance_obj
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_err = [call('Only SFs with one or two ports are supported')]
+ exception_message = "Failed to create port pairs"
+ vnf_ports = ['p1', 'p2', 'p3']
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups(vnf_ports, instance_obj)
+ self.assertEqual(exception_message, cm.exception.message)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.error.assert_has_calls(log_calls_err)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_pp(self, mock_log):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ creation
+ """
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj1 = Mock()
+ port_obj2 = Mock()
+ port_obj1.id = '123abc'
+ port_obj2.id = '456def'
+
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj1
+ self.conn.network.get_port.return_value = port_obj2
+
+ log_calls_info = [call('Creating the port pairs...')]
+ log_calls_warn = [call('Chain creation failed due to port pair '
+ 'creation failed for vnf %(vnf)s',
+ {'vnf': instance_obj.name})]
+ self.neutron_client.create_sfc_port_pair.return_value = None
+ result = self.os_sfc.create_port_groups(
+ [port_obj1, port_obj2], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_exception_nopp(self, mock_log, mock_osvm,
+ mock_port):
+ """
+ Checks the create_port_groups when openstack does not commit the pp
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+ mock_port_ins = mock_port.return_value
+ mock_port_ins.id = '123abc'
+ mock_vm_ins = mock_osvm.return_value
+ mock_vm_ins.name = 'vm'
+ exception_message = "Port pair was not committed in openstack"
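+        # a single-port SF uses the same port id for ingress and egress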
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'xxxx'}]}
+ with self.assertRaises(Exception) as cm:
+ self.os_sfc.create_port_groups([mock_port_ins], mock_vm_ins)
+ self.assertEqual(exception_message, cm.exception.message)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('snaps.domain.network.Port', autospec=True)
+ @patch('snaps.domain.vm_inst.VmInst', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_none_from_ppg(self, mock_log,
+ mock_vm,
+ mock_port):
+ """
+ Checks the create_port_groups when something goes wrong in port pair
+ group creation
+ """
+
+ instance_obj = Mock()
+ instance_obj.name = 'name'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ log_calls_info = [call('Creating the port pairs...'),
+ call('Creating the port pair groups for name')]
+ log_calls_warn = [call('Chain creation failed due to port pair group '
+ 'creation failed for vnf '
+ '{}'.format(instance_obj.name))]
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = None
+ result = self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls_info)
+ mock_log.warning.assert_has_calls(log_calls_warn)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_port_groups_returns_id(self, mock_log):
+ """
+ Checks the create_port_groups when everything goes as expected
+ """
+
+ log_calls_info = [call('Creating the port pairs...')]
+
+ instance_obj = Mock()
+ instance_obj.name = 'vm'
+ port_obj = Mock()
+ port_obj.id = '123abc'
+ self.conn.compute.get_server.return_value = instance_obj
+ self.conn.network.get_port.return_value = port_obj
+
+ expected_port_pair = {'name': 'vm-connection-points',
+ 'description': 'port pair for vm',
+ 'ingress': '123abc',
+ 'egress': '123abc'}
+ self.neutron_client.create_sfc_port_pair.return_value = \
+ {'port_pair': {'id': 'pp_id'}}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'pp_id'}]}
+ self.neutron_client.create_sfc_port_pair_group.return_value = \
+ {'port_pair_group': {'id': 'pp_id'}}
+ expected_port_pair_gr = {'name': 'vm-port-pair-group',
+ 'description': 'port pair group for vm',
+ 'port_pairs': ['pp_id']}
+
+ self.os_sfc.create_port_groups([port_obj], instance_obj)
+ self.neutron_client.create_sfc_port_pair.assert_has_calls(
+ [call({'port_pair': expected_port_pair})])
+ self.neutron_client.create_sfc_port_pair_group.assert_has_calls(
+ [call({'port_pair_group': expected_port_pair_gr})])
+ mock_log.info.assert_has_calls(log_calls_info)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier(self, mock_log):
+ """
+ Checks the create_classifier method
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical)
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_classifier_symmetric(self, mock_log):
+ """
+        Checks the create_classifier method in the symmetric case
+ """
+
+ log_calls = [call('Creating the classifier...')]
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ fc_name = 'red_http'
+ symmetrical = True
+ serv_p = '123'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+
+ expected_sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ self.os_sfc.create_classifier(neutron_port, port,
+ protocol, fc_name, symmetrical,
+ server_port='123',
+ server_ip='1.1.1.2')
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain(self, mock_log):
+ """
+ Checks the create_chain method
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = False
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id']}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_chain_symmetric(self, mock_log):
+ """
+ Checks the create_chain method
+ """
+
+ log_calls = [call('Creating the classifier...'),
+ call('Creating the chain...')]
+ port_groups = ['1a', '2b']
+ neutron_port = 'neutron_port_id'
+ port = 80
+ protocol = 'tcp'
+ vnffg_name = 'red_http'
+ symmetrical = True
+ serv_p = '123abc'
+ server_ip = '1.1.1.2'
+ self.neutron_client.create_sfc_flow_classifier.return_value = \
+ {'flow_classifier': {'id': 'fc_id'}}
+ self.neutron_client.create_sfc_port_chain.return_value = \
+ {'port_chain': {'id': 'pc_id'}}
+
+ expected_sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'destination_ip_prefix': server_ip,
+ 'logical_destination_port': serv_p,
+ 'protocol': protocol}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'description': 'port-chain for SFC',
+ 'port_pair_groups': port_groups,
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+
+ self.os_sfc.create_chain(port_groups, neutron_port, port,
+ protocol, vnffg_name, symmetrical,
+ server_port=serv_p, server_ip=server_ip)
+
+ self.neutron_client.create_sfc_flow_classifier.assert_has_calls(
+ [call({'flow_classifier': expected_sfc_classifier_params})])
+ self.neutron_client.create_sfc_port_chain.assert_has_calls(
+ [call({'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_update_chain_symmetric(self, mock_log):
+ """
+ Checks the update_chain method
+ """
+
+ log_calls = [call('Update the chain...')]
+ vnffg_name = 'red_http'
+ fc_name = 'blue_ssh'
+ symmetrical = True
+ self.neutron_client.find_resource.return_value = \
+ {'id': 'fc_id'}
+ expected_chain_config = {'name': vnffg_name + '-port-chain',
+ 'flow_classifiers': ['fc_id'],
+ 'chain_parameters': {'symmetric': True}}
+ self.os_sfc.update_chain(vnffg_name, fc_name, symmetrical)
+ self.neutron_client.update_sfc_port_chain.assert_has_calls(
+ [call('fc_id', {'port_chain': expected_chain_config})])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_swap_classifiers(self, mock_log):
+ """
+ Checks the swap_classifiers method
+ """
+
+ log_calls = [call('Swap classifiers...')]
+ vnffg_1_name = 'red_http'
+ vnffg_2_name = 'blue_ssh'
+ symmetrical = False
+ self.os_sfc.swap_classifiers(vnffg_1_name, vnffg_2_name, symmetrical)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_port_groups(self, mock_log):
+ """
+ Checks the delete_port_groups method
+ """
+ log_calls = [call('Deleting the port groups...'),
+ call('Deleting the port pairs...')]
+ self.neutron_client.list_sfc_port_pair_groups.return_value = \
+ {'port_pair_groups': [{'id': 'id_ppg1'}, {'id': 'id_ppg2'}]}
+ self.neutron_client.list_sfc_port_pairs.return_value = \
+ {'port_pairs': [{'id': 'id_pp1'}, {'id': 'id_pp2'}]}
+ self.os_sfc.delete_port_groups()
+
+ self.neutron_client.delete_sfc_port_pair_group.assert_has_calls(
+ [call('id_ppg1'), call('id_ppg2')])
+ self.neutron_client.delete_sfc_port_pair.assert_has_calls(
+ [call('id_pp1'), call('id_pp2')])
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_chain(self, mock_log):
+ """
+ Checks the delete_chain method
+ """
+ log_calls = [call('Deleting the chain...'),
+ call('Deleting the classifiers...')]
+ self.neutron_client.list_sfc_port_chains.return_value = \
+ {'port_chains': [{'id': 'id_pc1'}]}
+ self.neutron_client.list_sfc_flow_classifiers.return_value = \
+ {'flow_classifiers': [{'id': 'id_fc1'}]}
+ self.os_sfc.delete_chain()
+
+ self.neutron_client.delete_sfc_port_chain.\
+ assert_has_calls([call('id_pc1')])
+ self.neutron_client.delete_sfc_flow_classifier.assert_has_calls(
+ [call('id_fc1')])
+ mock_log.info.assert_has_calls(log_calls)
+
+
+class SfcTackerSectionTesting(unittest.TestCase):
+ def setUp(self):
+ self.patcher = patch.object(tacker_client, 'Client', autospec=True)
+ self.mock_tacker_client = self.patcher.start().return_value
+
+ def tearDown(self):
+ self.patcher.stop()
+
+ @patch('os.getenv', autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version_returned_default(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when the os.getenv returns none
+ """
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, '1.0')
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_not_called()
+
+ @patch('os.getenv', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_client_version(self,
+ mock_log,
+ mock_getenv):
+ """
+ Checks the proper functionality of get_tacker_client_version
+ function when the os.getenv returns version
+ """
+
+ ver = '2.0'
+ mock_getenv.return_value = ver
+ log_calls = [call("OS_TACKER_API_VERSION is set in env as '%s'", ver)]
+
+ result = os_sfc_utils.get_tacker_client_version()
+ self.assertEqual(result, ver)
+ mock_getenv.assert_called_once_with('OS_TACKER_API_VERSION')
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list returns None
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ path = '/' + collection
+ self.mock_tacker_client.list.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [get_id_from_name(tacker_client, '
+ 'resource_type, resource_name)]: ErrorMSG')]
+
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.openstack_tests.get_credentials',
+ autospec=True, return_value='os_creds')
+ @patch('sfc.lib.openstack_utils.keystone_utils.keystone_session',
+ autospec=True, return_value='keystone_session_obj')
+ @patch('sfc.lib.openstack_utils.constants.ENV_FILE', autospec=True)
+ @patch('sfc.lib.openstack_utils.tackerclient.Client', autospec=True)
+ def test_get_tacker_client(self, mock_tacker_client,
+ mock_env_file,
+ mock_keystone_session,
+ mock_get_credentials):
+ """
+ checks the proper functionality of get_tacker_client
+ function
+ """
+
+ mock_tacker_client_ins = mock_tacker_client.return_value
+ result = os_sfc_utils.get_tacker_client()
+ assert result is mock_tacker_client_ins
+ mock_get_credentials.assert_called_once_with(os_env_file=mock_env_file,
+ overrides=None)
+ mock_keystone_session.assert_called_once_with('os_creds')
+ mock_tacker_client.assert_called_once_with(
+ '1.0', session='keystone_session_obj')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_id_from_name(self, mock_log):
+ """
+ Checks the proper functionality of get_id_from_name
+ function when tacker_client.list returns id
+ """
+
+ resource_name = 'mock_resource_name'
+ resource_type = 'mock_resource_type'
+ params = {'fields': 'id', 'name': resource_name}
+ collection = resource_type + 's'
+ self.mock_tacker_client.list.return_value = {collection: {0: {'id':
+ 'mock_id'}}}
+ path = '/' + collection
+ result = os_sfc_utils.get_id_from_name(self.mock_tacker_client,
+ resource_type,
+ resource_name)
+ self.assertEqual('mock_id', result)
+ self.mock_tacker_client.list.assert_called_once_with(collection,
+ path,
+ **params)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnfd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnfd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnfd_id(self.mock_tacker_client,
+ 'vnfd_name')
+ self.assertEqual('id', result)
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd',
+ 'vnfd_name')
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vim_id(self, mock_get_id):
+ """
+        Checks the proper functionality of get_vim_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vim_id(self.mock_tacker_client, 'vim_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim',
+ 'vim_name')
+ self.assertEqual('id', result)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnf_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnf_id
+ function
+ """
+
+ vnf_name = 'mock_vnf'
+ log_calls = [call("Could not retrieve ID for vnf with name [%s]."
+ " Retrying." % vnf_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnf', vnf_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnf_id(self.mock_tacker_client, vnf_name, 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffg_id(self,
+ mock_get_id,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of get_vnffg_id
+ function
+ """
+
+ vnffg_name = 'mock_vnffg'
+ log_calls = [call("Could not retrieve ID for vnffg with name [%s]."
+ " Retrying." % vnffg_name)]
+
+ get_id_calls = [call(self.mock_tacker_client, 'vnffg', vnffg_name)] * 2
+
+ mock_get_id.side_effect = [None, 'vnf_id']
+
+ result = os_sfc_utils.get_vnffg_id(self.mock_tacker_client,
+ vnffg_name,
+ 2)
+ self.assertEqual('vnf_id', result)
+ mock_sleep.assert_called_once_with(1)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_get_id.assert_has_calls(get_id_calls)
+
+ @patch('sfc.lib.openstack_utils.get_id_from_name', autospec=True)
+ def test_get_vnffgd_id(self, mock_get_id):
+ """
+ Checks the proper functionality of get_vnffgd_id
+ function
+ """
+
+ mock_get_id.return_value = 'id'
+ result = os_sfc_utils.get_vnffgd_id(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_get_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd',
+ 'vnffgd_name')
+ self.assertEqual('id', result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+        function when tacker_client.list_vnfds raises an Exception
+ """
+
+ log_calls = [call('Error [list_vnfds(tacker_client)]: ErrorMSG')]
+ self.mock_tacker_client.list_vnfds.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ mock_log.error.assert_has_calls(log_calls)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+ self.assertIsNone(result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfds(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfds
+ function when the list_vnfds returns vnfds
+ """
+
+ vnfds = {
+ 'vnfds': [{'id': 1},
+ {'id': 2}]
+ }
+ self.mock_tacker_client.list_vnfds.return_value = vnfds
+ result = os_sfc_utils.list_vnfds(self.mock_tacker_client)
+ self.mock_tacker_client.list_vnfds.assert_called_once_with(
+ retrieve_all=True)
+        mock_log.error.assert_not_called()
+ self.assertEqual([1, 2], result)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("Error [create_vnfd(tacker_client, 'None')]: "
+ "ErrorMSG")]
+
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={'vnfd': {'attributes': {'vnfd': {}},
+ 'name': 'vnfd_name'}})
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd_returned_none_tosca_file_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnfd..."),
+ call("VNFD template:\nmock_vnfd"),
+ call("Error [create_vnfd(tacker_client, 'tosca_file')]: "
+ "ErrorMSG")]
+
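+        # open_handler stands in for the file object obtained when the code
+        # under test enters the mocked open() context manager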
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ self.mock_tacker_client.create_vnfd.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ self.assertIsNone(result)
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls[:2])
+ mock_log.error.assert_has_calls(log_calls[2:])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnfd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnfd
+ function
+ """
+
+ log_calls = [call("VNFD template:\nmock_vnfd")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_vnfd'
+ mock_safe_load.return_value = 'mock_vnfd_body'
+ result = os_sfc_utils.create_vnfd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.create_vnfd.return_value
+ self.mock_tacker_client.create_vnfd.assert_called_once_with(
+ body={"vnfd": {"attributes": {"vnfd": "mock_vnfd_body"},
+ "name": "vnfd_name"}})
+ mock_open.assert_called_once_with('tosca_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('mock_vnfd')
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnfd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnfd
+        function when neither VNFD id nor VNFD name is provided
+ """
+
+ log_calls = [call("Error [delete_vnfd(tacker_client, 'None', 'None')]:"
+ " You need to provide VNFD id or VNFD name")]
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd')
+ def test_delete_vnfd(self, mock_get_vnfd_id):
+ """
+ Checks the proper functionality of delete_vnfd
+ function
+ """
+
+ result = os_sfc_utils.delete_vnfd(self.mock_tacker_client,
+ None,
+ 'vnfd_name')
+ assert result is self.mock_tacker_client.delete_vnfd.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ self.mock_tacker_client.delete_vnfd.assert_called_once_with('vnfd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnfs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnfs
+        function when tacker_client.list_vnfs raises an Exception
+ """
+
+ log_calls = [call("Error [list_vnfs(tacker_client)]: ErrorMSG")]
+
+ self.mock_tacker_client.list_vnfs.side_effect = Exception('ErrorMSG')
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnfs(self):
+ """
+ Checks the proper functionality of list_vnfs
+ function
+ """
+ vnfs = {'vnfs': [{'id': 1},
+ {'id': 2}]}
+
+ self.mock_tacker_client.list_vnfs.return_value = vnfs
+ result = os_sfc_utils.list_vnfs(self.mock_tacker_client)
+ self.assertEqual([1, 2], result)
+ self.mock_tacker_client.list_vnfs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'None')]: "
+ "vnfd id or vnfd name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client, 'vnf_name')
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_returned_none_vnfd_provided(self, mock_log):
+ """
+ Checks the proper functionality of create_vnf
+ function when an exception is raised
+ """
+
+ log_calls = [call("Creating the vnf..."),
+ call("error [create_vnf(tacker_client,"
+ " 'vnf_name', 'None', 'vnfd_name')]: "
+ "vim id or vim name is required")]
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ None)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ @patch('sfc.lib.openstack_utils.get_vnfd_id',
+ autospec=True, return_value='vnfd_id')
+ def test_create_vnf_vim_id_not_provided(self,
+ mock_get_vnfd_id,
+ mock_get_vim_id,
+ mock_log,
+ mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {'param_values': 'mock_data'},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ None,
+ 'vnfd_name',
+ None,
+ 'vim_name',
+ 'param_file')
+
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_get_vnfd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnfd_name')
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnf_vim_id_provided(self, mock_log, mock_open):
+ """
+ Checks the proper functionality of create_vnf
+ function
+ """
+ mock_body = {'vnf': {'attributes': {},
+ 'vim_id': 'vim_id',
+ 'name': 'vnf_name',
+ 'vnfd_id': 'vnfd_id'}}
+ log_calls = [call('Creating the vnf...')]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+
+ result = os_sfc_utils.create_vnf(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_id',
+ 'vnfd_name',
+ 'vim_id',
+ 'vim_name')
+ assert result is self.mock_tacker_client.create_vnf.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnf.assert_called_once_with(
+ body=mock_body)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_not_provided(self, mock_log):
+ """
+ Checks the proper functionality of get_vnf
+        function when neither vnf_id nor vnf_name is provided
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, vnf_name=None]"
+ " - You must specify vnf_id or vnf_name")]
+
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnf_id',
+ autospec=True, return_value=None)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_vnf_returned_none_vnf_provided(self,
+ mock_log,
+ mock_get_vnf_id):
+ """
+ Checks the proper functionality of get_vnf
+        function when the VNF id cannot be resolved from vnf_name
+ """
+
+ log_calls = [call("Could not retrieve VNF [vnf_id=None, "
+ "vnf_name=vnf_name] - Could not retrieve ID from "
+ "name [vnf_name]")]
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ self.assertIsNone(result)
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.list_vnfs', autospec=True)
+ def test_get_vnf(self,
+ mock_list_vnfs,
+ mock_log):
+ """
+ Checks the proper functionality of get_vnf
+ function
+ """
+
+ vnf = {'vnfs': [{'id': 'default'},
+ {'id': 'vnf_id'}]}
+
+ mock_list_vnfs.return_value = vnf
+ result = os_sfc_utils.get_vnf(self.mock_tacker_client, 'vnf_id', None)
+ self.assertDictEqual(vnf['vnfs'][1], result)
+ mock_log.error.assert_not_called()
+
+ @patch('json.loads', autospec=True)
+    @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_get_vnf_ip(self,
+ mock_get_vnf,
+ mock_json_loads):
+ """
+ Checks the proper functionality of get_vnf_ip
+ function
+ """
+
+ vnf = {"mgmt_url": {"VDU1": "192.168.120.3"}}
+ mock_get_vnf.return_value = vnf
+ mock_json_loads.return_value = vnf['mgmt_url']
+ result = os_sfc_utils.get_vnf_ip(self.mock_tacker_client)
+ self.assertEqual("192.168.120.3", result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ None,
+ None)
+ mock_json_loads.assert_called_once_with(vnf['mgmt_url'])
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_retrieve_vnf(self,
+ mock_get_vnf,
+ mock_log):
+ """
+ Checks the proper functionality of wait_for_vnf
+        function when the VNF cannot be retrieved
+ """
+
+ mock_get_vnf.return_value = None
+ log_calls = [call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Could not retrieve VNF - id='vnf_id',"
+ " name='vnf_name'")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_unable_to_boot_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+        function when the VNF goes into ERROR state while booting
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'ERROR'},
+ {'id': 'vnf_id',
+                            'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[0])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Error when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf_returned_none_timeout_booting_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of wait_for_vnf
+        function when booting the VNF times out
+ """
+
+ mock_vnf_values = [{'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'},
+ {'id': 'vnf_id',
+ 'status': 'PENDING_CREATE'}]
+ mock_get_vnf.side_effect = mock_vnf_values
+ log_calls = [call("Waiting for vnf %s" % str(mock_vnf_values[1])),
+ call("error [wait_for_vnf(tacker_client, 'vnf_id', "
+ "'vnf_name')]: Timeout when booting vnf vnf_id")]
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 0)
+ self.assertIsNone(result)
+ mock_get_vnf.assert_called_with(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name')
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.error.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf', autospec=True)
+ def test_wait_for_vnf(self,
+ mock_get_vnf,
+ mock_log,
+ mock_sleep):
+ """
+ Checks for the proper functionality of wait_for_vnf
+ function
+ """
+
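+        # get_vnf is stubbed to report PENDING_CREATE on the first poll and
+        # ACTIVE on the second, so wait_for_vnf returns the id in time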
+ mock_vnf_values = [{'status': 'PENDING_CREATE',
+ 'id': 'vnf_id'},
+ {'status': 'ACTIVE',
+ 'id': 'vnf_id'}]
+
+ log_calls = [call("Waiting for vnf %s" % mock_vnf_values[0])]
+
+ mock_get_vnf.side_effect = mock_vnf_values
+
+ result = os_sfc_utils.wait_for_vnf(self.mock_tacker_client,
+ 'vnf_id',
+ 'vnf_name',
+ 3)
+ self.assertEqual('vnf_id', result)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnf_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+        function when neither a VNF id nor a VNF name is provided
+ """
+
+ log_calls = [call("Error [delete_vnf(tacker_client, 'None', 'None')]:"
+ " You need to provide a VNF id or name")]
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnf_id', autospec=True)
+ def test_delete_vnf(self,
+ mock_get_vnf_id,
+ mock_log):
+ """
+ Checks the proper functionality of delete_vnf
+ function
+ """
+
+ mock_get_vnf_id.return_value = 'vnf'
+ result = os_sfc_utils.delete_vnf(self.mock_tacker_client,
+ None,
+ 'vnf_name')
+ assert result is self.mock_tacker_client.delete_vnf.return_value
+ mock_get_vnf_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name')
+ self.mock_tacker_client.delete_vnf.assert_called_once_with('vnf')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim_returned_none(self,
+ mock_log):
+ """
+ Checks the proper functionality of create_vim
+ function when the vim_file is not provided
+ """
+
+ self.mock_tacker_client.create_vim.side_effect = Exception('ErrorMSG')
+ log_calls = [[call("Creating the vim...")],
+ [call("Error [create_vim(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.create_vim.assert_called_once_with(body={})
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vim(self,
+ mock_log,
+ mock_open,
+ mock_json_loads):
+ """
+ Checks the proper functionality of create_vim
+ function
+ """
+
+ log_calls = [call("Creating the vim..."),
+ call("VIM template:\nmock_data")]
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_json_loads.return_value = 'mock_data'
+ result = os_sfc_utils.create_vim(self.mock_tacker_client, 'vim_file')
+ assert result is self.mock_tacker_client.create_vim.return_value
+ mock_log.info.assert_has_calls(log_calls)
+ mock_open.assert_called_once_with('vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffgd
+ function when create_vnffgd raises an Exception
+ """
+
+ self.mock_tacker_client.create_vnffgd.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [[call("Creating the vnffgd...")],
+ [call("Error [create_vnffgd(tacker_client, 'None')]"
+ ": ErrorMSG")]]
+
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffgd(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffgd
+ function
+ """
+
+ log_calls = [call('Creating the vnffgd...'),
+ call('VNFFGD template:\nmock_data')]
+
+ vnffgd_body = {'id': 0, 'type': 'dict'}
+
+ mock_vim_body = {'vnffgd': {'name': 'vnffgd_name',
+ 'template': {'vnffgd': vnffgd_body}}}
+
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'mock_data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+ result = os_sfc_utils.create_vnffgd(self.mock_tacker_client,
+ 'tosca_file',
+ 'vnffgd_name')
+ assert result is self.mock_tacker_client.create_vnffgd.return_value
+ mock_open.assert_called_once_with('tosca_file')
+ mock_safe_load.assert_called_once_with('mock_data')
+ self.mock_tacker_client.create_vnffgd.assert_called_once_with(
+ body=mock_vim_body)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of create_vnffg
+        function when neither the vnffgd id nor the vnffgd name is provided
+ """
+
+ log_calls = [[call("Creating the vnffg...")],
+ [call("error [create_vnffg(tacker_client,"
+ " 'None', 'None', 'None')]: "
+ "vnffgd id or vnffgd name is required")]]
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.info.assert_has_calls(log_calls[0])
+ mock_log.error.assert_has_calls(log_calls[1])
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id', autospec=True)
+ def test_create_vnffg_vnffgd_id_not_provided(self,
+ mock_get_vnffgd_id,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+        function when the vnffgd id is resolved from the vnffgd name
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'mocked_vnffg_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ mock_get_vnffgd_id.return_value = 'mocked_vnffg_id'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ None,
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('yaml.safe_load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_create_vnffg_vnffgd_id_provided(self,
+ mock_log,
+ mock_open,
+ mock_safe_load):
+ """
+ Checks the proper functionality of create_vnffg
+        function when the vnffgd id is provided directly
+ """
+
+ log_calls = [call('Creating the vnffg...')]
+ vnffg_calls = [call(body={
+ 'vnffg': {
+ 'attributes': {'param_values': {'type': 'dict',
+ 'id': 0}},
+ 'vnffgd_id': 'vnffgd_id',
+ 'name': 'vnffg_name',
+ 'symmetrical': False}})]
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_handler.read.return_value = 'data'
+ mock_safe_load.return_value = {'id': 0, 'type': 'dict'}
+
+ result = os_sfc_utils.create_vnffg(self.mock_tacker_client,
+ 'vnffg_name',
+ 'vnffgd_id',
+ 'vnffgd_name',
+ 'param_file')
+ assert result is self.mock_tacker_client.create_vnffg.return_value
+ mock_open.assert_called_once_with('param_file')
+ open_handler.read.assert_called_once_with()
+ mock_safe_load.assert_called_once_with('data')
+ mock_log.info.assert_has_calls(log_calls)
+ self.mock_tacker_client.create_vnffg.assert_has_calls(vnffg_calls)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgds_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgds
+ function when list_vnffgds raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgds.side_effect = Exception(
+ 'ErrorMSG')
+ log_calls = [call('Error [list_vnffgds(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgds(self):
+ """
+        Checks the proper functionality of list_vnffgds
+ function
+ """
+
+ vnffgds = {'vnffgds': [{'id': 'vnffgd_obj_one'},
+ {'id': 'vnffgd_obj_two'}]}
+
+ mock_vnffgds = ['vnffgd_obj_one', 'vnffgd_obj_two']
+
+ self.mock_tacker_client.list_vnffgds.return_value = vnffgds
+ result = os_sfc_utils.list_vnffgds(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgds, result)
+ self.mock_tacker_client.list_vnffgds.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vnffgs_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vnffgs
+ function when list_vnffgs raises an Exception
+ """
+
+ self.mock_tacker_client.list_vnffgs.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vnffgs(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vnffgs(self):
+ """
+ Checks the proper functionality of list_vnffgs
+ function
+ """
+
+ vnffgs = {'vnffgs': [{'id': 'vnffg_obj_one'},
+ {'id': 'vnffg_obj_two'}]}
+
+ mock_vnffgs = ['vnffg_obj_one', 'vnffg_obj_two']
+
+ self.mock_tacker_client.list_vnffgs.return_value = vnffgs
+ result = os_sfc_utils.list_vnffgs(self.mock_tacker_client)
+ self.assertEqual(mock_vnffgs, result)
+ self.mock_tacker_client.list_vnffgs.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffg_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffg
+        function when neither a VNFFG id nor a VNFFG name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffg(tacker_client, 'None', 'None')]"
+ ": You need to provide a VNFFG id or name")]
+
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffg_id',
+ autospec=True, return_value='vnffg')
+ def test_delete_vnffg(self, mock_get_vnffg_id):
+ """
+ Checks the proper functionality of delete_vnffg
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffg.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffg(self.mock_tacker_client,
+ None,
+ 'vnffg_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffg_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffg_name')
+ self.mock_tacker_client.delete_vnffg.assert_called_once_with('vnffg')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vnffgd_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vnffgd
+        function when neither a VNFFGD id nor a VNFFGD name is provided
+ """
+
+ log_calls = [call("Error [delete_vnffgd(tacker_client, 'None', 'None')"
+ "]: You need to provide VNFFGD id or VNFFGD name")]
+
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vnffgd_id',
+ autospec=True, return_value='vnffgd')
+ def test_delete_vnffgd(self, mock_get_vnffgd_id):
+ """
+ Checks the proper functionality of delete_vnffgd
+ function
+ """
+
+ self.mock_tacker_client.delete_vnffgd.return_value = 'deleted'
+ result = os_sfc_utils.delete_vnffgd(self.mock_tacker_client,
+ None,
+ 'vnffgd_name')
+ self.assertEqual('deleted', result)
+ mock_get_vnffgd_id.assert_called_once_with(self.mock_tacker_client,
+ 'vnffgd_name')
+ self.mock_tacker_client.delete_vnffgd.assert_called_once_with('vnffgd')
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_list_vims_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of list_vims
+        function when tacker_client.list_vims raises an Exception
+ """
+
+ self.mock_tacker_client.list_vims.side_effect = Exception('ErrorMSG')
+ log_calls = [call('Error [list_vims(tacker_client)]: ErrorMSG')]
+
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertIsNone(result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+ mock_log.error.assert_has_calls(log_calls)
+
+ def test_list_vims(self):
+ """
+        Checks the proper functionality of list_vims
+ function
+ """
+
+ vims = {'vims': [{'id': 'vim_obj_1'},
+ {'id': 'vim_obj_2'}]}
+
+ mock_vims = ['vim_obj_1', 'vim_obj_2']
+
+ self.mock_tacker_client.list_vims.return_value = vims
+ result = os_sfc_utils.list_vims(self.mock_tacker_client)
+ self.assertEqual(mock_vims, result)
+ self.mock_tacker_client.list_vims.assert_called_once_with(
+ retrieve_all=True)
+
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_delete_vim_returned_none(self, mock_log):
+ """
+ Checks the proper functionality of delete_vim
+        function when neither VIM id nor VIM name is provided
+ """
+
+ log_calls = [call("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
+ % (None, None, 'You need to provide '
+ 'VIM id or VIM name'))]
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client)
+ self.assertIsNone(result)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.openstack_utils.get_vim_id',
+ autospec=True, return_value='vim_id')
+ def test_delete_vim(self, mock_get_vim_id):
+ """
+ Checks the proper functionality of delete_vim
+ function
+ """
+
+ result = os_sfc_utils.delete_vim(self.mock_tacker_client,
+ None,
+ 'vim_name')
+ assert result is self.mock_tacker_client.delete_vim.return_value
+ mock_get_vim_id.assert_called_once_with(self.mock_tacker_client,
+ 'vim_name')
+ self.mock_tacker_client.delete_vim.assert_called_once_with('vim_id')
+
+ @patch('sfc.lib.openstack_utils.get_tacker_client',
+ autospec=True, return_value='tacker_client_obj')
+ @patch('sfc.lib.openstack_utils.logger', autospec=True)
+ def test_get_tacker_items(self,
+ mock_log,
+ mock_tacker_client):
+ """
+ Checks the proper functionality of get_tacker_items
+ function
+ """
+
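+        # mock.DEFAULT makes patch.multiple() replace each listed function
+        # with a MagicMock; the replacements are handed back in mock_values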
+ mock_dict = {'list_vims': DEFAULT,
+ 'list_vnfds': DEFAULT,
+ 'list_vnfs': DEFAULT,
+ 'list_vnffgds': DEFAULT,
+ 'list_vnffgs': DEFAULT}
+ with patch.multiple('sfc.lib.openstack_utils',
+ **mock_dict) as mock_values:
+
+ os_sfc_utils.get_tacker_items()
+
+ mock_tacker_client.assert_called_once_with()
+ self.assertEqual(5, mock_log.debug.call_count)
+ for key in mock_values:
+ mock_values[key].assert_called_once_with('tacker_client_obj')
+
+ @patch('json.dump', autospec=True)
+ @patch('json.load', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vim', autospec=True)
+ def test_register_vim(self,
+ mock_create_vim,
+ mock_open,
+ mock_json_loads,
+ mock_json_dump):
+ """
+ Checks the proper functionality of register_vim
+ function
+ """
+
+ tmp_file = '/tmp/register-vim.json'
+ open_handler = mock_open.return_value.__enter__.return_value
+ open_calls = [call('vim_file'),
+ call(tmp_file, 'w')]
+
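+        # The loaded vim template has empty credentials; register_vim is
+        # expected to fill them from the patched OS_AUTH_URL / OS_PASSWORD
+        # environment variables before dumping the result to a temp file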
+ mock_json_loads.return_value = {'vim': {'auth_cred':
+ {'password': None},
+ 'auth_url': None}}
+
+ json_dict = {'vim': {'auth_cred': {'password': 'os_auth_cred'},
+ 'auth_url': 'os_auth_url'}}
+
+ patch_dict = {'OS_AUTH_URL': 'os_auth_url',
+ 'OS_PASSWORD': 'os_auth_cred'}
+
+ with patch.dict('os.environ', patch_dict):
+ os_sfc_utils.register_vim(self.mock_tacker_client, 'vim_file')
+ mock_json_loads.assert_called_once_with(open_handler)
+ mock_json_dump.assert_called_once_with(json_dict,
+ mock_open(tmp_file, 'w'))
+ mock_open.assert_has_calls(open_calls, any_order=True)
+ mock_create_vim.assert_called_once_with(self.mock_tacker_client,
+ vim_file=tmp_file)
+
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnf', autospec=True)
+ @patch('os.path.join',
+ autospec=True, return_value='/tmp/param_av_zone.json')
+ def test_create_vnf_in_av_zone(self,
+ mock_path_join,
+ mock_create_vnf,
+ mock_open,
+ mock_json_dump):
+ """
+        Checks the proper functionality of the create_vnf_in_av_zone
+        function
+ """
+
+ data = {'zone': 'av::zone'}
+ param_file = '/tmp/param_av_zone.json'
+ os_sfc_utils.create_vnf_in_av_zone(self.mock_tacker_client,
+ 'vnf_name',
+ 'vnfd_name',
+ 'vim_name',
+ 'param_file',
+ 'av::zone')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_av_zone.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnf.assert_called_once_with(self.mock_tacker_client,
+ 'vnf_name',
+ vnfd_name='vnfd_name',
+ vim_name='vim_name',
+ param_file=param_file)
+
+ @patch('json.dump', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('sfc.lib.openstack_utils.create_vnffg', autospec=True)
+ @patch('os.path.join', autospec=True, return_value='/tmp/param_name.json')
+ def test_create_vnffg_with_param_file(self,
+ mock_path_join,
+ mock_create_vnffg,
+ mock_open,
+ mock_json_dump):
+ """
+ Checks the proper functionality of create_vnffg_with_param_file
+ function
+ """
+
+ data = {
+ 'ip_dst_prefix': 'server_ip',
+ 'net_dst_port_id': 'server_port',
+ 'net_src_port_id': 'client_port'
+ }
+ param_file = '/tmp/param_name.json'
+ os_sfc_utils.create_vnffg_with_param_file(self.mock_tacker_client,
+ 'vnffgd_name',
+ 'vnffg_name',
+ 'default_param_file',
+ 'client_port',
+ 'server_port',
+ 'server_ip')
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_path_join.assert_called_once_with('/tmp', 'param_vnffg_name.json')
+ mock_open.assert_called_once_with(param_file, 'w+')
+ mock_json_dump.assert_called_once_with(data, open_handler)
+ mock_create_vnffg.assert_called_once_with(self.mock_tacker_client,
+ vnffgd_name='vnffgd_name',
+ vnffg_name='vnffg_name',
+ param_file=param_file,
+ symmetrical=True)
diff --git a/sfc/unit_tests/unit/lib/test_test_utils.py b/sfc/unit_tests/unit/lib/test_test_utils.py
new file mode 100644
index 00000000..a7d2bfde
--- /dev/null
+++ b/sfc/unit_tests/unit/lib/test_test_utils.py
@@ -0,0 +1,543 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright (c) 2018 Venkata Harshavardhan Reddy Allu and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+import unittest
+
+from mock import Mock
+from mock import call
+from mock import patch
+
+import sfc.lib.test_utils as test_utils
+
+__author__ = "Harshavardhan Reddy <venkataharshavardhan_ven@srmuniv.edu.in>"
+
+
+class SfcTestUtilsTesting(unittest.TestCase):
+
+ def setUp(self):
+ self.ip = '10.10.10.10'
+
+ @patch('subprocess.PIPE', autospec=True)
+ @patch('subprocess.Popen', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ def test_run_cmd(self,
+ mock_log,
+ mock_popen,
+ mock_pipe):
+ """
+ Checks the proper functionality of run_cmd
+ function
+ """
+
+ cmd = 'mock_command'
+ log_calls = [call('Running [mock_command] returns: [0] ' +
+ '- STDOUT: "output" - STDERR: "output"')]
+
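+        # Fake Popen object: communicate() yields a (stdout, stderr) tuple
+        # and returncode reports success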
+ pipe_mock = Mock()
+ attrs = {'communicate.return_value': ('output', 'error'),
+ 'returncode': 0}
+ pipe_mock.configure_mock(**attrs)
+ mock_popen.return_value = pipe_mock
+ result = test_utils.run_cmd(cmd)
+ self.assertTupleEqual(result, (0, 'output', 'error'))
+ mock_popen.assert_called_once_with(cmd,
+ shell=True,
+ stdout=mock_pipe,
+ stderr=mock_pipe)
+ mock_popen.return_value.communicate.assert_called_once_with()
+ mock_log.debug.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_run_cmd_remote(self, mock_run_cmd):
+ """
+ Checks the proper functionality of the run_cmd_remote
+ function
+ """
+
+ cmd = 'mock_command'
+ mock_rc = 'sshpass -p opnfv ssh -q -o UserKnownHostsFile=/dev/null' + \
+ ' -o StrictHostKeyChecking=no -o ConnectTimeout=50 ' + \
+ ' root@10.10.10.10 mock_command'
+ test_utils.run_cmd_remote(self.ip, cmd)
+ mock_run_cmd.assert_called_once_with(mock_rc)
+
+ @patch('shutil.copyfileobj')
+ @patch('urllib.urlopen', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ def test_download_url_with_exception(self,
+ mock_open,
+ mock_urlopen,
+ mock_copyfileobj):
+ """
+ Checks the proper functionality of download_url
+ function when an exception is raised
+ """
+
+ dest_path = 'mocked_/dest_/path'
+ url = 'mocked_/url'
+ mock_urlopen.side_effect = Exception('HttpError')
+ self.assertFalse(test_utils.download_url(url, dest_path))
+ mock_urlopen.assert_called_once_with(url)
+ mock_open.assert_not_called()
+ mock_copyfileobj.assert_not_called()
+
+ @patch('urllib.urlopen', autospec=True)
+ @patch('__builtin__.open', autospec=True)
+ @patch('shutil.copyfileobj', autospec=True)
+ def test_download_url_without_exception(self,
+ mock_copyfileobj,
+ mock_open,
+ mock_urlopen):
+ """
+ Checks the proper functionality of download_url
+        function when no exception is raised
+ """
+
+ response = '<mocked_response>'
+ dest_path = 'mocked_/dest_/path'
+ url = 'mocked_/url'
+ open_handler = mock_open.return_value.__enter__.return_value
+ mock_urlopen.return_value = response
+ self.assertTrue(test_utils.download_url(url, dest_path))
+ mock_urlopen.assert_called_once_with(url)
+ mock_open.assert_called_once_with('mocked_/dest_/path/url', 'wb')
+ mock_copyfileobj.assert_called_once_with(response, open_handler)
+
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.download_url', autospec=True)
+ @patch('os.path.isfile', autospec=True, return_value=False)
+ @patch('os.path.dirname', autospec=True, return_value='mocked_')
+ @patch('os.path.basename', autospec=True, return_value='image_path')
+ def test_download_image_file_not_found(self,
+ mock_basename,
+ mock_dirname,
+ mock_isfile,
+ mock_download_url,
+ mock_log):
+ """
+ Checks the proper functionality of download_image
+ function when the image file was not found locally
+ """
+
+ url = 'mocked_/url'
+ image_path = 'mocked_/image_path'
+ log_calls = [call('Downloading image')]
+ test_utils.download_image(url, image_path)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_basename.assert_called_once_with(image_path)
+ mock_dirname.assert_called_once_with(image_path)
+ mock_isfile.assert_called_once_with(image_path)
+ mock_download_url.assert_called_once_with('mocked_/url/image_path',
+ 'mocked_')
+
+ @patch('sfc.lib.test_utils.download_url')
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('os.path.isfile', autospec=True, return_value=True)
+ @patch('os.path.dirname', autospec=True, return_value='mocked_')
+ @patch('os.path.basename', autospec=True, return_value='image_path')
+ def test_download_image_file_found(self,
+ mock_basename,
+ mock_dirname,
+ mock_isfile,
+ mock_log,
+ mock_download_url):
+ """
+ Checks the proper functionality of download_image
+ function when the image file was found locally
+ """
+
+ url = 'mocked_/url'
+ image_path = 'mocked_/image_path'
+ log_calls = [call('Using old image')]
+ test_utils.download_image(url, image_path)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_basename.assert_called_once_with(image_path)
+ mock_dirname.assert_called_once_with(image_path)
+ mock_isfile.assert_called_once_with(image_path)
+ mock_download_url.assert_not_called()
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_ping_gets_error(self, mock_run_cmd):
+ """
+ Checks the proper functionality of ping
+ function when run_cmd returns non-zero returncode
+ """
+
+ mock_cmd = 'ping -c1 -w1 %s' % self.ip
+ mock_run_cmd.return_value = (1, '', '')
+ self.assertFalse(test_utils.ping(self.ip, 1))
+ mock_run_cmd.assert_called_once_with(mock_cmd)
+
+ @patch('sfc.lib.test_utils.run_cmd', autospec=True)
+ def test_ping_gets_no_error(self, mock_run_cmd):
+ """
+ Checks the proper functionality of ping
+ function when run_cmd returns zero as returncode
+ """
+
+ mock_cmd = 'ping -c1 -w1 %s' % self.ip
+ mock_run_cmd.return_value = (0, '', '')
+ self.assertTrue(test_utils.ping(self.ip, 1))
+ mock_run_cmd.assert_called_once_with(mock_cmd)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_false_failed_to_start(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+        function when the http server fails to start
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer')]
+ log_calls = [call('Failed to start http server')]
+
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', '', '')]
+
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_once_with(3)
+ mock_log.error.assert_has_calls(log_calls)
+ mock_log.info.assert_not_called()
+ mock_log.debug.assert_not_called()
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_false_port_is_down(
+ self, mock_run_cmd_remote, mock_log, mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+ function when port 80 is down
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer'),
+ call(self.ip, 'netstat -pntl | grep :80')]
+
+ log_calls = [call('output'),
+ call('Port 80 is not up yet')]
+
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', 'output', ''),
+ ('', '', '')]
+
+ result = test_utils.start_http_server(self.ip, 1)
+ self.assertFalse(result)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_with(5)
+ mock_log.info.assert_has_calls(log_calls[:1])
+ mock_log.debug.assert_has_calls(log_calls[1:])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_http_server_returned_true(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_http_server
+        function when port 80 is up
+ """
+
+ cmd = "\'python -m SimpleHTTPServer 80 " + \
+ "> /dev/null 2>&1 &\'"
+
+ rcr_calls = [call(self.ip, cmd),
+ call(self.ip, 'ps aux | grep SimpleHTTPServer'),
+ call(self.ip, 'netstat -pntl | grep :80')]
+
+ log_calls = [call('output')]
+
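+        # run_cmd_remote is called three times: launch the server, grep the
+        # process list, grep netstat for port 80; the last two return output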
+ mock_run_cmd_remote.side_effect = [('', '', ''),
+ ('', 'output', ''),
+ ('', 'output', '')]
+
+ self.assertTrue(test_utils.start_http_server(self.ip, 1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_sleep.assert_called_once_with(3)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_log.debug.assert_not_called()
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_vxlan_tool_returned_false(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_vxlan_tool
+        function when the ps command returns no output
+ """
+
+ mock_run_cmd_remote.side_effect = [('', 'output', ''),
+ ('', '', '')]
+
+ mock_rc = 'nohup python /root/vxlan_tool.py --do ' + \
+ 'forward --interface eth0 > /dev/null 2>&1 &'
+
+ rcr_calls = [call(self.ip, mock_rc),
+ call(self.ip, 'ps aux | grep vxlan_tool')]
+
+ log_calls = [call('Failed to start the vxlan tool')]
+
+ self.assertFalse(test_utils.start_vxlan_tool(self.ip))
+ mock_sleep.assert_called_once_with(3)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.error.assert_has_calls(log_calls)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_start_vxlan_tool_returned_output(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of start_vxlan_tool
+        function when the ps command returns output
+ """
+
+ mock_run_cmd_remote.side_effect = [('', 'output', ''),
+ ('', 'output', '')]
+
+ mock_rc = 'nohup python /root/vxlan_tool.py --do ' + \
+ 'forward --interface eth0 > /dev/null 2>&1 &'
+
+ rcr_calls = [call(self.ip, mock_rc),
+ call(self.ip, 'ps aux | grep vxlan_tool')]
+
+ self.assertIsNotNone(test_utils.start_vxlan_tool(self.ip))
+ mock_sleep.assert_called_once_with(3)
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.error.assert_not_called()
+
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_stop_vxlan_tool(self, mock_run_cmd_remote):
+ """
+ Checks the proper functionality of stop_vxlan_tool
+ function
+ """
+
+ mock_rc = 'pkill -f vxlan_tool.py'
+ test_utils.stop_vxlan_tool(self.ip)
+ mock_run_cmd_remote.assert_called_once_with(self.ip, mock_rc)
+
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_netcat(self,
+ mock_run_cmd_remote,
+ mock_log):
+ """
+ Checks the proper functionality of netcat
+ function
+ """
+
+ dest_ip = 'mock_destination_ip'
+ c = 'nc -z -w 5 %s 1234' % dest_ip
+ log_calls = [call('Running [%s] from [%s] returns [0]' % (c, self.ip))]
+ mock_run_cmd_remote.return_value = (0, '', '')
+ result = test_utils.netcat(self.ip, dest_ip, 1234)
+ self.assertEqual(result, 0)
+ mock_log.info.assert_has_calls(log_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_ssh_blocked_returned_false(self, mock_netcat):
+ """
+        Checks the proper functionality of is_ssh_blocked
+ function when the returncode is zero
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='22',
+ source_port=None)]
+
+ mock_netcat.return_value = 0
+ self.assertFalse(test_utils.is_ssh_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_ssh_blocked_returned_true(self, mock_netcat):
+ """
+        Checks the proper functionality of is_ssh_blocked
+        function when the returncode is a non-zero integer
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='22',
+ source_port=None)]
+
+ mock_netcat.return_value = 1
+ self.assertTrue(test_utils.is_ssh_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_http_blocked_returned_false(self, mock_netcat):
+ """
+        Checks the proper functionality of is_http_blocked
+ function when the returncode is zero
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='80',
+ source_port=None)]
+
+ mock_netcat.return_value = 0
+ self.assertFalse(test_utils.is_http_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('sfc.lib.test_utils.netcat', autospec=True)
+ def test_is_http_blocked_returned_true(self, mock_netcat):
+ """
+ Checks the proper functionality of is_http_blocked
+ function when the returncode is a non-zero integer
+ """
+
+ dest_ip = 'mock_destination_ip'
+ nc_calls = [call('10.10.10.10',
+ 'mock_destination_ip',
+ destination_port='80',
+ source_port=None)]
+
+ mock_netcat.return_value = 1
+ self.assertTrue(test_utils.is_http_blocked(self.ip, dest_ip))
+ mock_netcat.assert_has_calls(nc_calls)
+
+ @patch('time.strftime', autospec=True)
+ @patch('opnfv.utils.ovs_logger.OVSLogger', autospec=True)
+ def test_capture_ovs_logs(self,
+ mock_ovs_log,
+ mock_strftime):
+ """
+ Checks the proper functionality of capture_ovs_logs
+ function
+ """
+
+ log_calls = [call('controller_clients',
+ 'compute_clients',
+ 'error',
+ 'date_time')]
+
+ mock_strftime.return_value = 'date_time'
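+ # The timestamp is stubbed so the dump_ovs_logs call can be asserted
+ # against the fixed 'date_time' value.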
+ test_utils.capture_ovs_logs(mock_ovs_log,
+ 'controller_clients',
+ 'compute_clients',
+ 'error')
+
+ mock_strftime.assert_called_once_with('%Y%m%d-%H%M%S')
+ mock_ovs_log.dump_ovs_logs.assert_has_calls(log_calls)
+
+ def test_get_ssh_clients(self):
+ """
+ Checks the proper functionality of get_ssh_clients
+ function
+ """
+
+ mock_node_obj_one = Mock()
+ mock_node_obj_two = Mock()
+ mock_node_obj_one.ssh_client = 'ssh_client_one'
+ mock_node_obj_two.ssh_client = 'ssh_client_two'
+ nodes = [mock_node_obj_one, mock_node_obj_two]
+ result = test_utils.get_ssh_clients(nodes)
+ self.assertEqual(result, ['ssh_client_one', 'ssh_client_two'])
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_check_ssh_returned_false(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of check_ssh
+ function when some VNFs can't establish SSH connectivity
+ """
+
+ ips = ["ip_address-1",
+ "ip_address-2"]
+
+ rcr_calls = [call(ips[0], 'exit'),
+ call(ips[1], 'exit')]
+
+ log_calls = [call('Checking SSH connectivity ' +
+ 'to the SFs with ips %s' % str(ips))]
+
+ mock_run_cmd_remote.side_effect = [(1, '', ''),
+ (0, '', '')]
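+ # The first SF answers the 'exit' probe with rc 1 (no SSH connectivity),
+ # the second with rc 0; with a single retry check_ssh should return False.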
+
+ self.assertFalse(test_utils.check_ssh(ips, retries=1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_sleep.assert_called_once_with(3)
+
+ @patch('time.sleep', autospec=True)
+ @patch('sfc.lib.test_utils.logger', autospec=True)
+ @patch('sfc.lib.test_utils.run_cmd_remote', autospec=True)
+ def test_check_ssh_returned_true(self,
+ mock_run_cmd_remote,
+ mock_log,
+ mock_sleep):
+ """
+ Checks the proper functionality of check_ssh
+ function when all VNFs can establish SSH connectivity
+ """
+
+ ips = ["ip_address-1",
+ "ip_address-2"]
+
+ rcr_calls = [call(ips[0], 'exit'),
+ call(ips[1], 'exit')]
+
+ log_calls = [call('Checking SSH connectivity to ' +
+ 'the SFs with ips %s' % str(ips)),
+ call('SSH connectivity to the SFs established')]
+
+ mock_run_cmd_remote.side_effect = [(0, '', ''),
+ (0, '', '')]
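+ # Both SFs answer the 'exit' probe with rc 0, so connectivity is
+ # established on the first attempt and no retry sleep should occur.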
+
+ self.assertTrue(test_utils.check_ssh(ips, retries=1))
+ mock_run_cmd_remote.assert_has_calls(rcr_calls)
+ mock_log.info.assert_has_calls(log_calls)
+ mock_sleep.assert_not_called()
+
+ def test_fill_installer_dict(self):
+ """
+ Checks the proper functionality of fill_installer_dict
+ function
+ """
+
+ installer_type = 'mock_installer'
+ installer_yaml_fields = {
+ 'user': 'defaults.installer.mock_installer.user',
+ 'password': 'defaults.installer.mock_installer.password',
+ 'cluster': 'defaults.installer.mock_installer.cluster',
+ 'pkey_file': 'defaults.installer.mock_installer.pkey_file'
+ }
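+ # fill_installer_dict is expected to expand the installer type into the
+ # 'defaults.installer.<type>.<field>' key paths listed above.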
+ result = test_utils.fill_installer_dict(installer_type)
+ self.assertDictEqual(result, installer_yaml_fields)