-rw-r--r--  INFO.yaml | 67
-rw-r--r--  requirements.txt | 6
-rw-r--r--  scenarios/os-odl-sfc/role/os-odl-sfc/README | 59
-rw-r--r--  sfc/lib/config.py | 18
-rw-r--r--  sfc/lib/odl_utils.py | 198
-rw-r--r--  sfc/lib/openstack_utils.py | 125
-rw-r--r--  sfc/lib/test_utils.py | 9
-rw-r--r--  sfc/tests/functest/config-pike.yaml | 84
-rw-r--r--  sfc/tests/functest/config.yaml | 4
-rw-r--r--  sfc/tests/functest/sfc_chain_deletion.py | 7
-rw-r--r--  sfc/tests/functest/sfc_one_chain_two_service_functions.py | 3
-rw-r--r--  sfc/tests/functest/sfc_symmetric_chain.py | 201
-rw-r--r--  sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py | 6
-rw-r--r--  sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml | 21
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml (renamed from sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml-queens) | 4
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml | 4
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml (renamed from sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml-queens) | 4
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml | 4
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml | 46
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens | 46
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml (renamed from sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml-queens) | 21
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml | 19
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml (renamed from sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml-queens) | 22
-rw-r--r--  sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml | 20
24 files changed, 732 insertions(+), 266 deletions(-)
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 00000000..81a1f9e9
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,67 @@
+---
+project: 'Service Function Chaining (sfc)'
+project_creation_date: 'May 5, 2015'
+project_category: 'Collaborative Development'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_sfc_ptl
+ name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+ timezone: 'Unknown'
+primary_contact: *opnfv_sfc_ptl
+issue_tracking:
+ type: 'jira'
+ url: 'https://jira.opnfv.org/projects/sfc'
+ key: 'sfc'
+mailing_list:
+ type: 'mailman2'
+ url: 'opnfv-tech-discuss@lists.opnfv.org'
+ tag: '[sfc]'
+realtime_discussion:
+ type: irc
+ server: 'freenode.net'
+ channel: '#opnfv-sfc'
+meetings:
+ - type: 'gotomeeting+irc'
+ agenda: # eg: 'https://wiki.opnfv.org/display/'
+ url: # eg: 'https://global.gotomeeting.com/join/819733085'
+ server: 'freenode.net'
+ channel: '#opnfv-meeting'
+ repeats: 'weekly'
+ time: # eg: '16:00 UTC'
+repositories:
+ - 'sfc'
+committers:
+ - <<: *opnfv_sfc_ptl
+ - name: 'Brady Johnson'
+ email: 'brady.allen.johnson@ericsson.com'
+ company: 'ericsson.com'
+ id: 'ebrjohn'
+ - name: 'Reinaldo Penno'
+ email: 'rapenno@gmail.com'
+ company: 'gmail.com'
+ id: 'repenno'
+ - name: 'Sam Hague'
+ email: 'shague@redhat.com'
+ company: 'redhat.com'
+ id: 'shague'
+ - name: 'Vishal Murgai'
+ email: 'vmurgai@cavium.com'
+ company: 'cavium.com'
+ id: 'vmurgai'
+ - name: 'Tim Rozet'
+ email: 'trozet@redhat.com'
+ company: 'redhat.com'
+ id: 'trozet'
+ - name: 'Manuel Buil'
+ email: 'manuelbuil87@gmail.com'
+ company: 'gmail.com'
+ id: 'mbuil'
+ - name: 'Dimitrios Markou'
+ email: 'mardim@intracom-telecom.com'
+ company: 'intracom-telecom.com'
+ id: 'mardim'
+tsc:
+ # yamllint disable rule:line-length
+ approval: ''
+ # yamllint enable rule:line-length
diff --git a/requirements.txt b/requirements.txt
index 93511b97..0a4947bb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,14 +2,14 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-paramiko>=2.0 # LGPLv2.1+
+paramiko>=2.0.0 # LGPLv2.1+
python-glanceclient>=2.8.0 # Apache-2.0
requests>=2.14.2 # Apache-2.0
xmltodict>=0.10.1 # MIT
python-keystoneclient>=3.8.0 # Apache-2.0
-python-novaclient>=9.0.0 # Apache-2.0
+python-novaclient>=9.1.0 # Apache-2.0
python-tackerclient>=0.8.0 # Apache-2.0
-PyYAML>=3.10.0 # MIT
+PyYAML>=3.12 # MIT
opnfv
snaps
xtesting # Apache-2.0
diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/README b/scenarios/os-odl-sfc/role/os-odl-sfc/README
index 3cb8cb29..a7461ec8 100644
--- a/scenarios/os-odl-sfc/role/os-odl-sfc/README
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/README
@@ -2,61 +2,10 @@ This is the role which deploys the os-odl-sfc scenarios in xci.
This role currently works with:
-- OpenStack stable/pike
-- ODL Nitrogen RC1
+- OpenStack stable/queens
+- ODL Oxygen
- OVS+NSH patch
- OpenSUSE 42.3 or Ubuntu 16.04
-# PREPARATIONS #
-
-1 - If you don’t have a key already, generate an SSH key in $HOME/.ssh
-ssh-keygen -t rsa
-
-2 - Clone OPNFV releng-xci repository
-git clone https://gerrit.opnfv.org/gerrit/releng-xci.git
-
-3 - Change into directory where the sandbox script is located:
-cd releng-xci/xci
-
-4 - Use a version of releng-xci which we know works
-
-git checkout cf2cd4e4b87a5e392bc4ba49749a349925ba2f86
-
-Then, depending on the scenario which will be run:
-
-## os-odl-sfc-noha ##
-
-To run os-odl-sfc-noha you should export the following variables before
-running xci-deploy.sh. Note that you should change xxxx by the path where
-your releng-xci code is:
-
-export XCI_FLAVOR=noha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=16384
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-## os-odl-sfc-ha ##
-
-To run os-odl-sfc-ha you should export the following variables before
-running xci-deploy.sh:
-
-export XCI_FLAVOR=ha
-export OPNFV_SCENARIO=os-odl-sfc
-export OPENSTACK_OSA_VERSION=stable/pike
-export VM_MEMORY_SIZE=20480
-export OPENSTACK_BIFROST_VERSION=bd7e99bf7a00e4c9ad7d03d752d7251e3caf8509
-
-
-# LIMITATIONS #
-
-1 - It is using a private branch for the os-neutron role. This is because
-there are several patches pending to be upstreamed. This is the branch we
-are using:
-
-https://github.com/manuelbuil/openstack-ansible-os_neutron/tree/testing-ovs-nsh2
-
-We will stop doing this as soon as the patches are merged upstream
-
-2 - It is using a private branch for tacker code because a bug does not
-allow SSL. We will stop doing this as soon as the bug is fixed
+Follow this link:
+https://wiki.opnfv.org/display/sfc/Deploy+OPNFV+SFC+scenarios
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index 5ee3077a..a4f5d67b 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -30,7 +30,7 @@ class CommonConfig(object):
"""
def __init__(self):
- self.line_length = 30
+ self.line_length = 35
self.functest_repo_path = os.path.dirname(functest.__file__)
self.functest_logging_api = os.path.join(self.functest_repo_path,
"ci", "logging.ini")
@@ -43,7 +43,21 @@ class CommonConfig(object):
self.vnffgd_dir = os.path.join(self.sfc_test_dir, "vnffgd-templates")
self.functest_results_dir = os.path.join(
getattr(config.CONF, 'dir_results'), "odl-sfc")
- self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
+
+ # We need to know the openstack version in order to use one config or
+ # another. For Pike we will use config-pike.yaml. Queens and Rocky
+ # will use config.yaml
+ if 'OPENSTACK_OSA_VERSION' in os.environ:
+ if os.environ['OPENSTACK_OSA_VERSION'] == 'stable/pike':
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config-pike.yaml")
+ else:
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config.yaml")
+ else:
+ self.config_file = os.path.join(self.sfc_test_dir,
+ "config-pike.yaml")
+
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
self.installer_type = env.get('INSTALLER_TYPE')
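The config.py change above selects the test configuration from the OPENSTACK_OSA_VERSION environment variable. A minimal standalone sketch of that selection logic, assuming the same variable and file names (the directory path is illustrative only):

import os

def pick_config_file(sfc_test_dir):
    # Pike keeps the older VNFFGD criteria format, so it gets its own
    # config-pike.yaml; Queens/Rocky use config.yaml. When the variable
    # is unset, the logic above conservatively assumes Pike.
    osa_version = os.environ.get('OPENSTACK_OSA_VERSION')
    if osa_version is None or osa_version == 'stable/pike':
        return os.path.join(sfc_test_dir, 'config-pike.yaml')
    return os.path.join(sfc_test_dir, 'config.yaml')

# e.g. OPENSTACK_OSA_VERSION=stable/queens -> <sfc_test_dir>/config.yaml
print(pick_config_file('sfc/tests/functest'))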
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index edd52054..e1980423 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -12,6 +12,15 @@ import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
+ODL_MODULE_EXCEPTIONS = {
+ "service-function-path-state": "service-function-path"
+}
+
+ODL_PLURAL_EXCEPTIONS = {
+ "service-function-path-state": "service-function-paths-state"
+}
+
+
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
@@ -20,31 +29,36 @@ def actual_rsps_in_compute(ovs_logger, compute_ssh):
load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
'''
- match_rsp = re.compile(
- r'.+tp_dst=([0-9]+).+load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\].+')
+ match_rsp = re.compile(r'.+'
+ r'(tp_(?:src|dst)=[0-9]+)'
+ r'.+'
+ r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
+ r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
flows = (ovs_logger.ofctl_dump_flows(compute_ssh, 'br-int', '101')
.strip().split('\n')[1:])
matching_flows = [match_rsp.match(f) for f in flows]
- # group(1) = 22 (tp_dst value) | group(2) = 0xff (rsp value)
- rsps_in_compute = ['{0}_{1}'.format(mf.group(2), mf.group(1))
+ # group(1) = tp_dst=22 | group(2) = 0xff (rsp value)
+ rsps_in_compute = ['{0}|{1}'.format(mf.group(2), mf.group(1))
for mf in matching_flows if mf is not None]
return rsps_in_compute
-def get_active_rsps(odl_ip, odl_port):
+def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
'''
Queries operational datastore and returns the RSPs for which we have
- created a classifier (ACL). These are considered as active RSPs
- for which classification rules should exist in the compute nodes
+ created a classifier (ACL) on the specified neutron ports. These are
+ considered as active RSPs on those ports for which classification rules
+ should exist in the compute node on which such ports are located.
- This function enhances the returned dictionary with the
- destination port of the ACL.
+ This function enhances each returned RSP with the openflow matches on
+ the tcp ports that classify traffic into that RSP.
'''
+ port_ids = [port.id for port in neutron_ports]
acls = get_odl_acl_list(odl_ip, odl_port)
- rsps = []
+ rsps = {}
for acl in acls['access-lists']['acl']:
try:
# We get the first ace. ODL creates a new ACL
@@ -55,47 +69,95 @@ def get_active_rsps(odl_ip, odl_port):
acl['acl-name']))
continue
- if not ('netvirt-sfc-acl:rsp-name' in ace['actions']):
+ matches = ace['matches']
+
+ # We are just interested in the destination-port-range matches
+ # that we use throughout the tests
+ if matches.get('destination-port-range') is None:
+ continue
+ tcp_port = matches['destination-port-range']['lower-port']
+
+ # A single ace may classify traffic into a forward path
+ # and optionally into a reverse path if destination port is set
+ src_port = matches.get('netvirt-sfc-acl:source-port-uuid')
+ dst_port = matches.get('netvirt-sfc-acl:destination-port-uuid')
+ forward_of_match = None
+ reverse_of_match = None
+ if src_port in port_ids:
+ forward_of_match = 'tp_dst=' + str(tcp_port)
+ if dst_port in port_ids:
+ # For classification to the reverse path
+ # the openflow match inverts
+ reverse_of_match = 'tp_src=' + str(tcp_port)
+
+ # This ACL does not apply to any of the given ports
+ if not forward_of_match and not reverse_of_match:
continue
- rsp_name = ace['actions']['netvirt-sfc-acl:rsp-name']
- rsp = get_odl_resource_elem(odl_ip,
- odl_port,
- 'rendered-service-path',
- rsp_name,
- datastore='operational')
- '''
- Rsps are returned in the format:
- {
- "rendered-service-path": [
- {
- "name": "Path-red-Path-83",
- "path-id": 83,
- ...
- "rendered-service-path-hop": [
- {
- ...
- "service-function-name": "testVNF1",
- "service-index": 255
- ...
- 'rendered-service-path' Is returned as a list with one
- element (we select by name and the names are unique)
- '''
- rsp_port = rsp['rendered-service-path'][0]
- rsp_port['dst-port'] = (ace['matches']
- ['destination-port-range']['lower-port'])
- rsps.append(rsp_port)
- return rsps
-
-
-def promised_rsps_in_computes(odl_ip, odl_port):
+ actions = ace['actions']
+ rsp_names = get_rsps_from_netvirt_acl_actions(odl_ip,
+ odl_port,
+ actions)
+
+ for rsp_name in rsp_names:
+ rsp = rsps.get(rsp_name)
+ if not rsp:
+ rsp = get_rsp(odl_ip, odl_port, rsp_name)
+ of_matches = rsp.get('of-matches', [])
+ if reverse_of_match and rsp.get('reverse-path'):
+ of_matches.append(reverse_of_match)
+ elif forward_of_match and not rsp.get('reverse-path'):
+ of_matches.append(forward_of_match)
+ rsp['of-matches'] = of_matches
+ rsps[rsp_name] = rsp
+
+ return rsps.values()
+
+
+def get_rsps_from_netvirt_acl_actions(odl_ip, odl_port, netvirt_acl_actions):
+ '''
+ Return the list of RSPs referenced from the netvirt sfc redirect action
+ '''
+ rsp_names = []
+
+ if 'netvirt-sfc-acl:rsp-name' in netvirt_acl_actions:
+ rsp_names.append(netvirt_acl_actions['netvirt-sfc-acl:rsp-name'])
+
+ if 'netvirt-sfc-acl:sfp-name' in netvirt_acl_actions:
+ # If the acl redirect action is a sfp instead of rsp
+ # we need to get the rsps associated to that sfp
+ sfp_name = netvirt_acl_actions['netvirt-sfc-acl:sfp-name']
+ sfp_state = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'service-function-path-state',
+ sfp_name,
+ datastore='operational')
+ sfp_rsps = sfp_state.get('sfp-rendered-service-path', [])
+ sfp_rsp_names = [rsp['name'] for rsp in sfp_rsps if 'name' in rsp]
+ rsp_names.extend(sfp_rsp_names)
+
+ return rsp_names
+
+
+def get_rsp(odl_ip, odl_port, rsp_name):
+ rsp = get_odl_resource_elem(odl_ip,
+ odl_port,
+ 'rendered-service-path',
+ rsp_name,
+ datastore='operational')
+ return rsp
+
+
+def promised_rsps_in_compute(odl_ip, odl_port, compute_ports):
'''
- Return a list of rsp_port which represents the rsp id and the destination
- port configured in ODL
+ Return a list of rsp|of_match which represents the RSPs and openflow
+ matches on the source/destination port that classify traffic into such
+ RSP as configured in ODL ACLs
'''
- rsps = get_active_rsps(odl_ip, odl_port)
- rsps_in_computes = ['{0}_{1}'.format(hex(rsp['path-id']), rsp['dst-port'])
- for rsp in rsps]
+ rsps = get_active_rsps_on_ports(odl_ip, odl_port, compute_ports)
+ rsps_in_computes = ['{0}|{1}'.format(hex(rsp['path-id']), of_match)
+ for rsp in rsps
+ for of_match in rsp['of-matches']]
return rsps_in_computes
@@ -116,19 +178,20 @@ def timethis(func):
@timethis
def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
- compute_client_name, timeout=200):
+ compute_name, neutron_ports, timeout=200):
'''
Check if the classification rules configured in ODL are implemented in OVS.
We know by experience that this process might take a while
'''
try:
- compute = find_compute(compute_client_name, compute_nodes)
+ compute = find_compute(compute_name, compute_nodes)
# Find the configured rsps in ODL. Its format is nsp_destPort
promised_rsps = []
timeout2 = 10
while not promised_rsps:
- promised_rsps = promised_rsps_in_computes(odl_ip, odl_port)
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
timeout2 -= 1
if timeout2 == 0:
os_sfc_utils.get_tacker_items()
@@ -137,7 +200,8 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
- logger.info("RSPs in ODL Operational DataStore:")
+ logger.info("RSPs in ODL Operational DataStore "
+ "for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
# Fetch the rsps implemented in the compute
@@ -181,8 +245,18 @@ def get_odl_ip_port(nodes):
return ip, port
-def pluralize(s):
- return '{0}s'.format(s)
+def pluralize(resource):
+ plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
+ if not plural:
+ plural = '{0}s'.format(resource)
+ return plural
+
+
+def get_module(resource):
+ module = ODL_MODULE_EXCEPTIONS.get(resource, None)
+ if not module:
+ module = resource
+ return module
def format_odl_resource_list_url(odl_ip, odl_port, resource,
@@ -190,7 +264,8 @@ def format_odl_resource_list_url(odl_ip, odl_port, resource,
odl_pwd='admin'):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
.format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=resource, rsrcs=pluralize(resource)))
+ ds=datastore, rsrc=get_module(resource),
+ rsrcs=pluralize(resource)))
def format_odl_resource_elem_url(odl_ip, odl_port, resource,
@@ -216,7 +291,12 @@ def get_odl_resource_elem(odl_ip, odl_port, resource,
elem_name, datastore='config'):
url = format_odl_resource_elem_url(
odl_ip, odl_port, resource, elem_name, datastore=datastore)
- return requests.get(url).json()
+ response = requests.get(url).json()
+ # Response is in the format of a dictionary containing
+ # a single value that is an array with the element requested:
+ # {'resource' : [element]}
+ # Return just the element
+ return response.get(resource, [{}])[0]
def delete_odl_resource_elem(odl_ip, odl_port, resource, elem_name,
@@ -302,10 +382,10 @@ def find_compute(compute_client_name, compute_nodes):
return compute
-def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, compute_client_name,
- compute_nodes, retries=20):
+def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
+ compute_client_name, compute_nodes, retries=20):
'''
- First, RSPs are checked in the oprational datastore of ODL. Nothing should
+ First, RSPs are checked in the operational datastore of ODL. Nothing
should exist. As it might take a while for ODL to remove that, some
retries are needed.
@@ -316,7 +396,7 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, compute_client_name,
# Check RSPs
while retries_counter > 0:
- if (get_active_rsps(odl_ip, odl_port)):
+ if get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
retries_counter -= 1
time.sleep(3)
else:
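The odl_utils.py rework above keys everything on 'nsp|of_match' strings so the RSPs promised by ODL can be compared against the flows actually installed in OVS. A rough illustration of how the two sides arrive at the same identifier, using a made-up path id, port and flow line rather than output from a real deployment:

import re

# Identifier that promised_rsps_in_compute() would build for an RSP with
# path-id 0x27 whose ACL classifies tcp/80 into the forward path.
promised = '{0}|{1}'.format(hex(0x27), 'tp_dst=80')

# Identifier that actual_rsps_in_compute() would extract from an OVS flow.
flow = ('priority=500,tcp,reg0=0x1,tp_dst=80 '
        'actions=push_nsh,load:0x27->NXM_NX_NSP[0..23],resubmit(,17)')
match_rsp = re.compile(r'.+'
                       r'(tp_(?:src|dst)=[0-9]+)'
                       r'.+'
                       r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
                       r'.+')
mf = match_rsp.match(flow)
actual = '{0}|{1}'.format(mf.group(2), mf.group(1))

assert promised == actual == '0x27|tp_dst=80'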
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index 4a1c634d..b7254bf1 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -157,18 +157,30 @@ class OpenStackSFC:
'''
Return the compute where the client sits
'''
+ return self.get_vm_compute('client')
+
+ def get_compute_server(self):
+ '''
+ Return the compute where the server sits
+ '''
+ return self.get_vm_compute('server')
+
+ def get_vm_compute(self, vm_name):
+ '''
+ Return the compute where the vm sits
+ '''
for creator in self.creators:
# We want to filter the vm creators
if hasattr(creator, 'get_vm_inst'):
- # We want to fetch only the client
- if creator.get_vm_inst().name == 'client':
+ # We want to fetch by vm_name
+ if creator.get_vm_inst().name == vm_name:
return creator.get_vm_inst().compute_host
- raise Exception("There is no client VM!!")
+ raise Exception("There is no VM with name '{}'!!".format(vm_name))
def assign_floating_ip(self, router, vm, vm_creator):
'''
- Assign a floating ips to all the VMs
+ Assign floating ips to all the VMs
'''
name = vm.name + "-float"
port_name = vm.ports[0].name
@@ -180,9 +192,11 @@ class OpenStackSFC:
return ip.ip
# We need this function because tacker VMs cannot be created through SNAPs
- def assign_floating_ip_vnfs(self, router):
+ def assign_floating_ip_vnfs(self, router, ips=None):
'''
- Assign a floating ips to all the SFs
+ Assign floating ips to all the SFs. Optionally specify the
+ subnet IPs that a floating IP should be assigned to, assuming that the
+ SF is connected to a single subnet globally and per port.
'''
stacks = self.heat.stacks.list()
fips = []
@@ -198,8 +212,25 @@ class OpenStackSFC:
servers[0],
self.image_settings,
project_name)
- port_name = servers[0].ports[0].name
+
name = servers[0].name + "-float"
+ if ips is None:
+ port_name = servers[0].ports[0].name
+ else:
+ port_name = None
+ for port in servers[0].ports:
+ if port.ips[0]['ip_address'] in ips:
+ port_name = port.name
+ break
+
+ if port_name is None:
+ err_msg = "The VNF {} does not have any suitable port {} " \
+ "for floating IP assignment".format(
+ name,
+ 'with ip any of ' + str(ips) if ips else '')
+ logger.error(err_msg)
+ raise Exception(err_msg)
+
float_ip = FloatingIpConfig(name=name,
port_name=port_name,
router_name=router.name)
@@ -298,7 +329,7 @@ def list_vnfds(tacker_client, verbose=False):
if not verbose:
vnfds = [vnfd['id'] for vnfd in vnfds['vnfds']]
return vnfds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfds(tacker_client)]: %s" % e)
return None
@@ -313,7 +344,7 @@ def create_vnfd(tacker_client, tosca_file=None, vnfd_name=None):
return tacker_client.create_vnfd(
body={"vnfd": {"attributes": {"vnfd": vnfd_body},
"name": vnfd_name}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnfd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
@@ -327,7 +358,7 @@ def delete_vnfd(tacker_client, vnfd_id=None, vnfd_name=None):
raise Exception('You need to provide VNFD id or VNFD name')
vnfd = get_vnfd_id(tacker_client, vnfd_name)
return tacker_client.delete_vnfd(vnfd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnfd(tacker_client, '%s', '%s')]: %s"
% (vnfd_id, vnfd_name, e))
return None
@@ -339,7 +370,7 @@ def list_vnfs(tacker_client, verbose=False):
if not verbose:
vnfs = [vnf['id'] for vnf in vnfs['vnfs']]
return vnfs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnfs(tacker_client)]: %s" % e)
return None
@@ -374,7 +405,7 @@ def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnf_body['vnf']['vim_id'] = get_vim_id(tacker_client, vim_name)
return tacker_client.create_vnf(body=vnf_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnf(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnf_name, vnfd_id, vnfd_name, e))
@@ -394,12 +425,28 @@ def get_vnf(tacker_client, vnf_id=None, vnf_name=None):
else:
raise Exception('Could not retrieve ID from name [%s]' % vnf_name)
- except Exception, e:
+ except Exception as e:
logger.error("Could not retrieve VNF [vnf_id=%s, vnf_name=%s] - %s"
% (vnf_id, vnf_name, e))
return None
+def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
+ """
+ Get the management ip of the first VNF component as obtained from the
+ tacker REST API:
+
+ {
+ "vnf": {
+ ...
+ "mgmt_url": "{\"VDU1\": \"192.168.120.3\"}",
+ ...
+ }
+ """
+ vnf = get_vnf(tacker_client, vnf_id, vnf_name)
+ return json.loads(vnf['mgmt_url']).values()[0]
+
+
def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
try:
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
@@ -419,7 +466,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
raise Exception('Timeout when booting vnf %s' % vnf['id'])
return vnf['id']
- except Exception, e:
+ except Exception as e:
logger.error("error [wait_for_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
@@ -433,7 +480,7 @@ def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
raise Exception('You need to provide a VNF id or name')
vnf = get_vnf_id(tacker_client, vnf_name)
return tacker_client.delete_vnf(vnf)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnf(tacker_client, '%s', '%s')]: %s"
% (vnf_id, vnf_name, e))
return None
@@ -447,7 +494,7 @@ def create_vim(tacker_client, vim_file=None):
vim_body = json.load(vim_fd)
logger.info('VIM template:\n{0}'.format(vim_body))
return tacker_client.create_vim(body=vim_body)
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vim(tacker_client, '%s')]: %s"
% (vim_file, e))
return None
@@ -463,14 +510,14 @@ def create_vnffgd(tacker_client, tosca_file=None, vnffgd_name=None):
return tacker_client.create_vnffgd(
body={'vnffgd': {'name': vnffgd_name,
'template': {'vnffgd': vnffgd_body}}})
- except Exception, e:
+ except Exception as e:
logger.error("Error [create_vnffgd(tacker_client, '%s')]: %s"
% (tosca_file, e))
return None
def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
- vnffgd_name=None, param_file=None):
+ vnffgd_name=None, param_file=None, symmetrical=False):
'''
Creates the vnffg which will provide the RSP and the classifier
'''
@@ -478,7 +525,8 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
vnffg_body = {
'vnffg': {
'attributes': {},
- 'name': vnffg_name
+ 'name': vnffg_name,
+ 'symmetrical': symmetrical
}
}
if param_file is not None:
@@ -495,7 +543,7 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
vnffg_body['vnffg']['vnffgd_id'] = get_vnffgd_id(tacker_client,
vnffgd_name)
return tacker_client.create_vnffg(body=vnffg_body)
- except Exception, e:
+ except Exception as e:
logger.error("error [create_vnffg(tacker_client,"
" '%s', '%s', '%s')]: %s"
% (vnffg_name, vnffgd_id, vnffgd_name, e))
@@ -508,7 +556,7 @@ def list_vnffgds(tacker_client, verbose=False):
if not verbose:
vnffgds = [vnffgd['id'] for vnffgd in vnffgds['vnffgds']]
return vnffgds
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgds(tacker_client)]: %s" % e)
return None
@@ -519,7 +567,7 @@ def list_vnffgs(tacker_client, verbose=False):
if not verbose:
vnffgs = [vnffg['id'] for vnffg in vnffgs['vnffgs']]
return vnffgs
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vnffgs(tacker_client)]: %s" % e)
return None
@@ -532,7 +580,7 @@ def delete_vnffg(tacker_client, vnffg_id=None, vnffg_name=None):
raise Exception('You need to provide a VNFFG id or name')
vnffg = get_vnffg_id(tacker_client, vnffg_name)
return tacker_client.delete_vnffg(vnffg)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffg(tacker_client, '%s', '%s')]: %s"
% (vnffg_id, vnffg_name, e))
return None
@@ -546,7 +594,7 @@ def delete_vnffgd(tacker_client, vnffgd_id=None, vnffgd_name=None):
raise Exception('You need to provide VNFFGD id or VNFFGD name')
vnffgd = get_vnffgd_id(tacker_client, vnffgd_name)
return tacker_client.delete_vnffgd(vnffgd)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vnffgd(tacker_client, '%s', '%s')]: %s"
% (vnffgd_id, vnffgd_name, e))
return None
@@ -558,7 +606,7 @@ def list_vims(tacker_client, verbose=False):
if not verbose:
vims = [vim['id'] for vim in vims['vims']]
return vims
- except Exception, e:
+ except Exception as e:
logger.error("Error [list_vims(tacker_client)]: %s" % e)
return None
@@ -571,7 +619,7 @@ def delete_vim(tacker_client, vim_id=None, vim_name=None):
raise Exception('You need to provide VIM id or VIM name')
vim = get_vim_id(tacker_client, vim_name)
return tacker_client.delete_vim(vim)
- except Exception, e:
+ except Exception as e:
logger.error("Error [delete_vim(tacker_client, '%s', '%s')]: %s"
% (vim_id, vim_name, e))
return None
@@ -626,19 +674,28 @@ def create_vnf_in_av_zone(
def create_vnffg_with_param_file(tacker_client, vnffgd_name, vnffg_name,
- default_param_file, neutron_port):
+ default_param_file, client_port,
+ server_port=None, server_ip=None):
param_file = default_param_file
-
- if neutron_port is not None:
+ data = {}
+ if client_port:
+ data['net_src_port_id'] = client_port
+ if server_port:
+ data['net_dst_port_id'] = server_port
+ if server_ip:
+ data['ip_dst_prefix'] = server_ip
+
+ if client_port is not None or server_port is not None:
param_file = os.path.join(
'/tmp',
- 'param_{0}.json'.format(neutron_port))
- data = {
- 'net_src_port_id': neutron_port
- }
+ 'param_{0}.json'.format(vnffg_name))
with open(param_file, 'w+') as f:
json.dump(data, f)
+
+ symmetrical = True if client_port and server_port else False
+
create_vnffg(tacker_client,
vnffgd_name=vnffgd_name,
vnffg_name=vnffg_name,
- param_file=param_file)
+ param_file=param_file,
+ symmetrical=symmetrical)
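create_vnffg_with_param_file() above now writes the classifier parameters to a JSON file named after the VNFFG and flags the chain as symmetrical when both a client and a server port are given. A small sketch of the file it would produce, with placeholder port UUIDs and server prefix (illustrative values, not from a real run):

import json

vnffg_name = 'test-vnffg'  # hypothetical name, as used in the symmetric test
data = {
    'net_src_port_id': '11111111-2222-3333-4444-555555555555',  # client port
    'net_dst_port_id': '66666666-7777-8888-9999-aaaaaaaaaaaa',  # server port
    'ip_dst_prefix': '11.0.0.5/32',                              # server IP
}

param_file = '/tmp/param_{0}.json'.format(vnffg_name)
with open(param_file, 'w+') as f:
    json.dump(data, f)

# Both ports are present, so the VNFFG would be created with symmetrical=True.
symmetrical = bool(data.get('net_src_port_id') and data.get('net_dst_port_id'))
print(param_file, symmetrical)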
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index c495aa11..18c55dc1 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -124,17 +124,20 @@ def start_http_server(ip, iterations_check=10):
return False
-def start_vxlan_tool(remote_ip, interface="eth0", block=None):
+def start_vxlan_tool(remote_ip, interface="eth0", output=None, block=None):
"""
Starts vxlan_tool on a remote host.
vxlan_tool.py converts a regular Service Function into a NSH-aware SF
when the "--do forward" option is used, it decrements the NSI appropriately.
- 'block' parameters allows to specify a port where packets will be dropped.
+ 'output' allows specifying an interface through which to forward if
+ different from the input interface.
+ 'block' allows specifying a port on which packets will be dropped.
"""
command = "nohup python /root/vxlan_tool.py"
- options = "{do} {interface} {block_option}".format(
+ options = "{do} {interface} {output_option} {block_option}".format(
do="--do forward",
interface="--interface {}".format(interface),
+ output_option="--output {}".format(output) if output else "",
block_option="--block {}".format(block) if block is not None else "")
output_redirection = "> /dev/null 2>&1"
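start_vxlan_tool() above only assembles and launches a command line on the SF; the new 'output' argument simply adds one more flag. A sketch of the resulting command string under those assumptions (the SSH execution step is left out):

def build_vxlan_tool_cmd(interface='eth0', output=None, block=None):
    # Mirrors the option assembly above; the real helper prefixes the
    # command with nohup and runs it on the remote host over SSH.
    options = "{do} {interface} {output_option} {block_option}".format(
        do="--do forward",
        interface="--interface {}".format(interface),
        output_option="--output {}".format(output) if output else "",
        block_option="--block {}".format(block) if block is not None else "")
    return "nohup python /root/vxlan_tool.py " + options + " > /dev/null 2>&1"

# Forward eth0 -> eth1 while dropping HTTP, as the symmetric test does later:
print(build_vxlan_tool_cmd(interface='eth0', output='eth1', block='80'))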
diff --git a/sfc/tests/functest/config-pike.yaml b/sfc/tests/functest/config-pike.yaml
new file mode 100644
index 00000000..eff95c08
--- /dev/null
+++ b/sfc/tests/functest/config-pike.yaml
@@ -0,0 +1,84 @@
+---
+defaults:
+ # odl-sfc uses custom flavors as per below params
+ flavor: custom
+ ram_size_in_mb: 500
+ disk_size_in_gb: 1
+ vcpu_count: 1
+ image_name: sfc_nsh_fraser
+ installer:
+ fuel:
+ user: root
+ password: r00tme
+ cluster: 1 # Change this to the id of the desired fuel env (1, 2, 3...)
+ apex:
+ user: stack
+ pkey_file: "/root/.ssh/id_rsa"
+ osa:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
+ compass:
+ user: root
+ pkey_file: "/root/.ssh/id_rsa"
+ image_format: qcow2
+ image_url: "http://artifacts.opnfv.org/sfc/images/sfc_nsh_fraser.qcow2"
+ vnfd-dir: "vnfd-templates"
+ vnfd-default-params-file: "test-vnfd-default-params.yaml"
+
+
+testcases:
+ sfc_one_chain_two_service_functions:
+ enabled: true
+ order: 0
+ description: "ODL-SFC Testing SFs when they are located on the same chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnfd_blue: "test-one-chain-vnfd2.yaml"
+ test_vnffgd_red: "test-one-chain-vnffgd-pike.yaml"
+
+ sfc_two_chains_SSH_and_HTTP:
+ enabled: false
+ order: 1
+ description: "ODL-SFC tests with two chains and one SF per chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-two-chains-vnfd1.yaml"
+ test_vnfd_blue: "test-two-chains-vnfd2.yaml"
+ test_vnffgd_red: "test-two-chains-vnffgd1-pike.yaml"
+ test_vnffgd_blue: "test-two-chains-vnffgd2-pike.yaml"
+
+ sfc_symmetric_chain:
+ enabled: false
+ order: 2
+ description: "Verify the behavior of a symmetric service chain"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd: "test-symmetric-vnfd.yaml"
+ test_vnffgd: "test-symmetric-vnffgd.yaml"
+ source_port: 22222
+
+ sfc_chain_deletion:
+ enabled: false
+ order: 3
+ description: "Verify if chains work correctly after deleting one"
+ net_name: example-net
+ subnet_name: example-subnet
+ router_name: example-router
+ subnet_cidr: "11.0.0.0/24"
+ secgroup_name: "example-sg"
+ secgroup_descr: "Example Security group"
+ test_vnfd_red: "test-one-chain-vnfd1.yaml"
+ test_vnffgd_red: "test-deletion-vnffgd-pike.yaml"
diff --git a/sfc/tests/functest/config.yaml b/sfc/tests/functest/config.yaml
index 3cd1883a..cad3cf72 100644
--- a/sfc/tests/functest/config.yaml
+++ b/sfc/tests/functest/config.yaml
@@ -67,8 +67,8 @@ testcases:
secgroup_name: "example-sg"
secgroup_descr: "Example Security group"
test_vnfd: "test-symmetric-vnfd.yaml"
- allowed_source_port: 22222
- blocked_source_port: 33333
+ test_vnffgd: "test-symmetric-vnffgd.yaml"
+ source_port: 22222
sfc_chain_deletion:
enabled: false
diff --git a/sfc/tests/functest/sfc_chain_deletion.py b/sfc/tests/functest/sfc_chain_deletion.py
index f96eb120..9fde460f 100644
--- a/sfc/tests/functest/sfc_chain_deletion.py
+++ b/sfc/tests/functest/sfc_chain_deletion.py
@@ -163,7 +163,8 @@ def main():
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),))
+ odl_port, openstack_sfc.get_compute_client(),
+ [neutron_port],))
try:
t1.start()
@@ -209,6 +210,7 @@ def main():
os_sfc_utils.delete_vnffgd(tacker_client, vnffgd_name='red')
if not odl_utils.check_vnffg_deletion(odl_ip, odl_port, ovs_logger,
+ [neutron_port],
openstack_sfc.get_compute_client(),
compute_nodes):
logger.debug("The chains were not correctly removed")
@@ -220,7 +222,8 @@ def main():
# Start measuring the time it takes to implement the classification rules
t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),))
+ odl_port, openstack_sfc.get_compute_client(),
+ [neutron_port],))
try:
t2.start()
except Exception as e:
diff --git a/sfc/tests/functest/sfc_one_chain_two_service_functions.py b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
index 58323bf3..07f7814c 100644
--- a/sfc/tests/functest/sfc_one_chain_two_service_functions.py
+++ b/sfc/tests/functest/sfc_one_chain_two_service_functions.py
@@ -187,7 +187,8 @@ def main():
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),))
+ odl_port, openstack_sfc.get_compute_client(),
+ [neutron_port],))
try:
t1.start()
except Exception as e:
diff --git a/sfc/tests/functest/sfc_symmetric_chain.py b/sfc/tests/functest/sfc_symmetric_chain.py
index e3b1d57e..43599d62 100644
--- a/sfc/tests/functest/sfc_symmetric_chain.py
+++ b/sfc/tests/functest/sfc_symmetric_chain.py
@@ -20,7 +20,7 @@ import opnfv.utils.ovs_logger as ovs_log
from opnfv.deployment.factory import Factory as DeploymentFactory
import sfc.lib.config as sfc_config
-import sfc.lib.utils as test_utils
+import sfc.lib.test_utils as test_utils
from sfc.lib.results import Results
import sfc.lib.topology_shuffler as topo_shuffler
@@ -60,7 +60,7 @@ def main():
tacker_client = os_sfc_utils.get_tacker_client()
- _, custom_flavor = openstack_sfc.get_or_create_flavor(
+ custom_flavor = openstack_sfc.create_flavor(
COMMON_CONFIG.flavor,
COMMON_CONFIG.ram_size_in_mb,
COMMON_CONFIG.disk_size_in_gb,
@@ -91,8 +91,9 @@ def main():
sg = openstack_sfc.create_security_group(TESTCASE_CONFIG.secgroup_name)
vnf_name = 'testVNF1'
- # Using seed=0 uses the baseline topology: everything in the same host
- testTopology = topo_shuffler.topology([vnf_name], openstack_sfc, seed=0)
+ topo_seed = topo_shuffler.get_seed()
+ testTopology = topo_shuffler.topology([vnf_name], openstack_sfc,
+ seed=topo_seed)
logger.info('This test is run with the topology {0}'
.format(testTopology['id']))
logger.info('Topology description: {0}'
@@ -100,15 +101,17 @@ def main():
client_instance, client_creator = openstack_sfc.create_instance(
CLIENT, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['client'])
+ av_zone=testTopology[CLIENT])
server_instance, server_creator = openstack_sfc.create_instance(
SERVER, COMMON_CONFIG.flavor, image_creator, network, sg,
- av_zone=testTopology['server'])
+ av_zone=testTopology[SERVER])
server_ip = server_instance.ports[0].ips[0]['ip_address']
logger.info("Server instance received private ip [{}]".format(server_ip))
+ os_sfc_utils.register_vim(tacker_client, vim_file=COMMON_CONFIG.vim_file)
+
tosca_file = os.path.join(
COMMON_CONFIG.sfc_test_dir,
COMMON_CONFIG.vnfd_dir,
@@ -119,11 +122,15 @@ def main():
COMMON_CONFIG.vnfd_dir,
COMMON_CONFIG.vnfd_default_params_file)
- os_sfc_utils.create_vnfd(tacker_client, tosca_file=tosca_file)
- test_utils.create_vnf_in_av_zone(
+ os_sfc_utils.create_vnfd(
+ tacker_client,
+ tosca_file=tosca_file,
+ vnfd_name='test-vnfd1')
+ os_sfc_utils.create_vnf_in_av_zone(
tacker_client,
vnf_name,
'test-vnfd1',
+ 'test-vim',
default_param_file,
testTopology[vnf_name])
@@ -132,37 +139,40 @@ def main():
logger.error('ERROR while booting VNF')
sys.exit(1)
- os_sfc_utils.create_sfc(
+ tosca_file = os.path.join(
+ COMMON_CONFIG.sfc_test_dir,
+ COMMON_CONFIG.vnffgd_dir,
+ TESTCASE_CONFIG.test_vnffgd)
+ os_sfc_utils.create_vnffgd(
+ tacker_client,
+ tosca_file=tosca_file,
+ vnffgd_name='test-vnffgd')
+
+ client_port = openstack_sfc.get_client_port(
+ client_instance,
+ client_creator)
+ server_port = openstack_sfc.get_client_port(
+ server_instance,
+ server_creator)
+
+ server_ip_prefix = server_ip + '/32'
+
+ os_sfc_utils.create_vnffg_with_param_file(
tacker_client,
- sfc_name='red',
- chain_vnf_names=[vnf_name],
- symmetrical=True)
-
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http', sfc_name='red',
- match={
- 'source_port': 0,
- 'dest_port': 80,
- 'protocol': 6
- })
-
- # FIXME: JIRA SFC-86
- # Tacker does not allow to specify the direction of the chain to be used,
- # only references the SFP (which for symmetric chains results in two RSPs)
- os_sfc_utils.create_sfc_classifier(
- tacker_client, 'red_http_reverse', sfc_name='red',
- match={
- 'source_port': 80,
- 'dest_port': 0,
- 'protocol': 6
- })
-
- logger.info(test_utils.run_cmd('tacker sfc-list'))
- logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))
+ 'test-vnffgd',
+ 'test-vnffg',
+ default_param_file,
+ client_port.id,
+ server_port.id,
+ server_ip_prefix)
# Start measuring the time it takes to implement the classification rules
- t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
- args=(ovs_logger, compute_nodes, odl_ip, odl_port,))
+ t1 = threading.Thread(
+ target=wait_for_classification_rules,
+ args=(ovs_logger, compute_nodes,
+ openstack_sfc.get_compute_server(), server_port,
+ openstack_sfc.get_compute_client(), client_port,
+ odl_ip, odl_port,))
try:
t1.start()
@@ -176,15 +186,19 @@ def main():
server_floating_ip = openstack_sfc.assign_floating_ip(router,
server_instance,
server_creator)
- fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router)
+
+ vnf_ip = os_sfc_utils.get_vnf_ip(tacker_client, vnf_id=vnf_id)
+ fips_sfs = openstack_sfc.assign_floating_ip_vnfs(router, [vnf_ip])
sf_floating_ip = fips_sfs[0]
- fips = [client_floating_ip, server_floating_ip, fips_sfs[0]]
+ fips = [client_floating_ip, server_floating_ip, sf_floating_ip]
for ip in fips:
logger.info("Checking connectivity towards floating IP [%s]" % ip)
if not test_utils.ping(ip, retries=50, retry_timeout=3):
logger.error("Cannot ping floating IP [%s]" % ip)
+ os_sfc_utils.get_tacker_items()
+ odl_utils.get_odl_items(odl_ip, odl_port)
sys.exit(1)
logger.info("Successful ping to floating IP [%s]" % ip)
@@ -197,39 +211,124 @@ def main():
logger.error('\033[91mFailed to start the HTTP server\033[0m')
sys.exit(1)
- blocked_port = TESTCASE_CONFIG.blocked_source_port
- logger.info("Firewall started, blocking traffic port %d" % blocked_port)
- test_utils.start_vxlan_tool(sf_floating_ip, block=blocked_port)
+ logger.info("Starting vxlan_tool on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
+ output='eth1')
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
+ output='eth0')
logger.info("Wait for ODL to update the classification rules in OVS")
t1.join()
- allowed_port = TESTCASE_CONFIG.allowed_source_port
- logger.info("Test if HTTP from port %s works" % allowed_port)
- if not test_utils.is_http_blocked(
- client_floating_ip, server_ip, allowed_port):
+ logger.info("Test HTTP")
+ if not test_utils.is_http_blocked(client_floating_ip,
+ server_ip,
+ TESTCASE_CONFIG.source_port):
results.add_to_summary(2, "PASS", "HTTP works")
else:
error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
logger.error(error)
test_utils.capture_ovs_logs(
ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "HTTP blocked")
+
+ logger.info("Changing the vxlan_tool to block HTTP request traffic")
+
+ # Make SF1 block http request traffic
+ test_utils.stop_vxlan_tool(sf_floating_ip)
+ logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
+ output='eth1', block="80")
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
+ output='eth0')
+
+ logger.info("Test HTTP again blocking request on SF1")
+ if test_utils.is_http_blocked(client_floating_ip,
+ server_ip,
+ TESTCASE_CONFIG.source_port):
+ results.add_to_summary(2, "PASS", "HTTP uplink blocked")
+ else:
+ error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ ovs_logger, controller_clients, compute_clients, error)
results.add_to_summary(2, "FAIL", "HTTP works")
- logger.info("Test if HTTP from port %s is blocked" % blocked_port)
- if test_utils.is_http_blocked(
- client_floating_ip, server_ip, blocked_port):
- results.add_to_summary(2, "PASS", "HTTP Blocked")
+ logger.info("Changing the vxlan_tool to block HTTP response traffic")
+
+ # Make SF1 block response http traffic
+ test_utils.stop_vxlan_tool(sf_floating_ip)
+ logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
+ output='eth1')
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
+ output='eth0',
+ block=TESTCASE_CONFIG.source_port)
+
+ logger.info("Test HTTP again blocking response on SF1")
+ if test_utils.is_http_blocked(client_floating_ip,
+ server_ip,
+ TESTCASE_CONFIG.source_port):
+ results.add_to_summary(2, "PASS", "HTTP downlink blocked")
else:
- error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
+ error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
logger.error(error)
test_utils.capture_ovs_logs(
ovs_logger, controller_clients, compute_clients, error)
- results.add_to_summary(2, "FAIL", "HTTP Blocked")
+ results.add_to_summary(2, "FAIL", "HTTP works")
+
+ logger.info("Changing the vxlan_tool to allow HTTP traffic")
+
+ # Make SF1 allow http traffic
+ test_utils.stop_vxlan_tool(sf_floating_ip)
+ logger.info("Starting HTTP firewall on %s" % sf_floating_ip)
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth0',
+ output='eth1')
+ test_utils.start_vxlan_tool(sf_floating_ip, interface='eth1',
+ output='eth0')
+
+ logger.info("Test HTTP")
+ if not test_utils.is_http_blocked(client_floating_ip, server_ip):
+ results.add_to_summary(2, "PASS", "HTTP restored")
+ else:
+ error = ('\033[91mTEST 4 [FAILED] ==> HTTP BLOCKED\033[0m')
+ logger.error(error)
+ test_utils.capture_ovs_logs(
+ ovs_logger, controller_clients, compute_clients, error)
+ results.add_to_summary(2, "FAIL", "HTTP blocked")
return results.compile_summary(), openstack_sfc.creators
+def wait_for_classification_rules(ovs_logger, compute_nodes,
+ server_compute, server_port,
+ client_compute, client_port,
+ odl_ip, odl_port):
+ if client_compute == server_compute:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ [server_port, client_port])
+ else:
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ server_compute,
+ server_port)
+ odl_utils.wait_for_classification_rules(
+ ovs_logger,
+ compute_nodes,
+ odl_ip,
+ odl_port,
+ client_compute,
+ client_port)
+
+
if __name__ == '__main__':
logging.config.fileConfig(COMMON_CONFIG.functest_logging_api)
main()
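The symmetric test above resolves the SF's floating-IP target through os_sfc_utils.get_vnf_ip(), which reads the management address out of the VNF record returned by tacker. A sketch of the payload shape that call relies on, with a made-up address:

import json

# The tacker VNF record carries mgmt_url as a JSON-encoded string that
# maps VDU names to management IPs; get_vnf_ip() returns the first value.
vnf = {'mgmt_url': json.dumps({'VDU1': '192.168.120.3'})}

mgmt_ips = json.loads(vnf['mgmt_url'])
vnf_ip = list(mgmt_ips.values())[0]
print(vnf_ip)  # 192.168.120.3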
diff --git a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
index 5c5abb33..a5133f00 100644
--- a/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
+++ b/sfc/tests/functest/sfc_two_chains_SSH_and_HTTP.py
@@ -185,7 +185,8 @@ def main():
# Start measuring the time it takes to implement the classification rules
t1 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),))
+ odl_port, openstack_sfc.get_compute_client(),
+ [neutron_port],))
try:
t1.start()
@@ -275,7 +276,8 @@ def main():
# Start measuring the time it takes to implement the classification rules
t2 = threading.Thread(target=odl_utils.wait_for_classification_rules,
args=(ovs_logger, compute_nodes, odl_ip,
- odl_port, openstack_sfc.get_compute_client(),))
+ odl_port, openstack_sfc.get_compute_client(),
+ [neutron_port],))
try:
t2.start()
except Exception as e:
diff --git a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
index 1f4c11f6..bf175ef7 100644
--- a/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
+++ b/sfc/tests/functest/vnfd-templates/test-symmetric-vnfd.yaml
@@ -15,12 +15,15 @@ topology_template:
nfv_compute:
properties:
num_cpus: 1
- mem_size: 2 GB
- disk_size: 10 GB
+ mem_size: 500 MB
+ disk_size: 1 GB
properties:
- image: sfc_nsh_euphrates
+ image: sfc_nsh_fraser
availability_zone: {get_input: zone}
mgmt_driver: noop
+ config: |
+ param0: key1
+ param1: key2
service_type: firewall
monitoring_policy:
name: ping
@@ -46,6 +49,18 @@ topology_template:
- virtualBinding:
node: VDU1
+ CP2:
+ type: tosca.nodes.nfv.CP.Tacker
+ properties:
+ management: false
+ order: 1
+ anti_spoofing_protection: false
+ requirements:
+ - virtualLink:
+ node: VL1
+ - virtualBinding:
+ node: VDU1
+
VL1:
type: tosca.nodes.nfv.VL
properties:
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
index 28b78ead..3f10e6b8 100644
--- a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml-queens
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd-pike.yaml
@@ -18,8 +18,8 @@ topology_template:
type: ACL
criteria:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
+ - destination_port_range: 80-80
+ - ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
index 3f10e6b8..28b78ead 100644
--- a/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-deletion-vnffgd.yaml
@@ -18,8 +18,8 @@ topology_template:
type: ACL
criteria:
- network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
index 544d6e8e..27c7d545 100644
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml-queens
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd-pike.yaml
@@ -18,8 +18,8 @@ topology_template:
type: ACL
criteria:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
+ - destination_port_range: 80-80
+ - ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
index 27c7d545..544d6e8e 100644
--- a/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-one-chain-vnffgd.yaml
@@ -18,8 +18,8 @@ topology_template:
type: ACL
criteria:
- network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 80-80
- - ip_proto: 6
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
new file mode 100644
index 00000000..6b14df1b
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml
@@ -0,0 +1,46 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case-symmetric
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+ net_dst_port_id:
+ type: string
+ ip_dst_prefix:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - network_dst_port_id: {get_input: net_dst_port_id}
+ - ip_dst_prefix: {get_input: ip_dst_prefix}
+ - destination_port_range: 80-80
+ - ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd1
+ capability: CP2
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP2]
+ constituent_vnfs: [test-vnfd1, test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
new file mode 100644
index 00000000..c40c447c
--- /dev/null
+++ b/sfc/tests/functest/vnffgd-templates/test-symmetric-vnffgd.yaml-queens
@@ -0,0 +1,46 @@
+---
+tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
+description: test-case-symmetric
+
+topology_template:
+ description: topology-template-test1
+ inputs:
+ net_src_port_id:
+ type: string
+ net_dst_port_id:
+ type: string
+ ip_dst_prefix:
+ type: string
+
+ node_templates:
+ Forwarding_path1:
+ type: tosca.nodes.nfv.FP.Tacker
+ description: creates path
+ properties:
+ id: 1
+ policy:
+ type: ACL
+ criteria:
+ - network_src_port_id: {get_input: net_src_port_id}
+ network_dst_port_id: {get_input: net_dst_port_id}
+ ip_dst_prefix: {get_input: ip_dst_prefix}
+ destination_port_range: 80-80
+ ip_proto: 6
+ path:
+ - forwarder: test-vnfd1
+ capability: CP1
+ - forwarder: test-vnfd1
+ capability: CP2
+
+ groups:
+ VNFFG1:
+ type: tosca.groups.nfv.VNFFG
+ description: creates chain
+ properties:
+ vendor: tacker
+ version: 1.0
+ number_of_endpoints: 2
+ dependent_virtual_link: [VL1, VL1]
+ connection_point: [CP1, CP2]
+ constituent_vnfs: [test-vnfd1, test-vnfd1]
+ members: [Forwarding_path1]
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
index 6123fb50..f0615e4e 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml-queens
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1-pike.yaml
@@ -1,33 +1,26 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-two-chains_HTTP Test
+description: test-case2_HTTP Test
topology_template:
- description: topology-template-test-two-chains
+ description: topology-template-test2
inputs:
net_src_port_id:
type: string
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
+ type: tosca.nodes.nfv.FP.Tacker
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - name: get_ssh
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 22-22
- ip_proto: 6
- - name: get_http
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
- path:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 22-80
+ - ip_proto: 6
+ path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
index f0615e4e..ceee363b 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd1.yaml
@@ -1,25 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_HTTP Test
+description: test-two-chains_HTTP Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 1
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd1
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml-queens b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
index 4d0763aa..ec18c9d6 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml-queens
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2-pike.yaml
@@ -1,33 +1,27 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-two-chains_SSH Test
+description: test-case2_SSH Test
topology_template:
- description: topology-template-test-two-chains
+ description: topology-template-test2
inputs:
net_src_port_id:
type: string
+
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.TackerV2
+ type: tosca.nodes.nfv.FP.Tacker
description: creates path
properties:
id: 2
policy:
type: ACL
criteria:
- - name: get_ssh
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 22-22
- ip_proto: 6
- - name: get_http
- classifier:
- network_src_port_id: {get_input: net_src_port_id}
- destination_port_range: 80-80
- ip_proto: 6
- path:
+ - network_src_port_id: {get_input: net_src_port_id}
+ - destination_port_range: 22-80
+ - ip_proto: 6
+ path:
- forwarder: test-vnfd2
capability: CP1
diff --git a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
index ec18c9d6..15739cc7 100644
--- a/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
+++ b/sfc/tests/functest/vnffgd-templates/test-two-chains-vnffgd2.yaml
@@ -1,26 +1,32 @@
---
tosca_definitions_version: tosca_simple_profile_for_nfv_1_0_0
-description: test-case2_SSH Test
+description: test-two-chains_SSH Test
topology_template:
- description: topology-template-test2
+ description: topology-template-test-two-chains
inputs:
net_src_port_id:
type: string
-
node_templates:
Forwarding_path1:
- type: tosca.nodes.nfv.FP.Tacker
+ type: tosca.nodes.nfv.FP.TackerV2
description: creates path
properties:
id: 2
policy:
type: ACL
criteria:
- - network_src_port_id: {get_input: net_src_port_id}
- - destination_port_range: 22-80
- - ip_proto: 6
+ - name: get_ssh
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 22-22
+ ip_proto: 6
+ - name: get_http
+ classifier:
+ network_src_port_id: {get_input: net_src_port_id}
+ destination_port_range: 80-80
+ ip_proto: 6
path:
- forwarder: test-vnfd2
capability: CP1
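The *-pike.yaml and default VNFFGD template pairs above differ only in how the ACL criteria are laid out: the templates kept for Pike list each criterion as its own entry, while the Queens-era variants group all criteria into a single map. A quick check of how the two forms parse, using PyYAML (already in requirements.txt) and a placeholder port id instead of the get_input reference:

import yaml

pike_style = yaml.safe_load("""
criteria:
  - network_src_port_id: PORT_UUID
  - destination_port_range: 80-80
  - ip_proto: 6
""")
print(pike_style['criteria'])    # three single-key dicts

queens_style = yaml.safe_load("""
criteria:
  - network_src_port_id: PORT_UUID
    destination_port_range: 80-80
    ip_proto: 6
""")
print(queens_style['criteria'])  # one dict holding all three keys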