Diffstat (limited to 'sfc/lib')
-rw-r--r--  sfc/lib/cleanup.py         |  83
-rw-r--r--  sfc/lib/config.py          | 111
-rw-r--r--  sfc/lib/odl_utils.py       |  93
-rw-r--r--  sfc/lib/openstack_utils.py | 517
-rw-r--r--  sfc/lib/results.py         |   1
-rw-r--r--  sfc/lib/test_utils.py      |  18
6 files changed, 594 insertions(+), 229 deletions(-)
diff --git a/sfc/lib/cleanup.py b/sfc/lib/cleanup.py
index 7a2f4053..e97034ad 100644
--- a/sfc/lib/cleanup.py
+++ b/sfc/lib/cleanup.py
@@ -1,8 +1,9 @@
+import logging
import sys
import time
-import logging
import sfc.lib.openstack_utils as os_sfc_utils
import sfc.lib.odl_utils as odl_utils
+from openstack import connection
logger = logging.getLogger(__name__)
@@ -73,15 +74,6 @@ def delete_vims():
os_sfc_utils.delete_vim(t, vim_id=vim)
-# Creators is a list full of SNAPs objects
-def delete_openstack_objects(creators):
- for creator in reversed(creators):
- try:
- creator.clean()
- except Exception as e:
- logger.error('Unexpected error cleaning - %s', e)
-
-
# Networking-odl generates a new security group when creating a router
# which is not tracked by SNAPs
def delete_untracked_security_groups():
@@ -91,32 +83,79 @@ def delete_untracked_security_groups():
def cleanup_odl(odl_ip, odl_port):
delete_odl_resources(odl_ip, odl_port, 'service-function-forwarder')
- delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
- delete_odl_resources(odl_ip, odl_port, 'service-function-path')
- delete_odl_resources(odl_ip, odl_port, 'service-function')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-chain')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function-path')
+ # delete_odl_resources(odl_ip, odl_port, 'service-function')
delete_odl_ietf_access_lists(odl_ip, odl_port)
-def cleanup(creators, odl_ip=None, odl_port=None):
+def cleanup_nsfc_objects():
+ '''
+ Clean up the networking-sfc objects created for the test
+ '''
+ # TODO Add n-sfc to snaps so that it can be removed through
+ # delete_openstack_objects
+ openstack_sfc = os_sfc_utils.OpenStackSFC()
+ openstack_sfc.delete_chain()
+ openstack_sfc.delete_port_groups()
+
+
+def cleanup_tacker_objects():
+ '''
+ Clean up the tacker objects created for the test
+ '''
delete_vnffgs()
delete_vnffgds()
delete_vnfs()
time.sleep(20)
delete_vnfds()
delete_vims()
- delete_openstack_objects(creators)
+
+
+def cleanup_mano_objects(mano):
+ '''
+ Clean up the MANO objects (chains, classifiers, etc.)
+ '''
+ if mano == 'tacker':
+ cleanup_tacker_objects()
+ elif mano == 'no-mano':
+ cleanup_nsfc_objects()
+
+
+def delete_openstack_objects(testcase_config, creators):
+ conn = connection.from_config(verify=False)
+ subnet_obj = None
+ for creator in creators:
+ if creator.name == testcase_config.subnet_name:
+ subnet_obj = creator
+
+ for creator in reversed(creators):
+ try:
+ logger.info("Deleting " + creator.name)
+ if creator.name == testcase_config.router_name:
+ logger.info("Removing subnet from router")
+ conn.network.remove_interface_from_router(
+ creator.id, subnet_obj.id)
+ time.sleep(2)
+ logger.info("Deleting router")
+ conn.network.delete_router(creator)
+ else:
+ creator.delete(conn.session)
+ time.sleep(2)
+ creators.remove(creator)
+ except Exception as e:
+ logger.error('Unexpected error cleaning - %s', e)
+
+
+def cleanup(testcase_config, creators, mano, odl_ip=None, odl_port=None):
+ cleanup_mano_objects(mano)
+ delete_openstack_objects(testcase_config, creators)
delete_untracked_security_groups()
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
-def cleanup_from_bash(odl_ip=None, odl_port=None):
- delete_vnffgs()
- delete_vnffgds()
- delete_vnfs()
- time.sleep(20)
- delete_vnfds()
- delete_vims()
+def cleanup_from_bash(odl_ip=None, odl_port=None, mano='no-mano'):
+ cleanup_mano_objects(mano=mano)
if odl_ip is not None and odl_port is not None:
cleanup_odl(odl_ip, odl_port)
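
Note: a minimal sketch of how a test case would drive the new cleanup
entry point after this change; testcase_config, creators and the ODL
coordinates are illustrative assumptions, not values from this commit:

    import sfc.lib.cleanup as sfc_cleanup

    # mano='tacker' tears down VNFFGs/VNFFGDs/VNFs/VNFDs/VIMs, while
    # mano='no-mano' removes the networking-sfc chains and port groups
    sfc_cleanup.cleanup(testcase_config, creators, mano='no-mano',
                        odl_ip='127.0.0.1', odl_port='8181')
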
diff --git a/sfc/lib/config.py b/sfc/lib/config.py
index a4f5d67b..bf9864a5 100644
--- a/sfc/lib/config.py
+++ b/sfc/lib/config.py
@@ -8,19 +8,19 @@
# http://www.apache.org/licenses/LICENSE-2.0
#
+
import logging
import os
+import sfc
import yaml
import functest
-
-from functest.utils import config
-from functest.utils import env
import functest.utils.functest_utils as ft_utils
-
-import sfc
import sfc.lib.test_utils as test_utils
+from functest.utils import config
+from functest.utils import env
+
logger = logging.getLogger(__name__)
@@ -43,50 +43,58 @@ class CommonConfig(object):
self.vnffgd_dir = os.path.join(self.sfc_test_dir, "vnffgd-templates")
self.functest_results_dir = os.path.join(
getattr(config.CONF, 'dir_results'), "odl-sfc")
-
- # We need to know the openstack version in order to use one config or
- # another. For Pike we will use config-pike.yaml. Queens and Rocky
- # will use config.yaml
- if 'OPENSTACK_OSA_VERSION' in os.environ:
- if os.environ['OPENSTACK_OSA_VERSION'] == 'stable/pike':
- self.config_file = os.path.join(self.sfc_test_dir,
- "config-pike.yaml")
- else:
- self.config_file = os.path.join(self.sfc_test_dir,
- "config.yaml")
- else:
- self.config_file = os.path.join(self.sfc_test_dir,
- "config-pike.yaml")
-
+ self.config_file = os.path.join(self.sfc_test_dir, "config.yaml")
self.vim_file = os.path.join(self.sfc_test_dir, "register-vim.json")
- self.installer_type = env.get('INSTALLER_TYPE')
-
- self.installer_fields = test_utils.fill_installer_dict(
- self.installer_type)
-
- self.installer_ip = env.get('INSTALLER_IP')
+ pod_yaml_exists = os.path.isfile(os.path.join(self.sfc_test_dir, "pod.yaml"))
- self.installer_user = ft_utils.get_parameter_from_yaml(
- self.installer_fields['user'], self.config_file)
+ if pod_yaml_exists:
+ self.pod_file = os.path.join(self.sfc_test_dir, "pod.yaml")
+ self.nodes_pod = ft_utils.get_parameter_from_yaml(
+ "nodes", self.pod_file)
+ self.host_ip = self.nodes_pod[0]['ip']
+ self.host_user = self.nodes_pod[0]['user']
- try:
- self.installer_password = ft_utils.get_parameter_from_yaml(
- self.installer_fields['password'], self.config_file)
- except Exception:
- self.installer_password = None
-
- try:
- self.installer_key_file = ft_utils.get_parameter_from_yaml(
- self.installer_fields['pkey_file'], self.config_file)
- except Exception:
- self.installer_key_file = None
-
- try:
- self.installer_cluster = ft_utils.get_parameter_from_yaml(
- self.installer_fields['cluster'], self.config_file)
- except Exception:
+ self.installer_type = 'configByUser'
+ self.installer_ip = self.host_ip
+ self.installer_user = self.host_user
self.installer_cluster = None
+ try:
+ self.installer_password = self.nodes_pod[0]['password']
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = self.nodes_pod[0]['key_filename']
+ except Exception:
+ self.installer_key_file = None
+ else:
+ self.nodes_pod = None
+ self.host_ip = None
+ self.installer_type = env.get('INSTALLER_TYPE')
+ self.installer_fields = test_utils.fill_installer_dict(
+ self.installer_type)
+ self.installer_ip = env.get('INSTALLER_IP')
+ self.installer_user = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['user'], self.config_file)
+
+ try:
+ self.installer_password = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['password'], self.config_file)
+ except Exception:
+ self.installer_password = None
+
+ try:
+ self.installer_key_file = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['pkey_file'], self.config_file)
+ except Exception:
+ self.installer_key_file = None
+
+ try:
+ self.installer_cluster = ft_utils.get_parameter_from_yaml(
+ self.installer_fields['cluster'], self.config_file)
+ except Exception:
+ self.installer_cluster = None
self.flavor = ft_utils.get_parameter_from_yaml(
"defaults.flavor", self.config_file)
@@ -102,6 +110,21 @@ class CommonConfig(object):
"defaults.image_format", self.config_file)
self.image_url = ft_utils.get_parameter_from_yaml(
"defaults.image_url", self.config_file)
+ self.mano_component = ft_utils.get_parameter_from_yaml(
+ "defaults.mano_component", self.config_file)
+ try:
+ self.vnf_image_name = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_name", self.config_file)
+ self.vnf_image_url = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_url", self.config_file)
+ self.vnf_image_format = ft_utils.get_parameter_from_yaml(
+ "defaults.vnf_image_format", self.config_file)
+ except ValueError:
+ # If the parameter does not exist we use the default
+ self.vnf_image_name = self.image_name
+ self.vnf_image_url = self.image_url
+ self.vnf_image_format = self.image_format
+
self.dir_functest_data = getattr(config.CONF, 'dir_functest_data')
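
Note: the pod.yaml branch above only depends on a few keys per node; a
sketch of the structure ft_utils.get_parameter_from_yaml("nodes",
self.pod_file) is expected to return (all values illustrative):

    nodes_pod = [
        {'ip': '10.20.0.2',                    # becomes host_ip/installer_ip
         'user': 'opnfv',                      # becomes host_user
         'password': 'secret',                 # optional, defaults to None
         'key_filename': '/root/.ssh/id_rsa',  # optional, defaults to None
         'role': 'Controller'},                # consulted by odl_utils
    ]
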
diff --git a/sfc/lib/odl_utils.py b/sfc/lib/odl_utils.py
index e1980423..2c657a13 100644
--- a/sfc/lib/odl_utils.py
+++ b/sfc/lib/odl_utils.py
@@ -1,16 +1,17 @@
import ConfigParser
+import functools
+import json
+import logging
import os
+import re
import requests
import time
-import json
-import re
-import logging
-import functools
-import sfc.lib.openstack_utils as os_sfc_utils
+import sfc.lib.openstack_utils as os_sfc_utils
logger = logging.getLogger(__name__)
-
+odl_username = 'admin'
+odl_password = 'admin'
ODL_MODULE_EXCEPTIONS = {
"service-function-path-state": "service-function-path"
@@ -24,15 +25,16 @@ ODL_PLURAL_EXCEPTIONS = {
def actual_rsps_in_compute(ovs_logger, compute_ssh):
'''
Example flows that match the regex (line wrapped because of flake8)
- table=101, n_packets=7, n_bytes=595, priority=500,tcp,in_port=2,tp_dst=80
- actions=push_nsh,load:0x1->NXM_NX_NSH_MDTYPE[],load:0x3->NXM_NX_NSH_NP[],
- load:0x27->NXM_NX_NSP[0..23],load:0xff->NXM_NX_NSI[],
- load:0xffffff->NXM_NX_NSH_C1[],load:0->NXM_NX_NSH_C2[],resubmit(,17)
+ cookie=0xf005ba1100000002, duration=5.843s, table=101, n_packets=0,
+ n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80
+ actions=load:0x169->NXM_NX_REG2[8..31],load:0xff->NXM_NX_REG2[0..7],
+ resubmit(,17)
+ cookie=0xf005ba1100000002, duration=5.825s, table=101,
+ n_packets=2, n_bytes=684, priority=10 actions=resubmit(,17)
'''
match_rsp = re.compile(r'.+'
r'(tp_(?:src|dst)=[0-9]+)'
r'.+'
- r'load:(0x[0-9a-f]+)->NXM_NX_NSP\[0\.\.23\]'
+ r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
r'.+')
# First line is OFPST_FLOW reply (OF1.3) (xid=0x2):
# This is not a flow so ignore
@@ -64,7 +66,7 @@ def get_active_rsps_on_ports(odl_ip, odl_port, neutron_ports):
# We get the first ace. ODL creates a new ACL
# with one ace for each classifier
ace = acl['access-list-entries']['ace'][0]
- except:
+ except Exception:
logger.warn('ACL {0} does not have an ACE'.format(
acl['acl-name']))
continue
@@ -200,6 +202,11 @@ def wait_for_classification_rules(ovs_logger, compute_nodes, odl_ip, odl_port,
time.sleep(3)
while timeout > 0:
+ # When classifiers are swapped, the promised RSPs take some time to
+ # be updated, so re-read them on every iteration
+ # TODO: optimise this polling
+ promised_rsps = promised_rsps_in_compute(odl_ip, odl_port,
+ neutron_ports)
logger.info("RSPs in ODL Operational DataStore"
"for compute '{}':".format(compute_name))
logger.info("{0}".format(promised_rsps))
@@ -245,6 +252,36 @@ def get_odl_ip_port(nodes):
return ip, port
+def get_odl_ip_port_no_installer(nodes_pod):
+ node_index = 0
+ for n in nodes_pod:
+ if n['role'] == 'Controller':
+ break
+ node_index += 1
+ remote_ml2_conf_etc = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+ os.system('scp {0}@{1}:{2} .'.
+ format(nodes_pod[node_index]['user'],
+ nodes_pod[node_index]['ip'],
+ remote_ml2_conf_etc))
+ with open('ml2_conf.ini', 'r') as ml2_conf_file:
+ matches = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+',
+ ml2_conf_file.read())
+ ip, port = matches[0].split(':')
+ return ip, port
+
+
+def get_odl_username_password():
+ local_ml2_conf_file = os.path.join(os.getcwd(), 'ml2_conf.ini')
+ con_par = ConfigParser.RawConfigParser()
+ con_par.read(local_ml2_conf_file)
+ global odl_username
+ odl_username = con_par.get('ml2_odl', 'username')
+ global odl_password
+ odl_password = con_par.get('ml2_odl', 'password')
+ return odl_username, odl_password
+
+
def pluralize(resource):
plural = ODL_PLURAL_EXCEPTIONS.get(resource, None)
if not plural:
@@ -260,11 +297,11 @@ def get_module(resource):
def format_odl_resource_list_url(odl_ip, odl_port, resource,
- datastore='config', odl_user='admin',
- odl_pwd='admin'):
+ datastore='config', odl_user=odl_username,
+ odl_pwd=odl_password):
return ('http://{usr}:{pwd}@{ip}:{port}/restconf/{ds}/{rsrc}:{rsrcs}'
- .format(usr=odl_user, pwd=odl_pwd, ip=odl_ip, port=odl_port,
- ds=datastore, rsrc=get_module(resource),
+ .format(usr=odl_username, pwd=odl_password, ip=odl_ip,
+ port=odl_port, ds=datastore, rsrc=get_module(resource),
rsrcs=pluralize(resource)))
@@ -314,10 +351,10 @@ def odl_acl_types_names(acl_json):
def format_odl_acl_list_url(odl_ip, odl_port,
- odl_user='admin', odl_pwd='admin'):
+ odl_user=odl_username, odl_pwd=odl_password):
acl_list_url = ('http://{usr}:{pwd}@{ip}:{port}/restconf/config/'
'ietf-access-control-list:access-lists'
- .format(usr=odl_user, pwd=odl_pwd,
+ .format(usr=odl_username, pwd=odl_password,
ip=odl_ip, port=odl_port))
return acl_list_url
@@ -410,7 +447,8 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
try:
compute = find_compute(compute_client_name, compute_nodes)
except Exception as e:
- logger.debug("There was an error getting the compute: e" % e)
+ logger.debug("There was an error getting the compute: %s" % e)
+ return False
retries_counter = retries
@@ -427,20 +465,3 @@ def check_vnffg_deletion(odl_ip, odl_port, ovs_logger, neutron_ports,
return False
return True
-
-
-def create_chain(tacker_client, default_param_file, neutron_port,
- COMMON_CONFIG, TESTCASE_CONFIG):
-
- tosca_file = os.path.join(COMMON_CONFIG.sfc_test_dir,
- COMMON_CONFIG.vnffgd_dir,
- TESTCASE_CONFIG.test_vnffgd_red)
-
- os_sfc_utils.create_vnffgd(tacker_client,
- tosca_file=tosca_file,
- vnffgd_name='red')
-
- os_sfc_utils.create_vnffg_with_param_file(tacker_client, 'red',
- 'red_http',
- default_param_file,
- neutron_port.id)
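
Note: a quick self-contained check of the REG2-based regex introduced in
actual_rsps_in_compute, run against the first example flow quoted in its
docstring (flow text taken from the diff above, assembled into one line):

    import re

    match_rsp = re.compile(r'.+'
                           r'(tp_(?:src|dst)=[0-9]+)'
                           r'.+'
                           r'actions=load:(0x[0-9a-f]+)->NXM_NX_REG2'
                           r'.+')
    flow = ('cookie=0xf005ba1100000002, duration=5.843s, table=101,'
            ' n_packets=0, n_bytes=0, priority=500,tcp,in_port=48,tp_dst=80'
            ' actions=load:0x169->NXM_NX_REG2[8..31],'
            'load:0xff->NXM_NX_REG2[0..7],resubmit(,17)')
    match = match_rsp.match(flow)
    assert match.group(1) == 'tp_dst=80'  # classifier port
    assert match.group(2) == '0x169'      # RSP id loaded into REG2
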
diff --git a/sfc/lib/openstack_utils.py b/sfc/lib/openstack_utils.py
index 0b343f37..c46ff123 100644
--- a/sfc/lib/openstack_utils.py
+++ b/sfc/lib/openstack_utils.py
@@ -1,44 +1,31 @@
-import logging
import os
import time
import json
+import logging
import yaml
+import urllib2
+
+
from tackerclient.tacker import client as tackerclient
from functest.utils import constants
from functest.utils import env
-
from snaps.openstack.tests import openstack_tests
-
-from snaps.openstack.create_image import OpenStackImage
-from snaps.config.image import ImageConfig
-
-from snaps.config.flavor import FlavorConfig
-from snaps.openstack.create_flavor import OpenStackFlavor
-
-from snaps.config.network import NetworkConfig, SubnetConfig, PortConfig
-from snaps.openstack.create_network import OpenStackNetwork
-
-from snaps.config.router import RouterConfig
-from snaps.openstack.create_router import OpenStackRouter
-
-from snaps.config.security_group import (
- Protocol, SecurityGroupRuleConfig, Direction, SecurityGroupConfig)
-
-from snaps.openstack.create_security_group import OpenStackSecurityGroup
-
+from snaps.config.vm_inst import FloatingIpConfig
import snaps.openstack.create_instance as cr_inst
-from snaps.config.vm_inst import VmInstanceConfig, FloatingIpConfig
-
from snaps.openstack.utils import (
nova_utils, neutron_utils, heat_utils, keystone_utils)
+from openstack import connection
+from neutronclient.neutron import client as neutronclient
logger = logging.getLogger(__name__)
DEFAULT_TACKER_API_VERSION = '1.0'
+DEFAULT_API_VERSION = '2'
class OpenStackSFC:
def __init__(self):
+ self.conn = self.get_os_connection()
self.os_creds = openstack_tests.get_credentials(
os_env_file=constants.ENV_FILE)
self.creators = []
@@ -46,113 +33,191 @@ class OpenStackSFC:
self.neutron = neutron_utils.neutron_client(self.os_creds)
self.heat = heat_utils.heat_client(self.os_creds)
self.keystone = keystone_utils.keystone_client(self.os_creds)
+ self.neutron_client = neutronclient.\
+ Client(self.get_neutron_client_version(),
+ session=self.conn.session)
+
+ def get_os_connection(self):
+ return connection.from_config(verify=False)
+
+ def get_neutron_client_version(self):
+ api_version = os.getenv('OS_NETWORK_API_VERSION')
+ if api_version is not None:
+ logger.info("OS_NETWORK_API_VERSION is %s" % api_version)
+ return api_version
+ return DEFAULT_API_VERSION
def register_glance_image(self, name, url, img_format, public):
- image_settings = ImageConfig(name=name, img_format=img_format, url=url,
- public=public, image_user='admin')
+ logger.info("Registering the image...")
+ image = self.conn.image.find_image(name)
+ if image:
+ logger.info("Image %s already exists." % image.name)
+ else:
+ if 'http' in url:
+ logger.info("Downloading image")
+ response = urllib2.urlopen(url)
+ image_data = response.read()
+ else:
+ with open(url) as f:
+ image_data = f.read()
- # TODO Remove this when tacker is part of SNAPS
- self.image_settings = image_settings
+ image_settings = {'name': name,
+ 'disk_format': img_format,
+ 'data': image_data,
+ 'is_public': public,
+ 'container_format': 'bare'}
+ image = self.conn.image.upload_image(**image_settings)
+ self.creators.append(image)
+ logger.info("Image created")
- image_creator = OpenStackImage(self.os_creds, image_settings)
- image_creator.create()
+ self.image_settings = image_settings
- self.creators.append(image_creator)
- return image_creator
+ return image
def create_flavor(self, name, ram, disk, vcpus):
- flavor_settings = FlavorConfig(name=name, ram=ram, disk=disk,
- vcpus=vcpus)
- flavor_creator = OpenStackFlavor(self.os_creds, flavor_settings)
- flavor = flavor_creator.create()
+ logger.info("Creating flavor...")
+ flavor_settings = {"name": name, "ram": ram, "disk": disk,
+ "vcpus": vcpus}
+
+ flavor = self.conn.compute.create_flavor(**flavor_settings)
- self.creators.append(flavor_creator)
+ self.creators.append(flavor)
return flavor
def create_network_infrastructure(self, net_name, subnet_name, subnet_cidr,
router_name):
+ logger.info("Creating Networks...")
# Network and subnet
- subnet_settings = SubnetConfig(name=subnet_name, cidr=subnet_cidr)
- network_settings = NetworkConfig(name=net_name,
- subnet_settings=[subnet_settings])
- network_creator = OpenStackNetwork(self.os_creds, network_settings)
- network = network_creator.create()
+ network = self.conn.network.create_network(name=net_name)
+ self.creators.append(network)
- self.creators.append(network_creator)
+ subnet_settings = {"name": subnet_name, "cidr": subnet_cidr,
+ "network_id": network.id, 'ip_version': '4'}
+ subnet = self.conn.network.create_subnet(**subnet_settings)
+ self.creators.append(subnet)
# Router
ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+ router_dict = {'network_id': ext_net.id}
- router_settings = RouterConfig(name=router_name,
- external_gateway=ext_network_name,
- internal_subnets=[subnet_name])
+ logger.info("Creating Router...")
+ router = self.conn.network.create_router(name=router_name)
- router_creator = OpenStackRouter(self.os_creds, router_settings)
- router = router_creator.create()
+ self.conn.network.add_interface_to_router(router.id,
+ subnet_id=subnet.id)
- self.creators.append(router_creator)
+ self.conn.network.update_router(router.id,
+ external_gateway_info=router_dict)
+ router_obj = self.conn.network.get_router(router.id)
+ self.creators.append(router_obj)
- return network, router
+ return network, router_obj
def create_security_group(self, sec_grp_name):
- rule_ping = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.icmp)
-
- rule_ssh = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=22,
- port_range_max=22)
-
- rule_http = SecurityGroupRuleConfig(sec_grp_name=sec_grp_name,
- direction=Direction.ingress,
- protocol=Protocol.tcp,
- port_range_min=80,
- port_range_max=80)
+ logger.info("Creating the security groups...")
+ sec_group = self.conn.network.create_security_group(name=sec_grp_name)
- rules = [rule_ping, rule_ssh, rule_http]
+ rule_ping = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "icmp"}
+
+ rule_ssh = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 22,
+ "port_range_max": 22}
- secgroup_settings = SecurityGroupConfig(name=sec_grp_name,
- rule_settings=rules)
+ rule_http = {"security_group_id": sec_group.id,
+ "direction": "ingress",
+ "protocol": "tcp",
+ "port_range_min": 80,
+ "port_range_max": 80}
- sec_group_creator = OpenStackSecurityGroup(self.os_creds,
- secgroup_settings)
- sec_group = sec_group_creator.create()
+ rules = [rule_ping, rule_ssh, rule_http]
+
+ for rule in rules:
+ self.conn.network.create_security_group_rule(**rule)
- self.creators.append(sec_group_creator)
+ self.creators.append(sec_group)
return sec_group
- def create_instance(self, vm_name, flavor_name, image_creator, network,
- secgrp, av_zone):
+ def create_instance(self, vm_name, flavor, image, network,
+ sec_group, av_zone, ports, port_security=True):
+ logger.info("Creating Key Pair {}...".format(vm_name))
+
+ keypair = self.conn.compute.\
+ create_keypair(name="{}_keypair".format(vm_name))
+ self.creators.append(keypair)
+ flavor_obj = self.conn.compute.find_flavor(flavor)
+
+ logger.info("Creating ports {}...".format(ports))
+ port_list = []
+ for port in ports:
+ if port_security:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id, security_group_ids=[sec_group.id])
+ else:
+ port_obj = self.conn.network.create_port(
+ name=port, is_port_security_enabled=port_security,
+ network_id=network.id)
+ port_list.append(port_obj)
+ self.creators.append(port_obj)
+ logger.info("Creating the instance {}...".format(vm_name))
+
+ if len(port_list) > 1:
+ network_list = [{"port": port_list[0].id},
+ {"port": port_list[1].id}]
+ else:
+ network_list = [{"port": port_list[0].id}]
- port_settings = PortConfig(name=vm_name + '-port',
- network_name=network.name)
+ instance = self.conn.compute.create_server(name=vm_name,
+ image_id=image.id,
+ flavor_id=flavor_obj.id,
+ networks=network_list,
+ key_name=keypair.name,
+ availability_zone=av_zone)
- instance_settings = VmInstanceConfig(
- name=vm_name, flavor=flavor_name,
- security_group_names=str(secgrp.name),
- port_settings=[port_settings],
- availability_zone=av_zone)
+ logger.info("Waiting for {} to become Active".format(instance.name))
+ self.conn.compute.wait_for_server(instance)
+ logger.info("{} is active".format(instance.name))
- instance_creator = cr_inst.OpenStackVmInstance(
- self.os_creds,
- instance_settings,
- image_creator.image_settings)
+ self.creators.append(instance)
- instance = instance_creator.create()
+ return instance, port_list
- self.creators.append(instance_creator)
- return instance, instance_creator
+ def get_instance(self, instance_id):
+ """
+ Return a dictionary of metadata for a server instance
+ """
+ return self.conn.compute.get_server_metadata(instance_id)
def get_av_zones(self):
'''
Return the availability zone each host belongs to
'''
- hosts = nova_utils.get_hypervisor_hosts(self.nova)
+ hosts = self.get_hypervisor_hosts()
return ['nova::{0}'.format(host) for host in hosts]
+ def get_hypervisor_hosts(self):
+ """
+ Returns the host names of all nova nodes with active hypervisors
+ :return: a list of hypervisor host names
+ """
+ try:
+ nodes = []
+ hypervisors = self.conn.compute.hypervisors()
+ for hypervisor in hypervisors:
+ if hypervisor.state == "up":
+ nodes.append(hypervisor.name)
+ return nodes
+ except Exception as e:
+ logger.error("Error [get_hypervisor_hosts(compute)]: %s" % e)
+ return None
+
def get_compute_client(self):
'''
Return the compute where the client sits
@@ -178,18 +243,37 @@ class OpenStackSFC:
raise Exception("There is no VM with name '{}'!!".format(vm_name))
- def assign_floating_ip(self, router, vm, vm_creator):
+ def get_port_by_ip(self, ip_address):
+ """
+ Return the port whose first fixed IP matches the given ip_address
+ """
+
+ ports = self.conn.network.ports()
+ for port in ports:
+ if port.fixed_ips[0]['ip_address'] == ip_address:
+ return self.conn.network.get_port(port.id)
+
+ def assign_floating_ip(self, vm, vm_port):
'''
Assign a floating IP to the given VM port
'''
- name = vm.name + "-float"
- port_name = vm.ports[0].name
- float_ip = FloatingIpConfig(name=name,
- port_name=port_name,
- router_name=router.name)
- ip = vm_creator.add_floating_ip(float_ip)
+ logger.info("Creating floating IP...")
+
+ ext_network_name = env.get('EXTERNAL_NETWORK')
+ ext_net = self.conn.network.find_network(ext_network_name)
+
+ fip = self.conn.network.create_ip(floating_network_id=ext_net.id,
+ port_id=vm_port.id)
+ logger.info(
+ "Floating IP address {} created".format(fip.floating_ip_address))
- return ip.ip
+ logger.info("Adding the floating IP to the instance")
+ self.conn.compute.add_floating_ip_to_server(
+ vm.id, fip.floating_ip_address)
+
+ self.creators.append(fip)
+ return fip.floating_ip_address
# We need this function because tacker VMs cannot be created through SNAPs
def assign_floating_ip_vnfs(self, router, ips=None):
@@ -204,7 +288,7 @@ class OpenStackSFC:
for stack in stacks:
servers = heat_utils.get_stack_servers(self.heat,
self.nova,
- self.neutron,
+ self.neutron_client,
self.keystone,
stack,
project_name)
@@ -224,10 +308,11 @@ class OpenStackSFC:
break
if port_name is None:
- err_msg = "The VNF {} does not have any suitable port {} " \
- "for floating IP assignment".format(
- name,
- 'with ip any of ' + str(ips) if ips else '')
+ err_msg = ("The VNF {} does not have any suitable port {} "
+ "for floating IP assignment"
+ .format(name,
+ 'with ip any of ' +
+ str(ips) if ips else ''))
logger.error(err_msg)
raise Exception(err_msg)
@@ -240,11 +325,12 @@ class OpenStackSFC:
return fips
- def get_client_port(self, vm, vm_creator):
+ def get_instance_port(self, vm, vm_creator, port_name=None):
'''
Get the neutron port id of the client
'''
- port_name = vm.name + "-port"
+ if not port_name:
+ port_name = vm.name + "-port"
port = vm_creator.get_port_by_name(port_name)
if port is not None:
return port
@@ -256,13 +342,209 @@ class OpenStackSFC:
def delete_all_security_groups(self):
'''
Deletes all the available security groups
-
Needed until this bug is fixed:
https://bugs.launchpad.net/networking-odl/+bug/1763705
'''
- sec_groups = neutron_utils.list_security_groups(self.neutron)
+ logger.info("Deleting remaining security groups...")
+ sec_groups = self.conn.network.security_groups()
for sg in sec_groups:
- neutron_utils.delete_security_group(self.neutron, sg)
+ self.conn.network.delete_security_group(sg)
+
+ def wait_for_vnf(self, vnf_creator):
+ '''
+ Waits for VNF to become active
+ '''
+ return vnf_creator.vm_active(block=True, poll_interval=5)
+
+ def create_port_groups(self, vnf_ports, vm_instance):
+ '''
+ Creates a networking-sfc port pair and group
+ '''
+ logger.info("Creating the port pairs...")
+ port_pair = dict()
+ port_pair['name'] = vm_instance.name + '-connection-points'
+ port_pair['description'] = 'port pair for ' + vm_instance.name
+
+ # In the symmetric testcase ingress != egress (VNF has 2 interfaces)
+ if len(vnf_ports) == 1:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[0].id
+ elif len(vnf_ports) == 2:
+ port_pair['ingress'] = vnf_ports[0].id
+ port_pair['egress'] = vnf_ports[1].id
+ else:
+ logger.error("Only SFs with one or two ports are supported")
+ raise Exception("Failed to create port pairs")
+ port_pair_info = \
+ self.neutron_client.create_sfc_port_pair({'port_pair': port_pair})
+ if not port_pair_info:
+ logger.warning("Chain creation failed because port pair "
+ "creation failed for vnf %(vnf)s",
+ {'vnf': vm_instance.name})
+ return None
+
+ # Avoid race conditions by checking the port pair is already committed
+ iterations = 5
+ found_it = False
+ for _ in range(iterations):
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ if pp['id'] == port_pair_info['port_pair']['id']:
+ found_it = True
+ break
+ if found_it:
+ break
+ else:
+ time.sleep(3)
+
+ if not found_it:
+ raise Exception("Port pair was not committed in openstack")
+
+ logger.info("Creating the port pair groups for %s" % vm_instance.name)
+
+ port_pair_group = {}
+ port_pair_group['name'] = vm_instance.name + '-port-pair-group'
+ port_pair_group['description'] = \
+ 'port pair group for ' + vm_instance.name
+ port_pair_group['port_pairs'] = []
+ port_pair_group['port_pairs'].append(port_pair_info['port_pair']['id'])
+ ppg_config = {'port_pair_group': port_pair_group}
+ port_pair_group_info = \
+ self.neutron_client.create_sfc_port_pair_group(ppg_config)
+ if not port_pair_group_info:
+ logger.warning("Chain creation failed because port pair group "
+ "creation failed for vnf "
+ "{}".format(vm_instance.name))
+ return None
+
+ return port_pair_group_info['port_pair_group']['id']
+
+ def create_classifier(self, neutron_port, port, protocol, fc_name,
+ symmetrical, server_port=None, server_ip=None):
+ '''
+ Create the classifier
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': fc_name,
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': fc_name,
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ def create_chain(self, port_groups, neutron_port, port, protocol,
+ vnffg_name, symmetrical, server_port=None,
+ server_ip=None):
+ '''
+ Create the flow classifier and the port chain
+ '''
+ logger.info("Creating the classifier...")
+
+ if symmetrical:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'destination_ip_prefix': server_ip,
+ 'logical_source_port': neutron_port,
+ 'logical_destination_port': server_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+ else:
+ sfc_classifier_params = {'name': vnffg_name + '-classifier',
+ 'logical_source_port': neutron_port,
+ 'destination_port_range_min': port,
+ 'destination_port_range_max': port,
+ 'protocol': protocol}
+
+ fc_config = {'flow_classifier': sfc_classifier_params}
+ fc_info = \
+ self.neutron_client.create_sfc_flow_classifier(fc_config)
+
+ logger.info("Creating the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['description'] = 'port-chain for SFC'
+ port_chain['port_pair_groups'] = port_groups
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_info['flow_classifier']['id'])
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ return self.neutron_client.create_sfc_port_chain(chain_config)
+
+ def update_chain(self, vnffg_name, fc_name, symmetrical):
+ '''
+ Update the port chain with the new flow classifier ID
+ '''
+ fc_id = self.neutron_client.find_resource('flow_classifier',
+ fc_name)['id']
+ logger.info("Update the chain...")
+ port_chain = {}
+ port_chain['name'] = vnffg_name + '-port-chain'
+ port_chain['flow_classifiers'] = []
+ port_chain['flow_classifiers'].append(fc_id)
+ if symmetrical:
+ port_chain['chain_parameters'] = {}
+ port_chain['chain_parameters']['symmetric'] = True
+ chain_config = {'port_chain': port_chain}
+ pc_id = self.neutron_client.find_resource('port_chain',
+ port_chain['name'])['id']
+ return self.neutron_client.update_sfc_port_chain(pc_id, chain_config)
+
+ def swap_classifiers(self, vnffg_1_name, vnffg_2_name, symmetric=False):
+ '''
+ Swap the flow classifiers between the two port chains
+ '''
+ logger.info("Swap classifiers...")
+
+ self.update_chain(vnffg_1_name, 'dummy', symmetric)
+ vnffg_1_classifier_name = vnffg_1_name + '-classifier'
+ self.update_chain(vnffg_2_name, vnffg_1_classifier_name, symmetric)
+ vnffg_2_classifier_name = vnffg_2_name + '-classifier'
+ self.update_chain(vnffg_1_name, vnffg_2_classifier_name, symmetric)
+
+ def delete_port_groups(self):
+ '''
+ Delete all port groups and port pairs
+ '''
+ logger.info("Deleting the port groups...")
+ ppg_list = self.neutron_client.\
+ list_sfc_port_pair_groups()['port_pair_groups']
+ for ppg in ppg_list:
+ self.neutron_client.delete_sfc_port_pair_group(ppg['id'])
+
+ logger.info("Deleting the port pairs...")
+ pp_list = self.neutron_client.list_sfc_port_pairs()['port_pairs']
+ for pp in pp_list:
+ self.neutron_client.delete_sfc_port_pair(pp['id'])
+
+ def delete_chain(self):
+ '''
+ Delete the classifiers and the chains
+ '''
+ logger.info("Deleting the chain...")
+ pc_list = self.neutron_client.list_sfc_port_chains()['port_chains']
+ for pc in pc_list:
+ self.neutron_client.delete_sfc_port_chain(pc['id'])
+
+ logger.info("Deleting the classifiers...")
+ fc_list = self.neutron_client.\
+ list_sfc_flow_classifiers()['flow_classifiers']
+ for fc in fc_list:
+ self.neutron_client.delete_sfc_flow_classifier(fc['id'])
# TACKER SECTION #
@@ -344,12 +626,14 @@ def list_vnfds(tacker_client, verbose=False):
def create_vnfd(tacker_client, tosca_file=None, vnfd_name=None):
+ logger.info("Creating the vnfd...")
try:
vnfd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnfd_body = tosca_fd.read()
- logger.info('VNFD template:\n{0}'.format(vnfd_body))
+ vnfd = tosca_fd.read()
+ vnfd_body = yaml.safe_load(vnfd)
+ logger.info('VNFD template:\n{0}'.format(vnfd))
return tacker_client.create_vnfd(
body={"vnfd": {"attributes": {"vnfd": vnfd_body},
"name": vnfd_name}})
@@ -386,6 +670,7 @@ def list_vnfs(tacker_client, verbose=False):
def create_vnf(tacker_client, vnf_name, vnfd_id=None,
vnfd_name=None, vim_id=None, vim_name=None, param_file=None):
+ logger.info("Creating the vnf...")
try:
vnf_body = {
'vnf': {
@@ -444,7 +729,6 @@ def get_vnf_ip(tacker_client, vnf_id=None, vnf_name=None):
"""
Get the management ip of the first VNF component as obtained from the
tacker REST API:
-
{
"vnf": {
...
@@ -461,7 +745,7 @@ def wait_for_vnf(tacker_client, vnf_id=None, vnf_name=None, timeout=100):
vnf = get_vnf(tacker_client, vnf_id, vnf_name)
if vnf is None:
raise Exception("Could not retrieve VNF - id='%s', name='%s'"
- % vnf_id, vnf_name)
+ % (vnf_id, vnf_name))
logger.info('Waiting for vnf {0}'.format(str(vnf)))
while vnf['status'] != 'ACTIVE' and timeout >= 0:
if vnf['status'] == 'ERROR':
@@ -496,6 +780,7 @@ def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
def create_vim(tacker_client, vim_file=None):
+ logger.info("Creating the vim...")
try:
vim_body = {}
if vim_file is not None:
@@ -510,12 +795,14 @@ def create_vim(tacker_client, vim_file=None):
def create_vnffgd(tacker_client, tosca_file=None, vnffgd_name=None):
+ logger.info("Creating the vnffgd...")
try:
vnffgd_body = {}
if tosca_file is not None:
with open(tosca_file) as tosca_fd:
- vnffgd_body = yaml.safe_load(tosca_fd)
- logger.info('VNFFGD template:\n{0}'.format(vnffgd_body))
+ vnffgd = tosca_fd.read()
+ vnffgd_body = yaml.safe_load(vnffgd)
+ logger.info('VNFFGD template:\n{0}'.format(vnffgd))
return tacker_client.create_vnffgd(
body={'vnffgd': {'name': vnffgd_name,
'template': {'vnffgd': vnffgd_body}}})
@@ -530,6 +817,7 @@ def create_vnffg(tacker_client, vnffg_name=None, vnffgd_id=None,
'''
Creates the vnffg which will provide the RSP and the classifier
'''
+ logger.info("Creating the vnffg...")
try:
vnffg_body = {
'vnffg': {
@@ -657,8 +945,7 @@ def register_vim(tacker_client, vim_file=None):
create_vim(tacker_client, vim_file=tmp_file)
-def create_vnf_in_av_zone(
- tacker_client,
+def create_vnf_in_av_zone(tacker_client,
vnf_name,
vnfd_name,
vim_name,
@@ -670,9 +957,7 @@ def create_vnf_in_av_zone(
param_file = os.path.join(
'/tmp',
'param_{0}.json'.format(av_zone.replace('::', '_')))
- data = {
- 'zone': av_zone
- }
+ data = {'zone': av_zone}
with open(param_file, 'w+') as f:
json.dump(data, f)
create_vnf(tacker_client,
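
Note: taken together, the new OpenStackSFC helpers build the
networking-sfc object graph bottom-up; a sketch of the no-mano flow
(the sf*/client_port variables are illustrative assumptions):

    osfc = OpenStackSFC()

    # one port pair (+ group) per service function instance
    ppg1 = osfc.create_port_groups([sf1_port], sf1_vm)
    ppg2 = osfc.create_port_groups([sf2_ingress, sf2_egress], sf2_vm)

    # flow classifier + port chain referencing the groups
    osfc.create_chain([ppg1, ppg2], client_port.id, 80, 'tcp',
                      'red', symmetrical=False)

    # teardown mirrors creation: chains and classifiers first,
    # then port pair groups and port pairs
    osfc.delete_chain()
    osfc.delete_port_groups()
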
diff --git a/sfc/lib/results.py b/sfc/lib/results.py
index 15d82e02..2f2edfc0 100644
--- a/sfc/lib/results.py
+++ b/sfc/lib/results.py
@@ -7,7 +7,6 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
import logging
logger = logging.getLogger(__name__)
diff --git a/sfc/lib/test_utils.py b/sfc/lib/test_utils.py
index 18c55dc1..ed50c390 100644
--- a/sfc/lib/test_utils.py
+++ b/sfc/lib/test_utils.py
@@ -12,10 +12,8 @@ import subprocess
import time
import shutil
import urllib
-
import logging
-
logger = logging.getLogger(__name__)
SSH_OPTIONS = '-q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
FUNCTEST_RESULTS_DIR = os.path.join("home", "opnfv",
@@ -232,11 +230,11 @@ def check_ssh(ips, retries=100):
def fill_installer_dict(installer_type):
- default_string = "defaults.installer.{}.".format(installer_type)
- installer_yaml_fields = {
- "user": default_string+"user",
- "password": default_string+"password",
- "cluster": default_string+"cluster",
- "pkey_file": default_string+"pkey_file"
- }
- return installer_yaml_fields
+ default_string = "defaults.installer.{}.".format(installer_type)
+ installer_yaml_fields = {
+ "user": default_string+"user",
+ "password": default_string+"password",
+ "cluster": default_string+"cluster",
+ "pkey_file": default_string+"pkey_file"
+ }
+ return installer_yaml_fields
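
Note: fill_installer_dict simply namespaces the YAML lookup keys under
the installer type; for example (installer name illustrative):

    fill_installer_dict('fuel')
    # => {'user':      'defaults.installer.fuel.user',
    #     'password':  'defaults.installer.fuel.password',
    #     'cluster':   'defaults.installer.fuel.cluster',
    #     'pkey_file': 'defaults.installer.fuel.pkey_file'}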