Diffstat (limited to 'sdnvpn/lib/utils.py')
-rw-r--r--  sdnvpn/lib/utils.py | 171
1 file changed, 132 insertions, 39 deletions
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 44641ee..e43750c 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -7,19 +7,20 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
+import json
import logging
import os
-import sys
import time
import requests
import re
import subprocess
from concurrent.futures import ThreadPoolExecutor
+from requests.auth import HTTPBasicAuth
-import functest.utils.openstack_utils as os_utils
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
+import sdnvpn.lib.openstack_utils as os_utils
logger = logging.getLogger('sdnvpn_test_utils')
@@ -35,6 +36,7 @@ class ExtraRoute(object):
"""
Class to represent extra route for a router
"""
+
def __init__(self, destination, nexthop):
self.destination = destination
self.nexthop = nexthop
@@ -44,11 +46,19 @@ class AllowedAddressPair(object):
"""
Class to represent allowed address pair for a neutron port
"""
+
def __init__(self, ipaddress, macaddress):
self.ipaddress = ipaddress
self.macaddress = macaddress
+def create_default_flavor():
+ return os_utils.get_or_create_flavor(common_config.default_flavor,
+ common_config.default_flavor_ram,
+ common_config.default_flavor_disk,
+ common_config.default_flavor_vcpus)
+
+
def create_custom_flavor():
return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
common_config.custom_flavor_ram,
@@ -62,7 +72,8 @@ def create_net(neutron_client, name):
if not net_id:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(name))
return net_id
@@ -76,7 +87,8 @@ def create_subnet(neutron_client, name, cidr, net_id):
if not subnet_id:
logger.error(
"There has been a problem when creating the neutron subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron subnet {}".format(name))
return subnet_id
@@ -93,7 +105,8 @@ def create_network(neutron_client, net, subnet1, cidr1,
if not network_dic:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(net))
net_id = network_dic["net_id"]
subnet_id = network_dic["subnet_id"]
router_id = network_dic["router_id"]
@@ -105,7 +118,8 @@ def create_network(neutron_client, net, subnet1, cidr1,
if not subnet_id:
logger.error(
"There has been a problem when creating the second subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the second subnet {}".format(subnet2))
logger.debug("Subnet '%s' created successfully" % subnet_id)
return net_id, subnet_id, router_id
@@ -176,7 +190,7 @@ def create_instance(nova_client,
if instance is None:
logger.error("Error while booting instance.")
- sys.exit(-1)
+ raise Exception("Error while booting instance {}".format(name))
else:
logger.debug("Instance '%s' booted successfully. IP='%s'." %
(name, instance.networks.itervalues().next()[0]))
@@ -422,7 +436,9 @@ def assert_and_get_compute_nodes(nova_client, required_node_number=2):
logger.error("There are %s compute nodes in the deployment. "
"Minimum number of nodes to complete the test is 2."
% num_compute_nodes)
- sys.exit(-1)
+ raise Exception("There are {} compute nodes in the deployment. "
+ "Minimum number of nodes to complete the test"
+ " is 2.".format(num_compute_nodes))
logger.debug("Compute nodes: %s" % compute_nodes)
return compute_nodes
@@ -626,9 +642,9 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(floatingip_ids) != 0:
for floatingip_id in floatingip_ids:
if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
- logging.error('Fail to delete all floating ips. '
- 'Floating ip with id {} was not deleted.'.
- format(floatingip_id))
+ logger.error('Fail to delete all floating ips. '
+ 'Floating ip with id {} was not deleted.'.
+ format(floatingip_id))
return False
if len(bgpvpn_ids) != 0:
@@ -639,39 +655,39 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
for router_id, subnet_id in interfaces:
if not os_utils.remove_interface_router(neutron_client,
router_id, subnet_id):
- logging.error('Fail to delete all interface routers. '
- 'Interface router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Fail to delete all interface routers. '
+ 'Interface router with id {} was not deleted.'.
+ format(router_id))
if len(router_ids) != 0:
for router_id in router_ids:
if not os_utils.remove_gateway_router(neutron_client, router_id):
- logging.error('Fail to delete all gateway routers. '
- 'Gateway router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Fail to delete all gateway routers. '
+ 'Gateway router with id {} was not deleted.'.
+ format(router_id))
if len(subnet_ids) != 0:
for subnet_id in subnet_ids:
if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
- logging.error('Fail to delete all subnets. '
- 'Subnet with id {} was not deleted.'.
- format(subnet_id))
+ logger.error('Fail to delete all subnets. '
+ 'Subnet with id {} was not deleted.'.
+ format(subnet_id))
return False
if len(router_ids) != 0:
for router_id in router_ids:
if not os_utils.delete_neutron_router(neutron_client, router_id):
- logging.error('Fail to delete all routers. '
- 'Router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Fail to delete all routers. '
+ 'Router with id {} was not deleted.'.
+ format(router_id))
return False
if len(network_ids) != 0:
for network_id in network_ids:
if not os_utils.delete_neutron_net(neutron_client, network_id):
- logging.error('Fail to delete all networks. '
- 'Network with id {} was not deleted.'.
- format(network_id))
+ logger.error('Fail to delete all networks. '
+ 'Network with id {} was not deleted.'.
+ format(network_id))
return False
return True
@@ -679,16 +695,13 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
- if not nova_client.flavors.delete(flavor_id):
- logging.error('Fail to delete flavor. '
- 'Flavor with id {} was not deleted.'.
- format(flavor_id))
+ nova_client.flavors.delete(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
if not os_utils.delete_instance(nova_client, instance_id):
- logging.error('Fail to delete all instances. '
- 'Instance with id {} was not deleted.'.
- format(instance_id))
+ logger.error('Fail to delete all instances. '
+ 'Instance with id {} was not deleted.'.
+ format(instance_id))
return False
return True
@@ -697,9 +710,9 @@ def cleanup_glance(glance_client, image_ids):
if len(image_ids) != 0:
for image_id in image_ids:
if not os_utils.delete_glance_image(glance_client, image_id):
- logging.error('Fail to delete all images. '
- 'Image with id {} was not deleted.'.
- format(image_id))
+ logger.error('Fail to delete all images. '
+ 'Image with id {} was not deleted.'.
+ format(image_id))
return False
return True
@@ -770,8 +783,8 @@ def is_fail_mode_secure():
is_secure[openstack_node.name] = True
else:
# failure
- logging.error('The fail_mode for br-int was not secure '
- 'in {} node'.format(openstack_node.name))
+ logger.error('The fail_mode for br-int was not secure '
+ 'in {} node'.format(openstack_node.name))
is_secure[openstack_node.name] = False
return is_secure
@@ -889,3 +902,83 @@ def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
split("\n"))
return cmd_out_lines
+
+
+def get_odl_bgp_entity_owner(controllers):
+ """ Finds the ODL owner of the BGP entity in the cluster.
+
+    When ODL runs in clustering mode, BGP speaker related commands must be
+    executed on the ODL instance that owns the BGP entity.
+
+ :param controllers: list of OS controllers
+ :return controller: OS controller in which ODL BGP entity owner runs
+ """
+ if len(controllers) == 1:
+ return controllers[0]
+ else:
+ url = ('http://admin:admin@{ip}:8081/restconf/'
+ 'operational/entity-owners:entity-owners/entity-type/bgp'
+ .format(ip=controllers[0].ip))
+
+ remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+ 'initial/akka.conf')
+ remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+ local_tmp_akka_conf = '/tmp/akka.conf'
+ try:
+ json_output = requests.get(url).json()
+ except Exception:
+ logger.error('Failed to find the ODL BGP '
+ 'entity owner through REST')
+ return None
+ odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+ for controller in controllers:
+
+ controller.run_cmd('sudo cp {0} /home/heat-admin/'
+ .format(remote_odl_akka_conf))
+ controller.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+ for line in open(local_tmp_akka_conf):
+ if re.search(odl_bgp_owner, line):
+ return controller
+ return None
+
+
+def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
+ json_body = {'input':
+ {'destination-ip': remote_tep_ip,
+ 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+ }
+ url = ('http://{ip}:8081/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ try:
+ requests.post(url, data=json.dumps(json_body),
+ headers=headers,
+ auth=HTTPBasicAuth('admin', 'admin'))
+ except Exception as e:
+ logger.error("Failed to create external tunnel endpoint on"
+ " ODL for external tep ip %s with error %s"
+ % (remote_tep_ip, e))
+ return None
+
+
+def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
+ url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
+ 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+    logger.debug("url is %s" % url)
+ try:
+ vrf_table = requests.get(url).json()
+ is_ipprefix_exists = False
+ for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+ if vrf_entry['destPrefix'] == ip_prefix:
+ is_ipprefix_exists = True
+ break
+ return is_ipprefix_exists
+ except Exception as e:
+ logger.error('Failed to find ip prefix %s with error %s'
+ % (ip_prefix, e))
+ return False
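The three helpers added at the end of this change are meant to be used together from a BGP VPN test case: find the controller that currently owns the ODL BGP entity, program an MPLS-over-GRE tunnel endpoint towards the external Quagga peer, and poll the ODL FIB until the advertised prefix shows up. The sketch below is a minimal, hypothetical usage example and is not part of this change; the controller list, tunnel endpoint IP, prefix and route distinguisher are placeholder values.

import time

from sdnvpn.lib import utils as test_utils

QUAGGA_TEP_IP = '10.0.0.100'       # example external Quagga tunnel endpoint
ADVERTISED_PREFIX = '30.1.1.1/32'  # example prefix expected from the peer
VRF_RD = '100:100'                 # example route distinguisher used as vrf_id


def wait_for_quagga_route(controllers, timeout=300, interval=10):
    """Sketch: program a GRE TEP towards Quagga and wait for the route."""
    # In a clustered ODL, BGP speaker commands must target the entity owner.
    owner = test_utils.get_odl_bgp_entity_owner(controllers)
    if owner is None:
        return False
    # Any BGP speaker configuration would be run against 'owner' here.

    # Create the external MPLS-over-GRE tunnel endpoint via the ITM RPC.
    test_utils.add_quagga_external_gre_end_point(controllers, QUAGGA_TEP_IP)

    # Poll the odl-fib vrfTables until the advertised prefix is installed.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if test_utils.is_fib_entry_present_on_odl(controllers,
                                                  ADVERTISED_PREFIX,
                                                  VRF_RD):
            return True
        time.sleep(interval)
    return False

Note that get_odl_bgp_entity_owner() already falls back to the single controller when there is no cluster, so the same flow works on one-node deployments.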