Diffstat (limited to 'sdnvpn/lib/utils.py')
-rw-r--r--  sdnvpn/lib/utils.py  665
1 file changed, 473 insertions(+), 192 deletions(-)
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 0ab8b84..4c35edc 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
@@ -7,26 +7,32 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
+import json
import logging
import os
-import sys
import time
import requests
import re
import subprocess
+import yaml
from concurrent.futures import ThreadPoolExecutor
+from openstack.exceptions import ResourceNotFound, NotFoundException
+from requests.auth import HTTPBasicAuth
-import functest.utils.openstack_utils as os_utils
+from functest.utils import env
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
+import sdnvpn.lib.openstack_utils as os_utils
logger = logging.getLogger('sdnvpn_test_utils')
common_config = sdnvpn_config.CommonConfig()
-ODL_USER = 'admin'
-ODL_PASS = 'admin'
+ODL_USER = env.get('SDN_CONTROLLER_USER')
+ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
+ODL_IP = env.get('SDN_CONTROLLER_IP')
+ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')
executor = ThreadPoolExecutor(5)
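
The controller coordinates above are now read through functest's env module
instead of the old hard-coded 'admin'/'admin'. A minimal sketch of how a
local run might seed them before this module is imported, assuming env.get()
falls back to the process environment (values are placeholders):

    import os

    os.environ.setdefault('SDN_CONTROLLER_USER', 'admin')
    os.environ.setdefault('SDN_CONTROLLER_PASSWORD', 'admin')
    os.environ.setdefault('SDN_CONTROLLER_IP', '192.0.2.10')
    os.environ.setdefault('SDN_CONTROLLER_RESTCONFPORT', '8081')
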
@@ -35,6 +41,7 @@ class ExtraRoute(object):
"""
Class to represent extra route for a router
"""
+
def __init__(self, destination, nexthop):
self.destination = destination
self.nexthop = nexthop
@@ -44,11 +51,19 @@ class AllowedAddressPair(object):
"""
Class to represent allowed address pair for a neutron port
"""
+
def __init__(self, ipaddress, macaddress):
self.ipaddress = ipaddress
self.macaddress = macaddress
+def create_default_flavor():
+ return os_utils.get_or_create_flavor(common_config.default_flavor,
+ common_config.default_flavor_ram,
+ common_config.default_flavor_disk,
+ common_config.default_flavor_vcpus)
+
+
def create_custom_flavor():
return os_utils.get_or_create_flavor(common_config.custom_flavor_name,
common_config.custom_flavor_ram,
@@ -56,36 +71,38 @@ def create_custom_flavor():
common_config.custom_flavor_vcpus)
-def create_net(neutron_client, name):
+def create_net(conn, name):
logger.debug("Creating network %s", name)
- net_id = os_utils.create_neutron_net(neutron_client, name)
+ net_id = os_utils.create_neutron_net(conn, name)
if not net_id:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(name))
return net_id
-def create_subnet(neutron_client, name, cidr, net_id):
+def create_subnet(conn, name, cidr, net_id):
logger.debug("Creating subnet %s in network %s with cidr %s",
name, net_id, cidr)
- subnet_id = os_utils.create_neutron_subnet(neutron_client,
+ subnet_id = os_utils.create_neutron_subnet(conn,
name,
cidr,
net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the neutron subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron subnet {}".format(name))
return subnet_id
-def create_network(neutron_client, net, subnet1, cidr1,
+def create_network(conn, net, subnet1, cidr1,
router, subnet2=None, cidr2=None):
"""Network assoc won't work for networks/subnets created by this function.
It is an ODL limitation due to it handling routers as vpns.
See https://bugs.opendaylight.org/show_bug.cgi?id=6962"""
- network_dic = os_utils.create_network_full(neutron_client,
+ network_dic = os_utils.create_network_full(conn,
net,
subnet1,
router,
@@ -93,7 +110,8 @@ def create_network(neutron_client, net, subnet1, cidr1,
if not network_dic:
logger.error(
"There has been a problem when creating the neutron network")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the neutron network {}".format(net))
net_id = network_dic["net_id"]
subnet_id = network_dic["subnet_id"]
router_id = network_dic["router_id"]
@@ -101,25 +119,25 @@ def create_network(neutron_client, net, subnet1, cidr1,
if subnet2 is not None:
logger.debug("Creating and attaching a second subnet...")
subnet_id = os_utils.create_neutron_subnet(
- neutron_client, subnet2, cidr2, net_id)
+ conn, subnet2, cidr2, net_id)
if not subnet_id:
logger.error(
"There has been a problem when creating the second subnet")
- sys.exit(-1)
+ raise Exception("There has been a problem when creating"
+ " the second subnet {}".format(subnet2))
logger.debug("Subnet '%s' created successfully" % subnet_id)
return net_id, subnet_id, router_id
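
As the docstring above notes, networks created by create_network() get a
router attached and so cannot be used for BGPVPN network association. A
hedged sketch of the router-less alternative, using the helpers defined
earlier in this file:

    net_id = create_net(conn, 'bgpvpn-net')
    subnet_id = create_subnet(conn, 'bgpvpn-subnet', '10.10.10.0/24', net_id)
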
-def get_port(neutron_client, instance_id):
- ports = os_utils.get_port_list(neutron_client)
- if ports is not None:
- for port in ports:
- if port['device_id'] == instance_id:
- return port
+def get_port(conn, instance_id):
+ ports = os_utils.get_port_list(conn)
+ for port in ports:
+ if port.device_id == instance_id:
+ return port
return None
-def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
+def update_port_allowed_address_pairs(conn, port_id, address_pairs):
if len(address_pairs) <= 0:
return
allowed_address_pairs = []
@@ -127,30 +145,27 @@ def update_port_allowed_address_pairs(neutron_client, port_id, address_pairs):
address_pair_dict = {'ip_address': address_pair.ipaddress,
'mac_address': address_pair.macaddress}
allowed_address_pairs.append(address_pair_dict)
- json_body = {'port': {
- "allowed_address_pairs": allowed_address_pairs
- }}
try:
- port = neutron_client.update_port(port=port_id,
- body=json_body)
- return port['port']['id']
+ port = conn.network.\
+ update_port(port_id, allowed_address_pairs=allowed_address_pairs)
+ return port.id
except Exception as e:
- logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
+ logger.error("Error [update_neutron_port(network, '%s', '%s')]:"
" %s" % (port_id, address_pairs, e))
return None
-def create_instance(nova_client,
+def create_instance(conn,
name,
image_id,
network_id,
sg_id,
secgroup_name=None,
fixed_ip=None,
- compute_node='',
+ compute_node=None,
userdata=None,
- files=None,
+ files=[],
**kwargs
):
if 'flavor' not in kwargs:
@@ -176,12 +191,14 @@ def create_instance(nova_client,
if instance is None:
logger.error("Error while booting instance.")
- sys.exit(-1)
+ raise Exception("Error while booting instance {}".format(name))
else:
+ # Retrieve IP of INSTANCE
+ network_name = conn.network.get_network(network_id).name
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.get(network_name)[0]['addr']
logger.debug("Instance '%s' booted successfully. IP='%s'." %
- (name, instance.networks.itervalues().next()[0]))
- # Retrieve IP of INSTANCE
- # instance_ip = instance.networks.get(network_id)[0]
+ (name, instance_ip))
if secgroup_name:
logger.debug("Adding '%s' to security group '%s'..."
@@ -189,7 +206,7 @@ def create_instance(nova_client,
else:
logger.debug("Adding '%s' to security group '%s'..."
% (name, sg_id))
- os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
+ os_utils.add_secgroup_to_instance(conn, instance.id, sg_id)
return instance
@@ -283,18 +300,16 @@ def get_installerHandler():
return None
else:
if installer_type in ["apex"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- pkey_file="/root/.ssh/id_rsa")
-
- if installer_type in ["fuel"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- 'r00tme')
+ installer_user = "root"
+ elif installer_type in ["fuel"]:
+ installer_user = "ubuntu"
+
+ developHandler = DeploymentFactory.get_handler(
+ installer_type,
+ installer_ip,
+ installer_user,
+ pkey_file="/root/.ssh/id_rsa")
+
return developHandler
@@ -307,18 +322,21 @@ def get_installer_ip():
return str(os.environ['INSTALLER_IP'])
-def get_instance_ip(instance):
- instance_ip = instance.networks.itervalues().next()[0]
+def get_instance_ip(conn, instance):
+ instance_ip = conn.compute.get_server(instance).\
+ addresses.values()[0][0]['addr']
return instance_ip
def wait_for_instance(instance, pattern=".* login:", tries=40):
logger.info("Waiting for instance %s to boot up" % instance.id)
+ conn = os_utils.get_os_connection()
sleep_time = 2
expected_regex = re.compile(pattern)
console_log = ""
while tries > 0 and not expected_regex.search(console_log):
- console_log = instance.get_console_output()
+ console_log = conn.compute.\
+ get_server_console_output(instance)['output']
time.sleep(sleep_time)
tries -= 1
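
A hedged sketch of the boot-and-wait flow these helpers support (the ids are
placeholders; create_instance() is defined earlier in this file):

    vm = create_instance(conn, 'vm-1', image_id, network_id, sg_id)
    wait_for_instance(vm, pattern='.* login:', tries=40)
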
@@ -357,6 +375,21 @@ def async_Wait_for_instances(instances, tries=40):
logger.error("one or more instances is not yet booted up")
+def wait_for_instance_delete(conn, instance_id, tries=30):
+ sleep_time = 2
+ instances = [instance_id]
+ logger.debug("Waiting for instance %s to be deleted"
+ % (instance_id))
+ while tries > 0 and instance_id in instances:
+ instances = [instance.id for instance in
+ os_utils.get_instances(conn)]
+ time.sleep(sleep_time)
+ tries -= 1
+ if instance_id in instances:
+ logger.error("Deletion of instance %s failed" %
+ (instance_id))
+
+
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
tries = 30
sleep_time = 1
@@ -412,29 +445,31 @@ def wait_before_subtest(*args, **kwargs):
time.sleep(30)
-def assert_and_get_compute_nodes(nova_client, required_node_number=2):
+def assert_and_get_compute_nodes(conn, required_node_number=2):
"""Get the compute nodes in the deployment
Exit if the deployment doesn't have enough compute nodes"""
- compute_nodes = os_utils.get_hypervisors(nova_client)
+ compute_nodes = os_utils.get_hypervisors(conn)
num_compute_nodes = len(compute_nodes)
if num_compute_nodes < 2:
logger.error("There are %s compute nodes in the deployment. "
"Minimum number of nodes to complete the test is 2."
% num_compute_nodes)
- sys.exit(-1)
+ raise Exception("There are {} compute nodes in the deployment. "
+ "Minimum number of nodes to complete the test"
+ " is 2.".format(num_compute_nodes))
logger.debug("Compute nodes: %s" % compute_nodes)
return compute_nodes
-def open_icmp(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_icmp(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'icmp'):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'icmp'):
@@ -444,14 +479,14 @@ def open_icmp(neutron_client, security_group_id):
% security_group_id)
-def open_http_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_http_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
80, 80):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -463,14 +498,14 @@ def open_http_port(neutron_client, security_group_id):
% security_group_id)
-def open_bgp_port(neutron_client, security_group_id):
- if os_utils.check_security_group_rules(neutron_client,
+def open_bgp_port(conn, security_group_id):
+ if os_utils.check_security_group_rules(conn,
security_group_id,
'ingress',
'tcp',
179, 179):
- if not os_utils.create_secgroup_rule(neutron_client,
+ if not os_utils.create_secgroup_rule(conn,
security_group_id,
'ingress',
'tcp',
@@ -502,17 +537,19 @@ def exec_cmd(cmd, verbose):
return output, success
-def check_odl_fib(ip, controller_ip):
+def check_odl_fib(ip):
"""Check that there is an entry in the ODL Fib for `ip`"""
- url = "http://" + controller_ip + \
- ":8181/restconf/config/odl-fib:fibEntries/"
+ url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
+ "odl-fib:fibEntries/".format(user=ODL_USER,
+ password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
logger.debug("Querring '%s' for FIB entries", url)
- res = requests.get(url, auth=(ODL_USER, ODL_PASS))
+ res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
if res.status_code != 200:
logger.error("OpenDaylight response status code: %s", res.status_code)
return False
logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
- % controller_ip)
+ % ODL_IP)
logger.debug("OpenDaylight FIB: \n%s" % res.text)
return ip in res.text
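
A hedged usage sketch: after associating a network with a BGPVPN, a test can
verify that a VM's IP was advertised into the controller's FIB (the address
is a placeholder):

    if not check_odl_fib('10.10.10.5'):
        logger.error("IP not found in the OpenDaylight FIB")
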
@@ -530,7 +567,7 @@ def run_odl_cmd(odl_node, cmd):
return odl_node.run_cmd(karaf_cmd)
-def wait_for_cloud_init(instance):
+def wait_for_cloud_init(conn, instance):
success = True
# ubuntu images take a long time to start
tries = 20
@@ -538,7 +575,8 @@ def wait_for_cloud_init(instance):
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
- instance_log = instance.get_console_output()
+ instance_log = conn.compute.\
+ get_server_console_output(instance)['output']
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
@@ -561,36 +599,52 @@ def wait_for_cloud_init(instance):
def attach_instance_to_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
+ # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
+ # attach-interface won't just work. We work around it by creating a linux
+ # bridge, attaching that to br-ex (or br-floating for Fuel) with a
+ # veth pair and virsh-attaching the instance to the linux-bridge
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- set -e
- if ! sudo brctl show |grep -q ^{bridge};then
- sudo brctl addbr {bridge}
- sudo ip link set {bridge} up
- sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
- sudo ip link set dev ovs-quagga-tap up
- sudo ip link set dev quagga-tap up
- sudo ovs-vsctl add-port br-ex ovs-quagga-tap
- sudo brctl addif {bridge} quagga-tap
- fi
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
+
+ cmd = """
+ set -e
+ if ! sudo brctl show |grep -q ^br-quagga;then
+ sudo brctl addbr br-quagga
+ sudo ip link set br-quagga up
+ sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+ sudo ip link set dev ovs-quagga-tap up
+ sudo ip link set dev quagga-tap up
+ sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
+ sudo brctl addif br-quagga quagga-tap
+ fi
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
compute_node.run_cmd("sudo virsh attach-interface %s"
- " bridge %s" % (libvirt_instance_name, bridge))
+ " bridge br-quagga" % (libvirt_instance_name))
def detach_instance_from_ext_br(instance, compute_node):
- libvirt_instance_name = getattr(instance, "OS-EXT-SRV-ATTR:instance_name")
+ libvirt_instance_name = instance.instance_name
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ # This function undoes all the actions performed by
+ # attach_instance_to_ext_br on Fuel and Apex installers.
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
+ bridge = "br-ex"
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
@@ -599,36 +653,26 @@ def detach_instance_from_ext_br(instance, compute_node):
" --type bridge --mac %s"
% (libvirt_instance_name, mac))
- installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
- bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- sudo brctl delif {bridge} quagga-tap &&
- sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
- sudo ip link set dev quagga-tap down &&
- sudo ip link set dev ovs-quagga-tap down &&
- sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
- sudo ip link set {bridge} down &&
- sudo brctl delbr {bridge}
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
-
-
-def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
- subnet_ids, router_ids, network_ids):
+ cmd = """
+ sudo brctl delif br-quagga quagga-tap &&
+ sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
+ sudo ip link set dev quagga-tap down &&
+ sudo ip link set dev ovs-quagga-tap down &&
+ sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+ sudo ip link set br-quagga down &&
+ sudo brctl delbr br-quagga
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
+
+
+def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
+ interfaces, subnet_ids, router_ids, network_ids):
if len(floatingip_ids) != 0:
for floatingip_id in floatingip_ids:
- if not os_utils.delete_floating_ip(neutron_client, floatingip_id):
- logging.error('Fail to delete all floating ips. '
- 'Floating ip with id {} was not deleted.'.
- format(floatingip_id))
+ if not os_utils.delete_floating_ip(conn, floatingip_id):
+ logger.error('Failed to delete all floating IPs. '
+ 'Floating IP with id {} was not deleted.'.
+ format(floatingip_id))
return False
if len(bgpvpn_ids) != 0:
@@ -637,69 +681,67 @@ def cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids, interfaces,
if len(interfaces) != 0:
for router_id, subnet_id in interfaces:
- if not os_utils.remove_interface_router(neutron_client,
+ if not os_utils.remove_interface_router(conn,
router_id, subnet_id):
- logging.error('Fail to delete all interface routers. '
- 'Interface router with id {} was not deleted.'.
- format(router_id))
+ logger.error('Failed to remove all router interfaces. '
+ 'Interface on router with id {} was not removed.'.
+ format(router_id))
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.remove_gateway_router(neutron_client, router_id):
- logging.error('Fail to delete all gateway routers. '
- 'Gateway router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.remove_gateway_router(conn, router_id):
+ logger.error('Failed to remove all router gateways. '
+ 'Gateway of router with id {} was not removed.'.
+ format(router_id))
if len(subnet_ids) != 0:
for subnet_id in subnet_ids:
- if not os_utils.delete_neutron_subnet(neutron_client, subnet_id):
- logging.error('Fail to delete all subnets. '
- 'Subnet with id {} was not deleted.'.
- format(subnet_id))
+ if not os_utils.delete_neutron_subnet(conn, subnet_id):
+ logger.error('Failed to delete all subnets. '
+ 'Subnet with id {} was not deleted.'.
+ format(subnet_id))
return False
if len(router_ids) != 0:
for router_id in router_ids:
- if not os_utils.delete_neutron_router(neutron_client, router_id):
- logging.error('Fail to delete all routers. '
- 'Router with id {} was not deleted.'.
- format(router_id))
+ if not os_utils.delete_neutron_router(conn, router_id):
+ logger.error('Failed to delete all routers. '
+ 'Router with id {} was not deleted.'.
+ format(router_id))
return False
if len(network_ids) != 0:
for network_id in network_ids:
- if not os_utils.delete_neutron_net(neutron_client, network_id):
- logging.error('Fail to delete all networks. '
- 'Network with id {} was not deleted.'.
- format(network_id))
+ if not os_utils.delete_neutron_net(conn, network_id):
+ logger.error('Failed to delete all networks. '
+ 'Network with id {} was not deleted.'.
+ format(network_id))
return False
return True
-def cleanup_nova(nova_client, instance_ids, flavor_ids=None):
+def cleanup_nova(conn, instance_ids, flavor_ids=None):
if flavor_ids is not None and len(flavor_ids) != 0:
for flavor_id in flavor_ids:
- if not nova_client.flavors.delete(flavor_id):
- logging.error('Fail to delete flavor. '
- 'Flavor with id {} was not deleted.'.
- format(flavor_id))
+ conn.compute.delete_flavor(flavor_id)
if len(instance_ids) != 0:
for instance_id in instance_ids:
- if not os_utils.delete_instance(nova_client, instance_id):
- logging.error('Fail to delete all instances. '
- 'Instance with id {} was not deleted.'.
- format(instance_id))
- return False
+ if not os_utils.delete_instance(conn, instance_id):
+ logger.error('Failed to delete all instances. '
+ 'Instance with id {} was not deleted.'.
+ format(instance_id))
+ else:
+ wait_for_instance_delete(conn, instance_id)
return True
-def cleanup_glance(glance_client, image_ids):
+def cleanup_glance(conn, image_ids):
if len(image_ids) != 0:
for image_id in image_ids:
- if not os_utils.delete_glance_image(glance_client, image_id):
- logging.error('Fail to delete all images. '
- 'Image with id {} was not deleted.'.
- format(image_id))
+ if not os_utils.delete_glance_image(conn, image_id):
+ logger.error('Failed to delete all images. '
+ 'Image with id {} was not deleted.'.
+ format(image_id))
return False
return True
@@ -759,6 +801,15 @@ def is_fail_mode_secure():
if not openstack_node.is_active():
continue
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['fuel']:
+ if (
+ 'controller' in openstack_node.roles or
+ 'opendaylight' in openstack_node.roles or
+ 'installer' in openstack_node.roles
+ ):
+ continue
+
ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
strip().split('\n'))
if 'br-int' in ovs_int_list:
@@ -770,59 +821,55 @@ def is_fail_mode_secure():
is_secure[openstack_node.name] = True
else:
# failure
- logging.error('The fail_mode for br-int was not secure '
- 'in {} node'.format(openstack_node.name))
+ logger.error('The fail_mode for br-int was not secure '
+ 'on node {}'.format(openstack_node.name))
is_secure[openstack_node.name] = False
return is_secure
-def update_nw_subnet_port_quota(neutron_client, tenant_id, nw_quota,
- subnet_quota, port_quota):
- json_body = {"quota": {
- "network": nw_quota,
- "subnet": subnet_quota,
- "port": port_quota
- }}
-
+def update_nw_subnet_port_quota(conn, tenant_id, nw_quota,
+ subnet_quota, port_quota, router_quota):
try:
- neutron_client.update_quota(tenant_id=tenant_id,
- body=json_body)
+ conn.network.update_quota(tenant_id, networks=nw_quota,
+ subnets=subnet_quota, ports=port_quota,
+ routers=router_quota)
return True
except Exception as e:
- logger.error("Error [update_nw_subnet_port_quota(neutron_client,"
- " '%s', '%s', '%s', '%s')]: %s" %
- (tenant_id, nw_quota, subnet_quota, port_quota, e))
+ logger.error("Error [update_nw_subnet_port_quota(network,"
+ " '%s', '%s', '%s', '%s, %s')]: %s" %
+ (tenant_id, nw_quota, subnet_quota,
+ port_quota, router_quota, e))
return False
-def update_instance_quota_class(nova_client, instances_quota):
+def update_instance_quota_class(cloud, instances_quota):
try:
- nova_client.quota_classes.update("default", instances=instances_quota)
+ cloud.set_compute_quotas('admin', instances=instances_quota)
return True
except Exception as e:
- logger.error("Error [update_instance_quota_class(nova_client,"
+ logger.error("Error [update_instance_quota_class(compute,"
" '%s' )]: %s" % (instances_quota, e))
return False
-def get_neutron_quota(neutron_client, tenant_id):
+def get_neutron_quota(conn, tenant_id):
try:
- return neutron_client.show_quota(tenant_id=tenant_id)['quota']
- except Exception as e:
- logger.error("Error in getting neutron quota for tenant "
+ return conn.network.get_quota(tenant_id)
+ except ResourceNotFound as e:
+ logger.error("Error in getting network quota for tenant "
" '%s' )]: %s" % (tenant_id, e))
raise
-def get_nova_instances_quota(nova_client):
+def get_nova_instances_quota(cloud):
try:
- return nova_client.quota_classes.get("default").instances
+ return cloud.get_compute_quotas('admin').instances
except Exception as e:
logger.error("Error in getting nova instances quota: %s" % e)
raise
-def update_router_extra_route(neutron_client, router_id, extra_routes):
+def update_router_extra_route(conn, router_id, extra_routes):
if len(extra_routes) <= 0:
return
routes_list = []
@@ -830,26 +877,19 @@ def update_router_extra_route(neutron_client, router_id, extra_routes):
route_dict = {'destination': extra_route.destination,
'nexthop': extra_route.nexthop}
routes_list.append(route_dict)
- json_body = {'router': {
- "routes": routes_list
- }}
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=routes_list)
return True
except Exception as e:
logger.error("Error in updating router with extra route: %s" % e)
raise
-def update_router_no_extra_route(neutron_client, router_ids):
- json_body = {'router': {
- "routes": [
- ]}}
-
+def update_router_no_extra_route(conn, router_ids):
for router_id in router_ids:
try:
- neutron_client.update_router(router_id, body=json_body)
+ conn.network.update_router(router_id, routes=[])
return True
except Exception as e:
logger.error("Error in clearing extra route: %s" % e)
@@ -887,3 +927,244 @@ def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
cmd_out_lines += (compute_node.run_cmd(ovs_flows_cmd).strip().
split("\n"))
return cmd_out_lines
+
+
+def get_node_ip_and_netmask(node, iface):
+ cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
+ .format(iface=iface)
+ mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
+ mgmt_ip = mgmt_net_cidr[0].split('/')[0]
+ mgmt_netmask = mgmt_net_cidr[0].split('/')[1]
+
+ return mgmt_ip, mgmt_netmask
+
+
+def get_odl_bgp_entity_owner(odl_nodes):
+ """ Finds the ODL owner of the BGP entity in the cluster.
+
+ When ODL runs in clustering mode we need to execute the BGP speaker
+ related commands to that ODL which is the owner of the BGP entity.
+
+ :param odl_nodes: list of Opendaylight nodes
+ :return odl_node: Opendaylight node in which ODL BGP entity owner runs
+ """
+ if len(odl_nodes) == 1:
+ return odl_nodes[0]
+ else:
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/'
+ 'operational/entity-owners:entity-owners/entity-type/bgp'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
+
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ node_user = 'heat-admin'
+ elif installer_type in ['fuel']:
+ node_user = 'ubuntu'
+
+ remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
+ 'initial/akka.conf')
+ remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
+ local_tmp_akka_conf = '/tmp/akka.conf'
+ try:
+ json_output = requests.get(url).json()
+ except Exception:
+ logger.error('Failed to find the ODL BGP '
+ 'entity owner through REST')
+ return None
+ odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
+
+ for odl_node in odl_nodes:
+ if installer_type in ['apex']:
+ get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
+ odl_id = odl_node.run_cmd(get_odl_id_cmd)
+ odl_node.run_cmd('sudo docker cp '
+ '{container_id}:{odl_akka_conf} '
+ '/home/{user}/'
+ .format(container_id=odl_id,
+ odl_akka_conf=remote_odl_akka_conf,
+ user=node_user))
+ elif installer_type in ['fuel']:
+ odl_node.run_cmd('sudo cp {0} /home/{1}/'
+ .format(remote_odl_akka_conf, node_user))
+ odl_node.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+
+ for line in open(local_tmp_akka_conf):
+ if re.search(odl_bgp_owner, line):
+ return odl_node
+ return None
+
+
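
A hedged illustration of the entity-owners payload shape that the parsing
above relies on; the member names are made up, and the function then greps
each node's akka.conf for the owner's member name:

    json_output = {
        'entity-type': [{
            'type': 'bgp',
            'entity': [{
                'id': "/odl-general-entity:entity[name='bgp']",
                'candidate': [{'name': 'member-1'}, {'name': 'member-2'}],
                'owner': 'member-2',
            }],
        }],
    }
    # json_output['entity-type'][0]['entity'][0]['owner'] -> 'member-2'
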
+def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
+ json_body = {'input':
+ {'destination-ip': remote_tep_ip,
+ 'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
+ }
+ url = ('http://{ip}:{port}/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
+ port=ODL_PORT))
+ headers = {'Content-type': 'application/yang.data+json',
+ 'Accept': 'application/yang.data+json'}
+ try:
+ requests.post(url, data=json.dumps(json_body),
+ headers=headers,
+ auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
+ except Exception as e:
+ logger.error("Failed to create external tunnel endpoint on"
+ " ODL for external tep ip %s with error %s"
+ % (remote_tep_ip, e))
+ return None
+
+
+def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
+ 'odl-fib:fibEntries/vrfTables/{vrf}/'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT, vrf=vrf_id))
+ logger.error("url is %s" % url)
+ try:
+ vrf_table = requests.get(url).json()
+ is_ipprefix_exists = False
+ for vrf_entry in vrf_table['vrfTables'][0]['vrfEntry']:
+ if vrf_entry['destPrefix'] == ip_prefix:
+ is_ipprefix_exists = True
+ break
+ return is_ipprefix_exists
+ except Exception as e:
+ logger.error('Failed to find ip prefix %s with error %s'
+ % (ip_prefix, e))
+ return False
+
+
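
A hedged usage sketch tying the two helpers above together (the TEP address
and the VRF route distinguisher are placeholders):

    add_quagga_external_gre_end_point(odl_nodes, '172.16.0.5')
    if not is_fib_entry_present_on_odl(odl_nodes, '10.10.10.0/24', '100:100'):
        logger.error("prefix was not advertised to ODL")
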
+def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
+ """ Waits to reach specified stack status. To be used with
+ CREATE_COMPLETE and UPDATE_COMPLETE.
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the stack id returned by create_stack api call
+ :param stack_status: the stack status waiting for
+ :param limit: the maximum number of attempts
+ """
+ logger.debug("Stack '%s' create started" % stack_id)
+
+ stack_create_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ except NotFoundException:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ if stack_st == stack_status:
+ stack_create_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if stack_create_complete is False:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+
+ return True
+
+
+def delete_stack_and_wait(conn, stack_id, limit=12):
+ """ Starts and waits for completion of delete stack
+
+ Will retry up to `limit` attempts at 10 sec intervals
+ (2 min by default)
+
+ :param stack_id: the id of the stack to be deleted
+ :param limit: the maximum number of attempts
+ """
+ delete_started = False
+ if stack_id is not None:
+ delete_started = os_utils.delete_stack(conn, stack_id)
+
+ if delete_started is True:
+ logger.debug("Stack delete succesfully started")
+ else:
+ logger.error("Stack delete start failed")
+
+ stack_delete_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ if stack_st == 'DELETE_COMPLETE':
+ stack_delete_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+ except NotFoundException:
+ stack_delete_complete = True
+ break
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if not stack_delete_complete:
+ logger.error("Stack delete failed")
+ raise SystemError("Stack delete failed")
+
+ return True
+
+
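
A hedged usage sketch for the two stack helpers above; stack_id is assumed to
come from an earlier create_stack call in sdnvpn.lib.openstack_utils, the
output keys are placeholders, and get_vms_from_stack_outputs() is defined
further below:

    if wait_stack_for_status(conn, stack_id, 'CREATE_COMPLETE'):
        vms = get_vms_from_stack_outputs(conn, stack_id,
                                         ['vm1_name', 'vm2_name'])
        # ... exercise the vms ...
    delete_stack_and_wait(conn, stack_id)
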
+def get_heat_environment(testcase, common_config):
+ """ Reads the heat parameters of a testcase into a yaml object
+
+ Each testcase where a Heat Orchestration Template (HOT) is introduced
+ has an associated parameters section.
+ Reads the testcase.heat_parameters section, reads COMMON_CONFIG.flavor
+ and places it under the parameters tree.
+
+ :param testcase: the testcase for which the HOT file is fetched
+ :param common_config: the common config section
+ :return environment: a yaml object to be used as environment
+ """
+ fl = common_config.default_flavor
+ param_dict = testcase.heat_parameters
+ param_dict['flavor'] = fl
+ env_dict = {'parameters': param_dict}
+ return env_dict
+
+
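
A hedged usage sketch (TESTCASE_CONFIG and COMMON_CONFIG are the names
sibling sdnvpn test modules typically use for these objects):

    env_dict = get_heat_environment(TESTCASE_CONFIG, COMMON_CONFIG)
    # -> {'parameters': {..., 'flavor': COMMON_CONFIG.default_flavor}}
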
+def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
+ """ Converts a vm name from a heat stack output to a nova vm object
+
+ :param stack_id: the id of the stack to fetch the vms from
+ :param vm_stack_output_keys: a list of stack outputs with the vm names
+ :return vms: a list of vm objects corresponding to the outputs
+ """
+ vms = []
+ for vmk in vm_stack_output_keys:
+ vm_output = os_utils.get_output(conn, stack_id, vmk)
+ if vm_output is not None:
+ vm_name = vm_output['output_value']
+ logger.debug("vm '%s' read from heat output" % vm_name)
+ vm = os_utils.get_instance_by_name(conn, vm_name)
+ if vm is not None:
+ vms.append(vm)
+ return vms
+
+
+def merge_yaml(y1, y2):
+ """ Merge two yaml HOT into one
+
+ The parameters, resources and outputs sections are merged.
+
+ :param y1: the first yaml
+ :param y2: the second yaml
+ :return y: merged yaml
+ """
+ d1 = yaml.load(y1)
+ d2 = yaml.load(y2)
+ for key in ('parameters', 'resources', 'outputs'):
+ if key in d2:
+ d1[key].update(d2[key])
+ return yaml.dump(d1, default_flow_style=False)
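
A hedged usage sketch for merge_yaml() with two minimal HOT fragments. Note
that the first document must already contain any section present in the
second, since the helper updates d1's existing keys in place:

    base = "parameters: {}\nresources: {}\noutputs: {}\n"
    extra = "parameters:\n  flavor: {type: string}\n"
    merged = merge_yaml(base, extra)
    # merged now carries the flavor parameter plus the empty sections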