summaryrefslogtreecommitdiffstats
path: root/sdnvpn/lib/utils.py
diff options
context:
space:
mode:
Diffstat (limited to 'sdnvpn/lib/utils.py')
-rw-r--r--sdnvpn/lib/utils.py347
1 files changed, 264 insertions, 83 deletions
diff --git a/sdnvpn/lib/utils.py b/sdnvpn/lib/utils.py
index 9a5e181..4c35edc 100644
--- a/sdnvpn/lib/utils.py
+++ b/sdnvpn/lib/utils.py
@@ -14,10 +14,12 @@ import time
import requests
import re
import subprocess
+import yaml
from concurrent.futures import ThreadPoolExecutor
-from openstack.exceptions import ResourceNotFound
+from openstack.exceptions import ResourceNotFound, NotFoundException
from requests.auth import HTTPBasicAuth
+from functest.utils import env
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
@@ -27,8 +29,10 @@ logger = logging.getLogger('sdnvpn_test_utils')
common_config = sdnvpn_config.CommonConfig()
-ODL_USER = 'admin'
-ODL_PASS = 'admin'
+ODL_USER = env.get('SDN_CONTROLLER_USER')
+ODL_PASSWORD = env.get('SDN_CONTROLLER_PASSWORD')
+ODL_IP = env.get('SDN_CONTROLLER_IP')
+ODL_PORT = env.get('SDN_CONTROLLER_RESTCONFPORT')
executor = ThreadPoolExecutor(5)
@@ -296,18 +300,16 @@ def get_installerHandler():
return None
else:
if installer_type in ["apex"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- pkey_file="/root/.ssh/id_rsa")
-
- if installer_type in ["fuel"]:
- developHandler = DeploymentFactory.get_handler(
- installer_type,
- installer_ip,
- 'root',
- 'r00tme')
+ installer_user = "root"
+ elif installer_type in ["fuel"]:
+ installer_user = "ubuntu"
+
+ developHandler = DeploymentFactory.get_handler(
+ installer_type,
+ installer_ip,
+ installer_user,
+ pkey_file="/root/.ssh/id_rsa")
+
return developHandler
@@ -535,17 +537,19 @@ def exec_cmd(cmd, verbose):
return output, success
-def check_odl_fib(ip, controller_ip):
+def check_odl_fib(ip):
"""Check that there is an entry in the ODL Fib for `ip`"""
- url = "http://" + controller_ip + \
- ":8181/restconf/config/odl-fib:fibEntries/"
+ url = ("http://{user}:{password}@{ip}:{port}/restconf/config/"
+ "odl-fib:fibEntries/".format(user=ODL_USER,
+ password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
logger.debug("Querring '%s' for FIB entries", url)
- res = requests.get(url, auth=(ODL_USER, ODL_PASS))
+ res = requests.get(url, auth=(ODL_USER, ODL_PASSWORD))
if res.status_code != 200:
logger.error("OpenDaylight response status code: %s", res.status_code)
return False
logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
- % controller_ip)
+ % ODL_IP)
logger.debug("OpenDaylight FIB: \n%s" % res.text)
return ip in res.text
@@ -597,34 +601,50 @@ def wait_for_cloud_init(conn, instance):
def attach_instance_to_ext_br(instance, compute_node):
libvirt_instance_name = instance.instance_name
installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
+ # In Apex, br-ex (or br-floating for Fuel) is an ovs bridge and virsh
+ # attach-interface won't just work. We work around it by creating a linux
+ # bridge, attaching that to br-ex (or br-floating for Fuel) with a
+ # veth pair and virsh-attaching the instance to the linux-bridge
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- set -e
- if ! sudo brctl show |grep -q ^{bridge};then
- sudo brctl addbr {bridge}
- sudo ip link set {bridge} up
- sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
- sudo ip link set dev ovs-quagga-tap up
- sudo ip link set dev quagga-tap up
- sudo ovs-vsctl add-port br-ex ovs-quagga-tap
- sudo brctl addif {bridge} quagga-tap
- fi
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
+
+ cmd = """
+ set -e
+ if ! sudo brctl show |grep -q ^br-quagga;then
+ sudo brctl addbr br-quagga
+ sudo ip link set br-quagga up
+ sudo ip link add quagga-tap type veth peer name ovs-quagga-tap
+ sudo ip link set dev ovs-quagga-tap up
+ sudo ip link set dev quagga-tap up
+ sudo ovs-vsctl add-port {bridge} ovs-quagga-tap
+ sudo brctl addif br-quagga quagga-tap
+ fi
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
compute_node.run_cmd("sudo virsh attach-interface %s"
- " bridge %s" % (libvirt_instance_name, bridge))
+ " bridge br-quagga" % (libvirt_instance_name))
def detach_instance_from_ext_br(instance, compute_node):
libvirt_instance_name = instance.instance_name
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ # This function undoes all the actions performed by
+ # attach_instance_to_ext_br on Fuel and Apex installers.
+ if installer_type in ["fuel"]:
+ bridge = "br-floating"
+ elif installer_type in ["apex"]:
+ bridge = "br-ex"
+ else:
+ logger.warn("installer type %s is neither fuel nor apex."
+ % installer_type)
+ return
mac = compute_node.run_cmd("for vm in $(sudo virsh list | "
"grep running | awk '{print $2}'); "
"do echo -n ; sudo virsh dumpxml $vm| "
@@ -633,25 +653,16 @@ def detach_instance_from_ext_br(instance, compute_node):
" --type bridge --mac %s"
% (libvirt_instance_name, mac))
- installer_type = str(os.environ['INSTALLER_TYPE'].lower())
- if installer_type == "fuel":
- bridge = "br-ex"
- elif installer_type == "apex":
- # In Apex, br-ex is an ovs bridge and virsh attach-interface
- # won't just work. We work around it by creating a linux
- # bridge, attaching that to br-ex with a veth pair
- # and virsh-attaching the instance to the linux-bridge
- bridge = "br-quagga"
- cmd = """
- sudo brctl delif {bridge} quagga-tap &&
- sudo ovs-vsctl del-port br-ex ovs-quagga-tap &&
- sudo ip link set dev quagga-tap down &&
- sudo ip link set dev ovs-quagga-tap down &&
- sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
- sudo ip link set {bridge} down &&
- sudo brctl delbr {bridge}
- """
- compute_node.run_cmd(cmd.format(bridge=bridge))
+ cmd = """
+ sudo brctl delif br-quagga quagga-tap &&
+ sudo ovs-vsctl del-port {bridge} ovs-quagga-tap &&
+ sudo ip link set dev quagga-tap down &&
+ sudo ip link set dev ovs-quagga-tap down &&
+ sudo ip link del quagga-tap type veth peer name ovs-quagga-tap &&
+ sudo ip link set br-quagga down &&
+ sudo brctl delbr br-quagga
+ """
+ compute_node.run_cmd(cmd.format(bridge=bridge))
def cleanup_neutron(conn, neutron_client, floatingip_ids, bgpvpn_ids,
@@ -790,6 +801,15 @@ def is_fail_mode_secure():
if not openstack_node.is_active():
continue
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['fuel']:
+ if (
+ 'controller' in openstack_node.roles or
+ 'opendaylight' in openstack_node.roles or
+ 'installer' in openstack_node.roles
+ ):
+ continue
+
ovs_int_list = (openstack_node.run_cmd(get_ovs_int_cmd).
strip().split('\n'))
if 'br-int' in ovs_int_list:
@@ -909,25 +929,42 @@ def get_ovs_flows(compute_node_list, ovs_br_list, of_protocol="OpenFlow13"):
return cmd_out_lines
-def get_odl_bgp_entity_owner(controllers):
+def get_node_ip_and_netmask(node, iface):
+ cmd = "ip a | grep {iface} | grep inet | awk '{{print $2}}'"\
+ .format(iface=iface)
+ mgmt_net_cidr = node.run_cmd(cmd).strip().split('\n')
+ mgmt_ip = mgmt_net_cidr[0].split('/')[0]
+ mgmt_netmask = mgmt_net_cidr[0].split('/')[1]
+
+ return mgmt_ip, mgmt_netmask
+
+
+def get_odl_bgp_entity_owner(odl_nodes):
""" Finds the ODL owner of the BGP entity in the cluster.
When ODL runs in clustering mode we need to execute the BGP speaker
related commands to that ODL which is the owner of the BGP entity.
- :param controllers: list of OS controllers
- :return controller: OS controller in which ODL BGP entity owner runs
+ :param odl_nodes: list of Opendaylight nodes
+ :return odl_node: Opendaylight node in which ODL BGP entity owner runs
"""
- if len(controllers) == 1:
- return controllers[0]
+ if len(odl_nodes) == 1:
+ return odl_nodes[0]
else:
- url = ('http://admin:admin@{ip}:8081/restconf/'
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/'
'operational/entity-owners:entity-owners/entity-type/bgp'
- .format(ip=controllers[0].ip))
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT))
+
+ installer_type = str(os.environ['INSTALLER_TYPE'].lower())
+ if installer_type in ['apex']:
+ node_user = 'heat-admin'
+ elif installer_type in ['fuel']:
+ node_user = 'ubuntu'
remote_odl_akka_conf = ('/opt/opendaylight/configuration/'
'initial/akka.conf')
- remote_odl_home_akka_conf = '/home/heat-admin/akka.conf'
+ remote_odl_home_akka_conf = '/home/{0}/akka.conf'.format(node_user)
local_tmp_akka_conf = '/tmp/akka.conf'
try:
json_output = requests.get(url).json()
@@ -937,33 +974,43 @@ def get_odl_bgp_entity_owner(controllers):
return None
odl_bgp_owner = json_output['entity-type'][0]['entity'][0]['owner']
- for controller in controllers:
-
- controller.run_cmd('sudo cp {0} /home/heat-admin/'
- .format(remote_odl_akka_conf))
- controller.run_cmd('sudo chmod 777 {0}'
- .format(remote_odl_home_akka_conf))
- controller.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
+ for odl_node in odl_nodes:
+ if installer_type in ['apex']:
+ get_odl_id_cmd = 'sudo docker ps -qf name=opendaylight_api'
+ odl_id = odl_node.run_cmd(get_odl_id_cmd)
+ odl_node.run_cmd('sudo docker cp '
+ '{container_id}:{odl_akka_conf} '
+ '/home/{user}/'
+ .format(container_id=odl_id,
+ odl_akka_conf=remote_odl_akka_conf,
+ user=node_user))
+ elif installer_type in ['fuel']:
+ odl_node.run_cmd('sudo cp {0} /home/{1}/'
+ .format(remote_odl_akka_conf, node_user))
+ odl_node.run_cmd('sudo chmod 777 {0}'
+ .format(remote_odl_home_akka_conf))
+ odl_node.get_file(remote_odl_home_akka_conf, local_tmp_akka_conf)
for line in open(local_tmp_akka_conf):
if re.search(odl_bgp_owner, line):
- return controller
+ return odl_node
return None
-def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
+def add_quagga_external_gre_end_point(odl_nodes, remote_tep_ip):
json_body = {'input':
{'destination-ip': remote_tep_ip,
'tunnel-type': "odl-interface:tunnel-type-mpls-over-gre"}
}
- url = ('http://{ip}:8081/restconf/operations/'
- 'itm-rpc:add-external-tunnel-endpoint'.format(ip=controllers[0].ip))
+ url = ('http://{ip}:{port}/restconf/operations/'
+ 'itm-rpc:add-external-tunnel-endpoint'.format(ip=ODL_IP,
+ port=ODL_PORT))
headers = {'Content-type': 'application/yang.data+json',
'Accept': 'application/yang.data+json'}
try:
requests.post(url, data=json.dumps(json_body),
headers=headers,
- auth=HTTPBasicAuth('admin', 'admin'))
+ auth=HTTPBasicAuth(ODL_USER, ODL_PASSWORD))
except Exception as e:
logger.error("Failed to create external tunnel endpoint on"
" ODL for external tep ip %s with error %s"
@@ -971,9 +1018,11 @@ def add_quagga_external_gre_end_point(controllers, remote_tep_ip):
return None
-def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
- url = ('http://admin:admin@{ip}:8081/restconf/config/odl-fib:fibEntries/'
- 'vrfTables/{vrf}/'.format(ip=controllers[0].ip, vrf=vrf_id))
+def is_fib_entry_present_on_odl(odl_nodes, ip_prefix, vrf_id):
+ url = ('http://{user}:{password}@{ip}:{port}/restconf/config/'
+ 'odl-fib:fibEntries/vrfTables/{vrf}/'
+ .format(user=ODL_USER, password=ODL_PASSWORD, ip=ODL_IP,
+ port=ODL_PORT, vrf=vrf_id))
logger.error("url is %s" % url)
try:
vrf_table = requests.get(url).json()
@@ -987,3 +1036,135 @@ def is_fib_entry_present_on_odl(controllers, ip_prefix, vrf_id):
logger.error('Failed to find ip prefix %s with error %s'
% (ip_prefix, e))
return False
+
+
+def wait_stack_for_status(conn, stack_id, stack_status, limit=12):
+ """ Waits to reach specified stack status. To be used with
+ CREATE_COMPLETE and UPDATE_COMPLETE.
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the stack id returned by create_stack api call
+ :param stack_status: the stack status waiting for
+ :param limit: the maximum number of attempts
+ """
+ logger.debug("Stack '%s' create started" % stack_id)
+
+ stack_create_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ except NotFoundException:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+ if stack_st == stack_status:
+ stack_create_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if stack_create_complete is False:
+ logger.error("Stack create failed")
+ raise SystemError("Stack create failed")
+ return False
+
+ return True
+
+
+def delete_stack_and_wait(conn, stack_id, limit=12):
+ """ Starts and waits for completion of delete stack
+
+ Will try a specific number of attempts at 10sec intervals
+ (default 2min)
+
+ :param stack_id: the id of the stack to be deleted
+ :param limit: the maximum number of attempts
+ """
+ delete_started = False
+ if stack_id is not None:
+ delete_started = os_utils.delete_stack(conn, stack_id)
+
+ if delete_started is True:
+        logger.debug("Stack delete successfully started")
+ else:
+ logger.error("Stack delete start failed")
+
+ stack_delete_complete = False
+ attempts = 0
+ while attempts < limit:
+ try:
+ stack_st = conn.orchestration.get_stack(stack_id).status
+ if stack_st == 'DELETE_COMPLETE':
+ stack_delete_complete = True
+ break
+ attempts += 1
+ time.sleep(10)
+ except NotFoundException:
+ stack_delete_complete = True
+ break
+
+ logger.debug("Stack status check: %s times" % attempts)
+ if not stack_delete_complete:
+ logger.error("Stack delete failed")
+ raise SystemError("Stack delete failed")
+ return False
+
+ return True
+
+
+def get_heat_environment(testcase, common_config):
+ """ Reads the heat parameters of a testcase into a yaml object
+
+    Each testcase where Heat Orchestration Template (HOT) is introduced
+ has an associated parameters section.
+ Reads testcase.heat_parameters section and read COMMON_CONFIG.flavor
+ and place it under parameters tree.
+
+    :param testcase: the testcase for which the HOT file is fetched
+ :param common_config: the common config section
+ :return environment: a yaml object to be used as environment
+ """
+ fl = common_config.default_flavor
+ param_dict = testcase.heat_parameters
+ param_dict['flavor'] = fl
+ env_dict = {'parameters': param_dict}
+ return env_dict
+
+
+def get_vms_from_stack_outputs(conn, stack_id, vm_stack_output_keys):
+ """ Converts a vm name from a heat stack output to a nova vm object
+
+ :param stack_id: the id of the stack to fetch the vms from
+ :param vm_stack_output_keys: a list of stack outputs with the vm names
+ :return vms: a list of vm objects corresponding to the outputs
+ """
+ vms = []
+ for vmk in vm_stack_output_keys:
+ vm_output = os_utils.get_output(conn, stack_id, vmk)
+ if vm_output is not None:
+ vm_name = vm_output['output_value']
+ logger.debug("vm '%s' read from heat output" % vm_name)
+ vm = os_utils.get_instance_by_name(conn, vm_name)
+ if vm is not None:
+ vms.append(vm)
+ return vms
+
+
+def merge_yaml(y1, y2):
+ """ Merge two yaml HOT into one
+
+ The parameters, resources and outputs sections are merged.
+
+ :param y1: the first yaml
+ :param y2: the second yaml
+ :return y: merged yaml
+ """
+ d1 = yaml.load(y1)
+ d2 = yaml.load(y2)
+ for key in ('parameters', 'resources', 'outputs'):
+ if key in d2:
+ d1[key].update(d2[key])
+ return yaml.dump(d1, default_flow_style=False)