aboutsummaryrefslogtreecommitdiffstats
path: root/functest/utils
diff options
context:
space:
mode:
Diffstat (limited to 'functest/utils')
-rw-r--r--functest/utils/__init__.py0
-rw-r--r--functest/utils/functest_logger.py53
-rw-r--r--functest/utils/functest_utils.py449
-rw-r--r--functest/utils/functest_vacation.py52
-rwxr-xr-xfunctest/utils/openstack_clean.py424
-rwxr-xr-xfunctest/utils/openstack_snapshot.py166
-rw-r--r--functest/utils/openstack_tacker.py249
-rwxr-xr-xfunctest/utils/openstack_utils.py1190
8 files changed, 2583 insertions, 0 deletions
diff --git a/functest/utils/__init__.py b/functest/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/utils/__init__.py
diff --git a/functest/utils/functest_logger.py b/functest/utils/functest_logger.py
new file mode 100644
index 00000000..b154f563
--- /dev/null
+++ b/functest/utils/functest_logger.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+#
+# jose.lausuch@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Logging levels:
+# Level Numeric value
+# CRITICAL 50
+# ERROR 40
+# WARNING 30
+# INFO 20
+# DEBUG 10
+# NOTSET 0
+#
+# Usage:
+# import functest_logger as fl
+# logger = fl.Logger("script_name").getLogger()
+# logger.info("message to be shown with - INFO - ")
+# logger.debug("message to be shown with - DEBUG -")
+
+import logging
+import os
+
+
class Logger:
    # Thin wrapper around the stdlib logging module. Builds a logger with:
    #  - a console handler whose level is driven by the CI_DEBUG env var
    #  - a file handler that always captures DEBUG records
    def __init__(self, logger_name):
        """Create and configure the named logger.

        logger_name: name passed to logging.getLogger(); appears in records.
        """

        CI_DEBUG = os.getenv('CI_DEBUG')

        self.logger = logging.getLogger(logger_name)
        self.logger.propagate = 0
        self.logger.setLevel(logging.DEBUG)

        ch = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                      '%(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        # Console is verbose only when CI explicitly asks for it.
        if CI_DEBUG is not None and CI_DEBUG.lower() == "true":
            ch.setLevel(logging.DEBUG)
        else:
            ch.setLevel(logging.INFO)
        self.logger.addHandler(ch)

        # NOTE(review): hard-coded path; FileHandler raises if the results
        # directory does not exist -- TODO confirm it is pre-created by CI.
        hdlr = logging.FileHandler('/home/opnfv/functest/results/functest.log')
        hdlr.setFormatter(formatter)
        hdlr.setLevel(logging.DEBUG)
        self.logger.addHandler(hdlr)

    def getLogger(self):
        """Return the configured logging.Logger instance."""
        return self.logger
diff --git a/functest/utils/functest_utils.py b/functest/utils/functest_utils.py
new file mode 100644
index 00000000..41b6485d
--- /dev/null
+++ b/functest/utils/functest_utils.py
@@ -0,0 +1,449 @@
+#!/usr/bin/env python
+#
+# jose.lausuch@ericsson.com
+# valentin.boucher@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+import urllib2
+from datetime import datetime as dt
+
+import dns.resolver
+import requests
+import yaml
+from git import Repo
+
+import functest.utils.functest_logger as ft_logger
+
+logger = ft_logger.Logger("functest_utils").getLogger()
+
+REPOS_DIR = os.getenv('repos_dir')
+FUNCTEST_REPO = ("%s/functest" % REPOS_DIR)
+
+
+# ----------------------------------------------------------
+#
+# INTERNET UTILS
+#
+# -----------------------------------------------------------
def check_internet_connectivity(url='http://www.opnfv.org/'):
    """Return True when *url* answers within 5 seconds, False otherwise."""
    try:
        urllib2.urlopen(url, timeout=5)
    except urllib2.URLError:
        return False
    return True
+
+
def download_url(url, dest_path):
    """
    Download a file to a destination path given a URL

    The file name is taken from the last path component of the URL.
    Returns True on success, False when the URL cannot be opened.
    """
    name = url.rsplit('/')[-1]
    dest = dest_path + "/" + name
    try:
        response = urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError):
        return False

    # BUG FIX: the response object was never closed, leaking the socket.
    # urllib2 responses are not context managers, so close explicitly.
    try:
        with open(dest, 'wb') as f:
            shutil.copyfileobj(response, f)
    finally:
        response.close()
    return True
+
+
+# ----------------------------------------------------------
+#
+# CI UTILS
+#
+# -----------------------------------------------------------
def get_git_branch(repo_path):
    """Return the name of the currently checked-out branch of *repo_path*."""
    return Repo(repo_path).active_branch.name
+
+
def get_installer_type():
    """Return the installer type (fuel, apex, joid, compass) from the
    INSTALLER_TYPE env var, or "Unknown_installer" when unset."""
    installer = os.environ.get('INSTALLER_TYPE')
    if installer is None:
        logger.error("Impossible to retrieve the installer type")
        installer = "Unknown_installer"
    return installer
+
+
def get_scenario():
    """Return the deploy scenario from the DEPLOY_SCENARIO env var,
    or "Unknown_scenario" when unset."""
    scenario = os.environ.get('DEPLOY_SCENARIO')
    if scenario is None:
        logger.error("Impossible to retrieve the scenario")
        scenario = "Unknown_scenario"
    return scenario
+
+
def get_version():
    """Return the branch part of the jenkins build tag, or "unknown".

    CI build tags look like
    jenkins-<project>-<installer>-<pod>-<job>-<branch>-<id>,
    e.g. jenkins-functest-fuel-opnfv-jump-2-daily-master-190.
    """
    match = re.search("daily-(.+?)-[0-9]*", get_build_tag())
    return match.group(1) if match else "unknown"
+
+
def get_pod_name():
    """Return the PoD name from the NODE_NAME env var, or 'unknown-pod'."""
    pod = os.environ.get('NODE_NAME')
    if pod is None:
        logger.error(
            "Unable to retrieve the POD name from environment. " +
            "Using pod name 'unknown-pod'")
        pod = "unknown-pod"
    return pod
+
+
def get_build_tag():
    """Return the jenkins BUILD_TAG env var, or 'unknown_build_tag'."""
    build_tag = os.environ.get('BUILD_TAG')
    if build_tag is None:
        logger.error("Impossible to retrieve the build tag")
        build_tag = "unknown_build_tag"
    return build_tag
+
+
def get_db_url():
    """Return the results DB URL from the functest configuration."""
    db_url = get_functest_config('results.test_db_url')
    return db_url
+
+
def logger_test_results(project, case_name, status, details):
    """Log a human-readable summary banner for one test case result."""
    fields = {'p': project,
              'n': case_name,
              'db': get_db_url(),
              'pod': get_pod_name(),
              'v': get_version(),
              's': get_scenario(),
              'c': status,
              'b': get_build_tag(),
              'd': details}
    banner = (
        "\n"
        "****************************************\n"
        "\t %(p)s/%(n)s results \n\n"
        "****************************************\n"
        "DB:\t%(db)s\n"
        "pod:\t%(pod)s\n"
        "version:\t%(v)s\n"
        "scenario:\t%(s)s\n"
        "status:\t%(c)s\n"
        "build tag:\t%(b)s\n"
        "details:\t%(d)s\n" % fields)
    logger.info(banner)
+
+
def push_results_to_db(project, case_name,
                       start_date, stop_date, criteria, details):
    """
    POST results to the Result target DB

    project/case_name: identify the test case.
    start_date/stop_date: POSIX timestamps, formatted as
        '%Y-%m-%d %H:%M:%S' before upload.
    criteria: PASS/FAIL criteria string stored with the result.
    details: payload stored with the result (must be JSON-serializable).

    Returns True on success, False on any failure (missing CI env vars,
    malformed BUILD_TAG, or HTTP/connection error).
    """
    # Retrieve params from CI and conf
    url = get_db_url() + "/results"

    try:
        installer = os.environ['INSTALLER_TYPE']
        scenario = os.environ['DEPLOY_SCENARIO']
        pod_name = os.environ['NODE_NAME']
        build_tag = os.environ['BUILD_TAG']
    except KeyError as e:
        logger.error("Please set env var: " + str(e))
        return False
    # extract the branch ("version") from the jenkins build tag, e.g.
    # jenkins-functest-fuel-opnfv-jump-2-daily-master-190 -> master
    rule = "daily-(.+?)-[0-9]*"
    m = re.search(rule, build_tag)
    if m:
        version = m.group(1)
    else:
        logger.error("Please fix BUILD_TAG env var: " + build_tag)
        return False
    test_start = dt.fromtimestamp(start_date).strftime('%Y-%m-%d %H:%M:%S')
    test_stop = dt.fromtimestamp(stop_date).strftime('%Y-%m-%d %H:%M:%S')

    params = {"project_name": project, "case_name": case_name,
              "pod_name": pod_name, "installer": installer,
              "version": version, "scenario": scenario, "criteria": criteria,
              "build_tag": build_tag, "start_date": test_start,
              "stop_date": test_stop, "details": details}

    error = None
    headers = {'Content-Type': 'application/json'}
    try:
        r = requests.post(url, data=json.dumps(params), headers=headers)
        logger.debug(r)
        r.raise_for_status()
    except requests.RequestException as exc:
        # 'r' exists in locals() only when the server answered (HTTP error
        # status); otherwise the request itself failed (connection error...)
        if 'r' in locals():
            error = ("Pushing Result to DB(%s) failed: %s" %
                     (r.url, r.content))
        else:
            error = ("Pushing Result to DB(%s) failed: %s" % (url, exc))
    except Exception as e:
        # catch-all (e.g. non-serializable details) with full context dump
        error = ("Error [push_results_to_db("
                 "DB: '%(db)s', "
                 "project: '%(project)s', "
                 "case: '%(case)s', "
                 "pod: '%(pod)s', "
                 "version: '%(v)s', "
                 "scenario: '%(s)s', "
                 "criteria: '%(c)s', "
                 "build_tag: '%(t)s', "
                 "details: '%(d)s')]: "
                 "%(error)s" %
                 {
                     'db': url,
                     'project': project,
                     'case': case_name,
                     'pod': pod_name,
                     'v': version,
                     's': scenario,
                     'c': criteria,
                     't': build_tag,
                     'd': details,
                     'error': e
                 })
    finally:
        # NOTE: 'return' inside 'finally' swallows any in-flight exception;
        # here it only runs after the except blocks set 'error'.
        if error:
            logger.error(error)
            return False
    return True
+
+
def get_resolvconf_ns():
    """
    Get nameservers from current resolv.conf

    Reads /etc/resolv.conf, extracts every IPv4 address, and returns the
    subset of nameserver IPs that actually answer a DNS query for
    opnfv.org.
    """
    nameservers = []
    resolver = dns.resolver.Resolver()
    # use a context manager: the original leaked the file handle
    with open("/etc/resolv.conf", "r") as rconf:
        for line in rconf:
            ip = re.search(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", line)
            if not ip:
                continue
            # BUG FIX: the original assigned [str(ip)] -- the repr of the
            # match object -- instead of the matched IP string.
            resolver.nameservers = [ip.group()]
            try:
                result = resolver.query('opnfv.org')[0]
                if result != "":
                    nameservers.append(ip.group())
            except dns.exception.Timeout:
                pass
    return nameservers
+
+
def get_ci_envvars():
    """Return the CI-relevant environment variables as a dict
    (values are None when the variables are unset)."""
    return {
        "installer": os.environ.get('INSTALLER_TYPE'),
        "scenario": os.environ.get('DEPLOY_SCENARIO'),
    }
+
+
def execute_command(cmd, info=False, error_msg="",
                    verbose=True, output_file=None):
    """Run *cmd* in a shell, streaming combined stdout/stderr.

    info: log the "Executing command" line at INFO instead of DEBUG.
    error_msg: message logged on non-zero exit (default mentions cmd).
    verbose: when False, nothing is logged.
    output_file: when given, output is written there instead of stdout.

    Returns the command's exit code.
    """
    if not error_msg:
        error_msg = ("The command '%s' failed." % cmd)
    msg_exec = ("Executing command: '%s'" % cmd)
    if verbose:
        if info:
            logger.info(msg_exec)
        else:
            logger.debug(msg_exec)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # BUG FIX: the output file used to leak if an exception occurred while
    # streaming; close it (and the pipe) in a finally block.
    out_f = open(output_file, "w") if output_file else None
    try:
        for line in iter(p.stdout.readline, b''):
            if not isinstance(line, str):
                # py3 pipes yield bytes; decode so both modes get text
                line = line.decode(errors="replace")
            if out_f:
                out_f.write(line)
            else:
                sys.stdout.write(line)
                sys.stdout.flush()
    finally:
        if out_f:
            out_f.close()
        p.stdout.close()
    returncode = p.wait()
    if returncode != 0 and verbose:
        logger.error(error_msg)

    return returncode
+
+
def get_deployment_dir():
    """
    Returns current Rally deployment directory

    Resolves the deployment UUID by shelling out to 'rally deployment list'
    and filtering with awk, then builds the tempest for-deployment path
    under the rally install dir. Exits the process (-1) when no deployment
    is found.
    """
    deployment_name = get_functest_config('rally.deployment_name')
    rally_dir = get_functest_config('general.directories.dir_rally_inst')
    cmd = ("rally deployment list | awk '/" + deployment_name +
           "/ {print $2}'")
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    # first line of output is the UUID column printed by awk
    deployment_uuid = p.stdout.readline().rstrip()
    if deployment_uuid == "":
        logger.error("Rally deployment not found.")
        exit(-1)
    deployment_dir = (rally_dir + "/tempest/for-deployment-" +
                      deployment_uuid)
    return deployment_dir
+
+
def get_dict_by_test(testname):
    """Return the testcases.yaml dict describing *testname*, or None
    (an error is logged) when the test is not defined."""
    with open(get_testcases_file()) as f:
        testcases_yaml = yaml.safe_load(f)

    for tier in testcases_yaml.get("tiers"):
        for testcase in tier['testcases']:
            if testcase['name'] == testname:
                return testcase

    logger.error('Project %s is not defined in testcases.yaml' % testname)
    return None
+
+
def get_criteria_by_test(testname):
    """Return the 'criteria' entry for *testname* from testcases.yaml,
    or None when the test is not defined."""
    # IDIOM FIX: the local was previously named 'dict', shadowing the builtin
    testcase = get_dict_by_test(testname)
    if testcase:
        return testcase['criteria']
    return None
+
+
+# ----------------------------------------------------------
+#
+# YAML UTILS
+#
+# -----------------------------------------------------------
def get_parameter_from_yaml(parameter, file):
    """
    Returns the value of a given parameter in file.yaml
    parameter must be given in string format with dots
    Example: general.openstack.image_name

    Raises ValueError when the parameter (or any intermediate key) is
    missing from the file.
    """
    # NOTE: 'file' shadows the py2 builtin but is kept -- callers may pass
    # it by keyword.
    with open(file) as f:
        file_yaml = yaml.safe_load(f)
    value = file_yaml
    for element in parameter.split("."):
        value = value.get(element)
        if value is None:
            # BUG FIX: report the file actually parsed instead of the
            # hard-coded 'config_functest.yaml'
            raise ValueError("The parameter %s is not defined in"
                             " %s" % (parameter, file))
    return value
+
+
def get_functest_config(parameter):
    """Look up *parameter* in the YAML file named by the
    CONFIG_FUNCTEST_YAML env var."""
    return get_parameter_from_yaml(parameter,
                                   os.environ["CONFIG_FUNCTEST_YAML"])
+
+
def check_success_rate(case_name, success_rate):
    """Evaluate *success_rate* (a percentage) against the criteria declared
    for *case_name* in testcases.yaml.

    Criteria look like '==100%' or '>=90%'. '==' takes precedence when both
    operators appear. Returns 'PASS' or 'FAIL'.
    """
    success_rate = float(success_rate)
    criteria = get_criteria_by_test(case_name)

    def get_criteria_value(op):
        # number between the operator and the optional trailing '%'
        return float(criteria.split(op)[1].rstrip('%'))

    # SECURITY/IDIOM FIX: compare directly instead of eval()-ing a string
    # assembled from configuration data.
    status = 'FAIL'
    if '==' in criteria:
        if success_rate == get_criteria_value('=='):
            status = 'PASS'
    elif '>=' in criteria:
        if success_rate >= get_criteria_value('>='):
            status = 'PASS'

    return status
+
+
def merge_dicts(dict1, dict2):
    """Yield (key, value) pairs of the recursive merge of two dicts.

    On a conflicting key, dict2 wins -- except when both values are dicts,
    which are merged recursively.
    """
    for key in set(dict1) | set(dict2):
        in_both = key in dict1 and key in dict2
        if in_both:
            v1, v2 = dict1[key], dict2[key]
            if isinstance(v1, dict) and isinstance(v2, dict):
                yield key, dict(merge_dicts(v1, v2))
            else:
                yield key, v2
        elif key in dict1:
            yield key, dict1[key]
        else:
            yield key, dict2[key]
+
+
def check_test_result(test_name, ret, start_time, stop_time):
    """Compare *ret* against the '==' criteria of *test_name*.

    Returns (status, details) where status is 'PASS'/'FAIL' and details
    holds start time, rounded duration and the status.
    """
    expected = get_criteria_by_test(test_name).split('==')[1].strip()
    status = 'PASS' if str(ret) == expected else 'FAIL'

    details = {
        'timestart': start_time,
        'duration': round(stop_time - start_time, 1),
        'status': status,
    }

    return status, details
+
+
def get_testcases_file():
    # Path of testcases.yaml inside the functest repo; FUNCTEST_REPO is
    # derived from the 'repos_dir' env var at module import time.
    return FUNCTEST_REPO + "/ci/testcases.yaml"
+
+
def get_functest_yaml():
    """Load and return the whole functest config YAML; the path is taken
    from the CONFIG_FUNCTEST_YAML env var."""
    with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
        # the 'with' block closes the file; the old explicit f.close()
        # inside the block was dead code
        return yaml.safe_load(f)
diff --git a/functest/utils/functest_vacation.py b/functest/utils/functest_vacation.py
new file mode 100644
index 00000000..0ba09447
--- /dev/null
+++ b/functest/utils/functest_vacation.py
@@ -0,0 +1,52 @@
+from os import environ
+from curses import initscr, curs_set, newwin, endwin,\
+ KEY_RIGHT, KEY_LEFT, KEY_DOWN, KEY_UP
+from random import randrange
+
+
def main():
    """Snake easter egg: steer the '~FUNTEST' snake to eat '@' bugs.

    Arrow keys move; ESC quits; hitting a wall or yourself ends the game.
    """
    environ["TERM"] = 'Eterm'
    initscr()
    curs_set(0)
    try:
        win = newwin(16, 60, 0, 0)
        win.keypad(True)
        win.nodelay(True)
        win.border('|', '|', '-', '-', '+', '+', '+', '+')
        win.addch(4, 44, '@')
        win.addstr(0, 5, ' Eat all the OPNFV bugs by FunTest! ')
        win.addstr(15, 7, ' Left,Right,Up,Down: move; other keys: quit ')
        # snake segments as [x, y] pairs, head first
        snake = [[20, 7], [19, 7], [18, 7], [17, 7],
                 [16, 7], [15, 7], [14, 7], [13, 7]]
        key = KEY_RIGHT
        body = '~FUNTEST'
        ind = 0
        while key != 27:  # 27 == ESC
            win.addstr(0, 44, ' Score: '+str(len(snake)-len(body))+' ')
            # speed up as the snake grows
            win.timeout(140 - 2 * len(snake))
            getkey = win.getch()
            key = key if getkey == -1 else getkey
            # prepend the new head, moved one cell in the current direction
            # (the and/or chains yield +1, -1 or False==0 per axis)
            snake.insert(
                0, [snake[0][0]+(key == KEY_RIGHT and 1 or
                                 key == KEY_LEFT and -1),
                    snake[0][1]+(key == KEY_DOWN and 1 or
                                 key == KEY_UP and -1)])
            win.addch(snake[len(snake)-1][1], snake[len(snake)-1][0], ' ')
            if win.inch(snake[0][1], snake[0][0]) & 255 == 32:
                # moved onto empty space: drop the tail
                snake.pop()
            elif win.inch(snake[0][1], snake[0][0]) & 255 == ord('@'):
                # ate a bug: keep the tail (grow) and spawn a new '@'
                # at a random free cell (fallback to the start cell)
                c = [n for n in [[randrange(1, 58, 1), randrange(1, 14, 1)]
                                 for x in range(len(snake))] if n not in snake]
                win.addch(c == [] and 4 or c[0][1],
                          c == [] and 44 or c[0][0], '@')
            else:
                # wall or self collision
                break
            ind += 1
            win.addch(snake[0][1], snake[0][0], body[ind % len(body)])
    finally:
        endwin()

    print '\nSnake.PY-26lines by Kris Cieslak (defaultset.blogspot.com).'
    print 'OPNFV adaptation by Functest dream team.'
    print 'Thanks for playing, your score: '+str(len(snake)-len(body)-1)+'.'
    print 'Find and fix more bugs in your real OPNFV setup!\n'
diff --git a/functest/utils/openstack_clean.py b/functest/utils/openstack_clean.py
new file mode 100755
index 00000000..bf582dea
--- /dev/null
+++ b/functest/utils/openstack_clean.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+#
+# Description:
+# Cleans possible leftovers after running functest tests:
+# - Nova instances
+# - Glance images
+# - Cinder volumes
+# - Floating IPs
+# - Neutron networks, subnets and ports
+# - Routers
+# - Users and tenants
+#
+# Author:
+# jose.lausuch@ericsson.com
+#
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import time
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import yaml
+
+
+logger = ft_logger.Logger("openstack_clean").getLogger()
+
+OS_SNAPSHOT_FILE = \
+ ft_utils.get_functest_config("general.openstack.snapshot_file")
+
+
def separator():
    # Visual divider between cleanup stages in the debug log.
    logger.debug("-------------------------------------------")
+
+
def remove_instances(nova_client, default_instances):
    """Delete every Nova instance not recorded in the snapshot
    (*default_instances*: {id: name}), then wait (max ~50s) until the
    non-default instances are actually gone."""
    logger.debug("Removing Nova instances...")
    instances = os_utils.get_instances(nova_client)
    if instances is None or len(instances) == 0:
        logger.debug("No instances found.")
        return

    for instance in instances:
        instance_name = getattr(instance, 'name')
        instance_id = getattr(instance, 'id')
        logger.debug("'%s', ID=%s " % (instance_name, instance_id))
        if (instance_id not in default_instances and
                instance_name not in default_instances.values()):
            logger.debug("Removing instance '%s' ..." % instance_id)
            if os_utils.delete_instance(nova_client, instance_id):
                logger.debug(" > Request sent.")
            else:
                logger.error("There has been a problem removing the "
                             "instance %s..." % instance_id)
        else:
            logger.debug(" > this is a default instance and will "
                         "NOT be deleted.")

    timeout = 50
    while timeout > 0:
        instances = os_utils.get_instances(nova_client) or []
        # BUG FIX: the original always broke out of the wait after a single
        # pass -- its 'continue' only affected the inner for loop, so the
        # trailing 'break' ran unconditionally.
        if any(getattr(instance, 'id') not in default_instances
               for instance in instances):
            logger.debug("Waiting for instances to be terminated...")
            timeout -= 1
            time.sleep(1)
        else:
            break
+
+
def remove_images(nova_client, default_images):
    """Delete every Glance image not recorded in *default_images*
    ({id: name}); default images are kept."""
    logger.debug("Removing Glance images...")
    images = os_utils.get_images(nova_client)
    if not images:
        logger.debug("No images found.")
        return

    for image in images:
        name = getattr(image, 'name')
        image_id = getattr(image, 'id')
        logger.debug("'%s', ID=%s " % (name, image_id))
        is_default = (image_id in default_images or
                      name in default_images.values())
        if is_default:
            logger.debug(" > this is a default image and will "
                         "NOT be deleted.")
            continue
        logger.debug("Removing image '%s', ID=%s ..." % (name, image_id))
        if os_utils.delete_glance_image(nova_client, image_id):
            logger.debug(" > Done!")
        else:
            logger.error("There has been a problem removing the"
                         "image %s..." % image_id)
+
+
def remove_volumes(cinder_client, default_volumes):
    """Delete every Cinder volume not recorded in *default_volumes*
    ({id: display_name}); retries once with a forced delete."""
    logger.debug("Removing Cinder volumes...")
    volumes = os_utils.get_volumes(cinder_client)
    if not volumes:
        logger.debug("No volumes found.")
        return

    for volume in volumes:
        volume_id = getattr(volume, 'id')
        volume_name = getattr(volume, 'display_name')
        logger.debug("'%s', ID=%s " % (volume_name, volume_id))
        if (volume_id in default_volumes or
                volume_name in default_volumes.values()):
            logger.debug(" > this is a default volume and will "
                         "NOT be deleted.")
            continue
        logger.debug("Removing cinder volume %s ..." % volume_id)
        if os_utils.delete_volume(cinder_client, volume_id):
            logger.debug(" > Done!")
            continue
        logger.debug("Trying forced removal...")
        if os_utils.delete_volume(cinder_client, volume_id, forced=True):
            logger.debug(" > Done!")
        else:
            logger.error("There has been a problem removing the "
                         "volume %s..." % volume_id)
+
+
def remove_floatingips(nova_client, default_floatingips):
    """Release every floating IP not recorded in the snapshot
    (*default_floatingips*: {id: ip}), then wait (max ~50s) until the
    requested deletions are reflected by the API."""
    logger.debug("Removing floating IPs...")
    floatingips = os_utils.get_floating_ips(nova_client)
    if floatingips is None or len(floatingips) == 0:
        logger.debug("No floating IPs found.")
        return

    # track how many deletions were requested so the wait loop below can
    # detect when the list has shrunk accordingly
    init_len = len(floatingips)
    deleted = 0
    for fip in floatingips:
        fip_id = getattr(fip, 'id')
        fip_ip = getattr(fip, 'ip')
        logger.debug("'%s', ID=%s " % (fip_ip, fip_id))
        if (fip_id not in default_floatingips and
                fip_ip not in default_floatingips.values()):
            logger.debug("Removing floating IP %s ..." % fip_id)
            if os_utils.delete_floating_ip(nova_client, fip_id):
                logger.debug(" > Done!")
                deleted += 1
            else:
                logger.error("There has been a problem removing the "
                             "floating IP %s..." % fip_id)
        else:
            logger.debug(" > this is a default floating IP and will "
                         "NOT be deleted.")

    timeout = 50
    while timeout > 0:
        floatingips = os_utils.get_floating_ips(nova_client)
        if floatingips is None or len(floatingips) == (init_len - deleted):
            break
        else:
            logger.debug("Waiting for floating ips to be released...")
            timeout -= 1
            time.sleep(1)
+
+
def remove_networks(neutron_client, default_networks, default_routers):
    """Remove non-default Neutron networks with their ports and routers.

    Order matters: ports are deleted/detached first, then routers, then
    (after a grace period) the networks themselves. Default and external
    networks are never touched.
    """
    logger.debug("Removing Neutron objects")
    network_ids = []
    networks = os_utils.get_network_list(neutron_client)
    if networks is None:
        logger.debug("There are no networks in the deployment. ")
    else:
        logger.debug("Existing networks:")
        for network in networks:
            net_id = network['id']
            net_name = network['name']
            logger.debug(" '%s', ID=%s " % (net_name, net_id))
            # kept only when BOTH id and name match the snapshot
            if (net_id in default_networks and
                    net_name in default_networks.values()):
                logger.debug(" > this is a default network and will "
                             "NOT be deleted.")
            elif network['router:external'] is True:
                logger.debug(" > this is an external network and will "
                             "NOT be deleted.")
            else:
                logger.debug(" > this network will be deleted.")
                network_ids.append(net_id)

    # delete ports
    ports = os_utils.get_port_list(neutron_client)
    if ports is None:
        logger.debug("There are no ports in the deployment. ")
    else:
        remove_ports(neutron_client, ports, network_ids)

    # remove routers
    routers = os_utils.get_router_list(neutron_client)
    if routers is None:
        logger.debug("There are no routers in the deployment. ")
    else:
        remove_routers(neutron_client, routers, default_routers)

    # trozet: wait for Neutron to auto-cleanup HA networks when HA router is
    # deleted
    time.sleep(5)

    # remove networks
    if network_ids is not None:
        for net_id in network_ids:
            # re-list each time: earlier deletions may cascade
            networks = os_utils.get_network_list(neutron_client)
            if networks is None:
                logger.debug("No networks left to remove")
                break
            elif not any(network['id'] == net_id for network in networks):
                logger.debug("Network %s has already been removed" % net_id)
                continue
            logger.debug("Removing network %s ..." % net_id)
            if os_utils.delete_neutron_net(neutron_client, net_id):
                logger.debug(" > Done!")
            else:
                logger.error("There has been a problem removing the "
                             "network %s..." % net_id)
+
+
def remove_ports(neutron_client, ports, network_ids):
    """Delete or detach every Neutron port belonging to one of
    *network_ids*: orphan ports are deleted directly, router interfaces
    are detached from their router, anything else is force-removed."""
    for port in ports:
        if port['network_id'] not in network_ids:
            continue
        port_id = port['id']
        # BUG FIX: subnet_id stayed undefined when fixed_ips was missing or
        # empty, crashing the router-interface branch below; default it.
        subnet_id = None
        try:
            subnet_id = port['fixed_ips'][0]['subnet_id']
        except (KeyError, IndexError):
            logger.debug(" > WARNING: Port %s does not contain fixed_ips"
                         % port_id)
            logger.info(port)
        router_id = port['device_id']
        if len(port['fixed_ips']) == 0 and router_id == '':
            logger.debug("Removing port %s ..." % port_id)
            if os_utils.delete_neutron_port(neutron_client, port_id):
                logger.debug(" > Done!")
            else:
                logger.error("There has been a problem removing the "
                             "port %s ..." % port_id)
                force_remove_port(neutron_client, port_id)

        elif port['device_owner'] == 'network:router_interface':
            logger.debug("Detaching port %s (subnet %s) from router %s ..."
                         % (port_id, subnet_id, router_id))
            if os_utils.remove_interface_router(
                    neutron_client, router_id, subnet_id):
                time.sleep(5)  # leave 5 seconds to detach
                logger.debug(" > Done!")
            else:
                logger.error("There has been a problem removing the "
                             "interface %s from router %s..."
                             % (subnet_id, router_id))
                force_remove_port(neutron_client, port_id)
        else:
            force_remove_port(neutron_client, port_id)
+
+
def force_remove_port(neutron_client, port_id):
    """Last-resort port removal: clear its device_owner, then delete it."""
    logger.debug("Clearing device_owner for port %s ..." % port_id)
    os_utils.update_neutron_port(neutron_client, port_id,
                                 device_owner='clear')
    logger.debug("Removing port %s ..." % port_id)
    if not os_utils.delete_neutron_port(neutron_client, port_id):
        logger.error("There has been a problem removing the port %s..."
                     % port_id)
        return
    logger.debug(" > Done!")
+
+
def remove_routers(neutron_client, routers, default_routers):
    """Delete every non-default router, detaching its external gateway
    first (a router with a gateway cannot be deleted directly)."""
    for router in routers:
        router_id = router['id']
        router_name = router['name']
        if (router_id not in default_routers and
                router_name not in default_routers.values()):
            logger.debug("Checking '%s' with ID=(%s) ..." % (router_name,
                                                             router_id))
            if router['external_gateway_info'] is not None:
                logger.debug("Router has gateway to external network."
                             "Removing link...")
                if os_utils.remove_gateway_router(neutron_client, router_id):
                    logger.debug(" > Done!")
                else:
                    logger.error("There has been a problem removing "
                                 "the gateway...")
            else:
                logger.debug("Router is not connected to anything."
                             "Ready to remove...")
            # delete attempted even if the gateway removal failed above
            logger.debug("Removing router %s(%s) ..."
                         % (router_name, router_id))
            if os_utils.delete_neutron_router(neutron_client, router_id):
                logger.debug(" > Done!")
            else:
                logger.error("There has been a problem removing the "
                             "router '%s'(%s)..." % (router_name, router_id))
+
+
def remove_security_groups(neutron_client, default_security_groups):
    """Delete every security group whose id is not in
    *default_security_groups* (names are not matched, only ids)."""
    logger.debug("Removing Security groups...")
    secgroups = os_utils.get_security_groups(neutron_client)
    if not secgroups:
        logger.debug("No security groups found.")
        return

    for secgroup in secgroups:
        name = secgroup['name']
        sg_id = secgroup['id']
        logger.debug("'%s', ID=%s " % (name, sg_id))
        if sg_id in default_security_groups:
            logger.debug(" > this is a default security group and will NOT "
                         "be deleted.")
            continue
        logger.debug(" Removing '%s'..." % name)
        if os_utils.delete_security_group(neutron_client, sg_id):
            logger.debug(" > Done!")
        else:
            logger.error("There has been a problem removing the "
                         "security group %s..." % sg_id)
+
+
def remove_users(keystone_client, default_users):
    """Delete every Keystone user not recorded in *default_users*
    ({id: name})."""
    logger.debug("Removing Users...")
    users = os_utils.get_users(keystone_client)
    if users is None:
        logger.debug("There are no users in the deployment. ")
        return

    for user in users:
        user_name = getattr(user, 'name')
        user_id = getattr(user, 'id')
        logger.debug("'%s', ID=%s " % (user_name, user_id))
        if (user_id in default_users or
                user_name in default_users.values()):
            logger.debug(" > this is a default user and will "
                         "NOT be deleted.")
            continue
        logger.debug(" Removing '%s'..." % user_name)
        if os_utils.delete_user(keystone_client, user_id):
            logger.debug(" > Done!")
        else:
            logger.error("There has been a problem removing the "
                         "user '%s'(%s)..." % (user_name, user_id))
+
+
def remove_tenants(keystone_client, default_tenants):
    """Delete every Keystone tenant not recorded in *default_tenants*
    ({id: name})."""
    logger.debug("Removing Tenants...")
    tenants = os_utils.get_tenants(keystone_client)
    if tenants is None:
        logger.debug("There are no tenants in the deployment. ")
        return

    for tenant in tenants:
        tenant_name = getattr(tenant, 'name')
        tenant_id = getattr(tenant, 'id')
        logger.debug("'%s', ID=%s " % (tenant_name, tenant_id))
        if (tenant_id in default_tenants or
                tenant_name in default_tenants.values()):
            logger.debug(" > this is a default tenant and will "
                         "NOT be deleted.")
            continue
        logger.debug(" Removing '%s'..." % tenant_name)
        if os_utils.delete_tenant(keystone_client, tenant_id):
            logger.debug(" > Done!")
        else:
            logger.error("There has been a problem removing the "
                         "tenant '%s'(%s)..." % (tenant_name, tenant_id))
+
+
def main():
    """Entry point: restore the deployment to the state recorded in the
    snapshot file by deleting everything created since."""
    logger.info("Cleaning OpenStack resources...")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    keystone_client = os_utils.get_keystone_client()
    cinder_client = os_utils.get_cinder_client()

    try:
        with open(OS_SNAPSHOT_FILE) as f:
            snapshot_yaml = yaml.safe_load(f)
    except Exception:
        # without a snapshot we cannot tell default resources from leftovers
        logger.info("The file %s does not exist. The OpenStack snapshot must"
                    " be created first. Aborting cleanup." % OS_SNAPSHOT_FILE)
        exit(0)

    # per-resource {id: name} maps considered "default" (never deleted)
    default_images = snapshot_yaml.get('images')
    default_instances = snapshot_yaml.get('instances')
    default_volumes = snapshot_yaml.get('volumes')
    default_networks = snapshot_yaml.get('networks')
    default_routers = snapshot_yaml.get('routers')
    default_security_groups = snapshot_yaml.get('secgroups')
    default_floatingips = snapshot_yaml.get('floatingips')
    default_users = snapshot_yaml.get('users')
    default_tenants = snapshot_yaml.get('tenants')

    if not os_utils.check_credentials():
        logger.error("Please source the openrc credentials and run "
                     "the script again.")
        exit(-1)

    # deletion order matters: instances before volumes/networks, etc.
    remove_instances(nova_client, default_instances)
    separator()
    remove_images(nova_client, default_images)
    separator()
    remove_volumes(cinder_client, default_volumes)
    separator()
    remove_floatingips(nova_client, default_floatingips)
    separator()
    remove_networks(neutron_client, default_networks, default_routers)
    separator()
    remove_security_groups(neutron_client, default_security_groups)
    separator()
    remove_users(keystone_client, default_users)
    separator()
    remove_tenants(keystone_client, default_tenants)
    separator()


if __name__ == '__main__':
    main()
diff --git a/functest/utils/openstack_snapshot.py b/functest/utils/openstack_snapshot.py
new file mode 100755
index 00000000..560cadbd
--- /dev/null
+++ b/functest/utils/openstack_snapshot.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+#
+# Description:
+# Generates a list of the current Openstack objects in the deployment:
+# - Nova instances
+# - Glance images
+# - Cinder volumes
+# - Floating IPs
+# - Neutron networks, subnets and ports
+# - Routers
+# - Users and tenants
+#
+# Author:
+# jose.lausuch@ericsson.com
+#
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+import yaml
+
+
+logger = ft_logger.Logger("openstack_snapshot").getLogger()
+
+
+OS_SNAPSHOT_FILE = \
+ ft_utils.get_functest_config("general.openstack.snapshot_file")
+
+
def separator():
    # Visual divider between snapshot stages in the log.
    logger.info("-------------------------------------------")
+
+
def get_instances(nova_client):
    """Return {'instances': {id: name}} for all current Nova instances."""
    logger.debug("Getting instances...")
    dic_instances = {}
    instances = os_utils.get_instances(nova_client)
    if instances:
        dic_instances = dict((getattr(inst, 'id'), getattr(inst, 'name'))
                             for inst in instances)
    return {'instances': dic_instances}
+
+
def get_images(nova_client):
    """Return {'images': {id: name}} for all Glance images."""
    logger.debug("Getting images...")
    dic_images = {}
    images = os_utils.get_images(nova_client)
    if images:
        dic_images = dict((getattr(img, 'id'), getattr(img, 'name'))
                          for img in images)
    return {'images': dic_images}
+
+
def get_volumes(cinder_client):
    """Return {'volumes': {id: display_name}} for all Cinder volumes."""
    logger.debug("Getting volumes...")
    dic_volumes = {}
    volumes = os_utils.get_volumes(cinder_client)
    if volumes is not None:
        dic_volumes = dict((vol.id, vol.display_name) for vol in volumes)
    return {'volumes': dic_volumes}
+
+
def get_networks(neutron_client):
    """Return {'networks': {id: name}} for all Neutron networks."""
    logger.debug("Getting networks")
    dic_networks = {}
    networks = os_utils.get_network_list(neutron_client)
    if networks is not None:
        dic_networks = dict((net['id'], net['name']) for net in networks)
    return {'networks': dic_networks}
+
+
def get_routers(neutron_client):
    """Return {'routers': {id: name}} for all Neutron routers."""
    logger.debug("Getting routers")
    dic_routers = {}
    routers = os_utils.get_router_list(neutron_client)
    if routers is not None:
        dic_routers = dict((rtr['id'], rtr['name']) for rtr in routers)
    return {'routers': dic_routers}
+
+
def get_security_groups(neutron_client):
    """Return {'secgroups': {id: name}} for all security groups."""
    logger.debug("Getting Security groups...")
    dic_secgroups = {}
    secgroups = os_utils.get_security_groups(neutron_client)
    if secgroups:
        dic_secgroups = dict((sg['id'], sg['name']) for sg in secgroups)
    return {'secgroups': dic_secgroups}
+
+
def get_floatinips(nova_client):
    """Return {'floatingips': {id: ip}} for all floating IPs.

    NOTE: the misspelled function name ('floatinips') is preserved because
    callers reference it by this name.
    """
    logger.debug("Getting Floating IPs...")
    dic_floatingips = {}
    floatingips = os_utils.get_floating_ips(nova_client)
    if floatingips:
        dic_floatingips = dict((fip.id, fip.ip) for fip in floatingips)
    return {'floatingips': dic_floatingips}
+
+
def get_users(keystone_client):
    """Return {'users': {id: name}} for all Keystone users."""
    logger.debug("Getting users...")
    dic_users = {}
    users = os_utils.get_users(keystone_client)
    if users:
        dic_users = dict((getattr(user, 'id'), getattr(user, 'name'))
                         for user in users)
    return {'users': dic_users}
+
+
def get_tenants(keystone_client):
    """Return {'tenants': {id: name}} for all Keystone tenants."""
    logger.debug("Getting tenants...")
    dic_tenants = {}
    tenants = os_utils.get_tenants(keystone_client)
    if tenants:
        dic_tenants = dict((getattr(tnt, 'id'), getattr(tnt, 'name'))
                           for tnt in tenants)
    return {'tenants': dic_tenants}
+
+
def main():
    """Entry point: dump a YAML snapshot of all current OpenStack
    resources so openstack_clean can later tell defaults from leftovers."""
    logger.info("Generating OpenStack snapshot...")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    keystone_client = os_utils.get_keystone_client()
    cinder_client = os_utils.get_cinder_client()

    if not os_utils.check_credentials():
        logger.error("Please source the openrc credentials and run the" +
                     "script again.")
        exit(-1)

    snapshot = {}
    snapshot.update(get_instances(nova_client))
    snapshot.update(get_images(nova_client))
    snapshot.update(get_volumes(cinder_client))
    snapshot.update(get_networks(neutron_client))
    snapshot.update(get_routers(neutron_client))
    snapshot.update(get_security_groups(neutron_client))
    snapshot.update(get_floatinips(nova_client))
    snapshot.update(get_users(keystone_client))
    snapshot.update(get_tenants(keystone_client))

    # 'w+' so the freshly-written file can be read back for the debug trace
    with open(OS_SNAPSHOT_FILE, 'w+') as yaml_file:
        yaml_file.write(yaml.safe_dump(snapshot, default_flow_style=False))
        yaml_file.seek(0)
        logger.debug("Openstack Snapshot found in the deployment:\n%s"
                     % yaml_file.read())
    logger.debug("NOTE: These objects will NOT be deleted after " +
                 "running the test.")


if __name__ == '__main__':
    main()
diff --git a/functest/utils/openstack_tacker.py b/functest/utils/openstack_tacker.py
new file mode 100644
index 00000000..3e0c9cf4
--- /dev/null
+++ b/functest/utils/openstack_tacker.py
@@ -0,0 +1,249 @@
+###########################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# Author: George Paraskevopoulos <geopar@intracom-telecom.com>
+#
+# Wrappers for trozet's python-tackerclient v1.0
+# (https://github.com/trozet/python-tackerclient)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##########################################################################
+
+
+from tackerclient.v1_0 import client as tackerclient
+import functest.utils.functest_logger as ft_logger
+import functest.utils.openstack_utils as os_utils
+import yaml
+
+logger = ft_logger.Logger("tacker_utils").getLogger()
+
+
def get_tacker_client():
    """Return a v1.0 tacker client built from the 'tacker' openrc creds."""
    creds_tacker = os_utils.get_credentials('tacker')
    return tackerclient.Client(**creds_tacker)
+
+
+# *********************************************
+# TACKER
+# *********************************************
def get_id_from_name(tacker_client, resource_type, resource_name):
    """Resolve a tacker resource name to its id.

    :param resource_type: singular type, e.g. 'vnfd', 'vnf', 'sfc'
    :return: id of the first match, or None on any error
    """
    try:
        req_params = {'fields': 'id', 'name': resource_name}
        endpoint = '/{0}s'.format(resource_type)
        resp = tacker_client.get(endpoint, params=req_params)
        # The response is keyed by the plural resource name (endpoint
        # without the leading '/').
        return resp[endpoint[1:]][0]['id']
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [get_id_from_name(tacker_client, "
                     "resource_type, resource_name)]: %s" % e)
        return None
+
+
def get_vnfd_id(tacker_client, vnfd_name):
    """Return the id of the VNFD named *vnfd_name*, or None on error."""
    return get_id_from_name(tacker_client, 'vnfd', vnfd_name)


def get_vnf_id(tacker_client, vnf_name):
    """Return the id of the VNF named *vnf_name*, or None on error."""
    return get_id_from_name(tacker_client, 'vnf', vnf_name)


def get_sfc_id(tacker_client, sfc_name):
    """Return the id of the SFC named *sfc_name*, or None on error."""
    return get_id_from_name(tacker_client, 'sfc', sfc_name)


def get_sfc_classifier_id(tacker_client, sfc_clf_name):
    """Return the id of the SFC classifier *sfc_clf_name*, or None on error."""
    return get_id_from_name(tacker_client, 'sfc_classifier', sfc_clf_name)
+
+
def list_vnfds(tacker_client, verbose=False):
    """Return VNFD ids, or the raw listing when *verbose*; None on error."""
    try:
        vnfds = tacker_client.list_vnfds(retrieve_all=True)
        if not verbose:
            vnfds = [vnfd['id'] for vnfd in vnfds['vnfds']]
        return vnfds
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [list_vnfds(tacker_client)]: %s" % e)
        return None


def create_vnfd(tacker_client, tosca_file=None):
    """Create a VNFD from a TOSCA yaml file (empty body when no file).

    :return: the API response, or None on error
    """
    try:
        vnfd_body = {}
        if tosca_file is not None:
            with open(tosca_file) as tosca_fd:
                vnfd_body = yaml.safe_load(tosca_fd)
        return tacker_client.create_vnfd(body=vnfd_body)
    except Exception as e:
        logger.error("Error [create_vnfd(tacker_client, '%s')]: %s"
                     % (tosca_file, e))
        return None


def delete_vnfd(tacker_client, vnfd_id=None, vnfd_name=None):
    """Delete a VNFD by id, or by name when no id is given; None on error."""
    try:
        vnfd = vnfd_id
        if vnfd is None:
            if vnfd_name is None:
                raise Exception('You need to provide VNFD id or VNFD name')
            vnfd = get_vnfd_id(tacker_client, vnfd_name)
        return tacker_client.delete_vnfd(vnfd)
    except Exception as e:
        logger.error("Error [delete_vnfd(tacker_client, '%s', '%s')]: %s"
                     % (vnfd_id, vnfd_name, e))
        return None
+
+
def list_vnfs(tacker_client, verbose=False):
    """Return VNF ids, or the raw listing when *verbose*; None on error."""
    try:
        vnfs = tacker_client.list_vnfs(retrieve_all=True)
        if not verbose:
            vnfs = [vnf['id'] for vnf in vnfs['vnfs']]
        return vnfs
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [list_vnfs(tacker_client)]: %s" % e)
        return None


def create_vnf(tacker_client, vnf_name, vnfd_id=None, vnfd_name=None):
    """Create a VNF from a VNFD given by id (or resolved from name).

    :return: the API response, or None on error
    """
    try:
        vnf_body = {
            'vnf': {
                'attributes': {},
                'name': vnf_name
            }
        }
        if vnfd_id is not None:
            vnf_body['vnf']['vnfd_id'] = vnfd_id
        else:
            if vnfd_name is None:
                raise Exception('vnfd id or vnfd name is required')
            vnf_body['vnf']['vnfd_id'] = get_vnfd_id(tacker_client, vnfd_name)
        return tacker_client.create_vnf(body=vnf_body)
    except Exception as e:
        # Capitalized "Error" for consistency with the other helpers.
        logger.error("Error [create_vnf(tacker_client, '%s', '%s', '%s')]: %s"
                     % (vnf_name, vnfd_id, vnfd_name, e))
        return None


def delete_vnf(tacker_client, vnf_id=None, vnf_name=None):
    """Delete a VNF by id, or by name when no id is given; None on error."""
    try:
        vnf = vnf_id
        if vnf is None:
            if vnf_name is None:
                raise Exception('You need to provide a VNF id or name')
            vnf = get_vnf_id(tacker_client, vnf_name)
        return tacker_client.delete_vnf(vnf)
    except Exception as e:
        logger.error("Error [delete_vnf(tacker_client, '%s', '%s')]: %s"
                     % (vnf_id, vnf_name, e))
        return None
+
+
def list_sfcs(tacker_client, verbose=False):
    """Return SFC ids, or the raw listing when *verbose*; None on error."""
    try:
        sfcs = tacker_client.list_sfcs(retrieve_all=True)
        if not verbose:
            sfcs = [sfc['id'] for sfc in sfcs['sfcs']]
        return sfcs
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [list_sfcs(tacker_client)]: %s" % e)
        return None


def create_sfc(tacker_client, sfc_name,
               chain_vnf_ids=None,
               chain_vnf_names=None):
    """Create an SFC chaining the given VNFs (by id, or resolved by name).

    :return: the API response, or None on error
    """
    try:
        sfc_body = {
            'sfc': {
                'attributes': {},
                'name': sfc_name,
                'chain': []
            }
        }
        if chain_vnf_ids is not None:
            sfc_body['sfc']['chain'] = chain_vnf_ids
        else:
            if chain_vnf_names is None:
                raise Exception('You need to provide a chain of VNFs')
            sfc_body['sfc']['chain'] = [get_vnf_id(tacker_client, name)
                                        for name in chain_vnf_names]
        return tacker_client.create_sfc(body=sfc_body)
    except Exception as e:
        # Capitalized "Error" for consistency with the other helpers.
        logger.error("Error [create_sfc(tacker_client, '%s', '%s', '%s')]: %s"
                     % (sfc_name, chain_vnf_ids, chain_vnf_names, e))
        return None


def delete_sfc(tacker_client, sfc_id=None, sfc_name=None):
    """Delete an SFC by id, or by name when no id is given; None on error."""
    try:
        sfc = sfc_id
        if sfc is None:
            if sfc_name is None:
                raise Exception('You need to provide an SFC id or name')
            sfc = get_sfc_id(tacker_client, sfc_name)
        return tacker_client.delete_sfc(sfc)
    except Exception as e:
        logger.error("Error [delete_sfc(tacker_client, '%s', '%s')]: %s"
                     % (sfc_id, sfc_name, e))
        return None
+
+
def list_sfc_clasifiers(tacker_client, verbose=False):
    """Return SFC classifier ids, or the raw listing when *verbose*.

    NOTE: the function name keeps its historical typo ('clasifiers')
    because callers depend on it.

    :return: list/dict of classifiers, or None on error
    """
    try:
        sfc_clfs = tacker_client.list_sfc_classifiers(retrieve_all=True)
        if not verbose:
            sfc_clfs = [sfc_clf['id']
                        for sfc_clf in sfc_clfs['sfc_classifiers']]
        return sfc_clfs
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [list_sfc_classifiers(tacker_client)]: %s" % e)
        return None
+
+
def create_sfc_classifier(tacker_client, sfc_clf_name, sfc_id=None,
                          sfc_name=None, match=None):
    """Create an SFC classifier attached to an SFC (by id or name).

    Example match:
        {"source_port": "0", "protocol": "6", "dest_port": "80"}

    :return: the API response, or None on error

    Fixes vs the original: the mutable default ``match={}`` was shared
    between calls; the sfc_name branch wrote to a non-existent 'sfc' key
    (KeyError) instead of 'sfc_classifier'; the error log had 4 '%s'
    placeholders for 5 arguments (TypeError while logging).
    """
    if match is None:
        match = {}
    try:
        sfc_clf_body = {
            'sfc_classifier': {
                'attributes': {},
                'name': sfc_clf_name,
                'match': match,
                'chain': ''
            }
        }
        if sfc_id is not None:
            sfc_clf_body['sfc_classifier']['chain'] = sfc_id
        else:
            if sfc_name is None:
                raise Exception('You need to provide an SFC id or name')
            sfc_clf_body['sfc_classifier']['chain'] = get_sfc_id(
                tacker_client, sfc_name)
        return tacker_client.create_sfc_classifier(body=sfc_clf_body)
    except Exception as e:
        logger.error("Error [create_sfc_classifier(tacker_client, '%s', "
                     "'%s', '%s', '%s')]: %s"
                     % (sfc_clf_name, sfc_id, sfc_name, match, e))
        return None
+
+
def delete_sfc_classifier(tacker_client,
                          sfc_clf_id=None,
                          sfc_clf_name=None):
    """Delete an SFC classifier by id, or by name when no id is given.

    :return: the API response, or None on error
    """
    try:
        sfc_clf = sfc_clf_id
        if sfc_clf is None:
            if sfc_clf_name is None:
                # Fixed: the concatenated literals were missing a space
                # ("SFCclassifier").
                raise Exception('You need to provide an SFC '
                                'classifier id or name')
            sfc_clf = get_sfc_classifier_id(tacker_client, sfc_clf_name)
        return tacker_client.delete_sfc_classifier(sfc_clf)
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [delete_sfc_classifier(tacker_client, '%s', "
                     "'%s')]: %s" % (sfc_clf_id, sfc_clf_name, e))
        return None
diff --git a/functest/utils/openstack_utils.py b/functest/utils/openstack_utils.py
new file mode 100755
index 00000000..df6fb5d1
--- /dev/null
+++ b/functest/utils/openstack_utils.py
@@ -0,0 +1,1190 @@
+#!/usr/bin/env python
+#
+# jose.lausuch@ericsson.com
+# valentin.boucher@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import os
+import os.path
+import subprocess
+import sys
+import time
+
+from cinderclient import client as cinderclient
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+from glanceclient import client as glanceclient
+from keystoneclient.v2_0 import client as keystoneclient
+from neutronclient.v2_0 import client as neutronclient
+from novaclient import client as novaclient
+
+logger = ft_logger.Logger("openstack_utils").getLogger()
+
+
+# *********************************************
+# CREDENTIALS
+# *********************************************
class MissingEnvVar(Exception):
    """Raised when a mandatory openrc environment variable is not set."""

    def __init__(self, var):
        # Name of the missing environment variable.
        self.var = var

    def __str__(self):
        return "Please set the mandatory env var: {}".format(self.var)
+
+
def check_credentials():
    """Return True when all mandatory openrc variables are set and non-empty."""
    required = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME')
    return all(os.environ.get(var) for var in required)
+
+
def get_credentials(service):
    """Build a credentials dict for an OpenStack client from the environment.

    Keys: username, password/api_key and tenant_name/project_id (spelling
    depends on *service*), auth_url, plus optional endpoint_type,
    region_name and CA-certificate settings.

    :param service: name of the service requesting the credentials.
    :raises MissingEnvVar: when a mandatory openrc variable is unset.
    """
    # All four openrc variables are mandatory.
    for var in ('OS_USERNAME', 'OS_PASSWORD', 'OS_AUTH_URL', 'OS_TENANT_NAME'):
        if os.getenv(var) is None:
            raise MissingEnvVar(var)

    # nova and cinder expect different key names than the other clients.
    if service.lower() in ("nova", "cinder"):
        password_key, tenant_key = "api_key", "project_id"
    else:
        password_key, tenant_key = "password", "tenant_name"

    creds = {
        "username": os.environ.get("OS_USERNAME"),
        password_key: os.environ.get("OS_PASSWORD"),
        "auth_url": os.environ.get("OS_AUTH_URL"),
        tenant_key: os.environ.get("OS_TENANT_NAME"),
    }

    endpoint_type = os.getenv('OS_ENDPOINT_TYPE')
    if endpoint_type is not None:
        creds["endpoint_type"] = endpoint_type
    region_name = os.getenv('OS_REGION_NAME')
    if region_name is not None:
        creds["region_name"] = region_name

    cacert = os.environ.get("OS_CACERT")
    if cacert is not None:
        # Each OpenStack client spells the CA-cert kwarg differently, so
        # set every known variant.
        creds.update({"cacert": cacert,
                      "ca_cert": cacert,
                      "https_ca_cert": cacert,
                      "https_cacert": cacert,
                      "ca_file": cacert})
        creds.update({"insecure": "True", "https_insecure": "True"})
        if not os.path.isfile(cacert):
            logger.info("WARNING: The 'OS_CACERT' environment variable is "
                        "set to %s but the file does not exist." % cacert)
    return creds
+
+
def source_credentials(rc_file):
    """Source an openrc file into this process' environment.

    Runs '. <rc_file>; env' in a shell and copies every resulting
    variable into os.environ. Returns the parsed environment dict.

    NOTE(review): values containing newlines would break the line-based
    parsing below -- assumed not to occur in openrc output. Also assumes
    *rc_file* is trusted (it is executed by the shell).
    """
    pipe = subprocess.Popen(". %s; env" % rc_file, stdout=subprocess.PIPE,
                            shell=True)
    output = pipe.communicate()[0]
    env = dict((line.split("=", 1) for line in output.splitlines()))
    os.environ.update(env)
    return env
+
+
def get_credentials_for_rally():
    """Translate the keystone credentials into a Rally 'ExistingCloud' dict."""
    creds = get_credentials("keystone")
    # Rally expects short endpoint-type names rather than catalog URLs.
    endpoint_map = {'internalURL': 'internal',
                    'publicURL': 'public',
                    'adminURL': 'admin'}
    current = creds.get('endpoint_type')
    if current in endpoint_map:
        creds['endpoint_type'] = endpoint_map[current]
    admin_keys = ('username', 'tenant_name', 'password')
    rally_conf = {"type": "ExistingCloud", "admin": {}}
    for key, value in creds.items():
        if key in admin_keys:
            rally_conf['admin'][key] = value
        else:
            rally_conf[key] = value
    return rally_conf
+
+
+# *********************************************
+# CLIENTS
+# *********************************************
def get_keystone_client():
    """Return a keystone v2.0 client built from the openrc credentials."""
    creds_keystone = get_credentials("keystone")
    return keystoneclient.Client(**creds_keystone)


def get_nova_client():
    """Return a nova client (API version 2)."""
    creds_nova = get_credentials("nova")
    return novaclient.Client('2', **creds_nova)


def get_cinder_client():
    """Return a cinder client (API version 2, 'volume' service type)."""
    creds_cinder = get_credentials("cinder")
    creds_cinder.update({
        "service_type": "volume"
    })
    return cinderclient.Client('2', **creds_cinder)


def get_neutron_client():
    """Return a neutron v2.0 client."""
    creds_neutron = get_credentials("neutron")
    return neutronclient.Client(**creds_neutron)


def get_glance_client():
    """Return a glance v1 client.

    The image endpoint is looked up in the keystone service catalog,
    honouring OS_ENDPOINT_TYPE when set (defaults to 'publicURL').
    """
    keystone_client = get_keystone_client()
    glance_endpoint_type = 'publicURL'
    os_endpoint_type = os.getenv('OS_ENDPOINT_TYPE')
    if os_endpoint_type is not None:
        glance_endpoint_type = os_endpoint_type
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type=glance_endpoint_type)
    return glanceclient.Client(1, glance_endpoint,
                               token=keystone_client.auth_token)
+
+
+# *********************************************
+# NOVA
+# *********************************************
def get_instances(nova_client):
    """Return all servers across all tenants, or None on error."""
    try:
        return nova_client.servers.list(search_opts={'all_tenants': 1})
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [get_instances(nova_client)]: %s" % e)
        return None


def get_instance_status(nova_client, instance):
    """Return the current status string of *instance*, or None on error."""
    try:
        instance = nova_client.servers.get(instance.id)
        return instance.status
    except Exception as e:
        logger.error("Error [get_instance_status(nova_client)]: %s" % e)
        return None


def get_instance_by_name(nova_client, instance_name):
    """Return the server named *instance_name*, or None on error."""
    try:
        return nova_client.servers.find(name=instance_name)
    except Exception as e:
        logger.error("Error [get_instance_by_name(nova_client, '%s')]: %s"
                     % (instance_name, e))
        return None
+
+
def get_flavor_id(nova_client, flavor_name):
    """Return the id of the flavor named *flavor_name*, or '' if absent."""
    matches = (f.id for f in nova_client.flavors.list(detailed=True)
               if f.name == flavor_name)
    return next(matches, '')


def get_flavor_id_by_ram_range(nova_client, min_ram, max_ram):
    """Return the id of the first flavor with min_ram <= RAM <= max_ram.

    Returns '' when no flavor falls in the range.
    """
    matches = (f.id for f in nova_client.flavors.list(detailed=True)
               if min_ram <= f.ram <= max_ram)
    return next(matches, '')
+
+
def create_flavor(nova_client, flavor_name, ram, disk, vcpus, public=True):
    """Create a nova flavor and return its id, or None on failure.

    Extra specs from the 'general.flavor_extra_specs' functest config are
    applied when configured.
    """
    try:
        flavor = nova_client.flavors.create(
            flavor_name, ram, vcpus, disk, is_public=public)
        try:
            extra_specs = ft_utils.get_functest_config(
                'general.flavor_extra_specs')
            flavor.set_keys(extra_specs)
        except ValueError:
            # Flavor extra specs are not configured; skip the update.
            pass
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [create_flavor(nova_client, '%s', '%s', '%s', "
                     "'%s')]: %s" % (flavor_name, ram, disk, vcpus, e))
        return None
    return flavor.id
+
+
def get_or_create_flavor(flavor_name, ram, disk, vcpus, public=True):
    """Return (flavor_already_existed, flavor_id), creating when needed.

    flavor_id is '' or None when lookup/creation failed.
    """
    nova_client = get_nova_client()

    flavor_id = get_flavor_id(nova_client, flavor_name)
    if flavor_id != '':
        logger.info("Using existing flavor '%s'..." % flavor_name)
        return True, flavor_id

    logger.info("Creating flavor '%s' with '%s' RAM, '%s' disk size, "
                "'%s' vcpus..." % (flavor_name, ram, disk, vcpus))
    flavor_id = create_flavor(
        nova_client, flavor_name, ram, disk, vcpus, public=public)
    if not flavor_id:
        logger.error("Failed to create flavor '%s'..." % (flavor_name))
    else:
        logger.debug("Flavor '%s' with ID=%s created successfully."
                     % (flavor_name, flavor_id))
    return False, flavor_id
+
+
def get_floating_ips(nova_client):
    """Return the list of floating IPs, or None on error."""
    try:
        return nova_client.floating_ips.list()
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [get_floating_ips(nova_client)]: %s" % e)
        return None


def get_hypervisors(nova_client):
    """Return the hostnames of all hypervisors in 'up' state, or None."""
    try:
        nodes = []
        hypervisors = nova_client.hypervisors.list()
        for hypervisor in hypervisors:
            if hypervisor.state == "up":
                nodes.append(hypervisor.hypervisor_hostname)
        return nodes
    except Exception as e:
        logger.error("Error [get_hypervisors(nova_client)]: %s" % e)
        return None
+
+
def create_instance(flavor_name,
                    image_id,
                    network_id,
                    instance_name="functest-vm",
                    confdrive=True,
                    userdata=None,
                    av_zone='',
                    fixed_ip=None,
                    files=None):
    """Boot a nova server on *network_id*.

    When *userdata* is given, a config drive (per *confdrive*) and the
    user-data script are also passed to nova.

    :return: the created server, or None when the flavor does not exist
    """
    nova_client = get_nova_client()
    try:
        flavor = nova_client.flavors.find(name=flavor_name)
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.
        flavors = nova_client.flavors.list()
        logger.error("Error: Flavor '%s' not found. Available flavors are: "
                     "\n%s" % (flavor_name, flavors))
        return None

    nics = {"net-id": network_id}
    if fixed_ip is not None:
        nics["v4-fixed-ip"] = fixed_ip

    # Build the common kwargs once instead of duplicating the create() call.
    kwargs = dict(name=instance_name,
                  flavor=flavor,
                  image=image_id,
                  nics=[nics],
                  availability_zone=av_zone,
                  files=files)
    if userdata is not None:
        kwargs.update(config_drive=confdrive, userdata=userdata)
    return nova_client.servers.create(**kwargs)
+
+
def create_instance_and_wait_for_active(flavor_name,
                                        image_id,
                                        network_id,
                                        instance_name="",
                                        config_drive=False,
                                        userdata="",
                                        av_zone='',
                                        fixed_ip=None,
                                        files=None):
    """Boot a server and poll until it reaches ACTIVE status.

    :return: the instance, or None on ERROR status or boot timeout
    """
    SLEEP = 3
    VM_BOOT_TIMEOUT = 180
    nova_client = get_nova_client()
    instance = create_instance(flavor_name,
                               image_id,
                               network_id,
                               instance_name,
                               config_drive,
                               userdata,
                               av_zone=av_zone,
                               fixed_ip=fixed_ip,
                               files=files)
    # Integer division: '/' would yield a float (and break range) on Py3.
    count = VM_BOOT_TIMEOUT // SLEEP
    for n in range(count, -1, -1):
        status = get_instance_status(nova_client, instance)
        if status is None:
            # Status lookup failed (already logged); previously this
            # crashed on status.lower(). Retry until the timeout.
            time.sleep(SLEEP)
            continue
        if status.lower() == "active":
            return instance
        elif status.lower() == "error":
            logger.error("The instance %s went to ERROR status."
                         % instance_name)
            return None
        time.sleep(SLEEP)
    logger.error("Timeout booting the instance %s." % instance_name)
    return None
+
+
def create_floating_ip(neutron_client):
    """Allocate a floating IP on the external network.

    :return: {'fip_addr': ..., 'fip_id': ...}, or None on error
    """
    extnet_id = get_external_net_id(neutron_client)
    props = {'floating_network_id': extnet_id}
    try:
        ip_json = neutron_client.create_floatingip({'floatingip': props})
        fip_addr = ip_json['floatingip']['floating_ip_address']
        fip_id = ip_json['floatingip']['id']
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [create_floating_ip(neutron_client)]: %s" % e)
        return None
    return {'fip_addr': fip_addr, 'fip_id': fip_id}


def add_floating_ip(nova_client, server_id, floatingip_id):
    """Attach a floating IP to a server. Returns True on success."""
    try:
        nova_client.servers.add_floating_ip(server_id, floatingip_id)
        return True
    except Exception as e:
        logger.error("Error [add_floating_ip(nova_client, '%s', '%s')]: %s"
                     % (server_id, floatingip_id, e))
        return False


def delete_instance(nova_client, instance_id):
    """Force-delete a server. Returns True on success."""
    try:
        nova_client.servers.force_delete(instance_id)
        return True
    except Exception as e:
        logger.error("Error [delete_instance(nova_client, '%s')]: %s"
                     % (instance_id, e))
        return False


def delete_floating_ip(nova_client, floatingip_id):
    """Release a floating IP. Returns True on success."""
    try:
        nova_client.floating_ips.delete(floatingip_id)
        return True
    except Exception as e:
        logger.error("Error [delete_floating_ip(nova_client, '%s')]: %s"
                     % (floatingip_id, e))
        return False
+
+
+# *********************************************
+# NEUTRON
+# *********************************************
def get_network_list(neutron_client):
    """Return all networks, or None when there are none."""
    networks = neutron_client.list_networks()['networks']
    return networks if networks else None


def get_router_list(neutron_client):
    """Return all routers, or None when there are none."""
    routers = neutron_client.list_routers()['routers']
    return routers if routers else None


def get_port_list(neutron_client):
    """Return all ports, or None when there are none."""
    ports = neutron_client.list_ports()['ports']
    return ports if ports else None
+
+
def get_network_id(neutron_client, network_name):
    """Return the id of the network named *network_name* ('' when absent)."""
    networks = neutron_client.list_networks()['networks']
    return next((n['id'] for n in networks if n['name'] == network_name), '')


def get_subnet_id(neutron_client, subnet_name):
    """Return the id of the subnet named *subnet_name* ('' when absent)."""
    subnets = neutron_client.list_subnets()['subnets']
    return next((s['id'] for s in subnets if s['name'] == subnet_name), '')


def get_router_id(neutron_client, router_name):
    """Return the id of the router named *router_name* ('' when absent)."""
    routers = neutron_client.list_routers()['routers']
    return next((r['id'] for r in routers if r['name'] == router_name), '')
+
+
def get_private_net(neutron_client):
    """Return the first existing shared, non-external network, or None."""
    for net in neutron_client.list_networks()['networks']:
        # Keep the original identity comparisons on the API booleans.
        if net['router:external'] is False and net['shared'] is True:
            return net
    return None


def get_external_net(neutron_client):
    """Return the name of the first external network, or None."""
    nets = neutron_client.list_networks()['networks']
    return next((n['name'] for n in nets if n['router:external']), None)


def get_external_net_id(neutron_client):
    """Return the id of the first external network, or None."""
    nets = neutron_client.list_networks()['networks']
    return next((n['id'] for n in nets if n['router:external']), None)


def check_neutron_net(neutron_client, net_name):
    """True when a network named *net_name* exists with at least one subnet."""
    for network in neutron_client.list_networks()['networks']:
        if network['name'] == net_name and network['subnets']:
            return True
    return False
+
+
def create_neutron_net(neutron_client, name):
    """Create a network. Returns its id, or None on error."""
    json_body = {'network': {'name': name,
                             'admin_state_up': True}}
    try:
        network = neutron_client.create_network(body=json_body)
        return network['network']['id']
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [create_neutron_net(neutron_client, '%s')]: %s"
                     % (name, e))
        return None


def create_neutron_subnet(neutron_client, name, cidr, net_id):
    """Create an IPv4 subnet on *net_id*. Returns its id, or None on error."""
    json_body = {'subnets': [{'name': name, 'cidr': cidr,
                              'ip_version': 4, 'network_id': net_id}]}
    try:
        subnet = neutron_client.create_subnet(body=json_body)
        return subnet['subnets'][0]['id']
    except Exception as e:
        logger.error("Error [create_neutron_subnet(neutron_client, '%s', "
                     "'%s', '%s')]: %s" % (name, cidr, net_id, e))
        return None


def create_neutron_router(neutron_client, name):
    """Create a router. Returns its id, or None on error."""
    json_body = {'router': {'name': name, 'admin_state_up': True}}
    try:
        router = neutron_client.create_router(json_body)
        return router['router']['id']
    except Exception as e:
        logger.error("Error [create_neutron_router(neutron_client, '%s')]: %s"
                     % (name, e))
        return None


def create_neutron_port(neutron_client, name, network_id, ip):
    """Create a port with fixed IP *ip*. Returns its id, or None on error."""
    json_body = {'port': {
        'admin_state_up': True,
        'name': name,
        'network_id': network_id,
        'fixed_ips': [{"ip_address": ip}]
    }}
    try:
        port = neutron_client.create_port(body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [create_neutron_port(neutron_client, '%s', '%s', "
                     "'%s')]: %s" % (name, network_id, ip, e))
        return None
+
+
def update_neutron_net(neutron_client, network_id, shared=False):
    """Set the 'shared' flag on a network. Returns True on success."""
    json_body = {'network': {'shared': shared}}
    try:
        neutron_client.update_network(network_id, body=json_body)
        return True
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [update_neutron_net(neutron_client, '%s', '%s')]: "
                     "%s" % (network_id, str(shared), e))
        return False


def update_neutron_port(neutron_client, port_id, device_owner):
    """Set a port's device_owner. Returns the port id, or None on error."""
    json_body = {'port': {
        'device_owner': device_owner,
    }}
    try:
        port = neutron_client.update_port(port=port_id,
                                          body=json_body)
        return port['port']['id']
    except Exception as e:
        logger.error("Error [update_neutron_port(neutron_client, '%s', '%s')]:"
                     " %s" % (port_id, device_owner, e))
        return None
+
+
def add_interface_router(neutron_client, router_id, subnet_id):
    """Attach *subnet_id* to router *router_id*. Returns True on success."""
    json_body = {"subnet_id": subnet_id}
    try:
        neutron_client.add_interface_router(router=router_id, body=json_body)
        return True
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [add_interface_router(neutron_client, '%s', "
                     "'%s')]: %s" % (router_id, subnet_id, e))
        return False


def add_gateway_router(neutron_client, router_id):
    """Set the external network as gateway of *router_id*. True on success."""
    ext_net_id = get_external_net_id(neutron_client)
    router_dict = {'network_id': ext_net_id}
    try:
        neutron_client.add_gateway_router(router_id, router_dict)
        return True
    except Exception as e:
        logger.error("Error [add_gateway_router(neutron_client, '%s')]: %s"
                     % (router_id, e))
        return False
+
+
def delete_neutron_net(neutron_client, network_id):
    """Delete a network. Returns True on success."""
    try:
        neutron_client.delete_network(network_id)
        return True
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [delete_neutron_net(neutron_client, '%s')]: %s"
                     % (network_id, e))
        return False


def delete_neutron_subnet(neutron_client, subnet_id):
    """Delete a subnet. Returns True on success."""
    try:
        neutron_client.delete_subnet(subnet_id)
        return True
    except Exception as e:
        logger.error("Error [delete_neutron_subnet(neutron_client, '%s')]: %s"
                     % (subnet_id, e))
        return False


def delete_neutron_router(neutron_client, router_id):
    """Delete a router. Returns True on success."""
    try:
        neutron_client.delete_router(router=router_id)
        return True
    except Exception as e:
        logger.error("Error [delete_neutron_router(neutron_client, '%s')]: %s"
                     % (router_id, e))
        return False


def delete_neutron_port(neutron_client, port_id):
    """Delete a port. Returns True on success."""
    try:
        neutron_client.delete_port(port_id)
        return True
    except Exception as e:
        logger.error("Error [delete_neutron_port(neutron_client, '%s')]: %s"
                     % (port_id, e))
        return False
+
+
def remove_interface_router(neutron_client, router_id, subnet_id):
    """Detach *subnet_id* from router *router_id*. Returns True on success."""
    json_body = {"subnet_id": subnet_id}
    try:
        neutron_client.remove_interface_router(router=router_id,
                                               body=json_body)
        return True
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [remove_interface_router(neutron_client, '%s', "
                     "'%s')]: %s" % (router_id, subnet_id, e))
        return False


def remove_gateway_router(neutron_client, router_id):
    """Clear the gateway of router *router_id*. Returns True on success."""
    try:
        neutron_client.remove_gateway_router(router_id)
        return True
    except Exception as e:
        logger.error("Error [remove_gateway_router(neutron_client, '%s')]: %s"
                     % (router_id, e))
        return False
+
+
def create_network_full(neutron_client,
                        net_name,
                        subnet_name,
                        router_name,
                        cidr):
    """Ensure a network + subnet + gateway'd router exist.

    Reuses the triple when all three already exist; otherwise creates
    them and wires the router to the subnet and the external network.

    :return: {'net_id', 'subnet_id', 'router_id'} on success.
        NOTE(review): failure returns are inconsistent -- False when the
        network creation fails, None for every later step; callers that
        only truth-test are unaffected.
    """
    # Check if the network already exists
    network_id = get_network_id(neutron_client, net_name)
    subnet_id = get_subnet_id(neutron_client, subnet_name)
    router_id = get_router_id(neutron_client, router_name)

    if network_id != '' and subnet_id != '' and router_id != '':
        logger.info("A network with name '%s' already exists..." % net_name)
    else:
        neutron_client.format = 'json'
        logger.info('Creating neutron network %s...' % net_name)
        network_id = create_neutron_net(neutron_client, net_name)

        if not network_id:
            return False

        logger.debug("Network '%s' created successfully" % network_id)
        logger.debug('Creating Subnet....')
        subnet_id = create_neutron_subnet(neutron_client, subnet_name,
                                          cidr, network_id)
        if not subnet_id:
            return None

        logger.debug("Subnet '%s' created successfully" % subnet_id)
        logger.debug('Creating Router...')
        router_id = create_neutron_router(neutron_client, router_name)

        if not router_id:
            return None

        logger.debug("Router '%s' created successfully" % router_id)
        logger.debug('Adding router to subnet...')

        if not add_interface_router(neutron_client, router_id, subnet_id):
            return None

        logger.debug("Interface added successfully.")

        logger.debug('Adding gateway to router...')
        if not add_gateway_router(neutron_client, router_id):
            return None

        logger.debug("Gateway added successfully.")

    network_dic = {'net_id': network_id,
                   'subnet_id': subnet_id,
                   'router_id': router_id}
    return network_dic
+
+
def create_shared_network_full(net_name, subnt_name, router_name, subnet_cidr):
    """Create net/subnet/router and mark the network as shared.

    :return: the dict from create_network_full(), or None on any failure
    """
    neutron_client = get_neutron_client()

    network_dic = create_network_full(neutron_client,
                                      net_name,
                                      subnt_name,
                                      router_name,
                                      subnet_cidr)
    if not network_dic:
        logger.error("Network %s creation failed" % net_name)
        return None
    if not update_neutron_net(neutron_client,
                              network_dic['net_id'],
                              shared=True):
        logger.error("Failed to update network %s..." % net_name)
        return None
    logger.debug("Network '%s' is available..." % net_name)
    return network_dic
+
+
def create_bgpvpn(neutron_client, **kwargs):
    """Create a BGPVPN from keyword args; returns the raw API response."""
    # route_distinguishers
    # route_targets
    json_body = {"bgpvpn": kwargs}
    return neutron_client.create_bgpvpn(json_body)


def create_network_association(neutron_client, bgpvpn_id, neutron_network_id):
    """Associate a network with a BGPVPN; returns the raw API response."""
    json_body = {"network_association": {"network_id": neutron_network_id}}
    return neutron_client.create_network_association(bgpvpn_id, json_body)


def create_router_association(neutron_client, bgpvpn_id, router_id):
    """Associate a router with a BGPVPN; returns the raw API response."""
    json_body = {"router_association": {"router_id": router_id}}
    return neutron_client.create_router_association(bgpvpn_id, json_body)


def update_bgpvpn(neutron_client, bgpvpn_id, **kwargs):
    """Update a BGPVPN with the given attributes; returns the response."""
    json_body = {"bgpvpn": kwargs}
    return neutron_client.update_bgpvpn(bgpvpn_id, json_body)


def delete_bgpvpn(neutron_client, bgpvpn_id):
    """Delete a BGPVPN; returns the raw API response."""
    return neutron_client.delete_bgpvpn(bgpvpn_id)


def get_bgpvpn(neutron_client, bgpvpn_id):
    """Return the BGPVPN details for *bgpvpn_id*."""
    return neutron_client.show_bgpvpn(bgpvpn_id)


def get_bgpvpn_routers(neutron_client, bgpvpn_id):
    """Return the router ids associated with the BGPVPN."""
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['routers']


def get_bgpvpn_networks(neutron_client, bgpvpn_id):
    """Return the network ids associated with the BGPVPN."""
    return get_bgpvpn(neutron_client, bgpvpn_id)['bgpvpn']['networks']
+
+# *********************************************
+# SEC GROUPS
+# *********************************************
+
+
def get_security_groups(neutron_client):
    """Return all security groups, or None on error."""
    try:
        security_groups = neutron_client.list_security_groups()[
            'security_groups']
        return security_groups
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [get_security_groups(neutron_client)]: %s" % e)
        return None


def get_security_group_id(neutron_client, sg_name):
    """Return the id of the group named *sg_name* ('' when absent)."""
    security_groups = get_security_groups(neutron_client)
    # 'sg_id' instead of shadowing the builtin 'id'.
    sg_id = ''
    for sg in security_groups:
        if sg['name'] == sg_name:
            sg_id = sg['id']
            break
    return sg_id
+
+
def create_security_group(neutron_client, sg_name, sg_description):
    """Create a security group.

    :return: the 'security_group' dict from the API, or None on error
    """
    json_body = {'security_group': {'name': sg_name,
                                    'description': sg_description}}
    try:
        secgroup = neutron_client.create_security_group(json_body)
        return secgroup['security_group']
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [create_security_group(neutron_client, '%s', "
                     "'%s')]: %s" % (sg_name, sg_description, e))
        return None
+
+
def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
                         port_range_min=None, port_range_max=None):
    """Add a rule to security group *sg_id*.

    Both port bounds must be provided together or both omitted.

    :return: True on success, False otherwise
    """
    if port_range_min is None and port_range_max is None:
        json_body = {'security_group_rule': {'direction': direction,
                                             'security_group_id': sg_id,
                                             'protocol': protocol}}
    elif port_range_min is not None and port_range_max is not None:
        json_body = {'security_group_rule': {'direction': direction,
                                             'security_group_id': sg_id,
                                             'port_range_min': port_range_min,
                                             'port_range_max': port_range_max,
                                             'protocol': protocol}}
    else:
        # Fixed: the trailing text was passed as a second positional arg
        # to logger.error(), which logging treats as a %-format argument;
        # build a single message instead.
        logger.error("Error [create_secgroup_rule(neutron_client, '%s', '%s', "
                     "'%s', '%s', '%s', '%s')]: Invalid values for "
                     "port_range_min, port_range_max"
                     % (neutron_client, sg_id, direction,
                        port_range_min, port_range_max, protocol))
        return False
    try:
        neutron_client.create_security_group_rule(json_body)
        return True
    except Exception as e:  # was Python-2-only "except Exception, e"
        logger.error("Error [create_secgroup_rule(neutron_client, '%s', '%s', "
                     "'%s', '%s', '%s', '%s')]: %s" % (neutron_client,
                                                       sg_id,
                                                       direction,
                                                       port_range_min,
                                                       port_range_max,
                                                       protocol, e))
        return False
+
+
def create_security_group_full(neutron_client,
                               sg_name, sg_description):
    """Get or create a security group; new groups get ICMP and SSH rules.

    :return: the group id, or None when the group or any rule fails
    """
    sg_id = get_security_group_id(neutron_client, sg_name)
    if sg_id != '':
        logger.info("Using existing security group '%s'..." % sg_name)
        return sg_id

    logger.info("Creating security group '%s'..." % sg_name)
    secgroup = create_security_group(neutron_client,
                                     sg_name,
                                     sg_description)
    if not secgroup:
        logger.error("Failed to create the security group...")
        return None

    sg_id = secgroup['id']
    logger.debug("Security group '%s' with ID=%s created successfully."
                 % (secgroup['name'], sg_id))

    logger.debug("Adding ICMP rules in security group '%s'..."
                 % sg_name)
    if not create_secgroup_rule(neutron_client, sg_id,
                                'ingress', 'icmp'):
        logger.error("Failed to create the security group rule...")
        return None

    logger.debug("Adding SSH rules in security group '%s'..."
                 % sg_name)
    if not create_secgroup_rule(
            neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
        logger.error("Failed to create the security group rule...")
        return None

    if not create_secgroup_rule(
            neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
        logger.error("Failed to create the security group rule...")
        return None
    return sg_id
+
+
def add_secgroup_to_instance(nova_client, instance_id, secgroup_id):
    """Attach security group *secgroup_id* to server *instance_id*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        nova_client.servers.add_security_group(instance_id, secgroup_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [add_secgroup_to_instance(nova_client, '%s', "
                     "'%s')]: %s" % (instance_id, secgroup_id, e))
        return False
+
+
def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota):
    """Set the security-group and security-group-rule quotas for a tenant.

    Returns True on success, False (after logging) on any error.
    """
    json_body = {"quota": {
        "security_group": sg_quota,
        "security_group_rule": sg_rule_quota
    }}

    try:
        neutron_client.update_quota(tenant_id=tenant_id,
                                    body=json_body)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [update_sg_quota(neutron_client, '%s', '%s', "
                     "'%s')]: %s" % (tenant_id, sg_quota, sg_rule_quota, e))
        return False
+
+
def delete_security_group(neutron_client, secgroup_id):
    """Delete security group *secgroup_id*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        neutron_client.delete_security_group(secgroup_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_security_group(neutron_client, '%s')]: %s"
                     % (secgroup_id, e))
        return False
+
+
+# *********************************************
+# GLANCE
+# *********************************************
def get_images(nova_client):
    """Return the list of images known to Nova, or None on error."""
    try:
        images = nova_client.images.list()
        return images
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [get_images]: %s" % e)
        return None
+
+
def get_image_id(glance_client, image_name):
    """Return the ID of the first Glance image named *image_name*.

    Returns '' when no image with that name exists.
    """
    # Early return instead of a flag variable; avoids shadowing builtin 'id'.
    for image in glance_client.images.list():
        if image.name == image_name:
            return image.id
    return ''
+
+
def create_glance_image(glance_client, image_name, file_path, disk="qcow2",
                        container="bare", public=True):
    """Upload the file at *file_path* as a Glance image and return its ID.

    If an image named *image_name* already exists, its ID is returned
    without re-uploading.  Returns None when the file does not exist or
    the upload fails.
    """
    if not os.path.isfile(file_path):
        logger.error("Error: file %s does not exist." % file_path)
        return None
    try:
        image_id = get_image_id(glance_client, image_name)
        if image_id != '':
            # Removed dead 'if logger:' guards: logger is used unguarded
            # everywhere else in this module.
            logger.info("Image %s already exists." % image_name)
        else:
            logger.info("Creating image '%s' from '%s'..." % (image_name,
                                                              file_path))
            try:
                # Optional extra image properties from the functest config.
                properties = ft_utils.get_functest_config(
                    'general.image_properties')
            except ValueError:
                # image properties are not configured
                # therefore don't add any properties
                properties = {}
            with open(file_path) as fimage:
                image = glance_client.images.create(name=image_name,
                                                    is_public=public,
                                                    disk_format=disk,
                                                    container_format=container,
                                                    properties=properties,
                                                    data=fimage)
            image_id = image.id
        return image_id
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [create_glance_image(glance_client, '%s', '%s', "
                     "'%s')]: %s" % (image_name, file_path, str(public), e))
        return None
+
+
def get_or_create_image(name, path, format):
    """Look up a Glance image by name, creating it from *path* if absent.

    Returns a (image_exists, image_id) pair: image_exists is True only
    when the image was already present; image_id is falsy when creation
    was attempted and failed.
    """
    glance_client = get_glance_client()

    image_id = get_image_id(glance_client, name)
    image_exists = image_id != ''
    if image_exists:
        logger.info("Using existing image '%s'..." % name)
    else:
        logger.info("Creating image '%s' from '%s'..." % (name, path))
        image_id = create_glance_image(glance_client, name, path, format)
        if not image_id:
            logger.error("Failed to create a Glance image...")
        else:
            logger.debug("Image '%s' with ID=%s created successfully."
                         % (name, image_id))

    return image_exists, image_id
+
+
def delete_glance_image(nova_client, image_id):
    """Delete image *image_id* via the Nova images proxy.

    Returns True on success, False (after logging) on any error.
    """
    try:
        nova_client.images.delete(image_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_glance_image(nova_client, '%s')]: %s"
                     % (image_id, e))
        return False
+
+
+# *********************************************
+# CINDER
+# *********************************************
def get_volumes(cinder_client):
    """Return all Cinder volumes across all tenants, or None on error."""
    try:
        volumes = cinder_client.volumes.list(search_opts={'all_tenants': 1})
        return volumes
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [get_volumes(cinder_client)]: %s" % e)
        return None
+
+
def list_volume_types(cinder_client, public=True, private=True):
    """Return Cinder volume types, optionally filtered by visibility.

    public=False drops public types; private=False drops private ones.
    Returns None (after logging) on any error.
    """
    try:
        volume_types = cinder_client.volume_types.list()
        if not public:
            volume_types = [vt for vt in volume_types if not vt.is_public]
        if not private:
            volume_types = [vt for vt in volume_types if vt.is_public]
        return volume_types
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [list_volume_types(cinder_client)]: %s" % e)
        return None
+
+
def create_volume_type(cinder_client, name):
    """Create and return a Cinder volume type named *name*.

    Returns None (after logging) on any error.
    """
    try:
        volume_type = cinder_client.volume_types.create(name)
        return volume_type
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [create_volume_type(cinder_client, '%s')]: %s"
                     % (name, e))
        return None
+
+
def update_cinder_quota(cinder_client, tenant_id, vols_quota,
                        snapshots_quota, gigabytes_quota):
    """Set the volume, snapshot and gigabyte quotas for a tenant.

    Returns True on success, False (after logging) on any error.
    """
    quotas_values = {"volumes": vols_quota,
                     "snapshots": snapshots_quota,
                     "gigabytes": gigabytes_quota}

    try:
        cinder_client.quotas.update(tenant_id, **quotas_values)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [update_cinder_quota(cinder_client, '%s', '%s', "
                     "'%s' '%s')]: %s" % (tenant_id, vols_quota,
                                          snapshots_quota, gigabytes_quota, e))
        return False
+
+
def delete_volume(cinder_client, volume_id, forced=False):
    """Delete volume *volume_id*.

    With forced=True the volume is first detached (best-effort) and then
    force-deleted.  Returns True on success, False (after logging) on
    any error.
    """
    try:
        if forced:
            try:
                cinder_client.volumes.detach(volume_id)
            # Narrowed from a bare 'except:' which would also swallow
            # SystemExit/KeyboardInterrupt; detach stays best-effort.
            except Exception:
                logger.error(sys.exc_info()[0])
            cinder_client.volumes.force_delete(volume_id)
        else:
            cinder_client.volumes.delete(volume_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_volume(cinder_client, '%s', '%s')]: %s"
                     % (volume_id, str(forced), e))
        return False
+
+
def delete_volume_type(cinder_client, volume_type):
    """Delete Cinder volume type *volume_type*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        cinder_client.volume_types.delete(volume_type)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_volume_type(cinder_client, '%s')]: %s"
                     % (volume_type, e))
        return False
+
+
+# *********************************************
+# KEYSTONE
+# *********************************************
def get_tenants(keystone_client):
    """Return the list of Keystone tenants, or None on error."""
    try:
        tenants = keystone_client.tenants.list()
        return tenants
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [get_tenants(keystone_client)]: %s" % e)
        return None
+
+
def get_users(keystone_client):
    """Return the list of Keystone users, or None on error."""
    try:
        users = keystone_client.users.list()
        return users
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [get_users(keystone_client)]: %s" % e)
        return None
+
+
def get_tenant_id(keystone_client, tenant_name):
    """Return the ID of the first tenant named *tenant_name*.

    Returns '' when no tenant with that name exists.
    """
    # Early return instead of a flag variable; avoids shadowing builtin 'id'.
    for tenant in keystone_client.tenants.list():
        if tenant.name == tenant_name:
            return tenant.id
    return ''
+
+
def get_user_id(keystone_client, user_name):
    """Return the ID of the first user named *user_name*.

    Returns '' when no user with that name exists.
    """
    # Early return instead of a flag variable; avoids shadowing builtin 'id'.
    for user in keystone_client.users.list():
        if user.name == user_name:
            return user.id
    return ''
+
+
def get_role_id(keystone_client, role_name):
    """Return the ID of the first role named *role_name*.

    Returns '' when no role with that name exists.
    """
    # Early return instead of a flag variable; avoids shadowing builtin 'id'.
    for role in keystone_client.roles.list():
        if role.name == role_name:
            return role.id
    return ''
+
+
def create_tenant(keystone_client, tenant_name, tenant_description):
    """Create an enabled Keystone tenant and return its ID.

    Returns None (after logging) on any error.
    """
    try:
        tenant = keystone_client.tenants.create(tenant_name,
                                                tenant_description,
                                                enabled=True)
        return tenant.id
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [create_tenant(keystone_client, '%s', '%s')]: %s"
                     % (tenant_name, tenant_description, e))
        return None
+
+
def create_user(keystone_client, user_name, user_password,
                user_email, tenant_id):
    """Create an enabled Keystone user in *tenant_id* and return its ID.

    Returns None (after logging) on any error.
    """
    try:
        user = keystone_client.users.create(user_name, user_password,
                                            user_email, tenant_id,
                                            enabled=True)
        return user.id
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        # Separator added between the 3rd and 4th placeholders, which were
        # previously fused ("'%s''%s'") in the logged message.
        logger.error("Error [create_user(keystone_client, '%s', '%s', '%s', "
                     "'%s')]: %s" % (user_name, user_password,
                                     user_email, tenant_id, e))
        return None
+
+
def add_role_user(keystone_client, user_id, role_id, tenant_id):
    """Grant role *role_id* to user *user_id* on tenant *tenant_id*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        keystone_client.roles.add_user_role(user_id, role_id, tenant_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        # Separator added between the 2nd and 3rd placeholders, which were
        # previously fused ("'%s''%s'") in the logged message.
        logger.error("Error [add_role_user(keystone_client, '%s', '%s', "
                     "'%s')]: %s " % (user_id, role_id, tenant_id, e))
        return False
+
+
def delete_tenant(keystone_client, tenant_id):
    """Delete Keystone tenant *tenant_id*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        keystone_client.tenants.delete(tenant_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_tenant(keystone_client, '%s')]: %s"
                     % (tenant_id, e))
        return False
+
+
def delete_user(keystone_client, user_id):
    """Delete Keystone user *user_id*.

    Returns True on success, False (after logging) on any error.
    """
    try:
        keystone_client.users.delete(user_id)
        return True
    # 'except E as e' (PEP 3110) instead of Py2-only 'except E, e'
    except Exception as e:
        logger.error("Error [delete_user(keystone_client, '%s')]: %s"
                     % (user_id, e))
        return False