Diffstat (limited to 'dovetail/utils/dovetail_utils.py')
-rw-r--r--  dovetail/utils/dovetail_utils.py  318
1 file changed, 183 insertions(+), 135 deletions(-)
diff --git a/dovetail/utils/dovetail_utils.py b/dovetail/utils/dovetail_utils.py
index dc7dbafb..1c4aca9d 100644
--- a/dovetail/utils/dovetail_utils.py
+++ b/dovetail/utils/dovetail_utils.py
@@ -1,28 +1,31 @@
#!/usr/bin/env python
#
-# jose.lausuch@ericsson.com
-# valentin.boucher@orange.com
-# grakiss.wanglei@huawei.com
+# Copyright (c) 2018 grakiss.wanglei@huawei.com and others.
+#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
+from __future__ import print_function
import sys
import os
import re
+import requests
import subprocess
from collections import Mapping, Set, Sequence
import json
-import urllib2
from datetime import datetime
from distutils.version import LooseVersion
import yaml
import python_hosts
+import docker
+from docker.types import Mount
from dovetail import constants
-from dovetail_config import DovetailConfig as dt_cfg
+from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
+from dovetail.utils.openstack_utils import OS_Utils
def exec_log(verbose, logger, msg, level, flush=False):
@@ -36,8 +39,6 @@ def exec_log(verbose, logger, msg, level, flush=False):
logger.error(msg)
elif level == 'debug':
logger.debug(msg)
- else:
- pass
else:
print(msg)
if flush:
@@ -45,7 +46,8 @@ def exec_log(verbose, logger, msg, level, flush=False):
def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
- exec_msg_on=True, err_msg="", verbose=True):
+ exec_msg_on=True, err_msg='', verbose=True,
+ progress_bar=False):
msg_err = ("The command '%s' failed." % cmd) if not err_msg else err_msg
msg_exec = ("Executing command: '%s'" % cmd)
level = 'info' if info else 'debug'
@@ -55,14 +57,16 @@ def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout = ''
- # count = 1
- # DEBUG = os.getenv('DEBUG')
+ if progress_bar:
+ count = 1
+ DEBUG = os.getenv('DEBUG')
for line in iter(p.stdout.readline, b''):
- exec_log(verbose, logger, line.strip(), level, True)
- stdout += line
- # if DEBUG is None or DEBUG.lower() != "true":
- # show_progress_bar(count)
- # count += 1
+ exec_log(verbose, logger, line.strip().decode('unicode-escape'),
+ level, True)
+ stdout += str(line)
+ if progress_bar and (DEBUG is None or DEBUG.lower() != 'true'):
+ show_progress_bar(count)
+ count += 1
stdout = stdout.strip()
returncode = p.wait()
p.stdout.close()
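The new progress_bar flag makes exec_cmd tick show_progress_bar once per output line unless DEBUG=true is exported. A minimal sketch of a caller, assuming exec_cmd is imported from this module (the command and logger are placeholders, not part of this change):

    import logging

    logger = logging.getLogger('dovetail.example')            # placeholder logger
    ret, out = exec_cmd('sudo docker pull opnfv/dovetail',     # placeholder command
                        logger=logger, progress_bar=True)
    if ret != 0:
        logger.error('Command failed: %s', out)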
@@ -77,13 +81,13 @@ def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
# walkthrough the object, yield path and value
-# dual python 2/3 compatability, inspired by the "six" library
-string_types = (str, unicode) if str is bytes else (str, bytes)
-# iteritems = lambda mapping: getattr(mapping, 'iteritems', mapping.items)()
+# dual python 2/3 compatibility, inspired by the "six" library
+string_types = (str, 'unicode') if str is bytes else (str, bytes)
+# items = lambda mapping: getattr(mapping, 'items', mapping.items)()
-def iteritems(mapping):
- return getattr(mapping, 'iteritems', mapping.items)()
+def items(mapping):
+ return getattr(mapping, 'items', mapping.items)()
def objwalk(obj, path=(), memo=None):
@@ -91,7 +95,7 @@ def objwalk(obj, path=(), memo=None):
memo = set()
iterator = None
if isinstance(obj, Mapping):
- iterator = iteritems
+ iterator = items
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj,
string_types):
iterator = enumerate
@@ -110,6 +114,7 @@ def get_obj_by_path(obj, dst_path):
for path, obj in objwalk(obj):
if path == dst_path:
return obj
+ return None
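objwalk yields a (path, value) pair for every leaf of a nested structure, and get_obj_by_path returns the value whose path tuple matches. A quick illustration, assuming both helpers are imported from this module (the nested dict is invented):

    cfg = {'functest': {'docker': {'image': 'opnfv/functest', 'tag': 'latest'}}}
    for path, value in objwalk(cfg):
        print(path, value)   # e.g. ('functest', 'docker', 'tag') latest
    print(get_obj_by_path(cfg, ('functest', 'docker', 'image')))   # opnfv/functest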
def source_env(env_file):
@@ -124,74 +129,25 @@ def source_env(env_file):
def check_https_enabled(logger=None):
- logger.debug("Checking if https enabled or not...")
+ logger.debug('Checking if https enabled or not...')
os_auth_url = os.getenv('OS_AUTH_URL')
if os_auth_url.startswith('https'):
- logger.debug("https is enabled")
+ logger.debug('https is enabled')
return True
- logger.debug("https is not enabled")
+ logger.debug('https is not enabled')
return False
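check_https_enabled only inspects the scheme of OS_AUTH_URL, for example (URL and logger are placeholders):

    import logging
    import os

    logger = logging.getLogger('dovetail.example')
    os.environ['OS_AUTH_URL'] = 'https://192.168.0.2:5000/v3'
    assert check_https_enabled(logger) is True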
-def get_ext_net_name(env_file, logger=None):
- ext_net = os.getenv('EXTERNAL_NETWORK')
- if ext_net:
- return ext_net
- else:
- https_enabled = check_https_enabled(logger)
- insecure_option = ''
- insecure = os.getenv('OS_INSECURE')
- if https_enabled:
- logger.debug("https enabled...")
- if insecure:
- if insecure.lower() == "true":
- insecure_option = ' --insecure '
- else:
- logger.warn("Env variable OS_INSECURE is {}, if https + "
- "no credential used, should be set as True."
- .format(insecure))
-
- cmd_check = "openstack %s network list" % insecure_option
- ret, msg = exec_cmd(cmd_check, logger)
- if ret:
- logger.error("The credentials info in {} is invalid."
- .format(env_file))
- return None
- cmd = "openstack %s network list --long | grep 'External' | head -1 | \
- awk '{print $4}'" % insecure_option
- ret, msg = exec_cmd(cmd, logger)
- if not ret:
- return msg
- return None
-
-
-def store_db_results(db_url, build_tag, testcase, dest_file, logger):
- url = "%s?build_tag=%s-%s" % (db_url, build_tag, testcase)
- logger.debug("Query to rest api: {}".format(url))
- try:
- data = json.load(urllib2.urlopen(url))
- if data['results']:
- with open(dest_file, 'a') as f:
- f.write(json.dumps(data['results'][0]) + '\n')
- return True
- else:
- return False
- except Exception as e:
- logger.exception(
- "Cannot read content from {}, exception: {}".format(url, e))
- return False
-
-
def get_duration(start_date, stop_date, logger):
fmt = '%Y-%m-%d %H:%M:%S'
try:
datetime_start = datetime.strptime(start_date, fmt)
datetime_stop = datetime.strptime(stop_date, fmt)
delta = (datetime_stop - datetime_start).seconds
- res = "%sm%ss" % (delta / 60, delta % 60)
+ res = '%sm%ss' % (delta / 60, delta % 60)
return res
except ValueError as e:
- logger.exception("ValueError: {}".format(e))
+ logger.exception('ValueError: {}'.format(e))
return None
@@ -205,20 +161,17 @@ def show_progress_bar(length):
def check_docker_version(logger=None):
- server_ret, server_ver = \
- exec_cmd("sudo docker version -f'{{.Server.Version}}'", logger=logger)
- client_ret, client_ver = \
- exec_cmd("sudo docker version -f'{{.Client.Version}}'", logger=logger)
- if server_ret == 0:
- logger.debug("docker server version: {}".format(server_ver))
- if server_ret != 0 or (LooseVersion(server_ver) < LooseVersion('1.12.3')):
+ client = docker.from_env()
+ server_ver = None
+ try:
+ server_ver = client.version()['Version']
+ except Exception:
+ logger.error('Failed to get Docker server version')
+ if server_ver and (LooseVersion(server_ver) >= LooseVersion('1.12.3')):
+ logger.debug('Docker server version: {}'.format(server_ver))
+ else:
logger.error("Don't support this Docker server version. "
"Docker server should be updated to at least 1.12.3.")
- if client_ret == 0:
- logger.debug("docker client version: {}".format(client_ver))
- if client_ret != 0 or (LooseVersion(client_ver) < LooseVersion('1.12.3')):
- logger.error("Don't support this Docker client version. "
- "Docker client should be updated to at least 1.12.3.")
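The version check now goes through the Docker SDK for Python instead of shelling out to the docker CLI, so only the server version is validated. A standalone sketch of the same logic, assuming the docker package is installed and the daemon socket is reachable:

    import docker
    from distutils.version import LooseVersion

    client = docker.from_env()
    try:
        server_ver = client.version()['Version']   # e.g. '18.06.1-ce'
    except Exception:
        server_ver = None
    if server_ver and LooseVersion(server_ver) >= LooseVersion('1.12.3'):
        print('Docker server {} is supported'.format(server_ver))
    else:
        print('Docker server should be updated to at least 1.12.3')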
def add_hosts_info(ip, hostnames):
@@ -236,25 +189,25 @@ def add_hosts_info(ip, hostnames):
def get_hardware_info(logger=None):
pod_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
dt_cfg.dovetail_config['pod_file'])
- logger.info("Get hardware info of all nodes list in file {} ..."
+ logger.info('Get hardware info of all nodes list in file {} ...'
.format(pod_file))
result_dir = dt_cfg.dovetail_config['result_dir']
info_file_path = os.path.join(result_dir, 'sut_hardware_info')
all_info_file = os.path.join(result_dir, 'all_hosts_info.json')
inventory_file = os.path.join(result_dir, 'inventory.ini')
if not get_inventory_file(pod_file, inventory_file, logger):
- logger.error("Failed to get SUT hardware info.")
+ logger.error('Failed to get SUT hardware info.')
return None
- ret, msg = exec_cmd("cd {} && ansible all -m setup -i {} --tree {}"
+ ret, msg = exec_cmd('cd {} && ansible all -m setup -i {} --tree {}'
.format(constants.USERCONF_PATH, inventory_file,
info_file_path), verbose=False)
if not os.path.exists(info_file_path) or ret != 0:
- logger.error("Failed to get SUT hardware info.")
+ logger.error('Failed to get SUT hardware info.')
return None
if not combine_files(info_file_path, all_info_file, logger):
- logger.error("Failed to get all hardware info.")
+ logger.error('Failed to get all hardware info.')
return None
- logger.info("Hardware info of all nodes are stored in file {}."
+ logger.info('Hardware info of all nodes are stored in file {}.'
.format(all_info_file))
return all_info_file
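Hardware facts are still collected by shelling out to Ansible's setup module; the equivalent standalone call is roughly the following, where the paths stand in for constants.USERCONF_PATH and the configured result_dir:

    cmd = ('cd {} && ansible all -m setup -i {} --tree {}'
           .format('/home/opnfv/userconfig',                      # placeholder
                   '/home/opnfv/dovetail/results/inventory.ini',  # placeholder
                   '/home/opnfv/dovetail/results/sut_hardware_info'))
    ret, msg = exec_cmd(cmd, verbose=False)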
@@ -282,13 +235,13 @@ def get_inventory_file(pod_file, inventory_file, logger=None):
.format(pod_file))
return False
out_f.write(host_info)
- logger.debug("Ansible inventory file is {}.".format(inventory_file))
+ logger.debug('Ansible inventory file is {}.'.format(inventory_file))
return True
except KeyError as e:
- logger.exception("KeyError {}.".format(e))
+ logger.exception('KeyError {}.'.format(e))
return False
except Exception:
- logger.exception("Failed to read file {}.".format(pod_file))
+ logger.exception('Failed to read file {}.'.format(pod_file))
return False
@@ -301,84 +254,96 @@ def combine_files(file_path, result_file, logger=None):
with open(absolute_file_path, 'r') as f:
all_info[info_file] = json.load(f)
except Exception:
- logger.error("Failed to read file {}.".format(absolute_file_path))
+ logger.error('Failed to read file {}.'.format(absolute_file_path))
return None
try:
with open(result_file, 'w') as f:
f.write(json.dumps(all_info))
except Exception:
- logger.exception("Failed to write file {}.".format(result_file))
+ logger.exception('Failed to write file {}.'.format(result_file))
return None
return result_file
def get_openstack_endpoint(logger=None):
https_enabled = check_https_enabled(logger)
- insecure_option = ''
insecure = os.getenv('OS_INSECURE')
- if https_enabled:
- if insecure:
- if insecure.lower() == "true":
- insecure_option = ' --insecure '
- cmd = ("openstack {} endpoint list --interface admin -f json"
- .format(insecure_option))
- ret, msg = exec_cmd(cmd, logger, verbose=False)
- if ret != 0:
- logger.error("Failed to get the endpoint info.")
+ if https_enabled and insecure and insecure.lower() == 'true':
+ os_utils = OS_Utils(verify=False)
+ else:
+ os_utils = OS_Utils()
+ res_endpoints, msg_endpoints = os_utils.search_endpoints()
+ if not res_endpoints:
+ logger.error('Failed to list endpoints. Exception message, {}'
+ .format(msg_endpoints))
return None
+ endpoints_info = []
+ for item in msg_endpoints:
+ endpoint = {'URL': item['url'], 'Enabled': item['enabled']}
+ res_services, msg_services = os_utils.search_services(
+ service_id=item['service_id'])
+ if not res_services:
+ logger.error('Failed to list services. Exception message, {}'
+ .format(msg_services))
+ return None
+ endpoint['Service Type'] = msg_services[0]['service_type']
+ endpoint['Service Name'] = msg_services[0]['name']
+ endpoints_info.append(endpoint)
+
result_file = os.path.join(dt_cfg.dovetail_config['result_dir'],
'endpoint_info.json')
try:
with open(result_file, 'w') as f:
- f.write(msg)
- logger.debug("Record all endpoint info into file {}."
+ json.dump(endpoints_info, f)
+ logger.debug('Record all endpoint info into file {}.'
.format(result_file))
- return result_file
+ return endpoints_info
except Exception:
- logger.exception("Failed to write endpoint info into file.")
+ logger.exception('Failed to write endpoint info into file.')
return None
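get_openstack_endpoint now returns a list of dicts and dumps the same list into endpoint_info.json instead of writing raw CLI output. One entry has the shape below; only the keys come from this change, the values are invented:

    example_endpoint = {
        'URL': 'http://192.168.0.2:5000/v3',
        'Enabled': True,
        'Service Type': 'identity',
        'Service Name': 'keystone',
    }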
def check_cacert_file(cacert, logger=None):
if not os.path.isfile(cacert):
- logger.error("OS_CACERT is {}, but the file does not exist."
+ logger.error('OS_CACERT is {}, but the file does not exist.'
.format(cacert))
return False
if not dt_cfg.dovetail_config['config_dir'] == os.path.dirname(cacert):
- logger.error("Credential file must be put under {}, "
- "which can be mounted into other container."
+ logger.error('Credential file must be put under {}, '
+ 'which can be mounted into other container.'
.format(dt_cfg.dovetail_config['config_dir']))
return False
return True
def get_hosts_info(logger=None):
- hosts_config = ""
+ hosts_config = {}
hosts_config_file = os.path.join(dt_cfg.dovetail_config['config_dir'],
'hosts.yaml')
if not os.path.isfile(hosts_config_file):
+ logger.warn('There is no hosts file {}. This may cause some issues '
+ 'with domain name resolution.'.format(hosts_config_file))
return hosts_config
with open(hosts_config_file) as f:
hosts_yaml = yaml.safe_load(f)
if not hosts_yaml:
- logger.debug("File {} is empty.".format(hosts_config_file))
+ logger.debug('File {} is empty.'.format(hosts_config_file))
return hosts_config
- try:
- if not hosts_yaml['hosts_info']:
- return hosts_config
- for ip, hostnames in hosts_yaml['hosts_info'].iteritems():
- if not hostnames:
- continue
- add_hosts_info(ip, hostnames)
- names_str = ' '.join(hostname for hostname in hostnames
- if hostname)
- if not names_str:
- continue
- hosts_config += ' --add-host=\'{}\':{} '.format(names_str, ip)
- logger.debug('Get hosts info {}:{}.'.format(ip, names_str))
- except KeyError as e:
- logger.error("There is no key {} in file {}"
- .format(e, hosts_config_file))
+ hosts_info = hosts_yaml.get('hosts_info', None)
+ if not hosts_info:
+ logger.error('There is no key hosts_info in file {}'
+ .format(hosts_config_file))
+ return hosts_config
+ for ip, hostnames in hosts_info.items():
+ if not hostnames:
+ continue
+ add_hosts_info(ip, hostnames)
+ names_str = ' '.join(hostname for hostname in hostnames
+ if hostname)
+ if not names_str:
+ continue
+ hosts_config[names_str] = ip
+ logger.debug('Get hosts info {}:{}.'.format(ip, names_str))
return hosts_config
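get_hosts_info now returns a dict that maps the joined hostnames to their IP instead of building a string of docker --add-host options. Assuming a hosts.yaml like the one sketched in the comments (contents invented), the returned value would be:

    # hosts.yaml (illustrative):
    #   hosts_info:
    #     192.168.141.101:
    #       - image.sample.local
    #       - registry.sample.local
    hosts_config = {'image.sample.local registry.sample.local': '192.168.141.101'}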
@@ -391,7 +356,7 @@ def read_yaml_file(file_path, logger=None):
content = yaml.safe_load(f)
return content
except Exception as e:
- logger.exception("Failed to read file {}, exception: {}"
+ logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
@@ -405,6 +370,89 @@ def read_plain_file(file_path, logger=None):
content = f.read()
return content
except Exception as e:
- logger.exception("Failed to read file {}, exception: {}"
+ logger.exception('Failed to read file {}, exception: {}'
.format(file_path, e))
return None
+
+
+def get_value_from_dict(key_path, input_dict):
+ """
+ Returns the value of a key in input_dict
+ key_path must be given in string format with dots
+ Example: result.dir
+ """
+ if not isinstance(key_path, str) or not isinstance(input_dict, dict):
+ return None
+ for key in key_path.split('.'):
+ input_dict = input_dict.get(key)
+ if not input_dict:
+ return None
+ return input_dict
+
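A quick illustration of get_value_from_dict with a dotted key path (the dict is invented):

    cfg = {'result': {'dir': '/home/opnfv/dovetail/results'}}
    print(get_value_from_dict('result.dir', cfg))       # /home/opnfv/dovetail/results
    print(get_value_from_dict('result.missing', cfg))   # None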
+
+def get_openstack_info(logger):
+ """
+ When the sut is an OpenStack deployment, its software and hardware info
+ are needed.
+ Software info is the endpoint list.
+ Hardware info is every node's cpu, disk ...
+ """
+ openrc = os.path.join(dt_cfg.dovetail_config['config_dir'],
+ dt_cfg.dovetail_config['env_file'])
+ if not os.path.isfile(openrc):
+ logger.error('File {} does not exist.'.format(openrc))
+ return
+ source_env(openrc)
+ get_hosts_info(logger)
+ get_openstack_endpoint(logger)
+ get_hardware_info(logger)
+
+
+def push_results_to_db(case_name, details, start_date, stop_date, logger):
+ """
+ Push results to OPNFV TestAPI DB when running with OPNFV CI jobs.
+ All results can be filtered with TestAPI.
+ http://testresults.opnfv.org/test/#/results
+ """
+ try:
+ url = os.getenv('TEST_DB_URL')
+ data = {'project_name': 'dovetail', 'case_name': case_name,
+ 'details': details, 'start_date': start_date,
+ 'stop_date': stop_date}
+ data['criteria'] = details['criteria'] if details else 'FAIL'
+ data['installer'] = os.getenv('INSTALLER_TYPE')
+ data['scenario'] = os.getenv('DEPLOY_SCENARIO')
+ data['pod_name'] = os.getenv('NODE_NAME')
+ data['build_tag'] = os.getenv('BUILD_TAG')
+ data['version'] = os.getenv('VERSION')
+ req = requests.post(url, data=json.dumps(data, sort_keys=True),
+ headers={'Content-Type': 'application/json'})
+ req.raise_for_status()
+ logger.debug('The results were successfully pushed to DB.')
+ return True
+ except Exception:
+ logger.exception('The results cannot be pushed to DB.')
+ return False
+
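push_results_to_db takes the target URL and most metadata from environment variables set by the CI job; a call might look roughly like this (URL, case name and details are examples only):

    import logging
    import os

    logger = logging.getLogger('dovetail.example')   # placeholder logger
    os.environ['TEST_DB_URL'] = 'http://testresults.opnfv.org/test/api/v1/results'
    details = {'criteria': 'PASS'}                   # example result payload
    push_results_to_db('dovetail.example.tc001', details,
                       '2018-07-01 09:00:00', '2018-07-01 09:05:00', logger)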
+
+def get_mount_list(project_cfg):
+ mount_list = []
+ mounts = get_value_from_dict('mounts', project_cfg)
+ for mount in mounts:
+ if mount:
+ param_dict = {}
+ for param in mount.split(','):
+ key_word = param.split('=')
+
+ if len(key_word) != 2:
+ return None, 'Error mount {}.'.format(mount)
+
+ param_dict[key_word[0]] = key_word[1]
+ try:
+ mount_list.append(Mount(target=param_dict['target'],
+ source=param_dict['source'],
+ type='bind'))
+ except Exception as e:
+ return None, e
+
+    return mount_list, 'Successfully got mount list.'
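get_mount_list expects every entry under the project's 'mounts' key to be a comma-separated string of key=value pairs providing at least source and target; for example (paths are placeholders):

    project_cfg = {
        'mounts': [
            'source=/home/opnfv/dovetail/results,'
            'target=/home/opnfv/functest/results',
            '',   # empty entries are skipped
        ]
    }
    mount_list, msg = get_mount_list(project_cfg)
    # mount_list now holds a single bind Mount with the source/target above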