Diffstat (limited to 'functest')
-rwxr-xr-x  functest/ci/config_functest.yaml                                 |   5
-rwxr-xr-x  functest/ci/exec_test.sh                                         |   7
-rw-r--r--  functest/ci/logging.json                                         |  29
-rwxr-xr-x  functest/ci/prepare_env.py                                       |   5
-rwxr-xr-x  functest/ci/run_tests.py                                         |   4
-rwxr-xr-x  functest/ci/testcases.yaml                                       |  11
-rw-r--r-- [-rwxr-xr-x]  functest/core/pytest_suite_runner.py                |   8
-rw-r--r--  functest/core/vnf_base.py                                        |  12
-rwxr-xr-x  functest/opnfv_tests/features/copper.py                          |   2
-rwxr-xr-x  functest/opnfv_tests/features/doctor.py                          |  81
-rw-r--r--  functest/opnfv_tests/openstack/rally/__init__.py                 |   0
-rw-r--r--  functest/opnfv_tests/openstack/rally/rally.py                    | 554
-rwxr-xr-x  functest/opnfv_tests/openstack/rally/run_rally-cert.py           | 613
-rw-r--r--  functest/opnfv_tests/openstack/tempest/conf_utils.py             |  76
-rw-r--r--  functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt |  20
-rw-r--r--  functest/opnfv_tests/openstack/tempest/tempest.py                |  81
-rw-r--r-- [-rwxr-xr-x]  functest/opnfv_tests/openstack/vping/vping_base.py  |   0
-rw-r--r--  functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py                    |   8
-rwxr-xr-x  functest/opnfv_tests/sdn/onos/teston/onos.py                     |   6
-rw-r--r--  functest/opnfv_tests/vnf/aaa/aaa.py                              |   3
-rw-r--r--  functest/opnfv_tests/vnf/ims/cloudify_ims.py                     |   3
-rw-r--r--  functest/opnfv_tests/vnf/ims/opera_ims.py                        |   3
-rw-r--r--  functest/opnfv_tests/vnf/ims/orchestra_ims.py                    |   3
-rw-r--r--  functest/tests/unit/cli/commands/test_cli_env.py                 |   1
-rwxr-xr-x [-rw-r--r--]  functest/tests/unit/core/test_testcase_base.py      |   2
-rw-r--r--  functest/tests/unit/odl/test_odl.py                              |   1
-rw-r--r--  functest/tests/unit/test_logging.ini                             |  27
-rw-r--r--  functest/tests/unit/utils/test_functest_utils.py                 |   1
-rw-r--r--  functest/utils/functest_constants.py                             |   4
-rwxr-xr-x [-rw-r--r--]  functest/utils/functest_logger.py                   |  60
-rw-r--r-- [-rwxr-xr-x]  functest/utils/openstack_tacker.py                  |   0
31 files changed, 779 insertions(+), 851 deletions(-)
diff --git a/functest/ci/config_functest.yaml b/functest/ci/config_functest.yaml
index 25be1724..2feab771 100755
--- a/functest/ci/config_functest.yaml
+++ b/functest/ci/config_functest.yaml
@@ -21,7 +21,7 @@ general:
repo_sfc: /home/opnfv/repos/sfc
dir_repo_onos: /home/opnfv/repos/onos
dir_repo_promise: /home/opnfv/repos/promise
- dir_repo_doctor: /home/opnfv/repos/doctor
+ repo_doctor: /home/opnfv/repos/doctor
repo_copper: /home/opnfv/repos/copper
dir_repo_ovno: /home/opnfv/repos/ovno
repo_parser: /home/opnfv/repos/parser
@@ -30,6 +30,7 @@ general:
functest: /home/opnfv/functest
functest_test: /home/opnfv/repos/functest/functest/opnfv_tests
results: /home/opnfv/functest/results
+ functest_logging_cfg: /home/opnfv/repos/functest/functest/ci/logging.json
functest_conf: /home/opnfv/functest/conf
functest_data: /home/opnfv/functest/data
dir_vIMS_data: /home/opnfv/functest/data/vIMS/
@@ -96,6 +97,8 @@ tempest:
user_password: tempest
validation:
ssh_timeout: 130
+ object_storage:
+ operator_role: SwiftOperator
private_net_name: tempest-net
private_subnet_name: tempest-subnet
private_subnet_cidr: 192.168.150.0/24
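
Note: the new object_storage/operator_role entry resurfaces later in this commit as CONST.tempest_object_storage_operator_role (see the conf_utils.py hunk), which suggests nested YAML keys are flattened into underscore-joined CONST attributes. A minimal sketch of that flattening, assuming a helper of this shape (illustrative only, not functest's actual CONST implementation):

    import yaml

    def flatten(tree, prefix=''):
        # Illustrative: join nested YAML keys with underscores, so
        # tempest -> object_storage -> operator_role becomes
        # 'tempest_object_storage_operator_role'.
        flat = {}
        for key, value in tree.items():
            name = '{}_{}'.format(prefix, key) if prefix else key
            if isinstance(value, dict):
                flat.update(flatten(value, name))
            else:
                flat[name] = value
        return flat

    with open('config_functest.yaml') as f:
        flat = flatten(yaml.safe_load(f))
    assert flat['tempest_object_storage_operator_role'] == 'SwiftOperator'
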
diff --git a/functest/ci/exec_test.sh b/functest/ci/exec_test.sh
index 7c96d69c..54a7c624 100755
--- a/functest/ci/exec_test.sh
+++ b/functest/ci/exec_test.sh
@@ -93,13 +93,6 @@ function run_test(){
"vims")
python ${FUNCTEST_TEST_DIR}/vnf/ims/vims.py $clean_flag $report
;;
- "rally_full")
- python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py $clean_flag all $report
- ;;
- "rally_sanity")
- python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py \
- $clean_flag --sanity all $report
- ;;
"onos")
python ${FUNCTEST_TEST_DIR}/sdn/onos/teston/onos.py
;;
diff --git a/functest/ci/logging.json b/functest/ci/logging.json
new file mode 100644
index 00000000..3f454e8f
--- /dev/null
+++ b/functest/ci/logging.json
@@ -0,0 +1,29 @@
+{
+ "version": 1,
+ "disable_existing_loggers": false,
+ "formatters": {
+ "standard": {
+ "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ }
+ },
+ "handlers": {
+ "console": {
+ "level": "INFO",
+ "class": "logging.StreamHandler",
+ "formatter": "standard"
+ },
+ "file": {
+ "level": "DEBUG",
+ "class": "logging.FileHandler",
+ "formatter": "standard",
+ "filename": "/home/opnfv/functest/results/functest.log"
+ }
+ },
+ "loggers": {
+ "": {
+ "handlers": ["console", "file"],
+ "level": "DEBUG",
+ "propagate": "yes"
+ }
+ }
+}
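
A dictConfig-style JSON like the one above is typically consumed with logging.config.dictConfig; a minimal sketch, assuming the file is read from the functest_logging_cfg path added to config_functest.yaml earlier in this commit (the loading code itself is not part of this patch):

    import json
    import logging
    import logging.config

    # Assumed path: mirrors the functest_logging_cfg entry above.
    with open('/home/opnfv/repos/functest/functest/ci/logging.json') as cfg:
        logging.config.dictConfig(json.load(cfg))

    # Per the handlers above: the console shows INFO and up, while the
    # file under results/ captures DEBUG and up.
    logging.getLogger('Rally').debug('goes to functest.log only')
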
diff --git a/functest/ci/prepare_env.py b/functest/ci/prepare_env.py
index 74c751af..8bbdf18b 100755
--- a/functest/ci/prepare_env.py
+++ b/functest/ci/prepare_env.py
@@ -184,11 +184,6 @@ def source_rc_file():
CONST.OS_TENANT_NAME = value
elif key == 'OS_PASSWORD':
CONST.OS_PASSWORD = value
- logger.debug("Used credentials: %s" % str)
- logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
- logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
- logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
- logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
def patch_config_file():
diff --git a/functest/ci/run_tests.py b/functest/ci/run_tests.py
index a5f1ab9e..ef080016 100755
--- a/functest/ci/run_tests.py
+++ b/functest/ci/run_tests.py
@@ -93,10 +93,6 @@ def source_rc_file():
elif key == 'OS_PASSWORD':
ft_constants.OS_PASSWORD = value
CONST.OS_PASSWORD = value
- logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
- logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
- logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
- logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
def generate_os_snapshot():
diff --git a/functest/ci/testcases.yaml b/functest/ci/testcases.yaml
index ede08285..27d358bf 100755
--- a/functest/ci/testcases.yaml
+++ b/functest/ci/testcases.yaml
@@ -81,6 +81,9 @@ tiers:
dependencies:
installer: ''
scenario: '^((?!bgpvpn).)*$'
+ run:
+ module: 'functest.opnfv_tests.openstack.rally.rally'
+ class: 'RallySanity'
-
name: odl
@@ -190,8 +193,11 @@ tiers:
description: >-
Test suite from Doctor project.
dependencies:
- installer: 'apex'
+ installer: '(apex)|(fuel)|(joid)'
scenario: '^((?!fdio).)*$'
+ run:
+ module: 'functest.opnfv_tests.features.doctor'
+ class: 'Doctor'
-
name: bgpvpn
@@ -325,6 +331,9 @@ tiers:
dependencies:
installer: '^((?!netvirt).)*$'
scenario: ''
+ run:
+ module: 'functest.opnfv_tests.openstack.rally.rally'
+ class: 'RallyFull'
-
name: vnf
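
The new run: blocks give each test case an importable module/class pair, so the runner can instantiate it directly instead of shelling out through exec_test.sh. A minimal sketch of that dispatch, assuming run_tests.py does something along these lines (its internals are not shown in this diff):

    import importlib

    def run_testcase(run_block):
        # run_block is a 'run:' entry from testcases.yaml, e.g.
        # {'module': 'functest.opnfv_tests.openstack.rally.rally',
        #  'class': 'RallySanity'}
        module = importlib.import_module(run_block['module'])
        cls = getattr(module, run_block['class'])
        return cls().run()
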
diff --git a/functest/core/pytest_suite_runner.py b/functest/core/pytest_suite_runner.py
index 1eed92b5..c168d7d9 100755..100644
--- a/functest/core/pytest_suite_runner.py
+++ b/functest/core/pytest_suite_runner.py
@@ -41,14 +41,18 @@ class PyTestSuiteRunner(base.TestcaseBase):
for test, message in result.failures:
self.logger.error(str(test) + " FAILED with " + message)
+ # a result can be PASS or FAIL,
+ # but either way the execution itself completed.
+ # We must distinguish an execution error from a FAIL result:
+ # TestcaseBase.EX_RUN_ERROR means the test case could not be run,
+ # not that it ran and the verdict was FAIL.
+ exit_code = base.TestcaseBase.EX_OK
if ((result.errors and len(result.errors) > 0)
or (result.failures and len(result.failures) > 0)):
self.logger.info("%s FAILED" % self.case_name)
self.criteria = 'FAIL'
- exit_code = base.TestcaseBase.EX_RUN_ERROR
else:
self.logger.info("%s OK" % self.case_name)
- exit_code = base.TestcaseBase.EX_OK
self.criteria = 'PASS'
self.details = {}
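
The moved comment codifies an exit-code contract: EX_RUN_ERROR is reserved for "the suite could not execute", while a functional FAIL now still returns EX_OK with self.criteria set to 'FAIL'. A sketch of a caller honoring that distinction (the handler names are hypothetical, for illustration):

    ret = suite.run()
    if ret == base.TestcaseBase.EX_RUN_ERROR:
        handle_infrastructure_error()   # the tests never ran
    elif suite.criteria == 'FAIL':
        handle_test_failures()          # the tests ran and some failed
    else:
        pass                            # the tests ran and passed
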
diff --git a/functest/core/vnf_base.py b/functest/core/vnf_base.py
index 99520494..4d019858 100644
--- a/functest/core/vnf_base.py
+++ b/functest/core/vnf_base.py
@@ -35,6 +35,7 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.details['orchestrator'] = {}
self.details['vnf'] = {}
self.details['test_vnf'] = {}
+ self.images = {}
try:
self.tenant_name = CONST.__getattribute__(
'vnf_{}_tenant_name'.format(self.case_name))
@@ -44,7 +45,7 @@ class VnfOnBoardingBase(base.TestcaseBase):
raise Exception("Unknown VNF case=" + self.case_name)
try:
- self.tenant_images = CONST.__getattribute__(
+ self.images = CONST.__getattribute__(
'vnf_{}_tenant_images'.format(self.case_name))
except:
self.logger.warn("No tenant image defined for this VNF")
@@ -152,14 +153,15 @@ class VnfOnBoardingBase(base.TestcaseBase):
self.logger.info("Update OpenStack creds informations")
self.creds.update({
- "username": self.tenant_name,
- "password": self.tenant_name,
"tenant": self.tenant_name,
})
- self.glance_client = os_utils.get_glance_client(self.creds)
self.neutron_client = os_utils.get_neutron_client(self.creds)
self.nova_client = os_utils.get_nova_client(self.creds)
-
+ self.creds.update({
+ "username": self.tenant_name,
+ "password": self.tenant_name,
+ })
+ self.glance_client = os_utils.get_glance_client(self.creds)
self.logger.info("Upload some OS images if it doesn't exist")
temp_dir = os.path.join(self.data_dir, "tmp/")
diff --git a/functest/opnfv_tests/features/copper.py b/functest/opnfv_tests/features/copper.py
index a10364e2..735b315d 100755
--- a/functest/opnfv_tests/features/copper.py
+++ b/functest/opnfv_tests/features/copper.py
@@ -22,4 +22,4 @@ class Copper(base.FeatureBase):
super(Copper, self).__init__(project='copper',
case='copper-notification',
repo='dir_repo_copper')
- self.cmd = 'bash %s/tests/run.sh' % self.repo
+ self.cmd = 'cd %s/tests && bash run.sh && cd -' % self.repo
diff --git a/functest/opnfv_tests/features/doctor.py b/functest/opnfv_tests/features/doctor.py
index dbd803a6..4d295a67 100755
--- a/functest/opnfv_tests/features/doctor.py
+++ b/functest/opnfv_tests/features/doctor.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved
+# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -13,77 +13,12 @@
# 0.2: measure test duration and publish results under json format
#
#
-import argparse
-import os
-import time
+import functest.core.feature_base as base
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-import functest.utils.functest_constants as ft_constants
-parser = argparse.ArgumentParser()
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-args = parser.parse_args()
-
-functest_yaml = functest_utils.get_functest_yaml()
-
-DOCTOR_REPO_DIR = ft_constants.DOCTOR_REPO_DIR
-RESULTS_DIR = ft_constants.FUNCTEST_RESULTS_DIR
-
-logger = ft_logger.Logger("doctor").getLogger()
-
-
-def main():
- exit_code = -1
-
- # if the image name is explicitly set for the doctor suite, set it as
- # enviroment variable
- if 'doctor' in functest_yaml and 'image_name' in functest_yaml['doctor']:
- os.environ["IMAGE_NAME"] = functest_yaml['doctor']['image_name']
-
- cmd = 'cd %s/tests && ./run.sh' % DOCTOR_REPO_DIR
- log_file = RESULTS_DIR + "/doctor.log"
-
- start_time = time.time()
-
- ret = functest_utils.execute_command(cmd,
- info=True,
- output_file=log_file)
-
- stop_time = time.time()
- duration = round(stop_time - start_time, 1)
- if ret == 0:
- logger.info("Doctor test case OK")
- test_status = 'OK'
- exit_code = 0
- else:
- logger.info("Doctor test case FAILED")
- test_status = 'NOK'
-
- details = {
- 'timestart': start_time,
- 'duration': duration,
- 'status': test_status,
- }
- status = "FAIL"
- if details['status'] == "OK":
- status = "PASS"
- functest_utils.logger_test_results("Doctor",
- "doctor-notification",
- status, details)
- if args.report:
- functest_utils.push_results_to_db("doctor",
- "doctor-notification",
- start_time,
- stop_time,
- status,
- details)
- logger.info("Doctor results pushed to DB")
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
+class Doctor(base.FeatureBase):
+ def __init__(self):
+ super(Doctor, self).__init__(project='doctor',
+ case='doctor-notification',
+ repo='dir_repo_doctor')
+ self.cmd = 'cd %s/tests && ./run.sh' % self.repo
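
The 80-line script collapses into a FeatureBase subclass; the duties the deleted main() handled inline (timing, logging to results/doctor.log, pushing results to the DB) are presumably absorbed by feature_base, which is not part of this diff. A hedged usage sketch:

    from functest.opnfv_tests.features.doctor import Doctor

    doctor = Doctor()
    # run() presumably shells out self.cmd, measures the duration and
    # sets the PASS/FAIL criteria, as the deleted main() did by hand.
    exit_code = doctor.run()
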
diff --git a/functest/opnfv_tests/openstack/rally/__init__.py b/functest/opnfv_tests/openstack/rally/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/__init__.py
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
new file mode 100644
index 00000000..e7cac7af
--- /dev/null
+++ b/functest/opnfv_tests/openstack/rally/rally.py
@@ -0,0 +1,554 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import json
+import os
+import re
+import subprocess
+import time
+
+import iniparse
+import yaml
+
+from functest.core import testcase_base
+from functest.utils.constants import CONST
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+logger = ft_logger.Logger('Rally').getLogger()
+
+
+class RallyBase(testcase_base.TestcaseBase):
+ TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
+ 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
+ GLANCE_IMAGE_NAME = CONST.openstack_image_name
+ GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
+ GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
+ GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
+ FLAVOR_NAME = "m1.tiny"
+
+ RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
+ RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
+ TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
+ SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
+ USERS_AMOUNT = 2
+ TENANTS_AMOUNT = 3
+ ITERATIONS_AMOUNT = 10
+ CONCURRENCY = 4
+ RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
+ TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
+ 'tempest/tempest.conf')
+ BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
+ TEMP_DIR = os.path.join(RALLY_DIR, "var")
+
+ CINDER_VOLUME_TYPE_NAME = "volume_test"
+ RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
+ RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
+ RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
+ RALLY_ROUTER_NAME = CONST.rally_router_name
+
+ def __init__(self):
+ super(RallyBase, self).__init__()
+ self.mode = ''
+ self.summary = []
+ self.scenario_dir = ''
+ self.nova_client = os_utils.get_nova_client()
+ self.neutron_client = os_utils.get_neutron_client()
+ self.cinder_client = os_utils.get_cinder_client()
+ self.network_dict = {}
+ self.volume_type = None
+
+ def _build_task_args(self, test_file_name):
+ task_args = {'service_list': [test_file_name]}
+ task_args['image_name'] = self.GLANCE_IMAGE_NAME
+ task_args['flavor_name'] = self.FLAVOR_NAME
+ task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
+ task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
+ task_args['tmpl_dir'] = self.TEMPLATE_DIR
+ task_args['sup_dir'] = self.SUPPORT_DIR
+ task_args['users_amount'] = self.USERS_AMOUNT
+ task_args['tenants_amount'] = self.TENANTS_AMOUNT
+ task_args['use_existing_users'] = False
+ task_args['iterations'] = self.ITERATIONS_AMOUNT
+ task_args['concurrency'] = self.CONCURRENCY
+ task_args['smoke'] = self.smoke
+
+ ext_net = os_utils.get_external_net(self.neutron_client)
+ if ext_net:
+ task_args['floating_network'] = str(ext_net)
+ else:
+ task_args['floating_network'] = ''
+
+ net_id = self.network_dict['net_id']
+ if net_id:
+ task_args['netid'] = str(net_id)
+ else:
+ task_args['netid'] = ''
+
+ auth_url = CONST.OS_AUTH_URL
+ if auth_url is not None:
+ task_args['request_url'] = auth_url.rsplit(":", 1)[0]
+ else:
+ task_args['request_url'] = ''
+
+ return task_args
+
+ def _prepare_test_list(self, test_name):
+ test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
+ scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
+ test_yaml_file_name)
+
+ if not os.path.exists(scenario_file_name):
+ scenario_file_name = os.path.join(self.scenario_dir,
+ test_yaml_file_name)
+
+ if not os.path.exists(scenario_file_name):
+ raise Exception("The scenario '%s' does not exist."
+ % scenario_file_name)
+
+ logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
+ test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
+
+ if not os.path.exists(self.TEMP_DIR):
+ os.makedirs(self.TEMP_DIR)
+
+ self.apply_blacklist(scenario_file_name, test_file_name)
+ return test_file_name
+
+ @staticmethod
+ def get_task_id(cmd_raw):
+ """
+ Get the task id from the raw rally command output.
+ :param cmd_raw:
+ :return: task_id as string
+ """
+ taskid_re = re.compile('^Task +(.*): started$')
+ for line in cmd_raw.splitlines(True):
+ line = line.strip()
+ match = taskid_re.match(line)
+ if match:
+ return match.group(1)
+ return None
+
+ @staticmethod
+ def task_succeed(json_raw):
+ """
+ Parse JSON from rally JSON results
+ :param json_raw:
+ :return: Bool
+ """
+ rally_report = json.loads(json_raw)
+ for report in rally_report:
+ if report is None or report.get('result') is None:
+ return False
+
+ for result in report.get('result'):
+ if result is None or len(result.get('error')) > 0:
+ return False
+
+ return True
+
+ @staticmethod
+ def live_migration_supported():
+ config = iniparse.ConfigParser()
+ if (config.read(RallyBase.TEMPEST_CONF_FILE) and
+ config.has_section('compute-feature-enabled') and
+ config.has_option('compute-feature-enabled',
+ 'live_migration')):
+ return config.getboolean('compute-feature-enabled',
+ 'live_migration')
+
+ return False
+
+ @staticmethod
+ def get_cmd_output(proc):
+ result = ""
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ result += line
+ return result
+
+ @staticmethod
+ def excl_scenario():
+ black_tests = []
+ try:
+ with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ black_list_yaml = yaml.safe_load(black_list_file)
+
+ installer_type = CONST.INSTALLER_TYPE
+ deploy_scenario = CONST.DEPLOY_SCENARIO
+ if (bool(installer_type) * bool(deploy_scenario)):
+ if 'scenario' in black_list_yaml.keys():
+ for item in black_list_yaml['scenario']:
+ scenarios = item['scenarios']
+ installers = item['installers']
+ if (deploy_scenario in scenarios and
+ installer_type in installers):
+ tests = item['tests']
+ black_tests.extend(tests)
+ except Exception:
+ logger.debug("Scenario exclusion not applied.")
+
+ return black_tests
+
+ @staticmethod
+ def excl_func():
+ black_tests = []
+ func_list = []
+
+ try:
+ with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+ black_list_yaml = yaml.safe_load(black_list_file)
+
+ if not RallyBase.live_migration_supported():
+ func_list.append("no_live_migration")
+
+ if 'functionality' in black_list_yaml.keys():
+ for item in black_list_yaml['functionality']:
+ functions = item['functions']
+ for func in func_list:
+ if func in functions:
+ tests = item['tests']
+ black_tests.extend(tests)
+ except Exception:
+ logger.debug("Functionality exclusion not applied.")
+
+ return black_tests
+
+ @staticmethod
+ def apply_blacklist(case_file_name, result_file_name):
+ logger.debug("Applying blacklist...")
+ cases_file = open(case_file_name, 'r')
+ result_file = open(result_file_name, 'w')
+
+ black_tests = list(set(RallyBase.excl_func() +
+ RallyBase.excl_scenario()))
+
+ include = True
+ for cases_line in cases_file:
+ if include:
+ for black_tests_line in black_tests:
+ if re.search(black_tests_line,
+ cases_line.strip().rstrip(':')):
+ include = False
+ break
+ else:
+ result_file.write(str(cases_line))
+ else:
+ if cases_line.isspace():
+ include = True
+
+ cases_file.close()
+ result_file.close()
+
+ @staticmethod
+ def file_is_empty(file_name):
+ try:
+ if os.stat(file_name).st_size > 0:
+ return False
+ except:
+ pass
+
+ return True
+
+ def _run_task(self, test_name):
+ logger.info('Starting test scenario "{}" ...'.format(test_name))
+
+ task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
+ if not os.path.exists(task_file):
+ logger.error("Task file '%s' does not exist." % task_file)
+ raise Exception("Task file '%s' does not exist." % task_file)
+
+ file_name = self._prepare_test_list(test_name)
+ if self.file_is_empty(file_name):
+ logger.info('No tests for scenario "{}"'.format(test_name))
+ return
+
+ cmd_line = ("rally task start --abort-on-sla-failure "
+ "--task {0} "
+ "--task-args \"{1}\""
+ .format(task_file, self._build_task_args(test_name)))
+ logger.debug('running command line: {}'.format(cmd_line))
+
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, shell=True)
+ output = self._get_output(p, test_name)
+ task_id = self.get_task_id(output)
+ logger.debug('task_id : {}'.format(task_id))
+
+ if task_id is None:
+ logger.error('Failed to retrieve task_id, validating task...')
+ cmd_line = ("rally task validate "
+ "--task {0} "
+ "--task-args \"{1}\""
+ .format(task_file, self._build_task_args(test_name)))
+ logger.debug('running command line: {}'.format(cmd_line))
+ p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, shell=True)
+ output = self.get_cmd_output(p)
+ logger.error("Task validation result:" + "\n" + output)
+ return
+
+ # check for result directory and create it otherwise
+ if not os.path.exists(self.RESULTS_DIR):
+ logger.debug('{} does not exist, creating it.'
+ .format(self.RESULTS_DIR))
+ os.makedirs(self.RESULTS_DIR)
+
+ # write html report file
+ report_html_name = 'opnfv-{}.html'.format(test_name)
+ report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+ cmd_line = "rally task report {} --out {}".format(task_id,
+ report_html_dir)
+
+ logger.debug('running command line: {}'.format(cmd_line))
+ os.popen(cmd_line)
+
+ # get and save rally operation JSON result
+ cmd_line = "rally task results %s" % task_id
+ logger.debug('running command line: {}'.format(cmd_line))
+ cmd = os.popen(cmd_line)
+ json_results = cmd.read()
+ report_json_name = 'opnfv-{}.json'.format(test_name)
+ report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
+ with open(report_json_dir, 'w') as f:
+ logger.debug('saving json file')
+ f.write(json_results)
+
+ """ parse JSON operation result """
+ if self.task_succeed(json_results):
+ logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+ else:
+ logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+
+ def _get_output(self, proc, test_name):
+ result = ""
+ nb_tests = 0
+ overall_duration = 0.0
+ success = 0.0
+ nb_totals = 0
+
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if ("Load duration" in line or
+ "started" in line or
+ "finished" in line or
+ " Preparing" in line or
+ "+-" in line or
+ "|" in line):
+ result += line
+ elif "test scenario" in line:
+ result += "\n" + line
+ elif "Full duration" in line:
+ result += line + "\n\n"
+
+ # parse output for summary report
+ if ("| " in line and
+ "| action" not in line and
+ "| Starting" not in line and
+ "| Completed" not in line and
+ "| ITER" not in line and
+ "| " not in line and
+ "| total" not in line):
+ nb_tests += 1
+ elif "| total" in line:
+ percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+ try:
+ success += float(percentage)
+ except ValueError:
+ logger.info('Percentage error: %s, %s' %
+ (percentage, line))
+ nb_totals += 1
+ elif "Full duration" in line:
+ duration = line.split(': ')[1]
+ try:
+ overall_duration += float(duration)
+ except ValueError:
+ logger.info('Duration error: %s, %s' % (duration, line))
+
+ overall_duration = "{:10.2f}".format(overall_duration)
+ if nb_totals == 0:
+ success_avg = 0
+ else:
+ success_avg = "{:0.2f}".format(success / nb_totals)
+
+ scenario_summary = {'test_name': test_name,
+ 'overall_duration': overall_duration,
+ 'nb_tests': nb_tests,
+ 'success': success_avg}
+ self.summary.append(scenario_summary)
+
+ logger.debug("\n" + result)
+
+ return result
+
+ def _prepare_env(self):
+ logger.debug('Validating the test name...')
+ if not (self.test_name in self.TESTS):
+ raise Exception("Test name '%s' is invalid" % self.test_name)
+
+ volume_types = os_utils.list_volume_types(self.cinder_client,
+ private=False)
+ if volume_types:
+ logger.debug("Using existing volume type(s)...")
+ else:
+ logger.debug('Creating volume type...')
+ self.volume_type = os_utils.create_volume_type(
+ self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
+ if self.volume_type is None:
+ raise Exception("Failed to create volume type '%s'" %
+ self.CINDER_VOLUME_TYPE_NAME)
+ logger.debug("Volume type '%s' is created succesfully." %
+ self.CINDER_VOLUME_TYPE_NAME)
+
+ logger.debug('Getting or creating image...')
+ self.image_exists, self.image_id = os_utils.get_or_create_image(
+ self.GLANCE_IMAGE_NAME,
+ self.GLANCE_IMAGE_PATH,
+ self.GLANCE_IMAGE_FORMAT)
+ if self.image_id is None:
+ raise Exception("Failed to get or create image '%s'" %
+ self.GLANCE_IMAGE_NAME)
+
+ logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
+ self.network_dict = os_utils.create_shared_network_full(
+ self.RALLY_PRIVATE_NET_NAME,
+ self.RALLY_PRIVATE_SUBNET_NAME,
+ self.RALLY_ROUTER_NAME,
+ self.RALLY_PRIVATE_SUBNET_CIDR)
+ if self.network_dict is None:
+ raise Exception("Failed to create shared network '%s'" %
+ self.RALLY_PRIVATE_NET_NAME)
+
+ def _run_tests(self):
+ if self.test_name == 'all':
+ for test in self.TESTS:
+ if (test == 'all' or test == 'vm'):
+ continue
+ self._run_task(test)
+ else:
+ self._run_task(self.test_name)
+
+ def _generate_report(self):
+ report = (
+ "\n"
+ " "
+ "\n"
+ " Rally Summary Report\n"
+ "\n"
+ "+===================+============+===============+===========+"
+ "\n"
+ "| Module | Duration | nb. Test Run | Success |"
+ "\n"
+ "+===================+============+===============+===========+"
+ "\n")
+ payload = []
+
+ # for each scenario we draw a row for the table
+ total_duration = 0.0
+ total_nb_tests = 0
+ total_success = 0.0
+ for s in self.summary:
+ name = "{0:<17}".format(s['test_name'])
+ duration = float(s['overall_duration'])
+ total_duration += duration
+ duration = time.strftime("%M:%S", time.gmtime(duration))
+ duration = "{0:<10}".format(duration)
+ nb_tests = "{0:<13}".format(s['nb_tests'])
+ total_nb_tests += int(s['nb_tests'])
+ success = "{0:<10}".format(str(s['success']) + '%')
+ total_success += float(s['success'])
+ report += ("" +
+ "| " + name + " | " + duration + " | " +
+ nb_tests + " | " + success + "|\n" +
+ "+-------------------+------------"
+ "+---------------+-----------+\n")
+ payload.append({'module': name,
+ 'details': {'duration': s['overall_duration'],
+ 'nb tests': s['nb_tests'],
+ 'success': s['success']}})
+
+ total_duration_str = time.strftime("%H:%M:%S",
+ time.gmtime(total_duration))
+ total_duration_str2 = "{0:<10}".format(total_duration_str)
+ total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+
+ if len(self.summary):
+ success_rate = total_success / len(self.summary)
+ else:
+ success_rate = 100
+ success_rate = "{:0.2f}".format(success_rate)
+ success_rate_str = "{0:<10}".format(str(success_rate) + '%')
+ report += ("+===================+============"
+ "+===============+===========+")
+ report += "\n"
+ report += ("| TOTAL: | " + total_duration_str2 + " | " +
+ total_nb_tests_str + " | " + success_rate_str + "|\n")
+ report += ("+===================+============"
+ "+===============+===========+")
+ report += "\n"
+
+ logger.info("\n" + report)
+ payload.append({'summary': {'duration': total_duration,
+ 'nb tests': total_nb_tests,
+ 'nb success': success_rate}})
+
+ self.criteria = ft_utils.check_success_rate(
+ self.case_name, success_rate)
+ self.details = payload
+
+ logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
+ % (self.case_name, success_rate, self.criteria))
+
+ def _clean_up(self):
+ if self.volume_type:
+ logger.debug("Deleting volume type '%s'..." % self.volume_type)
+ os_utils.delete_volume_type(self.cinder_client, self.volume_type)
+
+ if not self.image_exists:
+ logger.debug("Deleting image '%s' with ID '%s'..."
+ % (self.GLANCE_IMAGE_NAME, self.image_id))
+ if not os_utils.delete_glance_image(self.nova_client,
+ self.image_id):
+ logger.error("Error deleting the glance image")
+
+ def run(self):
+ self.start_time = time.time()
+ try:
+ self._prepare_env()
+ self._run_tests()
+ self._generate_report()
+ self._clean_up()
+ except Exception as e:
+ logger.error('Error with run: %s' % e)
+ return testcase_base.TestcaseBase.EX_RUN_ERROR
+ self.stop_time = time.time()
+
+
+class RallySanity(RallyBase):
+ def __init__(self):
+ super(RallySanity, self).__init__()
+ self.case_name = 'rally_sanity'
+ self.mode = 'sanity'
+ self.test_name = 'all'
+ self.smoke = True
+ self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
+
+
+class RallyFull(RallyBase):
+ def __init__(self):
+ super(RallyFull, self).__init__()
+ self.case_name = 'rally_full'
+ self.mode = 'full'
+ self.test_name = 'all'
+ self.smoke = False
+ self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
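
A hedged usage sketch for the two new entry points, which replace the rally_full/rally_sanity cases removed from exec_test.sh and the deleted run_rally-cert.py CLI below:

    from functest.opnfv_tests.openstack.rally.rally import (RallyFull,
                                                            RallySanity)

    ret = RallySanity().run()   # roughly: run_rally-cert.py --sanity all
    # ret = RallyFull().run()   # roughly: run_rally-cert.py all
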
diff --git a/functest/opnfv_tests/openstack/rally/run_rally-cert.py b/functest/opnfv_tests/openstack/rally/run_rally-cert.py
deleted file mode 100755
index b02fd427..00000000
--- a/functest/opnfv_tests/openstack/rally/run_rally-cert.py
+++ /dev/null
@@ -1,613 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import argparse
-import json
-import os
-import re
-import subprocess
-import time
-
-import iniparse
-import yaml
-
-from functest.utils.constants import CONST
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
- 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
-parser = argparse.ArgumentParser()
-parser.add_argument("test_name",
- help="Module name to be tested. "
- "Possible values are : "
- "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
- "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
- "{d[10]} ] "
- "The 'all' value "
- "performs all possible test scenarios"
- .format(d=tests))
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
- help="Create json result file",
- action="store_true")
-parser.add_argument("-s", "--smoke",
- help="Smoke test mode",
- action="store_true")
-parser.add_argument("-v", "--verbose",
- help="Print verbose info about the progress",
- action="store_true")
-parser.add_argument("-n", "--noclean",
- help="Don't clean the created resources for this test.",
- action="store_true")
-parser.add_argument("-z", "--sanity",
- help="Sanity test mode, execute only a subset of tests",
- action="store_true")
-
-args = parser.parse_args()
-
-
-if args.verbose:
- RALLY_STDERR = subprocess.STDOUT
-else:
- RALLY_STDERR = open(os.devnull, 'w')
-
-""" logging configuration """
-logger = ft_logger.Logger("run_rally-cert").getLogger()
-
-RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
-RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
-SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
-FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
-TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
-SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
-TEMP_DIR = os.path.join(RALLY_DIR, "var")
-BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
-
-FLAVOR_NAME = "m1.tiny"
-USERS_AMOUNT = 2
-TENANTS_AMOUNT = 3
-ITERATIONS_AMOUNT = 10
-CONCURRENCY = 4
-
-RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
-TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
- 'tempest/tempest.conf')
-
-RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
-RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
-RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
-RALLY_ROUTER_NAME = CONST.rally_router_name
-
-GLANCE_IMAGE_NAME = CONST.openstack_image_name
-GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
-GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
-GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
- GLANCE_IMAGE_FILENAME)
-CINDER_VOLUME_TYPE_NAME = "volume_test"
-
-
-class GlobalVariables:
- SUMMARY = []
- neutron_client = None
- network_dict = {}
-
-
-def get_task_id(cmd_raw):
- """
- get task id from command rally result
- :param cmd_raw:
- :return: task_id as string
- """
- taskid_re = re.compile('^Task +(.*): started$')
- for line in cmd_raw.splitlines(True):
- line = line.strip()
- match = taskid_re.match(line)
- if match:
- return match.group(1)
- return None
-
-
-def task_succeed(json_raw):
- """
- Parse JSON from rally JSON results
- :param json_raw:
- :return: Bool
- """
- rally_report = json.loads(json_raw)
- for report in rally_report:
- if report is None or report.get('result') is None:
- return False
-
- for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
- return False
-
- return True
-
-
-def live_migration_supported():
- config = iniparse.ConfigParser()
- if (config.read(TEMPEST_CONF_FILE) and
- config.has_section('compute-feature-enabled') and
- config.has_option('compute-feature-enabled', 'live_migration')):
- return config.getboolean('compute-feature-enabled', 'live_migration')
-
- return False
-
-
-def build_task_args(test_file_name):
- task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = GLANCE_IMAGE_NAME
- task_args['flavor_name'] = FLAVOR_NAME
- task_args['glance_image_location'] = GLANCE_IMAGE_PATH
- task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
- task_args['tmpl_dir'] = TEMPLATE_DIR
- task_args['sup_dir'] = SUPPORT_DIR
- task_args['users_amount'] = USERS_AMOUNT
- task_args['tenants_amount'] = TENANTS_AMOUNT
- task_args['use_existing_users'] = False
- task_args['iterations'] = ITERATIONS_AMOUNT
- task_args['concurrency'] = CONCURRENCY
-
- if args.sanity:
- task_args['smoke'] = True
- else:
- task_args['smoke'] = args.smoke
-
- ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
- if ext_net:
- task_args['floating_network'] = str(ext_net)
- else:
- task_args['floating_network'] = ''
-
- net_id = GlobalVariables.network_dict['net_id']
- task_args['netid'] = str(net_id)
-
- auth_url = CONST.OS_AUTH_URL
- if auth_url is not None:
- task_args['request_url'] = auth_url.rsplit(":", 1)[0]
- else:
- task_args['request_url'] = ''
-
- return task_args
-
-
-def get_output(proc, test_name):
- result = ""
- nb_tests = 0
- overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if args.verbose:
- result += line
- else:
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
- # parse output for summary report
- if ("| " in line and
- "| action" not in line and
- "| Starting" not in line and
- "| Completed" not in line and
- "| ITER" not in line and
- "| " not in line and
- "| total" not in line):
- nb_tests += 1
- elif "| total" in line:
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- try:
- success += float(percentage)
- except ValueError:
- logger.info('Percentage error: %s, %s' % (percentage, line))
- nb_totals += 1
- elif "Full duration" in line:
- duration = line.split(': ')[1]
- try:
- overall_duration += float(duration)
- except ValueError:
- logger.info('Duration error: %s, %s' % (duration, line))
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
-
- scenario_summary = {'test_name': test_name,
- 'overall_duration': overall_duration,
- 'nb_tests': nb_tests,
- 'success': success_avg}
- GlobalVariables.SUMMARY.append(scenario_summary)
-
- logger.debug("\n" + result)
-
- return result
-
-
-def get_cmd_output(proc):
- result = ""
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- result += line
-
- return result
-
-
-def excl_scenario():
- black_tests = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- installer_type = CONST.INSTALLER_TYPE
- deploy_scenario = CONST.DEPLOY_SCENARIO
- if (bool(installer_type) * bool(deploy_scenario)):
- if 'scenario' in black_list_yaml.keys():
- for item in black_list_yaml['scenario']:
- scenarios = item['scenarios']
- installers = item['installers']
- if (deploy_scenario in scenarios and
- installer_type in installers):
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Scenario exclusion not applied.")
-
- return black_tests
-
-
-def excl_func():
- black_tests = []
- func_list = []
-
- try:
- with open(BLACKLIST_FILE, 'r') as black_list_file:
- black_list_yaml = yaml.safe_load(black_list_file)
-
- if not live_migration_supported():
- func_list.append("no_live_migration")
-
- if 'functionality' in black_list_yaml.keys():
- for item in black_list_yaml['functionality']:
- functions = item['functions']
- for func in func_list:
- if func in functions:
- tests = item['tests']
- black_tests.extend(tests)
- except:
- logger.debug("Functionality exclusion not applied.")
-
- return black_tests
-
-
-def apply_blacklist(case_file_name, result_file_name):
- logger.debug("Applying blacklist...")
- cases_file = open(case_file_name, 'r')
- result_file = open(result_file_name, 'w')
-
- black_tests = list(set(excl_func() + excl_scenario()))
-
- include = True
- for cases_line in cases_file:
- if include:
- for black_tests_line in black_tests:
- if re.search(black_tests_line, cases_line.strip().rstrip(':')):
- include = False
- break
- else:
- result_file.write(str(cases_line))
- else:
- if cases_line.isspace():
- include = True
-
- cases_file.close()
- result_file.close()
-
-
-def prepare_test_list(test_name):
- test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
- scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)
-
- if not os.path.exists(scenario_file_name):
- if args.sanity:
- scenario_file_name = os.path.join(SANITY_MODE_DIR,
- test_yaml_file_name)
- else:
- scenario_file_name = os.path.join(FULL_MODE_DIR,
- test_yaml_file_name)
-
- if not os.path.exists(scenario_file_name):
- logger.info("The scenario '%s' does not exist."
- % scenario_file_name)
- exit(-1)
-
- logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
- test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)
-
- if not os.path.exists(TEMP_DIR):
- os.makedirs(TEMP_DIR)
-
- apply_blacklist(scenario_file_name, test_file_name)
- return test_file_name
-
-
-def file_is_empty(file_name):
- try:
- if os.stat(file_name).st_size > 0:
- return False
- except:
- pass
-
- return True
-
-
-def run_task(test_name):
- #
- # the "main" function of the script who launch rally for a task
- # :param test_name: name for the rally test
- # :return: void
- #
- logger.info('Starting test scenario "{}" ...'.format(test_name))
- start_time = time.time()
-
- task_file = os.path.join(RALLY_DIR, 'task.yaml')
- if not os.path.exists(task_file):
- logger.error("Task file '%s' does not exist." % task_file)
- exit(-1)
-
- file_name = prepare_test_list(test_name)
- if file_is_empty(file_name):
- logger.info('No tests for scenario "{}"'.format(test_name))
- return
-
- cmd_line = ("rally task start --abort-on-sla-failure "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, build_task_args(test_name)))
- logger.debug('running command line: {}'.format(cmd_line))
-
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=RALLY_STDERR, shell=True)
- output = get_output(p, test_name)
- task_id = get_task_id(output)
- logger.debug('task_id : {}'.format(task_id))
-
- if task_id is None:
- logger.error('Failed to retrieve task_id, validating task...')
- cmd_line = ("rally task validate "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, build_task_args(test_name)))
- logger.debug('running command line: {}'.format(cmd_line))
- p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
- output = get_cmd_output(p)
- logger.error("Task validation result:" + "\n" + output)
- return
-
- # check for result directory and create it otherwise
- if not os.path.exists(RESULTS_DIR):
- logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
- os.makedirs(RESULTS_DIR)
-
- # write html report file
- report_html_name = 'opnfv-{}.html'.format(test_name)
- report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
- cmd_line = "rally task report {} --out {}".format(task_id,
- report_html_dir)
-
- logger.debug('running command line: {}'.format(cmd_line))
- os.popen(cmd_line)
-
- # get and save rally operation JSON result
- cmd_line = "rally task results %s" % task_id
- logger.debug('running command line: {}'.format(cmd_line))
- cmd = os.popen(cmd_line)
- json_results = cmd.read()
- report_json_name = 'opnfv-{}.json'.format(test_name)
- report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
- with open(report_json_dir, 'w') as f:
- logger.debug('saving json file')
- f.write(json_results)
-
- with open(report_json_dir) as json_file:
- json_data = json.load(json_file)
-
- """ parse JSON operation result """
- status = "FAIL"
- if task_succeed(json_results):
- logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
- status = "PASS"
- else:
- logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-
- # Push results in payload of testcase
- if args.report:
- stop_time = time.time()
- logger.debug("Push Rally detailed results into DB")
- ft_utils.push_results_to_db("functest",
- "Rally_details",
- start_time,
- stop_time,
- status,
- json_data)
-
-
-def main():
-
- GlobalVariables.nova_client = os_utils.get_nova_client()
- GlobalVariables.neutron_client = os_utils.get_neutron_client()
- cinder_client = os_utils.get_cinder_client()
-
- start_time = time.time()
-
- # configure script
- if not (args.test_name in tests):
- logger.error('argument not valid')
- exit(-1)
-
- GlobalVariables.SUMMARY = []
-
- volume_types = os_utils.list_volume_types(cinder_client,
- private=False)
- if not volume_types:
- volume_type = os_utils.create_volume_type(
- cinder_client, CINDER_VOLUME_TYPE_NAME)
- if not volume_type:
- logger.error("Failed to create volume type...")
- exit(-1)
- else:
- logger.debug("Volume type '%s' created succesfully..."
- % CINDER_VOLUME_TYPE_NAME)
- else:
- logger.debug("Using existing volume type(s)...")
-
- image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
- GLANCE_IMAGE_PATH,
- GLANCE_IMAGE_FORMAT)
- if not image_id:
- exit(-1)
-
- logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
- GlobalVariables.network_dict = \
- os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
- RALLY_PRIVATE_SUBNET_NAME,
- RALLY_ROUTER_NAME,
- RALLY_PRIVATE_SUBNET_CIDR)
- if not GlobalVariables.network_dict:
- exit(1)
-
- if args.test_name == "all":
- for test_name in tests:
- if not (test_name == 'all' or
- test_name == 'vm'):
- run_task(test_name)
- else:
- logger.debug("Test name: " + args.test_name)
- run_task(args.test_name)
-
- report = ("\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
- payload = []
- stop_time = time.time()
-
- # for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
- for s in GlobalVariables.SUMMARY:
- name = "{0:<17}".format(s['test_name'])
- duration = float(s['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(s['nb_tests'])
- total_nb_tests += int(s['nb_tests'])
- success = "{0:<10}".format(str(s['success']) + '%')
- total_success += float(s['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
- 'details': {'duration': s['overall_duration'],
- 'nb tests': s['nb_tests'],
- 'success': s['success']}})
-
- total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
- if len(GlobalVariables.SUMMARY):
- success_rate = total_success / len(GlobalVariables.SUMMARY)
- else:
- success_rate = 100
- success_rate = "{:0.2f}".format(success_rate)
- success_rate_str = "{0:<10}".format(str(success_rate) + '%')
- report += "+===================+============+===============+===========+"
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + success_rate_str + "|\n")
- report += "+===================+============+===============+===========+"
- report += "\n"
-
- logger.info("\n" + report)
- payload.append({'summary': {'duration': total_duration,
- 'nb tests': total_nb_tests,
- 'nb success': success_rate}})
-
- if args.sanity:
- case_name = "rally_sanity"
- else:
- case_name = "rally_full"
-
- # Evaluation of the success criteria
- status = ft_utils.check_success_rate(case_name, success_rate)
-
- exit_code = -1
- if status == "PASS":
- exit_code = 0
-
- if args.report:
- logger.debug("Pushing Rally summary into DB...")
- ft_utils.push_results_to_db("functest",
- case_name,
- start_time,
- stop_time,
- status,
- payload)
- if args.noclean:
- exit(exit_code)
-
- if not image_exists:
- logger.debug("Deleting image '%s' with ID '%s'..."
- % (GLANCE_IMAGE_NAME, image_id))
- if not os_utils.delete_glance_image(GlobalVariables.nova_client,
- image_id):
- logger.error("Error deleting the glance image")
-
- if not volume_types:
- logger.debug("Deleting volume type '%s'..."
- % CINDER_VOLUME_TYPE_NAME)
- if not os_utils.delete_volume_type(cinder_client, volume_type):
- logger.error("Error in deleting volume type...")
-
- exit(exit_code)
-
-
-if __name__ == '__main__':
- main()
diff --git a/functest/opnfv_tests/openstack/tempest/conf_utils.py b/functest/opnfv_tests/openstack/tempest/conf_utils.py
index 67b52796..4c5e8663 100644
--- a/functest/opnfv_tests/openstack/tempest/conf_utils.py
+++ b/functest/opnfv_tests/openstack/tempest/conf_utils.py
@@ -11,10 +11,12 @@ import ConfigParser
import os
import re
import shutil
+import subprocess
import opnfv.utils.constants as releng_constants
from functest.utils.constants import CONST
+import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
@@ -39,8 +41,74 @@ TEMPEST_LIST = os.path.join(TEMPEST_RESULTS_DIR, 'test_list.txt')
CI_INSTALLER_TYPE = CONST.INSTALLER_TYPE
CI_INSTALLER_IP = CONST.INSTALLER_IP
+""" logging configuration """
+logger = ft_logger.Logger("Tempest").getLogger()
-def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
+
+def get_verifier_id():
+ """
+ Returns the verifier id for the current Tempest
+ """
+ cmd = ("rally verify list-verifiers | awk '/" +
+ CONST.tempest_deployment_name +
+ "/ {print $2}'")
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ deployment_uuid = p.stdout.readline().rstrip()
+ if deployment_uuid == "":
+ logger.error("Tempest verifier not found.")
+ raise Exception('Error with command:%s' % cmd)
+ return deployment_uuid
+
+
+def get_verifier_deployment_id():
+ """
+ Returns deployment id for active Rally deployment
+ """
+ cmd = ("rally deployment list | awk '/" +
+ CONST.rally_deployment_name +
+ "/ {print $2}'")
+ p = subprocess.Popen(cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ deployment_uuid = p.stdout.readline().rstrip()
+ if deployment_uuid == "":
+ logger.error("Rally deployment not found.")
+ raise Exception('Error with command:%s' % cmd)
+ return deployment_uuid
+
+
+def get_verifier_repo_dir(verifier_id):
+ """
+ Returns the installed verifier repo directory for Tempest
+ """
+ if not verifier_id:
+ verifier_id = get_verifier_id()
+
+ return os.path.join(CONST.dir_rally_inst,
+ 'verification',
+ 'verifier-{}'.format(verifier_id),
+ 'repo')
+
+
+def get_verifier_deployment_dir(verifier_id, deployment_id):
+ """
+ Returns Rally deployment directory for current verifier
+ """
+ if not verifier_id:
+ verifier_id = get_verifier_id()
+
+ if not deployment_id:
+ deployment_id = get_verifier_deployment_id()
+
+ return os.path.join(CONST.dir_rally_inst,
+ 'verification',
+ 'verifier-{}'.format(verifier_id),
+ 'for-deployment-{}'.format(deployment_id))
+
+
+def configure_tempest(deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
"""
Add/update needed parameters into tempest.conf file generated by Rally
"""
@@ -82,6 +150,8 @@ def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
config.set('identity', 'password', CONST.tempest_identity_user_password)
config.set(
'validation', 'ssh_timeout', CONST.tempest_validation_ssh_timeout)
+ config.set('object-storage', 'operator_role',
+ CONST.tempest_object_storage_operator_role)
if CONST.OS_ENDPOINT_TYPE is not None:
services_list = ['compute',
@@ -108,12 +178,12 @@ def configure_tempest(logger, deployment_dir, IMAGE_ID=None, FLAVOR_ID=None):
return releng_constants.EXIT_OK
-def configure_tempest_multisite(logger, deployment_dir):
+def configure_tempest_multisite(deployment_dir):
"""
Add/update needed parameters into tempest.conf file generated by Rally
"""
logger.debug("configure the tempest")
- configure_tempest(logger, deployment_dir)
+ configure_tempest(deployment_dir)
logger.debug("Finding tempest.conf file...")
tempest_conf_old = os.path.join(deployment_dir, 'tempest.conf')
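
With the verifier helpers now module-level in conf_utils, callers outside TempestCommon can resolve the Rally/Tempest directories without a class instance. A minimal sketch, mirroring the new tempest.py __init__:

    from functest.opnfv_tests.openstack.tempest import conf_utils

    verifier_id = conf_utils.get_verifier_id()
    deployment_id = conf_utils.get_verifier_deployment_id()
    repo_dir = conf_utils.get_verifier_repo_dir(verifier_id)
    deployment_dir = conf_utils.get_verifier_deployment_dir(verifier_id,
                                                            deployment_id)
    # configure_tempest no longer takes a logger argument:
    conf_utils.configure_tempest(deployment_dir)
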
diff --git a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
index 5c8581f6..0a4256ce 100644
--- a/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
+++ b/functest/opnfv_tests/openstack/tempest/custom_tests/blacklist.txt
@@ -74,23 +74,3 @@
- tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
- tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
- tempest.scenario.test_volume_boot_pattern.TestVolumeBootPatternV2.test_volume_boot_pattern
-
--
- # https://bugs.launchpad.net/tempest/+bug/1586931
- scenarios:
- - os-odl_l2-nofeature-ha
- - os-odl_l2-nofeature-noha
- - os-odl_l2-sfc-ha
- - os-odl_l2-sfc-noha
- - os-odl_l3-nofeature-ha
- - os-odl_l3-nofeature-noha
- - os-nosdn-kvm-ha
- - os-nosdn-kvm-noha
- - os-nosdn-nofeature-ha
- - os-nosdn-nofeature-noha
- - os-nosdn-ovs-ha
- - os-nosdn-ovs-noha
- installers:
- - fuel
- tests:
- - tempest.scenario.test_server_basic_ops.TestServerBasicOps.test_server_basic_ops
diff --git a/functest/opnfv_tests/openstack/tempest/tempest.py b/functest/opnfv_tests/openstack/tempest/tempest.py
index 0014b718..2bdbe47f 100644
--- a/functest/opnfv_tests/openstack/tempest/tempest.py
+++ b/functest/opnfv_tests/openstack/tempest/tempest.py
@@ -35,74 +35,15 @@ class TempestCommon(testcase_base.TestcaseBase):
self.OPTION = ""
self.FLAVOR_ID = None
self.IMAGE_ID = None
- self.VERIFIER_ID = self.get_verifier_id()
- self.VERIFIER_REPO_DIR = self.get_verifier_repo_dir()
- self.DEPLOYMENT_ID = self.get_verifier_deployment_id()
- self.DEPLOYMENT_DIR = self.get_verifier_deployment_dir()
+ self.VERIFIER_ID = conf_utils.get_verifier_id()
+ self.VERIFIER_REPO_DIR = conf_utils.get_verifier_repo_dir(
+ self.VERIFIER_ID)
+ self.DEPLOYMENT_ID = conf_utils.get_verifier_deployment_id()
+ self.DEPLOYMENT_DIR = conf_utils.get_verifier_deployment_dir(
+ self.VERIFIER_ID, self.DEPLOYMENT_ID)
self.VERIFICATION_ID = None
@staticmethod
- def get_verifier_id():
- """
- Returns verifer id for current Tempest
- """
- cmd = ("rally verify list-verifiers | awk '/" +
- CONST.tempest_deployment_name +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Tempest verifier not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
- @staticmethod
- def get_verifier_deployment_id():
- """
- Returns deployment id for active Rally deployment
- """
- cmd = ("rally deployment list | awk '/" +
- CONST.rally_deployment_name +
- "/ {print $2}'")
- p = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- deployment_uuid = p.stdout.readline().rstrip()
- if deployment_uuid == "":
- logger.error("Rally deployment not found.")
- raise Exception('Error with command:%s' % cmd)
- return deployment_uuid
-
- def get_verifier_repo_dir(self):
- """
- Returns installed verfier repo directory for Tempest
- """
- if not self.VERIFIER_ID:
- self.VERIFIER_ID = self.get_verifier_id()
-
- return os.path.join(CONST.dir_rally_inst,
- 'verification',
- 'verifier-{}'.format(self.VERIFIER_ID),
- 'repo')
-
- def get_verifier_deployment_dir(self):
- """
- Returns Rally deployment directory for current verifier
- """
- if not self.VERIFIER_ID:
- self.VERIFIER_ID = self.get_verifier_id()
-
- if not self.DEPLOYMENT_ID:
- self.DEPLOYMENT_ID = self.get_verifier_deployment_id()
-
- return os.path.join(CONST.dir_rally_inst,
- 'verification',
- 'verifier-{}'.format(self.VERIFIER_ID),
- 'for-deployment-{}'.format(self.DEPLOYMENT_ID))
-
- @staticmethod
def read_file(filename):
with open(filename) as src:
return [line.strip() for line in src.readlines()]
@@ -321,8 +262,7 @@ class TempestCommon(testcase_base.TestcaseBase):
if res != testcase_base.TestcaseBase.EX_OK:
return res
- res = conf_utils.configure_tempest(logger,
- self.DEPLOYMENT_DIR,
+ res = conf_utils.configure_tempest(self.DEPLOYMENT_DIR,
self.IMAGE_ID,
self.FLAVOR_ID)
if res != testcase_base.TestcaseBase.EX_OK:
@@ -341,11 +281,6 @@ class TempestCommon(testcase_base.TestcaseBase):
self.stop_time = time.time()
- if self.criteria == "PASS":
- return testcase_base.TestcaseBase.EX_OK
- else:
- return testcase_base.TestcaseBase.EX_TESTCASE_FAILED
-
class TempestSmokeSerial(TempestCommon):
@@ -380,7 +315,7 @@ class TempestMultisite(TempestCommon):
self.case_name = "multisite"
self.MODE = "feature_multisite"
self.OPTION = "--concurrency 1"
- conf_utils.configure_tempest_multisite(logger, self.DEPLOYMENT_DIR)
+ conf_utils.configure_tempest_multisite(self.DEPLOYMENT_DIR)
class TempestCustom(TempestCommon):
diff --git a/functest/opnfv_tests/openstack/vping/vping_base.py b/functest/opnfv_tests/openstack/vping/vping_base.py
index 8285d93f..8285d93f 100755..100644
--- a/functest/opnfv_tests/openstack/vping/vping_base.py
+++ b/functest/opnfv_tests/openstack/vping/vping_base.py
diff --git a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
index 349b42a8..090502ba 100644
--- a/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
+++ b/functest/opnfv_tests/sdn/onos/sfc/sfc_onos.py
@@ -232,10 +232,8 @@ class SfcOnos:
else:
return(response.status_code)
- url = ("http://%s:8774//v2.1/%s/ports/"
- "%s/flavors?name=m1.tiny" % (self.nova_hostname,
- self.tenant_id))
-
+ url = ("http://%s:8774/v2.1/%s/flavors?"
+ "name=m1.tiny" % (self.nova_hostname, self.tenant_id))
headers = {"Accept": "application/json", "Content-Type":
"application/json", "X-Auth-Token": self.token_id}
response = requests.get(url, headers=headers)
@@ -344,7 +342,7 @@ class SfcOnos:
def getPortPair(self):
"""Query the Portpair id value."""
for p in range(0, 1):
- url = ("http://%s:9696/%s/ports/"
+ url = ("http://%s:9696/%s/"
"sfc/port_pairs?name=PP1" % (self.neutron_hostname,
self.osver))
headers = {"Accept": "application/json",
diff --git a/functest/opnfv_tests/sdn/onos/teston/onos.py b/functest/opnfv_tests/sdn/onos/teston/onos.py
index 213bdb7d..2537e18d 100755
--- a/functest/opnfv_tests/sdn/onos/teston/onos.py
+++ b/functest/opnfv_tests/sdn/onos/teston/onos.py
@@ -175,7 +175,7 @@ def CreateImage():
def SfcTest():
- cmd = "python " + ONOS_SFC_PATH + "/Sfc.py"
+ cmd = "python " + ONOS_SFC_PATH + "/sfc.py"
logger.debug("Run sfc tests")
os.system(cmd)
@@ -187,7 +187,7 @@ def GetIp(type):
def Replace(before, after):
- file = "/Sfc_fun.py"
+ file = "/sfc_onos.py"
cmd = "sed -i 's/" + before + "/" + after + "/g' " + ONOS_SFC_PATH + file
os.system(cmd)
@@ -199,7 +199,7 @@ def SetSfcConf():
Replace("glance_ip", GetIp("glance"))
pwd = ft_constants.OS_PASSWORD
Replace("console", pwd)
- creds_neutron = openstack_utils.get_credentials("neutron")
+ creds_neutron = openstack_utils.get_credentials()
neutron_client = neutronclient.Client(**creds_neutron)
ext_net = openstack_utils.get_external_net(neutron_client)
Replace("admin_floating_net", ext_net)
diff --git a/functest/opnfv_tests/vnf/aaa/aaa.py b/functest/opnfv_tests/vnf/aaa/aaa.py
index 8898b9fc..f1c265f4 100644
--- a/functest/opnfv_tests/vnf/aaa/aaa.py
+++ b/functest/opnfv_tests/vnf/aaa/aaa.py
@@ -21,8 +21,7 @@ class AaaVnf(vnf_base.VnfOnBoardingBase):
logger = ft_logger.Logger("VNF AAA").getLogger()
def __init__(self):
- super(AaaVnf, self).__init__()
- self.case_name = "aaa"
+ super(AaaVnf, self).__init__(case="aaa")
def deploy_orchestrator(self):
self.logger.info("No VNFM needed to deploy a free radius here")
diff --git a/functest/opnfv_tests/vnf/ims/cloudify_ims.py b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
index e584519b..13a5af4f 100644
--- a/functest/opnfv_tests/vnf/ims/cloudify_ims.py
+++ b/functest/opnfv_tests/vnf/ims/cloudify_ims.py
@@ -25,7 +25,8 @@ from orchestrator_cloudify import Orchestrator
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='cloudify_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
diff --git a/functest/opnfv_tests/vnf/ims/opera_ims.py b/functest/opnfv_tests/vnf/ims/opera_ims.py
index fa8f9ec9..073a56c3 100644
--- a/functest/opnfv_tests/vnf/ims/opera_ims.py
+++ b/functest/opnfv_tests/vnf/ims/opera_ims.py
@@ -21,7 +21,8 @@ from functest.utils.constants import CONST
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='opera_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
diff --git a/functest/opnfv_tests/vnf/ims/orchestra_ims.py b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
index ebd6c9ba..28f37f05 100644
--- a/functest/opnfv_tests/vnf/ims/orchestra_ims.py
+++ b/functest/opnfv_tests/vnf/ims/orchestra_ims.py
@@ -21,7 +21,8 @@ from functest.utils.constants import CONST
class ImsVnf(vnf_base.VnfOnBoardingBase):
- def __init__(self, project='functest', case='', repo='', cmd=''):
+ def __init__(self, project='functest', case='orchestra_ims',
+ repo='', cmd=''):
super(ImsVnf, self).__init__(project, case, repo, cmd)
self.logger = ft_logger.Logger("vIMS").getLogger()
self.case_dir = os.path.join(CONST.functest_test, 'vnf/ims/')
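
All three vIMS variants, like AaaVnf above, now bake their case name into the constructor defaults instead of assigning case_name after the fact. A sketch of the base-class contract this assumes; the attribute names are illustrative guesses, not confirmed by this diff:

# Assumed shape of VnfOnBoardingBase.__init__, inferred from how the
# subclasses call super(); attribute names are guesses.
class VnfOnBoardingBase(object):
    def __init__(self, project='functest', case='', repo='', cmd=''):
        self.project_name = project
        self.case_name = case
        self.repo = repo
        self.cmd = cmd
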
diff --git a/functest/tests/unit/cli/commands/test_cli_env.py b/functest/tests/unit/cli/commands/test_cli_env.py
index f70761dc..4b6ea57a 100644
--- a/functest/tests/unit/cli/commands/test_cli_env.py
+++ b/functest/tests/unit/cli/commands/test_cli_env.py
@@ -11,7 +11,6 @@ import unittest
from git.exc import NoSuchPathError
import mock
-mock.patch('logging.FileHandler').start() # noqa
from functest.cli.commands import cli_env
from functest.utils.constants import CONST
from functest.tests.unit import test_utils
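
The module-level FileHandler patch disappears here and in the two test modules below: once functest_logger stops attaching a file handler at import time (see its diff further down), importing the code under test no longer writes to /home/opnfv, so no mock is needed. A sketch of the before/after:

# Before: the patch had to run ahead of any functest import.
# mock.patch('logging.FileHandler').start()  # noqa
# After: a plain import suffices; setup_logging() only configures the
# handlers named in the logging config file.
from functest.cli.commands import cli_env
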
diff --git a/functest/tests/unit/core/test_testcase_base.py b/functest/tests/unit/core/test_testcase_base.py
index 8df524b0..94d2e966 100644..100755
--- a/functest/tests/unit/core/test_testcase_base.py
+++ b/functest/tests/unit/core/test_testcase_base.py
@@ -12,8 +12,6 @@ import mock
import os
import unittest
-mock.patch('logging.FileHandler').start() # noqa
-
from functest.core import testcase_base
diff --git a/functest/tests/unit/odl/test_odl.py b/functest/tests/unit/odl/test_odl.py
index 59ab2c65..568fdc82 100644
--- a/functest/tests/unit/odl/test_odl.py
+++ b/functest/tests/unit/odl/test_odl.py
@@ -19,7 +19,6 @@ from robot.errors import DataError, RobotError
from robot.result import testcase
from robot.utils.robottime import timestamp_to_secs
-mock.patch('logging.FileHandler').start() # noqa
from functest.core import testcase_base
from functest.opnfv_tests.sdn.odl import odl
diff --git a/functest/tests/unit/test_logging.ini b/functest/tests/unit/test_logging.ini
new file mode 100644
index 00000000..3d5b947c
--- /dev/null
+++ b/functest/tests/unit/test_logging.ini
@@ -0,0 +1,27 @@
+[loggers]
+keys=root,functest_logger
+
+[logger_root]
+level=DEBUG
+handlers=console
+
+[logger_functest_logger]
+level=DEBUG
+handlers=console
+qualname=functest.utils.functest_logger
+propagate=0
+
+[handlers]
+keys=console
+
+[handler_console]
+class=StreamHandler
+level=INFO
+formatter=standard
+args=(sys.stdout,)
+
+[formatters]
+keys=standard
+
+[formatter_standard]
+format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
\ No newline at end of file
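
A minimal sketch of how a unit test might load this INI; the path is assumed to be relative to the repository root:

# Sketch: wiring the new INI into a test run via fileConfig.
import logging
import logging.config

logging.config.fileConfig('functest/tests/unit/test_logging.ini',
                          disable_existing_loggers=False)
log = logging.getLogger('functest.utils.functest_logger')
log.info("shown on stdout: the console handler is at INFO")
log.debug("suppressed: the handler filters below INFO")
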
diff --git a/functest/tests/unit/utils/test_functest_utils.py b/functest/tests/unit/utils/test_functest_utils.py
index c4b56660..ce9086a7 100644
--- a/functest/tests/unit/utils/test_functest_utils.py
+++ b/functest/tests/unit/utils/test_functest_utils.py
@@ -18,7 +18,6 @@ import mock
import requests
from functest.tests.unit import test_utils
-mock.patch('logging.FileHandler').start() # noqa
from functest.utils import functest_utils
diff --git a/functest/utils/functest_constants.py b/functest/utils/functest_constants.py
index 7fb03e8a..bd109785 100644
--- a/functest/utils/functest_constants.py
+++ b/functest/utils/functest_constants.py
@@ -146,6 +146,8 @@ TEMPEST_USER_PASSWORD = get_value('tempest.identity.user_password',
'TEMPEST_USER_PASSWORD')
TEMPEST_SSH_TIMEOUT = get_value('tempest.validation.ssh_timeout',
'TEMPEST_SSH_TIMEOUT')
+TEMPEST_OPERATOR_ROLE = get_value('tempest.object_storage.operator_role',
+ 'TEMPEST_OPERATOR_ROLE')
TEMPEST_USE_CUSTOM_IMAGES = get_value('tempest.use_custom_images',
'TEMPEST_USE_CUSTOM_IMAGES')
TEMPEST_USE_CUSTOM_FLAVORS = get_value('tempest.use_custom_flavors',
@@ -218,8 +220,6 @@ PROMISE_SUBNET_CIDR = get_value('promise.subnet_cidr',
'PROMISE_SUBNET_CIDR')
PROMISE_ROUTER_NAME = get_value('promise.router_name',
'PROMISE_ROUTER_NAME')
-DOCTOR_REPO_DIR = get_value('general.dir.dir_repo_doctor',
- 'DOCTOR_REPO_DIR')
COPPER_REPO_DIR = get_value('general.dir.repo_copper',
'COPPER_REPO_DIR')
EXAMPLE_INSTANCE_NAME = get_value('example.vm_name',
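
A sketch of where the new constant is expected to land: Tempest reads the operator role from the [object-storage] section of tempest.conf. The target path and Python 2 ConfigParser usage below are assumptions matching the era of this codebase:

# Sketch: persisting TEMPEST_OPERATOR_ROLE (SwiftOperator by default)
# into tempest.conf; the file path is a placeholder.
import ConfigParser

import functest.utils.functest_constants as ft_constants

config = ConfigParser.RawConfigParser()
config.read('tempest.conf')
if not config.has_section('object-storage'):
    config.add_section('object-storage')
config.set('object-storage', 'operator_role',
           ft_constants.TEMPEST_OPERATOR_ROLE)
with open('tempest.conf', 'wb') as config_file:
    config.write(config_file)
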
diff --git a/functest/utils/functest_logger.py b/functest/utils/functest_logger.py
index c0fba082..f09f56be 100644..100755
--- a/functest/utils/functest_logger.py
+++ b/functest/utils/functest_logger.py
@@ -20,36 +20,50 @@
# logger = fl.Logger("script_name").getLogger()
# logger.info("message to be shown with - INFO - ")
# logger.debug("message to be shown with - DEBUG -")
-
import logging
+import logging.config
import os
+import json
-class Logger:
- def __init__(self, logger_name):
+from functest.utils.constants import CONST
+
+logger = logging.getLogger(__name__)
+
+
+def is_debug():
+ if CONST.CI_DEBUG and CONST.CI_DEBUG.lower() == "true":
+ return True
+ return False
- CI_DEBUG = os.getenv('CI_DEBUG')
+def setup_logging(default_path=CONST.dir_functest_logging_cfg,
+ default_level=logging.INFO,
+ env_key='LOG_CFG'):
+ path = default_path
+ value = os.getenv(env_key, None)
+ if value:
+ path = value
+ if os.path.exists(path):
+ with open(path, 'rt') as f:
+ config = json.load(f)
+ if (config['handlers'] and
+ config['handlers']['console']):
+ stream_level = logging.INFO
+ if is_debug():
+ stream_level = logging.DEBUG
+ config['handlers']['console']['level'] = stream_level
+ logging.config.dictConfig(config)
+ else:
+ logging.basicConfig(level=default_level)
+
+
+setup_logging()
+
+
+class Logger:
+ def __init__(self, logger_name):
self.logger = logging.getLogger(logger_name)
- self.logger.propagate = 0
- self.logger.setLevel(logging.DEBUG)
-
- ch = logging.StreamHandler()
- formatter = logging.Formatter('%(asctime)s - %(name)s - '
- '%(levelname)s - %(message)s')
- ch.setFormatter(formatter)
- if CI_DEBUG is not None and CI_DEBUG.lower() == "true":
- ch.setLevel(logging.DEBUG)
- self.logger.parent.level = logging.DEBUG
- else:
- ch.setLevel(logging.INFO)
- self.logger.parent.level = logging.INFO
- self.logger.addHandler(ch)
-
- hdlr = logging.FileHandler('/home/opnfv/functest/results/functest.log')
- hdlr.setFormatter(formatter)
- hdlr.setLevel(logging.DEBUG)
- self.logger.addHandler(hdlr)
def getLogger(self):
return self.logger
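
setup_logging() expects logging.json to hold a dictConfig schema with at least a console handler whose level it can override from CI_DEBUG. A minimal sketch of a compatible dict, inlined in Python; the shipped functest/ci/logging.json may define more handlers and loggers:

# Sketch: the minimal dict shape setup_logging() can consume; the real
# logging.json likely also defines a file handler for functest.log.
import logging.config

config = {
    "version": 1,
    "formatters": {
        "standard": {
            "format": "%(asctime)s - %(name)s - "
                      "%(levelname)s - %(message)s"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",        # raised to DEBUG when CI_DEBUG=true
            "formatter": "standard"
        }
    },
    "root": {"level": "DEBUG", "handlers": ["console"]},
}
logging.config.dictConfig(config)
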
diff --git a/functest/utils/openstack_tacker.py b/functest/utils/openstack_tacker.py
index f17b421e..f17b421e 100755..100644
--- a/functest/utils/openstack_tacker.py
+++ b/functest/utils/openstack_tacker.py