#!/usr/bin/env python

# Copyright (c) 2019 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""Define classes required to run any Behave test suites."""

from __future__ import division

import json
import logging
import os
import time

from behave.__main__ import main as behave_main

from xtesting.core import testcase

__author__ = "Deepak Chandella <deepak.chandella@orange.com>"


class BehaveFramework(testcase.TestCase):
    """BehaveFramework runner."""
    # pylint: disable=too-many-instance-attributes

    __logger = logging.getLogger(__name__)
    dir_results = "/var/lib/xtesting/results"

    def __init__(self, **kwargs):
        super(BehaveFramework, self).__init__(**kwargs)
        # res_dir must be set before building json_file: derive it from the
        # class-level dir_results and the case name given by TestCase.
        self.res_dir = os.path.join(self.dir_results, self.case_name)
        self.json_file = os.path.join(self.res_dir, 'output.json')
        self.total_tests = 0
        self.pass_tests = 0
        self.fail_tests = 0
        self.skip_tests = 0
        self.response = None

    def parse_results(self):
        """Parse output.json and get the details in it."""
        try:
            with open(self.json_file) as stream_:
                self.response = json.load(stream_)
        except IOError:
            self.__logger.error("Error reading the file %s", self.json_file)
        try:
            if self.response:
                # behave's JSON formatter emits one entry per feature, each
                # carrying a 'status' field.
                self.total_tests = len(self.response)
                for item in self.response:
                    if item['status'] == 'passed':
                        self.pass_tests += 1
                    elif item['status'] == 'failed':
                        self.fail_tests += 1
                    elif item['status'] == 'skipped':
                        self.skip_tests += 1
        except KeyError:
            self.__logger.error("Error in json - %s", self.response)
        # result is the pass rate in percent; total_tests stays 0 if nothing
        # ran or the report could not be read.
        try:
            self.result = 100 * (
                self.pass_tests / self.total_tests)
        except ZeroDivisionError:
            self.__logger.error("No test has been run")
        self.details = {}
        self.details['total_tests'] = self.total_tests
        self.details['pass_tests'] = self.pass_tests
        self.details['fail_tests'] = self.fail_tests
        self.details['skip_tests'] = self.skip_tests
        self.details['tests'] = self.response

    def run(self, **kwargs):
        """Run the BehaveFramework feature files

        Here are the steps:
           * create the output directories if required,
           * run behave features with parameters,
           * get the results from output.json.

        Args:
            kwargs: Arbitrary keyword arguments.

        Returns:
            EX_OK if all suites ran well.
            EX_RUN_ERROR otherwise.
        """
        try:
            suites = kwargs["suites"]
            tags = kwargs.get("tags", [])
        except KeyError:
            self.__logger.exception("Mandatory args were not passed")
            return self.EX_RUN_ERROR
        if not os.path.exists(self.res_dir):
            try:
                os.makedirs(self.res_dir)
            except Exception:  # pylint: disable=broad-except
                self.__logger.exception("Cannot create %s", self.res_dir)
                return self.EX_RUN_ERROR
        # Build the behave command line: filter scenarios by tag, dump the
        # results as JSON into res_dir, then append every feature path.
        config = ['--tags=' + ','.join(tags),
                  '--format=json',
                  '--outfile=' + self.json_file]
        for feature in suites:
            config.append(feature)
        self.start_time = time.time()
        behave_main(config)
        self.stop_time = time.time()
        try:
            self.parse_results()
            self.__logger.info("Results were successfully parsed")
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Cannot parse results")
            return self.EX_RUN_ERROR
        return self.EX_OK
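

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: xtesting
    # normally drives this runner through its testcases.yaml configuration,
    # but the class can also be exercised directly. The case name, feature
    # path and tag below are purely illustrative.
    logging.basicConfig(level=logging.INFO)
    FRAMEWORK = BehaveFramework(case_name='behave_example')
    STATUS = FRAMEWORK.run(suites=['/path/to/features'], tags=['smoke'])
    if STATUS == FRAMEWORK.EX_OK:
        print("pass rate: %s%%" % FRAMEWORK.result)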