#!/usr/bin/env python

# Copyright (c) 2019 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""Define classes required to run any Behave test suites."""

from __future__ import division

import json
import logging
import os
import time

from behave.__main__ import main as behave_main

from xtesting.core import testcase

__author__ = "Deepak Chandella <deepak.chandella@orange.com>"


class BehaveFramework(testcase.TestCase):
    """BehaveFramework runner."""
    # pylint: disable=too-many-instance-attributes

    __logger = logging.getLogger(__name__)

    def __init__(self, **kwargs):
        super(BehaveFramework, self).__init__(**kwargs)
        self.json_file = os.path.join(self.res_dir, 'output.json')
        self.total_tests = 0
        self.pass_tests = 0
        self.fail_tests = 0
        self.skip_tests = 0
        self.response = None

    def parse_results(self):
        """Parse output.json and get the details in it."""
        with open(self.json_file) as stream_:
            self.response = json.load(stream_)
            if self.response:
                self.total_tests = len(self.response)
            for item in self.response:
                if item.get('status') == 'passed':
                    self.pass_tests += 1
                elif item.get('status') == 'failed':
                    self.fail_tests += 1
                elif item.get('status') == 'skipped':
                    self.skip_tests += 1
            if self.total_tests:
                # Guard against an empty report (ZeroDivisionError).
                self.result = 100 * (
                    self.pass_tests / self.total_tests)
            self.details = {
                'total_tests': self.total_tests,
                'pass_tests': self.pass_tests,
                'fail_tests': self.fail_tests,
                'skip_tests': self.skip_tests,
                'tests': self.response}

    def run(self, **kwargs):
        """Run the BehaveFramework feature files

        Here are the steps:
           * create the output directories if required,
           * run behave features with parameters
           * get the results in output.json,

        Args:
            kwargs: Arbitrary keyword arguments.

        Returns:
            EX_OK if all suites ran well.
            EX_RUN_ERROR otherwise.
        """
        try:
            suites = kwargs["suites"]
            tags = kwargs.get("tags", [])
        except KeyError:
            self.__logger.exception("Mandatory args were not passed")
            return self.EX_RUN_ERROR
        if not os.path.exists(self.res_dir):
            try:
                os.makedirs(self.res_dir)
            except Exception:  # pylint: disable=broad-except
                self.__logger.exception("Cannot create %s", self.res_dir)
                return self.EX_RUN_ERROR
        config = ['--tags=' + ','.join(tags),
                  '--junit', '--junit-directory={}'.format(self.res_dir),
                  '--format=json', '--outfile={}'.format(self.json_file)]
        html_file = os.path.join(self.res_dir, 'output.html')
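        # The HTML report assumes the third-party behave-html-formatter
        # package is installed alongside behave.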
        config += ['--format=behave_html_formatter:HTMLFormatter',
                   '--outfile={}'.format(html_file)]
        if kwargs.get("console", False):
            config += ['--format=pretty', '--outfile=-']
        config.extend(suites)
        self.start_time = time.time()
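        # behave_main() takes the argument list that would otherwise be given
        # on the command line; its return code is ignored because success is
        # judged from the parsed output.json instead.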
        behave_main(config)
        self.stop_time = time.time()

        try:
            self.parse_results()
            self.__logger.info("Results were successfully parsed")
        except Exception:  # pylint: disable=broad-except
            self.__logger.exception("Cannot parse results")
            return self.EX_RUN_ERROR
        return self.EX_OK
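

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): 'behave-example' and the
    # feature path are placeholders, and it assumes TestCase accepts a
    # ``case_name`` keyword, as in xtesting's TestCase API.
    logging.basicConfig(level=logging.INFO)
    SUITE = BehaveFramework(case_name='behave-example')
    SUITE.run(suites=['/path/to/features'], tags=['smoke'])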