summaryrefslogtreecommitdiffstats
path: root/dovetail/api/app/server.py
blob: d44e2ee5747d1570a3d0d53458f2cfeba16df09f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
import json
import os
import shutil

import app.constants as constants
from app.utils import Utils

from dovetail.testcase import Testsuite, Testcase


class Server(object):
    """Backend helpers for the Dovetail API.

    Lists test suites/cases, stages per-request image and config files
    under ``<dovetail_home>/<requestId>/``, builds the dovetail CLI
    argument string and collects execution results.
    """

    def __init__(self, dovetail_home=None, requestId=None, requestData=None):
        # dovetail_home: root directory holding the shared 'images' and
        #     'pre_config' trees plus one working directory per request.
        # requestId: identifier used as the per-request subdirectory name.
        # requestData: parsed JSON body of the incoming request.
        self.dovetail_home = dovetail_home
        self.requestId = requestId
        self.requestData = requestData

    @staticmethod
    def list_testsuites():
        """Return all test suites known to the dovetail core."""
        return Testsuite.load()

    @staticmethod
    def list_testcases():
        """Return a list of dicts describing every known test case.

        Each dict carries testCaseName, description, subTestCase and a
        'scenario' derived from the validation project ('nfvi', 'vnf',
        or 'unknown' for anything else).
        """
        testcase_list = []
        for name, value in Testcase.load().items():
            if value.validate_type() in constants.NFVI_PROJECT:
                scenario = 'nfvi'
            elif value.validate_type() in constants.VNF_PROJECT:
                scenario = 'vnf'
            else:
                scenario = 'unknown'
            testcase_list.append({'testCaseName': name,
                                  'description': value.objective(),
                                  'subTestCase': value.sub_testcase(),
                                  'scenario': scenario})
        return testcase_list

    def set_vm_images(self):
        """Copy VM images into the request working directory.

        Uses 'conf.vm_images' from the request body when present,
        otherwise falls back to ``<dovetail_home>/images``.

        Returns a (message, success) tuple.
        """
        image_path = os.path.join(self.dovetail_home, str(self.requestId),
                                  'images')
        try:
            origin_image_path = self.requestData['conf']['vm_images']
        except KeyError:
            # No explicit path in the request body; use the default tree.
            origin_image_path = os.path.join(self.dovetail_home, 'images')
        if not os.path.exists(origin_image_path):
            return "Could not find vm images.\n", False
        try:
            shutil.copytree(origin_image_path, image_path)
        except Exception as e:
            # Best effort: surface the error text to the API caller.
            return str(e), False
        return "Success to set vm images.\n", True

    def set_conf_files(self):
        """Stage configuration files under ``<request>/pre_config``.

        Copies an existing ``<dovetail_home>/pre_config`` tree, writes
        env_config.sh from the request body ('conf.envs') when given,
        and writes the optional YAML files listed in
        constants.CONFIG_YAML_FILES.

        Returns a (message, success) tuple.
        """
        config_path = os.path.join(self.dovetail_home, str(self.requestId),
                                   'pre_config')
        origin_config_path = os.path.join(self.dovetail_home, 'pre_config')
        if os.path.exists(origin_config_path):
            try:
                shutil.copytree(origin_config_path, config_path)
            except Exception as e:
                return str(e), False
        elif not os.path.isdir(config_path):
            # No template tree to copy; create the destination so the
            # env/yaml files below can still be written.
            try:
                os.makedirs(config_path)
            except OSError as e:
                return str(e), False

        # Mandatory env_config.sh: prefer 'envs' from the request body,
        # otherwise keep the copy from pre_config; fail when neither
        # is available.
        env_file = os.path.join(config_path, 'env_config.sh')
        try:
            Utils.write_env_file(self.requestData['conf']['envs'], env_file)
        except KeyError:
            if not os.path.isfile(env_file):
                return "No 'envs' found in the request body.\n", False
        except Exception as e:
            return str(e), False

        # Optional yaml files: keys missing from the request body are
        # simply skipped.
        for key, value in constants.CONFIG_YAML_FILES.items():
            config_file = os.path.join(config_path, value)
            try:
                Utils.write_yaml_file(self.requestData['conf'][key],
                                      config_file)
            except KeyError:
                pass
            except Exception as e:
                return str(e), False

        return 'Success to prepare all config files.\n', True

    def parse_request(self):
        """Translate the request body into a dovetail CLI argument string."""
        segments = []
        default_args = constants.RUN_TEST_ITEMS['arguments']
        default_options = constants.RUN_TEST_ITEMS['options']

        for arg in default_args['no_multiple']:
            if arg in self.requestData:
                segments.append(' --{} {}'.format(arg, self.requestData[arg]))
        for arg in default_args['multiple']:
            if arg in self.requestData and self.requestData[arg]:
                for item in self.requestData[arg]:
                    segments.append(' --{} {}'.format(arg, item))

        if 'options' in self.requestData:
            for option in default_options:
                if option in self.requestData['options']:
                    segments.append(' --{}'.format(option))

        return ''.join(segments)

    def get_execution_status(self, testsuite, request_testcases,
                             exec_testcases):
        """Build per-testcase result dicts for one execution.

        testsuite: name of the executed test suite.
        request_testcases: all test cases named in the request.
        exec_testcases: subset that was actually executed; others are
            reported as NOT_EXECUTED.
        """
        results_dir = os.path.join(self.dovetail_home, str(self.requestId),
                                   'results')
        results = []
        for tc in request_testcases:
            if tc not in exec_testcases:
                results.append({'testCaseName': tc, 'status': 'NOT_EXECUTED'})
                continue

            # The testcase name prefix selects the project checker,
            # e.g. 'functest.xxx' -> FunctestChecker.
            # NOTE(review): an unknown prefix makes create() return None
            # and the get_status call below raise — confirm upstream
            # validation guarantees a known prefix.
            tc_type = tc.split('.')[0]
            checker = CheckerFactory.create(tc_type)
            status, result = checker.get_status(results_dir, tc)

            res = {'testCaseName': tc, 'testSuiteName': testsuite,
                   'scenario': 'nfvi', 'executionId': self.requestId,
                   'results': result, 'status': status, 'timestart': None,
                   'endTime': None}
            try:
                # Best effort: result may be None or lack timestamps.
                res['timestart'] = result['timestart']
                res['endTime'] = result['timestop']
            except (KeyError, TypeError):
                pass

            results.append(res)

        return results


class Checker(object):
    """Base class for per-project result checkers.

    Provides the shared lookup into the aggregated results.json file.
    """

    @staticmethod
    def get_status_from_total_file(total_file, testcase):
        """Look up one testcase in the aggregated results file.

        The file contains one JSON document per line; each document is
        expected to hold a 'testcases_list' of dicts with 'name',
        'result' and 'sub_testcase' keys.

        Returns a (result, sub_testcase) tuple, or ('FAILED', None)
        when the testcase cannot be found or the data is malformed.
        """
        with open(total_file, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                except ValueError:
                    # Skip lines that are not valid JSON.
                    continue
                try:
                    for item in data['testcases_list']:
                        if item['name'] == testcase:
                            return item['result'], item['sub_testcase']
                except KeyError:
                    # Expected keys missing: treat the record as a failure.
                    return 'FAILED', None
        return 'FAILED', None


class FunctestChecker(Checker):
    """Result checker for functest test cases.

    Reads per-test records from functest_results.txt and, when the run
    has finished, the overall verdict from results.json.
    """

    def get_status(self, results_dir, testcase):
        """Return (status, results) for one functest testcase.

        status is 'IN_PROGRESS', 'COMPLETED' or 'FAILED'; results is a
        dict with criteria, sub_testcase and start/stop timestamps, or
        None when no detailed data is available.
        """
        functest_file = os.path.join(results_dir, 'functest_results.txt')
        total_file = os.path.join(results_dir, 'results.json')

        # No detail file yet: either the run is still going, or it
        # finished (results.json exists) without producing details.
        if not os.path.isfile(functest_file):
            if os.path.isfile(total_file):
                return 'FAILED', None
            return 'IN_PROGRESS', None

        criteria = None
        sub_testcase = []
        timestart = None
        timestop = None

        # Overall verdict and sub-testcases, available once all tests
        # have completed.
        if os.path.isfile(total_file):
            criteria, sub_testcase = self.get_status_from_total_file(
                total_file, testcase)
            if criteria == 'FAILED':
                return 'FAILED', None

        # Scan the detail file for this testcase's record.
        matched = False
        with open(functest_file, 'r') as detail:
            for line in detail:
                try:
                    record = json.loads(line)
                except ValueError:
                    # Not valid JSON; skip the line.
                    continue
                try:
                    if record['build_tag'].endswith(testcase):
                        # The overall verdict, when present, wins over
                        # the per-record criteria.
                        if not criteria:
                            criteria = record['criteria']
                        timestart = record['start_date']
                        timestop = record['stop_date']
                        matched = True
                        break
                except KeyError:
                    return 'FAILED', None
        if not matched and not criteria:
            # No record yet and no overall verdict: still running.
            return 'IN_PROGRESS', None

        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
        details = {'criteria': criteria, 'sub_testcase': sub_testcase,
                   'timestart': timestart, 'timestop': timestop}
        return status, details


class YardstickChecker(Checker):
    """Result checker for yardstick (HA) test cases."""

    def get_status(self, results_dir, testcase):
        """Return (status, results) for one yardstick testcase.

        status is 'IN_PROGRESS', 'COMPLETED' or 'FAILED'; results is a
        dict with the criteria (timestamps are not available for
        yardstick), or None when no detailed data exists.
        """
        yardstick_file = os.path.join(results_dir, 'ha_logs',
                                      '{}.out'.format(testcase))
        total_file = os.path.join(results_dir, 'results.json')
        if not os.path.isfile(yardstick_file):
            if not os.path.isfile(total_file):
                return 'IN_PROGRESS', None
            return 'FAILED', None

        criteria = None

        # get criteria when all tests completed
        if os.path.isfile(total_file):
            criteria, _ = self.get_status_from_total_file(total_file, testcase)
            if criteria == 'FAILED':
                return 'FAILED', None

        with open(yardstick_file, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                except ValueError:
                    # Skip malformed lines instead of crashing with an
                    # unhandled ValueError (consistent with the other
                    # checkers).
                    continue
                try:
                    if not criteria:
                        criteria = data['result']['criteria']
                    if criteria == 'PASS':
                        # A PASS verdict is demoted to FAIL when any
                        # sub-testcase missed its SLA.
                        details = data['result']['testcases']
                        for value in details.values():
                            sla_pass = value['tc_data'][0]['data']['sla_pass']
                            if sla_pass != 1:
                                criteria = 'FAIL'
                except KeyError:
                    return 'FAILED', None

        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
        results = {'criteria': criteria, 'timestart': None, 'timestop': None}
        return status, results


class BottlenecksChecker(Checker):
    """Result checker for bottlenecks (stress) test cases."""

    def get_status(self, results_dir, testcase):
        """Return (status, results) for one bottlenecks testcase.

        status is 'IN_PROGRESS', 'COMPLETED' or 'FAILED'; results is a
        dict with the criteria (timestamps are not available for
        bottlenecks), or None when no detailed data exists.
        """
        bottlenecks_file = os.path.join(results_dir, 'stress_logs',
                                        '{}.out'.format(testcase))
        total_file = os.path.join(results_dir, 'results.json')
        if not os.path.isfile(bottlenecks_file):
            if not os.path.isfile(total_file):
                return 'IN_PROGRESS', None
            return 'FAILED', None

        criteria = None

        # get criteria when all tests completed
        if os.path.isfile(total_file):
            criteria, _ = self.get_status_from_total_file(total_file, testcase)
            if criteria == 'FAILED':
                return 'FAILED', None

        with open(bottlenecks_file, 'r') as f:
            for line in f:
                try:
                    data = json.loads(line)
                except ValueError:
                    # Skip malformed lines instead of crashing with an
                    # unhandled ValueError (consistent with the other
                    # checkers).
                    continue
                try:
                    if not criteria:
                        criteria = data['data_body']['result']
                except KeyError:
                    return 'FAILED', None

        status = 'COMPLETED' if criteria == 'PASS' else 'FAILED'
        results = {'criteria': criteria, 'timestart': None, 'timestop': None}
        return status, results


class CheckerFactory(object):
    """Maps a testcase project prefix to its checker class."""

    CHECKER_MAP = {
        'functest': FunctestChecker,
        'yardstick': YardstickChecker,
        'bottlenecks': BottlenecksChecker
    }

    @classmethod
    def create(cls, tc_type):
        """Return a new checker instance, or None for unknown types."""
        checker_cls = cls.CHECKER_MAP.get(tc_type)
        return checker_cls() if checker_cls is not None else None