#!/usr/bin/env python
# Copyright 2021 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
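"""Behave environment file for the NFVbench test campaign.

behave loads this file automatically and runs the hooks below around the
whole campaign (before_all), each feature (before_feature / after_feature)
and each scenario (before_scenario).
"""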
import json
import logging
import os
import pathlib
import time


def before_all(context):
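    """Gather the campaign configuration from environment variables."""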
    context.data = {'config': os.getenv('NFVBENCH_CONFIG_PATH', '/etc/nfvbench/nfvbench.cfg')}
    context.data['PROJECT_NAME'] = os.getenv('PROJECT_NAME', 'nfvbench')
    context.data['TEST_DB_URL'] = os.getenv('TEST_DB_URL')
    context.data['BASE_TEST_DB_URL'] = ''
    if context.data['TEST_DB_URL']:
        context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '')
    context.data['INSTALLER_TYPE'] = os.getenv('INSTALLER_TYPE')
    context.data['DEPLOY_SCENARIO'] = os.getenv('DEPLOY_SCENARIO')
    context.data['NODE_NAME'] = os.getenv('NODE_NAME', 'nfvbench')
    context.data['BUILD_TAG'] = os.getenv('BUILD_TAG')

    # NFVbench server host and port
    context.host_ip = os.getenv('NFVBENCH_SERVER_HOST', '127.0.0.1')
    context.port = int(os.getenv('NFVBENCH_SERVER_PORT', '7555'))


def before_feature(context, feature):
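    """Reset per-feature state and set up the feature results directory and logger."""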
    context.rates = {}
    context.results = {}
    context.start_time = time.time()
    context.CASE_NAME = feature.name

    # Create results dir if needed
    results_dir = pathlib.Path('/var/lib/xtesting/results/' + context.CASE_NAME)
    if not results_dir.exists():
        results_dir.mkdir()

    # Setup a second logger to be able to understand why a test passed or failed
    # (The main logger is used by behave itself)
    context.logger = logging.getLogger('behave_tests')
    context.logger.setLevel(logging.INFO)
    # Close and remove any handler left over from a previous feature, else
    # records would also be duplicated into the previous feature's log file
    for handler in list(context.logger.handlers):
        handler.close()
        context.logger.removeHandler(handler)
    fh = logging.FileHandler(filename=results_dir / 'behave_tests.log',
                             mode='w')  # Re-create the file at the beginning of the feature
    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    context.logger.addHandler(fh)
    context.logger.info('before_feature: ' + feature.name)


def before_scenario(context, scenario):
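    """Record the scenario tag and prepare the base JSON data for this scenario's NFVbench run."""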
    context.tag = scenario.tags[0]
    context.json = {'log_file': '/var/lib/xtesting/results/' + context.CASE_NAME + '/nfvbench.log'}
    user_label = os.getenv('NFVBENCH_USER_LABEL', None)
    if user_label:
        context.json['user_label'] = user_label
    loopvm_flavor = os.getenv('NFVBENCH_LOOPVM_FLAVOR_NAME', None)
    if loopvm_flavor:
        context.json['flavor_type'] = loopvm_flavor
    context.synthesis = {}
    context.percentage_rate = None
    context.logger.info('before_scenario: ' + scenario.name)


def after_feature(context, feature):
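    """Dump the feature results to campaign_result.json, if any."""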
    if not context.results:
        # No result to dump
        return
    results_dir = pathlib.Path('/var/lib/xtesting/results/' + context.CASE_NAME)
    results_file = results_dir / 'campaign_result.json'
    results_file.write_text(json.dumps(context.results, indent=4))