#!/usr/bin/env python3

# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""VSPERF main script.
"""

import logging
import os
import sys
import argparse
import re
import time
import csv
import datetime
import shutil
import unittest
import locale
import copy
import glob
import subprocess
import ast
import xmlrunner
from tabulate import tabulate
from conf import merge_spec
from conf import settings
import core.component_factory as component_factory
from core.loader import Loader
from testcases import PerformanceTestCase
from testcases import IntegrationTestCase
from tools import tasks
from tools import networkcard
from tools import functions
from tools.pkt_gen import trafficgen
from tools.opnfvdashboard import opnfvdashboard
sys.dont_write_bytecode = True

VERBOSITY_LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL
}

_CURR_DIR = os.path.dirname(os.path.realpath(__file__))

_TEMPLATE_RST = {'head'  : os.path.join(_CURR_DIR, 'tools/report/report_head.rst'),
                 'foot'  : os.path.join(_CURR_DIR, 'tools/report/report_foot.rst'),
                 'final' : 'test_report.rst',
                 'tmp'   : os.path.join(_CURR_DIR, 'tools/report/report_tmp_caption.rst')
                }

_TEMPLATE_MATRIX = "Performance Matrix\n------------------\n\n"\
                   "The following performance matrix was generated with the results of all the\n"\
                   "currently run tests. The metric used for comparison is {}.\n\n{}\n\n"

_LOGGER = logging.getLogger()

def parse_param_string(values):
    """
    Parse and split a single '--test-params' argument.

    This expects either 'x=y', 'x=y,z' or 'x' (implicit true)
    values. For multiple overrides use a ';' separated list,
    e.g. --test-params 'x=z; y=(a,b)'
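
    Illustrative example (values are parsed with ast.literal_eval when possible):

        parse_param_string("TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)")
        # => {'TRAFFICGEN_DURATION': 10, 'TRAFFICGEN_PKT_SIZES': (64,)}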
    """
    results = {}

    if values == '':
        return {}

    for param, _, value in re.findall('([^;=]+)(=([^;]+))?', values):
        param = param.strip()
        value = value.strip()
        if param:
            if value:
                # values are passed inside string from CLI, so we must retype them accordingly
                try:
                    results[param] = ast.literal_eval(value)
                except ValueError:
                    # for backward compatibility, we have to accept strings without quotes
                    _LOGGER.warning("Adding missing quotes around string value: %s = %s",
                                    param, str(value))
                    results[param] = str(value)
            else:
                results[param] = True
    return results


def parse_arguments():
    """
    Parse command line arguments.
    """
    class _SplitTestParamsAction(argparse.Action):
        """
        Parse and split '--test-params' arguments.

        This expects either a single list of ; separated overrides
        as 'x=y', 'x=y,z' or 'x' (implicit true) values.
        e.g. --test-params 'x=z; y=(a,b)'
        Or a list of these ; separated lists with overrides for
        multiple tests.
        e.g. --test-params "['x=z; y=(a,b)','x=z']"
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if values[0] == '[':
                input_list = ast.literal_eval(values)
                parameter_list = []
                for test_params in input_list:
                    parameter_list.append(parse_param_string(test_params))
            else:
                parameter_list = parse_param_string(values)
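            # the parsed parameters (a single dict or a list of dicts) are stored
            # under the '_PARAMS_LIST' key and consumed later via settings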
            results = {'_PARAMS_LIST':parameter_list}
            setattr(namespace, self.dest, results)

    class _ValidateFileAction(argparse.Action):
        """Validate a file can be read from before using it.
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if not os.path.isfile(values):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not a valid path' % values)
            elif not os.access(values, os.R_OK):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not accessible' % values)

            setattr(namespace, self.dest, values)

    class _ValidateDirAction(argparse.Action):
        """Validate a directory can be written to before using it.
        """
        def __call__(self, parser, namespace, values, option_string=None):
            if not os.path.isdir(values):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not a valid path' % values)
            elif not os.access(values, os.W_OK):
                raise argparse.ArgumentTypeError(
                    'the path \'%s\' is not accessible' % values)

            setattr(namespace, self.dest, values)

    def list_logging_levels():
        """Give a summary of all available logging levels.

        :return: List of verbosity level names in decreasing order of
            verbosity
        """
        return sorted(VERBOSITY_LEVELS.keys(),
                      key=lambda x: VERBOSITY_LEVELS[x])

    parser = argparse.ArgumentParser(
        prog=__file__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s 0.2')
    parser.add_argument('--list', '--list-tests', action='store_true',
                        help='list all tests and exit')
    parser.add_argument('--list-trafficgens', action='store_true',
                        help='list all traffic generators and exit')
    parser.add_argument('--list-collectors', action='store_true',
                        help='list all system metrics loggers and exit')
    parser.add_argument('--list-vswitches', action='store_true',
                        help='list all system vswitches and exit')
    parser.add_argument('--list-fwdapps', action='store_true',
                        help='list all system forwarding applications and exit')
    parser.add_argument('--list-vnfs', action='store_true',
                        help='list all system vnfs and exit')
    parser.add_argument('--list-loadgens', action='store_true',
                        help='list all background load generators and exit')
    parser.add_argument('--list-settings', action='store_true',
                        help='list effective settings configuration and exit')
    parser.add_argument('exact_test_name', nargs='*', help='Exact names of\
            tests to run. E.g "vsperf phy2phy_tput phy2phy_cont"\
            runs only the two tests with those exact names.\
            To run all tests omit both positional args and --tests arg.')

    group = parser.add_argument_group('test selection options')
    group.add_argument('-m', '--mode', help='vsperf mode of operation;\
            Values: "normal" - execute vSwitch, VNF and traffic generator;\
            "trafficgen" - execute only traffic generator; "trafficgen-off" \
            - execute vSwitch and VNF; trafficgen-pause - execute vSwitch \
            and VNF but pause before traffic transmission ', default='normal')

    group.add_argument('-f', '--test-spec', help='test specification file')
    group.add_argument('-d', '--test-dir', help='directory containing tests')
    group.add_argument('-t', '--tests', help='Comma-separated list of terms \
            indicating tests to run. e.g. "RFC2544,!p2p" - run all tests whose\
            name contains RFC2544 less those containing "p2p"; "!back2back" - \
            run all tests except those containing back2back')
    group.add_argument('--verbosity', choices=list_logging_levels(),
                       help='debug level')
    group.add_argument('--integration', action='store_true', help='execute integration tests')
    group.add_argument('--trafficgen', help='traffic generator to use')
    group.add_argument('--vswitch', help='vswitch implementation to use')
    group.add_argument('--fwdapp', help='packet forwarding application to use')
    group.add_argument('--vnf', help='vnf to use')
    group.add_argument('--loadgen', help='loadgen to use')
    group.add_argument('--sysmetrics', help='system metrics logger to use')
    group = parser.add_argument_group('test behavior options')
    group.add_argument('--xunit', action='store_true',
                       help='enable xUnit-formatted output')
    group.add_argument('--xunit-dir', action=_ValidateDirAction,
                       help='output directory of xUnit-formatted output')
    group.add_argument('--load-env', action='store_true',
                       help='enable loading of settings from the environment')
    group.add_argument('--conf-file', action=_ValidateFileAction,
                       help='settings file')
    group.add_argument('--test-params', action=_SplitTestParamsAction,
                       help='semicolon separated list of test parameters: key=val; e.g. '
                       'TRAFFICGEN_PKT_SIZES=(64,128);TRAFFICGEN_DURATION=30; '
                       'GUEST_LOOPBACK=["l2fwd"] ...'
                       ' or a list of such semicolon separated lists; e.g. '
                       '[\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(128,)\','
                       '\'TRAFFICGEN_DURATION=10;TRAFFICGEN_PKT_SIZES=(64,)\']')
    group.add_argument('--opnfvpod', help='name of POD in opnfv')
    group.add_argument('--matrix', help='enable performance matrix analysis',
                       action='store_true', default=False)

    args = vars(parser.parse_args())

    return args


def configure_logging(level):
    """Configure logging.
    """
    date = datetime.datetime.fromtimestamp(time.time())
    timestamp = date.strftime('%Y-%m-%d_%H-%M-%S')
    settings.setValue('LOG_TIMESTAMP', timestamp)
    name, ext = os.path.splitext(settings.getValue('LOG_FILE_DEFAULT'))
    rename_default = "{name}_{uid}{ex}".format(name=name, uid=timestamp, ex=ext)
    log_file_default = os.path.join(
        settings.getValue('LOG_DIR'), rename_default)
    log_file_host_cmds = os.path.join(
        settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_HOST_CMDS'))
    log_file_traffic_gen = os.path.join(
        settings.getValue('LOG_DIR'),
        settings.getValue('LOG_FILE_TRAFFIC_GEN'))
    metrics_file = (settings.getValue('LOG_FILE_INFRA_METRICS_PFX') +
                    timestamp + '.log')
    log_file_infra_metrics = os.path.join(settings.getValue('LOG_DIR'),
                                          metrics_file)

    _LOGGER.setLevel(logging.DEBUG)
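    # note: the root logger stays at DEBUG so that the file handlers capture
    # everything; console verbosity is limited by the stream handler below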

    stream_logger = logging.StreamHandler(sys.stdout)
    stream_logger.setLevel(VERBOSITY_LEVELS[level])
    stream_logger.setFormatter(logging.Formatter(
        '[%(levelname)-5s]  %(asctime)s : (%(name)s) - %(message)s'))
    _LOGGER.addHandler(stream_logger)

    file_logger = logging.FileHandler(filename=log_file_default)
    file_logger.setLevel(logging.DEBUG)
    file_logger.setFormatter(logging.Formatter(
        '%(asctime)s : %(message)s'))
    _LOGGER.addHandler(file_logger)

    class CommandFilter(logging.Filter):
        """Filter out strings beginning with 'cmd :'"""
        def filter(self, record):
            return record.getMessage().startswith(tasks.CMD_PREFIX)

    class TrafficGenCommandFilter(logging.Filter):
        """Filter out strings beginning with 'gencmd :'"""
        def filter(self, record):
            return record.getMessage().startswith(trafficgen.CMD_PREFIX)

    class CollectdMetricsFilter(logging.Filter):
        """Filter out strings beginning with 'COLLECTD' :'"""
        def filter(self, record):
            return record.getMessage().startswith('COLLECTD')

    cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
    cmd_logger.setLevel(logging.DEBUG)
    cmd_logger.addFilter(CommandFilter())
    _LOGGER.addHandler(cmd_logger)

    gen_logger = logging.FileHandler(filename=log_file_traffic_gen)
    gen_logger.setLevel(logging.DEBUG)
    gen_logger.addFilter(TrafficGenCommandFilter())
    _LOGGER.addHandler(gen_logger)

    if settings.getValue('COLLECTOR') == 'Collectd':
        met_logger = logging.FileHandler(filename=log_file_infra_metrics)
        met_logger.setLevel(logging.DEBUG)
        met_logger.addFilter(CollectdMetricsFilter())
        _LOGGER.addHandler(met_logger)


def apply_filter(tests, tc_filter):
    """Allow a subset of tests to be conveniently selected

    :param tests: The list of Tests from which to select.
    :param tc_filter: A case-insensitive string of comma-separated terms
        indicating the Tests to select.
        e.g. 'RFC' - select all tests whose name contains 'RFC'
        e.g. 'RFC,burst' - select all tests whose name contains 'RFC' or
            'burst'
        e.g. 'RFC,burst,!p2p' - select all tests whose name contains 'RFC'
            or 'burst' and from these remove any containing 'p2p'.
        e.g. '' - empty string selects all tests.
    :return: A list of the selected Tests.
    """
    if tc_filter is None:
        tc_filter = ""

    # if the first filter term is negative, we have to start with the full list of tests
    if tc_filter.strip().startswith('!'):
        result = tests
    else:
        result = []

    for term in [x.strip() for x in tc_filter.lower().split(",")]:
        if not term or term[0] != '!':
            # Add matching tests from 'tests' into results
            result.extend([test for test in tests \
                if test['Name'].lower().find(term) >= 0])
        else:
            # Term begins with '!' so we remove matching tests
            result = [test for test in result \
                if test['Name'].lower().find(term[1:]) < 0]

    return result


def check_and_set_locale():
    """ Function will check locale settings. In case, that it isn't configured
    properly, then default values specified by DEFAULT_LOCALE will be used.
    """

    system_locale = locale.getdefaultlocale()
    if None in system_locale:
        os.environ['LC_ALL'] = settings.getValue('DEFAULT_LOCALE')
        _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s",
                        system_locale, locale.getdefaultlocale())

def get_vswitch_names(rst_files):
    """ Function will return a list of vSwitches detected in given ``rst_files``.
    """
    vswitch_names = set()
    if rst_files:
        try:
            output = subprocess.check_output(['grep', '-h', '^* vSwitch'] + rst_files).decode().splitlines()
            for line in output:
                match = re.search(r'^\* vSwitch: ([^,]+)', str(line))
                if match:
                    vswitch_names.add(match.group(1))

            if vswitch_names:
                return list(vswitch_names)

        except subprocess.CalledProcessError:
            _LOGGER.warning('Cannot detect vSwitches used during testing.')

    # fallback to the default value
    return ['vSwitch']

def get_build_tag():
    """ Function will return a Jenkins job ID environment variable.
    """

    try:
        build_tag = os.environ['BUILD_TAG']

    except KeyError:
        _LOGGER.warning('Cannot detect Jenkins job ID')
        build_tag = "none"

    return build_tag

def generate_final_report():
    """ Function will check if partial test results are available
    and generate the final report in rst format.
    """

    path = settings.getValue('RESULTS_PATH')
    # check if there are any results in rst format
    rst_results = glob.glob(os.path.join(path, 'result*rst'))
    pkt_processors = get_vswitch_names(rst_results)
    if rst_results:
        try:
            test_report = os.path.join(path, '{}_{}'.format('_'.join(pkt_processors), _TEMPLATE_RST['final']))
            # create the report caption directly - it is not worth executing the jinja machinery
            report_caption = '{}\n{} {}\n{}\n\n'.format(
                '============================================================',
                'Performance report for',
                ', '.join(pkt_processors),
                '============================================================')

            with open(_TEMPLATE_RST['tmp'], 'w') as file_:
                file_.write(report_caption)

            retval = subprocess.call('cat {} {} {} {} > {}'.format(_TEMPLATE_RST['tmp'], _TEMPLATE_RST['head'],
                                                                   ' '.join(rst_results), _TEMPLATE_RST['foot'],
                                                                   test_report), shell=True)
            if retval == 0 and os.path.isfile(test_report):
                _LOGGER.info('Overall test report written to "%s"', test_report)
            else:
                _LOGGER.error('Generation of overall test report has failed.')

            # remove temporary file
            os.remove(_TEMPLATE_RST['tmp'])

        except subprocess.CalledProcessError:
            _LOGGER.error('Generation of overall test report has failed.')


def generate_performance_matrix(selected_tests, results_path):
    """
    Loads the results of all the currently run tests, compares them
    based on the MATRIX_METRIC, outputs and saves the generated table.
    :selected_tests: list of currently run test
    :results_path: directory path to the results of current tests
    """
    _LOGGER.info('Performance Matrix:')
    test_list = []

    for test in selected_tests:
        test_name = test.get('Name', '<Name not set>')
        test_deployment = test.get('Deployment', '<Deployment not set>')
        test_list.append({'test_name':test_name, 'test_deployment':test_deployment, 'csv_data':False})

    test_params = {}
    output = []
    all_params = settings.getValue('_PARAMS_LIST')
    for i, test in enumerate(test_list):
        if isinstance(all_params, list):
            list_index = i
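            # if fewer parameter sets than tests were given, reuse the last set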
            if i >= len(all_params):
                list_index = len(all_params) - 1
            if settings.getValue('CUMULATIVE_PARAMS') and (i > 0):
                test_params.update(all_params[list_index])
            else:
                test_params = all_params[list_index]
        else:
            test_params = all_params
        settings.setValue('TEST_PARAMS', test_params)
        test['test_params'] = copy.deepcopy(test_params)
        try:
            with open("{}/result_{}_{}_{}.csv".format(results_path, str(i),
                                                      test['test_name'], test['test_deployment'])) as csvfile:
                reader = list(csv.DictReader(csvfile))
                test['csv_data'] = reader[0]
        # pylint: disable=broad-except
        except Exception as ex:
            _LOGGER.error("Result file not found: %s", ex)

    metric = settings.getValue('MATRIX_METRIC')
    change = {}
    output_header = ("ID", "Name", metric, "Change [%]", "Parameters, "\
                     "CUMULATIVE_PARAMS = {}".format(settings.getValue('CUMULATIVE_PARAMS')))
    if not test_list[0]['csv_data'] or float(test_list[0]['csv_data'][metric]) == 0:
        _LOGGER.error("Incorrect format of test results")
        return
    for i, test in enumerate(test_list):
        if test['csv_data']:
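            # metric value expressed as a percentage change relative to the
            # first test in the list (the baseline)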
            change[i] = float(test['csv_data'][metric])/\
                        (float(test_list[0]['csv_data'][metric]) / 100) - 100
            output.append([i, test['test_name'], float(test['csv_data'][metric]),
                           change[i], str(test['test_params'])[1:-1]])
        else:
            change[i] = 0
            output.append([i, test['test_name'], "Test Failed", 0, test['test_params']])
    print(tabulate(output, headers=output_header, tablefmt="grid", floatfmt="0.3f"))
    with open(results_path + '/result_performance_matrix.rst', 'w+') as output_file:
        output_file.write(_TEMPLATE_MATRIX.format(metric, tabulate(output, headers=output_header,
                                                                   tablefmt="rst", floatfmt="0.3f")))
        _LOGGER.info('Performance matrix written to: "%s/result_performance_matrix.rst"', results_path)

def enable_sriov(nic_list):
    """ Enable SRIOV for given enhanced PCI IDs

    :param nic_list: A list of enhanced PCI IDs
    """
    # detect if sriov is required
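    # an enhanced PCI ID is assumed to have the form 'pci_address|vfN'
    # (e.g. '0000:05:00.0|vf1'), where N is the VF index to be used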
    sriov_nic = {}
    for nic in nic_list:
        if networkcard.is_sriov_nic(nic):
            tmp_nic = nic.split('|')
            if tmp_nic[0] in sriov_nic:
                if int(tmp_nic[1][2:]) > sriov_nic[tmp_nic[0]]:
                    sriov_nic[tmp_nic[0]] = int(tmp_nic[1][2:])
            else:
                sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])})

    # sriov is required for some NICs
    if sriov_nic:
        for nic in sriov_nic:
            # check if SRIOV is supported and enough virt interfaces are available
            if not networkcard.is_sriov_supported(nic) \
                or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]:
                # if not, enable and set appropriate number of VFs
                if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1):
                    raise RuntimeError('SRIOV cannot be enabled for NIC {}'.format(nic))
                else:
                    _LOGGER.debug("SRIOV enabled for NIC %s", nic)

                # ensure that path to the bind tool is valid
                functions.settings_update_paths()

                # WORKAROUND: it has been observed with the IXGBE(VF) driver
                # that the NIC doesn't correctly dispatch traffic to VFs based
                # on their MAC address. Unbinding and rebinding to the same
                # driver solves this issue.
                networkcard.reinit_vfs(nic)

        # After SRIOV is enabled it takes some time until the network drivers
        # properly initialize all cards.
        # Wait even if SRIOV was already configured, as it can be configured
        # automatically just before vsperf execution.
        time.sleep(2)

        return True

    return False


def disable_sriov(nic_list):
    """ Disable SRIOV for given PCI IDs

    :param nic_list: A list of enhanced PCI IDs
    """
    for nic in nic_list:
        if networkcard.is_sriov_nic(nic):
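            # setting the number of VFs to 0 removes all VFs from the given PF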
            if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0):
                raise RuntimeError('SRIOV cannot be disabled for NIC {}'.format(nic))
            else:
                _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0])


def handle_list_options(args):
    """ Process --list cli arguments if needed

    :param args: A dictionary with all CLI arguments
    """
    if args['list_trafficgens']:
        print(Loader().get_trafficgens_printable())
        sys.exit(0)

    if args['list_collectors']:
        print(Loader().get_collectors_printable())
        sys.exit(0)

    if args['list_vswitches']:
        print(Loader().get_vswitches_printable())
        sys.exit(0)

    if args['list_vnfs']:
        print(Loader().get_vnfs_printable())
        sys.exit(0)

    if args['list_fwdapps']:
        print(Loader().get_pktfwds_printable())
        sys.exit(0)

    if args['list_loadgens']:
        print(Loader().get_loadgens_printable())
        sys.exit(0)

    if args['list_settings']:
        print(str(settings))
        sys.exit(0)

    if args['list']:
        list_testcases(args)
        sys.exit(0)


def list_testcases(args):
    """ Print list of testcases requested by --list CLI argument

    :param args: A dictionary with all CLI arguments
    """
    # configure tests
    if args['integration']:
        testcases = settings.getValue('INTEGRATION_TESTS')
    else:
        testcases = settings.getValue('PERFORMANCE_TESTS')

    print("Available Tests:")
    print("================")

    for test in testcases:
        description = functions.format_description(test['Description'], 70)
        if len(test['Name']) < 40:
            print('* {:40} {}'.format('{}:'.format(test['Name']), description[0]))
        else:
            print('* {}'.format('{}:'.format(test['Name'])))
            print('  {:40} {}'.format('', description[0]))
        for i in range(1, len(description)):
            print('  {:40} {}'.format('', description[i]))


def vsperf_finalize():
    """ Clean up before exit
    """
    # remove directory if no result files were created
    try:
        results_path = settings.getValue('RESULTS_PATH')
        if os.path.exists(results_path):
            files_list = os.listdir(results_path)
            if files_list == []:
                _LOGGER.info("Removing empty result directory: %s", results_path)
                shutil.rmtree(results_path)
    except AttributeError:
        # skip it if parameter doesn't exist
        pass

    # disable SRIOV if needed
    try:
        if settings.getValue('SRIOV_ENABLED'):
            disable_sriov(settings.getValue('WHITELIST_NICS_ORIG'))
    except AttributeError:
        # skip it if parameter doesn't exist
        pass


class MockTestCase(unittest.TestCase):
    """Allow use of xmlrunner to generate Jenkins compatible output without
    using xmlrunner to actually run tests.

    Usage:
        suite = unittest.TestSuite()
        suite.addTest(MockTestCase('Test1 passed ', True, 'Test1'))
        suite.addTest(MockTestCase('Test2 failed because...', False, 'Test2'))
        xmlrunner.XMLTestRunner(...).run(suite)
    """

    def __init__(self, msg, is_pass, test_name):
        # remember the message and the expected test result
        self.msg = msg
        self.is_pass = is_pass

        # dynamically create a test method with the right name
        # but point the method at our generic test method
        setattr(MockTestCase, test_name, self.generic_test)

        super(MockTestCase, self).__init__(test_name)

    def generic_test(self):
        """Provide a generic function that raises or not based
        on how self.is_pass was set in the constructor"""
        self.assertTrue(self.is_pass, self.msg)

# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def main():
    """Main function.
    """
    args = parse_arguments()

    # configure settings

    settings.load_from_dir(os.path.join(_CURR_DIR, 'conf'))

    # load non-performance (integration) test definitions if requested
    if args['integration']:
        settings.load_from_dir(os.path.join(_CURR_DIR, 'conf/integration'))

    # load command line parameters first in case there are settings files
    # to be used
    settings.load_from_dict(args)

    if args['conf_file']:
        settings.load_from_file(args['conf_file'])

    if args['load_env']:
        settings.load_from_env()

    # reload command line parameters since these should take higher priority
    # than both a settings file and environment variables
    settings.load_from_dict(args)

    settings.setValue('mode', args['mode'])

    # update paths to trafficgens if required
    if settings.getValue('mode') == 'trafficgen':
        functions.settings_update_paths()

    # if required, handle list-* operations
    handle_list_options(args)

    configure_logging(settings.getValue('VERBOSITY'))

    # check and fix locale
    check_and_set_locale()

    # configure trafficgens
    if args['trafficgen']:
        trafficgens = Loader().get_trafficgens()
        if args['trafficgen'] not in trafficgens:
            _LOGGER.error('There are no trafficgens matching \'%s\' found in'
                          ' \'%s\'. Exiting...', args['trafficgen'],
                          settings.getValue('TRAFFICGEN_DIR'))
            sys.exit(1)

    # configuration validity checks
    if args['vswitch']:
        vswitch_none = args['vswitch'].strip().lower() == 'none'
        if vswitch_none:
            settings.setValue('VSWITCH', 'none')
        else:
            vswitches = Loader().get_vswitches()
            if args['vswitch'] not in vswitches:
                _LOGGER.error('There are no vswitches matching \'%s\' found in'
                              ' \'%s\'. Exiting...', args['vswitch'],
                              settings.getValue('VSWITCH_DIR'))
                sys.exit(1)

    if args['fwdapp']:
        settings.setValue('PKTFWD', args['fwdapp'])
        fwdapps = Loader().get_pktfwds()
        if args['fwdapp'] not in fwdapps:
            _LOGGER.error('There are no forwarding applications'
                          ' matching \'%s\' found in'
                          ' \'%s\'. Exiting...', args['fwdapp'],
                          settings.getValue('PKTFWD_DIR'))
            sys.exit(1)

    if args['vnf']:
        vnfs = Loader().get_vnfs()
        if args['vnf'] not in vnfs:
            _LOGGER.error('There are no vnfs matching \'%s\' found in'
                          ' \'%s\'. Exiting...', args['vnf'],
                          settings.getValue('VNF_DIR'))
            sys.exit(1)

    if args['loadgen']:
        loadgens = Loader().get_loadgens()
        if args['loadgen'] not in loadgens:
            _LOGGER.error('There are no loadgens matching \'%s\' found in'
                          ' \'%s\'. Exiting...', args['loadgen'],
                          settings.getValue('LOADGEN_DIR'))
            sys.exit(1)

    if args['exact_test_name'] and args['tests']:
        _LOGGER.error("Cannot specify tests with both positional args and --test.")
        sys.exit(1)

    # modify NIC configuration to decode enhanced PCI IDs
    wl_nics_orig = list(networkcard.check_pci(pci) for pci in settings.getValue('WHITELIST_NICS'))
    settings.setValue('WHITELIST_NICS_ORIG', wl_nics_orig)

    # sriov handling is performed on checked/expanded PCI IDs
    settings.setValue('SRIOV_ENABLED', enable_sriov(wl_nics_orig))

    nic_list = []
    for nic in wl_nics_orig:
        tmp_nic = networkcard.get_nic_info(nic)
        if tmp_nic:
            nic_list.append({'pci' : tmp_nic,
                             'type' : 'vf' if networkcard.get_sriov_pf(tmp_nic) else 'pf',
                             'mac' : networkcard.get_mac(tmp_nic),
                             'driver' : networkcard.get_driver(tmp_nic),
                             'device' : networkcard.get_device_name(tmp_nic)})
        else:
            vsperf_finalize()
            raise RuntimeError("Invalid network card PCI ID: '{}'".format(nic))

    settings.setValue('NICS', nic_list)
    # for backward compatibility
    settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list))

    # generate results directory name
    date = datetime.datetime.fromtimestamp(time.time())
    results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S')
    results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir)
    settings.setValue('RESULTS_PATH', results_path)

    # create results directory
    if not os.path.exists(results_path):
        _LOGGER.info("Creating result directory: %s", results_path)
        os.makedirs(results_path)
    # pylint: disable=too-many-nested-blocks
    if settings.getValue('mode') == 'trafficgen':
        # execute only traffic generator
        _LOGGER.debug("Executing traffic generator:")
        loader = Loader()
        # set traffic details, so they can be passed to traffic ctl
        traffic = copy.deepcopy(settings.getValue('TRAFFIC'))
        traffic = functions.check_traffic(traffic)

        traffic_ctl = component_factory.create_traffic(
            traffic['traffic_type'],
            loader.get_trafficgen_class())
        with traffic_ctl:
            traffic_ctl.send_traffic(traffic)
        _LOGGER.debug("Traffic Results:")
        traffic_ctl.print_results()

        # write results into CSV file
        result_file = os.path.join(results_path, "result.csv")
        PerformanceTestCase.write_result_to_file(traffic_ctl.get_results(), result_file)
    else:
        # configure tests
        if args['integration']:
            testcases = settings.getValue('INTEGRATION_TESTS')
        else:
            testcases = settings.getValue('PERFORMANCE_TESTS')

        if args['exact_test_name']:
            exact_names = args['exact_test_name']
            # positional args => exact matches only
            selected_tests = []
            for test_name in exact_names:
                for test in testcases:
                    if test['Name'] == test_name:
                        selected_tests.append(test)
        elif args['tests']:
            # --tests => apply filter to select requested tests
            selected_tests = apply_filter(testcases, args['tests'])
        else:
            # Default - run all tests
            selected_tests = testcases

        if not selected_tests:
            _LOGGER.error("No tests matched --tests option or positional args. Done.")
            vsperf_finalize()
            sys.exit(1)

        suite = unittest.TestSuite()
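        # take a snapshot of the settings so that per-test parameter overrides
        # can be rolled back after each test (see the 'finally' clause below),
        # unless CUMULATIVE_PARAMS is set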
        settings_snapshot = copy.deepcopy(settings.__dict__)

        for i, cfg in enumerate(selected_tests):
            settings.setValue('_TEST_INDEX', i)
            test_name = cfg.get('Name', '<Name not set>')
            try:
                test_params = settings.getValue('_PARAMS_LIST')
                if isinstance(test_params, list):
                    list_index = i
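                    # if fewer parameter sets than tests were given, reuse the last set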
                    if i >= len(test_params):
                        list_index = len(test_params) - 1
                    test_params = test_params[list_index]
                if settings.getValue('CUMULATIVE_PARAMS'):
                    test_params = merge_spec(settings.getValue('TEST_PARAMS'), test_params)
                settings.setValue('TEST_PARAMS', test_params)

                if args['integration']:
                    test = IntegrationTestCase(cfg)
                else:
                    test = PerformanceTestCase(cfg)

                test.run()
                suite.addTest(MockTestCase('', True, test.name))

            # pylint: disable=broad-except
            except Exception as ex:
                _LOGGER.exception("Failed to run test: %s", test_name)
                suite.addTest(MockTestCase(str(ex), False, test_name))
                _LOGGER.info("Continuing with next test...")
            finally:
                if not settings.getValue('CUMULATIVE_PARAMS'):
                    settings.restore_from_dict(settings_snapshot)

        settings.restore_from_dict(settings_snapshot)


        # Generate and printout Performance Matrix
        if args['matrix']:
            generate_performance_matrix(selected_tests, results_path)

        # generate final rst report with results of all executed TCs
        generate_final_report()

        if settings.getValue('XUNIT'):
            xmlrunner.XMLTestRunner(
                output=settings.getValue('XUNIT_DIR'), outsuffix="",
                verbosity=0).run(suite)

        if args['opnfvpod']:
            pod_name = args['opnfvpod']
            installer_name = str(settings.getValue('OPNFV_INSTALLER')).lower()
            opnfv_url = settings.getValue('OPNFV_URL')
            pkg_list = settings.getValue('PACKAGE_LIST')

            int_data = {'pod': pod_name,
                        'build_tag': get_build_tag(),
                        'installer': installer_name,
                        'pkg_list': pkg_list,
                        'db_url': opnfv_url,
                        # pass the vswitch name from the configuration to be used for
                        # failed TCs; for successful TCs it is safer to use the vswitch
                        # name from the CSV, as a TC can override the global configuration
                        'vswitch': str(settings.getValue('VSWITCH')).lower()}
            tc_names = [tc['Name'] for tc in selected_tests]
            opnfvdashboard.results2opnfv_dashboard(tc_names, results_path, int_data)

    # cleanup before exit
    vsperf_finalize()

if __name__ == "__main__":
    main()