path: root/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
#!/usr/bin/python3

##
## Copyright (c) 2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##

from past.utils import old_div
import sys
import time
from rapid_log import RapidLog
from rapid_test import RapidTest

class IrqTest(RapidTest):
    """
    Class to manage the irq testing
    """
    def __init__(self, test_param, runtime, testname, environment_file,
            machines):
        super().__init__(test_param, runtime, testname, environment_file)
        self.machines = machines

    def run(self):
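        """Run the irq test on all machines and log the per-bucket interrupt rates.

        Returns a tuple (500000 - max_loop_duration, result_details), where
        max_loop_duration is the largest bucket (in us) in which interrupts
        were seen and result_details holds the raw data per machine and core.
        """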
        RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
        RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic   |")
        RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and    |")
        RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was       |")
        RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout   |")
        RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 |") 
        RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
        sys.stdout.flush()
        max_loop_duration = 0
        machine_details = {}
        for machine in self.machines:
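            # The buckets returned by PROX are the upper bounds (in us) of the
            # interrupt-duration histogram it keeps per core.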
            buckets = machine.socket.show_irq_buckets(1)
            if max_loop_duration == 0:
                # First time we go through the loop, we need to initialize
                # result_details
                result_details = {'test': self.test['testname'],
                        'environment_file': self.test['environment_file'],
                        'buckets': buckets}
            print('Measurement ongoing ... ', end='\r')
            machine.start() # PROX cores will be started within 0 to 1 second.
            # That is why we sleep a bit over 1 second to make sure all cores
            # are started.
            time.sleep(1.2)
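            # Per-core, per-bucket counters: old_irq holds the snapshot taken
            # at the start of the measurement, irq the rates computed at the end.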
            old_irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
            irq     = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
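            # Label each column with the upper bound of its bucket; the last
            # bucket is open-ended, so label it with '>' and the previous bound.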
            column_names = []
            for bucket in buckets:
                column_names.append('<{}'.format(bucket))
            column_names[-1] = '>{}'.format(buckets[-2])
            for j,bucket in enumerate(buckets):
                for i,irqcore in enumerate(machine.get_cores()):
                    old_irq[i][j] = machine.socket.irq_stats(irqcore,j)
            # The measurements in the loop above are updated by PROX every
            # second. Taking the same measurement 0.5 seconds later might
            # return the same data, or data from the next 1-second window.
            time.sleep(float(self.test['runtime']))
            row_names = []
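            # Turn the absolute bucket counters into rates per second over the
            # test runtime; '0' marks buckets that saw no interrupts at all.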
            for i,irqcore in enumerate(machine.get_cores()):
                row_names.append(irqcore)
                for j,bucket in enumerate(buckets):
                    diff = machine.socket.irq_stats(irqcore,j) - old_irq[i][j]
                    if diff == 0:
                        irq[i][j] = '0'
                    else:
                        irq[i][j] = str(round(old_div(diff,
                            float(self.test['runtime'])), 2))
                        if max_loop_duration < int(bucket):
                            max_loop_duration = int(bucket)
            # The measurements in the loop above are updated by PROX every
            # second. Taking the same measurement 0.5 seconds later might
            # return the same data, or data from the next 1-second window.
            # Conclusion: we don't know the exact window size, so the real
            # measurement window might be off by up to 1 second. This could be
            # fixed in this script by checking the data every 0.5 seconds.
            # Not implemented, since we can also run the test for a longer
            # time and decrease the error: the absolute number of interrupts
            # is not that important.
            machine.stop()
            core_details = {}
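            # Log the results as a table: one column per bucket, one row per core.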
            RapidLog.info('Results for PROX instance %s' % machine.name)
            RapidLog.info('{:>12}'.format('bucket us') +
                    ''.join(['{:>12}'.format(item) for item in column_names]))
            for j, row in enumerate(irq):
                RapidLog.info('Core {:>7}'.format(row_names[j]) +
                        ''.join(['{:>12}'.format(item) for item in row]))
                core_details['Core {}'.format(row_names[j])] = row
            machine_details[machine.name] = core_details
        result_details['machine_data'] = machine_details
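        # post_data() is inherited from the RapidTest base class; where the
        # results end up depends on how rapid is configured.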
        result_details = self.post_data('rapid_irqtest', result_details)
        return (500000 - max_loop_duration, result_details)