-rwxr-xr-x  docs/configguide/installation.rst | 12
-rw-r--r--  testcases/testcase.py             | 92
-rw-r--r--  tools/hugepages.py                | 75
3 files changed, 152 insertions, 27 deletions
diff --git a/docs/configguide/installation.rst b/docs/configguide/installation.rst
index 048c2675..70572b07 100755
--- a/docs/configguide/installation.rst
+++ b/docs/configguide/installation.rst
@@ -186,9 +186,19 @@ parameter. In recent vswitchd versions, option VSWITCHD_DPDK_CONFIG will
 be used to configure vswitchd via ovs-vsctl calls.
 
 With the --socket-mem argument set to use 1 hugepage on the specified sockets as
-seen above, the configuration will need 9 hugepages total to run all tests
+seen above, the configuration will need 10 hugepages total to run all tests
 within vsperf if the pagesize is set correctly to 1GB.
 
+VSPerf will verify that enough hugepages are free before executing a test
+environment. If not enough hugepages are free, test initialization will fail
+and testing will stop.
+
+**Please Note**: In some instances, after a test failure, dpdk may not release
+the hugepages it used. It is recommended to configure a few extra hugepages to
+prevent VSPerf from falsely detecting that not enough free hugepages are
+available to execute the test environment. Normally dpdk will simply reuse its
+previously allocated hugepages upon initialization.
+
 Depending on your OS selection configuration of hugepages may vary. Please refer
 to your OS documentation to set hugepages correctly. It is recommended to set
 the required amount of hugepages to be allocated by default on reboots.
diff --git a/testcases/testcase.py b/testcases/testcase.py
index 5f5c9358..d88840d7 100644
--- a/testcases/testcase.py
+++ b/testcases/testcase.py
@@ -14,14 +14,18 @@
 """TestCase base class
 """
 
+from collections import OrderedDict
+import copy
 import csv
+import logging
+import math
 import os
+import re
 import time
-import logging
 import subprocess
-import copy
-from collections import OrderedDict
 
+from conf import settings as S
+from conf import get_test_param
 import core.component_factory as component_factory
 from core.loader import Loader
 from core.results.results_constants import ResultsConstants
@@ -29,8 +33,7 @@ from tools import tasks
 from tools import hugepages
 from tools import functions
 from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
-from conf import settings as S
-from conf import get_test_param
+
 
 class TestCase(object):
     """TestCase base class
@@ -189,6 +192,10 @@ class TestCase(object):
         # mount hugepages if needed
         self._mount_hugepages()
 
+        # verify enough hugepages are free to run the testcase
+        if not self._check_for_enough_hugepages():
+            raise RuntimeError('Not enough hugepages free to run test.')
+
         # copy sources of l2 forwarding tools into VM shared dir if needed
         self._copy_fwd_tools_for_all_guests()
 
@@ -391,7 +398,6 @@ class TestCase(object):
         except subprocess.CalledProcessError:
             self._logger.error('Unable to copy DPDK and l2fwd to shared directory')
 
-
     def _mount_hugepages(self):
         """Mount hugepages if usage of DPDK or Qemu is detected
         """
@@ -411,6 +417,80 @@ class TestCase(object):
             hugepages.umount_hugepages()
             self._hugepages_mounted = False
 
+    def _check_for_enough_hugepages(self):
+        """Check to make sure enough hugepages are free to satisfy the
+        test environment.
+        """
+        hugepages_needed = 0
+        hugepage_size = hugepages.get_hugepage_size()
+        # get hugepage amounts per guest
+        for guest in range(self.deployment.count('v')):
+            hugepages_needed += math.ceil((int(S.getValue(
+                'GUEST_MEMORY')[guest]) * 1000) / hugepage_size)
+
+        # get hugepage amounts for each socket on dpdk
+        sock0_mem, sock1_mem = 0, 0
+        if S.getValue('VSWITCH').lower().count('dpdk'):
+            # the import below needs to remain here and not be moved into the
+            # module imports because settings are not yet loaded at import time
+            from vswitches import ovs_dpdk_vhost
+            if ovs_dpdk_vhost.OvsDpdkVhost.old_dpdk_config():
+                match = re.search(
+                    r'-socket-mem\s+(\d+),(\d+)',
+                    ''.join(S.getValue('VSWITCHD_DPDK_ARGS')))
+                if match:
+                    sock0_mem, sock1_mem = (int(match.group(1)) / 1024,
+                                            int(match.group(2)) / 1024)
+                else:
+                    logging.info(
+                        'Could not parse socket memory config in dpdk params.')
+            else:
+                sock0_mem, sock1_mem = (
+                    S.getValue(
+                        'VSWITCHD_DPDK_CONFIG')['dpdk-socket-mem'].split(','))
+                sock0_mem, sock1_mem = (int(sock0_mem) / 1024,
+                                        int(sock1_mem) / 1024)
+
+        # If hugepages are needed, verify the amounts are free
+        if any([hugepages_needed, sock0_mem, sock1_mem]):
+            free_hugepages = hugepages.get_free_hugepages()
+            if hugepages_needed:
+                logging.info('Need %s hugepages free for guests',
+                             hugepages_needed)
+                result1 = free_hugepages >= hugepages_needed
+                free_hugepages -= hugepages_needed
+            else:
+                result1 = True
+
+            if sock0_mem:
+                logging.info('Need %s hugepages free for dpdk socket 0',
+                             sock0_mem)
+                result2 = hugepages.get_free_hugepages('0') >= sock0_mem
+                free_hugepages -= sock0_mem
+            else:
+                result2 = True
+
+            if sock1_mem:
+                logging.info('Need %s hugepages free for dpdk socket 1',
+                             sock1_mem)
+                result3 = hugepages.get_free_hugepages('1') >= sock1_mem
+                free_hugepages -= sock1_mem
+            else:
+                result3 = True
+
+            logging.info('Need a total of %s hugepages',
+                         hugepages_needed + sock1_mem + sock0_mem)
+
+            # The only drawback here is that sometimes dpdk doesn't release
+            # its hugepages on a test failure. This could cause a test
+            # to fail when dpdk would be OK to start because it will just
+            # use the previously allocated hugepages.
+            result4 = free_hugepages >= 0
+
+            return all([result1, result2, result3, result4])
+        else:
+            return True
+
     @staticmethod
     def write_result_to_file(results, output):
         """Write list of dictionaries to a CSV file.
diff --git a/tools/hugepages.py b/tools/hugepages.py
index 119f94b5..02e4f29c 100644
--- a/tools/hugepages.py
+++ b/tools/hugepages.py
@@ -31,6 +31,7 @@ _LOGGER = logging.getLogger(__name__)
 # hugepage management
 #
 
+
 def get_hugepage_size():
     """Return the size of the configured hugepages
     """
@@ -48,7 +49,6 @@ def get_hugepage_size():
         return 0
 
 
-
 def allocate_hugepages():
     """Allocate hugepages on the fly
     """
@@ -72,31 +72,65 @@ def allocate_hugepages():
     return False
 
 
+def get_free_hugepages(socket=None):
+    """Get the free hugepage totals on the system.
+
+    :param socket: optional socket to restrict the count to a single NUMA
+                   node; pass the socket number as a string.
+    :returns: hugepage amount as int
+    """
+    hugepage_free_re = re.compile(r'HugePages_Free:\s+(?P<free_hp>\d+)$')
+    if socket:
+        if os.path.exists(
+                '/sys/devices/system/node/node{}/meminfo'.format(socket)):
+            meminfo_path = '/sys/devices/system/node/node{}/meminfo'.format(
+                socket)
+        else:
+            _LOGGER.info('No hugepage info found for socket {}'.format(socket))
+            return 0
+    else:
+        meminfo_path = '/proc/meminfo'
+
+    with open(meminfo_path, 'r') as fh:
+        data = fh.readlines()
+    for line in data:
+        match = hugepage_free_re.search(line)
+        if match:
+            _LOGGER.info('Hugepages free: %s %s', match.group('free_hp'),
+                         'on socket {}'.format(socket) if socket else '')
+            return int(match.group('free_hp'))
+    else:
+        _LOGGER.info('Could not parse for free hugepages')
+        return 0
+
+
 def is_hugepage_available():
-    """Check if hugepages are available on the system.
+    """Check if hugepages are configured/available on the system.
     """
-    hugepage_re = re.compile(r'^HugePages_Free:\s+(?P<num_hp>\d+)$')
+    hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
+                                  re.IGNORECASE)
 
     # read in meminfo
    with open('/proc/meminfo') as mem_file:
         mem_info = mem_file.readlines()
 
-    # first check if module is loaded
+    # see if the hugepage size is the recommended value
     for line in mem_info:
-        result = hugepage_re.match(line)
-        if not result:
-            continue
-
-        num_huge = result.group('num_hp')
-        if num_huge == '0':
-            _LOGGER.info('No free hugepages.')
-            if not allocate_hugepages():
-                return False
-        else:
-            _LOGGER.info('Found \'%s\' free hugepage(s).', num_huge)
-        return True
-
-    return False
+        match_size = hugepage_size_re.match(line)
+        if match_size:
+            if match_size.group('size_hp') != '1048576':
+                _LOGGER.info(
+                    '%s%s%s kB',
+                    'Hugepages not configured for recommended 1GB size. ',
+                    'Currently set at ', match_size.group('size_hp'))
+    num_huge = get_free_hugepages()
+    if num_huge == 0:
+        _LOGGER.info('No free hugepages.')
+        if not allocate_hugepages():
+            return False
+    else:
+        _LOGGER.info('Found \'%s\' free hugepage(s).', num_huge)
+    return True
 
 
 def is_hugepage_mounted():
@@ -112,10 +146,11 @@ def is_hugepage_mounted():
 
 
 def mount_hugepages():
-    """Ensure hugepages are mounted.
+    """Ensure hugepages are mounted. Raises RuntimeError if no configured
+    hugepages are available.
     """
     if not is_hugepage_available():
-        return
+        raise RuntimeError('No Hugepages configured.')
 
     if is_hugepage_mounted():
         return
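For readers sizing their hugepage pool, the sketch below walks through the arithmetic that the new _check_for_enough_hugepages() method performs. It is a standalone illustration and not part of the patch; the single 4096 MB guest and the "--socket-mem 1024,1024" vswitchd argument are assumed example values.

import math
import re

# Illustrative inputs: one guest with 4096 MB of RAM (assuming GUEST_MEMORY is
# expressed in MB, as the "* 1000" conversion in the patch suggests) and DPDK
# started with "--socket-mem 1024,1024". Both values are assumptions for this
# example, not values taken from the patch.
guest_memory_mb = [4096]
hugepage_size_kb = 1048576          # 1 GB hugepages, as recommended above

# Hugepages needed by the guests: memory scaled to kB and divided by the
# hugepage size, rounded up per guest, mirroring the math.ceil() call above.
guests_need = sum(math.ceil((mem * 1000) / hugepage_size_kb)
                  for mem in guest_memory_mb)

# Hugepages needed by the vswitch: each socket-mem value is given in MB, so
# divide by 1024 to express it in 1 GB hugepages.
match = re.search(r'-socket-mem\s+(\d+),(\d+)', '--socket-mem 1024,1024')
sock0, sock1 = (int(match.group(1)) // 1024, int(match.group(2)) // 1024)

print('guests: %d, socket 0: %d, socket 1: %d, total: %d'
      % (guests_need, sock0, sock1, guests_need + sock0 + sock1))
# -> guests: 4, socket 0: 1, socket 1: 1, total: 6

With 1 GB hugepages this example needs 4 pages for the guest plus 1 per DPDK socket. The patched check also subtracts each requirement from the running free-page count, so overlapping demands on the same pool are caught before the test environment is started.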