-rw-r--r--  conf/02_vswitch.conf              |  5
-rwxr-xr-x  docs/configguide/installation.rst |  5
-rw-r--r--  tools/hugepages.py                | 45
3 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 79f0afbd..e5736138 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -100,6 +100,11 @@ VSWITCH_BRIDGE_NAME = 'br0'
 # directory where hugepages will be mounted on system init
 HUGEPAGE_DIR = '/dev/hugepages'
 
+# If no hugepages are available, try to allocate HUGEPAGE_RAM_ALLOCATION.
+# Default is 2 x 1048576 = 2097152 kB.
+# 10 GB (10485760 kB) or more is recommended for PVP & PVVP testing scenarios.
+HUGEPAGE_RAM_ALLOCATION = 2097152
+
 # Sets OVS PMDs core mask to 30 for affinitization to 5th and 6th CPU core.
 # Note that the '0x' notation should not be used.
 VSWITCH_PMD_CPU_MASK = '30'
diff --git a/docs/configguide/installation.rst b/docs/configguide/installation.rst
index 5072dee0..1755ca6f 100755
--- a/docs/configguide/installation.rst
+++ b/docs/configguide/installation.rst
@@ -190,3 +190,8 @@ You can review your hugepage amounts by executing the following command
 .. code:: bash
 
     cat /proc/meminfo | grep Huge
+
+If no hugepages are available vsperf will try to automatically allocate some.
+Allocation is controlled by HUGEPAGE_RAM_ALLOCATION configuration parameter in
+``02_vswitch.conf`` file. Default is 2GB, resulting in either 2 1GB hugepages
+or 1024 2MB hugepages.
diff --git a/tools/hugepages.py b/tools/hugepages.py
index 3a434d6e..119f94b5 100644
--- a/tools/hugepages.py
+++ b/tools/hugepages.py
@@ -20,6 +20,7 @@ import re
 import subprocess
 import logging
 import locale
+import math
 
 from tools import tasks
 from conf import settings
@@ -30,6 +31,46 @@ _LOGGER = logging.getLogger(__name__)
 # hugepage management
 #
 
+def get_hugepage_size():
+    """Return the size of the configured hugepages
+    """
+    hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
+                                  re.IGNORECASE)
+    with open('/proc/meminfo', 'r') as fh:
+        data = fh.readlines()
+        for line in data:
+            match = hugepage_size_re.search(line)
+            if match:
+                _LOGGER.info('Hugepages size: %s', match.group('size_hp'))
+                return int(match.group('size_hp'))
+        else:
+            _LOGGER.error('Could not parse for hugepage size')
+            return 0
+
+
+def allocate_hugepages():
+    """Allocate hugepages on the fly
+    """
+    hp_size = get_hugepage_size()
+
+    if hp_size > 0:
+        nr_hp = int(math.ceil(settings.getValue('HUGEPAGE_RAM_ALLOCATION')/hp_size))
+        _LOGGER.info('Will allocate %s hugepages.', nr_hp)
+
+        nr_hugepages = 'vm.nr_hugepages=' + str(nr_hp)
+        try:
+            tasks.run_task(['sudo', 'sysctl', nr_hugepages],
+                           _LOGGER, 'Trying to allocate hugepages..', True)
+        except subprocess.CalledProcessError:
+            _LOGGER.error('Unable to allocate hugepages.')
+            return False
+        return True
+
+    else:
+        _LOGGER.error('Division by 0 will be supported in next release')
+        return False
+
 def is_hugepage_available():
     """Check if hugepages are available on the system.
@@ -47,8 +88,10 @@ def is_hugepage_available():
             continue
 
         num_huge = result.group('num_hp')
-        if not num_huge:
+        if num_huge == '0':
             _LOGGER.info('No free hugepages.')
+            if not allocate_hugepages():
+                return False
         else:
             _LOGGER.info('Found \'%s\' free hugepage(s).', num_huge)
         return True
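
A note on the arithmetic in allocate_hugepages(): the page count is
HUGEPAGE_RAM_ALLOCATION divided by the Hugepagesize value parsed from
/proc/meminfo, rounded up with math.ceil. Whether the round-up actually
happens depends on the interpreter: under Python 3 the / operator performs
true division and math.ceil rounds a fractional quotient up as intended,
while under Python 2 integer division would floor the quotient before
math.ceil ever runs. The defaults divide evenly either way (2097152 kB into
1 GB or 2 MB pages). Below is a minimal standalone sketch of the intended
calculation; hugepages_needed() is a hypothetical helper for illustration,
not code from the patch:

    import math

    def hugepages_needed(requested_kb, hugepage_size_kb):
        # Hypothetical helper, not in the patch: float division keeps the
        # remainder alive so math.ceil can round up as intended, on both
        # Python 2 and Python 3.
        return int(math.ceil(float(requested_kb) / hugepage_size_kb))

    print(hugepages_needed(2097152, 1048576))  # 2    -> 2 x 1 GB pages
    print(hugepages_needed(2097152, 2048))     # 1024 -> 1024 x 2 MB pages
    print(hugepages_needed(2097153, 1048576))  # 3    -> rounds up; Python 2
                                               #        integer division
                                               #        would have given 2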
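
It is also worth noting that 'sysctl vm.nr_hugepages=N' is a request rather
than a guarantee: the kernel reserves as many pages as it can and may deliver
fewer than N when physical memory is fragmented, while the sysctl command
itself still exits successfully. The patch therefore returns True on a
successful sysctl call even if the resulting pool is smaller than requested.
A minimal sketch of a follow-up check, assuming only the standard
/proc/sys/vm/nr_hugepages interface (this helper is hypothetical, not part
of the patch):

    def hugepages_reserved():
        # Read back the hugepage pool size the kernel actually holds; this
        # is the same value 'sysctl vm.nr_hugepages' sets and reports.
        with open('/proc/sys/vm/nr_hugepages', 'r') as fh:
            return int(fh.read().strip())

    requested = 2
    if hugepages_reserved() < requested:
        print('Kernel reserved fewer hugepages than requested.')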