author     Christian Trautman <ctrautma@redhat.com>  2016-07-22 15:54:18 -0400
committer  Maryam Tahhan <maryam.tahhan@intel.com>   2016-08-24 08:43:16 +0000
commit     0fb65d2afadad1f092788f5a4f251b09851b3ae7 (patch)
tree       33a8fa5f6dd575d30bc748deb137433a53031218
parent     bd8712a917d9a0c7272506fd85816f857c32529e (diff)
Vanilla_Multi_Queue: Add vanilla ovs multi-queue functionality
Adds multi-queue for vanilla OVS using virtio-net. TunTap ports will use the
multi_queue parameter when ports are created/deleted if guest NIC queues are
enabled and the vswitch is vanilla OVS. Virtio-net will now add guest NIC
queues to the qemu command line where appropriate. Reworked the multi-queue
documentation to reflect these changes.

Adds vhost-net thread affinitization, which is recommended when performing
vanilla OVS multi-queue. Guests will require ethtool if using l2fwd or linux
bridge as the loopback application when vanilla OVS multi-queue is enabled.

Reworded the DPDK setting in the vswitch conf to separate it clearly from
vanilla OVS multi-queue. Updated the release and installation docs for the new
vloop image that includes the required ethtool utility.

JIRA: VSPERF-373

Change-Id: Idb550515190b1a93390308c11f54da368f962512
Signed-off-by: Christian Trautman <ctrautma@redhat.com>
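As orientation before the diff itself, here is a minimal sketch of the settings
a vanilla OVS multi-queue run would combine after this patch. The conf files
are plain Python; the values below are illustrative, not defaults, and the
`VSWITCH`/`VNF` names are the selector settings VSPERF uses elsewhere (the
`QemuVirtioNet` check appears in the qemu.py hunk below).

 .. code-block:: python

    # Hypothetical excerpt from a VSPERF custom conf file (conf files are Python).
    VSWITCH = 'OvsVanilla'             # vanilla OVS, not OvsDpdkVhost
    VNF = 'QemuVirtioNet'              # virtio-net guest NICs, not vhost-user

    GUEST_NIC_QUEUES = 2               # queues per guest NIC; 0 disables multi-queue

    # Optional: pin the vhost-net kernel threads (one per port per queue).
    VSWITCH_VHOST_NET_AFFINITIZATION = True
    VSWITCH_VHOST_CPU_MAP = [4, 5, 8, 11]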
-rw-r--r--  conf/02_vswitch.conf               10
-rw-r--r--  conf/04_vnf.conf                   11
-rwxr-xr-x  docs/configguide/installation.rst   1
-rw-r--r--  docs/release/NEWS.rst               2
-rwxr-xr-x  docs/userguide/testusage.rst       51
-rw-r--r--  vnfs/qemu/qemu.py                  48
-rw-r--r--  vnfs/qemu/qemu_virtio_net.py       26
-rw-r--r--  vswitches/ovs_dpdk_vhost.py        13
-rw-r--r--  vswitches/ovs_vanilla.py           27
9 files changed, 139 insertions, 50 deletions
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 228ff057..cd2b8d26 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -88,12 +88,12 @@ VSWITCHD_DPDK_CONFIG = {
# Note: VSPERF will automatically detect which type of DPDK configuration should
# be used.
-# To enable multi queue modify the below param to the number of queues.
-# 0 = disabled
-VSWITCH_MULTI_QUEUES = 0
+# To enable multi-queue with DPDK, set the parameter below to the number of
+# queues for DPDK. 0 = disabled
+VSWITCH_DPDK_MULTI_QUEUES = 0
-# Use old style OVS Multi-queue startup. If testing versions of OVS 2.5.0 or
-# before, enable this setting to allow multi-queue to enable correctly.
+# Use old style OVS DPDK Multi-queue startup. If testing versions of OVS 2.5.0
+# or before, enable this setting so that DPDK Multi-queue is enabled correctly.
OVS_OLD_STYLE_MQ = False
# parameters passed to ovs-vswitchd in case that OvsVanilla is selected
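For context, a rough sketch of what these two settings translate to on the OVS
side, based on the vswitches/ovs_dpdk_vhost.py hunks later in this patch. The
ovs-vsctl command strings are for illustration only; VSPERF drives OVS through
its own wrapper classes rather than shelling out like this.

 .. code-block:: python

    queues = 2          # VSWITCH_DPDK_MULTI_QUEUES
    old_style = False   # OVS_OLD_STYLE_MQ

    if old_style:
        # OVS <= 2.5.0: one global datapath setting
        cmd = ['ovs-vsctl', 'set', 'Open_vSwitch', '.',
               'other_config:n-dpdk-rxqs={}'.format(queues)]
    else:
        # newer OVS: per-interface rx queue count
        cmd = ['ovs-vsctl', 'set', 'Interface', 'dpdk0',
               'options:n_rxq={}'.format(queues)]
    print(' '.join(cmd))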
diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf
index 8069ef0d..05893fb8 100644
--- a/conf/04_vnf.conf
+++ b/conf/04_vnf.conf
@@ -119,9 +119,18 @@ GUEST_SMP = ['2', '2', '2', '2', '2', '2']
GUEST_CORE_BINDING = [(6, 7), (9, 10), (11, 12), (13, 14), (15, 16), (17, 18)]
# Queues per NIC inside guest for multi-queue configuration, requires switch
-# multi-queue to be enabled. Set to 0 for disabled.
+# multi-queue to be enabled for DPDK. Set to 0 to disable. With Vanilla OVS
+# this can be enabled without enabling switch multi-queue.
GUEST_NIC_QUEUES = 0
+# Virtio-Net vhost thread CPU mapping. If using vanilla OVS with virtio-net,
+# you can affinitize the vhost-net threads by enabling the setting below. There
+# is one vhost-net thread per port per queue, so one guest with 2 queues will
+# have 4 vhost-net threads. If there are more threads than CPUs given, the
+# affinitization will wrap around and reuse CPUs.
+VSWITCH_VHOST_NET_AFFINITIZATION = False
+VSWITCH_VHOST_CPU_MAP = [4,5,8,11]
+
GUEST_START_TIMEOUT = 120
GUEST_OVS_DPDK_DIR = '/root/ovs_dpdk'
OVS_DPDK_SHARE = '/mnt/ovs_dpdk_share'
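The thread arithmetic from the comment above, as a small standalone sketch: one
kernel thread per port per queue, mapped round-robin onto
VSWITCH_VHOST_CPU_MAP. This mirrors the wrap-around logic added to
vnfs/qemu/qemu.py later in this patch.

 .. code-block:: python

    ports = 2
    queues = 2
    threads = ports * queues          # 2 ports * 2 queues = 4 vhost-net threads

    cpu_map = [4, 5, 8, 11]
    for i in range(threads):
        # wraps when threads outnumber the CPUs in the map
        print('vhost thread {} -> cpu {}'.format(i, cpu_map[i % len(cpu_map)]))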
diff --git a/docs/configguide/installation.rst b/docs/configguide/installation.rst
index 2dabfc7f..3933ee55 100755
--- a/docs/configguide/installation.rst
+++ b/docs/configguide/installation.rst
@@ -150,6 +150,7 @@ running any of the above. For example:
.. _a link: http://www.softwarecollections.org/en/scls/rhscl/python33/
.. _virtualenv: https://virtualenv.readthedocs.org/en/latest/
+.. _vloop-vnf-ubuntu-14.04_20160823: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160823.qcow2
.. _vloop-vnf-ubuntu-14.04_20160804: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160804.qcow2
.. _vloop-vnf-ubuntu-14.04_20160303: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160303.qcow2
.. _vloop-vnf-ubuntu-14.04_20151216: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20151216.qcow2
diff --git a/docs/release/NEWS.rst b/docs/release/NEWS.rst
index a259870b..e1a9fa3b 100644
--- a/docs/release/NEWS.rst
+++ b/docs/release/NEWS.rst
@@ -5,6 +5,8 @@
OPNFV D Release
===============
* Remove support for vhost cuse
+* Add Vanilla OVS Multi-queue with non-testpmd options
+* Add support for Multi-queue with OVS 2.5.0 or earlier
OPNFV Colorado Release
======================
diff --git a/docs/userguide/testusage.rst b/docs/userguide/testusage.rst
index 9eeddc27..c55b5a2c 100755
--- a/docs/userguide/testusage.rst
+++ b/docs/userguide/testusage.rst
@@ -427,30 +427,25 @@ Multi-Queue Configuration
VSPerf currently supports multi-queue with the following limitations:
- 1. Execution of pvp/pvvp tests require testpmd as the loopback if multi-queue
- is enabled at the guest.
-
- 2. Requires QemuDpdkVhostUser as the vnf.
-
- 3. Requires switch to be set to OvsDpdkVhost.
-
- 4. Requires QEMU 2.5 or greater and any OVS version higher than 2.5. The
- default upstream package versions installed by VSPerf satisfy this
+ 1. Requires QEMU 2.5 or greater and any OVS version higher than 2.5. The
+ default upstream package versions installed by VSPerf satisfy this
requirement.
- 5. If using OVS versions 2.5.0 or less enable old style multi-queue as shown in
- the ''02_vswitch.conf'' file.
+ 2. Guest image must have the ethtool utility installed if using l2fwd or
+ linux bridge inside the guest for loopback.
+
+ 3. If using OVS versions 2.5.0 or less, enable old style multi-queue as shown
+ in the ``02_vswitch.conf`` file.
.. code-block:: console
OVS_OLD_STYLE_MQ = True
-To enable multi-queue modify the ''02_vswitch.conf'' file to enable multi-queue
-on the switch.
+To enable multi-queue for DPDK, modify the ``02_vswitch.conf`` file.
.. code-block:: console
- VSWITCH_MULTI_QUEUES = 2
+ VSWITCH_DPDK_MULTI_QUEUES = 2
**NOTE:** you should consider using the switch affinity to set a pmd cpu mask
that can optimize your performance. Consider the numa of the NIC in use if this
@@ -471,8 +466,12 @@ To enable multi-queue on the guest modify the ''04_vnf.conf'' file.
Enabling multi-queue at the guest will add multiple queues to each NIC port when
qemu launches the guest.
-Testpmd should be configured to take advantage of multi-queue on the guest. This
-can be done by modifying the ''04_vnf.conf'' file.
+With Vanilla OVS, multi-queue is enabled on the tuntap ports, and the NIC
+queues are enabled inside the guest with ethtool. Simply enabling
+multi-queue on the guest is sufficient for Vanilla OVS multi-queue.
+
+Testpmd should be configured to take advantage of multi-queue on the guest if
+using DPDKVhostUser. This can be done by modifying the ``04_vnf.conf`` file.
.. code-block:: console
@@ -485,9 +484,23 @@ can be done by modifying the ''04_vnf.conf'' file.
**NOTE:** The guest SMP cores must be configured to allow for testpmd to use the
optimal number of cores to take advantage of the multiple guest queues.
-**NOTE:** For optimal performance guest SMPs should be on the same numa as the
-NIC in use if possible/applicable. Testpmd should be assigned at least
-(nb_cores +1) total cores with the cpu mask.
+When using Vanilla OVS and QEMU virtio-net, you can increase performance by
+binding vhost-net threads to CPUs. This can be done by enabling the affinity
+in the ``04_vnf.conf`` file. It also applies to non multi-queue enabled
+configurations, where there will be 2 vhost-net threads.
+
+ .. code-block:: console
+
+ VSWITCH_VHOST_NET_AFFINITIZATION = True
+
+ VSWITCH_VHOST_CPU_MAP = [4,5,8,11]
+
+**NOTE:** This method of binding would require a custom script in a real
+environment.
+
+**NOTE:** For optimal performance, guest SMPs and/or vhost-net threads should
+be on the same NUMA node as the NIC in use, if possible/applicable. Testpmd
+should be assigned at least (nb_cores + 1) total cores with the cpu mask.
Executing Packet Forwarding tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
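A hedged sketch of what the guest-side ethtool step amounts to; VSPERF runs the
equivalent via the _set_multi_queue_nic() method added to vnfs/qemu/qemu.py in
this patch. 'eth0' is a placeholder, the real code uses the NIC names it
manages, and ethtool must be installed in the guest image for this to work.

 .. code-block:: python

    import subprocess

    queues = 2
    # set the number of combined channels, then read them back to verify
    subprocess.check_call(['ethtool', '-L', 'eth0', 'combined', str(queues)])
    subprocess.check_call(['ethtool', '-l', 'eth0'])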
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index e91eaa29..dd9ad818 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -129,6 +129,10 @@ class IVnfQemu(IVnf):
if S.getValue('VNF_AFFINITIZATION_ON'):
self._affinitize()
+ if S.getValue('VSWITCH_VHOST_NET_AFFINITIZATION') and S.getValue(
+ 'VNF') == 'QemuVirtioNet':
+ self._affinitize_vhost_net()
+
if self._timeout:
self._config_guest_loopback()
@@ -236,6 +240,34 @@ class IVnfQemu(IVnf):
self._logger.error('Failed to affinitize guest core #%d. Could'
' not parse tid.', cpu)
+ def _affinitize_vhost_net(self):
+ """
+ Affinitize the vhost net threads for Vanilla OVS and guest nic queues.
+
+ :return: None
+ """
+ self._logger.info('Affinitizing VHOST Net threads.')
+ args1 = ['ps', 'ax']
+ process1 = subprocess.Popen(args1, stdout=subprocess.PIPE,
+ shell=False)
+ out = process1.communicate()[0]
+ processes = list()
+ for line in out.decode(locale.getdefaultlocale()[1]).split('\n'):
+ if re.search('\[vhost-(\d+)', line):
+ processes.append(re.match('\s*(\d+)', line).group(1))
+ self._logger.info('Found %s vhost net threads...', len(processes))
+
+ cpumap = S.getValue('VSWITCH_VHOST_CPU_MAP')
+ mapcount = 0
+ for proc in processes:
+ self._affinitize_pid(cpumap[mapcount], proc)
+ mapcount += 1
+ if mapcount + 1 > len(cpumap):
+ # Not enough cpus were given in the mapping to cover all the
+ # threads on a 1 to 1 ratio with cpus so reset the list counter
+ # to 0.
+ mapcount = 0
+
def _config_guest_loopback(self):
"""
Configure VM to run VNF, e.g. port forwarding application based on the configuration
@@ -380,6 +412,8 @@ class IVnfQemu(IVnf):
"""
Configure VM to perform L2 forwarding between NICs by l2fwd module
"""
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ self._set_multi_queue_nic()
self._configure_copy_sources('l2fwd')
self._configure_disable_firewall()
@@ -397,6 +431,8 @@ class IVnfQemu(IVnf):
"""
Configure VM to perform L2 forwarding between NICs by linux bridge
"""
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ self._set_multi_queue_nic()
self._configure_disable_firewall()
self.execute('ip addr add ' +
@@ -441,3 +477,15 @@ class IVnfQemu(IVnf):
self.execute('sysctl -w net.ipv4.conf.all.rp_filter=0')
self.execute('sysctl -w net.ipv4.conf.' + self._net1 + '.rp_filter=0')
self.execute('sysctl -w net.ipv4.conf.' + self._net2 + '.rp_filter=0')
+
+ def _set_multi_queue_nic(self):
+ """
+ Enable multi-queue in the guest kernel with ethtool.
+ :return: None
+ """
+ self.execute_and_wait('ethtool -L {} combined {}'.format(
+ self._net1, S.getValue('GUEST_NIC_QUEUES')))
+ self.execute_and_wait('ethtool -l {}'.format(self._net1))
+ self.execute_and_wait('ethtool -L {} combined {}'.format(
+ self._net2, S.getValue('GUEST_NIC_QUEUES')))
+ self.execute_and_wait('ethtool -l {}'.format(self._net2))
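A sketch of the thread discovery used by _affinitize_vhost_net() above: kernel
vhost workers show up in ``ps ax`` output as ``[vhost-<qemu pid>]``, and the
first field of each matching line is the thread's pid. The sample output here
is fabricated for illustration.

 .. code-block:: python

    import re

    sample = (' 1234 ?        S      0:00 [vhost-1200]\n'
              ' 1235 ?        S      0:00 [vhost-1200]\n')
    pids = [re.match(r'\s*(\d+)', line).group(1)
            for line in sample.split('\n')
            if re.search(r'\[vhost-(\d+)', line)]
    print(pids)   # ['1234', '1235']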
diff --git a/vnfs/qemu/qemu_virtio_net.py b/vnfs/qemu/qemu_virtio_net.py
index e5e895f2..afb519c3 100644
--- a/vnfs/qemu/qemu_virtio_net.py
+++ b/vnfs/qemu/qemu_virtio_net.py
@@ -41,22 +41,34 @@ class QemuVirtioNet(IVnfQemu):
if1 = str(i)
if2 = str(i + 1)
+ # multi-queue values
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES'))
+ mq_vector_str = ',mq=on,vectors={}'.format(
+ int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2)
+ else:
+ queue_str, mq_vector_str = '', ''
+
self._cmd += ['-netdev',
- 'type=tap,id=' + self._net1 +
+ 'tap,id=' + self._net1 + queue_str +
',script=no,downscript=no,' +
'ifname=tap' + if1 + ',vhost=on',
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET1_MAC')[self._number] +
- ',netdev=' + self._net1 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
+ ',netdev=' + self._net1 +
+ ',csum=off,gso=off,' +
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
'-netdev',
- 'type=tap,id=' + self._net2 +
+ 'tap,id=' + self._net2 + queue_str +
',script=no,downscript=no,' +
'ifname=tap' + if2 + ',vhost=on',
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET2_MAC')[self._number] +
- ',netdev=' + self._net2 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
- ]
+ ',netdev=' + self._net2 +
+ ',csum=off,gso=off,' +
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
+ ]
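The MSI-X vector sizing used in mq_vector_str above follows the usual
virtio-net rule: 2 vectors per queue pair (rx + tx) plus one for configuration
changes and one for the control queue, hence vectors = 2 * queues + 2. A small
sketch of the arithmetic:

 .. code-block:: python

    def mq_vectors(queues):
        # 2 per queue pair + 1 config + 1 control
        return 2 * queues + 2

    for q in (1, 2, 4):
        print('queues={} -> vectors={}'.format(q, mq_vectors(q)))
    # GUEST_NIC_QUEUES = 2 yields ',mq=on,vectors=6' on the virtio-net-pci device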
diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py
index cebc14b2..c0764c87 100644
--- a/vswitches/ovs_dpdk_vhost.py
+++ b/vswitches/ovs_dpdk_vhost.py
@@ -70,11 +70,11 @@ class OvsDpdkVhost(IVSwitchOvs):
super(OvsDpdkVhost, self).start()
# old style OVS <= 2.5.0 multi-queue enable
if settings.getValue('OVS_OLD_STYLE_MQ') and \
- int(settings.getValue('VSWITCH_MULTI_QUEUES')):
+ int(settings.getValue('VSWITCH_DPDK_MULTI_QUEUES')):
tmp_br = OFBridge(timeout=-1)
tmp_br.set_db_attribute(
'Open_vSwitch', '.', 'other_config:' +
- 'n-dpdk-rxqs', settings.getValue('VSWITCH_MULTI_QUEUES'))
+ 'n-dpdk-rxqs', settings.getValue('VSWITCH_DPDK_MULTI_QUEUES'))
def stop(self):
"""See IVswitch for general description
@@ -112,10 +112,11 @@ class OvsDpdkVhost(IVSwitchOvs):
port_name = 'dpdk' + str(dpdk_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdk']
# multi-queue enable
- if int(settings.getValue('VSWITCH_MULTI_QUEUES')) and \
+
+ if int(settings.getValue('VSWITCH_DPDK_MULTI_QUEUES')) and \
not settings.getValue('OVS_OLD_STYLE_MQ'):
params += ['options:n_rxq={}'.format(
- settings.getValue('VSWITCH_MULTI_QUEUES'))]
+ settings.getValue('VSWITCH_DPDK_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
return (port_name, of_port)
@@ -131,10 +132,10 @@ class OvsDpdkVhost(IVSwitchOvs):
port_name = 'dpdkvhostuser' + str(vhost_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdkvhostuser']
# multi queue enable
- if int(settings.getValue('VSWITCH_MULTI_QUEUES')) and \
+ if int(settings.getValue('VSWITCH_DPDK_MULTI_QUEUES')) and \
not settings.getValue('OVS_OLD_STYLE_MQ'):
params += ['options:n_rxq={}'.format(
- settings.getValue('VSWITCH_MULTI_QUEUES'))]
+ settings.getValue('VSWITCH_DPDK_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
return (port_name, of_port)
diff --git a/vswitches/ovs_vanilla.py b/vswitches/ovs_vanilla.py
index f880dfaf..332725e3 100644
--- a/vswitches/ovs_vanilla.py
+++ b/vswitches/ovs_vanilla.py
@@ -60,9 +60,10 @@ class OvsVanilla(IVSwitchOvs):
# remove all tap interfaces
for i in range(self._vport_id):
tapx = 'tap' + str(i)
- tasks.run_task(['sudo', 'ip', 'tuntap', 'del',
- tapx, 'mode', 'tap'],
- self._logger, 'Deleting ' + tapx, False)
+ tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tapx, 'mode', 'tap']
+ if int(settings.getValue('GUEST_NIC_QUEUES')):
+ tap_cmd_list += ['multi_queue']
+ tasks.run_task(tap_cmd_list, self._logger, 'Deleting ' + tapx, False)
self._vport_id = 0
super(OvsVanilla, self).stop()
@@ -71,7 +72,6 @@ class OvsVanilla(IVSwitchOvs):
self._module_manager.remove_modules()
-
def add_phy_port(self, switch_name):
"""
Method adds port based on detected device names.
@@ -111,14 +111,17 @@ class OvsVanilla(IVSwitchOvs):
# Create tap devices for the VM
tap_name = 'tap' + str(self._vport_id)
self._vport_id += 1
-
- tasks.run_task(['sudo', 'ip', 'tuntap', 'del',
- tap_name, 'mode', 'tap'],
- self._logger, 'Creating tap device...', False)
-
- tasks.run_task(['sudo', 'ip', 'tuntap', 'add',
- tap_name, 'mode', 'tap'],
- self._logger, 'Creating tap device...', False)
+ tap_cmd_list = ['sudo', 'ip', 'tuntap', 'del', tap_name, 'mode', 'tap']
+ if int(settings.getValue('GUEST_NIC_QUEUES')):
+ tap_cmd_list += ['multi_queue']
+ tasks.run_task(tap_cmd_list, self._logger,
+ 'Creating tap device...', False)
+
+ tap_cmd_list = ['sudo', 'ip', 'tuntap', 'add', tap_name, 'mode', 'tap']
+ if int(settings.getValue('GUEST_NIC_QUEUES')):
+ tap_cmd_list += ['multi_queue']
+ tasks.run_task(tap_cmd_list, self._logger,
+ 'Creating tap device...', False)
tasks.run_task(['sudo', 'ip', 'addr', 'flush', 'dev', tap_name],
self._logger, 'Remove IP', False)
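A standalone sketch of the tap handling added above. The ``multi_queue`` flag
must match between ``add`` and ``del``: a tap created with the flag cannot be
deleted without it, which is why both command lists gain the parameter when
guest NIC queues are enabled.

 .. code-block:: python

    import subprocess

    def tap_cmd(action, name, multi_queue):
        # builds 'sudo ip tuntap <add|del> <name> mode tap [multi_queue]'
        cmd = ['sudo', 'ip', 'tuntap', action, name, 'mode', 'tap']
        if multi_queue:
            cmd.append('multi_queue')
        return cmd

    subprocess.call(tap_cmd('add', 'tap0', True))
    subprocess.call(tap_cmd('del', 'tap0', True))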