author    Christian Trautman <ctrautma@redhat.com>   2016-07-01 22:44:38 +0000
committer Gerrit Code Review <gerrit@172.30.200.206> 2016-07-01 22:44:38 +0000
commit    26a7262cee1a2961ed5aa76bd969dddeff8ea472 (patch)
tree      31017520e60b8bbec9a992fc43ac7c0195495f4e
parent    4bd09fc0c95f89f3041e6a5a66613eb7cd7eaff8 (diff)
parent    095fa73e80f7a9485e72a7f3ba23c4e4608627cd (diff)
Merge "multi-queue: Add basic multi-queue functionality"
-rw-r--r--   conf/02_vswitch.conf               |  4
-rw-r--r--   conf/04_vnf.conf                   | 15
-rwxr-xr-x   docs/userguide/testusage.rst       | 58
-rw-r--r--   vnfs/qemu/qemu.py                  | 19
-rw-r--r--   vnfs/qemu/qemu_dpdk_vhost_user.py  | 19
-rw-r--r--   vswitches/ovs_dpdk_vhost.py        | 10
6 files changed, 115 insertions(+), 10 deletions(-)
diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf
index 7f9daf1c..79f0afbd 100644
--- a/conf/02_vswitch.conf
+++ b/conf/02_vswitch.conf
@@ -84,6 +84,10 @@ VSWITCHD_DPDK_CONFIG = {
# Note: VSPERF will automatically detect, which type of DPDK configuration should
# be used.
+# To enable multi-queue, set the parameter below to the desired number of queues.
+# 0 = disabled
+VSWITCH_MULTI_QUEUES = 0
+
# parameters passed to ovs-vswitchd in case that OvsVanilla is selected
VSWITCHD_VANILLA_ARGS = []
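
The new VSWITCH_MULTI_QUEUES setting is consumed by the vswitch wrapper later in
this patch (vswitches/ovs_dpdk_vhost.py), which passes it to OVS as the n_rxq
port option. A minimal editorial sketch of how the result could be verified on a
running switch; the port name 'dpdk0' and the presence of ovs-vsctl on PATH are
assumptions, not part of the patch:

    # Editorial sketch: confirm a DPDK port picked up the configured queue count.
    import subprocess

    def get_n_rxq(port='dpdk0'):
        """Return the value of options:n_rxq for an OVS interface."""
        out = subprocess.check_output(
            ['ovs-vsctl', 'get', 'Interface', port, 'options:n_rxq'])
        return out.decode().strip().strip('"')

    print(get_n_rxq())  # expect '2' when VSWITCH_MULTI_QUEUES = 2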
diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf
index 926ea50a..0a80c1af 100644
--- a/conf/04_vnf.conf
+++ b/conf/04_vnf.conf
@@ -98,10 +98,25 @@ GUEST_SMP = ['2', '2']
# For 2 VNFs you may use [(4,5), (6, 7)]
GUEST_CORE_BINDING = [(6, 7), (9, 10)]
+# Number of queues per NIC inside the guest for multi-queue configuration;
+# requires multi-queue to be enabled on the switch. Set to 0 to disable.
+GUEST_NIC_QUEUES = 0
+
GUEST_START_TIMEOUT = 120
GUEST_OVS_DPDK_DIR = '/root/ovs_dpdk'
OVS_DPDK_SHARE = '/mnt/ovs_dpdk_share'
+# Set the CPU mask for testpmd loopback. To bind to specific guest CPUs use -l
+# GUEST_TESTPMD_CPU_MASK = '-l 0,1'
+GUEST_TESTPMD_CPU_MASK = '-c 0x3'
+
+# Testpmd multi-core configuration. Leave at 0 to disable. Has no effect unless
+# GUEST_NIC_QUEUES is > 0. For bidirectional traffic NB_CORES must be equal
+# to (RXQ + TXQ).
+GUEST_TESTPMD_NB_CORES = 0
+GUEST_TESTPMD_TXQ = 0
+GUEST_TESTPMD_RXQ = 0
+
# IP addresses to use for Vanilla OVS PVP testing
# Consider using RFC 2544/3330 recommended IP addresses for benchmark testing.
# Network: 198.18.0.0/15
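
Stepping back to the GUEST_TESTPMD_* settings added above: for bidirectional
traffic NB_CORES should equal RXQ + TXQ, and the CPU mask should provide at
least one more core than NB_CORES. An illustrative consistency check, with
example values that are not part of the patch:

    # Example values only; any consistent combination works.
    GUEST_TESTPMD_CPU_MASK = '-l 0,1,2,3,4'
    GUEST_TESTPMD_NB_CORES = 4
    GUEST_TESTPMD_RXQ = 2
    GUEST_TESTPMD_TXQ = 2

    assert GUEST_TESTPMD_NB_CORES == GUEST_TESTPMD_RXQ + GUEST_TESTPMD_TXQ
    cores = GUEST_TESTPMD_CPU_MASK.replace('-l', '').split(',')
    # one extra core is needed for the testpmd main thread
    assert len(cores) >= GUEST_TESTPMD_NB_CORES + 1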
diff --git a/docs/userguide/testusage.rst b/docs/userguide/testusage.rst
index 104723e3..d807590d 100755
--- a/docs/userguide/testusage.rst
+++ b/docs/userguide/testusage.rst
@@ -437,6 +437,64 @@ Guest loopback application must be configured, otherwise traffic
will not be forwarded by VM and testcases with PVP and PVVP deployments
will fail. Guest loopback application is set to 'testpmd' by default.
+Multi-Queue Configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+VSPerf currently supports multi-queue with the following limitations:
+
+ 1. PVP/PVVP tests require testpmd as the guest loopback application when
+ multi-queue is enabled on the guest.
+
+ 2. Requires QemuDpdkVhostUser as the VNF.
+
+ 3. Requires the switch to be set to OvsDpdkVhost.
+
+ 4. Requires QEMU 2.5 or greater and any OVS version higher than 2.5. The
+ default upstream package versions installed by VSPerf satisfy this
+ requirement.
+
+To enable multi-queue on the switch, modify the ``02_vswitch.conf`` file:
+
+ .. code-block:: console
+
+ VSWITCH_MULTI_QUEUES = 2
+
+**NOTE:** Consider setting the switch PMD CPU mask to optimize performance.
+Check the NUMA node of the NIC in use via
+/sys/class/net/<eth_name>/device/numa_node and set a mask that creates the PMD
+threads on the same NUMA node.
+
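
A minimal editorial sketch of that NUMA check (not part of the patch); the NIC
name and the CPU-to-node layout (cores 0-7 on node 0, 8-15 on node 1) are
assumptions for illustration only:

    # Read the NIC's NUMA node and print a pmd-cpu-mask suggestion.
    nic = 'enp3s0f0'  # example interface name
    with open('/sys/class/net/{}/device/numa_node'.format(nic)) as f:
        node = int(f.read())
    cpus = range(0, 8) if node == 0 else range(8, 16)  # assumed CPU layout
    mask = sum(1 << c for c in cpus)
    print('ovs-vsctl set Open_vSwitch . '
          'other_config:pmd-cpu-mask={:#x}'.format(mask))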
+When multi-queue is enabled, each dpdk or dpdkvhostuser port created on the
+switch is configured with the requested number of queues (options:n_rxq).
+
+To enable multi-queue on the guest, modify the ``04_vnf.conf`` file:
+
+ .. code-block:: console
+
+ GUEST_NIC_QUEUES = 2
+
+With guest multi-queue enabled, QEMU adds the configured number of queues to
+each NIC port when it launches the guest.
+
+Testpmd should be configured to take advantage of multi-queue on the guest; this
+is also done in the ``04_vnf.conf`` file.
+
+ .. code-block:: console
+
+ GUEST_TESTPMD_CPU_MASK = '-l 0,1,2,3,4'
+
+ GUEST_TESTPMD_NB_CORES = 4
+ GUEST_TESTPMD_TXQ = 2
+ GUEST_TESTPMD_RXQ = 2
+
+**NOTE:** The guest SMP core count must be configured so that testpmd has enough
+cores to take advantage of the multiple guest queues.
+
+**NOTE:** For optimal performance the guest SMP cores should be on the same NUMA
+node as the NIC in use, where possible/applicable. The testpmd CPU mask should
+provide at least (nb_cores + 1) cores in total.
+
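
For example, a guest CPU layout matching the testpmd settings above could look
like the following in ``04_vnf.conf``; the host core numbers and bindings are
illustrative only, not part of the patch:

    # 4 forwarding cores + 1 core for the testpmd main thread -> 5 vCPUs per guest.
    GUEST_SMP = ['5', '5']
    # Bind each vCPU to a host core on the NIC's NUMA node (example cores).
    GUEST_CORE_BINDING = [(4, 5, 6, 7, 8), (9, 10, 11, 12, 13)]
    GUEST_TESTPMD_CPU_MASK = '-l 0,1,2,3,4'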
Executing Packet Forwarding tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py
index 2de8df2a..9382edef 100644
--- a/vnfs/qemu/qemu.py
+++ b/vnfs/qemu/qemu.py
@@ -354,9 +354,22 @@ class IVnfQemu(IVnf):
'/DPDK/app/test-pmd')
self.execute_and_wait('make clean')
self.execute_and_wait('make')
- self.execute_and_wait('./testpmd -c 0x3 -n 4 --socket-mem 512 --'
- ' --burst=64 -i --txqflags=0xf00 ' +
- '--disable-hw-vlan', 60, "Done")
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ self.execute_and_wait(
+ './testpmd {} -n4 --socket-mem 512 --'.format(
+ S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+ ' --burst=64 -i --txqflags=0xf00 ' +
+ '--nb-cores={} --rxq={} --txq={} '.format(
+ S.getValue('GUEST_TESTPMD_NB_CORES'),
+ S.getValue('GUEST_TESTPMD_RXQ'),
+ S.getValue('GUEST_TESTPMD_TXQ')) +
+ '--disable-hw-vlan', 60, "Done")
+ else:
+ self.execute_and_wait(
+ './testpmd {} -n 4 --socket-mem 512 --'.format(
+ S.getValue('GUEST_TESTPMD_CPU_MASK')) +
+ ' --burst=64 -i --txqflags=0xf00 ' +
+ '--disable-hw-vlan', 60, "Done")
self.execute('set fwd ' + self._testpmd_fwd_mode, 1)
self.execute_and_wait('start', 20,
'TX RS bit threshold=.+ - TXQ flags=0xf00')
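
With the example values from 04_vnf.conf above, the multi-queue branch builds a
testpmd invocation equivalent to the following (editorial reconstruction, using
the example mask, core and queue counts):

    cmd = ('./testpmd {} -n4 --socket-mem 512 --'.format('-l 0,1,2,3,4') +
           ' --burst=64 -i --txqflags=0xf00 ' +
           '--nb-cores={} --rxq={} --txq={} '.format(4, 2, 2) +
           '--disable-hw-vlan')
    print(cmd)
    # ./testpmd -l 0,1,2,3,4 -n4 --socket-mem 512 -- --burst=64 -i
    #   --txqflags=0xf00 --nb-cores=4 --rxq=2 --txq=2 --disable-hw-vlan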
diff --git a/vnfs/qemu/qemu_dpdk_vhost_user.py b/vnfs/qemu/qemu_dpdk_vhost_user.py
index f0f97d8a..49131423 100644
--- a/vnfs/qemu/qemu_dpdk_vhost_user.py
+++ b/vnfs/qemu/qemu_dpdk_vhost_user.py
@@ -38,6 +38,14 @@ class QemuDpdkVhostUser(IVnfQemu):
net1 = 'net' + str(i + 1)
net2 = 'net' + str(i + 2)
+ # multi-queue values
+ if int(S.getValue('GUEST_NIC_QUEUES')):
+ queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES'))
+ mq_vector_str = ',mq=on,vectors={}'.format(
+ int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2)
+ else:
+ queue_str, mq_vector_str = '', ''
+
self._cmd += ['-chardev',
'socket,id=char' + if1 +
',path=' + S.getValue('OVS_VAR_DIR') +
@@ -48,19 +56,20 @@ class QemuDpdkVhostUser(IVnfQemu):
'dpdkvhostuser' + if2,
'-netdev',
'type=vhost-user,id=' + net1 +
- ',chardev=char' + if1 + ',vhostforce',
+ ',chardev=char' + if1 + ',vhostforce' + queue_str,
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET1_MAC')[self._number] +
',netdev=' + net1 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
'-netdev',
'type=vhost-user,id=' + net2 +
- ',chardev=char' + if2 + ',vhostforce',
+ ',chardev=char' + if2 + ',vhostforce' + queue_str,
'-device',
'virtio-net-pci,mac=' +
S.getValue('GUEST_NET2_MAC')[self._number] +
',netdev=' + net2 + ',csum=off,gso=off,' +
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off',
+ 'guest_tso4=off,guest_tso6=off,guest_ecn=off' +
+ mq_vector_str,
]
-
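
Worked example of the queue and vector strings above: virtio-net multi-queue
needs 2*N+2 MSI-X vectors (one RX and one TX vector per queue pair, plus config
and control), so with GUEST_NIC_QUEUES = 2 each guest NIC gets:

    GUEST_NIC_QUEUES = 2
    queue_str = ',queues={}'.format(GUEST_NIC_QUEUES)
    mq_vector_str = ',mq=on,vectors={}'.format(int(GUEST_NIC_QUEUES) * 2 + 2)
    print(queue_str)       # ,queues=2
    print(mq_vector_str)   # ,mq=on,vectors=6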
diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py
index 82b952de..2d424bc5 100644
--- a/vswitches/ovs_dpdk_vhost.py
+++ b/vswitches/ovs_dpdk_vhost.py
@@ -109,8 +109,11 @@ class OvsDpdkVhost(IVSwitchOvs):
dpdk_count = self._get_port_count('type=dpdk')
port_name = 'dpdk' + str(dpdk_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdk']
+ # multi-queue enable
+ if int(settings.getValue('VSWITCH_MULTI_QUEUES')):
+ params += ['options:n_rxq={}'.format(
+ settings.getValue('VSWITCH_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
-
return (port_name, of_port)
def add_vport(self, switch_name):
@@ -130,7 +133,10 @@ class OvsDpdkVhost(IVSwitchOvs):
vhost_count = self._get_port_count('type=dpdkvhostuser')
port_name = 'dpdkvhostuser' + str(vhost_count)
params = ['--', 'set', 'Interface', port_name, 'type=dpdkvhostuser']
-
+ # multi-queue enable
+ if int(settings.getValue('VSWITCH_MULTI_QUEUES')):
+ params += ['options:n_rxq={}'.format(
+ settings.getValue('VSWITCH_MULTI_QUEUES'))]
of_port = bridge.add_port(port_name, params)
return (port_name, of_port)
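
With VSWITCH_MULTI_QUEUES = 2 the parameter list built above expands as sketched
below; bridge.add_port() appends these to the ovs-vsctl add-port command.
Editorial illustration only; 'br0' is an example bridge name:

    port_name = 'dpdkvhostuser0'
    params = ['--', 'set', 'Interface', port_name, 'type=dpdkvhostuser',
              'options:n_rxq=2']
    print(' '.join(['ovs-vsctl', 'add-port', 'br0', port_name] + params))
    # ovs-vsctl add-port br0 dpdkvhostuser0 -- set Interface dpdkvhostuser0
    #   type=dpdkvhostuser options:n_rxq=2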