author    ahothan <ahothan@cisco.com> 2019-05-28 16:13:43 -0700
committer ahothan <ahothan@cisco.com> 2019-05-29 09:39:48 -0700
commit    cdbb08859533a4c3e698735ab2ee98d2532aa1c8 (patch)
tree      1bf8709b5e708bbbc31e717ae777fc94b1cf0638
parent    7e9ef3835fdc7dc95c42e32fc4d0f804e90efac5 (diff)

NFVBENCH-136 Add support for multiqueue for PVP/PVVP chains (tag: 3.3.0)

Change-Id: Ia6bc2b1f97ecdf1d94206f9cda46e62910eb6546
Signed-off-by: ahothan <ahothan@cisco.com>
-rwxr-xr-x  nfvbench/cfg.default.yaml                                    | 26
-rw-r--r--  nfvbench/chaining.py                                         |  7
-rw-r--r--  nfvbench/compute.py                                          | 18
-rw-r--r--  nfvbench/nfvbench.py                                         |  8
-rw-r--r--  nfvbench/nfvbenchvm/nfvbenchvm.conf                          |  1
-rwxr-xr-x  nfvbenchvm/dib/build-image.sh                                |  2
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local  | 45
-rw-r--r--  nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf   |  1
-rw-r--r--  test/test_chains.py                                          |  2
9 files changed, 97 insertions(+), 13 deletions(-)
diff --git a/nfvbench/cfg.default.yaml b/nfvbench/cfg.default.yaml
index eb5fa11..b2b9f49 100755
--- a/nfvbench/cfg.default.yaml
+++ b/nfvbench/cfg.default.yaml
@@ -51,9 +51,9 @@ vm_image_file:
# Otherwise, a new flavor will be created with attributes listed below.
flavor_type: 'nfvbench.medium'
-# Custom flavor attributes
+# Custom flavor attributes for the test VM
flavor:
- # Number of vCPUs for the flavor
+ # Number of vCPUs for the flavor, must be at least 2!
vcpus: 2
# Memory for the flavor in MB
ram: 4096
@@ -68,6 +68,21 @@ flavor:
"hw:cpu_policy": dedicated
"hw:mem_page_size": large
+# Enable multiqueue for all test VM interfaces (PVP and PVVP only).
+# When enabled, the multiqueue property (hw_vif_multiqueue_enabled='true')
+# will be set on the test VM image.
+# The number of queues per interface will be set to the number of vCPUs
+# configured for the VM.
+# By default there is only 1 queue per interface.
+# The maximum allowed number of queues per interface is 8.
+# The valid range for this parameter is [1..min(8, vcpu_count)].
+# When multiqueue is used, the recommended setting is the same value as the
+# number of vCPUs used, up to a maximum of 8 queues.
+# Setting it to a lower value than the vCPU count also works. For example, with
+# 4 vCPUs and vif_multiqueue_size set to 2, OpenStack will create 4 queues per
+# interface but the test VM will only use the first 2 queues.
+vif_multiqueue_size: 1
+
# Name of the availability zone to use for the test VMs
# Must be one of the zones listed by 'nova availability-zone-list'
# availability_zone: 'nova'
@@ -398,9 +413,10 @@ idle_interfaces_per_vm: 0
# If service_chain_shared_net is true, the options below will be ignored
# and no idle interfaces will be added.
idle_networks:
- # Prefix for all idle networks
+ # Prefix for all idle networks; the final name appends the chain ID and the idle index
+ # e.g. "nfvbench-idle-net.0.4" for chain 0, idle index 4
name: 'nfvbench-idle-net'
- # Prefix for all idle subnetworks
+ # Subnet name to use for all idle subnetworks
subnet: 'nfvbench-idle-subnet'
# CIDR to use for all idle networks (value should not matter)
cidr: '192.169.1.0/24'
@@ -408,7 +424,7 @@ idle_networks:
network_type: 'vlan'
# segmentation ID to use for the network attached to the idle virtual interfaces
# vlan: leave empty to let neutron pick the segmentation ID
- # vxlan: must specify the VNI value to be used (cannot be empty)
+ # vxlan: must specify the starting VNI value to be used (cannot be empty)
# Note that NFVbench will use as many consecutive segmentation IDs as needed.
# For example, for 4 PVP chains and 8 idle
# interfaces per VM, NFVbench will use 32 consecutive values of segmentation ID
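
The valid range [1..min(8, vcpu_count)] documented above combines two independent limits: the libvirt/qemu cap of 8 virtio queue pairs and the per-vCPU queue creation done by OpenStack. A minimal sketch of the resulting behavior (hypothetical helper, for illustration only; not part of this change):

    def queues_used(vif_multiqueue_size, vcpus):
        # Per the config comments above: OpenStack creates one queue per vCPU
        # when hw_vif_multiqueue_enabled='true', but the test VM only uses
        # the first vif_multiqueue_size queues (libvirt/qemu cap of 8 applies)
        if not 1 <= vif_multiqueue_size <= min(8, vcpus):
            raise ValueError('vif_multiqueue_size must be in [1..min(8, vcpus)]')
        return vif_multiqueue_size

    print(queues_used(2, 4))  # 2: OpenStack creates 4 queues, the VM uses 2
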
diff --git a/nfvbench/chaining.py b/nfvbench/chaining.py
index 60f3832..898e9ea 100644
--- a/nfvbench/chaining.py
+++ b/nfvbench/chaining.py
@@ -421,7 +421,8 @@ class ChainVnf(object):
'vnf_gateway1_cidr': g1cidr,
'vnf_gateway2_cidr': g2cidr,
'tg_mac1': remote_mac_pair[0],
- 'tg_mac2': remote_mac_pair[1]
+ 'tg_mac2': remote_mac_pair[1],
+ 'vif_mq_size': config.vif_multiqueue_size
}
return content.format(**vm_config)
@@ -1088,6 +1089,10 @@ class ChainManager(object):
LOG.info('Image %s successfully uploaded.', self.image_name)
self.image_instance = self.comp.find_image(self.image_name)
+ # The image multiqueue property must be set according to the vif_multiqueue_size
+ # config value (which defaults to 1, i.e. disabled)
+ self.comp.image_set_multiqueue(self.image_instance, self.config.vif_multiqueue_size > 1)
+
def _ensure_instances_active(self):
instances = []
for chain in self.chains:
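
The vif_mq_size key added to vm_config above is consumed by the plain str.format() call on the VM config template (the nfvbenchvm.conf hunk below adds the matching {vif_mq_size} placeholder). A minimal sketch of that substitution, using a shortened, hypothetical template string:

    # Shortened, hypothetical template; the real one is
    # nfvbench/nfvbenchvm/nfvbenchvm.conf with many more fields.
    content = "TG_MAC1={tg_mac1}\nVIF_MQ_SIZE={vif_mq_size}\n"
    vm_config = {'tg_mac1': 'fa:16:3e:00:00:01', 'vif_mq_size': 2}
    print(content.format(**vm_config))
    # TG_MAC1=fa:16:3e:00:00:01
    # VIF_MQ_SIZE=2
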
diff --git a/nfvbench/compute.py b/nfvbench/compute.py
index 556ade4..84e3774 100644
--- a/nfvbench/compute.py
+++ b/nfvbench/compute.py
@@ -95,6 +95,24 @@ class Compute(object):
return True
+ def image_multiqueue_enabled(self, img):
+ """Check if multiqueue property is enabled on given image."""
+ try:
+ return img['hw_vif_multiqueue_enabled'] == 'true'
+ except KeyError:
+ return False
+
+ def image_set_multiqueue(self, img, enabled):
+ """Set multiqueue property as enabled or disabled on given image."""
+ cur_mqe = self.image_multiqueue_enabled(img)
+ LOG.info('Image %s hw_vif_multiqueue_enabled property is "%s"',
+ img.name, str(cur_mqe).lower())
+ if cur_mqe != enabled:
+ mqe = str(enabled).lower()
+ self.glance_client.images.update(img.id, hw_vif_multiqueue_enabled=mqe)
+ img['hw_vif_multiqueue_enabled'] = mqe
+ LOG.info('Image %s hw_vif_multiqueue_enabled property changed to "%s"', img.name, mqe)
+
# Create a server instance with name vmname
# and check that it gets into the ACTIVE state
def create_server(self, vmname, image, flavor, key_name,
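
Note that image_set_multiqueue only calls glance when the current property value differs from the requested one, so repeated runs with an unchanged configuration leave the image untouched. A hedged usage sketch (the client wiring and image name are assumptions, not shown in this diff):

    # Assumes an initialized Compute instance (comp) and an uploaded image
    # whose glance representation supports dict-style property access.
    img = comp.find_image('nfvbenchvm_centos-0.8')
    comp.image_set_multiqueue(img, True)     # updates glance only if needed
    assert comp.image_multiqueue_enabled(img)
    comp.image_set_multiqueue(img, True)     # second call is a no-op
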
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index e585154..b2163ba 100644
--- a/nfvbench/nfvbench.py
+++ b/nfvbench/nfvbench.py
@@ -203,6 +203,9 @@ class NFVBench(object):
if config.openrc_file:
config.openrc_file = os.path.expanduser(config.openrc_file)
+ if config.flavor.vcpus < 2:
+ raise Exception("Flavor vcpus must be >= 2")
+
config.ndr_run = (not config.no_traffic and
'ndr' in config.rate.strip().lower().split('_'))
@@ -224,6 +227,11 @@ class NFVBench(object):
raise Exception('Please provide existing path for storing results in JSON file. '
'Path used: {path}'.format(path=config.std_json_path))
+ # Check that the multiqueue size is between 1 and 8 (the maximum allowed by libvirt/qemu)
+ if config.vif_multiqueue_size < 1 or config.vif_multiqueue_size > 8:
+ raise Exception('vif_multiqueue_size (%d) must be in [1..8]' %
+ config.vif_multiqueue_size)
+
# VxLAN sanity checks
if config.vxlan:
if config.vlan_tagging:
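
The two new sanity checks are related: the guest's rc.local (below) dedicates core 0 to admin work, so at least 2 vCPUs are needed to have any worker core at all, and libvirt/qemu cap virtio multiqueue at 8 queue pairs. A standalone sketch of the combined validation (hypothetical function name; the real checks run inline during config parsing):

    def validate_config(vcpus, vif_multiqueue_size):
        # mirrors the two checks added to nfvbench.py above
        if vcpus < 2:
            raise Exception('Flavor vcpus must be >= 2')
        if vif_multiqueue_size < 1 or vif_multiqueue_size > 8:
            raise Exception('vif_multiqueue_size (%d) must be in [1..8]'
                            % vif_multiqueue_size)

    validate_config(2, 1)  # the defaults pass
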
diff --git a/nfvbench/nfvbenchvm/nfvbenchvm.conf b/nfvbench/nfvbenchvm/nfvbenchvm.conf
index 3bc6ace..a8e2551 100644
--- a/nfvbench/nfvbenchvm/nfvbenchvm.conf
+++ b/nfvbench/nfvbenchvm/nfvbenchvm.conf
@@ -9,3 +9,4 @@ TG_NET1={tg_net1}
TG_NET2={tg_net2}
TG_GATEWAY1_IP={tg_gateway1_ip}
TG_GATEWAY2_IP={tg_gateway2_ip}
+VIF_MQ_SIZE={vif_mq_size}
diff --git a/nfvbenchvm/dib/build-image.sh b/nfvbenchvm/dib/build-image.sh
index 2291844..b021484 100755
--- a/nfvbenchvm/dib/build-image.sh
+++ b/nfvbenchvm/dib/build-image.sh
@@ -11,7 +11,7 @@ set -e
gs_url=artifacts.opnfv.org/nfvbench/images
# image version number
-__version__=0.7
+__version__=0.8
image_name=nfvbenchvm_centos-$__version__
# if image exists skip building
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
index 94fbd74..59cb4a1 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local
@@ -19,10 +19,37 @@ echo "Generating configurations for forwarder..."
eval $(cat $NFVBENCH_CONF)
touch /nfvbench_configured.flag
-CPU_CORES=`grep -c ^processor /proc/cpuinfo`
-CPU_MASKS=0x`echo "obase=16; 2 ^ $CPU_CORES - 1" | bc`
-WORKER_CORES=`expr $CPU_CORES - 1`
+# We assume there are at least 2 cores available for the VM
+CPU_CORES=$(grep -c ^processor /proc/cpuinfo)
+# We need at least 1 admin core.
+if [ $CPU_CORES -le 2 ]; then
+ ADMIN_CORES=1
+else
+ # If the number of cores is even, we reserve 2 non-worker cores (the admin
+ # core plus one unused core) so that the number of workers is either 1
+ # (when CPU_CORES is 2, handled above) or always even
+ if (( $CPU_CORES % 2 )); then
+ ADMIN_CORES=1
+ else
+ ADMIN_CORES=2
+ fi
+fi
+# 2 vcpus: AW (core 0: Admin, core 1: Worker)
+# 3 vcpus: AWW (core 0: Admin, core 1,2: Worker)
+# 4 vcpus: AWWU (core 0: Admin, core 1,2: Worker, core 3: Unused)
+# 5 vcpus: AWWWW
+# 6 vcpus: AWWWWU
+WORKER_CORES=$(expr $CPU_CORES - $ADMIN_CORES)
+# Worker cores are all cores except the admin core (core 0) and the possible unused core
+# AW -> 1
+# AWW -> 1,2
+# AWWU -> 1,2
+WORKER_CORE_LIST=$(seq -s, 1 $WORKER_CORES)
+# always use all cores
+CORE_MASK=0x$(echo "obase=16; 2 ^ $CPU_CORES - 1" | bc)
+
+logger "NFVBENCHVM: CPU_CORES=$CPU_CORES, ADMIN_CORES=$ADMIN_CORES, WORKER_CORES=$WORKER_CORES ($WORKER_CORE_LIST)"
# CPU isolation optimizations
echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
@@ -33,7 +60,9 @@ for irq in `ls /proc/irq/`; do
echo 1 > /proc/irq/$irq/smp_affinity
fi
done
-tuna -c $(seq -s, 1 1 $WORKER_CORES) --isolate
+
+# Isolate all cores that are reserved for workers
+tuna -c $WORKER_CORE_LIST --isolate
NET_PATH=/sys/class/net
@@ -52,6 +81,8 @@ get_pci_address() {
pci_addr=$(readlink $NET_PATH/$f | cut -d "/" -f5)
# some virtual interfaces match on MAC and do not have a PCI address
if [ "$pci_addr" -a "$pci_addr" != "N/A" ]; then
+ # Found matching interface
+ logger "NFVBENCHVM: found interface $f ($pci_addr) matching $mac"
break
else
pci_addr=""
@@ -93,9 +124,10 @@ if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
/dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_1
/dpdk/dpdk-devbind.py -b igb_uio $PCI_ADDRESS_2
screen -dmSL testpmd /dpdk/testpmd \
- -c $CPU_MASKS \
+ -c $CORE_MASK \
-n 4 \
-- \
+ --nb-ports=2 \
--burst=32 \
--txd=256 \
--rxd=1024 \
@@ -103,6 +135,8 @@ if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
--eth-peer=1,$TG_MAC2 \
--forward-mode=mac \
--nb-cores=$WORKER_CORES \
+ --txq=$VIF_MQ_SIZE \
+ --rxq=$VIF_MQ_SIZE \
--max-pkt-len=9000 \
--cmdline-file=/dpdk/testpmd_cmd.txt
echo "testpmd running in screen 'testpmd'"
@@ -115,6 +149,7 @@ if [ $PCI_ADDRESS_1 ] && [ $PCI_ADDRESS_2 ]; then
sed -i "s/{{PCI_ADDRESS_1}}/$PCI_ADDRESS_1/g" /etc/vpp/startup.conf
sed -i "s/{{PCI_ADDRESS_2}}/$PCI_ADDRESS_2/g" /etc/vpp/startup.conf
sed -i "s/{{WORKER_CORES}}/$WORKER_CORES/g" /etc/vpp/startup.conf
+ sed -i "s/{{VIF_MQ_SIZE}}/${VIF_MQ_SIZE}/g" /etc/vpp/startup.conf
service vpp start
sleep 10
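
The core-allocation rule in the rc.local changes maps every vCPU count to one admin core, a worker set starting at core 1, and at most one unused core. A Python rendering of the same arithmetic (a sketch that mirrors the shell logic; not shipped in the image):

    def core_layout(cpu_cores):
        # Mirrors the ADMIN_CORES / WORKER_CORES / WORKER_CORE_LIST logic
        if cpu_cores <= 2:
            admin = 1
        else:
            admin = 1 if cpu_cores % 2 else 2
        workers = cpu_cores - admin
        return admin, workers, ','.join(str(c) for c in range(1, workers + 1))

    for n in range(2, 7):
        print(n, core_layout(n))
    # 2 (1, 1, '1')         AW
    # 3 (1, 2, '1,2')       AWW
    # 4 (2, 2, '1,2')       AWWU
    # 5 (1, 4, '1,2,3,4')   AWWWW
    # 6 (2, 4, '1,2,3,4')   AWWWWU
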
diff --git a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
index e3f1486..ce5ab45 100644
--- a/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
+++ b/nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf
@@ -14,6 +14,7 @@ dpdk {
dev default {
num-rx-desc 1024
num-tx-desc 1024
+ num-rx-queues {{VIF_MQ_SIZE}}
}
socket-mem 1024
dev {{PCI_ADDRESS_1}}
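
The {{VIF_MQ_SIZE}} placeholder is resolved at boot time by the sed line added to rc.local above. The equivalent substitution in Python, on a trimmed fragment of the dev default block (illustration only):

    import re

    conf = "num-rx-desc 1024\nnum-tx-desc 1024\nnum-rx-queues {{VIF_MQ_SIZE}}\n"
    print(re.sub(r'\{\{VIF_MQ_SIZE\}\}', '2', conf))
    # num-rx-desc 1024
    # num-tx-desc 1024
    # num-rx-queues 2
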
diff --git a/test/test_chains.py b/test/test_chains.py
index f7a2ce3..5490dfc 100644
--- a/test/test_chains.py
+++ b/test/test_chains.py
@@ -103,7 +103,7 @@ def test_chain_runner_ext_no_openstack():
def _mock_find_image(self, image_name):
- return True
+ return MagicMock()
@patch.object(Compute, 'find_image', _mock_find_image)
@patch('nfvbench.chaining.Client')
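
find_image must now return something richer than True: the new multiqueue code reads the image's name and id attributes and uses dict-style access on it, all of which MagicMock supports out of the box (shown here with unittest.mock; the test suite may import MagicMock from the standalone mock package):

    from unittest.mock import MagicMock

    img = MagicMock()
    img['hw_vif_multiqueue_enabled']   # ok: MagicMock implements __getitem__
    _ = img.name, img.id               # ok: attributes are auto-created
    # a bare True would raise TypeError on the ['...'] access
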