Diffstat (limited to 'yardstick/benchmark/contexts/standalone/ovs_dpdk.py')
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py  131
1 file changed, 95 insertions(+), 36 deletions(-)
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index b9e66a481..c6e19f614 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -20,11 +20,13 @@ import re
import time
from yardstick import ssh
-from yardstick.network_services.utils import get_nsb_option
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
+from yardstick.common import utils as common_utils
from yardstick.network_services import utils
+from yardstick.network_services.utils import get_nsb_option
LOG = logging.getLogger(__name__)
@@ -32,12 +34,12 @@ LOG = logging.getLogger(__name__)
MAIN_BRIDGE = 'br0'
-class OvsDpdkContext(Context):
+class OvsDpdkContext(base.Context):
""" This class handles OVS standalone nodes - VM running on Non-Managed NFVi
Configuration: ovs_dpdk
"""
- __context_type__ = "StandaloneOvsDpdk"
+ __context_type__ = contexts.CONTEXT_STANDALONEOVSDPDK
SUPPORTED_OVS_TO_DPDK_MAP = {
'2.6.0': '16.07.1',
@@ -45,7 +47,8 @@ class OvsDpdkContext(Context):
'2.7.0': '16.11.1',
'2.7.1': '16.11.2',
'2.7.2': '16.11.3',
- '2.8.0': '17.05.2'
+ '2.8.0': '17.05.2',
+ '2.8.1': '17.05.2'
}
DEFAULT_OVS = '2.6.0'
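The map above pairs each supported OVS release with a matching DPDK release; the new '2.8.1' entry reuses DPDK 17.05.2. A minimal lookup sketch, assuming a hypothetical ovs_properties version block (only the map entries visible in this hunk are listed):

# Standalone sketch; 'ovs_properties' below is an assumed example value,
# not taken from a real test case.
SUPPORTED_OVS_TO_DPDK_MAP = {
    '2.6.0': '16.07.1',
    '2.7.0': '16.11.1',
    '2.7.1': '16.11.2',
    '2.7.2': '16.11.3',
    '2.8.0': '17.05.2',
    '2.8.1': '17.05.2',
}
DEFAULT_OVS = '2.6.0'

ovs_properties = {'version': {'ovs': '2.8.1'}}
ovs_ver = ovs_properties.get('version', {}).get('ovs', DEFAULT_OVS)
print(SUPPORTED_OVS_TO_DPDK_MAP[ovs_ver])   # -> 17.05.2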
@@ -71,6 +74,11 @@ class OvsDpdkContext(Context):
self.wait_for_vswitchd = 10
super(OvsDpdkContext, self).__init__()
+ def get_dpdk_socket_mem_size(self, socket_id):
+ """Get the size of OvS DPDK socket memory (Mb)"""
+ ram = self.ovs_properties.get("ram", {})
+ return ram.get('socket_%d' % (socket_id), 2048)
+
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(OvsDpdkContext, self).init(attrs)
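The new get_dpdk_socket_mem_size() helper centralises the per-NUMA-socket memory lookup (values in MB), so the same numbers can later be summed when sizing hugepages. A minimal standalone sketch, assuming a hypothetical ovs_properties 'ram' block:

# Assumed example config; real values come from the context's ovs_properties.
ovs_properties = {'ram': {'socket_0': 2048, 'socket_1': 4096}}

def get_dpdk_socket_mem_size(socket_id, default=2048):
    # Mirrors the helper above: fall back to 2048 MB when a socket is not set.
    return ovs_properties.get('ram', {}).get('socket_%d' % socket_id, default)

print(get_dpdk_socket_mem_size(0))   # 2048
print(get_dpdk_socket_mem_size(1))   # 4096
print(get_dpdk_socket_mem_size(2))   # 2048 (default)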
@@ -131,9 +139,6 @@ class OvsDpdkContext(Context):
if pmd_cpu_mask:
pmd_mask = pmd_cpu_mask
- socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
- socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")
-
ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"
@@ -141,16 +146,23 @@ class OvsDpdkContext(Context):
if lcore_mask:
lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)
+ max_idle = self.ovs_properties.get("max_idle", '')
+ if max_idle:
+ max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle)
+
cmd_list = [
"mkdir -p /usr/local/var/run/openvswitch",
"mkdir -p {}".format(os.path.dirname(log_path)),
- "ovsdb-server --remote=punix:/{0}/{1} --pidfile --detach".format(vpath,
- ovs_sock_path),
+ ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640"
+ " --pidfile --detach").format(vpath, ovs_sock_path),
ovs_other_config.format("--no-wait ", "dpdk-init=true"),
- ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
+ ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%d,%d'" % (
+ self.get_dpdk_socket_mem_size(0),
+ self.get_dpdk_socket_mem_size(1))),
lcore_mask,
detach_cmd.format(vpath, ovs_sock_path, log_path),
ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
+ max_idle,
]
for cmd in cmd_list:
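For reference, a standalone sketch of the strings the cmd_list above expands to, with assumed example values for the socket memory, pmd-cpu-mask and max-idle (the real context runs these over SSH on the NFVi host):

ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"

socket_mem = (2048, 2048)   # assumed get_dpdk_socket_mem_size(0) / (1)
pmd_mask = '0x6'            # assumed pmd_cpu_mask
max_idle = 30000            # assumed ovs_properties['max_idle'], in ms

for cmd in [
        ovs_other_config.format("--no-wait ", "dpdk-init=true"),
        ovs_other_config.format("--no-wait ",
                                "dpdk-socket-mem='%d,%d'" % socket_mem),
        ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
        ovs_other_config.format("", "max-idle=%s" % max_idle)]:
    print(cmd)
# ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
# ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem='2048,2048'
# ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=0x6
# ovs-vsctl set Open_vSwitch . other_config:max-idle=30000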
@@ -160,13 +172,12 @@ class OvsDpdkContext(Context):
def setup_ovs_bridge_add_flows(self):
dpdk_args = ""
- dpdk_list = []
vpath = self.ovs_properties.get("vpath", "/usr/local")
version = self.ovs_properties.get('version', {})
ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
- 'set Interface {port} type={type_}{dpdk_args}')
- ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}'
+ 'set Interface {port} type={type_}{dpdk_args}'
+ '{dpdk_rxq}{pmd_rx_aff}')
chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'
cmd_list = [
@@ -175,27 +186,43 @@ class OvsDpdkContext(Context):
'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
format(MAIN_BRIDGE)
]
+ dpdk_rxq = ""
+ queues = self.ovs_properties.get("queues")
+ if queues:
+ dpdk_rxq = " options:n_rxq={queue}".format(queue=queues)
- ordered_network = collections.OrderedDict(self.networks)
+ # Sorting the array to make sure we execute dpdk0... in the order
+ ordered_network = collections.OrderedDict(
+ sorted(self.networks.items(), key=lambda t: t[1].get('port_num', 0)))
+ pmd_rx_aff_ports = self.ovs_properties.get("dpdk_pmd-rxq-affinity", {})
for index, vnf in enumerate(ordered_network.values()):
if ovs_ver >= [2, 7, 0]:
dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
- dpdk_list.append(ovs_add_port.format(
+ affinity = pmd_rx_aff_ports.get(vnf.get("port_num", -1), "")
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
+ cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
- type_='dpdk', dpdk_args=dpdk_args))
- dpdk_list.append(ovs_add_queue.format(
- port='dpdk%s' % vnf.get("port_num", 0),
- queue=self.ovs_properties.get("queues", 1)))
-
- # Sorting the array to make sure we execute dpdk0... in the order
- list.sort(dpdk_list)
- cmd_list.extend(dpdk_list)
+ type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
# Need to do two for loop to maintain the dpdk/vhost ports.
+ pmd_rx_aff_ports = self.ovs_properties.get("vhost_pmd-rxq-affinity",
+ {})
for index, _ in enumerate(ordered_network):
+ affinity = pmd_rx_aff_ports.get(index)
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
- type_='dpdkvhostuser', dpdk_args=""))
+ type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
format(MAIN_BRIDGE))
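A small sketch of the single add-port command the template above produces for one physical DPDK port, using assumed example values for the devargs, queue count and rxq affinity (output shown wrapped; the real command is one line):

ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
                'set Interface {port} type={type_}{dpdk_args}'
                '{dpdk_rxq}{pmd_rx_aff}')

print(ovs_add_port.format(
    br='br0', port='dpdk0', type_='dpdk',
    dpdk_args=" options:dpdk-devargs=0000:05:00.0",          # assumed phy_port PCI
    dpdk_rxq=" options:n_rxq=4",                              # assumed queues: 4
    pmd_rx_aff=' other_config:pmd-rxq-affinity="0:1,1:2"'))   # assumed affinity map
# ovs-vsctl add-port br0 dpdk0 -- set Interface dpdk0 type=dpdk
#     options:dpdk-devargs=0000:05:00.0 options:n_rxq=4
#     other_config:pmd-rxq-affinity="0:1,1:2"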
@@ -235,7 +262,6 @@ class OvsDpdkContext(Context):
def check_ovs_dpdk_env(self):
self.cleanup_ovs_dpdk_env()
- self._check_hugepages()
version = self.ovs_properties.get("version", {})
ovs_ver = version.get("ovs", self.DEFAULT_OVS)
@@ -298,13 +324,28 @@ class OvsDpdkContext(Context):
for vm in self.vm_names:
model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ # self.nfvi_host always contains only one host
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
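The physical-node lookup above relies on the server name carrying the context name as a suffix. A minimal illustration, assuming split_host_name() splits 'server.context' on the first dot (all names below are hypothetical):

def split_host_name(name):
    # Assumed behaviour: 'vnf_0.ovs_ctx' -> ('vnf_0', 'ovs_ctx'),
    # plain 'vnf_0' -> ('vnf_0', None).
    node, sep, ctx = name.partition('.')
    return node, (ctx if sep else None)

servers = {'vnf_0': {}}                 # assumed context servers
nfvi_host = [{'name': 'ovs_host'}]      # assumed single NFVi host
ctx_name = 'ovs_ctx'                    # assumed context name

node, ctx = split_host_name('vnf_0.ovs_ctx')
if ctx == ctx_name and node in servers:
    print('{}.{}'.format(nfvi_host[0]['name'], ctx_name))   # -> ovs_host.ovs_ctx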
@@ -360,6 +401,7 @@ class OvsDpdkContext(Context):
def _enable_interfaces(self, index, vfs, xml_str):
vpath = self.ovs_properties.get("vpath", "/usr/local")
+ queue = self.ovs_properties.get("queues", 1)
vf = self.networks[vfs[0]]
port_num = vf.get('port_num', 0)
vpci = utils.PciAddress(vf['vpci'].strip())
@@ -368,23 +410,31 @@ class OvsDpdkContext(Context):
vf['vpci'] = \
"{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
return model.Libvirt.add_ovs_interface(
- vpath, port_num, vf['vpci'], vf['mac'], xml_str)
+ vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue)
def setup_ovs_dpdk_context(self):
nodes = []
self.configure_nics_for_ovs_dpdk()
+ hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
+ common_utils.setup_hugepages(self.connection, (hp_total_mb + \
+ self.get_dpdk_socket_mem_size(0) + \
+ self.get_dpdk_socket_mem_size(1)) * 1024)
+
+ self._check_hugepages()
+
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_ovs_%d.xml' % index
- vm_name = "vm_%d" % index
+ vm_name = "vm-%d" % index
+ cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
# 1. Check and delete VM if already exists
model.Libvirt.check_if_vm_exists_and_delete(vm_name,
self.connection)
xml_str, mac = model.Libvirt.build_vm_xml(
- self.connection, self.vm_flavor, vm_name, index)
+ self.connection, self.vm_flavor, vm_name, index, cdrom_img)
# 2: Cleanup already available VMs
for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
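A worked example of the hugepage sizing introduced above, assuming one server with the default 4096 MB flavor RAM and 2048 MB of DPDK socket memory per NUMA node (setup_hugepages() is given the target in KB):

vm_flavor_ram_mb = 4096            # assumed int(vm_flavor.get('ram', '4096'))
num_servers = 1                    # assumed len(self.servers)
socket_mem_mb = (2048, 2048)       # assumed get_dpdk_socket_mem_size(0) / (1)

hp_total_mb = vm_flavor_ram_mb * num_servers
hugepages_kb = (hp_total_mb + sum(socket_mem_mb)) * 1024
print(hugepages_kb)                # 8388608 KB, i.e. 8 GB of hugepages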
@@ -395,16 +445,25 @@ class OvsDpdkContext(Context):
model.Libvirt.write_file(cfg, xml_str)
self.connection.put(cfg, cfg)
+ node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+ self.networks,
+ self.host_mgmt.get('ip'),
+ key, vnf, mac)
+ # Generate public/private keys if password or private key file is not provided
+ node = model.StandaloneContextHelper.check_update_key(self.connection,
+ node,
+ vm_name,
+ self.name,
+ cdrom_img,
+ mac)
+
+ # store vnf node details
+ nodes.append(node)
+
# NOTE: launch through libvirt
LOG.info("virsh create ...")
model.Libvirt.virsh_create_vm(self.connection, cfg)
self.vm_names.append(vm_name)
- # build vnf node details
- nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
- self.networks,
- self.host_mgmt.get('ip'),
- key, vnf, mac))
-
return nodes