Diffstat (limited to 'yardstick/benchmark/contexts')
-rw-r--r--   yardstick/benchmark/contexts/__init__.py               20
-rw-r--r--   yardstick/benchmark/contexts/base.py                   88
-rw-r--r--   yardstick/benchmark/contexts/dummy.py                  13
-rw-r--r--   yardstick/benchmark/contexts/heat.py                  129
-rw-r--r--   yardstick/benchmark/contexts/kubernetes.py            126
-rw-r--r--   yardstick/benchmark/contexts/node.py                   66
-rw-r--r--   yardstick/benchmark/contexts/standalone/model.py      168
-rw-r--r--   yardstick/benchmark/contexts/standalone/ovs_dpdk.py   131
-rw-r--r--   yardstick/benchmark/contexts/standalone/sriov.py       62
9 files changed, 634 insertions(+), 169 deletions(-)
diff --git a/yardstick/benchmark/contexts/__init__.py b/yardstick/benchmark/contexts/__init__.py
index e69de29bb..d50f08cc3 100644
--- a/yardstick/benchmark/contexts/__init__.py
+++ b/yardstick/benchmark/contexts/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+CONTEXT_DUMMY = "Dummy"
+CONTEXT_HEAT = "Heat"
+CONTEXT_KUBERNETES = "Kubernetes"
+CONTEXT_NODE = "Node"
+CONTEXT_STANDALONEOVSDPDK = "StandaloneOvsDpdk"
+CONTEXT_STANDALONESRIOV = "StandaloneSriov"
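These constants centralise the context type names that were previously hard-coded as string literals in each context class. A minimal sketch of how they are consumed, using DummyContext only because it is the simplest concrete subclass (the assertion is illustrative):

    from yardstick.benchmark import contexts
    from yardstick.benchmark.contexts import base
    from yardstick.benchmark.contexts import dummy  # importing registers the subclass

    # Context.get_cls() matches a task-file 'type' value against __context_type__,
    # which now refers to contexts.CONTEXT_DUMMY instead of the literal "Dummy".
    context_cls = base.Context.get_cls(contexts.CONTEXT_DUMMY)
    assert context_cls is dummy.DummyContext
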
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index ae8319e37..f3f5879eb 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -6,17 +6,24 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
import abc
+import errno
import six
+import os
-import yardstick.common.utils as utils
+from yardstick.common import constants
+from yardstick.common import utils
+from yardstick.common import yaml_loader
+from yardstick.common.constants import YARDSTICK_ROOT_PATH
class Flags(object):
"""Class to represent the status of the flags in a context"""
_FLAGS = {'no_setup': False,
- 'no_teardown': False}
+ 'no_teardown': False,
+ 'os_cloud_config': constants.OS_CLOUD_DEFAULT_CONFIG}
def __init__(self, **kwargs):
for name, value in self._FLAGS.items():
@@ -42,20 +49,13 @@ class Context(object):
list = []
SHORT_TASK_ID_LEN = 8
- @staticmethod
- def split_name(name, sep='.'):
- try:
- name_iter = iter(name.split(sep))
- except AttributeError:
- # name is not a string
- return None, None
- return next(name_iter), next(name_iter, None)
-
- def __init__(self):
+ def __init__(self, host_name_separator='.'):
Context.list.append(self)
self._flags = Flags()
self._name = None
self._task_id = None
+ self.file_path = None
+ self._host_name_separator = host_name_separator
def init(self, attrs):
"""Initiate context"""
@@ -65,6 +65,35 @@ class Context(object):
self._name_task_id = '{}-{}'.format(
self._name, self._task_id[:self.SHORT_TASK_ID_LEN])
+ def split_host_name(self, name):
+ if (isinstance(name, six.string_types)
+ and self._host_name_separator in name):
+ return tuple(name.split(self._host_name_separator, 1))
+ return None, None
+
+ def read_pod_file(self, attrs):
+ self.file_path = file_path = attrs.get("file", "pod.yaml")
+ try:
+ cfg = yaml_loader.read_yaml_file(self.file_path)
+ except IOError as io_error:
+ if io_error.errno != errno.ENOENT:
+ raise
+
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+ cfg = yaml_loader.read_yaml_file(self.file_path)
+
+ for node in cfg["nodes"]:
+ node["ctx_type"] = self.__context_type__
+
+ self.nodes.extend(cfg["nodes"])
+ self.controllers.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Controller"])
+ self.computes.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Compute"])
+ self.baremetals.extend([node for node in cfg["nodes"]
+ if node.get("role") == "Baremetal"])
+ return cfg
+
@property
def name(self):
if self._flags.no_setup or self._flags.no_teardown:
@@ -76,6 +105,10 @@ class Context(object):
def assigned_name(self):
return self._name
+ @property
+ def host_name_separator(self):
+ return self._host_name_separator
+
@staticmethod
def get_cls(context_type):
"""Return class of specified type."""
@@ -126,6 +159,25 @@ class Context(object):
attr_name)
@staticmethod
+ def get_physical_nodes():
+ """return physical node names for all contexts"""
+ physical_nodes = {}
+ for context in Context.list:
+ nodes = context._get_physical_nodes()
+ physical_nodes.update({context._name: nodes})
+
+ return physical_nodes
+
+ @staticmethod
+ def get_physical_node_from_server(server_name):
+ """return physical nodes for all contexts"""
+ context = Context.get_context_from_server(server_name)
+ if context is None:
+ return None
+
+ return context._get_physical_node_for_server(server_name)
+
+ @staticmethod
def get_context_from_server(attr_name):
"""lookup context info by name from node config
attr_name: either a name of the node created by yardstick or a dict
@@ -154,3 +206,15 @@ class Context(object):
except StopIteration:
raise ValueError("context not found for server %r" %
attr_name)
+
+ @abc.abstractmethod
+ def _get_physical_nodes(self):
+ """return the list of physical nodes in context"""
+
+ @abc.abstractmethod
+ def _get_physical_node_for_server(self, server_name):
+ """ Find physical node for given server
+
+ :param server_name: (string) Server name in scenario
+ :return string: <node_name>.<context_name>
+ """
diff --git a/yardstick/benchmark/contexts/dummy.py b/yardstick/benchmark/contexts/dummy.py
index a9e4564fe..9faca4c63 100644
--- a/yardstick/benchmark/contexts/dummy.py
+++ b/yardstick/benchmark/contexts/dummy.py
@@ -7,17 +7,18 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
-class DummyContext(Context):
+class DummyContext(base.Context):
"""Class that handle dummy info.
This class is also used to test the abstract class Context because it
provides a minimal concrete implementation of a subclass.
"""
- __context_type__ = "Dummy"
+ __context_type__ = contexts.CONTEXT_DUMMY
def deploy(self):
"""Don't need to deploy"""
@@ -32,3 +33,9 @@ class DummyContext(Context):
def _get_network(self, attr_name):
return None
+
+ def _get_physical_nodes(self):
+ return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0d1dfb86f..917aa9c39 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -7,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-from __future__ import print_function
-
import collections
import logging
import os
@@ -19,6 +16,7 @@ from collections import OrderedDict
import ipaddress
import pkg_resources
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
@@ -32,6 +30,7 @@ from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
+from yardstick.common import openstack_utils
LOG = logging.getLogger(__name__)
@@ -49,7 +48,7 @@ def h_join(*args):
class HeatContext(Context):
"""Class that represents a context in the logical model"""
- __context_type__ = "Heat"
+ __context_type__ = contexts.CONTEXT_HEAT
def __init__(self):
self.stack = None
@@ -60,6 +59,7 @@ class HeatContext(Context):
self.server_groups = []
self.keypair_name = None
self.secgroup_name = None
+ self.security_group = None
self._server_map = {}
self.attrs = {}
self._image = None
@@ -71,6 +71,13 @@ class HeatContext(Context):
self.shade_client = None
self.heat_timeout = None
self.key_filename = None
+ self.yardstick_gen_key_file = True
+ self.shade_client = None
+ self.operator_client = None
+ self.nodes = []
+ self.controllers = []
+ self.computes = []
+ self.baremetals = []
super(HeatContext, self).__init__()
@staticmethod
@@ -99,14 +106,33 @@ class HeatContext(Context):
self.template_file = attrs.get("heat_template")
+ # try looking for external private key when using external heat template
+ if self.template_file is not None:
+ self.key_filename = attrs.get("key_filename", None)
+ if self.key_filename is not None:
+ # Disable key file generation if an external private key
+ # has been provided
+ self.yardstick_gen_key_file = False
+
+ self.shade_client = openstack_utils.get_shade_client()
+ self.operator_client = openstack_utils.get_shade_operator_client()
+
+ try:
+ self.read_pod_file(attrs)
+ except IOError:
+ LOG.warning("No pod file specified. NVFi metrics will be disabled")
+
self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
if self.template_file:
self.heat_parameters = attrs.get("heat_parameters")
return
self.keypair_name = h_join(self.name, "key")
+
self.secgroup_name = h_join(self.name, "secgroup")
+ self.security_group = attrs.get("security_group")
+
self._image = attrs.get("image")
self._flavor = attrs.get("flavor")
@@ -172,7 +198,7 @@ class HeatContext(Context):
self.flavors.add(flavor)
template.add_keypair(self.keypair_name, self.name)
- template.add_security_group(self.secgroup_name)
+ template.add_security_group(self.secgroup_name, self.security_group)
for network in self.networks.values():
# Using existing network
@@ -318,18 +344,22 @@ class HeatContext(Context):
"""deploys template into a stack using cloud"""
LOG.info("Deploying context '%s' START", self.name)
- self.key_filename = ''.join(
- [consts.YARDSTICK_ROOT_PATH,
- 'yardstick/resources/files/yardstick_key-',
- self.name])
+ # Check if there was no external private key provided
+ if self.key_filename is None:
+ self.key_filename = ''.join(
+ [consts.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/files/yardstick_key-',
+ self.name])
# Permissions may have changed since creation; this can be fixed. If we
# overwrite the file, we lose future access to VMs using this key.
# As long as the file exists, even if it is unreadable, keep it intact
- if not os.path.exists(self.key_filename):
+ if self.yardstick_gen_key_file and not os.path.exists(self.key_filename):
SSH.gen_keys(self.key_filename)
- heat_template = HeatTemplate(self.name, self.template_file,
- self.heat_parameters)
+ heat_template = HeatTemplate(
+ self.name, template_file=self.template_file,
+ heat_parameters=self.heat_parameters,
+ os_cloud_config=self._flags.os_cloud_config)
if self.template_file is None:
self._add_resources_to_template(heat_template)
@@ -423,12 +453,14 @@ class HeatContext(Context):
}
def _delete_key_file(self):
- try:
- utils.remove_file(self.key_filename)
- utils.remove_file(self.key_filename + ".pub")
- except OSError:
- LOG.exception("There was an error removing the key file %s",
- self.key_filename)
+ # Only remove the key file if it has been generated by yardstick
+ if self.yardstick_gen_key_file:
+ try:
+ utils.remove_file(self.key_filename)
+ utils.remove_file(self.key_filename + ".pub")
+ except OSError:
+ LOG.exception("There was an error removing the key file %s",
+ self.key_filename)
def undeploy(self):
"""undeploys stack from cloud"""
@@ -466,7 +498,7 @@ class HeatContext(Context):
with attribute name mapping when using external heat templates
"""
if isinstance(attr_name, collections.Mapping):
- node_name, cname = self.split_name(attr_name['name'])
+ node_name, cname = self.split_host_name(attr_name['name'])
if cname is None or cname != self.name:
return None
@@ -477,6 +509,14 @@ class HeatContext(Context):
server.private_ip = self.stack.outputs.get(
attr_name.get("private_ip_attr", object()), None)
+
+ # Try to find interfaces
+ for key, value in attr_name.get("interfaces", {}).items():
+ value["local_ip"] = server.private_ip
+ for k in ["local_mac", "netmask", "gateway_ip"]:
+ # Keep explicit None or missing entry as is
+ value[k] = self.stack.outputs.get(value[k])
+ server.interfaces.update({key: value})
else:
try:
server = self._server_map[attr_name]
@@ -486,13 +526,29 @@ class HeatContext(Context):
if server is None:
return None
- pkey = pkg_resources.resource_string(
- 'yardstick.resources',
- h_join('files/yardstick_key', self.name)).decode('utf-8')
-
+ # Get the pkey
+ if self.yardstick_gen_key_file:
+ pkey = pkg_resources.resource_string(
+ 'yardstick.resources',
+ h_join('files/yardstick_key', self.name)).decode('utf-8')
+ key_filename = pkg_resources.resource_filename('yardstick.resources',
+ h_join('files/yardstick_key', self.name))
+ else:
+ # make sure the file exists before attempting to open it
+ if not os.path.exists(self.key_filename):
+ LOG.error("The key_filename provided %s does not exist!",
+ self.key_filename)
+ else:
+ try:
+ pkey = open(self.key_filename, 'r').read().decode('utf-8')
+ key_filename = self.key_filename
+ except IOError:
+ LOG.error("The key_filename provided (%s) is unreadable.",
+ self.key_filename)
result = {
"user": server.context.user,
"pkey": pkey,
+ "key_filename": key_filename,
"private_ip": server.private_ip,
"interfaces": server.interfaces,
"routing_table": self.generate_routing_table(server),
@@ -529,3 +585,30 @@ class HeatContext(Context):
"physical_network": network.physical_network,
}
return result
+
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s.name == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ server = openstack_utils.get_server(self.shade_client,
+ name_or_id=server_name)
+
+ if server:
+ server = server.toDict()
+ list_hypervisors = self.operator_client.list_hypervisors()
+
+ for hypervisor in list_hypervisors:
+ if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
+ for node in self.nodes:
+ if node['ip'] == hypervisor.host_ip:
+ return "{}.{}".format(node['name'], self._name)
+
+ return None
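With these changes an external heat template can reuse an operator-provided private key instead of a generated one, and an optional pod file feeds the new physical-node lookup. A hypothetical context definition, written as the attrs dict passed to HeatContext.init() (all values illustrative):

    attrs = {
        'name': 'demo',
        'task_id': '1234567890abcdef',
        'heat_template': '/tmp/external_stack.yaml',
        'key_filename': '/root/.ssh/external_key',  # disables yardstick key generation
        'file': 'pod.yaml',                         # optional; enables NFVi metrics
        'timeout': 900,
    }
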
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 4bea991ea..e1553c72b 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -7,50 +7,57 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
+import collections
import logging
-import time
import pkg_resources
+import time
import paramiko
-from yardstick.benchmark.contexts.base import Context
-from yardstick.orchestrator.kubernetes import KubernetesTemplate
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base as ctx_base
+from yardstick.benchmark.contexts import model
+from yardstick.common import constants
+from yardstick.common import exceptions
from yardstick.common import kubernetes_utils as k8s_utils
from yardstick.common import utils
+from yardstick.orchestrator import kubernetes
+
LOG = logging.getLogger(__name__)
BITS_LENGTH = 2048
-class KubernetesContext(Context):
+class KubernetesContext(ctx_base.Context):
"""Class that handle nodes info"""
- __context_type__ = "Kubernetes"
+ __context_type__ = contexts.CONTEXT_KUBERNETES
def __init__(self):
self.ssh_key = ''
self.key_path = ''
self.public_key_path = ''
self.template = None
-
- super(KubernetesContext, self).__init__()
+ super(KubernetesContext, self).__init__(host_name_separator='-')
def init(self, attrs):
super(KubernetesContext, self).init(attrs)
- template_cfg = attrs.get('servers', {})
- self.template = KubernetesTemplate(self.name, template_cfg)
-
+ networks = attrs.get('networks', {})
+ self.template = kubernetes.KubernetesTemplate(self.name, attrs)
self.ssh_key = '{}-key'.format(self.name)
-
self.key_path = self._get_key_path()
self.public_key_path = '{}.pub'.format(self.key_path)
+ self._networks = collections.OrderedDict(
+ (net_name, model.Network(net_name, self, network))
+ for net_name, network in networks.items())
def deploy(self):
LOG.info('Creating ssh key')
self._set_ssh_key()
+ self._create_crd()
+ self._create_networks()
LOG.info('Launch containers')
self._create_rcs()
self._create_services()
@@ -64,6 +71,8 @@ class KubernetesContext(Context):
self._delete_rcs()
self._delete_pods()
self._delete_services()
+ self._delete_networks()
+ self._delete_crd()
super(KubernetesContext, self).undeploy()
@@ -90,7 +99,7 @@ class KubernetesContext(Context):
obj.delete()
def _create_rcs(self):
- for obj in self.template.k8s_objs:
+ for obj in self.template.rc_objs:
self._create_rc(obj.get_template())
def _create_rc(self, template):
@@ -101,14 +110,34 @@ class KubernetesContext(Context):
self._delete_rc(rc)
def _delete_rc(self, rc):
- k8s_utils.delete_replication_controller(rc)
+ k8s_utils.delete_replication_controller(rc, skip_codes=[404])
def _delete_pods(self):
for pod in self.template.pods:
self._delete_pod(pod)
def _delete_pod(self, pod):
- k8s_utils.delete_pod(pod)
+ k8s_utils.delete_pod(pod, skip_codes=[404])
+
+ def _create_crd(self):
+ LOG.info('Create Custom Resource Definition elements')
+ for crd in self.template.crd:
+ crd.create()
+
+ def _delete_crd(self):
+ LOG.info('Delete Custom Resource Definition elements')
+ for crd in self.template.crd:
+ crd.delete()
+
+ def _create_networks(self): # pragma: no cover
+ LOG.info('Create Network elements')
+ for net in self.template.network_objs:
+ net.create()
+
+ def _delete_networks(self): # pragma: no cover
+ LOG.info('Delete Network elements')
+ for net in self.template.network_objs:
+ net.delete()
def _get_key_path(self):
task_id = self.name.split('-')[-1]
@@ -130,27 +159,76 @@ class KubernetesContext(Context):
k8s_utils.create_config_map(self.ssh_key, {'authorized_keys': key})
def _delete_ssh_key(self):
- k8s_utils.delete_config_map(self.ssh_key)
+ k8s_utils.delete_config_map(self.ssh_key, skip_codes=[404])
utils.remove_file(self.key_path)
utils.remove_file(self.public_key_path)
def _get_server(self, name):
- service_name = '{}-service'.format(name)
- service = k8s_utils.get_service_by_name(service_name).ports[0]
-
- host = {
- 'name': service.name,
+ node_ports = self._get_service_ports(name)
+ for sn_port in (sn_port for sn_port in node_ports
+ if sn_port['port'] == constants.SSH_PORT):
+ node_port = sn_port['node_port']
+ break
+ else:
+ raise exceptions.KubernetesSSHPortNotDefined()
+
+ return {
+ 'name': name,
'ip': self._get_node_ip(),
'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip,
- 'ssh_port': service.node_port,
+ 'ssh_port': node_port,
'user': 'root',
'key_filename': self.key_path,
+ 'interfaces': self._get_interfaces(name),
+ 'service_ports': node_ports
}
- return host
+ def _get_network(self, net_name):
+ """Retrieves the network object, searching by name
+
+ :param net_name: (str) replication controller name
+ :return: (dict) network information (name)
+ """
+ network = self._networks.get(net_name)
+ if not network:
+ return
+ return {'name': net_name}
+
+ def _get_interfaces(self, rc_name):
+ """Retrieves the network list of a replication controller
+
+ :param rc_name: (str) replication controller name
+ :return: (dict) names and information of the networks used in this
+ replication controller; those networks must be defined in the
+ Kubernetes cluster
+ """
+ rc = self.template.get_rc_by_name(rc_name)
+ if not rc:
+ return {}
+ return {name: {'network_name': name,
+ 'local_mac': None,
+ 'local_ip': None}
+ for name in rc.networks}
def _get_node_ip(self):
return k8s_utils.get_node_list().items[0].status.addresses[0].address
- def _get_network(self, attr_name):
+ def _get_physical_nodes(self):
return None
+
+ def _get_physical_node_for_server(self, server_name):
+ return None
+
+ def _get_service_ports(self, name):
+ service_name = '{}-service'.format(name)
+ service = k8s_utils.get_service_by_name(service_name)
+ if not service:
+ raise exceptions.KubernetesServiceObjectNotDefined()
+ ports = []
+ for port in service.ports:
+ ports.append({'name': port.name,
+ 'node_port': port.node_port,
+ 'port': port.port,
+ 'protocol': port.protocol,
+ 'target_port': port.target_port})
+ return ports
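_get_server() no longer assumes the first service port is SSH; it selects the port whose 'port' equals constants.SSH_PORT and raises KubernetesSSHPortNotDefined otherwise. The dict it returns now has roughly this shape (every value illustrative):

    server = {
        'name': 'host-k8s',
        'ip': '10.0.0.5',                  # address of the first cluster node
        'private_ip': '172.16.0.3',        # pod IP
        'ssh_port': 30022,                 # node_port mapped to port 22
        'user': 'root',
        'key_filename': '/tmp/yardstick_key/12345678',
        'interfaces': {'flannel': {'network_name': 'flannel',
                                   'local_mac': None,
                                   'local_ip': None}},
        'service_ports': [{'name': 'ssh', 'port': 22, 'node_port': 30022,
                           'protocol': 'TCP', 'target_port': 22}],
    }
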
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index fa619a9aa..d233e02ae 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -7,8 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-import errno
import subprocess
import os
import collections
@@ -19,10 +17,11 @@ import six
import pkg_resources
from yardstick import ssh
+from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
from yardstick.common.ansible_common import AnsibleCommon
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common.exceptions import ContextUpdateCollectdForNodeError
LOG = logging.getLogger(__name__)
@@ -32,7 +31,7 @@ DEFAULT_DISPATCH = 'script'
class NodeContext(Context):
"""Class that handle nodes info"""
- __context_type__ = "Node"
+ __context_type__ = contexts.CONTEXT_NODE
def __init__(self):
self.file_path = None
@@ -49,40 +48,11 @@ class NodeContext(Context):
}
super(NodeContext, self).__init__()
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(NodeContext, self).init(attrs)
- self.file_path = file_path = attrs.get("file", "pod.yaml")
-
- try:
- cfg = self.read_config_file()
- except IOError as io_error:
- if io_error.errno != errno.ENOENT:
- raise
-
- self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
- cfg = self.read_config_file()
-
- self.nodes.extend(cfg["nodes"])
- self.controllers.extend([node for node in cfg["nodes"]
- if node.get("role") == "Controller"])
- self.computes.extend([node for node in cfg["nodes"]
- if node.get("role") == "Compute"])
- self.baremetals.extend([node for node in cfg["nodes"]
- if node.get("role") == "Baremetal"])
- LOG.debug("Nodes: %r", self.nodes)
- LOG.debug("Controllers: %r", self.controllers)
- LOG.debug("Computes: %r", self.computes)
- LOG.debug("BareMetals: %r", self.baremetals)
+ cfg = self.read_pod_file(attrs)
self.env = attrs.get('env', {})
self.attrs = attrs
@@ -135,11 +105,37 @@ class NodeContext(Context):
playbook = os.path.join(ANSIBLE_DIR, playbook)
return playbook
+ def _get_physical_nodes(self):
+ return self.nodes
+
+ def _get_physical_node_for_server(self, server_name):
+
+ node_name, context_name = self.split_host_name(server_name)
+
+ if context_name is None or self.name != context_name:
+ return None
+
+ for n in (n for n in self.nodes if n["name"] == node_name):
+ return "{}.{}".format(n["name"], self._name)
+
+ return None
+
+ def update_collectd_options_for_node(self, options, attr_name):
+ node_name, _ = self.split_host_name(attr_name)
+
+ matching_nodes = (n for n in self.nodes if n["name"] == node_name)
+ try:
+ node = next(matching_nodes)
+ except StopIteration:
+ raise ContextUpdateCollectdForNodeError(attr_name=attr_name)
+
+ node["collectd"] = options
+
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
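NodeContext now delegates pod-file parsing to the shared read_pod_file() and gains update_collectd_options_for_node(), which lets a scenario attach collectd settings to a pod node. A sketch, assuming a pod.yaml that defines a node named 'trafficgen_1':

    from yardstick.benchmark.contexts import node

    ctx = node.NodeContext()
    ctx.init({'name': 'pod', 'task_id': '1234567890abcdef', 'file': 'pod.yaml'})
    # Raises ContextUpdateCollectdForNodeError if no node matches 'trafficgen_1'.
    ctx.update_collectd_options_for_node({'interval': 5}, 'trafficgen_1.pod')
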
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index 4d43f2611..a15426872 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -26,7 +26,8 @@ import xml.etree.ElementTree as ET
from yardstick import ssh
from yardstick.common import constants
from yardstick.common import exceptions
-from yardstick.common.yaml_loader import yaml_load
+from yardstick.common import utils as common_utils
+from yardstick.common import yaml_loader
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
@@ -45,7 +46,7 @@ VM_TEMPLATE = """
<vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
{cputune}
<os>
- <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
+ <type arch="x86_64" machine="{machine}">hvm</type>
<boot dev="hd" />
</os>
<features>
@@ -89,6 +90,30 @@ VM_TEMPLATE = """
</devices>
</domain>
"""
+
+USER_DATA_TEMPLATE = """
+cat > {user_file} <<EOF
+#cloud-config
+preserve_hostname: false
+hostname: {host}
+users:
+{user_config}
+EOF
+"""
+
+NETWORK_DATA_TEMPLATE = """
+cat > {network_file} <<EOF
+#cloud-config
+version: 2
+ethernets:
+ ens3:
+ match:
+ macaddress: {mac_address}
+ addresses:
+ - {ip_address}
+EOF
+"""
+
WAIT_FOR_BOOT = 30
@@ -137,7 +162,8 @@ class Libvirt(object):
return vm_pci
@classmethod
- def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str):
+ def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str,
+ queues):
"""Add a DPDK OVS 'interface' XML node in 'devices' node
<devices>
@@ -179,7 +205,7 @@ class Libvirt(object):
model.set('type', 'virtio')
driver = ET.SubElement(interface, 'driver')
- driver.set('queues', '4')
+ driver.set('queues', str(queues))
host = ET.SubElement(driver, 'host')
host.set('mrg_rxbuf', 'off')
@@ -268,7 +294,7 @@ class Libvirt(object):
return vm_image
@classmethod
- def build_vm_xml(cls, connection, flavor, vm_name, index):
+ def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img):
"""Build the XML from the configuration parameters"""
memory = flavor.get('ram', '4096')
extra_spec = flavor.get('extra_specs', {})
@@ -281,6 +307,7 @@ class Libvirt(object):
cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)
cputune = extra_spec.get('cputune', '')
+ machine = extra_spec.get('machine_type', 'pc-i440fx-xenial')
mac = StandaloneContextHelper.get_mac_address(0x00)
image = cls.create_snapshot_qemu(connection, index,
flavor.get("images", None))
@@ -291,7 +318,11 @@ class Libvirt(object):
memory=memory, vcpu=vcpu, cpu=cpu,
numa_cpus=numa_cpus,
socket=socket, threads=threads,
- vm_image=image, cpuset=cpuset, cputune=cputune)
+ vm_image=image, cpuset=cpuset,
+ machine=machine, cputune=cputune)
+
+ # Add CD-ROM device
+ vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)
return vm_xml, mac
@@ -320,6 +351,75 @@ class Libvirt(object):
et = ET.ElementTree(element=root)
et.write(file_name, encoding='utf-8', method='xml')
+ @classmethod
+ def add_cdrom(cls, file_path, xml_str):
+ """Add a CD-ROM disk XML node in 'devices' node
+
+ <devices>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='/var/lib/libvirt/images/data.img'/>
+ <target dev='hdb'/>
+ <readonly/>
+ </disk>
+ ...
+ </devices>
+ """
+
+ root = ET.fromstring(xml_str)
+ device = root.find('devices')
+
+ disk = ET.SubElement(device, 'disk')
+ disk.set('type', 'file')
+ disk.set('device', 'cdrom')
+
+ driver = ET.SubElement(disk, 'driver')
+ driver.set('name', 'qemu')
+ driver.set('type', 'raw')
+
+ source = ET.SubElement(disk, 'source')
+ source.set('file', file_path)
+
+ target = ET.SubElement(disk, 'target')
+ target.set('dev', 'hdb')
+
+ ET.SubElement(disk, 'readonly')
+ return ET.tostring(root)
+
+ @staticmethod
+ def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename, mac, ip):
+ """Generate ISO image for CD-ROM """
+
+ user_config = [" - name: {user_name}",
+ " ssh_authorized_keys:",
+ " - {pub_key_str}"]
+ if vm_user != "root":
+ user_config.append(" sudo: ALL=(ALL) NOPASSWD:ALL")
+
+ meta_data = "/tmp/meta-data"
+ user_data = "/tmp/user-data"
+ network_data = "/tmp/network-config"
+ with open(".".join([key_filename, "pub"]), "r") as pub_key_file:
+ pub_key_str = pub_key_file.read().rstrip()
+ user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user)
+
+ cmd_lst = [
+ "touch %s" % meta_data,
+ USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf),
+ NETWORK_DATA_TEMPLATE.format(network_file=network_data, mac_address=mac,
+ ip_address=ip),
+ "genisoimage -output {0} -volid cidata -joliet -r {1} {2} {3}".format(file_path,
+ meta_data,
+ user_data,
+ network_data),
+ "rm {0} {1} {2}".format(meta_data, user_data, network_data),
+ ]
+ for cmd in cmd_lst:
+ LOG.info(cmd)
+ status, _, error = connection.execute(cmd)
+ if status:
+ raise exceptions.LibvirtQemuImageCreateError(error=error)
+
class StandaloneContextHelper(object):
""" This class handles all the common code for standalone
@@ -331,7 +431,7 @@ class StandaloneContextHelper(object):
@staticmethod
def install_req_libs(connection, extra_pkgs=None):
extra_pkgs = extra_pkgs or []
- pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
+ pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping", "genisoimage"]
pkgs.extend(extra_pkgs)
cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
for pkg in pkgs:
@@ -394,26 +494,18 @@ class StandaloneContextHelper(object):
return pf_vfs
- def read_config_file(self):
- """Read from config file"""
-
- with open(self.file_path) as stream:
- LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml_load(stream)
- return cfg
-
def parse_pod_file(self, file_path, nfvi_role='Sriov'):
self.file_path = file_path
nodes = []
nfvi_host = []
try:
- cfg = self.read_config_file()
+ cfg = yaml_loader.read_yaml_file(self.file_path)
except IOError as io_error:
if io_error.errno != errno.ENOENT:
raise
self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
file_path)
- cfg = self.read_config_file()
+ cfg = yaml_loader.read_yaml_file(self.file_path)
nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
@@ -463,8 +555,41 @@ class StandaloneContextHelper(object):
ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
if ip:
node["ip"] = ip
+ client = ssh.SSH.from_node(node)
+ LOG.debug("OS version: %s",
+ common_utils.get_os_version(client))
+ LOG.debug("Kernel version: %s",
+ common_utils.get_kernel_version(client))
+ vnfs_data = common_utils.get_sample_vnf_info(client)
+ for vnf_name, vnf_data in vnfs_data.items():
+ LOG.debug("VNF name: '%s', commit ID/branch: '%s'",
+ vnf_name, vnf_data["branch_commit"])
+ LOG.debug("%s", vnf_data["md5_result"])
return nodes
+ @classmethod
+ def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img, mac):
+ # Generate public/private keys if private key file is not provided
+ user_name = node.get('user')
+ if not user_name:
+ node['user'] = 'root'
+ user_name = node.get('user')
+ if not node.get('key_filename'):
+ key_filename = ''.join(
+ [constants.YARDSTICK_ROOT_PATH,
+ 'yardstick/resources/files/yardstick_key-',
+ id_name, '-', vm_name])
+ ssh.SSH.gen_keys(key_filename)
+ node['key_filename'] = key_filename
+ # Update image with public key
+ key_filename = node.get('key_filename')
+ ip_netmask = "{0}/{1}".format(node.get('ip'), node.get('netmask'))
+ ip_netmask = "{0}/{1}".format(node.get('ip'),
+ IPNetwork(ip_netmask).prefixlen)
+ Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename, mac,
+ ip_netmask)
+ return node
+
class Server(object):
""" This class handles geting vnf nodes
@@ -477,7 +602,7 @@ class Server(object):
for key, vfs in vnf["network_ports"].items():
if key == "mgmt":
- mgmtip = str(IPNetwork(vfs['cidr']).ip)
+ mgmt_cidr = IPNetwork(vfs['cidr'])
continue
vf = ports[vfs[0]]
@@ -494,14 +619,15 @@ class Server(object):
})
index = index + 1
- return mgmtip, interfaces
+ return mgmt_cidr, interfaces
@classmethod
def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
- mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)
+ mgmt_cidr, interfaces = cls.build_vnf_interfaces(vnf, ports)
result = {
- "ip": mgmtip,
+ "ip": str(mgmt_cidr.ip),
+ "netmask": str(mgmt_cidr.netmask),
"mac": mac,
"host": ip,
"user": flavor.get('user', 'root'),
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index b9e66a481..c6e19f614 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -20,11 +20,13 @@ import re
import time
from yardstick import ssh
-from yardstick.network_services.utils import get_nsb_option
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
+from yardstick.common import utils as common_utils
from yardstick.network_services import utils
+from yardstick.network_services.utils import get_nsb_option
LOG = logging.getLogger(__name__)
@@ -32,12 +34,12 @@ LOG = logging.getLogger(__name__)
MAIN_BRIDGE = 'br0'
-class OvsDpdkContext(Context):
+class OvsDpdkContext(base.Context):
""" This class handles OVS standalone nodes - VM running on Non-Managed NFVi
Configuration: ovs_dpdk
"""
- __context_type__ = "StandaloneOvsDpdk"
+ __context_type__ = contexts.CONTEXT_STANDALONEOVSDPDK
SUPPORTED_OVS_TO_DPDK_MAP = {
'2.6.0': '16.07.1',
@@ -45,7 +47,8 @@ class OvsDpdkContext(Context):
'2.7.0': '16.11.1',
'2.7.1': '16.11.2',
'2.7.2': '16.11.3',
- '2.8.0': '17.05.2'
+ '2.8.0': '17.05.2',
+ '2.8.1': '17.05.2'
}
DEFAULT_OVS = '2.6.0'
@@ -71,6 +74,11 @@ class OvsDpdkContext(Context):
self.wait_for_vswitchd = 10
super(OvsDpdkContext, self).__init__()
+ def get_dpdk_socket_mem_size(self, socket_id):
+ """Get the size of OvS DPDK socket memory (Mb)"""
+ ram = self.ovs_properties.get("ram", {})
+ return ram.get('socket_%d' % (socket_id), 2048)
+
def init(self, attrs):
"""initializes itself from the supplied arguments"""
super(OvsDpdkContext, self).init(attrs)
@@ -131,9 +139,6 @@ class OvsDpdkContext(Context):
if pmd_cpu_mask:
pmd_mask = pmd_cpu_mask
- socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
- socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")
-
ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"
@@ -141,16 +146,23 @@ class OvsDpdkContext(Context):
if lcore_mask:
lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)
+ max_idle = self.ovs_properties.get("max_idle", '')
+ if max_idle:
+ max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle)
+
cmd_list = [
"mkdir -p /usr/local/var/run/openvswitch",
"mkdir -p {}".format(os.path.dirname(log_path)),
- "ovsdb-server --remote=punix:/{0}/{1} --pidfile --detach".format(vpath,
- ovs_sock_path),
+ ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640"
+ " --pidfile --detach").format(vpath, ovs_sock_path),
ovs_other_config.format("--no-wait ", "dpdk-init=true"),
- ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
+ ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%d,%d'" % (
+ self.get_dpdk_socket_mem_size(0),
+ self.get_dpdk_socket_mem_size(1))),
lcore_mask,
detach_cmd.format(vpath, ovs_sock_path, log_path),
ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
+ max_idle,
]
for cmd in cmd_list:
@@ -160,13 +172,12 @@ class OvsDpdkContext(Context):
def setup_ovs_bridge_add_flows(self):
dpdk_args = ""
- dpdk_list = []
vpath = self.ovs_properties.get("vpath", "/usr/local")
version = self.ovs_properties.get('version', {})
ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
- 'set Interface {port} type={type_}{dpdk_args}')
- ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}'
+ 'set Interface {port} type={type_}{dpdk_args}'
+ '{dpdk_rxq}{pmd_rx_aff}')
chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'
cmd_list = [
@@ -175,27 +186,43 @@ class OvsDpdkContext(Context):
'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
format(MAIN_BRIDGE)
]
+ dpdk_rxq = ""
+ queues = self.ovs_properties.get("queues")
+ if queues:
+ dpdk_rxq = " options:n_rxq={queue}".format(queue=queues)
- ordered_network = collections.OrderedDict(self.networks)
+ # Sorting the array to make sure we execute dpdk0... in the order
+ ordered_network = collections.OrderedDict(
+ sorted(self.networks.items(), key=lambda t: t[1].get('port_num', 0)))
+ pmd_rx_aff_ports = self.ovs_properties.get("dpdk_pmd-rxq-affinity", {})
for index, vnf in enumerate(ordered_network.values()):
if ovs_ver >= [2, 7, 0]:
dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
- dpdk_list.append(ovs_add_port.format(
+ affinity = pmd_rx_aff_ports.get(vnf.get("port_num", -1), "")
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
+ cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
- type_='dpdk', dpdk_args=dpdk_args))
- dpdk_list.append(ovs_add_queue.format(
- port='dpdk%s' % vnf.get("port_num", 0),
- queue=self.ovs_properties.get("queues", 1)))
-
- # Sorting the array to make sure we execute dpdk0... in the order
- list.sort(dpdk_list)
- cmd_list.extend(dpdk_list)
+ type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
# Need to do two for loop to maintain the dpdk/vhost ports.
+ pmd_rx_aff_ports = self.ovs_properties.get("vhost_pmd-rxq-affinity",
+ {})
for index, _ in enumerate(ordered_network):
+ affinity = pmd_rx_aff_ports.get(index)
+ if affinity:
+ pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
+ '"{affinity}"'.format(affinity=affinity)
+ else:
+ pmd_rx_aff = ""
cmd_list.append(ovs_add_port.format(
br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
- type_='dpdkvhostuser', dpdk_args=""))
+ type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=dpdk_rxq,
+ pmd_rx_aff=pmd_rx_aff))
ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
format(MAIN_BRIDGE))
@@ -235,7 +262,6 @@ class OvsDpdkContext(Context):
def check_ovs_dpdk_env(self):
self.cleanup_ovs_dpdk_env()
- self._check_hugepages()
version = self.ovs_properties.get("version", {})
ovs_ver = version.get("ovs", self.DEFAULT_OVS)
@@ -298,13 +324,28 @@ class OvsDpdkContext(Context):
for vm in self.vm_names:
model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ # self.nfvi_host always contains only one host
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
@@ -360,6 +401,7 @@ class OvsDpdkContext(Context):
def _enable_interfaces(self, index, vfs, xml_str):
vpath = self.ovs_properties.get("vpath", "/usr/local")
+ queue = self.ovs_properties.get("queues", 1)
vf = self.networks[vfs[0]]
port_num = vf.get('port_num', 0)
vpci = utils.PciAddress(vf['vpci'].strip())
@@ -368,23 +410,31 @@ class OvsDpdkContext(Context):
vf['vpci'] = \
"{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
return model.Libvirt.add_ovs_interface(
- vpath, port_num, vf['vpci'], vf['mac'], xml_str)
+ vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue)
def setup_ovs_dpdk_context(self):
nodes = []
self.configure_nics_for_ovs_dpdk()
+ hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
+ common_utils.setup_hugepages(self.connection, (hp_total_mb + \
+ self.get_dpdk_socket_mem_size(0) + \
+ self.get_dpdk_socket_mem_size(1)) * 1024)
+
+ self._check_hugepages()
+
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_ovs_%d.xml' % index
- vm_name = "vm_%d" % index
+ vm_name = "vm-%d" % index
+ cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
# 1. Check and delete VM if already exists
model.Libvirt.check_if_vm_exists_and_delete(vm_name,
self.connection)
xml_str, mac = model.Libvirt.build_vm_xml(
- self.connection, self.vm_flavor, vm_name, index)
+ self.connection, self.vm_flavor, vm_name, index, cdrom_img)
# 2: Cleanup already available VMs
for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
@@ -395,16 +445,25 @@ class OvsDpdkContext(Context):
model.Libvirt.write_file(cfg, xml_str)
self.connection.put(cfg, cfg)
+ node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+ self.networks,
+ self.host_mgmt.get('ip'),
+ key, vnf, mac)
+ # Generate public/private keys if password or private key file is not provided
+ node = model.StandaloneContextHelper.check_update_key(self.connection,
+ node,
+ vm_name,
+ self.name,
+ cdrom_img,
+ mac)
+
+ # store vnf node details
+ nodes.append(node)
+
# NOTE: launch through libvirt
LOG.info("virsh create ...")
model.Libvirt.virsh_create_vm(self.connection, cfg)
self.vm_names.append(vm_name)
- # build vnf node details
- nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
- self.networks,
- self.host_mgmt.get('ip'),
- key, vnf, mac))
-
return nodes
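setup_ovs_dpdk_context() now sizes hugepages from the VM flavor RAM plus the per-socket DPDK memory before re-running the hugepage check. The same arithmetic as a standalone snippet (all inputs illustrative):

    vm_ram_mb = 4096      # vm_flavor['ram'] per VM
    num_servers = 2       # len(self.servers)
    socket0_mb = 2048     # ovs_properties['ram']['socket_0'], default 2048
    socket1_mb = 2048     # ovs_properties['ram']['socket_1'], default 2048
    total_kb = (vm_ram_mb * num_servers + socket0_mb + socket1_mb) * 1024
    print(total_kb)       # 12582912 kB requested via utils.setup_hugepages()
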
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 95472fdda..e037dd85a 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -18,20 +18,22 @@ import logging
import collections
from yardstick import ssh
-from yardstick.network_services.utils import get_nsb_option
-from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark import contexts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
+from yardstick.common import utils
+from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.utils import PciAddress
LOG = logging.getLogger(__name__)
-class SriovContext(Context):
+class SriovContext(base.Context):
""" This class handles SRIOV standalone nodes - VM running on Non-Managed NFVi
Configuration: sr-iov
"""
- __context_type__ = "StandaloneSriov"
+ __context_type__ = contexts.CONTEXT_STANDALONESRIOV
def __init__(self):
self.file_path = None
@@ -106,13 +108,29 @@ class SriovContext(Context):
build_vfs = "echo 0 > /sys/bus/pci/devices/{0}/sriov_numvfs"
self.connection.execute(build_vfs.format(ports.get('phy_port')))
+ def _get_physical_nodes(self):
+ return self.nfvi_host
+
+ def _get_physical_node_for_server(self, server_name):
+
+ # self.nfvi_host always contains only one host.
+ node_name, ctx_name = self.split_host_name(server_name)
+ if ctx_name is None or self.name != ctx_name:
+ return None
+
+ matching_nodes = [s for s in self.servers if s == node_name]
+ if len(matching_nodes) == 0:
+ return None
+
+ return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
+
def _get_server(self, attr_name):
"""lookup server info by name from context
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
@@ -194,10 +212,10 @@ class SriovContext(Context):
slot = index + idx + 10
vf['vpci'] = \
"{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
- model.Libvirt.add_sriov_interfaces(
- vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg))
self.connection.execute("ifconfig %s up" % vf['interface'])
self.connection.execute(vf_spoofchk.format(vf['interface']))
+ return model.Libvirt.add_sriov_interfaces(
+ vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg))
def setup_sriov_context(self):
nodes = []
@@ -205,38 +223,52 @@ class SriovContext(Context):
# 1 : modprobe host_driver with num_vfs
self.configure_nics_for_sriov()
+ hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
+ utils.setup_hugepages(self.connection, hp_total_mb * 1024)
+
for index, (key, vnf) in enumerate(collections.OrderedDict(
self.servers).items()):
cfg = '/tmp/vm_sriov_%s.xml' % str(index)
- vm_name = "vm_%s" % str(index)
+ vm_name = "vm-%s" % str(index)
+ cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
# 1. Check and delete VM if already exists
model.Libvirt.check_if_vm_exists_and_delete(vm_name,
self.connection)
xml_str, mac = model.Libvirt.build_vm_xml(
- self.connection, self.vm_flavor, vm_name, index)
+ self.connection, self.vm_flavor, vm_name, index, cdrom_img)
# 2: Cleanup already available VMs
network_ports = collections.OrderedDict(
{k: v for k, v in vnf["network_ports"].items() if k != 'mgmt'})
for idx, vfs in enumerate(network_ports.values()):
- self._enable_interfaces(index, idx, vfs, cfg)
+ xml_str = self._enable_interfaces(index, idx, vfs, xml_str)
# copy xml to target...
model.Libvirt.write_file(cfg, xml_str)
self.connection.put(cfg, cfg)
+ node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+ self.networks,
+ self.host_mgmt.get('ip'),
+ key, vnf, mac)
+ # Generate public/private keys if password or private key file is not provided
+ node = model.StandaloneContextHelper.check_update_key(self.connection,
+ node,
+ vm_name,
+ self.name,
+ cdrom_img,
+ mac)
+
+ # store vnf node details
+ nodes.append(node)
+
# NOTE: launch through libvirt
LOG.info("virsh create ...")
model.Libvirt.virsh_create_vm(self.connection, cfg)
self.vm_names.append(vm_name)
- # build vnf node details
- nodes.append(self.vnf_node.generate_vnf_instance(
- self.vm_flavor, self.networks, self.host_mgmt.get('ip'),
- key, vnf, mac))
-
return nodes
def _get_vf_data(self, value, vfmac, pfif):