path: root/yardstick/benchmark
Diffstat (limited to 'yardstick/benchmark')
-rw-r--r--  yardstick/benchmark/contexts/base.py                     |  29
-rw-r--r--  yardstick/benchmark/contexts/heat.py                     | 127
-rw-r--r--  yardstick/benchmark/contexts/node.py                     |  42
-rw-r--r--  yardstick/benchmark/contexts/standalone.py               |  41
-rw-r--r--  yardstick/benchmark/core/task.py                         |   8
-rw-r--r--  yardstick/benchmark/scenarios/lib/migrate.py             |  11
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py  | 292
7 files changed, 313 insertions, 237 deletions
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index e362c6a3d..c9b5b51c9 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -18,6 +18,15 @@ class Context(object):
"""Class that represents a context in the logical model"""
list = []
+ @staticmethod
+ def split_name(name, sep='.'):
+ try:
+ name_iter = iter(name.split(sep))
+ except AttributeError:
+ # name is not a string
+ return None, None
+ return next(name_iter), next(name_iter, None)
+
def __init__(self):
Context.list.append(self)
@@ -71,7 +80,23 @@ class Context(object):
try:
return next(s for s in servers if s)
except StopIteration:
- raise ValueError("context not found for server '%r'" %
+ raise ValueError("context not found for server %r" %
+ attr_name)
+
+ @staticmethod
+ def get_context_from_server(attr_name):
+ """lookup context info by name from node config
+ attr_name: either a name of the node created by yardstick or a dict
+ with attribute name mapping when using external templates
+
+ :returns Context instance
+ """
+ servers = ((context._get_server(attr_name), context)
+ for context in Context.list)
+ try:
+ return next(con for s, con in servers if s)
+ except StopIteration:
+ raise ValueError("context not found for name %r" %
attr_name)
@staticmethod
@@ -85,5 +110,5 @@ class Context(object):
try:
return next(n for n in networks if n)
except StopIteration:
- raise ValueError("context not found for server '%r'" %
+ raise ValueError("context not found for server %r" %
attr_name)
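
A quick sketch of what the new Context.split_name helper returns (an illustrative copy of the added static method, with made-up server names): a dotted name splits into node and context parts, a bare name yields (name, None), and a non-string such as the dict used with external templates yields (None, None), which is what the heat, node and standalone contexts later rely on.

    # Illustrative copy of the split_name() added above; server names are hypothetical.
    def split_name(name, sep='.'):
        try:
            name_iter = iter(name.split(sep))
        except AttributeError:
            # name is not a string
            return None, None
        return next(name_iter), next(name_iter, None)

    assert split_name("tg_0.demo") == ("tg_0", "demo")
    assert split_name("tg_0") == ("tg_0", None)
    assert split_name({"name": "tg_0.demo"}) == (None, None)
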
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index d5349eab5..c8d53e324 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -17,7 +17,6 @@ import uuid
from collections import OrderedDict
import ipaddress
-import paramiko
import pkg_resources
from yardstick.benchmark.contexts.base import Context
@@ -28,12 +27,21 @@ from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common.openstack_utils import get_neutron_client
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH
+from yardstick.ssh import SSH
LOG = logging.getLogger(__name__)
DEFAULT_HEAT_TIMEOUT = 3600
+def join_args(sep, *args):
+ return sep.join(args)
+
+
+def h_join(*args):
+ return '-'.join(args)
+
+
class HeatContext(Context):
"""Class that represents a context in the logical model"""
@@ -43,12 +51,14 @@ class HeatContext(Context):
self.name = None
self.stack = None
self.networks = OrderedDict()
+ self.heat_timeout = None
self.servers = []
self.placement_groups = []
self.server_groups = []
self.keypair_name = None
self.secgroup_name = None
self._server_map = {}
+ self.attrs = {}
self._image = None
self._flavor = None
self.flavors = set()
@@ -65,7 +75,8 @@ class HeatContext(Context):
get_short_key_uuid(self.key_uuid)])
super(HeatContext, self).__init__()
- def assign_external_network(self, networks):
+ @staticmethod
+ def assign_external_network(networks):
sorted_networks = sorted(networks.items())
external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
@@ -74,8 +85,7 @@ class HeatContext(Context):
# no external net defined, assign it to first network using os.environ
sorted_networks[0][1]["external_network"] = external_network
- self.networks = OrderedDict((name, Network(name, self, attrs))
- for name, attrs in sorted_networks)
+ return sorted_networks
def init(self, attrs):
"""initializes itself from the supplied arguments"""
@@ -88,8 +98,8 @@ class HeatContext(Context):
self.heat_parameters = attrs.get("heat_parameters")
return
- self.keypair_name = self.name + "-key"
- self.secgroup_name = self.name + "-secgroup"
+ self.keypair_name = h_join(self.name, "key")
+ self.secgroup_name = h_join(self.name, "secgroup")
self._image = attrs.get("image")
@@ -97,29 +107,29 @@ class HeatContext(Context):
self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
- self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
- for name, pgattrs in attrs.get(
+ self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
+ for name, pg_attrs in attrs.get(
"placement_groups", {}).items()]
- self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
- for name, sgattrs in attrs.get(
+ self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
+ for name, sg_attrs in attrs.get(
"server_groups", {}).items()]
# we have to do this first, because we are injecting external_network
# into the dict
- self.assign_external_network(attrs["networks"])
+ sorted_networks = self.assign_external_network(attrs["networks"])
+
+ self.networks = OrderedDict(
+ (name, Network(name, self, net_attrs)) for name, net_attrs in
+ sorted_networks)
- for name, serverattrs in sorted(attrs["servers"].items()):
- server = Server(name, self, serverattrs)
+ for name, server_attrs in sorted(attrs["servers"].items()):
+ server = Server(name, self, server_attrs)
self.servers.append(server)
self._server_map[server.dn] = server
- rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
- rsa_key.write_private_key_file(self.key_filename)
- print("Writing %s ..." % self.key_filename)
- with open(self.key_filename + ".pub", "w") as pubkey_file:
- pubkey_file.write(
- "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
+ self.attrs = attrs
+ SSH.gen_keys(self.key_filename)
@property
def image(self):
@@ -188,7 +198,7 @@ class HeatContext(Context):
try:
self.flavors.add(server.flavor["name"])
except KeyError:
- self.flavors.add(server.stack_name + "-flavor")
+ self.flavors.add(h_join(server.stack_name, "flavor"))
# add servers with availability policy
added_servers = []
@@ -286,7 +296,7 @@ class HeatContext(Context):
# let the other failures happen, we want stack trace
raise
- # TODO: use Neutron to get segementation-id
+ # TODO: use Neutron to get segmentation-id
self.get_neutron_info()
# copy some vital stack output into server objects
@@ -311,24 +321,26 @@ class HeatContext(Context):
def make_interface_dict(self, network_name, stack_name, outputs):
private_ip = outputs[stack_name]
- mac_addr = outputs[stack_name + "-mac_address"]
- subnet_cidr_key = "-".join([self.name, network_name, 'subnet', 'cidr'])
- gateway_key = "-".join([self.name, network_name, 'subnet', 'gateway_ip'])
- subnet_cidr = outputs[subnet_cidr_key]
- subnet_ip = ipaddress.ip_network(subnet_cidr)
+ mac_address = outputs[h_join(stack_name, "mac_address")]
+ output_subnet_cidr = outputs[h_join(self.name, network_name,
+ 'subnet', 'cidr')]
+
+ output_subnet_gateway = outputs[h_join(self.name, network_name,
+ 'subnet', 'gateway_ip')]
+
return {
"private_ip": private_ip,
- "subnet_id": outputs[stack_name + "-subnet_id"],
- "subnet_cidr": subnet_cidr,
- "network": str(subnet_ip.network_address),
- "netmask": str(subnet_ip.netmask),
- "gateway_ip": outputs[gateway_key],
- "mac_address": mac_addr,
- "device_id": outputs[stack_name + "-device_id"],
- "network_id": outputs[stack_name + "-network_id"],
+ "subnet_id": outputs[h_join(stack_name, "subnet_id")],
+ "subnet_cidr": output_subnet_cidr,
+ "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
+ "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
+ "gateway_ip": output_subnet_gateway,
+ "mac_address": mac_address,
+ "device_id": outputs[h_join(stack_name, "device_id")],
+ "network_id": outputs[h_join(stack_name, "network_id")],
"network_name": network_name,
# to match vnf_generic
- "local_mac": mac_addr,
+ "local_mac": mac_address,
"local_ip": private_ip,
"vld_id": self.networks[network_name].vld_id,
}
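
For reference, the h_join helper introduced at the top of heat.py simply dashes its arguments together, and the network/netmask fields above are derived from the subnet CIDR with the standard ipaddress module. A minimal sketch with hypothetical stack-output names and a made-up CIDR:

    def h_join(*args):
        return '-'.join(args)

    # hypothetical names; real values come from the Heat stack outputs
    stack_name, context_name, network_name = 'demo.vnf_0', 'demo', 'xe0'
    print(h_join(stack_name, 'mac_address'))                     # demo.vnf_0-mac_address
    print(h_join(context_name, network_name, 'subnet', 'cidr'))  # demo-xe0-subnet-cidr

    import ipaddress
    subnet = ipaddress.ip_network(u'10.0.1.0/24')                # hypothetical subnet CIDR
    print('%s %s' % (subnet.network_address, subnet.netmask))    # 10.0.1.0 255.255.255.0
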
@@ -357,7 +369,8 @@ class HeatContext(Context):
"network": intf["network"],
"netmask": intf["netmask"],
"if": name,
- "gateway": intf["gateway_ip"],
+ # We have to encode a None gateway as '' for Jinja2 to YAML conversion
+ "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
}
for name, intf in server.interfaces.items()
]
@@ -370,31 +383,24 @@ class HeatContext(Context):
"""
key_filename = pkg_resources.resource_filename(
'yardstick.resources',
- 'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))
-
- if not isinstance(attr_name, collections.Mapping):
- server = self._server_map.get(attr_name, None)
+ h_join('files/yardstick_key', get_short_key_uuid(self.key_uuid)))
- else:
- cname = attr_name["name"].split(".")[1]
- if cname != self.name:
+ if isinstance(attr_name, collections.Mapping):
+ node_name, cname = self.split_name(attr_name['name'])
+ if cname is None or cname != self.name:
return None
- public_ip = None
- private_ip = None
- if "public_ip_attr" in attr_name:
- public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
- if "private_ip_attr" in attr_name:
- private_ip = self.stack.outputs[
- attr_name["private_ip_attr"]]
-
# Create a dummy server instance for holding the *_ip attributes
- server = Server(attr_name["name"].split(".")[0], self, {})
- server.public_ip = public_ip
- server.private_ip = private_ip
+ server = Server(node_name, self, {})
+ server.public_ip = self.stack.outputs.get(
+ attr_name.get("public_ip_attr", object()), None)
- if server is None:
- return None
+ server.private_ip = self.stack.outputs.get(
+ attr_name.get("private_ip_attr", object()), None)
+ else:
+ server = self._server_map.get(attr_name, None)
+ if server is None:
+ return None
result = {
"user": server.context.user,
@@ -417,12 +423,9 @@ class HeatContext(Context):
else:
# Don't generalize too much Just support vld_id
- vld_id = attr_name.get('vld_id')
- if vld_id is None:
- return None
-
- network = next((n for n in self.networks.values() if
- getattr(n, "vld_id", None) == vld_id), None)
+ vld_id = attr_name.get('vld_id', {})
+ network_iter = (n for n in self.networks.values() if n.vld_id == vld_id)
+ network = next(network_iter, None)
if network is None:
return None
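
The rewritten _get_server relies on a small idiom worth noting: a fresh object() is used as the default dict key, so a missing "public_ip_attr" or "private_ip_attr" entry can never accidentally match anything in the stack outputs. A sketch of that idiom with fabricated output data:

    outputs = {'demo-vnf_0-public-ip': '172.16.0.5'}   # hypothetical stack outputs
    attr_name = {'name': 'vnf_0.demo'}                 # no public_ip_attr supplied

    # object() is a unique key that cannot be present in outputs, so .get() returns None
    assert outputs.get(attr_name.get('public_ip_attr', object()), None) is None

    attr_name['public_ip_attr'] = 'demo-vnf_0-public-ip'
    assert outputs.get(attr_name.get('public_ip_attr', object()), None) == '172.16.0.5'
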
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index b3f0aca0e..78a2d1f46 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -19,7 +19,7 @@ import pkg_resources
from yardstick import ssh
from yardstick.benchmark.contexts.base import Context
-from yardstick.common import constants as consts
+from yardstick.common.constants import ANSIBLE_DIR, YARDSTICK_ROOT_PATH
LOG = logging.getLogger(__name__)
@@ -38,6 +38,7 @@ class NodeContext(Context):
self.computes = []
self.baremetals = []
self.env = {}
+ self.attrs = {}
super(NodeContext, self).__init__()
def read_config_file(self):
@@ -45,24 +46,23 @@ class NodeContext(Context):
with open(self.file_path) as stream:
LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml.load(stream)
+ cfg = yaml.safe_load(stream)
return cfg
def init(self, attrs):
"""initializes itself from the supplied arguments"""
self.name = attrs["name"]
- self.file_path = attrs.get("file", "pod.yaml")
+ self.file_path = file_path = attrs.get("file", "pod.yaml")
try:
cfg = self.read_config_file()
- except IOError as ioerror:
- if ioerror.errno == errno.ENOENT:
- self.file_path = \
- os.path.join(consts.YARDSTICK_ROOT_PATH, self.file_path)
- cfg = self.read_config_file()
- else:
+ except IOError as io_error:
+ if io_error.errno != errno.ENOENT:
raise
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+ cfg = self.read_config_file()
+
self.nodes.extend(cfg["nodes"])
self.controllers.extend([node for node in cfg["nodes"]
if node["role"] == "Controller"])
@@ -76,6 +76,7 @@ class NodeContext(Context):
LOG.debug("BareMetals: %r", self.baremetals)
self.env = attrs.get('env', {})
+ self.attrs = attrs
LOG.debug("Env: %r", self.env)
# add optional static network definition
@@ -112,19 +113,17 @@ class NodeContext(Context):
def _do_ansible_job(self, path):
cmd = 'ansible-playbook -i inventory.ini %s' % path
- p = subprocess.Popen(cmd, shell=True, cwd=consts.ANSIBLE_DIR)
+ p = subprocess.Popen(cmd, shell=True, cwd=ANSIBLE_DIR)
p.communicate()
def _get_server(self, attr_name):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
"""
- if isinstance(attr_name, collections.Mapping):
+ node_name, name = self.split_name(attr_name)
+ if name is None or self.name != name:
return None
- if self.name != attr_name.split(".")[1]:
- return None
- node_name = attr_name.split(".")[0]
matching_nodes = (n for n in self.nodes if n["name"] == node_name)
try:
@@ -140,9 +139,10 @@ class NodeContext(Context):
pass
else:
raise ValueError("Duplicate nodes!!! Nodes: %s %s",
- (matching_nodes, duplicate))
+ (node, duplicate))
node["name"] = attr_name
+ node.setdefault("interfaces", {})
return node
def _get_network(self, attr_name):
@@ -151,12 +151,10 @@ class NodeContext(Context):
else:
# Don't generalize too much Just support vld_id
- vld_id = attr_name.get('vld_id')
- if vld_id is None:
- return None
-
- network = next((n for n in self.networks.values() if
- n.get("vld_id") == vld_id), None)
+ vld_id = attr_name.get('vld_id', {})
+ # for node context networks are dicts
+ iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
+ network = next(iter1, None)
if network is None:
return None
@@ -193,7 +191,7 @@ class NodeContext(Context):
def _execute_local_script(self, info):
script, options = self._get_script(info)
- script = os.path.join(consts.YARDSTICK_ROOT_PATH, script)
+ script = os.path.join(YARDSTICK_ROOT_PATH, script)
cmd = ['bash', script, options]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
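
The pod-file handling above now re-raises anything other than ENOENT and only then retries under YARDSTICK_ROOT_PATH, using yaml.safe_load instead of yaml.load. A minimal standalone sketch of that pattern (the root path is hypothetical):

    import errno
    import os
    import yaml

    YARDSTICK_ROOT_PATH = '/home/opnfv/repos/yardstick/'   # hypothetical root

    def read_pod_file(file_path):
        def _read(path):
            with open(path) as stream:
                return yaml.safe_load(stream)   # safe_load: no arbitrary object construction
        try:
            return _read(file_path)
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise                           # permission errors etc. still propagate
            return _read(os.path.join(YARDSTICK_ROOT_PATH, file_path))
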
diff --git a/yardstick/benchmark/contexts/standalone.py b/yardstick/benchmark/contexts/standalone.py
index 2bc1f3755..ae1046974 100644
--- a/yardstick/benchmark/contexts/standalone.py
+++ b/yardstick/benchmark/contexts/standalone.py
@@ -15,6 +15,7 @@
from __future__ import absolute_import
import logging
+import os
import errno
import collections
import yaml
@@ -41,14 +42,15 @@ class StandaloneContext(Context):
self.networks = {}
self.nfvi_node = []
self.nfvi_obj = None
- super(self.__class__, self).__init__()
+ self.attrs = {}
+ super(StandaloneContext, self).__init__()
def read_config_file(self):
"""Read from config file"""
with open(self.file_path) as stream:
LOG.info("Parsing pod file: %s", self.file_path)
- cfg = yaml.load(stream)
+ cfg = yaml.safe_load(stream)
return cfg
def get_nfvi_obj(self):
@@ -63,17 +65,15 @@ class StandaloneContext(Context):
"""initializes itself from the supplied arguments"""
self.name = attrs["name"]
- self.file_path = attrs.get("file", "pod.yaml")
- LOG.info("Parsing pod file: %s", self.file_path)
+ self.file_path = file_path = attrs.get("file", "pod.yaml")
try:
cfg = self.read_config_file()
- except IOError as ioerror:
- if ioerror.errno == errno.ENOENT:
- self.file_path = YARDSTICK_ROOT_PATH + self.file_path
- cfg = self.read_config_file()
- else:
+ except IOError as io_error:
+ if io_error.errno != errno.ENOENT:
raise
+ self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+ cfg = self.read_config_file()
self.vm_deploy = attrs.get("vm_deploy", True)
self.nodes.extend([node for node in cfg["nodes"]
@@ -90,6 +90,7 @@ class StandaloneContext(Context):
else:
LOG.debug("Node role is other than SRIOV and OVS")
self.nfvi_obj = self.get_nfvi_obj()
+ self.attrs = attrs
# add optional static network definition
self.networks.update(cfg.get("networks", {}))
self.nfvi_obj = self.get_nfvi_obj()
@@ -146,11 +147,10 @@ class StandaloneContext(Context):
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- if isinstance(attr_name, collections.Mapping):
- return None
- if self.name != attr_name.split(".")[1]:
+ node_name, name = self.split_name(attr_name)
+ if name is None or self.name != name:
return None
- node_name = attr_name.split(".")[0]
+
matching_nodes = (n for n in self.nodes if n["name"] == node_name)
try:
# A clone is created in order to avoid affecting the
@@ -165,7 +165,8 @@ class StandaloneContext(Context):
pass
else:
raise ValueError("Duplicate nodes!!! Nodes: %s %s",
- (matching_nodes, duplicate))
+ (node, duplicate))
+
node["name"] = attr_name
return node
@@ -175,14 +176,10 @@ class StandaloneContext(Context):
else:
# Don't generalize too much Just support vld_id
- vld_id = attr_name.get('vld_id')
- if vld_id is None:
- return None
- try:
- network = next(n for n in self.networks.values() if
- n.get("vld_id") == vld_id)
- except StopIteration:
- return None
+ vld_id = attr_name.get('vld_id', {})
+ # for standalone context networks are dicts
+ iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
+ network = next(iter1, None)
if network is None:
return None
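
Both the node and standalone contexts now share the same vld_id lookup shape: defaulting to {} instead of None means a missing vld_id simply never matches any network, and next() with a None default replaces the try/except StopIteration. A sketch over made-up network dicts:

    networks = {                       # hypothetical networks section of pod.yaml
        'mgmt': {'cidr': '10.0.0.0/24'},
        'xe0':  {'cidr': '10.0.1.0/24', 'vld_id': 'private'},
    }

    def lookup(attr_name):
        vld_id = attr_name.get('vld_id', {})   # {} never equals a real vld_id string
        return next((n for n in networks.values() if n.get('vld_id') == vld_id), None)

    assert lookup({'vld_id': 'private'}) == {'cidr': '10.0.1.0/24', 'vld_id': 'private'}
    assert lookup({}) is None
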
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index af508496f..2b10c61b3 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -13,6 +13,8 @@ from __future__ import absolute_import
from __future__ import print_function
import sys
import os
+from collections import OrderedDict
+
import yaml
import atexit
import ipaddress
@@ -121,6 +123,7 @@ class Task(object): # pragma: no cover
except KeyboardInterrupt:
raise
except Exception:
+ LOG.exception('')
testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
else:
testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
@@ -591,8 +594,9 @@ def _is_background_scenario(scenario):
def parse_nodes_with_context(scenario_cfg):
"""parse the 'nodes' fields in scenario """
- nodes = scenario_cfg["nodes"]
- return {nodename: Context.get_server(node) for nodename, node in nodes.items()}
+ # ensure consistency in node instantiation order
+ return OrderedDict((nodename, Context.get_server(scenario_cfg["nodes"][nodename]))
+ for nodename in sorted(scenario_cfg["nodes"]))
def get_networks_from_nodes(nodes):
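
parse_nodes_with_context now builds an OrderedDict keyed by sorted node name so that servers are always resolved in a stable order. Sketched below with a stub in place of Context.get_server (the real lookup goes through the contexts above); node names are hypothetical:

    from collections import OrderedDict

    scenario_cfg = {'nodes': {'vnf__1': 'vnf_0.demo', 'tg__1': 'tg_0.demo'}}  # hypothetical

    def get_server(node):             # stand-in for Context.get_server
        return {'name': node}

    nodes = OrderedDict((nodename, get_server(scenario_cfg['nodes'][nodename]))
                        for nodename in sorted(scenario_cfg['nodes']))
    assert list(nodes) == ['tg__1', 'vnf__1']   # deterministic, sorted order
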
diff --git a/yardstick/benchmark/scenarios/lib/migrate.py b/yardstick/benchmark/scenarios/lib/migrate.py
index 116bae69e..dd244c7ce 100644
--- a/yardstick/benchmark/scenarios/lib/migrate.py
+++ b/yardstick/benchmark/scenarios/lib/migrate.py
@@ -16,7 +16,7 @@ import threading
import time
from datetime import datetime
-import ping
+
from yardstick.common import openstack_utils
from yardstick.common.utils import change_obj_to_dict
@@ -28,6 +28,15 @@ TIMEOUT = 0.05
PACKAGE_SIZE = 64
+try:
+ import ping
+except ImportError:
+ # temp fix for ping module import error on Python3
+ # we need to replace the ping module anyway
+ import mock
+ ping = mock.MagicMock()
+
+
class Migrate(base.Scenario): # pragma: no cover
"""
Execute a live migration for two hosts
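
The guarded import above keeps migrate.py importable on Python 3, where the ping package is unavailable, by substituting a MagicMock until the dependency is replaced. The pattern in isolation (mock is already a yardstick dependency):

    try:
        import ping
    except ImportError:
        # the ping package does not import on Python 3; a MagicMock keeps this
        # module importable until the dependency is replaced
        import mock
        ping = mock.MagicMock()

With the mock in place, any later attribute access or call on ping just returns another MagicMock, so nothing fails at import time; real RTT measurements still require the genuine ping module.
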
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 9607e3005..af17a3150 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -20,11 +20,11 @@ import errno
import os
import re
+from itertools import chain
+import yaml
from operator import itemgetter
from collections import defaultdict
-import yaml
-
from yardstick.benchmark.scenarios import base
from yardstick.common.utils import import_modules_from_package, itersubclasses
from yardstick.network_services.collector.subscriber import Collector
@@ -80,6 +80,22 @@ class SshManager(object):
self.conn.close()
+def find_relative_file(path, task_path):
+ # fixme: create schema to validate all fields have been provided
+ try:
+ with open(path):
+ pass
+ return path
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ rel_path = os.path.join(task_path, path)
+ with open(rel_path):
+ pass
+ return rel_path
+
+
def open_relative_file(path, task_path):
try:
return open(path)
@@ -103,166 +119,176 @@ class NetworkServiceTestCase(base.Scenario):
# fixme: create schema to validate all fields have been provided
with open_relative_file(scenario_cfg["topology"],
scenario_cfg['task_path']) as stream:
- topology_yaml = yaml.load(stream)
+ topology_yaml = yaml.safe_load(stream)
self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
self.vnfs = []
self.collector = None
self.traffic_profile = None
- @classmethod
- def _get_traffic_flow(cls, scenario_cfg):
+ def _get_traffic_flow(self):
try:
- with open(scenario_cfg["traffic_options"]["flow"]) as fflow:
- flow = yaml.load(fflow)
+ with open(self.scenario_cfg["traffic_options"]["flow"]) as fflow:
+ flow = yaml.safe_load(fflow)
except (KeyError, IOError, OSError):
flow = {}
return flow
- @classmethod
- def _get_traffic_imix(cls, scenario_cfg):
+ def _get_traffic_imix(self):
try:
- with open(scenario_cfg["traffic_options"]["imix"]) as fimix:
- imix = yaml.load(fimix)
+ with open(self.scenario_cfg["traffic_options"]["imix"]) as fimix:
+ imix = yaml.safe_load(fimix)
except (KeyError, IOError, OSError):
imix = {}
return imix
- @classmethod
- def _get_traffic_profile(cls, scenario_cfg, context_cfg):
- traffic_profile_tpl = ""
- private = {}
- public = {}
- try:
- with open_relative_file(scenario_cfg["traffic_profile"],
- scenario_cfg["task_path"]) as infile:
- traffic_profile_tpl = infile.read()
-
- except (KeyError, IOError, OSError):
- raise
-
- return [traffic_profile_tpl, private, public]
-
- def _fill_traffic_profile(self, scenario_cfg, context_cfg):
- flow = self._get_traffic_flow(scenario_cfg)
-
- imix = self._get_traffic_imix(scenario_cfg)
-
- traffic_mapping, private, public = \
- self._get_traffic_profile(scenario_cfg, context_cfg)
-
- traffic_profile = vnfdgen.generate_vnfd(traffic_mapping,
- {"imix": imix, "flow": flow,
- "private": private,
- "public": public})
-
- return TrafficProfile.get(traffic_profile)
-
- @classmethod
- def _find_vnf_name_from_id(cls, topology, vnf_id):
+ def _get_traffic_profile(self):
+ profile = self.scenario_cfg["traffic_profile"]
+ path = self.scenario_cfg["task_path"]
+ with open_relative_file(profile, path) as infile:
+ return infile.read()
+
+ def _fill_traffic_profile(self):
+ traffic_mapping = self._get_traffic_profile()
+ traffic_map_data = {
+ 'flow': self._get_traffic_flow(),
+ 'imix': self._get_traffic_imix(),
+ 'private': {},
+ 'public': {},
+ }
+
+ traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
+ self.traffic_profile = TrafficProfile.get(traffic_vnfd)
+ return self.traffic_profile
+
+ def _find_vnf_name_from_id(self, vnf_id):
return next((vnfd["vnfd-id-ref"]
- for vnfd in topology["constituent-vnfd"]
+ for vnfd in self.topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
@staticmethod
def get_vld_networks(networks):
return {n['vld_id']: n for n in networks.values()}
- def _resolve_topology(self, context_cfg, topology):
- for vld in topology["vld"]:
+ def _resolve_topology(self):
+ for vld in self.topology["vld"]:
try:
- node_0, node_1 = vld["vnfd-connection-point-ref"]
- except (TypeError, ValueError):
+ node0_data, node1_data = vld["vnfd-connection-point-ref"]
+ except (ValueError, TypeError):
raise IncorrectConfig("Topology file corrupted, "
- "wrong number of endpoints for connection")
+ "wrong endpoint count for connection")
- node_0_name = self._find_vnf_name_from_id(topology,
- node_0["member-vnf-index-ref"])
- node_1_name = self._find_vnf_name_from_id(topology,
- node_1["member-vnf-index-ref"])
+ node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
+ node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
- node_0_ifname = node_0["vnfd-connection-point-ref"]
- node_1_ifname = node_1["vnfd-connection-point-ref"]
+ node0_if_name = node0_data["vnfd-connection-point-ref"]
+ node1_if_name = node1_data["vnfd-connection-point-ref"]
- node_0_if = context_cfg["nodes"][node_0_name]["interfaces"][node_0_ifname]
- node_1_if = context_cfg["nodes"][node_1_name]["interfaces"][node_1_ifname]
try:
- vld_networks = self.get_vld_networks(context_cfg["networks"])
+ nodes = self.context_cfg["nodes"]
+ node0_if = nodes[node0_name]["interfaces"][node0_if_name]
+ node1_if = nodes[node1_name]["interfaces"][node1_if_name]
- node_0_if["vld_id"] = vld["id"]
- node_1_if["vld_id"] = vld["id"]
+ # names so we can do reverse lookups
+ node0_if["ifname"] = node0_if_name
+ node1_if["ifname"] = node1_if_name
+
+ node0_if["node_name"] = node0_name
+ node1_if["node_name"] = node1_name
+
+ vld_networks = self.get_vld_networks(self.context_cfg["networks"])
+ node0_if["vld_id"] = vld["id"]
+ node1_if["vld_id"] = vld["id"]
# set peer name
- node_0_if["peer_name"] = node_1_name
- node_1_if["peer_name"] = node_0_name
+ node0_if["peer_name"] = node1_name
+ node1_if["peer_name"] = node0_name
# set peer interface name
- node_0_if["peer_ifname"] = node_1_ifname
- node_1_if["peer_ifname"] = node_0_ifname
+ node0_if["peer_ifname"] = node1_if_name
+ node1_if["peer_ifname"] = node0_if_name
- # just load the whole network dict
- node_0_if["network"] = vld_networks.get(vld["id"], {})
- node_1_if["network"] = vld_networks.get(vld["id"], {})
+ # just load the network
+ node0_if["network"] = vld_networks.get(vld["id"], {})
+ node1_if["network"] = vld_networks.get(vld["id"], {})
- node_0_if["dst_mac"] = node_1_if["local_mac"]
- node_0_if["dst_ip"] = node_1_if["local_ip"]
+ node0_if["dst_mac"] = node1_if["local_mac"]
+ node0_if["dst_ip"] = node1_if["local_ip"]
- node_1_if["dst_mac"] = node_0_if["local_mac"]
- node_1_if["dst_ip"] = node_0_if["local_ip"]
+ node1_if["dst_mac"] = node0_if["local_mac"]
+ node1_if["dst_ip"] = node0_if["local_ip"]
- # add peer interface dict, but remove circular link
- # TODO: don't waste memory
- node_0_copy = node_0_if.copy()
- node_1_copy = node_1_if.copy()
- node_0_if["peer_intf"] = node_1_copy
- node_1_if["peer_intf"] = node_0_copy
except KeyError:
+ LOG.exception("")
raise IncorrectConfig("Required interface not found, "
"topology file corrupted")
- @classmethod
- def _find_list_index_from_vnf_idx(cls, topology, vnf_idx):
- return next((topology["constituent-vnfd"].index(vnfd)
- for vnfd in topology["constituent-vnfd"]
+ for vld in self.topology['vld']:
+ try:
+ node0_data, node1_data = vld["vnfd-connection-point-ref"]
+ except (ValueError, TypeError):
+ raise IncorrectConfig("Topology file corrupted, "
+ "wrong endpoint count for connection")
+
+ node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
+ node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
+
+ node0_if_name = node0_data["vnfd-connection-point-ref"]
+ node1_if_name = node1_data["vnfd-connection-point-ref"]
+
+ nodes = self.context_cfg["nodes"]
+ node0_if = nodes[node0_name]["interfaces"][node0_if_name]
+ node1_if = nodes[node1_name]["interfaces"][node1_if_name]
+
+ # add peer interface dict, but remove circular link
+ # TODO: don't waste memory
+ node0_copy = node0_if.copy()
+ node1_copy = node1_if.copy()
+ node0_if["peer_intf"] = node1_copy
+ node1_if["peer_intf"] = node0_copy
+
+ def _find_vnfd_from_vnf_idx(self, vnf_idx):
+ return next((vnfd for vnfd in self.topology["constituent-vnfd"]
if vnf_idx == vnfd["member-vnf-index"]), None)
- def _update_context_with_topology(self, context_cfg, topology):
- for idx in topology["constituent-vnfd"]:
- vnf_idx = idx["member-vnf-index"]
- nodes = context_cfg["nodes"]
- node = self._find_vnf_name_from_id(topology, vnf_idx)
- list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
- nodes[node].update(topology["constituent-vnfd"][list_idx])
+ def _update_context_with_topology(self):
+ for vnfd in self.topology["constituent-vnfd"]:
+ vnf_idx = vnfd["member-vnf-index"]
+ vnf_name = self._find_vnf_name_from_id(vnf_idx)
+ vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
+ self.context_cfg["nodes"][vnf_name].update(vnfd)
@staticmethod
def _sort_dpdk_port_num(netdevs):
# dpdk_port_num is PCI BUS ID ordering, lowest first
s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
- for dpdk_port_num, netdev in enumerate(s, 1):
+ for dpdk_port_num, netdev in enumerate(s):
netdev['dpdk_port_num'] = dpdk_port_num
@classmethod
def _probe_missing_values(cls, netdevs, network, missing):
- mac = network['local_mac']
+ mac_lower = network['local_mac'].lower()
for netdev in netdevs.values():
- if netdev['address'].lower() == mac.lower():
- network['driver'] = netdev['driver']
- network['vpci'] = netdev['pci_bus_id']
- network['dpdk_port_num'] = netdev['dpdk_port_num']
- network['ifindex'] = netdev['ifindex']
+ if netdev['address'].lower() != mac_lower:
+ continue
+ network.update({
+ 'driver': netdev['driver'],
+ 'vpci': netdev['pci_bus_id'],
+ 'ifindex': netdev['ifindex'],
+ })
TOPOLOGY_REQUIRED_KEYS = frozenset({
- "vpci", "local_ip", "netmask", "local_mac", "driver", "dpdk_port_num"})
+ "vpci", "local_ip", "netmask", "local_mac", "driver"})
- def map_topology_to_infrastructure(self, context_cfg, topology):
+ def map_topology_to_infrastructure(self):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
+ :param context_cfg:
:param topology:
:return: None. Side effect: context_cfg is updated
"""
-
- for node, node_dict in context_cfg["nodes"].items():
+ for node, node_dict in self.context_cfg["nodes"].items():
cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
with SshManager(node_dict) as conn:
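
map_topology_to_infrastructure now fills interface gaps by matching each interface's local_mac against the netdevs parsed from sysfs, copying driver, vpci and ifindex (dpdk_port_num was dropped from both the probe and TOPOLOGY_REQUIRED_KEYS). A standalone sketch of that matching step, with fabricated netdev data and the unused missing argument omitted:

    def probe_missing_values(netdevs, network):
        # case-insensitive MAC match, then copy the probed fields onto the interface
        mac_lower = network['local_mac'].lower()
        for netdev in netdevs.values():
            if netdev['address'].lower() != mac_lower:
                continue
            network.update({
                'driver': netdev['driver'],
                'vpci': netdev['pci_bus_id'],
                'ifindex': netdev['ifindex'],
            })

    netdevs = {'eth0': {'address': 'FA:16:3E:00:00:01', 'driver': 'virtio_pci',
                        'pci_bus_id': '0000:00:04.0', 'ifindex': '2'}}
    network = {'local_mac': 'fa:16:3e:00:00:01', 'local_ip': '10.0.1.5',
               'netmask': '255.255.255.0'}
    probe_missing_values(netdevs, network)
    assert network['vpci'] == '0000:00:04.0'
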
@@ -276,28 +302,28 @@ class NetworkServiceTestCase(base.Scenario):
"Cannot find netdev info in sysfs" % node)
netdevs = node_dict['netdevs'] = self.parse_netdev_info(
stdout)
- self._sort_dpdk_port_num(netdevs)
for network in node_dict["interfaces"].values():
missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
+ if not missing:
+ continue
+
+ try:
+ self._probe_missing_values(netdevs, network,
+ missing)
+ except KeyError:
+ pass
+ else:
+ missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
+ network)
if missing:
- try:
- self._probe_missing_values(netdevs, network,
- missing)
- except KeyError:
- pass
- else:
- missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
- network)
- if missing:
- raise IncorrectConfig(
- "Require interface fields '%s' "
- "not found, topology file "
- "corrupted" % ', '.join(missing))
+ raise IncorrectConfig(
+ "Require interface fields '%s' not found, topology file "
+ "corrupted" % ', '.join(missing))
# 3. Use topology file to find connections & resolve dest address
- self._resolve_topology(context_cfg, topology)
- self._update_context_with_topology(context_cfg, topology)
+ self._resolve_topology()
+ self._update_context_with_topology()
FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
@@ -361,7 +387,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
node_intf = node['interfaces'][intf['name']]
intf['virtual-interface'].update(node_intf)
- def load_vnf_models(self, scenario_cfg, context_cfg):
+ def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
""" Create VNF objects based on YAML descriptors
:param scenario_cfg:
@@ -369,21 +395,29 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
:param context_cfg:
:return:
"""
+ if scenario_cfg is None:
+ scenario_cfg = self.scenario_cfg
+
+ if context_cfg is None:
+ context_cfg = self.context_cfg
+
vnfs = []
+ # we assume OrderedDict for consistency in instantiation
for node_name, node in context_cfg["nodes"].items():
LOG.debug(node)
- with open_relative_file(node["VNF model"],
- scenario_cfg['task_path']) as stream:
+ file_name = node["VNF model"]
+ file_path = scenario_cfg['task_path']
+ with open_relative_file(file_name, file_path) as stream:
vnf_model = stream.read()
vnfd = vnfdgen.generate_vnfd(vnf_model, node)
# TODO: here add extra context_cfg["nodes"] regardless of template
vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
self.update_interfaces_from_node(vnfd, node)
vnf_impl = self.get_vnf_impl(vnfd['id'])
- vnf_instance = vnf_impl(vnfd)
- vnf_instance.name = node_name
+ vnf_instance = vnf_impl(node_name, vnfd)
vnfs.append(vnf_instance)
+ self.vnfs = vnfs
return vnfs
def setup(self):
@@ -392,18 +426,25 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
:return:
"""
# 1. Verify if infrastructure mapping can meet topology
- self.map_topology_to_infrastructure(self.context_cfg, self.topology)
+ self.map_topology_to_infrastructure()
# 1a. Load VNF models
- self.vnfs = self.load_vnf_models(self.scenario_cfg, self.context_cfg)
+ self.load_vnf_models()
# 1b. Fill traffic profile with information from topology
- self.traffic_profile = self._fill_traffic_profile(self.scenario_cfg,
- self.context_cfg)
+ self._fill_traffic_profile()
# 2. Provision VNFs
+
+ # link events will cause VNF application to exit
+ # so we should start traffic runners before VNFs
+ traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+ non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
try:
- for vnf in self.vnfs:
+ for vnf in chain(traffic_runners, non_traffic_runners):
LOG.info("Instantiating %s", vnf.name)
vnf.instantiate(self.scenario_cfg, self.context_cfg)
+ for vnf in chain(traffic_runners, non_traffic_runners):
+ LOG.info("Waiting for %s to instantiate", vnf.name)
+ vnf.wait_for_instantiate()
except RuntimeError:
for vnf in self.vnfs:
vnf.terminate()
@@ -411,7 +452,6 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
# 3. Run experiment
# Start listeners first to avoid losing packets
- traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
for traffic_gen in traffic_runners:
traffic_gen.listen_traffic(self.traffic_profile)
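
The reordered setup() starts traffic generators before the other VNFs because link events raised while a generator is still down can make a VNF application exit; itertools.chain preserves that ordering for both the instantiate and wait_for_instantiate loops. A reduced sketch with dummy VNF objects standing in for the real subclasses:

    from itertools import chain

    class DummyVNF(object):                      # stand-in for the real VNF classes
        def __init__(self, name, runs_traffic):
            self.name = name
            self.runs_traffic = runs_traffic
        def instantiate(self, scenario_cfg, context_cfg):
            print('instantiating %s' % self.name)
        def wait_for_instantiate(self):
            print('ready: %s' % self.name)

    vnfs = [DummyVNF('vnf_0', False), DummyVNF('tg_0', True)]
    traffic_runners = [v for v in vnfs if v.runs_traffic]
    non_traffic_runners = [v for v in vnfs if not v.runs_traffic]

    for vnf in chain(traffic_runners, non_traffic_runners):
        vnf.instantiate({}, {})                  # tg_0 first, then vnf_0
    for vnf in chain(traffic_runners, non_traffic_runners):
        vnf.wait_for_instantiate()
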