Diffstat (limited to 'yardstick/network_services')
-rw-r--r--  yardstick/network_services/collector/subscriber.py       | 31
-rw-r--r--  yardstick/network_services/nfvi/resource.py              | 73
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py   | 16
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/router_vnf.py |  4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 58
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/udp_replay.py | 11
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py    |  5
7 files changed, 157 insertions(+), 41 deletions(-)
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 322b3f5a2..937c266a6 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -14,17 +14,36 @@
"""This module implements stub for publishing results in yardstick format."""
import logging
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.utils import get_nsb_option
+
+
LOG = logging.getLogger(__name__)
class Collector(object):
"""Class that handles dictionary of results in yardstick-plot format."""
- def __init__(self, vnfs):
+ def __init__(self, vnfs, contexts_nodes, timeout=3600):
super(Collector, self).__init__()
self.vnfs = vnfs
+ self.nodes = contexts_nodes
+ self.bin_path = get_nsb_option('bin_path', '')
+ self.resource_profiles = {}
+
+ for ctx_name, nodes in contexts_nodes.items():
+ for node in (node for node in nodes if node.get('collectd')):
+ name = ".".join([node['name'], ctx_name])
+ self.resource_profiles.update(
+ {name: ResourceProfile.make_from_node(node, timeout)}
+ )
def start(self):
+ for resource in self.resource_profiles.values():
+ resource.initiate_systemagent(self.bin_path)
+ resource.start()
+ resource.amqp_process_for_nfvi_kpi()
+
for vnf in self.vnfs:
vnf.start_collect()
@@ -32,6 +51,9 @@ class Collector(object):
for vnf in self.vnfs:
vnf.stop_collect()
+ for resource in self.resource_profiles.values():
+ resource.stop()
+
def get_kpi(self):
"""Returns dictionary of results in yardstick-plot format
@@ -42,7 +64,12 @@ class Collector(object):
for vnf in self.vnfs:
# Result example:
# {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", vnf.name)
+ LOG.debug("collect KPI for vnf %s", vnf.name)
results[vnf.name] = vnf.collect_kpi()
+ for node_name, resource in self.resource_profiles.items():
+ LOG.debug("collect KPI for nfvi_node %s", node_name)
+ results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
+ LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
+
return results
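
With this change the Collector polls baremetal NFVI nodes over AMQP alongside
the per-VNF KPIs. A minimal usage sketch, assuming a trimmed node dict with
illustrative SSH credentials (a real node carries the full pod descriptor) and
an empty VNF list:

from yardstick.network_services.collector.subscriber import Collector

contexts_nodes = {
    "baremetal-ctx": [
        # only nodes with a 'collectd' section get a ResourceProfile,
        # keyed "<node name>.<context name>" in the results
        {"name": "node1", "ip": "10.0.0.5", "user": "root",
         "password": "r00t", "collectd": {"interval": 5}},
        {"name": "node2", "ip": "10.0.0.6", "user": "root"},  # not polled
    ],
}
collector = Collector([], contexts_nodes, timeout=1800)
collector.start()              # initiates collectd + one AMQP consumer per node
results = collector.get_kpi()  # {"node1.baremetal-ctx": {"core": {...}}}
collector.stop()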
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index 0c0bf223a..5922bd3b9 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -31,6 +31,7 @@ from yardstick.common.exceptions import ResourceCommandError
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
+from yardstick.benchmark.contexts import heat
LOG = logging.getLogger(__name__)
@@ -52,7 +53,8 @@ class ResourceProfile(object):
DEFAULT_TIMEOUT = 3600
OVS_SOCKET_PATH = "/usr/local/var/run/openvswitch/db.sock"
- def __init__(self, mgmt, port_names=None, plugins=None, interval=None, timeout=None):
+ def __init__(self, mgmt, port_names=None, plugins=None,
+ interval=None, timeout=None, reset_mq_flag=True):
if plugins is None:
self.plugins = {}
@@ -77,6 +79,7 @@ class ResourceProfile(object):
# we need to save mgmt so we can connect to port 5672
self.mgmt = mgmt
self.connection = ssh.AutoConnectSSH.from_node(mgmt)
+ self._reset_mq_flag = reset_mq_flag
@classmethod
def make_from_node(cls, node, timeout):
@@ -87,7 +90,10 @@ class ResourceProfile(object):
plugins = collectd_options.get("plugins", {})
interval = collectd_options.get("interval")
- return cls(node, plugins=plugins, interval=interval, timeout=timeout)
+ reset_mq_flag = (False if node.get("ctx_type") == heat.HeatContext.__context_type__
+ else True)
+ return cls(node, plugins=plugins, interval=interval,
+ timeout=timeout, reset_mq_flag=reset_mq_flag)
def check_if_system_agent_running(self, process):
""" verify if system agent is running """
@@ -210,11 +216,14 @@ class ResourceProfile(object):
if not self.enable:
return {}
+ if self.check_if_system_agent_running("collectd")[0] != 0:
+ return {}
+
metric = {}
while not self._queue.empty():
metric.update(self._queue.get())
- msg = self.parse_collectd_result(metric)
- return msg
+
+ return self.parse_collectd_result(metric)
def _provide_config_file(self, config_file_path, nfvi_cfg, template_kwargs):
template = pkg_resources.resource_string("yardstick.network_services.nfvi",
@@ -250,7 +259,7 @@ class ResourceProfile(object):
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
- def _start_rabbitmq(self, connection):
+ def _reset_rabbitmq(self, connection):
# Reset amqp queue
LOG.debug("reset and setup amqp to collect data from collectd")
# ensure collectd.conf.d exists to avoid error/warning
@@ -263,10 +272,37 @@ class ResourceProfile(object):
"sudo rabbitmqctl authenticate_user admin admin",
"sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
]
+
for cmd in cmd_list:
- exit_status, stdout, stderr = connection.execute(cmd)
- if exit_status != 0:
- raise ResourceCommandError(command=cmd, stderr=stderr)
+ exit_status, _, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
+ def _check_rabbitmq_user(self, connection, user='admin'):
+ exit_status, stdout, _ = connection.execute("sudo rabbitmqctl list_users")
+ if exit_status == 0:
+ for line in stdout.split('\n')[1:]:
+ if line.split('\t')[0] == user:
+ return True
+
+ def _set_rabbitmq_admin_user(self, connection):
+ LOG.debug("add admin user to amqp")
+ cmd_list = ["sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+
+ for cmd in cmd_list:
+ exit_status, stdout, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stdout=stdout, stderr=stderr)
+
+ def _start_rabbitmq(self, connection):
+ if self._reset_mq_flag:
+ self._reset_rabbitmq(connection)
+ else:
+ if not self._check_rabbitmq_user(connection):
+ self._set_rabbitmq_admin_user(connection)
# check stdout for "sudo rabbitmqctl status" command
cmd = "sudo rabbitmqctl status"
@@ -282,10 +318,11 @@ class ResourceProfile(object):
self._prepare_collectd_conf(config_file_path)
connection.execute('sudo pkill -x -9 collectd')
- exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
+ cmd = "which %s > /dev/null 2>&1" % collectd_path
+ exit_status, _, stderr = connection.execute(cmd)
if exit_status != 0:
- LOG.warning("%s is not present disabling", collectd_path)
- return
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
@@ -293,8 +330,12 @@ class ResourceProfile(object):
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
# intel_pmu plugin requires a large number of open files, so try to set
# ulimit -n to a large value
- connection.execute("sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path,
- timeout=self.timeout)
+
+ cmd = "sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path
+ exit_status, _, stderr = connection.execute(cmd, timeout=self.timeout)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
LOG.debug("Done")
def initiate_systemagent(self, bin_path):
@@ -334,5 +375,7 @@ class ResourceProfile(object):
if pid:
self.connection.execute('sudo kill -9 "%s"' % pid)
self.connection.execute('sudo pkill -9 "%s"' % agent)
- self.connection.execute('sudo service rabbitmq-server stop')
- self.connection.execute("sudo rabbitmqctl stop_app")
+
+ if self._reset_mq_flag:
+ self.connection.execute('sudo service rabbitmq-server stop')
+ self.connection.execute("sudo rabbitmqctl stop_app")
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index cb97f7711..9d90ddb47 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -23,6 +23,7 @@ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfS
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
from yardstick.network_services import constants
+from yardstick.benchmark.contexts import base as context_base
LOG = logging.getLogger(__name__)
@@ -68,15 +69,20 @@ class ProxApproxVnf(SampleVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
- check_if_process_failed(self._vnf_process)
+ check_if_process_failed(self._vnf_process, 0.01)
+
+ physical_node = context_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
if self.resource_helper is None:
- result = {
+ result.update({
"packets_in": 0,
"packets_dropped": 0,
"packets_fwd": 0,
"collect_stats": {"core": {}},
- }
+ })
return result
# use all_ports so we only use ports matched in topology
@@ -96,14 +102,14 @@ class ProxApproxVnf(SampleVNF):
LOG.error("Invalid data ...")
return {}
- result = {
+ result.update({
"packets_in": rx_total,
"packets_dropped": max((tx_total - rx_total), 0),
"packets_fwd": tx_total,
# we share ProxResourceHelper with TG, but we want to collect
# collectd KPIs here and not TG KPIs, so use a different method name
"collect_stats": self.resource_helper.collect_collectd_kpi(),
- }
+ })
try:
curr_packets_in = int((rx_total - self.prev_packets_in)
/ (curr_time - self.prev_time))
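
Every collect_kpi() touched by this patch (here and in router_vnf, sample_vnf,
udp_replay and vpe_vnf below) now seeds the result dict with the hosting
physical node and updates the counters into it, instead of rebuilding the dict
in each branch. The resulting shape, with made-up values ("pod12-node4" stands
for whatever Context.get_physical_node_from_server() returns):

result = {
    "physical_node": "pod12-node4",
    "packets_in": 1000,
    "packets_dropped": 0,
    "packets_fwd": 1000,
    "collect_stats": {"core": {}},  # collectd KPIs; empty when collection is off
}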
diff --git a/yardstick/network_services/vnf_generic/vnf/router_vnf.py b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
index aea27ffa6..90b7b215e 100644
--- a/yardstick/network_services/vnf_generic/vnf/router_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/router_vnf.py
@@ -47,7 +47,6 @@ class RouterVNF(SampleVNF):
def instantiate(self, scenario_cfg, context_cfg):
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
self.configure_routes(self.name, scenario_cfg, context_cfg)
def wait_for_instantiate(self):
@@ -107,8 +106,11 @@ class RouterVNF(SampleVNF):
stdout = self.ssh_helper.execute(ip_link_stats)[1]
link_stats = self.get_stats(stdout)
# get RX/TX from link_stats and assign to results
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
result = {
+ "physical_node": physical_node,
"packets_in": 0,
"packets_dropped": 0,
"packets_fwd": 0,
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 9ff2e7d1a..3fe3f8b75 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -39,7 +39,7 @@ from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
-
+from yardstick.benchmark.contexts.node import NodeContext
LOG = logging.getLogger(__name__)
@@ -319,6 +319,7 @@ class ResourceHelper(object):
self.resource = None
self.setup_helper = setup_helper
self.ssh_helper = setup_helper.ssh_helper
+ self._enable = True
def setup(self):
self.resource = self.setup_helper.setup_vnf_environment()
@@ -326,22 +327,33 @@ class ResourceHelper(object):
def generate_cfg(self):
pass
+ def update_from_context(self, context, attr_name):
+ """Disable resource helper in case of baremetal context.
+
+        Also update the node's collectd options in the context.
+ """
+ if isinstance(context, NodeContext):
+ self._enable = False
+ context.update_collectd_options_for_node(self.setup_helper.collectd_options,
+ attr_name)
+
def _collect_resource_kpi(self):
result = {}
status = self.resource.check_if_system_agent_running("collectd")[0]
- if status == 0:
+ if status == 0 and self._enable:
result = self.resource.amqp_collect_nfvi_kpi()
result = {"core": result}
return result
def start_collect(self):
- self.resource.initiate_systemagent(self.ssh_helper.bin_path)
- self.resource.start()
- self.resource.amqp_process_for_nfvi_kpi()
+ if self._enable:
+ self.resource.initiate_systemagent(self.ssh_helper.bin_path)
+ self.resource.start()
+ self.resource.amqp_process_for_nfvi_kpi()
def stop_collect(self):
- if self.resource:
+ if self.resource and self._enable:
self.resource.stop()
def collect_kpi(self):
@@ -631,7 +643,6 @@ class SampleVNF(GenericVNF):
self.resource_helper = resource_helper_type(self.setup_helper)
self.context_cfg = None
- self.nfvi_context = None
self.pipeline_kwargs = {}
self.uplink_ports = None
self.downlink_ports = None
@@ -658,8 +669,10 @@ class SampleVNF(GenericVNF):
self._update_collectd_options(scenario_cfg, context_cfg)
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
- # self.nfvi_context = None
+ self.resource_helper.update_from_context(
+ Context.get_context_from_server(self.scenario_helper.nodes[self.name]),
+ self.scenario_helper.nodes[self.name]
+ )
# vnf deploy is unsupported, use ansible playbooks
if self.scenario_helper.options.get("vnf_deploy", False):
@@ -813,15 +826,18 @@ class SampleVNF(GenericVNF):
check_if_process_failed(self._vnf_process)
stats = self.get_stats()
m = re.search(self.COLLECT_KPI, stats, re.MULTILINE)
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
if m:
- result = {k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()}
+ result.update({k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()})
result["collect_stats"] = self.resource_helper.collect_kpi()
else:
- result = {
- "packets_in": 0,
- "packets_fwd": 0,
- "packets_dropped": 0,
- }
+ result.update({"packets_in": 0,
+ "packets_fwd": 0,
+ "packets_dropped": 0})
+
LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
return result
@@ -867,6 +883,11 @@ class SampleVNFTrafficGen(GenericTrafficGen):
def instantiate(self, scenario_cfg, context_cfg):
self.scenario_helper.scenario_cfg = scenario_cfg
+ self.resource_helper.update_from_context(
+ Context.get_context_from_server(self.scenario_helper.nodes[self.name]),
+ self.scenario_helper.nodes[self.name]
+ )
+
self.resource_helper.setup()
# must generate_cfg after DPDK bind because we need port number
self.resource_helper.generate_cfg()
@@ -921,9 +942,14 @@ class SampleVNFTrafficGen(GenericTrafficGen):
def collect_kpi(self):
# check if the tg processes have exited
+ physical_node = Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
+ result = {"physical_node": physical_node}
for proc in (self._tg_process, self._traffic_process):
check_if_process_failed(proc)
- result = self.resource_helper.collect_kpi()
+
+ result["collect_stats"] = self.resource_helper.collect_kpi()
LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
return result
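
update_from_context is the hook that shifts collectd KPI collection from the
per-VNF ResourceHelper to the central Collector on baremetal. A condensed,
self-contained sketch of the gating, with a fake class standing in for
NodeContext and simplified method bodies:

class FakeNodeContext(object):
    """Stand-in for yardstick.benchmark.contexts.node.NodeContext."""
    def update_collectd_options_for_node(self, options, attr_name):
        print("context now owns collectd options for %s: %s" % (attr_name, options))

class ResourceHelperSketch(object):
    def __init__(self, collectd_options):
        self._enable = True  # helper collects its own KPIs by default
        self.collectd_options = collectd_options

    def update_from_context(self, context, attr_name):
        # On baremetal the Collector owns the ResourceProfile, so the helper
        # hands its collectd options to the context and disables itself.
        if isinstance(context, FakeNodeContext):
            self._enable = False
            context.update_collectd_options_for_node(self.collectd_options, attr_name)

    def start_collect(self):
        if self._enable:
            print("starting collectd + AMQP consumer from the helper")
        # otherwise Collector.start() drives collection instead

helper = ResourceHelperSketch({"interval": 5})
helper.update_from_context(FakeNodeContext(), "vnf__0")
helper.start_collect()  # no-op: collection was handed to the Collector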
diff --git a/yardstick/network_services/vnf_generic/vnf/udp_replay.py b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
index a57f53bc7..fa92744d8 100644
--- a/yardstick/network_services/vnf_generic/vnf/udp_replay.py
+++ b/yardstick/network_services/vnf_generic/vnf/udp_replay.py
@@ -19,7 +19,7 @@ from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
-
+from yardstick.benchmark.contexts import base as ctx_base
LOG = logging.getLogger(__name__)
@@ -79,9 +79,11 @@ class UdpReplayApproxVnf(SampleVNF):
ports_mask_hex = hex(sum(2 ** num for num in port_nums))
# one core extra for master
cpu_mask_hex = hex(2 ** (number_of_ports + 1) - 1)
+ nfvi_context = ctx_base.Context.get_context_from_server(
+ self.scenario_helper.nodes[self.name])
hw_csum = ""
if (not self.scenario_helper.options.get('hw_csum', False) or
- self.nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES):
+ nfvi_context.attrs.get('nfvi_type') not in self.HW_OFFLOADING_NFVI_TYPES):
hw_csum = '--no-hw-csum'
# tuples of (FLD_PORT, FLD_QUEUE, FLD_LCORE)
@@ -116,7 +118,12 @@ class UdpReplayApproxVnf(SampleVNF):
stats = self.get_stats()
stats_words = stats.split()
split_stats = stats_words[stats_words.index('0'):][:number_of_ports * 5]
+
+ physical_node = ctx_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
result = {
+ "physical_node": physical_node,
"packets_in": get_sum(1),
"packets_fwd": get_sum(2),
"packets_dropped": get_sum(3) + get_sum(4),
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index 9deef5cfa..bfff45c67 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -28,6 +28,7 @@ from yardstick.common.process import check_if_process_failed
from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.pipeline import PipelineRules
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
+from yardstick.benchmark.contexts import base as ctx_base
LOG = logging.getLogger(__name__)
@@ -302,7 +303,11 @@ class VpeApproxVnf(SampleVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
check_if_process_failed(self._vnf_process)
+ physical_node = ctx_base.Context.get_physical_node_from_server(
+ self.scenario_helper.nodes[self.name])
+
result = {
+ "physical_node": physical_node,
'pkt_in_up_stream': 0,
'pkt_drop_up_stream': 0,
'pkt_in_down_stream': 0,