Diffstat (limited to 'yardstick/benchmark/scenarios')
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py     27
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py   12
-rw-r--r--  yardstick/benchmark/scenarios/base.py                                     4
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py                 209
4 files changed, 192 insertions, 60 deletions
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index 971bae1e9..8f1f53cde 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -62,20 +62,19 @@ class MultiMonitor(basemonitor.BaseMonitor):
         outage_time = (
             last_outage - first_outage if last_outage > first_outage else 0
         )
+        self._result = {"outage_time": outage_time}
         LOG.debug("outage_time is: %f", outage_time)
         max_outage_time = 0
-        if "max_outage_time" in self._config["sla"]:
-            max_outage_time = self._config["sla"]["max_outage_time"]
-        elif "max_recover_time" in self._config["sla"]:
-            max_outage_time = self._config["sla"]["max_recover_time"]
-        else:
-            raise RuntimeError("'max_outage_time' or 'max_recover_time' "
-                               "config is not found")
-        self._result = {"outage_time": outage_time}
-
-        if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
-            return False
-        else:
-            return True
+        if self._config.get("sla"):
+            if "max_outage_time" in self._config["sla"]:
+                max_outage_time = self._config["sla"]["max_outage_time"]
+            elif "max_recover_time" in self._config["sla"]:
+                max_outage_time = self._config["sla"]["max_recover_time"]
+            else:
+                raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+                                   "config is not found")
+            if outage_time > max_outage_time:
+                LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+                return False
+        return True
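The net effect of this hunk: verify_SLA() now records outage_time unconditionally and treats the "sla" section as optional, failing only when an SLA is both configured and breached. A minimal standalone sketch of the new control flow (the verify_sla function and the config dicts are illustrative stand-ins, not yardstick fixtures):

    def verify_sla(outage_time, config):
        # mirrors the rewritten MultiMonitor.verify_SLA() logic
        if config.get("sla"):
            sla = config["sla"]
            if "max_outage_time" in sla:
                max_outage_time = sla["max_outage_time"]
            elif "max_recover_time" in sla:
                max_outage_time = sla["max_recover_time"]
            else:
                raise RuntimeError("'max_outage_time' or 'max_recover_time' "
                                   "config is not found")
            if outage_time > max_outage_time:
                return False          # SLA configured and breached
        return True                   # no SLA section, or SLA met

    assert verify_sla(5.0, {}) is True                                  # no SLA: always passes
    assert verify_sla(5.0, {"sla": {"max_outage_time": 2.0}}) is False  # breached
    assert verify_sla(1.0, {"sla": {"max_recover_time": 2.0}}) is True  # met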
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index 8d2f2633c..280e5811d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -46,12 +46,12 @@ class MonitorProcess(basemonitor.BaseMonitor):
     def verify_SLA(self):
         outage_time = self._result.get('outage_time', None)
-        max_outage_time = self._config["sla"]["max_recover_time"]
-        if outage_time > max_outage_time:
-            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
-            return False
-        else:
-            return True
+        if self._config.get("sla"):
+            max_outage_time = self._config["sla"]["max_recover_time"]
+            if outage_time > max_outage_time:
+                LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
+                return False
+        return True


 def _test():  # pragma: no cover
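Same pattern as the monitor_multi.py hunk, with one caveat worth noting when reading it: outage_time comes from self._result.get('outage_time', None), so if no outage was ever recorded while an SLA is configured, the comparison runs against None, which Python 3 rejects. A quick illustration (plain Python, independent of yardstick):

    # what _result.get('outage_time', None) returns when no outage was recorded
    outage_time = None
    try:
        outage_time > 2.0       # Python 3 refuses to order None and float
    except TypeError as exc:
        print("comparison failed:", exc)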
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 1737bb942..ae8bfad71 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -122,7 +122,3 @@ class Scenario(object):
             except TypeError:
                 dic[k] = v
         return dic
-
-    def get_mq_ids(self):  # pragma: no cover
-        """Return stored MQ producer IDs, if defined"""
-        pass
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index 5ac51cdfc..c5e75d093 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2016-2019 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -44,14 +44,13 @@ traffic_profile.register_modules()
 LOG = logging.getLogger(__name__)


-class NetworkServiceTestCase(scenario_base.Scenario):
-    """Class handles Generic framework to do pre-deployment VNF &
-    Network service testing """
+class NetworkServiceBase(scenario_base.Scenario):
+    """Base class for Network service testing scenarios"""

-    __scenario_type__ = "NSPerf"
+    __scenario_type__ = ""

     def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
-        super(NetworkServiceTestCase, self).__init__()
+        super(NetworkServiceBase, self).__init__()
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
@@ -61,7 +60,32 @@ class NetworkServiceTestCase(scenario_base.Scenario):
         self.traffic_profile = None
         self.node_netdevs = {}
         self.bin_path = get_nsb_option('bin_path', '')
-        self._mq_ids = []
+
+    def run(self, *args):
+        pass
+
+    def teardown(self):
+        """ Stop the collector and terminate VNF & TG instance
+
+        :return
+        """
+
+        try:
+            try:
+                self.collector.stop()
+                for vnf in self.vnfs:
+                    LOG.info("Stopping %s", vnf.name)
+                    vnf.terminate()
+                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
+            finally:
+                terminate_children()
+        except Exception:
+            # catch any exception in teardown and convert to simple exception
+            # never pass exceptions back to multiprocessing, because some exceptions can
+            # be unpicklable
+            # https://bugs.python.org/issue9400
+            LOG.exception("")
+            raise RuntimeError("Error in teardown")

     def is_ended(self):
         return self.traffic_profile is not None and self.traffic_profile.is_ended()
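The comment block in the teardown above points at a real constraint: the exception raised here may cross a multiprocessing boundary, and exceptions do not always survive the pickle round-trip (https://bugs.python.org/issue9400), so teardown collapses everything into a plain RuntimeError. A self-contained sketch of the failure mode it avoids, using a hypothetical CustomError:

    import pickle

    class CustomError(Exception):
        # hypothetical exception whose constructor signature breaks unpickling
        def __init__(self, code, message):
            super(CustomError, self).__init__(message)
            self.code = code

    # pickling succeeds, but unpickling re-calls CustomError("boom") with one
    # argument and raises TypeError -- the scenario described by issue9400
    data = pickle.dumps(CustomError(1, "boom"))
    try:
        pickle.loads(data)
    except TypeError as exc:
        print("unpickle failed:", exc)

A bare RuntimeError("Error in teardown") carries a single string argument, so it always round-trips cleanly back to the parent process.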
@@ -140,6 +164,13 @@ class NetworkServiceTestCase(scenario_base.Scenario):
             imix = {}
         return imix

+    def _get_ip_priority(self):
+        try:
+            priority = self.scenario_cfg['options']['priority']
+        except KeyError:
+            priority = {}
+        return priority
+
     def _get_traffic_profile(self):
         profile = self.scenario_cfg["traffic_profile"]
         path = self.scenario_cfg["task_path"]
@@ -177,6 +208,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
         tprofile_data = {
             'flow': self._get_traffic_flow(),
             'imix': self._get_traffic_imix(),
+            'priority': self._get_ip_priority(),
             tprofile_base.TrafficProfile.UPLINK: {},
             tprofile_base.TrafficProfile.DOWNLINK: {},
             'extra_args': extra_args,
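Taken together with the _get_ip_priority() hunk above, this one-line addition routes an optional priority block from the scenario's options section into the traffic profile data. A small sketch of how the lookup behaves (the get_ip_priority helper and the option values are illustrative stand-ins, not confirmed yardstick schema):

    def get_ip_priority(scenario_cfg):
        # mirrors _get_ip_priority(): a missing 'options' or 'priority' key
        # falls back to an empty dict rather than raising
        try:
            return scenario_cfg['options']['priority']
        except KeyError:
            return {}

    print(get_ip_priority({'options': {'priority': {'raw': '0x01'}}}))  # {'raw': '0x01'}
    print(get_ip_priority({'options': {}}))                             # {}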
@@ -446,12 +478,30 @@ class NetworkServiceTestCase(scenario_base.Scenario):
                 pass
             self.create_interfaces_from_node(vnfd, node)
             vnf_impl = self.get_vnf_impl(vnfd['id'])
-            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
+            vnf_instance = vnf_impl(node_name, vnfd)
             vnfs.append(vnf_instance)

         self.vnfs = vnfs
         return vnfs

+    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
+        """Time waited before executing the run method"""
+        time.sleep(time_seconds)
+
+    def post_run_wait_time(self, time_seconds):  # pragma: no cover
+        """Time waited after executing the run method"""
+        pass
+
+
+class NetworkServiceTestCase(NetworkServiceBase):
+    """Class handles Generic framework to do pre-deployment VNF &
+    Network service testing """
+
+    __scenario_type__ = "NSPerf"
+
+    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
+        super(NetworkServiceTestCase, self).__init__(scenario_cfg, context_cfg)
+
     def setup(self):
         """Setup infrastructure, provission VNFs & start traffic"""
         # 1. Verify if infrastructure mapping can meet topology
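The refactor above moves the shared plumbing into NetworkServiceBase and leaves NSPerf as a thin subclass; each concrete class advertises itself through __scenario_type__, and the base deliberately sets it to the empty string so it is never selected. A schematic of that dispatch pattern (the find_scenario helper is an illustrative stand-in, not yardstick's actual loader):

    class ScenarioBase(object):
        __scenario_type__ = ""          # base never matches a task's type

    class NSPerf(ScenarioBase):
        __scenario_type__ = "NSPerf"

    class NSPerfRFC2544(ScenarioBase):
        __scenario_type__ = "NSPerf-RFC2544"

    def find_scenario(scenario_type):
        # pick the subclass whose tag matches the task's 'type' field
        for cls in ScenarioBase.__subclasses__():
            if cls.__scenario_type__ == scenario_type:
                return cls
        raise RuntimeError("No such scenario type %s" % scenario_type)

    print(find_scenario("NSPerf-RFC2544").__name__)  # NSPerfRFC2544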
@@ -495,11 +545,6 @@ class NetworkServiceTestCase(scenario_base.Scenario):
         for traffic_gen in traffic_runners:
             LOG.info("Starting traffic on %s", traffic_gen.name)
             traffic_gen.run_traffic(self.traffic_profile)
-            self._mq_ids.append(traffic_gen.get_mq_producer_id())
-
-    def get_mq_ids(self):  # pragma: no cover
-        """Return stored MQ producer IDs"""
-        return self._mq_ids

     def run(self, result):  # yardstick API
         """ Yardstick calls run() at intervals defined in the yaml and
@@ -515,33 +560,125 @@ class NetworkServiceTestCase(scenario_base.Scenario):
         result.update(self.collector.get_kpi())

-    def teardown(self):
-        """ Stop the collector and terminate VNF & TG instance
-
-        :return
+
+class NetworkServiceRFC2544(NetworkServiceBase):
+    """Class handles RFC2544 Network service testing"""
+
+    __scenario_type__ = "NSPerf-RFC2544"
+
+    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
+        super(NetworkServiceRFC2544, self).__init__(scenario_cfg, context_cfg)
+
+    def setup(self):
+        """Setup infrastructure, provision VNFs"""
+        self.map_topology_to_infrastructure()
+        self.load_vnf_models()
+
+        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
+        try:
+            for vnf in chain(traffic_runners, non_traffic_runners):
+                LOG.info("Instantiating %s", vnf.name)
+                vnf.instantiate(self.scenario_cfg, self.context_cfg)
+                LOG.info("Waiting for %s to instantiate", vnf.name)
+                vnf.wait_for_instantiate()
+        except:
+            LOG.exception("")
+            for vnf in self.vnfs:
+                vnf.terminate()
+            raise
+
+        self._generate_pod_yaml()
+
+    def run(self, output):
+        """ Run experiment
+
+        :param output: scenario output to push results
+        :return: None
         """
+        self._fill_traffic_profile()
+
+        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+
+        for traffic_gen in traffic_runners:
+            traffic_gen.listen_traffic(self.traffic_profile)
+
+        self.collector = Collector(self.vnfs,
+                                   context_base.Context.get_physical_nodes())
+        self.collector.start()
+
+        test_completed = False
+        while not test_completed:
+            for traffic_gen in traffic_runners:
+                LOG.info("Run traffic on %s", traffic_gen.name)
+                traffic_gen.run_traffic_once(self.traffic_profile)
+
+            test_completed = True
+            for traffic_gen in traffic_runners:
+                # wait for all tg to complete running traffic
+                status = traffic_gen.wait_on_traffic()
+                LOG.info("Run traffic on %s complete status=%s",
+                         traffic_gen.name, status)
+                if status == 'CONTINUE':
+                    # continue running if at least one tg is running
+                    test_completed = False
+
+        output.push(self.collector.get_kpi())
+
+        self.collector.stop()
+
+
+class NetworkServiceRFC3511(NetworkServiceBase):
+    """Class handles RFC3511 Network service testing"""
+
+    __scenario_type__ = "NSPerf-RFC3511"
+
+    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
+        super(NetworkServiceRFC3511, self).__init__(scenario_cfg, context_cfg)
+
+    def setup(self):
+        """Setup infrastructure, provision VNFs"""
+        self.map_topology_to_infrastructure()
+        self.load_vnf_models()
+
+        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
         try:
-            try:
-                self.collector.stop()
-                for vnf in self.vnfs:
-                    LOG.info("Stopping %s", vnf.name)
-                    vnf.terminate()
-                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
-            finally:
-                terminate_children()
-        except Exception:
-            # catch any exception in teardown and convert to simple exception
-            # never pass exceptions back to multiprocessing, because some exceptions can
-            # be unpicklable
-            # https://bugs.python.org/issue9400
+            for vnf in chain(traffic_runners, non_traffic_runners):
+                LOG.info("Instantiating %s", vnf.name)
+                vnf.instantiate(self.scenario_cfg, self.context_cfg)
+                LOG.info("Waiting for %s to instantiate", vnf.name)
+                vnf.wait_for_instantiate()
+        except:
             LOG.exception("")
-            raise RuntimeError("Error in teardown")
+            for vnf in self.vnfs:
+                vnf.terminate()
+            raise

-    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
-        """Time waited before executing the run method"""
-        time.sleep(time_seconds)
+        self._generate_pod_yaml()

-    def post_run_wait_time(self, time_seconds):  # pragma: no cover
-        """Time waited after executing the run method"""
-        pass
+    def run(self, output):
+        """ Run experiment
+
+        :param output: scenario output to push results
+        :return: None
+        """
+
+        self._fill_traffic_profile()
+
+        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
+
+        for traffic_gen in traffic_runners:
+            traffic_gen.listen_traffic(self.traffic_profile)
+
+        self.collector = Collector(self.vnfs,
+                                   context_base.Context.get_physical_nodes())
+        self.collector.start()
+
+        for traffic_gen in traffic_runners:
+            LOG.info("Run traffic on %s", traffic_gen.name)
+            traffic_gen.run_traffic(self.traffic_profile)
+
+        output.push(self.collector.get_kpi())
+
+        self.collector.stop()
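The RFC2544 run() above drives the generators iteratively: each pass triggers run_traffic_once() on every generator, then polls wait_on_traffic() on each, and the test only completes once no generator reports 'CONTINUE' (the RFC3511 variant instead fires run_traffic() once and collects KPIs straight away). A toy model of that convergence loop, where FakeTG is a hypothetical stand-in rather than a yardstick class:

    class FakeTG(object):
        # illustrative traffic generator: asks to CONTINUE a fixed
        # number of times before completing
        def __init__(self, name, iterations):
            self.name = name
            self.remaining = iterations

        def run_traffic_once(self):
            self.remaining -= 1

        def wait_on_traffic(self):
            return 'CONTINUE' if self.remaining > 0 else 'COMPLETE'

    tgs = [FakeTG('tg_0', 2), FakeTG('tg_1', 3)]
    test_completed = False
    rounds = 0
    while not test_completed:
        for tg in tgs:
            tg.run_traffic_once()
        # equivalent to the diff's flag-flipping: one straggler reporting
        # CONTINUE keeps the whole test running for another pass
        test_completed = all(tg.wait_on_traffic() != 'CONTINUE' for tg in tgs)
        rounds += 1
    print(rounds)  # 3 -- bounded by the slowest generator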