summaryrefslogtreecommitdiffstats
path: root/vstf/vstf/controller/sw_perf
diff options
context:
space:
mode:
authorYiting.Li <liyiting@huawei.com>2015-12-22 17:11:12 -0800
committerYiting.Li <liyiting@huawei.com>2015-12-22 17:11:12 -0800
commit8f1101df131a4d3e03b377738507d88b745831c0 (patch)
tree73f140474fcec2a77c85a453f6946957ca0742d1 /vstf/vstf/controller/sw_perf
parent1a24ebbda3f95600c0e7d5ed8661317a8ff7e265 (diff)
Upload the contribution of vstf as bottleneck network framework.
End to End Performance test JIRA:BOTTLENECK-29 Change-Id: Ib2c553c8b60d6cda9e7a7b52b737c9139f706ebd Signed-off-by: Yiting.Li <liyiting@huawei.com>
Diffstat (limited to 'vstf/vstf/controller/sw_perf')
-rwxr-xr-xvstf/vstf/controller/sw_perf/README39
-rwxr-xr-xvstf/vstf/controller/sw_perf/__init__.py14
-rwxr-xr-xvstf/vstf/controller/sw_perf/flow_producer.py137
-rwxr-xr-xvstf/vstf/controller/sw_perf/model.py190
-rwxr-xr-xvstf/vstf/controller/sw_perf/perf_provider.py209
-rwxr-xr-xvstf/vstf/controller/sw_perf/performance.py396
-rwxr-xr-xvstf/vstf/controller/sw_perf/raw_data.py124
7 files changed, 1109 insertions, 0 deletions
diff --git a/vstf/vstf/controller/sw_perf/README b/vstf/vstf/controller/sw_perf/README
new file mode 100755
index 00000000..02844a3e
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/README
@@ -0,0 +1,39 @@
+Tree
+
+|--- flow_producer.py
+|--- model.py
+|--- performance.py
+|--- perf_provider.py
+|--- raw_data.py
+
+Entry:
+ performance.py
+ usage: performance.py [-h] [-case CASE]
+ [-tool {pktgen,netperf,qperf,iperf,netmap}]
+ [-protocol {tcp,udp}] [-profile {rdp,fastlink,l2switch}]
+ [-type {throughput,latency,frameloss}] [-sizes SIZES]
+ [--monitor MONITOR]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -case CASE test case like Ti-1, Tn-1, Tnv-1, Tu-1...
+ -tool {pktgen,netperf,qperf,iperf,netmap}
+ -protocol {tcp,udp}
+ -profile {rdp,fastlink,l2switch}
+ -type {throughput,latency,frameloss}
+ -sizes SIZES test size list "64 128"
+ --monitor MONITOR which ip to be monitored
+
+Interface:
+ usage:
+ conn = Server(host=args.monitor)
+ flows_settings = FlowsSettings()
+ tool_settings = ToolSettings()
+ tester_settings = TesterSettings()
+ flow_producer = FlowsProducer(conn, flows_settings)
+ provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+ perf = Performance(conn, provider)
+ flow_producer.create(scenario, case)
+    LOG.info(flows_settings.settings)
+ result = perf.run(tool, protocol, type, sizes)
+
diff --git a/vstf/vstf/controller/sw_perf/__init__.py b/vstf/vstf/controller/sw_perf/__init__.py
new file mode 100755
index 00000000..89dcd4e2
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/__init__.py
@@ -0,0 +1,14 @@
+# Copyright Huawei Technologies Co., Ltd. 1998-2015.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the License); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/vstf/vstf/controller/sw_perf/flow_producer.py b/vstf/vstf/controller/sw_perf/flow_producer.py
new file mode 100755
index 00000000..1de4161c
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/flow_producer.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-11-19
+# see license for license details
+
+import logging
+
+from vstf.controller.settings.device_settings import DeviceSettings
+from vstf.controller.settings.forwarding_settings import ForwardingSettings
+from vstf.controller.settings.cpu_settings import CpuSettings
+from vstf.controller.fabricant import Fabricant
+from vstf.controller.settings.flows_settings import FlowsSettings
+import vstf.common.constants as cst
+
+LOG = logging.getLogger(__name__)
+
+
class FlowsProducer(object):
    """Fills a FlowsSettings object with the sender/receiver/watcher/
    namespace flow information for a given scenario and test case.

    Device details are fetched over RPC from each agent and cached per
    (agent, device-name) pair for the lifetime of one create() call.
    """

    def __init__(self, conn, flows_settings):
        """
        :param conn: RPC connection used to reach the agents
        :param flows_settings: FlowsSettings instance to populate
        """
        self._perf = flows_settings
        self._forwarding = ForwardingSettings().settings
        self._device = DeviceSettings().settings
        self._cpu = CpuSettings().settings
        self._conn = conn
        # cache of (agent, device-name) -> flow info, reset by create()
        self._devs_map = {}

    def get_dev(self, item):
        """Resolve a (host-key, device-index) pair to a flow-info dict.

        :param item: sequence like ("host", 0) indexing DeviceSettings
        :raises Exception: when the device entry carries an unknown key or
            the agent cannot return device details.
        """
        agent = self._device[item[0]]["agent"]
        devs = self._device[item[0]]["devs"][item[1]]

        keys = ["bdf", "iface", "mac"]

        # a device entry is expected to hold exactly one identifying key
        # (dict.keys()[0] was Python-2-only; next(iter(...)) works on both)
        key = next(iter(devs))

        if key in keys:
            name = devs[key]
        else:
            # was: raise Exception("error devs :%s", devs) -- the message
            # was never interpolated; use %-formatting
            raise Exception("error devs :%s" % devs)
        LOG.info(agent)
        LOG.info(name)
        if (agent, name) not in self._devs_map:
            query = Fabricant(agent, self._conn)
            query.clean_all_namespace()
            dev_info = query.get_device_verbose(identity=name)
            if not isinstance(dev_info, dict):
                err = "get device detail failed, agent:%s net:%s" % (agent, name)
                raise Exception(err)
            dev = {
                "agent": agent,
                "dev": {
                    "bdf": dev_info["bdf"],
                    "iface": dev_info["iface"],
                    "mac": dev_info["mac"],
                    "ip": None,
                    "namespace": None
                }
            }

            self._devs_map[(agent, name)] = dev
            LOG.info(dev)

        return self._devs_map[(agent, name)]

    def get_host(self):
        """Return the host agent name plus its CPU-affinity settings."""
        result = {
            "agent": self._device["host"]["agent"],
            "affctl": self._cpu["affctl"]
        }
        return result

    def create(self, scenario, case):
        """Populate the flows settings for `case` within `scenario`.

        :param scenario: scenario tag such as "Tn"
        :param case: case tag such as "Tn-1" (key of cst.CASE_ACTOR_MAP)
        :return: True on success
        :raises Exception: if the scenario flow indexes are invalid
        """
        self._devs_map = {}
        flows_indexes = self._forwarding[scenario]["flows"]
        flows_infos = []
        for index in flows_indexes:
            if not index:
                raise Exception("error flows %s" % flows_indexes)
            dev = self.get_dev(index)
            flows_infos.append(dev)

        # first/last flow devices take the configured head/tail info (ip etc.)
        flows_infos[0]['dev'].update(self._forwarding["head"])
        flows_infos[-1]['dev'].update(self._forwarding["tail"])

        LOG.info(flows_infos)

        actor_info = cst.CASE_ACTOR_MAP[case]

        self._perf.clear_all()
        senders = actor_info["senders"]
        LOG.info(senders)
        for sender in senders:
            dev = flows_infos[sender]
            if dev:
                self._perf.add_senders(dev)

        receivers = actor_info["receivers"]
        for receiver in receivers:
            dev = flows_infos[receiver]
            if dev:
                self._perf.add_receivers(dev)

        watchers = self._forwarding[scenario]["watchers"]
        for watcher in watchers:
            dev = flows_infos[watcher]
            if dev:
                self._perf.add_watchers(dev)

        # namespaces are always attached to the head and tail devices
        namespaces = [0, -1]
        for namespace in namespaces:
            dev = flows_infos[namespace]
            if dev:
                self._perf.add_namespaces(dev)

        host = self.get_host()
        if host:
            self._perf.add_cpu_listens(host)

        self._perf.set_flows(actor_info["flows"])
        return True
+
+
def unit_test():
    """Ad-hoc driver: build the Tn-1 flow settings against a live agent."""
    from vstf.rpc_frame_work.rpc_producer import Server
    from vstf.common.log import setup_logging
    setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-producer.log", clevel=logging.INFO)

    producer = FlowsProducer(Server("192.168.188.10"), FlowsSettings())
    producer.create("Tn", "Tn-1")


if __name__ == '__main__':
    unit_test()
diff --git a/vstf/vstf/controller/sw_perf/model.py b/vstf/vstf/controller/sw_perf/model.py
new file mode 100755
index 00000000..672daade
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/model.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author:
+# date:
+# see license for license details
+
+import logging
+
+from vstf.controller.fabricant import Fabricant
+from vstf.controller.sw_perf.raw_data import RawDataProcess
+from vstf.common import perfmark as mark
+
+LOG = logging.getLogger(__name__)
+
+
class NetDeviceMgr(Fabricant):
    """Class-method helpers that configure, remove or wipe namespace
    network devices on a remote agent."""

    @classmethod
    def add(cls, dst, conn, dev):
        """Create/configure one namespace device on agent `dst`."""
        mgr = cls(dst, conn)
        LOG.info(dev)
        LOG.info(mgr.config_dev(netdev=dev))

    @classmethod
    def remove(cls, dst, conn, dev):
        """Undo the configuration of one namespace device on agent `dst`."""
        mgr = cls(dst, conn)
        LOG.info(dev)
        LOG.info(mgr.recover_dev(netdev=dev))

    @classmethod
    def clear(cls, dst, conn):
        """Delete every namespace on agent `dst`."""
        cls(dst, conn).clean_all_namespace()
+
+
class Actor(Fabricant):
    """Base for Sender/Receiver: binds an agent, a traffic tool and the
    tool's parameter dict, and keeps a result store."""

    def __init__(self, dst, conn, tool, params):
        super(Actor, self).__init__(dst, conn)
        self._tool = tool
        self._params = params
        self._data = {}

    def __repr__(self):
        attrs = self.__dict__
        body = ', '.join('%s=%r' % (name, attrs[name]) for name in sorted(attrs))
        return '%s(%s)' % (self.__class__.__name__, body)
+
+
class Sender(Actor):
    """Transmit side of a perf run; aggregates latency marks on stop."""

    def start(self, pktsize, **kwargs):
        """Start sending `pktsize`-byte packets.

        :param pktsize: packet size handed to the tool
        :param kwargs: optional `ratep` rate limit (0/None means unlimited)
        :raises Exception: when the agent reports a non-zero status
        """
        LOG.info("Sender.start")
        if 'ratep' in kwargs and kwargs['ratep']:
            self._params['ratep'] = kwargs['ratep']
        self._params['size'] = pktsize

        ret, info = self.perf_run(
            operation="start",
            action="send",
            tool=self._tool,
            params=self._params
        )
        LOG.info(ret)
        if ret:
            raise Exception(info)
        LOG.info(info)

    def stop(self):
        """Stop sending and average latency marks over all flows when the
        protocol reports latency (see is_data).

        :raises Exception: when any flow reports a non-zero status
        """
        LOG.info(self._params)
        rets = self.perf_run(
            operation="stop",
            action="send",
            tool=self._tool,
            params={}
        )
        LOG.info(rets)
        minlatency, avglatency, maxlatency = 0, 0, 0
        count = 0
        for (ret, info) in rets:
            if ret:
                raise Exception(info)
            if self.is_data() and ret == 0:
                count += 1
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
        # guard against division by zero when no latency data arrived
        count = 1 if not count else count
        self._data[mark.minLatency] = minlatency / count
        self._data[mark.avgLatency] = avglatency / count
        self._data[mark.maxLatency] = maxlatency / count

    def is_data(self):
        """Latency marks are only present for the *_lat protocols."""
        return '_lat' in self._params['protocol']

    def result(self):
        """Return the latency record accumulated by stop()."""
        return self._data
+
+
class Receiver(Actor):
    """Receive side of a perf run: start/stop the tool's receiver."""

    def _invoke(self, operation):
        # start and stop differ only in the operation keyword
        ret, info = self.perf_run(
            operation=operation,
            action="receive",
            tool=self._tool,
            params=self._params
        )
        LOG.info(ret)
        if ret:
            raise Exception(info)
        LOG.info(info)
        return ret

    def start(self, **kwargs):
        LOG.info("Receiver.start")
        return self._invoke("start")

    def stop(self):
        LOG.info("Receiver.stop")
        return self._invoke("stop")
+
+
class NicWatcher(Fabricant):
    """Samples NIC counters with vnstat on a remote agent during a run."""

    def __init__(self, dst, conn, params):
        super(NicWatcher, self).__init__(dst, conn)
        self._params = params
        self._pid = None  # pid of the remote vnstat process
        self._data = {}

    def start(self):
        """Launch vnstat against the watched iface/namespace."""
        LOG.info("NicWatcher.start")
        self._pid = self.run_vnstat(device=self._params["iface"], namespace=self._params["namespace"])
        LOG.info(self._pid)

    def stop(self):
        """Kill vnstat and parse its raw output into self._data."""
        LOG.info("NicWatcher.stop")
        if self._pid:
            data = self.kill_vnstat(pid=self._pid)
            self._data = RawDataProcess.process(data)
            LOG.info(self._data)

    def result(self, **kwargs):
        """Return the NIC stats parsed by stop()."""
        return self._data
+
+
class CpuWatcher(Fabricant):
    """Samples CPU usage on a remote agent for the duration of a run."""

    def __init__(self, dst, conn):
        super(CpuWatcher, self).__init__(dst, conn)
        self._pid = None  # pid of the remote cpu-watch process
        self._data = {}

    def start(self):
        """Launch the remote CPU watcher."""
        LOG.info("CpuWatcher.start")
        self._pid = self.run_cpuwatch()
        LOG.info(self._pid)

    def stop(self):
        """Stop the watcher and parse its output into self._data."""
        LOG.info("CpuWatcher.stop")
        if self._pid:
            LOG.info(self._pid)
            data = self.kill_cpuwatch(pid=self._pid)
            self._data = RawDataProcess.process(data)
            LOG.info(self._data)

    def result(self, **kwargs):
        """Return the CPU stats parsed by stop()."""
        return self._data
+
+
def unit_test():
    """Placeholder: no standalone checks for this module yet."""
    pass


if __name__ == '__main__':
    unit_test()
diff --git a/vstf/vstf/controller/sw_perf/perf_provider.py b/vstf/vstf/controller/sw_perf/perf_provider.py
new file mode 100755
index 00000000..bd1027ad
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/perf_provider.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-09-21
+# see license for license details
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
def get_agent_dict(nodes):
    """Collect the distinct agent names referenced by a list of flow nodes.

    :param nodes: list of flow-info dicts, every element must have an
        "agent" key
    :return: dict mapping each agent name to False (a "not yet visited"
        flag used by callers such as Performance.create_namespace)
    :rtype: dict
    """
    return dict.fromkeys((node["agent"] for node in nodes), False)
+
+
class PerfProvider(object):
    """Turns flow/tool/tester settings into per-agent task descriptions
    (senders, receivers, watchers, namespace devices, cpu watcher) that
    Performance consumes to drive a test over RPC."""

    def __init__(self, flows_info, tool_info, tester_info):
        """
        :param flows_info: FlowsSettings dict (senders/receivers/watchers/
            namespaces/cpu_listens/flows)
        :param tool_info: per-tool settings (time, threads, wait)
        :param tester_info: tester settings (drivers to install)
        """
        self._flows_info = flows_info
        self._tool_info = tool_info
        self._tester_info = tester_info

    def _islation(self):
        # True when both flows originate from the same sender agent.
        # NOTE(review): name looks like a typo for "_isolation"; kept
        # unchanged because it is called throughout this class.
        flows = self._flows_info["flows"]
        if flows == 2 and self._flows_info["senders"][0]["agent"] == self._flows_info["senders"][1]["agent"]:
            return True
        return False

    def get_senders(self, tool, protocol):
        """Build sender task dicts.

        pktgen in the same-agent ("isolation") topology gets a single
        multi-flow sender; otherwise one sender per flow is emitted.
        """
        result = []
        flows = self._flows_info["flows"]
        if self._islation() and "pktgen" == tool:
            sender = {
                "agent": self._flows_info["senders"][0]["agent"],
                "params": {
                    "protocol": protocol,
                    "namespace": None,
                    "src": [],
                    "dst": [],
                    "time": self._tool_info[tool]["time"],
                    "threads": self._tool_info[tool]["threads"]
                }
            }
            for i in range(flows):
                sender['params']['src'].append(self._flows_info["senders"][i]['dev'])
                sender['params']['dst'].append(self._flows_info["receivers"][i]['dev'])
            result.append(sender)
        else:
            for i in range(flows):
                sender = {
                    "agent": self._flows_info["senders"][i]["agent"],
                    "params": {
                        # netmap drives the NIC directly, no namespace
                        "namespace": None if "netmap" == tool else self._flows_info["senders"][i]['dev']['namespace'],
                        "protocol": protocol,
                        "src": [self._flows_info["senders"][i]['dev']],
                        "dst": [self._flows_info["receivers"][i]['dev']],
                        "time": self._tool_info[tool]["time"],
                        "threads": self._tool_info[tool]["threads"]
                    }
                }
                result.append(sender)
        return result

    def get_receivers(self, tool, protocol):
        """Build receiver task dicts, mirroring get_senders' topology."""
        result = []
        flows = self._flows_info["flows"]
        if self._islation() and "pktgen" == tool:
            receiver = {
                "agent": self._flows_info["receivers"][0]["agent"],
                "params": {
                    "namespace": None,
                    "protocol": protocol,
                }
            }
            result.append(receiver)
        else:
            for i in range(flows):
                receiver = {
                    "agent": self._flows_info["receivers"][i]["agent"],
                    "params": {
                        "namespace": None if "netmap" == tool else self._flows_info["receivers"][i]['dev']['namespace'],
                        "protocol": protocol,
                        "dst": [self._flows_info["receivers"][i]['dev']]
                    }
                }
                result.append(receiver)
        return result

    def get_watchers(self, tool):
        """Build NIC-watcher task dicts, one per watched device."""
        result = []
        for watcher in self._flows_info["watchers"]:
            node = {
                "agent": watcher["agent"],
                "params": {
                    "iface": watcher['dev']["iface"],
                    # pktgen/netmap run outside namespaces
                    "namespace": None if tool in ["pktgen", "netmap"] else watcher['dev']["namespace"],
                }
            }
            result.append(node)
        return result

    def get_namespaces(self, tool):
        """Build namespace-device task dicts (iface, namespace, ip/24).

        NOTE(review): assumes 'ip' is set on these devices (FlowsProducer
        applies head/tail info to them) — would raise TypeError if None.
        """
        result = []

        for watcher in self._flows_info["namespaces"]:
            node = {
                "agent": watcher["agent"],
                "params": {
                    "iface": watcher['dev']["iface"],
                    "namespace": watcher['dev']["namespace"] if tool not in ["pktgen", "netmap"] else None,
                    "ip": watcher['dev']["ip"] + '/24',
                }
            }
            result.append(node)
        return result

    @property
    def get_cpuwatcher(self):
        """Task dict for the single CPU watcher (first cpu_listens entry)."""
        LOG.info(self._flows_info["cpu_listens"])
        result = {
            "agent": self._flows_info["cpu_listens"][0]["agent"],
            "params": {
            }
        }
        return result

    @property
    def get_cpu_affctl(self):
        """Task dict for applying the CPU-affinity policy."""
        LOG.info(self._flows_info["cpu_listens"])
        result = {
            "agent": self._flows_info["cpu_listens"][0]["agent"],
            "params": {
                "policy": self._flows_info["cpu_listens"][0]["affctl"]["policy"]
            }
        }
        return result

    def get_cleaners(self, tool, protocol):
        """Distinct agents that need a namespace cleanup before a run."""
        nodes = self.get_senders(tool, protocol) + \
                self.get_receivers(tool, protocol) + \
                self.get_watchers(tool) + \
                [self.get_cpuwatcher]
        return get_agent_dict(nodes).keys()

    @property
    def get_testers(self):
        """Driver-install task dicts, one per namespace-hosting agent."""
        agents = get_agent_dict(self._flows_info["namespaces"]).keys()
        result = []
        for agent in agents:
            node = {
                "agent": agent,
                "params": {
                    "drivers": self._tester_info["drivers"]
                }
            }
            result.append(node)
        return result

    def duration(self, tool):
        """Configured measurement time (seconds) for `tool`."""
        return self._tool_info[tool]["time"]

    def wait_balance(self, tool):
        """Configured settle time (seconds) before sampling for `tool`."""
        return self._tool_info[tool]["wait"]
+
+
def unit_test():
    """Ad-hoc driver: log the generated task plans for each protocol."""
    from vstf.common.log import setup_logging
    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-perf-provider.log", clevel=logging.INFO)

    from vstf.controller.settings.flows_settings import FlowsSettings
    from vstf.controller.settings.tool_settings import ToolSettings
    from vstf.controller.settings.tester_settings import TesterSettings

    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()

    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)

    tools = ['pktgen']
    protocols = ['udp_bw', 'udp_lat']

    for tool in tools:
        LOG.info(tool)
        for protocol in protocols:
            LOG.info(protocol)
            # BUG FIX: the whole `protocols` list was passed here instead
            # of the current loop variable
            senders = provider.get_senders(tool, protocol)
            LOG.info(len(senders))
            LOG.info(senders)

            receivers = provider.get_receivers(tool, protocol)
            LOG.info(len(receivers))
            LOG.info(receivers)

        LOG.info(provider.get_cpuwatcher)
        LOG.info(provider.get_watchers(tool))
        LOG.info(provider.get_namespaces(tool))
        LOG.info(provider.duration(tool))


if __name__ == '__main__':
    unit_test()
diff --git a/vstf/vstf/controller/sw_perf/performance.py b/vstf/vstf/controller/sw_perf/performance.py
new file mode 100755
index 00000000..6ca8160e
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/performance.py
@@ -0,0 +1,396 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-09-19
+# see license for license details
+
+import time
+import argparse
+import logging
+
+from vstf.controller.sw_perf import model
+from vstf.common import perfmark as mark
+import vstf.common.constants as cst
+from vstf.rpc_frame_work.rpc_producer import Server
+from vstf.controller.settings.flows_settings import FlowsSettings
+from vstf.controller.settings.tool_settings import ToolSettings
+from vstf.controller.settings.perf_settings import PerfSettings
+from vstf.controller.sw_perf.perf_provider import PerfProvider, get_agent_dict
+from vstf.controller.sw_perf.flow_producer import FlowsProducer
+from vstf.controller.settings.tester_settings import TesterSettings
+from vstf.controller.fabricant import Fabricant
+
+LOG = logging.getLogger(__name__)
+
+
class Performance(object):
    """Drives one software performance test end to end.

    Builds namespaces, senders, receivers and watchers from a PerfProvider
    plan, starts the traffic tools over RPC, then collects and merges
    bandwidth, latency and CPU results.
    """

    def __init__(self, conn, provider):
        # conn: RPC connection to the agents; provider: PerfProvider plan
        self._provider = provider
        self._conn = conn
        self._init()

    def _init(self):
        # per-run actor containers, rebuilt by create()
        self._senders = []
        self._receivers = []
        self._watchers = []
        self._cpuwatcher = None

    def create(self, tool, tpro):
        """Prepare agents (clean namespaces, install tester drivers) and
        build all actor objects for `tool` and protocol `tpro`
        (e.g. "udp_bw")."""
        self._init()
        agents = self._provider.get_cleaners(tool, tpro)
        LOG.info(agents)
        for agent in agents:
            cleaner = Fabricant(agent, self._conn)
            cleaner.clean_all_namespace()

        for tester_info in self._provider.get_testers:
            dst = tester_info["agent"]
            params = tester_info["params"]
            LOG.info(tester_info)
            driver_mgr = Fabricant(dst, self._conn)
            ret = driver_mgr.install_drivers(drivers=params["drivers"])
            LOG.info(ret)

        self.create_namespace(tool)
        self.create_senders(tool, tpro)
        self.create_receivers(tool, tpro)
        self.create_watchers(tool)
        self.create_cpuwatcher()

    def destory(self, tool):
        # NOTE(review): "destory" is a typo for "destroy", kept because it
        # is the name used by the run_* methods below.
        self.clear_namespace(tool)

    def create_namespace(self, tool):
        """Create namespace devices; each agent is wiped once first."""
        devices = self._provider.get_namespaces(tool)
        agents = get_agent_dict(devices)
        LOG.info(agents)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            if not agents[dst]:
                # first device on this agent: clear stale namespaces once
                model.NetDeviceMgr.clear(dst, self._conn)
                agents[dst] = True

            model.NetDeviceMgr.add(dst, self._conn, params)

    def clear_namespace(self, tool):
        """Remove every namespace device created for this test."""
        devices = self._provider.get_namespaces(tool)
        for device in devices:
            dst = device["agent"]
            params = device["params"]
            model.NetDeviceMgr.remove(dst, self._conn, params)

    def create_senders(self, tool, tpro):
        """Instantiate a model.Sender per sender task."""
        sender_infos = self._provider.get_senders(tool, tpro)
        LOG.info(sender_infos)
        for sender_info in sender_infos:
            dst = sender_info["agent"]
            params = sender_info["params"]
            send = model.Sender(dst, self._conn, tool, params)
            self._senders.append(send)

    def create_receivers(self, tool, tpro):
        """Instantiate a model.Receiver per receiver task."""
        receiver_infos = self._provider.get_receivers(tool, tpro)
        LOG.info(receiver_infos)
        for receiver_info in receiver_infos:
            dst = receiver_info["agent"]
            params = receiver_info["params"]
            receive = model.Receiver(dst, self._conn, tool, params)
            self._receivers.append(receive)

    def create_watchers(self, tool):
        """Instantiate a model.NicWatcher per watched NIC."""
        watcher_infos = self._provider.get_watchers(tool)
        LOG.info(watcher_infos)
        for watcher_info in watcher_infos:
            dst = watcher_info["agent"]
            params = watcher_info["params"]
            watch = model.NicWatcher(dst, self._conn, params)
            self._watchers.append(watch)

    def create_cpuwatcher(self):
        """Instantiate the single CPU watcher on the listening agent."""
        watcher_info = self._provider.get_cpuwatcher
        LOG.info(watcher_info)
        dst = watcher_info["agent"]
        self._cpuwatcher = model.CpuWatcher(dst, self._conn)

    def start_receivers(self, **kwargs):
        for receiver in self._receivers:
            receiver.start(**kwargs)

    def start_senders(self, pktsize, **kwargs):
        for sender in self._senders:
            sender.start(pktsize, **kwargs)

    def start_watchers(self):
        for watcher in self._watchers:
            watcher.start()

    def stop_receivers(self):
        for receiver in self._receivers:
            receiver.stop()

    def stop_senders(self):
        for sender in self._senders:
            sender.stop()

    def stop_watchers(self):
        for watcher in self._watchers:
            watcher.stop()

    def start_cpuwatcher(self):
        if self._cpuwatcher:
            self._cpuwatcher.start()

    def stop_cpuwatcher(self):
        if self._cpuwatcher:
            self._cpuwatcher.stop()

    def getlimitspeed(self, ptype, size):
        # placeholder: rate discovery not implemented; 0 means "no limit"
        return 0

    def affctl(self):
        """Apply the configured CPU-affinity policy on the listen agent."""
        ctl = self._provider.get_cpu_affctl
        LOG.info(ctl)
        driver_mgr = Fabricant(ctl["agent"], self._conn)
        ret = driver_mgr.affctl_load(policy=ctl["params"]["policy"])
        LOG.info(ret)

    def run_pre_affability_settings(self, tool, tpro, pktsize, **kwargs):
        """Short warm-up run whose only purpose is applying CPU affinity
        while traffic is flowing."""
        LOG.info("run_pre_affability_settings start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        self.affctl()
        time.sleep(2)
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_pre_affability_settings end")

    def run_bandwidth_test(self, tool, tpro, pktsize, **kwargs):
        """Bandwidth run: let traffic settle, then sample NIC and CPU
        watchers for the tool's configured duration."""
        LOG.info("run_bandwidth_test ")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.wait_balance(tool))
        self.start_watchers()
        self.start_cpuwatcher()
        time.sleep(self._provider.duration(tool))
        self.stop_watchers()
        self.stop_cpuwatcher()
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_bandwidth_test end")

    def run_latency_test(self, tool, tpro, pktsize, **kwargs):
        """Latency run: no watchers; latency comes from the senders."""
        LOG.info("run_latency_test start")
        self.create(tool, tpro)
        self.start_receivers()
        self.start_senders(pktsize, **kwargs)
        time.sleep(self._provider.duration(tool))
        self.stop_senders()
        self.stop_receivers()
        self.destory(tool)
        LOG.info("run_latency_test end")

    def run(self, tool, protocol, ttype, sizes, affctl=False):
        """Run the full test matrix over `sizes`.

        :param tool: traffic tool for the bandwidth part
        :param protocol: base protocol; "_bw"/"_lat" suffixes added here
        :param ttype: "throughput", "frameloss" or "latency"
        :param sizes: iterable of packet sizes (indexable when affctl=True)
        :param affctl: when True, warm up and pin CPU affinity first
        :return: dict mapping size -> merged result record
        :raises Exception: on unknown `ttype`
        """
        result = {}
        if affctl:
            pre_tpro = protocol + "_bw"
            size = sizes[0]
            self.run_pre_affability_settings(tool, pre_tpro, size, ratep=0)

        for size in sizes:
            if ttype in ['throughput', 'frameloss']:
                realspeed = self.getlimitspeed(ttype, size)
                bw_tpro = protocol + "_bw"
                bw_type = ttype
                self.run_bandwidth_test(tool, bw_tpro, size, ratep=realspeed)
                bw_result = self.result(tool, bw_type)

                # latency is always measured with qperf alongside bandwidth
                lat_tool = "qperf"
                lat_type = 'latency'
                lat_tpro = protocol + '_lat'
                self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
                lat_result = self.result(tool, lat_type)
                LOG.info(bw_result)
                LOG.info(lat_result)
                lat_result.pop('OfferedLoad')
                bw_result.update(lat_result)
                result[size] = bw_result

            elif ttype in ['latency']:
                lat_tpro = protocol + '_lat'
                lat_type = ttype
                self.run_latency_test(tool, lat_tpro, size, ratep=None)
                lat_result = self.result(tool, lat_type)
                result[size] = lat_result
            else:
                raise Exception("error:protocol type:%s" % (ttype))
        return result

    def result(self, tool, ttype):
        """Aggregate watcher/sender data into a result record for `ttype`.

        :raises Exception: on unknown `ttype`
        """
        if ttype in {'throughput', 'frameloss'}:
            record = {
                mark.rxCount: 0,
                mark.txCount: 0,
                mark.bandwidth: 0,
                mark.offLoad: 100.0,
                mark.mppsGhz: 0,
                mark.percentLoss: 0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0,
                mark.rxMbps:0,
                mark.txMbps:0
            }

            cpu_data = self._cpuwatcher.result()
            print self._cpuwatcher, cpu_data
            if cpu_data:
                cpu_usage = cpu_data['cpu_num'] * (100 - cpu_data['idle'])
                cpu_mhz = cpu_data['cpu_mhz']
                record[mark.cpu] = round(cpu_usage, cst.CPU_USAGE_ROUND)
                record[mark.duration] = self._provider.duration(tool)

            for watcher in self._watchers:
                nic_data = watcher.result()
                record[mark.rxCount] += nic_data['rxpck']
                record[mark.txCount] += nic_data['txpck']
                record[mark.bandwidth] += nic_data['rxpck/s']
                record[mark.rxMbps] += nic_data['rxmB/s']
                record[mark.txMbps] += nic_data['txmB/s']

            if record[mark.txCount]:
                # NOTE(review): under Python 2 this is integer division
                # when the counts are ints — loss may truncate; confirm.
                record[mark.percentLoss] = round(100 * (1 - record[mark.rxCount] / record[mark.txCount]),
                                                 cst.PKTLOSS_ROUND)
            else:
                record[mark.percentLoss] = 100

            record[mark.bandwidth] /= 1000000.0
            # NOTE(review): cpu_mhz and record[mark.cpu] are only bound in
            # the "if cpu_data" branch above — this line raises
            # NameError/KeyError when cpu_data is empty; confirm intended.
            if cpu_mhz and record[mark.cpu]:
                record[mark.mppsGhz] = round(record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
                                             cst.CPU_USAGE_ROUND)

            record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)

        elif ttype in {'latency'}:
            record = {
                mark.offLoad: 0.0,
                mark.avgLatency: 0,
                mark.maxLatency: 0,
                mark.minLatency: 0
            }
            minlatency, avglatency, maxlatency = 0, 0, 0
            count = 0
            for sender in self._senders:
                info = sender.result()
                LOG.info(info)
                minlatency += info[mark.minLatency]
                avglatency += info[mark.avgLatency]
                maxlatency += info[mark.maxLatency]
            # guard against division by zero when there were no senders
            count = 1 if not count else count
            record[mark.minLatency] = round(minlatency / count, cst.TIME_ROUND)
            record[mark.avgLatency] = round(avglatency / count, cst.TIME_ROUND)
            record[mark.maxLatency] = round(maxlatency / count, cst.TIME_ROUND)

        else:
            raise Exception('error:protocol type:%s' % ttype)

        LOG.info('record:%s' % record)
        return record
+
+
def unit_test():
    """Drive every configured perf case against a live agent setup."""
    from vstf.common.log import setup_logging
    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)

    conn = Server("192.168.188.10")
    perf_settings = PerfSettings()
    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
    perf = Performance(conn, provider)
    for scenario, cases in perf_settings.settings.items():
        if not cases:
            continue
        for case in cases:
            casetag, tool = case['case'], case['tool']
            protocol, profile = case['protocol'], case['profile']
            ttype, sizes = case['type'], case['sizes']

            flow_producer.create(scenario, casetag)
            LOG.info(perf.run(tool, protocol, ttype, sizes))
+
+
def main():
    """Command-line entry point: run one perf case against a monitor host."""
    from vstf.common.log import setup_logging
    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
    from vstf.controller.database.dbinterface import DbManage
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument("case",
                        action="store",
                        help="test case like Ti-1, Tn-1, Tnv-1, Tu-1...")
    parser.add_argument("tool",
                        action="store",
                        choices=cst.TOOLS,
                        )
    parser.add_argument("protocol",
                        action="store",
                        choices=cst.TPROTOCOLS,
                        )
    parser.add_argument("profile",
                        action="store",
                        choices=cst.PROFILES,
                        )
    parser.add_argument("type",
                        action="store",
                        choices=cst.TTYPES,
                        )
    parser.add_argument("sizes",
                        action="store",
                        default="64",
                        help='test size list "64 128"')
    parser.add_argument("--affctl",
                        action="store_true",
                        help="when input '--affctl', the performance will do affctl before testing")
    parser.add_argument("--monitor",
                        dest="monitor",
                        default="localhost",
                        action="store",
                        help="which ip to be monitored")
    args = parser.parse_args()

    LOG.info(args.monitor)
    conn = Server(host=args.monitor)
    db_mgr = DbManage()

    casetag = args.case
    tool = args.tool
    protocol = args.protocol
    profile = args.profile
    ttype = args.type
    # build a real list (map() is a one-shot iterator on Python 3 and
    # Performance.run indexes sizes[0])
    sizes = [int(x) for x in args.sizes.strip().split()]

    flows_settings = FlowsSettings()
    tool_settings = ToolSettings()
    tester_settings = TesterSettings()
    flow_producer = FlowsProducer(conn, flows_settings)
    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
    perf = Performance(conn, provider)
    scenario = db_mgr.query_scenario(casetag)
    flow_producer.create(scenario, casetag)
    LOG.info(flows_settings.settings)
    # BUG FIX: was `affctl`, an undefined name; the parsed flag is args.affctl
    result = perf.run(tool, protocol, ttype, sizes, args.affctl)
    LOG.info(result)


if __name__ == '__main__':
    main()
diff --git a/vstf/vstf/controller/sw_perf/raw_data.py b/vstf/vstf/controller/sw_perf/raw_data.py
new file mode 100755
index 00000000..dab749eb
--- /dev/null
+++ b/vstf/vstf/controller/sw_perf/raw_data.py
@@ -0,0 +1,124 @@
+import subprocess
+import re
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
class RawDataProcess(object):
    """Parsers for raw monitoring output (vnstat, sar, qperf) returned by
    agents, normalising traffic figures to MiB / Mbit style units."""

    def __init__(self):
        pass

    def process_vnstat(self, data):
        """Parse `vnstat -l` summary output.

        :param data: full stdout text of a terminated vnstat live run
        :return: dict with packet totals, packet rates and traffic rates
            (rx/tx min/avg/max); GiB/KiB and Gbit/kbit are scaled to
            MiB and Mbit respectively.
        """
        buf = data.splitlines()
        buf = buf[9:]  # drop the live-sampling header lines
        buf = ' '.join(buf)
        m = {}
        digits = re.compile(r"\d{1,}\.?\d*")
        units = re.compile(r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)", re.IGNORECASE | re.MULTILINE)
        units_arr = units.findall(buf)
        LOG.debug(units_arr)
        digits_arr = digits.findall(buf)

        for i in range(len(digits_arr)):
            digits_arr[i] = round(float(digits_arr[i]), 2)

        LOG.info("-------------digit_arr------------------")
        LOG.info(digits_arr)
        LOG.info(units_arr)
        LOG.info("-----------------------------------------")
        # positions 8/9 hold total rx/tx packet counts; the trailing digit
        # is the sampling time
        m['rxpck'], m['txpck'] = digits_arr[8], digits_arr[9]
        m['time'] = digits_arr[-1]
        digits_arr = digits_arr[:8] + digits_arr[10:-1]
        index = 0
        for unit in units_arr:
            unit = unit.lower()
            if unit == 'gib':
                digits_arr[index] *= 1024
            elif unit == 'kib':
                digits_arr[index] /= 1024
            elif unit == 'gbit/s':
                digits_arr[index] *= 1000
            elif unit == 'kbit/s':
                digits_arr[index] /= 1000
            else:
                pass
            index += 1

        for i in range(len(digits_arr)):
            digits_arr[i] = round(digits_arr[i], 2)

        m['rxmB'], m['txmB'] = digits_arr[0:2]
        m['rxmB_max/s'], m['txmB_max/s'] = digits_arr[2:4]
        m['rxmB/s'], m['txmB/s'] = digits_arr[4:6]
        m['rxmB_min/s'], m['txmB_min/s'] = digits_arr[6:8]
        m['rxpck_max/s'], m['txpck_max/s'] = digits_arr[8:10]
        m['rxpck/s'], m['txpck/s'] = digits_arr[10:12]
        m['rxpck_min/s'], m['txpck_min/s'] = digits_arr[12:14]
        LOG.info("---------------vnstat data start-------------")
        LOG.info(m)
        LOG.info("---------------vnstat data end---------------")
        return m

    def process_sar_cpu(self, raw):
        """Parse `sar -u` output into {column-name: Average-value} floats.

        Column names come from the header row (index 2), values from the
        trailing "Average:" row; the '%' prefix is stripped from names.
        """
        lines = raw.splitlines()
        head = lines[2].split()[3:]
        average = lines[-1].split()[2:]
        data = {}
        for h, d in zip(head, average):
            data[h.strip('%')] = float(d)
        return data

    def process_qperf(self, raw):
        """Parse qperf output; returns {metric-name: value, 'unit': unit}."""
        buf = raw.splitlines()
        data = buf[1].strip().split()
        key = data[0]
        value = float(data[2])
        unit = data[3]
        return {key: value, 'unit': unit}

    @classmethod
    def process(cls, raw):
        """Dispatch a raw-data envelope to the matching parser.

        :param raw: dict with 'tool', 'type', 'raw_data' and optionally
            'cpu_num' / 'cpu_mhz' keys that are copied into the result
        """
        self = cls()
        tool, data_type, data = raw['tool'], raw['type'], raw['raw_data']
        m = {}
        if tool == 'vnstat' and data_type == 'nic':
            m = self.process_vnstat(data)
        if tool == 'sar' and data_type == 'cpu':
            m = self.process_sar_cpu(data)
        # dict.has_key() was removed in Python 3; use the `in` operator
        if 'cpu_num' in raw:
            m['cpu_num'] = raw['cpu_num']
        if 'cpu_mhz' in raw:
            m['cpu_mhz'] = raw['cpu_mhz']
        if tool == 'qperf':
            m = self.process_qperf(data)
        return m
+
+
if __name__ == '__main__':
    # manual smoke test: sample live vnstat and sar output for ~20s each
    # (imports were duplicated before each sample; hoisted here once)
    import os
    import time
    from signal import SIGINT

    logging.basicConfig(level=logging.DEBUG)
    p = RawDataProcess()

    cmd = "vnstat -i eth0 -l"
    child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    time.sleep(20)
    os.kill(child.pid, SIGINT)
    data = child.stdout.read()
    print(data)
    print(p.process_vnstat(data))

    cmd = "sar -u 2"
    child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    time.sleep(20)
    os.kill(child.pid, SIGINT)
    data = child.stdout.read()
    print(data)
    print(p.process_sar_cpu(data))