author     Sridhar Rao <sridhar.rao@spirent.com>    2019-08-07 08:39:04 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>    2019-08-07 08:39:04 +0000
commit     391e5ceb8958eff1f108cc2d589dd3a11c7c48a2 (patch)
tree       9433aef428e9c4a27fdf16ad0f5edfda4fb8efdd /tools/docker/results/notebooks/testresult-analysis.ipynb
parent     d835dbe3fd144c2144669cdf31a96263be21ab51 (diff)
parent     d691cc89e106d710f4d36bc3998501415588e2e1 (diff)
Merge "Docker: VSPERF Results Container."
Diffstat (limited to 'tools/docker/results/notebooks/testresult-analysis.ipynb')
-rw-r--r--  tools/docker/results/notebooks/testresult-analysis.ipynb  784
1 file changed, 784 insertions, 0 deletions
diff --git a/tools/docker/results/notebooks/testresult-analysis.ipynb b/tools/docker/results/notebooks/testresult-analysis.ipynb
new file mode 100644
index 00000000..a7e9335c
--- /dev/null
+++ b/tools/docker/results/notebooks/testresult-analysis.ipynb
@@ -0,0 +1,784 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "hide_input": true
+ },
+ "source": [
+ "# OPNFV VSPERF\n",
+ "# Beyond Performance Metrics: Towards Causation Analysis"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### sridhar.rao@spirent.com and acm@research.att.com"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Import packages\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import seaborn as sns\n",
+ "from graphviz import Digraph\n",
+ "import collections\n",
+ "import glob\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Get the results to analyze: \n",
+ "Getting Latest one, if ``directory_to_download`` is empty"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import paramiko\n",
+ "import tarfile\n",
+ "import os\n",
+ "from stat import S_ISDIR\n",
+ "RECV_BYTES = 4096\n",
+ "hostname = '10.10.120.24'\n",
+ "port = 22\n",
+ "uname='opnfv'\n",
+ "pwd='opnfv' \n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "client = paramiko.Transport((hostname, port))\n",
+ "client.connect(username=uname, password=pwd)\n",
+ "session = client.open_channel(kind='session')\n",
+ "directory_to_download = ''\n",
+ "\n",
+ "session.exec_command('ls /tmp | grep results')\n",
+ "if not directory_to_download:\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stdout_data:\n",
+ " line = stdout_data[0]\n",
+ " filenames = line.decode(\"utf-8\").rstrip('\\n').split('\\n')\n",
+ " filenames = sorted(filenames)\n",
+ " latest = filenames[-1]\n",
+ " directory_to_download = os.path.join('/tmp', latest).replace(\"\\\\\",\"/\")\n",
+ " print(directory_to_download)\n",
+ "stdout_data = []\n",
+ "stderr_data = []\n",
+ "if directory_to_download:\n",
+ " # zip the collectd results to make the download faster\n",
+ " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + directory_to_download + '/csv .'\n",
+ " session = client.open_channel(kind='session')\n",
+ " session.get_pty()\n",
+ " session.exec_command(zip_command)\n",
+ " while True:\n",
+ " if session.recv_ready():\n",
+ " stdout_data.append(session.recv(RECV_BYTES))\n",
+ " if session.recv_stderr_ready():\n",
+ " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
+ " if session.exit_status_ready():\n",
+ " break\n",
+ " if stderr_data:\n",
+ " print(stderr_data[0])\n",
+ " if stdout_data:\n",
+ " print(stdout_data[0])\n",
+ "\n",
+ " # Begin the actual downlaod\n",
+ " sftp = paramiko.SFTPClient.from_transport(client)\n",
+ " def sftp_walk(remotepath):\n",
+ " path=remotepath\n",
+ " files=[]\n",
+ " folders=[]\n",
+ " for f in sftp.listdir_attr(remotepath):\n",
+ " if S_ISDIR(f.st_mode):\n",
+ " folders.append(f.filename)\n",
+ " else:\n",
+ " files.append(f.filename)\n",
+ " if files:\n",
+ " yield path, files\n",
+ " # Filewise download happens here\n",
+ " for path,files in sftp_walk(directory_to_download):\n",
+ " for file in files:\n",
+ " remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
+ " local = os.path.join('./results', file).replace(\"\\/\",\"/\")\n",
+ " sftp.get(remote, local)\n",
+ "# Untar the collectd results if we got it.\n",
+ "path = os.path.join('./results', 'collectd.tar.gz')\n",
+ "if os.path.exists(path):\n",
+ " tar = tarfile.open(path)\n",
+ " tar.extractall()\n",
+ " tar.close()\n",
+ "# Ready to work with downloaded data, close the session and client.\n",
+ "session.close()\n",
+ "client.close()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
+ " '* Memory:', '* Virtual Switch Set-up:',\n",
+ " '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
+ "filename = os.path.basename(glob.glob('./results/result*.rst')[0])\n",
+ "info_dict = {}\n",
+ "with open(os.path.join('./results', filename), 'r') as file:\n",
+ " for line in file:\n",
+ " if any(s in line for s in strings):\n",
+ " info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
+ "df = pd.DataFrame.from_dict(info_dict, orient='index', columns=['Value'])\n",
+ "df"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Understand the configuration used for the test."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filename = os.path.basename(glob.glob('./results/vsperf*.conf')[0])\n",
+ "file = os.path.join('./results', filename)\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('TRAFFICGEN_DURATION'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " traffic_duration = int(value)\n",
+ " elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " pmd_cores_mask = value.lstrip()\n",
+ " elif line.startswith('GUEST_CORE_BINDING'):\n",
+ " value = line.split('=')[1]\n",
+ " value = value.rstrip()\n",
+ " value = value.lstrip()\n",
+ " guest_cores = value[1:-2]\n",
+ "\n",
+ "print(traffic_duration)\n",
+ "print(pmd_cores_mask)\n",
+ "print(guest_cores)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## OVS-Ports and Cores"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import collections\n",
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('./results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "# import graphviz\n",
+ "from graphviz import Digraph\n",
+ "ps = Digraph(name='ovs-ports-cores', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='green')\n",
+ " c.node('t0', 'TGen-Port-0')\n",
+ " c.node('t1', 'TGen-Port-1')\n",
+ " c.attr(label='TGEN')\n",
+ " c.attr(color='blue')\n",
+ "with ps.subgraph(name=\"cluster_1\") as c:\n",
+ " c.node_attr.update(style='filled', color='yellow')\n",
+ " c.node('v0', 'VNF-Port-0')\n",
+ " c.node('v1', 'VNF-Port-1')\n",
+ " c.attr(label='VNF')\n",
+ " c.attr(color='blue')\n",
+ " \n",
+ "with ps.subgraph(name='cluster_2') as c: \n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " c.node(id, port+'\\nCore-ID:'+ core)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ " if 'dpdkvhost' in port:\n",
+ " ps.edge(id, 'v'+num)\n",
+ " else:\n",
+ " ps.edge(id, 't'+num)\n",
+ "\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dropped Packets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "portcores = collections.OrderedDict()\n",
+ "chunks = []\n",
+ "current_chunk = []\n",
+ "file = os.path.join('./results', 'ovs-cores.log')\n",
+ "with open(file, 'r') as f:\n",
+ " for line in f:\n",
+ " if line.startswith('pmd') and current_chunk:\n",
+ " # if line starts with token and the current chunk is not empty\n",
+ " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
+ " current_chunk = [] # make current chunk blank\n",
+ " # just append a line to the current chunk on each iteration\n",
+ " if \"port:\" in line or 'pmd' in line:\n",
+ " current_chunk.append(line)\n",
+ " chunks.append(current_chunk) # append the last chunk outside the loop\n",
+ "\n",
+ "core_ids = []\n",
+ "for ch in chunks:\n",
+ " port_id = ''\n",
+ " core_id = ''\n",
+ " for line in ch:\n",
+ " if 'pmd' in line:\n",
+ " core_id = line.split()[-1][:-1]\n",
+ " if core_id not in core_ids:\n",
+ " core_ids.append(core_id)\n",
+ " elif 'port:' in line:\n",
+ " port_id = line.split()[1]\n",
+ " if port_id and core_id:\n",
+ " if port_id not in portcores:\n",
+ " portcores[port_id] = core_id\n",
+ "\n",
+ "ps = Digraph(name='ovs-dropped', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "\n",
+ "def get_dropped(port_id):\n",
+ " # port_id = 'dpdk0'\n",
+ " if glob.glob('./pod12-node4/*'+port_id):\n",
+ " dirname = os.path.basename(glob.glob('./pod12-node4/*'+port_id)[0])\n",
+ " if dirname:\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " filename = os.path.basename(glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0])\n",
+ " if filename:\n",
+ " with open(os.path.join('./pod12-node4', dirname, filename), 'r') as f:\n",
+ " line = f.readlines()[-1]\n",
+ " fields = line.split(',')\n",
+ " return fields[1], fields[2]\n",
+ " return 'NA','NA'\n",
+ "\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='OVS-DPDK')\n",
+ " c.attr(color='blue')\n",
+ " count = 0\n",
+ " for port, core in portcores.items():\n",
+ " id = 'o'+str(count)\n",
+ " rx,tx = get_dropped(port)\n",
+ " c.node(id, port+'\\nRX-Dropped:'+ rx + '\\nTX-Dropped:' + tx)\n",
+ " count += 1\n",
+ " num = port[-1]\n",
+ "ps"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Plotting Live Results - T-Rex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "lines_seen = set() # holds lines already seen\n",
+ "outfile = open('./counts.dat', \"w\")\n",
+ "file = os.path.join('./results', 'trex-liveresults-counts.dat')\n",
+ "for line in open(file, \"r\"):\n",
+ " if line not in lines_seen: # not a duplicate\n",
+ " outfile.write(line)\n",
+ " lines_seen.add(line)\n",
+ "outfile.close()\n",
+ "tdf = pd.read_csv('./counts.dat')\n",
+ "print(tdf.columns)\n",
+ "ax = tdf.loc[(tdf.rx_port == 1)].plot(y='rx_pkts')\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i][0], indices[i][1], facecolor='RED', edgecolor='BLUE', alpha=.2)\n",
+ " i+=1\n",
+ "\n",
+ "ind = 0\n",
+ "indv = tdf.ts[0]\n",
+ "ax.set_xlabel(\"Index\")\n",
+ "ax.set_ylabel('Count')\n",
+ "for i in range(len(tdf.ts)):\n",
+ " if tdf.ts[i] - indv > int(traffic_duration):\n",
+ " highlight([(ind, i)], ax)\n",
+ " ind = i\n",
+ " indv = tdf.ts[i]\n",
+ "highlight([(ind,i)], ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## IRQ Latency Histogram"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "file = os.path.join('./results', 'RUNirq.irq.log')\n",
+ "tdf = pd.read_csv(file)\n",
+ "tdf.columns\n",
+ "exclude = [' <1', ' < 5', ' < 10',' < 50', ' < 100', ' < 500', ' < 1000']\n",
+ "ax = tdf.loc[:, tdf.columns.difference(exclude)].plot(x=' number', xticks=tdf[' number'], figsize=(20,10))\n",
+ "ax.set_xlabel('Core #')\n",
+ "ax.set_ylabel('Count')\n",
+ "#tdf.plot(x='number')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Sample Collectd Metric Display - L3 Cache Occupancy in Bytes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import math\n",
+ "def cpumask2coreids(mask):\n",
+ " intmask = int(mask, 16)\n",
+ " i = 1\n",
+ " coreids = []\n",
+ " while (i < intmask):\n",
+ " if (i & intmask):\n",
+ " coreids.append(str(math.frexp(i)[-1]-1))\n",
+ " i = i << 1\n",
+ " return (coreids)\n",
+ "\n",
+ "vswitch_cpus = \"['2']\"\n",
+ "ps = Digraph(name='cpu-map', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
+ "with ps.subgraph(name=\"cluster_0\") as c:\n",
+ " c.node_attr.update(style='filled', color='pink')\n",
+ " c.attr(label='CPU-MAPPINGS')\n",
+ " c.attr(color='blue')\n",
+ " c.node('vscpus', 'vSwitch: \\n' + vswitch_cpus)\n",
+ " # vnf_cpus = cpumask2coreids(guest_cores)\n",
+ " c.node('vncpus', 'VNF: \\n' + guest_cores)\n",
+ " pmd_cpus = cpumask2coreids(pmd_cores_mask[1:-1])\n",
+ " c.node('pmcpus', 'PMDs: \\n' + str(pmd_cpus))\n",
+ "\n",
+ "ps"
+ ]
+ },
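+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of ``cpumask2coreids`` (defined above) with a hypothetical mask value; ``0xc`` sets bits 2 and 3, so it should print ['2', '3']:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sanity check: '0xc' = 0b1100, so cores 2 and 3 are expected.\n",
+ "print(cpumask2coreids('0xc'))"
+ ]
+ },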
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "# Path where collectd results are stored.\n",
+ "mypath = \"./pod12-node4\"\n",
+ "file_count = 0\n",
+ "cpu_names = []\n",
+ "for level1 in os.listdir(mypath):\n",
+ " if \"intel_rdt\" in level1:\n",
+ " l2path = os.path.join(mypath, level1)\n",
+ " for level2 in os.listdir(l2path):\n",
+ " if \"bytes\" in level2:\n",
+ " l3path = os.path.join(l2path, level2)\n",
+ " if file_count == 0:\n",
+ " file_count += 1\n",
+ " df = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " # nn = 'cpu-'+ level1.split('-')[1]\n",
+ " cpu_names.append(nn)\n",
+ " # print(nn)\n",
+ " df.rename(columns={'value': nn}, inplace=True)\n",
+ " else:\n",
+ " file_count += 1\n",
+ " tdf = pd.read_csv(l3path)\n",
+ " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
+ " cpu_names.append(nn)\n",
+ " tdf.rename(columns={'value': nn}, inplace=True)\n",
+ " df[nn] = tdf[nn] \n",
+ "\n",
+ "ax = df.plot(x='epoch', y=cpu_names)\n",
+ "ax.set_ylabel(\"MBytes\")\n",
+ "ax.set_xlabel('Time')\n",
+ "\n",
+ "\n",
+ " \n",
+ "# df = pd.read_csv()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Events "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "from datetime import datetime\n",
+ "filename = os.path.basename(glob.glob('./results/vsperf-overall*.log')[0])\n",
+ "logfile = os.path.join('./results', filename)\n",
+ "linecnt = 0\n",
+ "times = {}\n",
+ "with open(logfile) as f:\n",
+ " for line in f:\n",
+ " line = line.strip('\\n')\n",
+ " if linecnt == 0:\n",
+ " times['Start-Test'] = line.split(\" : \")[0]\n",
+ " linecnt += 1\n",
+ " if 'Binding NICs' in line:\n",
+ " times['Binding-NICs'] = line.split(\" : \")[0]\n",
+ " if 'Starting traffic at' in line:\n",
+ " sline = line.split(\" : \")[1]\n",
+ " time = line.split(\" : \")[0]\n",
+ " speed = sline.split('at',1)[1]\n",
+ " times[speed] = time \n",
+ " elif 'Starting vswitchd' in line:\n",
+ " times['vSwitch-Start'] = line.split(\" : \")[0]\n",
+ " elif 'Starting ovs-vswitchd' in line:\n",
+ " times['ovsvswitch-start'] = line.split(\" : \")[0]\n",
+ " elif 'Adding Ports' in line:\n",
+ " times['Ports-Added'] = line.split(\" : \")[0]\n",
+ " elif 'Flows Added' in line:\n",
+ " times['Flows-Added'] = line.split(\" : \")[0]\n",
+ " elif 'send_traffic with' in line:\n",
+ " times['Traffic Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1280' in line:\n",
+ " times['Traffic-Start-1280'] = line.split(\" : \")[0]\n",
+ " elif 'Starting qemu' in line:\n",
+ " times['VNF-Start'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 64' in line:\n",
+ " times['Traffic-Start-64'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 128' in line:\n",
+ " times['Traffic-Start-128'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 256' in line:\n",
+ " times['Traffic-Start-256'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 512' in line:\n",
+ " times['Traffic-Start-512'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1024' in line:\n",
+ " times['Traffic-Start-1024'] = line.split(\" : \")[0]\n",
+ " elif 'l2 framesize 1518' in line:\n",
+ " times['Traffic-Start-1518'] = line.split(\" : \")[0]\n",
+ " elif 'dump flows' in line:\n",
+ " times['Traffic-End'] = line.split(\" : \")[0]\n",
+ " elif 'Wait for QEMU' in line:\n",
+ " times['VNF-Stop'] = line.split(\" : \")[0]\n",
+ " elif 'delete flow' in line:\n",
+ " times['flow-removed'] = line.split(\" : \")[0]\n",
+ " elif 'delete port' in line:\n",
+ " times['port-removed'] = line.split(\" : \")[0]\n",
+ " elif 'Killing ovs-vswitchd' in line:\n",
+ " times['vSwitch-Stop'] = line.split(\" : \")[0]\n",
+ "\n",
+ "times['Test-Stop'] = line.split(\" : \")[0]\n",
+ "#print(times)\n",
+ "ddf = pd.DataFrame.from_dict(times, orient='index', columns=['timestamp'])\n",
+ "names = ddf.index.values\n",
+ "dates = ddf['timestamp'].tolist()\n",
+ "datefmt=\"%Y-%m-%d %H:%M:%S,%f\"\n",
+ "dates = [datetime.strptime(ii, datefmt) for ii in dates]\n",
+ "# print(names)\n",
+ "# print(dates)"
+ ]
+ },
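+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The phrase matching above could equally be table-driven. A minimal sketch assuming the same '<timestamp> : <message>' log format; ``PHRASE_EVENTS`` and ``event_for`` are hypothetical names, not part of the test flow:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Hypothetical table-driven variant of the phrase matching above.\n",
+ "PHRASE_EVENTS = [\n",
+ "    ('Binding NICs', 'Binding-NICs'),\n",
+ "    ('Starting vswitchd', 'vSwitch-Start'),\n",
+ "    ('Starting qemu', 'VNF-Start'),\n",
+ "    ('dump flows', 'Traffic-End'),\n",
+ "]\n",
+ "\n",
+ "def event_for(line):\n",
+ "    # Return the event name for the first matching phrase, else None.\n",
+ "    for phrase, event in PHRASE_EVENTS:\n",
+ "        if phrase in line:\n",
+ "            return event\n",
+ "    return None\n",
+ "\n",
+ "print(event_for('2019-08-07 08:39:04,000 : Starting qemu ...'))"
+ ]
+ },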
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.dates as mdates\n",
+ "from matplotlib import ticker\n",
+ "\n",
+ "levels = np.array([-5, 5, -3, 3, -1, 1])\n",
+ "fig, ax = plt.subplots(figsize=(40, 5))\n",
+ "\n",
+ "# Create the base line\n",
+ "start = min(dates)\n",
+ "stop = max(dates)\n",
+ "ax.plot((start, stop), (0, 0), 'k', alpha=.5)\n",
+ "\n",
+ "pos_list = np.arange(len(dates))\n",
+ "\n",
+ "# Iterate through releases annotating each one\n",
+ "for ii, (iname, idate) in enumerate(zip(names, dates)):\n",
+ " level = levels[ii % 6]\n",
+ " vert = 'top' if level < 0 else 'bottom'\n",
+ " ax.scatter(idate, 0, s=100, facecolor='w', edgecolor='k', zorder=9999)\n",
+ " # Plot a line up to the text\n",
+ " ax.plot((idate, idate), (0, level), c='r', alpha=.7)\n",
+ " # Give the text a faint background and align it properly\n",
+ " ax.text(idate, level, iname,\n",
+ " horizontalalignment='right', verticalalignment=vert, fontsize=14,\n",
+ " backgroundcolor=(1., 1., 1., .3))\n",
+ "ax.set(title=\"VSPERF Main Events\")\n",
+ "# Set the xticks formatting\n",
+ "ax.get_xaxis().set_major_locator(mdates.SecondLocator(interval=30))\n",
+ "ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%M %S\"))\n",
+ "fig.autofmt_xdate()\n",
+ "plt.setp((ax.get_yticklabels() + ax.get_yticklines() +\n",
+ " list(ax.spines.values())), visible=False)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Current and old."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Current Result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import glob\n",
+ "filename = os.path.basename(glob.glob('./results/result*.csv')[0])\n",
+ "filename\n",
+ "tdf = pd.read_csv(os.path.join('./results', filename))\n",
+ "pkts = ['tx_frames', 'rx_frames']\n",
+ "fps = ['tx_rate_fps', 'throughput_rx_fps']\n",
+ "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",
+ "pcents = ['tx_rate_percent', 'throughput_rx_percent', 'frame_loss_percent']\n",
+ "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))\n",
+ "tdf.plot.bar(y= pkts,ax=axes[0,0])\n",
+ "tdf.plot.bar(y= fps,ax=axes[0,1])\n",
+ "tdf.plot.bar(y= mbps,ax=axes[1,0])\n",
+ "tdf.plot.bar(y= pcents,ax=axes[1,1])\n",
+ "current_pkt_size = str(tdf['packet_size'].iloc[-1])\n",
+ "current_rx_fps = str(tdf['throughput_rx_fps'].iloc[-1])\n",
+ "print(current_rx_fps)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## How Current Result compares to Previous ones?"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "import urllib\n",
+ "import json\n",
+ "import requests\n",
+ "#json_data = requests.get('http://testresults.opnfv.org/test/api/v1/results?project=vsperf').json()\n",
+ "json_data = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()\n",
+ "res = json_data['results']\n",
+ "df1 = pd.DataFrame(res)\n",
+ "sort_by_date = df1.sort_values('start_date')\n",
+ "details = df1['details'].apply(pd.Series)\n",
+ "details[current_pkt_size] = pd.to_numeric(pd.Series(details[current_pkt_size]))\n",
+ "# details.plot.bar(y = current_pkt_size)\n",
+ "details_cur_pkt = details[[current_pkt_size]].copy()\n",
+ "details_cur_pkt.loc[-1]= float(current_rx_fps)\n",
+ "details_cur_pkt.index = details_cur_pkt.index + 1 # shifting index\n",
+ "details_cur_pkt.sort_index(inplace=True) \n",
+ "ax = details_cur_pkt.plot.bar()\n",
+ "ax.set_ylabel(\"Frames per sec\")\n",
+ "ax.set_xlabel(\"Run Number\")\n",
+ "def highlight(indices,ax):\n",
+ " i=0\n",
+ " while i<len(indices):\n",
+ " ax.axvspan(indices[i]-0.5, indices[i]+0.5, facecolor='RED', edgecolor='none', alpha=.2)\n",
+ " i+=1\n",
+ "highlight([0], ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Heatmaps"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "hide_input": true
+ },
+ "outputs": [],
+ "source": [
+ "array_of_dfs = []\n",
+ "for dirs in glob.glob('./pod12-node4/ovs_stats-vsperf*'):\n",
+ " dirname = os.path.basename(dirs)\n",
+ " if dirname:\n",
+ " port = dirname.split('.')[1]\n",
+ " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
+ " full_path = glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0]\n",
+ " filename = os.path.basename(full_path)\n",
+ " if filename:\n",
+ " df = pd.read_csv(full_path)\n",
+ " df.rename(index=str, columns={\"rx\": port+\"-rx\" , \"tx\": port+\"-tx\"}, inplace=True)\n",
+ " df = df.drop(columns=['epoch'])\n",
+ " array_of_dfs.append(df)\n",
+ "master_df = pd.concat(array_of_dfs, axis=1, sort=True)\n",
+ "master_df.columns\n",
+ "\n",
+ "# get the correlation coefficient between the different columns\n",
+ "corr = master_df.iloc[:, 0:].corr()\n",
+ "arr_corr = corr.values\n",
+ "# mask out the top triangle\n",
+ "arr_corr[np.triu_indices_from(arr_corr)] = np.nan\n",
+ "fig, ax = plt.subplots(figsize=(18, 12))\n",
+ "sns.set(font_scale=3.0)\n",
+ "hm = sns.heatmap(arr_corr, cbar=True, vmin=-0.5, vmax=0.5,\n",
+ " fmt='.2f', annot_kws={'size': 20}, annot=True, \n",
+ " square=True, cmap=plt.cm.Reds)\n",
+ "ticks = np.arange(corr.shape[0]) + 0.5\n",
+ "ax.set_xticks(ticks)\n",
+ "ax.set_xticklabels(corr.columns, rotation=90, fontsize=20)\n",
+ "ax.set_yticks(ticks)\n",
+ "ax.set_yticklabels(corr.index, rotation=360, fontsize=20)\n",
+ "\n",
+ "ax.set_title('Heatmap')\n",
+ "plt.tight_layout()\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "author": {
+ "@type": "Person",
+ "name": "Sridhar K. N. Rao",
+ "worksFor": {
+ "@type": "Organization",
+ "name": "Spirent Communications"
+ }
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}