author    opensource-tnbt <sridhar.rao@spirent.com>  2020-01-15 14:52:05 +0530
committer opensource-tnbt <sridhar.rao@spirent.com>  2020-01-15 14:54:20 +0530
commit    f3abab212ef37c9d0f2b65a091e39af6e8f9d70e (patch)
tree      24c77ec45792bc324b812da5da25a57e5a6b5f02 /tools/docker/results
parent    a94395daf8d3312659b56a306ea64960a2cdd64a (diff)
Tools: Jupyter Notebook Bug-fixes and Update.
A separate volume for Jupyter data is created. Results will be stored in that volume, and the notebook is updated to use it.

Signed-off-by: Sridhar K. N. Rao <sridhar.rao@spirent.com>
Change-Id: I2a359d64cf3d4281686d4d3c3d6f3ee6587c1e13
Diffstat (limited to 'tools/docker/results')
-rw-r--r--  tools/docker/results/docker-compose.yml                   |  3
-rw-r--r--  tools/docker/results/jupyter/Dockerfile                   |  3
-rw-r--r--  tools/docker/results/notebooks/testresult-analysis.ipynb  | 36
3 files changed, 23 insertions(+), 19 deletions(-)
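In short, the commit replaces the ./data bind mount with a named volume (jupyter-data) and repoints the notebook's CWD-relative './results' paths at the absolute '/data/results' on that volume. A minimal Python sketch of the path change, with both paths taken from the hunks below:

    import os

    # Before this change, the notebook resolved results relative to its
    # working directory (/notebooks per the Dockerfile):
    old_results = os.path.abspath('./results')   # -> /notebooks/results
    # After it, results live on the named volume at an absolute path:
    new_results = '/data/results'
    print(old_results, '->', new_results)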
diff --git a/tools/docker/results/docker-compose.yml b/tools/docker/results/docker-compose.yml
index 5c3ab1dc..87ba7fc0 100644
--- a/tools/docker/results/docker-compose.yml
+++ b/tools/docker/results/docker-compose.yml
@@ -4,6 +4,7 @@ volumes:
influx-data:
grafana-data:
mongo-data:
+ jupyter-data:
testapi-logs:
services:
influxdb:
@@ -69,7 +70,7 @@ services:
volumes:
- ./notebooks:/notebooks
- ./notebooks/testresult-analysis.ipynb:/notebooks/testresult-analysis.ipynb
- - ./data:/data
+ - jupyter-data:/data
postgres:
image: postgres
restart: always
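Compose creates the named volume on first `up`. To check it from the host, a hedged sketch using the Docker SDK for Python (an assumption; the docker CLI works equally well). Note that compose typically prefixes the volume with the project name, e.g. results_jupyter-data:

    import docker  # Docker SDK for Python, assumed installed on the host

    client = docker.from_env()
    # Match by suffix because the compose project prefix is not fixed here.
    for vol in client.volumes.list():
        if 'jupyter-data' in vol.name:
            print(vol.name, vol.attrs['Mountpoint'])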
diff --git a/tools/docker/results/jupyter/Dockerfile b/tools/docker/results/jupyter/Dockerfile
index d2816951..94f9bd36 100644
--- a/tools/docker/results/jupyter/Dockerfile
+++ b/tools/docker/results/jupyter/Dockerfile
@@ -10,4 +10,7 @@ RUN pip install -U graphviz paramiko
RUN echo "c.NotebookApp.token=''" >> $HOME/.jupyter/jupyter_notebook_config.py
VOLUME /notebooks
+VOLUME /data
+
+RUN mkdir /data/results
WORKDIR /notebooks
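One caveat worth knowing: build steps that write under a path after it has been declared as a VOLUME may be discarded, and a freshly created named volume mounted at /data can start out empty. A defensive sketch, run from the notebook side rather than at build time:

    import os

    # Guard for the case where the mounted volume starts out empty.
    os.makedirs('/data/results', exist_ok=True)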
diff --git a/tools/docker/results/notebooks/testresult-analysis.ipynb b/tools/docker/results/notebooks/testresult-analysis.ipynb
index a7e9335c..4f12ed63 100644
--- a/tools/docker/results/notebooks/testresult-analysis.ipynb
+++ b/tools/docker/results/notebooks/testresult-analysis.ipynb
@@ -86,7 +86,7 @@
"stderr_data = []\n",
"if directory_to_download:\n",
" # zip the collectd results to make the download faster\n",
- " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + directory_to_download + '/csv .'\n",
+ " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + '/tmp/csv .'\n",
" session = client.open_channel(kind='session')\n",
" session.get_pty()\n",
" session.exec_command(zip_command)\n",
@@ -119,10 +119,10 @@
" for path,files in sftp_walk(directory_to_download):\n",
" for file in files:\n",
" remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
- " local = os.path.join('./results', file).replace(\"\\/\",\"/\")\n",
+ " local = os.path.join('/data/results', file).replace(\"\\/\",\"/\")\n",
" sftp.get(remote, local)\n",
"# Untar the collectd results if we got it.\n",
- "path = os.path.join('./results', 'collectd.tar.gz')\n",
+ "path = os.path.join('/data/results', 'collectd.tar.gz')\n",
"if os.path.exists(path):\n",
" tar = tarfile.open(path)\n",
" tar.extractall()\n",
@@ -143,9 +143,9 @@
"strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
" '* Memory:', '* Virtual Switch Set-up:',\n",
" '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
- "filename = os.path.basename(glob.glob('./results/result*.rst')[0])\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.rst')[0])\n",
"info_dict = {}\n",
- "with open(os.path.join('./results', filename), 'r') as file:\n",
+ "with open(os.path.join('/data/results', filename), 'r') as file:\n",
" for line in file:\n",
" if any(s in line for s in strings):\n",
" info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
@@ -166,8 +166,8 @@
"metadata": {},
"outputs": [],
"source": [
- "filename = os.path.basename(glob.glob('./results/vsperf*.conf')[0])\n",
- "file = os.path.join('./results', filename)\n",
+ "filename = os.path.basename(glob.glob('/data/results/vsperf*.conf')[0])\n",
+ "file = os.path.join('/data/results', filename)\n",
"with open(file, 'r') as f:\n",
" for line in f:\n",
" if line.startswith('TRAFFICGEN_DURATION'):\n",
@@ -175,19 +175,19 @@
" value = value.rstrip()\n",
" value = value.lstrip()\n",
" traffic_duration = int(value)\n",
+ " print(traffic_duration)\n",
" elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
" value = line.split('=')[1]\n",
" value = value.rstrip()\n",
" pmd_cores_mask = value.lstrip()\n",
+ " print(pmd_cores_mask)\n",
" elif line.startswith('GUEST_CORE_BINDING'):\n",
" value = line.split('=')[1]\n",
" value = value.rstrip()\n",
" value = value.lstrip()\n",
" guest_cores = value[1:-2]\n",
+ " print(guest_cores)"
"\n",
- "print(traffic_duration)\n",
- "print(pmd_cores_mask)\n",
- "print(guest_cores)"
]
},
{
@@ -209,7 +209,7 @@
"portcores = collections.OrderedDict()\n",
"chunks = []\n",
"current_chunk = []\n",
- "file = os.path.join('./results', 'ovs-cores.log')\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
"with open(file, 'r') as f:\n",
" for line in f:\n",
" if line.startswith('pmd') and current_chunk:\n",
@@ -287,7 +287,7 @@
"portcores = collections.OrderedDict()\n",
"chunks = []\n",
"current_chunk = []\n",
- "file = os.path.join('./results', 'ovs-cores.log')\n",
+ "file = os.path.join('/data/results', 'ovs-cores.log')\n",
"with open(file, 'r') as f:\n",
" for line in f:\n",
" if line.startswith('pmd') and current_chunk:\n",
@@ -361,7 +361,7 @@
"source": [
"lines_seen = set() # holds lines already seen\n",
"outfile = open('./counts.dat', \"w\")\n",
- "file = os.path.join('./results', 'trex-liveresults-counts.dat')\n",
+ "file = os.path.join('/data/results', 'trex-liveresults-counts.dat')\n",
"for line in open(file, \"r\"):\n",
" if line not in lines_seen: # not a duplicate\n",
" outfile.write(line)\n",
@@ -403,7 +403,7 @@
},
"outputs": [],
"source": [
- "file = os.path.join('./results', 'RUNirq.irq.log')\n",
+ "file = os.path.join('/data/results', 'RUNirq.irq.log')\n",
"tdf = pd.read_csv(file)\n",
"tdf.columns\n",
"exclude = [' <1', ' < 5', ' < 10',' < 50', ' < 100', ' < 500', ' < 1000']\n",
@@ -511,8 +511,8 @@
"outputs": [],
"source": [
"from datetime import datetime\n",
- "filename = os.path.basename(glob.glob('./results/vsperf-overall*.log')[0])\n",
- "logfile = os.path.join('./results', filename)\n",
+ "filename = os.path.basename(glob.glob('/data/results/vsperf-overall*.log')[0])\n",
+ "logfile = os.path.join('/data/results', filename)\n",
"linecnt = 0\n",
"times = {}\n",
"with open(logfile) as f:\n",
@@ -641,9 +641,9 @@
"outputs": [],
"source": [
"import glob\n",
- "filename = os.path.basename(glob.glob('./results/result*.csv')[0])\n",
+ "filename = os.path.basename(glob.glob('/data/results/result*.csv')[0])\n",
"filename\n",
- "tdf = pd.read_csv(os.path.join('./results', filename))\n",
+ "tdf = pd.read_csv(os.path.join('/data/results', filename))\n",
"pkts = ['tx_frames', 'rx_frames']\n",
"fps = ['tx_rate_fps', 'throughput_rx_fps']\n",
"mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",