diff --git a/jupyter/compare_executions.ipynb b/jupyter/compare_executions.ipynb
index f680c459317a988236f071c4b6f748574897cd4f..1dd0e917b1ffc26d2a9a5975f1df37a7bbfeeefc 100755
--- a/jupyter/compare_executions.ipynb
+++ b/jupyter/compare_executions.ipynb
@@ -6,7 +6,8 @@
    "source": [
     "## Compare experiment executions\n",
     "\n",
-    "This notebook let's you qickly compare the training progress of your experiments.\n",
+    "This notebook lets you quickly compare the training progress of your experiments, from within a notebook.  For other use-cases we advise to use TensorBoard which is equipped with many more features than these few lines of code.\n",
+    "\n",
     "You will need to have the tfevents files (these are TensorBoard formatted log files that Distiller creates)."
    ]
   },
@@ -22,6 +23,13 @@
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
+    "\n",
+    "def get_tags_list(path_to_events_file):\n",
+    "    tags = [v.tag for e in tf.train.summary_iterator(path_to_events_file) for v in e.summary.value]\n",
+    "    return set(tags)\n",
+    "\n",
+    "\n",
     "def get_performance_data(path_to_events_file, tag):\n",
     "    \"\"\"Extract the performance history of data named 'tag'\n",
     "\n",
@@ -35,7 +43,16 @@
     "            if v.tag == tag:\n",
     "                data.append(v.simple_value)\n",
     "                steps.append(e.step)\n",
-    "    return steps, data"
+    "    return steps, data\n",
+    "\n",
+    "\n",
+    "def add_experiment(axs, tags, results, label):\n",
+    "    for i, tag in enumerate(tags):\n",
+    "        steps, prec1_ssl = get_performance_data(results, tag)\n",
+    "        axs[i//2, i%2].plot(steps, prec1_ssl, label=label)\n",
+    "        axs[i//2][i%2].set_title(tag)\n",
+    "        axs[i//2][i%2].legend()\n"
    ]
   },
   {
@@ -50,20 +67,38 @@
     "# WARNING: these files do not exist in the repositroy (too large) and will give you an error\n",
     "experiment_files = [('events.out.tfevents.1523290172.one-machine', 'experiment 1'),\n",
     "                    ('events.out.tfevents.1520430112.one-machine', 'experiment 2')]\n",
-    "\n",
+    "                    \n",
     "# Choose which performance indicators you wish to graph\n",
-    "tags = ['Peformance/Validation/Top1', 'Peformance/Validation/Loss', \n",
+    "tags = ['Peformance/Validation/Top1', 'Peformance/Validation/Loss',\n",
     "        'sparsity/weights/total',     'Peformance/Training/Reg Loss']\n",
     "\n",
     "f, axs = plt.subplots(2, 2, figsize=(20,20))\n",
     "f.suptitle('Performance')\n",
     "\n",
+    "print(get_tags_list(experiment_files[0][0]))\n",
+    "\n",
     "for experiment in experiment_files:\n",
     "    add_experiment(axs, tags, experiment[0], label=experiment[1])\n",
     "plt.tight_layout()\n",
     "\n",
     "plt.show()"
    ]
   }
  ],
  "metadata": {
@@ -82,7 +117,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.5.2"
+   "version": "3.6.7"
   }
  },
  "nbformat": 4,