diff --git a/GTSRB/05-Full-convolutions.ipynb b/GTSRB/05-Full-convolutions.ipynb
index 95d1d49ae4ce345a8ab162afd20996f438851202..47bec69c47b04276f6693b8595f53dd979907e92 100644
--- a/GTSRB/05-Full-convolutions.ipynb
+++ b/GTSRB/05-Full-convolutions.ipynb
@@ -14,23 +14,14 @@
     "Our main steps:\n",
     " - Try n models with n datasets\n",
     " - Save a Pandas/h5 report\n",
-    " - Can be run in :\n",
-    "    - Notebook mode\n",
-    "    - Batch mode \n",
-    "    - Tensorboard follow up\n",
-    "    \n",
-    "To export a notebook as a script :  \n",
-    "```jupyter nbconvert --to script <notebook>```\n",
-    "\n",
-    "To run a notebook :  \n",
-    "```jupyter nbconvert --to notebook --execute <notebook>```\n",
+    " - Write to be run in batch mode\n",
     "\n",
     "## 1/ Import"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -38,9 +29,8 @@
     "from tensorflow import keras\n",
     "\n",
     "import numpy as np\n",
-    "import pandas as pd\n",
     "import h5py\n",
-    "import os,time\n",
+    "import os,time,json\n",
     "\n",
     "from IPython.display import display\n",
     "\n",
@@ -56,22 +46,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Full Convolutions Notebook\n",
-      "  Version            : 1.0\n",
-      "  Run time           : Sunday 19 January 2020, 12:37:56\n",
-      "  TensorFlow version : 2.0.0\n",
-      "  Keras version      : 2.2.4-tf\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print('\\nFull Convolutions Notebook')\n",
     "print('  Version            : {}'.format(VERSION))\n",
@@ -89,7 +66,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -118,7 +95,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -207,7 +184,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -252,7 +229,7 @@
     "                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)\n",
     "                # ---- Train\n",
     "                start_time = time.time()\n",
-    "                history = model.fit(  x_train[:1000], y_train[:1000],\n",
+    "                history = model.fit(  x_train, y_train,\n",
     "                                    batch_size      = batch_size,\n",
     "                                    epochs          = epochs,\n",
     "                                    verbose         = 0,\n",
@@ -283,18 +260,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Reset directories : ./run/logs and ./run/models .\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "%%bash\n",
     "\n",
@@ -309,61 +277,14 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### 6.2 Start Tensorboard"
+    "### 6.2/ run and save report"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tensorbord started with pid 1610\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "tensorboard_start --logdir ./run/logs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3/ run and save report"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 24,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "---- Run --------------------------------------------------\n",
-      "\n",
-      "Dataset :  set-24x24-L\n",
-      "    Run model v1  : -\n",
-      "    Run model v3  : -\n",
-      "\n",
-      "Dataset :  set-24x24-RGB\n",
-      "    Run model v1  : -\n",
-      "    Run model v3  : -\n",
-      "\n",
-      "Report saved as  ./run/report-2020-01-19_14h56m27s.h5\n",
-      "-----------------------------------------------------------\n",
-      "CPU times: user 29.2 s, sys: 4 s, total: 33.2 s\n",
-      "Wall time: 7.57 s\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "%%time\n",
     "\n",
@@ -372,16 +293,16 @@
     "# ---- Datasets and models list\n",
     "\n",
     "# For tests\n",
-    "datasets = ['set-24x24-L', 'set-24x24-RGB']\n",
-    "models   = {'v1':get_model_v1, 'v3':get_model_v3}\n",
+    "# datasets = ['set-24x24-L', 'set-24x24-RGB']\n",
+    "# models   = {'v1':get_model_v1, 'v3':get_model_v3}\n",
     "\n",
     "# The real one\n",
-    "# datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
-    "# models   = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
+    "datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
+    "models   = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
     "\n",
     "# ---- Report name\n",
     "\n",
-    "report_name='./run/report-{}.h5'.format(time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\"))\n",
+    "report_name='./run/report-{}.json'.format(time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\"))\n",
     "\n",
     "# ---- Run\n",
     "\n",
@@ -389,41 +310,13 @@
     "\n",
     "# ---- Save report\n",
     "\n",
-    "output = pd.DataFrame (out)\n",
-    "params = pd.DataFrame( {'datasets':datasets, 'models':list(models.keys())} )\n",
-    "\n",
-    "output.to_hdf(report_name, 'output')\n",
-    "params.to_hdf(report_name, 'params')\n",
+    "with open(report_name, 'w') as outfile:\n",
+    "    json.dump(out, outfile)\n",
     "\n",
     "print('\\nReport saved as ',report_name)\n",
     "print('-'*59)\n"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.4/ Stop Tensorboard"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 23,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tensorbord stopped (1610)\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "tensorboard_stop"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -433,20 +326,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Sunday 19 January 2020, 14:32:36\n",
-      "The work is done.\n",
-      "\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "print('\\n{}'.format(time.strftime(\"%A %-d %B %Y, %H:%M:%S\")))\n",
     "print(\"The work is done.\\n\")"
diff --git a/GTSRB/05-Full-convolutions.nbconvert.ipynb b/GTSRB/05-Full-convolutions.nbconvert.ipynb
deleted file mode 100644
index 8f71569c32ea8709a07bffe031f4fef03ce505fe..0000000000000000000000000000000000000000
--- a/GTSRB/05-Full-convolutions.nbconvert.ipynb
+++ /dev/null
@@ -1,484 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "German Traffic Sign Recognition Benchmark (GTSRB)\n",
-    "=================================================\n",
-    "---\n",
-    "Introduction au Deep Learning  (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  \n",
-    "\n",
-    "## Episode 5 : Full Convolutions\n",
-    "\n",
-    "Our main steps:\n",
-    " - Try n models with n datasets\n",
-    " - Save a Pandas/h5 report\n",
-    " - Can be run in :\n",
-    "    - Notebook mode\n",
-    "    - Batch mode \n",
-    "    - Tensorboard follow up\n",
-    "    \n",
-    "To export a notebook as a script :  \n",
-    "```jupyter nbconvert --to script <notebook>```\n",
-    "\n",
-    "To run a notebook :  \n",
-    "```jupyter nbconvert --to notebook --execute <notebook>```\n",
-    "\n",
-    "## 1/ Import"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "\n",
-    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import h5py\n",
-    "import os,time\n",
-    "\n",
-    "from IPython.display import display\n",
-    "\n",
-    "VERSION='1.2'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 2/ Init and start"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Full Convolutions Notebook\n",
-      "  Version            : 1.2\n",
-      "  Run time           : Sunday 19 January 2020, 15:35:20\n",
-      "  TensorFlow version : 2.0.0\n",
-      "  Keras version      : 2.2.4-tf\n"
-     ]
-    }
-   ],
-   "source": [
-    "print('\\nFull Convolutions Notebook')\n",
-    "print('  Version            : {}'.format(VERSION))\n",
-    "print('  Run time           : {}'.format(time.strftime(\"%A %-d %B %Y, %H:%M:%S\")))\n",
-    "print('  TensorFlow version :',tf.__version__)\n",
-    "print('  Keras version      :',tf.keras.__version__)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 3/ Dataset loading"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def read_dataset(name):\n",
-    "    '''Reads h5 dataset from ./data\n",
-    "\n",
-    "    Arguments:  dataset name, without .h5\n",
-    "    Returns:    x_train,y_train,x_test,y_test data'''\n",
-    "    # ---- Read dataset\n",
-    "    filename='./data/'+name+'.h5'\n",
-    "    with  h5py.File(filename) as f:\n",
-    "        x_train = f['x_train'][:]\n",
-    "        y_train = f['y_train'][:]\n",
-    "        x_test  = f['x_test'][:]\n",
-    "        y_test  = f['y_test'][:]\n",
-    "\n",
-    "    return x_train,y_train,x_test,y_test"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 4/ Models collection"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "# A basic model\n",
-    "#\n",
-    "def get_model_v1(lx,ly,lz):\n",
-    "    \n",
-    "    model = keras.models.Sequential()\n",
-    "    \n",
-    "    model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(lx,ly,lz)))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.2))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(192, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.2))\n",
-    "\n",
-    "    model.add( keras.layers.Flatten()) \n",
-    "    model.add( keras.layers.Dense(1500, activation='relu'))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Dense(43, activation='softmax'))\n",
-    "    return model\n",
-    "    \n",
-    "# A more sophisticated model\n",
-    "#\n",
-    "def get_model_v2(lx,ly,lz):\n",
-    "    model = keras.models.Sequential()\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(64, (3, 3), padding='same', input_shape=(lx,ly,lz), activation='relu'))\n",
-    "    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.2))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))\n",
-    "    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.2))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(256, (3, 3), padding='same',activation='relu'))\n",
-    "    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.2))\n",
-    "\n",
-    "    model.add( keras.layers.Flatten())\n",
-    "    model.add( keras.layers.Dense(512, activation='relu'))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "    model.add( keras.layers.Dense(43, activation='softmax'))\n",
-    "    return model\n",
-    "\n",
-    "# My sphisticated model, but small and fast\n",
-    "#\n",
-    "def get_model_v3(lx,ly,lz):\n",
-    "    model = keras.models.Sequential()\n",
-    "    model.add( keras.layers.Conv2D(32, (3,3),   activation='relu', input_shape=(lx,ly,lz)))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Flatten()) \n",
-    "    model.add( keras.layers.Dense(1152, activation='relu'))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Dense(43, activation='softmax'))\n",
-    "    return model\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 5/ Multiple datasets, multiple models ;-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def multi_run(datasets, models, batch_size=64, epochs=16):\n",
-    "\n",
-    "    # ---- Columns of report\n",
-    "    #\n",
-    "    report={}\n",
-    "    report['Dataset']=[]\n",
-    "    report['Size']   =[]\n",
-    "    for m in models:\n",
-    "        report[m+' Accuracy'] = []\n",
-    "        report[m+' Duration'] = []\n",
-    "\n",
-    "    # ---- Let's go\n",
-    "    #\n",
-    "    for d_name in datasets:\n",
-    "        print(\"\\nDataset : \",d_name)\n",
-    "\n",
-    "        # ---- Read dataset\n",
-    "        x_train,y_train,x_test,y_test = read_dataset(d_name)\n",
-    "        d_size=os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)\n",
-    "        report['Dataset'].append(d_name)\n",
-    "        report['Size'].append(d_size)\n",
-    "        \n",
-    "        # ---- Get the shape\n",
-    "        (n,lx,ly,lz) = x_train.shape\n",
-    "\n",
-    "        # ---- For each model\n",
-    "        for m_name,m_function in models.items():\n",
-    "            print(\"    Run model {}  : \".format(m_name), end='')\n",
-    "            # ---- get model\n",
-    "            try:\n",
-    "                model=m_function(lx,ly,lz)\n",
-    "                # ---- Compile it\n",
-    "                model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
-    "                # ---- Callbacks tensorboard\n",
-    "                log_dir = \"./run/logs/tb_{}_{}\".format(d_name,m_name)\n",
-    "                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
-    "                # ---- Callbacks bestmodel\n",
-    "                save_dir = \"./run/models/model_{}_{}.h5\".format(d_name,m_name)\n",
-    "                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)\n",
-    "                # ---- Train\n",
-    "                start_time = time.time()\n",
-    "                history = model.fit(  x_train[:1000], y_train[:1000],\n",
-    "                                    batch_size      = batch_size,\n",
-    "                                    epochs          = epochs,\n",
-    "                                    verbose         = 0,\n",
-    "                                    validation_data = (x_test, y_test),\n",
-    "                                    callbacks       = [tensorboard_callback, bestmodel_callback])\n",
-    "                # ---- Result\n",
-    "                end_time = time.time()\n",
-    "                duration = end_time-start_time\n",
-    "                accuracy = max(history.history[\"val_accuracy\"])*100\n",
-    "                #\n",
-    "                report[m_name+' Accuracy'].append(accuracy)\n",
-    "                report[m_name+' Duration'].append(duration)\n",
-    "                print(\"Accuracy={:.2f} and Duration={:.2f})\".format(accuracy,duration))\n",
-    "            except:\n",
-    "                report[m_name+' Accuracy'].append('-')\n",
-    "                report[m_name+' Duration'].append('-')\n",
-    "                print('-')\n",
-    "    return report"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 6/ Run\n",
-    "### 6.1/ Clean"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Reset directories : ./run/logs and ./run/models .\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "\n",
-    "/bin/rm -r ./run/logs   2>/dev/null\n",
-    "/bin/rm -r ./run/models 2>/dev/null\n",
-    "/bin/mkdir -p -m 755 ./run/logs\n",
-    "/bin/mkdir -p -m 755 ./run/models\n",
-    "echo -e \"\\nReset directories : ./run/logs and ./run/models .\""
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 Start Tensorboard"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tensorbord started with pid 2128\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "tensorboard_start --logdir ./run/logs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3/ run and save report"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "---- Run --------------------------------------------------\n",
-      "\n",
-      "Dataset :  set-24x24-L\n",
-      "    Run model v1  : Accuracy=6.76 and Duration=8.58)\n",
-      "    Run model v3  : -\n",
-      "\n",
-      "Dataset :  set-24x24-RGB\n",
-      "    Run model v1  : Accuracy=17.55 and Duration=8.21)\n",
-      "    Run model v3  : -\n",
-      "\n",
-      "Report saved as  ./run/report-2020-01-19_15h35m25s.h5\n",
-      "-----------------------------------------------------------\n",
-      "CPU times: user 1min 37s, sys: 8.44 s, total: 1min 45s\n",
-      "Wall time: 18.5 s\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%time\n",
-    "\n",
-    "print('\\n---- Run','-'*50)\n",
-    "\n",
-    "# ---- Datasets and models list\n",
-    "\n",
-    "# For tests\n",
-    "datasets = ['set-24x24-L', 'set-24x24-RGB']\n",
-    "models   = {'v1':get_model_v1, 'v3':get_model_v3}\n",
-    "\n",
-    "# The real one\n",
-    "# datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
-    "# models   = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
-    "\n",
-    "# ---- Report name\n",
-    "\n",
-    "report_name='./run/report-{}.h5'.format(time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\"))\n",
-    "\n",
-    "# ---- Run\n",
-    "\n",
-    "out    = multi_run(datasets, models, batch_size=64, epochs=2)\n",
-    "\n",
-    "# ---- Save report\n",
-    "\n",
-    "output = pd.DataFrame (out)\n",
-    "params = pd.DataFrame( {'datasets':datasets, 'models':list(models.keys())} )\n",
-    "\n",
-    "output.to_hdf(report_name, 'output')\n",
-    "params.to_hdf(report_name, 'params')\n",
-    "\n",
-    "print('\\nReport saved as ',report_name)\n",
-    "print('-'*59)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.4/ Stop Tensorboard"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 9,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Tensorbord stopped (2128)\n"
-     ]
-    }
-   ],
-   "source": [
-    "%%bash\n",
-    "tensorboard_stop"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 7/ That's all folks.."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "Sunday 19 January 2020, 15:35:44\n",
-      "The work is done.\n",
-      "\n"
-     ]
-    }
-   ],
-   "source": [
-    "print('\\n{}'.format(time.strftime(\"%A %-d %B %Y, %H:%M:%S\")))\n",
-    "print(\"The work is done.\\n\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.5"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/GTSRB/05-Full-convolutions.py b/GTSRB/05-Full-convolutions.py
deleted file mode 100644
index 780579b8a1e395556079d92f96b02459247c6273..0000000000000000000000000000000000000000
--- a/GTSRB/05-Full-convolutions.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# German Traffic Sign Recognition Benchmark (GTSRB)
-# =================================================
-# ---
-# Introduction au Deep Learning  (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  
-# 
-# ## Episode 5 : Full Convolutions
-# 
-# Our main steps:
-#  - Try n models with n datasets
-#  - Save a Pandas/h5 report
-#  - Can be run in :
-#     - Notebook mode
-#     - Batch mode 
-#     - Tensorboard follow up
-#     
-# To export a notebook as a script :  
-# ```jupyter nbconvert --to script <notebook>```
-# 
-# To run a notebook :  
-# ```jupyter nbconvert --to notebook --execute <notebook>```
-# 
-# ## 1/ Import
-
-# In[1]:
-
-
-import tensorflow as tf
-from tensorflow import keras
-
-import numpy as np
-import pandas as pd
-import h5py
-import os,time
-
-from IPython.display import display
-
-VERSION='1.2'
-
-
-# ## 2/ Init and start
-
-# In[2]:
-
-
-print('\nFull Convolutions Notebook')
-print('  Version            : {}'.format(VERSION))
-print('  Run time           : {}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
-print('  TensorFlow version :',tf.__version__)
-print('  Keras version      :',tf.keras.__version__)
-
-
-# ## 3/ Dataset loading
-
-# In[3]:
-
-
-def read_dataset(name):
-    '''Reads h5 dataset from ./data
-
-    Arguments:  dataset name, without .h5
-    Returns:    x_train,y_train,x_test,y_test data'''
-    # ---- Read dataset
-    filename='./data/'+name+'.h5'
-    with  h5py.File(filename) as f:
-        x_train = f['x_train'][:]
-        y_train = f['y_train'][:]
-        x_test  = f['x_test'][:]
-        y_test  = f['y_test'][:]
-
-    return x_train,y_train,x_test,y_test
-
-
-# ## 4/ Models collection
-
-# In[4]:
-
-
-
-# A basic model
-#
-def get_model_v1(lx,ly,lz):
-    
-    model = keras.models.Sequential()
-    
-    model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(lx,ly,lz)))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.2))
-
-    model.add( keras.layers.Conv2D(192, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.2))
-
-    model.add( keras.layers.Flatten()) 
-    model.add( keras.layers.Dense(1500, activation='relu'))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Dense(43, activation='softmax'))
-    return model
-    
-# A more sophisticated model
-#
-def get_model_v2(lx,ly,lz):
-    model = keras.models.Sequential()
-
-    model.add( keras.layers.Conv2D(64, (3, 3), padding='same', input_shape=(lx,ly,lz), activation='relu'))
-    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
-    model.add( keras.layers.Dropout(0.2))
-
-    model.add( keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
-    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
-    model.add( keras.layers.Dropout(0.2))
-
-    model.add( keras.layers.Conv2D(256, (3, 3), padding='same',activation='relu'))
-    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
-    model.add( keras.layers.Dropout(0.2))
-
-    model.add( keras.layers.Flatten())
-    model.add( keras.layers.Dense(512, activation='relu'))
-    model.add( keras.layers.Dropout(0.5))
-    model.add( keras.layers.Dense(43, activation='softmax'))
-    return model
-
-# My sphisticated model, but small and fast
-#
-def get_model_v3(lx,ly,lz):
-    model = keras.models.Sequential()
-    model.add( keras.layers.Conv2D(32, (3,3),   activation='relu', input_shape=(lx,ly,lz)))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))
-    model.add( keras.layers.MaxPooling2D((2, 2)))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Flatten()) 
-    model.add( keras.layers.Dense(1152, activation='relu'))
-    model.add( keras.layers.Dropout(0.5))
-
-    model.add( keras.layers.Dense(43, activation='softmax'))
-    return model
-
-
-# ## 5/ Multiple datasets, multiple models ;-)
-
-# In[5]:
-
-
-def multi_run(datasets, models, batch_size=64, epochs=16):
-
-    # ---- Columns of report
-    #
-    report={}
-    report['Dataset']=[]
-    report['Size']   =[]
-    for m in models:
-        report[m+' Accuracy'] = []
-        report[m+' Duration'] = []
-
-    # ---- Let's go
-    #
-    for d_name in datasets:
-        print("\nDataset : ",d_name)
-
-        # ---- Read dataset
-        x_train,y_train,x_test,y_test = read_dataset(d_name)
-        d_size=os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)
-        report['Dataset'].append(d_name)
-        report['Size'].append(d_size)
-        
-        # ---- Get the shape
-        (n,lx,ly,lz) = x_train.shape
-
-        # ---- For each model
-        for m_name,m_function in models.items():
-            print("    Run model {}  : ".format(m_name), end='')
-            # ---- get model
-            try:
-                model=m_function(lx,ly,lz)
-                # ---- Compile it
-                model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
-                # ---- Callbacks tensorboard
-                log_dir = "./run/logs/tb_{}_{}".format(d_name,m_name)
-                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
-                # ---- Callbacks bestmodel
-                save_dir = "./run/models/model_{}_{}.h5".format(d_name,m_name)
-                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)
-                # ---- Train
-                start_time = time.time()
-                history = model.fit(  x_train[:1000], y_train[:1000],
-                                    batch_size      = batch_size,
-                                    epochs          = epochs,
-                                    verbose         = 0,
-                                    validation_data = (x_test, y_test),
-                                    callbacks       = [tensorboard_callback, bestmodel_callback])
-                # ---- Result
-                end_time = time.time()
-                duration = end_time-start_time
-                accuracy = max(history.history["val_accuracy"])*100
-                #
-                report[m_name+' Accuracy'].append(accuracy)
-                report[m_name+' Duration'].append(duration)
-                print("Accuracy={:.2f} and Duration={:.2f})".format(accuracy,duration))
-            except:
-                report[m_name+' Accuracy'].append('-')
-                report[m_name+' Duration'].append('-')
-                print('-')
-    return report
-
-
-# ## 6/ Run
-# ### 6.1/ Clean
-
-# In[6]:
-
-
-get_ipython().run_cell_magic('bash', '', '\n/bin/rm -r ./run/logs   2>/dev/null\n/bin/rm -r ./run/models 2>/dev/null\n/bin/mkdir -p -m 755 ./run/logs\n/bin/mkdir -p -m 755 ./run/models\necho -e "\\nReset directories : ./run/logs and ./run/models ."')
-
-
-# ### 6.2 Start Tensorboard
-
-# In[22]:
-
-
-get_ipython().run_cell_magic('bash', '', 'tensorboard_start --logdir ./run/logs')
-
-
-# ### 6.3/ run and save report
-
-# In[24]:
-
-
-get_ipython().run_cell_magic('time', '', '\nprint(\'\\n---- Run\',\'-\'*50)\n\n# ---- Datasets and models list\n\n# For tests\ndatasets = [\'set-24x24-L\', \'set-24x24-RGB\']\nmodels   = {\'v1\':get_model_v1, \'v3\':get_model_v3}\n\n# The real one\n# datasets = [\'set-24x24-L\', \'set-24x24-RGB\', \'set-48x48-L\', \'set-48x48-RGB\', \'set-24x24-L-LHE\', \'set-24x24-RGB-HE\', \'set-48x48-L-LHE\', \'set-48x48-RGB-HE\']\n# models   = {\'v1\':get_model_v1, \'v2\':get_model_v2, \'v3\':get_model_v3}\n\n# ---- Report name\n\nreport_name=\'./run/report-{}.h5\'.format(time.strftime("%Y-%m-%d_%Hh%Mm%Ss"))\n\n# ---- Run\n\nout    = multi_run(datasets, models, batch_size=64, epochs=2)\n\n# ---- Save report\n\noutput = pd.DataFrame (out)\nparams = pd.DataFrame( {\'datasets\':datasets, \'models\':list(models.keys())} )\n\noutput.to_hdf(report_name, \'output\')\nparams.to_hdf(report_name, \'params\')\n\nprint(\'\\nReport saved as \',report_name)\nprint(\'-\'*59)')
-
-
-# ### 6.4/ Stop Tensorboard
-
-# In[23]:
-
-
-get_ipython().run_cell_magic('bash', '', 'tensorboard_stop')
-
-
-# ## 7/ That's all folks..
-
-# In[21]:
-
-
-print('\n{}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
-print("The work is done.\n")
-
-
-# In[ ]:
-
-
-
-
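The deleted export above also shows why the batch notebook below runs scripts with `ipython` rather than `python`: nbconvert renders cell magics as `get_ipython()` calls, which only exist inside an IPython session. A minimal illustration of the emitted pattern (the `echo` payload is just an example):

```python
# Pattern emitted by `jupyter nbconvert --to script` for a %%bash cell.
# get_ipython() is defined only under IPython, so `python script.py`
# raises NameError here while `ipython script.py` runs fine.
get_ipython().run_cell_magic('bash', '', 'echo hello')
```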
diff --git a/GTSRB/05.1-Full-convolutions-run.ipynb b/GTSRB/05.1-Full-convolutions-batch.ipynb
similarity index 59%
rename from GTSRB/05.1-Full-convolutions-run.ipynb
rename to GTSRB/05.1-Full-convolutions-batch.ipynb
index 608973b46a2ba7304f80ef836f4365f01f717965..cd78bdb09743e8dcbaec8f577011c7be7e42239d 100644
--- a/GTSRB/05.1-Full-convolutions-run.ipynb
+++ b/GTSRB/05.1-Full-convolutions-batch.ipynb
@@ -40,21 +40,23 @@
     "\n",
     "# ---- This will execute and save a notebook\n",
     "#\n",
-    "jupyter nbconvert --to notebook --execute '05-Full-convolutions.ipynb'\n"
+    "jupyter nbconvert --ExecutePreprocessor.timeout=-1 --to notebook --output='./run/full_convolutions' --execute '05-Full-convolutions.ipynb'\n"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## 2/ Export as a script\n",
+    "## 2/ Export as a script (better choice)\n",
     "To export a notebook as a script :  \n",
-    "```jupyter nbconvert --to script <notebook>```"
+    "```jupyter nbconvert --to script <notebook>```  \n",
+    "To run the script :  \n",
+    "```ipython <script>```"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 26,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [
     {
@@ -62,48 +64,33 @@
      "output_type": "stream",
      "text": [
       "[NbConvertApp] Converting notebook 05-Full-convolutions.ipynb to script\n",
-      "[NbConvertApp] Writing 8775 bytes to 05-Full-convolutions.py\n"
+      "[NbConvertApp] Writing 8155 bytes to ./run/full_convolutions.py\n"
      ]
     }
    ],
    "source": [
     "%%bash\n",
     "\n",
-    "# ---- This will convert notebook to a notebook.py script\n",
+    "# ---- This will convert a notebook to a notebook.py script\n",
     "#\n",
-    "jupyter nbconvert --to script '05-Full-convolutions.ipynb'\n"
+    "jupyter nbconvert --to script --output='./run/full_convolutions' '05-Full-convolutions.ipynb'"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "total 8156\n",
-      "-rw-r--r-- 1 pjluc pjluc   16893 Jan 19 15:07 01-Preparation-of-data.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc    9181 Jan 19 15:08 02-First-convolutions.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc   12879 Jan 19 15:09 03-Tracking-and-visualizing.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc   10667 Jan 19 15:10 04-Data-augmentation.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc    3325 Jan 19 15:50 05.1-Full-convolutions-run.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc   19865 Jan 19 15:17 05.2-Full-convolutions-reports.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc   14466 Jan 19 15:11 05-Full-convolutions.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc   14533 Jan 19 15:35 05-Full-convolutions.nbconvert.ipynb\n",
-      "-rw-r--r-- 1 pjluc pjluc    8775 Jan 19 15:43 05-Full-convolutions.py\n",
-      "-rw-r--r-- 1 pjluc pjluc   12346 Jan 19 15:47 99 Scripts-Tensorboard.ipynb\n",
-      "drwxr-xr-x 1 pjluc pjluc     512 Jan 10 22:10 data\n",
-      "drwxr-xr-x 1 pjluc pjluc     512 Jan 19 15:07 fidle\n",
-      "-rw-r--r-- 1 pjluc pjluc 7391072 Jan 19 00:41 foo.h5\n",
-      "-rw-r--r-- 1 pjluc pjluc    2816 Jan 11 15:45 README.ipynb\n",
-      "drwxr-xr-x 1 pjluc pjluc     512 Jan 19 15:35 run\n"
+      "-rwxr-xr-x 1 paroutyj l-simap 8155 Jan 19 22:24 ./run/full_convolutions.py\n"
      ]
     }
    ],
    "source": [
-    "!ls -l"
+    "!ls -l ./run/*.py"
    ]
   },
   {
@@ -116,21 +103,31 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 28,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Writing ./run/batch_full_convolutions.sh\n"
+      "Overwriting ./run/batch_full_convolutions.sh\n"
      ]
     }
    ],
    "source": [
     "%%writefile \"./run/batch_full_convolutions.sh\"\n",
+    "#!/bin/bash\n",
+    "#OAR -n Full convolutions\n",
+    "#OAR -t gpu\n",
+    "#OAR -l /nodes=1/gpudevice=1,walltime=01:00:00\n",
+    "#OAR --stdout full_convolutions.out\n",
+    "#OAR --stderr full_convolutions.err\n",
+    "#OAR --project deeplearningshs\n",
     "\n",
-    "#!/bin/batch\n",
+    "#---- For cpu\n",
+    "# use :\n",
+    "# OAR -l /nodes=1/core=32,walltime=01:00:00\n",
+    "# and add a 2>/dev/null to ipython xxx\n",
     "\n",
     "# ----------------------------------\n",
     "#   _           _       _\n",
@@ -141,13 +138,17 @@
     "#                  Full convolutions\n",
     "# ----------------------------------\n",
     "#\n",
-    "CONDA_ENV=\"deeplearning2\"\n",
-    "RUN_DIR=\"~/fidle/GTSRB\"\n",
-    "RUN_SCRIPT=\"05-Full-convolutions.py\"\n",
+    "\n",
+    "CONDA_ENV=deeplearning2\n",
+    "RUN_DIR=~/fidle/GTSRB\n",
+    "RUN_SCRIPT=./run/full_convolutions.py\n",
     "\n",
     "# ---- Cuda Conda initialization\n",
     "#\n",
-    "echo -e 'Init environment with cuda and conda...\\n'\n",
+    "echo '------------------------------------------------------------'\n",
+    "echo \"Start : $0\"\n",
+    "echo '------------------------------------------------------------'\n",
+    "#\n",
     "source /applis/environments/cuda_env.sh dahu 10.0\n",
     "source /applis/environments/conda.sh\n",
     "#\n",
@@ -156,7 +157,36 @@
     "# ---- Run it...\n",
     "#\n",
     "cd $RUN_DIR\n",
-    "$RUN_SCRIPT"
+    "ipython $RUN_SCRIPT"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "-rwxr-xr-x 1 paroutyj l-simap  955 Jan 19 22:13 ./run/batch_full_convolutions.sh\n",
+      "-rwxr-xr-x 1 paroutyj l-simap 8155 Jan 19 22:24 ./run/full_convolutions.py\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%bash\n",
+    "chmod 755 \"./run/batch_full_convolutions.sh\"\n",
+    "chmod 755 \"./run/full_convolutions.py\"\n",
+    "ls -l ./run/*full_convolutions*"
+   ]
+  },
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "%%bash\n",
+    "./run/batch_full_convolutions.sh"
    ]
   },
   {
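For reference, the same export can be driven from Python through nbconvert's API; a minimal sketch with the paths used above (on an OAR cluster, the generated batch file would then typically be submitted with `oarsub -S ./run/batch_full_convolutions.sh`; check your site's documentation):

```python
# Sketch: export the notebook to a script via nbconvert's Python API,
# equivalent to the `jupyter nbconvert --to script` call above.
from nbconvert import PythonExporter

body, _resources = PythonExporter().from_filename('05-Full-convolutions.ipynb')
with open('./run/full_convolutions.py', 'w') as f:
    f.write(body)
```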
diff --git a/GTSRB/05.2-Full-convolutions-reports.ipynb b/GTSRB/05.2-Full-convolutions-reports.ipynb
index c02d06d86acd664c8bebc53dc434be86efa87290..61e5387684e077c595ea8b5436e25fb1c37c5bf4 100644
--- a/GTSRB/05.2-Full-convolutions-reports.ipynb
+++ b/GTSRB/05.2-Full-convolutions-reports.ipynb
@@ -24,7 +24,7 @@
    "outputs": [],
    "source": [
     "import pandas as pd\n",
-    "import os,glob\n",
+    "import os,glob,json\n",
     "from pathlib import Path\n",
     "from IPython.display import display, Markdown"
    ]
@@ -43,17 +43,19 @@
    "outputs": [],
    "source": [
     "def show_report(report_name):\n",
-    "    # ---- Read report\n",
-    "    output = pd.read_hdf(report_name, 'output')\n",
-    "    params = pd.read_hdf(report_name, 'params')\n",
-    "    # ---- Build format\n",
+    "    # ---- Read json file\n",
+    "    with open(report_name) as infile:\n",
+    "        dict_report = json.load( infile )\n",
+    "    # ---- Create a pandas\n",
+    "    report = pd.DataFrame (dict_report)\n",
+    "    models = list(dict_report.keys())[2:]\n",
+    "    # ---- Build formats\n",
     "    lambda_acc = lambda x : '{:.2f} %'.format(x) if (isinstance(x, float)) else '{:}'.format(x)\n",
     "    lambda_dur = lambda x : '{:.1f} s'.format(x) if (isinstance(x, float)) else '{:}'.format(x)\n",
-    "    format_dict = {'Size':'{:.2f} Mo'}\n",
-    "    for m in params['models'].tolist():\n",
-    "        format_dict[m+' Accuracy']=lambda_acc\n",
-    "        format_dict[m+' Duration']=lambda_dur\n",
-    "    t=output.style.format(format_dict).hide_index()\n",
+    "    formats = {'Size':'{:.2f} Mo'}\n",
+    "    for m in models:\n",
+    "        formats[m]=lambda_acc if (m.endswith('Accuracy')) else lambda_dur\n",
+    "    t=report.style.format(formats).hide_index()\n",
     "    display(t)"
    ]
   },
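A usage sketch for `show_report` (the glob pattern is an assumption matching the reports written by episode 05; the loop mirrors the outputs shown below):

```python
# Usage sketch: render every JSON report found in ./run.
import glob
from pathlib import Path

for report_name in sorted(glob.glob('./run/report-*.json')):
    print('\n\nReport : ', Path(report_name).stem, '\n')
    show_report(report_name)
```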
@@ -66,7 +68,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
@@ -75,7 +77,7 @@
      "text": [
       "\n",
       "\n",
-      "Report :  report-2020-01-19_12h45m57s \n",
+      "Report :  report-2020-01-19_22h14m29s \n",
       "\n"
      ]
     },
@@ -83,227 +85,91 @@
      "data": {
       "text/html": [
        "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
+       "</style><table id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v2 Accuracy</th>        <th class=\"col_heading level0 col5\" >v2 Duration</th>        <th class=\"col_heading level0 col6\" >v3 Accuracy</th>        <th class=\"col_heading level0 col7\" >v3 Duration</th>    </tr></thead><tbody>\n",
        "                <tr>\n",
-       "                                <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >11.99 %</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >8.5 s</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col2\" class=\"data row0 col2\" >5.72 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col3\" class=\"data row0 col3\" >5.0 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col4\" class=\"data row0 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col5\" class=\"data row0 col5\" >2.4 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col6\" class=\"data row0 col6\" >-</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col7\" class=\"data row0 col7\" >-</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >16.95 %</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >8.4 s</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d373846_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
-       "            </tr>\n",
-       "    </tbody></table>"
-      ],
-      "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb458777110>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\n",
-      "Report :  report-2020-01-19_13h53m43s \n",
-      "\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
-       "                <tr>\n",
-       "                                <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >10.48 %</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >8.4 s</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col2\" class=\"data row1 col2\" >16.62 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col3\" class=\"data row1 col3\" >2.9 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col4\" class=\"data row1 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col5\" class=\"data row1 col5\" >2.5 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col6\" class=\"data row1 col6\" >-</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col7\" class=\"data row1 col7\" >-</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >15.67 %</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >8.3 s</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d39a356_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
-       "            </tr>\n",
-       "    </tbody></table>"
-      ],
-      "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb45815d350>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\n",
-      "Report :  report-2020-01-19_14h26m21s \n",
-      "\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
-       "                <tr>\n",
-       "                                <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >10.68 %</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >8.5 s</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col0\" class=\"data row2 col0\" >set-48x48-L</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col1\" class=\"data row2 col1\" >913.90 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col2\" class=\"data row2 col2\" >6.99 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col3\" class=\"data row2 col3\" >8.0 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col4\" class=\"data row2 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col5\" class=\"data row2 col5\" >3.7 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col6\" class=\"data row2 col6\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col7\" class=\"data row2 col7\" >2.0 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >18.24 %</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >8.4 s</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d3bfef8_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
-       "            </tr>\n",
-       "    </tbody></table>"
-      ],
-      "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb457f9d350>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\n",
-      "Report :  report-2020-01-19_14h27m02s \n",
-      "\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
-       "                <tr>\n",
-       "                                <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >7.00 %</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >9.2 s</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col0\" class=\"data row3 col0\" >set-48x48-RGB</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col1\" class=\"data row3 col1\" >2736.36 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col2\" class=\"data row3 col2\" >14.96 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col3\" class=\"data row3 col3\" >8.5 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col4\" class=\"data row3 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col5\" class=\"data row3 col5\" >3.7 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col6\" class=\"data row3 col6\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col7\" class=\"data row3 col7\" >2.1 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >11.59 %</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >8.9 s</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d3f7fba_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
-       "            </tr>\n",
-       "    </tbody></table>"
-      ],
-      "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb45849c750>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\n",
-      "Report :  report-2020-01-19_14h31m50s \n",
-      "\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
-       "                <tr>\n",
-       "                                <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >10.22 %</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >8.6 s</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col0\" class=\"data row4 col0\" >set-24x24-L-LHE</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col1\" class=\"data row4 col1\" >228.77 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col2\" class=\"data row4 col2\" >8.14 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col3\" class=\"data row4 col3\" >2.7 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col4\" class=\"data row4 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col5\" class=\"data row4 col5\" >2.5 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col6\" class=\"data row4 col6\" >-</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col7\" class=\"data row4 col7\" >-</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >12.98 %</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >8.2 s</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d42c210_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col0\" class=\"data row5 col0\" >set-24x24-RGB-HE</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col1\" class=\"data row5 col1\" >684.39 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col2\" class=\"data row5 col2\" >19.96 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col3\" class=\"data row5 col3\" >2.5 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col4\" class=\"data row5 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col5\" class=\"data row5 col5\" >2.7 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col6\" class=\"data row5 col6\" >-</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col7\" class=\"data row5 col7\" >-</td>\n",
        "            </tr>\n",
-       "    </tbody></table>"
-      ],
-      "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb45850fed0>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      "\n",
-      "Report :  report-2020-01-19_14h56m27s \n",
-      "\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v3 Accuracy</th>        <th class=\"col_heading level0 col5\" >v3 Duration</th>    </tr></thead><tbody>\n",
-       "                <tr>\n",
-       "                                <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col2\" class=\"data row0 col2\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col3\" class=\"data row0 col3\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col4\" class=\"data row0 col4\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row0_col5\" class=\"data row0 col5\" >-</td>\n",
+       "            <tr>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col0\" class=\"data row6 col0\" >set-48x48-L-LHE</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col1\" class=\"data row6 col1\" >913.90 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col2\" class=\"data row6 col2\" >12.69 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col3\" class=\"data row6 col3\" >7.6 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col4\" class=\"data row6 col4\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col5\" class=\"data row6 col5\" >4.0 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col6\" class=\"data row6 col6\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col7\" class=\"data row6 col7\" >2.0 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col2\" class=\"data row1 col2\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col3\" class=\"data row1 col3\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col4\" class=\"data row1 col4\" >-</td>\n",
-       "                        <td id=\"T_5d461c80_3ac6_11ea_820c_836bead0fd53row1_col5\" class=\"data row1 col5\" >-</td>\n",
+       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col0\" class=\"data row7 col0\" >set-48x48-RGB-HE</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col1\" class=\"data row7 col1\" >2736.36 Mo</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col2\" class=\"data row7 col2\" >20.77 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col3\" class=\"data row7 col3\" >9.5 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col4\" class=\"data row7 col4\" >5.56 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col5\" class=\"data row7 col5\" >3.8 s</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col6\" class=\"data row7 col6\" >5.94 %</td>\n",
+       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col7\" class=\"data row7 col7\" >2.4 s</td>\n",
        "            </tr>\n",
        "    </tbody></table>"
       ],
       "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7fb4587551d0>"
+       "<pandas.io.formats.style.Styler at 0x7f5585503390>"
       ]
      },
      "metadata": {},
@@ -311,9 +177,9 @@
     }
    ],
    "source": [
-    "for file in glob.glob(\"./run/*.h5\"):\n",
+    "for file in glob.glob(\"./run/*.json\"):\n",
     "    print(\"\\n\\nReport : \",Path(file).stem,'\\n') \n",
-    "    show_report(file)"
+    "    show_report(file)\n"
    ]
   },
   {
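A quick sketch of the report-reading side of this change. The loop above now globs `*.json` report files instead of `*.h5`. As a minimal illustration only — the actual JSON schema written by the training loop is not shown in this diff, so the flat dict-of-records layout below is an assumption — reading such a report back into a DataFrame could look like:

```python
import json, glob
from pathlib import Path

import pandas as pd
from IPython.display import display

def show_report(file):
    # Assumed layout: one flat JSON dict per report, mapping dataset
    # names to {column: value} records. The real schema used by the
    # notebook may differ.
    with open(file) as fp:
        report = json.load(fp)
    df = pd.DataFrame.from_dict(report, orient='index')
    display(df)

for file in glob.glob("./run/*.json"):
    print("\n\nReport : ", Path(file).stem, '\n')
    show_report(file)
```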
diff --git a/GTSRB/99 Scripts-Tensorboard.ipynb b/GTSRB/99 Scripts-Tensorboard.ipynb
index 290e20a92c015034e43150daf1f8f0a16b960de3..daba57fae76a8abc2454e3a6f9d34f55cca76c37 100644
--- a/GTSRB/99 Scripts-Tensorboard.ipynb	
+++ b/GTSRB/99 Scripts-Tensorboard.ipynb	
@@ -21,9 +21,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 13,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Tensorbord started - pid is  49306\n"
+     ]
+    }
+   ],
    "source": [
     "%%bash\n",
     "tensorboard_start --logdir ./run/logs"
@@ -31,9 +39,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 18,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Tensorboard status - not found...\n"
+     ]
+    }
+   ],
    "source": [
     "%%bash\n",
     "tensorboard_status"
@@ -41,9 +57,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 17,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Tensorboard process not found...\n"
+     ]
+    }
+   ],
    "source": [
     "%%bash\n",
     "tensorboard_stop"
@@ -58,9 +82,17 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Overwriting /home/paroutyj/bin/tensorboard_start\n"
+     ]
+    }
+   ],
    "source": [
     "%%writefile \"~/bin/tensorboard_start\"\n",
     "\n",
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..146861fcd8998f71db32de2aece738cb039453e3
--- /dev/null
+++ b/README.md
@@ -0,0 +1,21 @@
+German Traffic Sign Recognition Benchmark (GTSRB)
+=================================================
+---
+Introduction au Deep Learning  (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  
+
+## 1/ Environment
+To create the conda environment:  
+```
+conda env create -f environment.yml
+```
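+
+To activate it (assuming the environment defined in `environment.yml` is named `deeplearning2`, as the update command below suggests):  
+```
+conda activate deeplearning2
+```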
+
+## 2/ Misc
+To update an existing environment:  
+```
+conda env update --name=deeplearning2 --file=environment.yml
+```
\ No newline at end of file