From b69e3b60a55426dfefb4fae22ea1cba2013cae26 Mon Sep 17 00:00:00 2001
From: "Jean-Luc Parouty Jean-Luc.Parouty@simap.grenoble-inp.fr"
 <paroutyj@f-dahu.u-ga.fr>
Date: Mon, 20 Jan 2020 23:38:16 +0100
Subject: [PATCH] Jupyter to Batch cooking

---
 GTSRB/03-Tracking-and-visualizing.ipynb    |   2 +-
 GTSRB/04-Data-augmentation.ipynb           |   5 +-
 GTSRB/05-Full-convolutions.ipynb           | 236 +++++++++++++--------
 GTSRB/05.1-Full-convolutions-batch.ipynb   |  42 ++--
 GTSRB/05.2-Full-convolutions-reports.ipynb | 204 +++++++++---------
 GTSRB/99 Scripts-Tensorboard.ipynb         |  28 ++-
 6 files changed, 290 insertions(+), 227 deletions(-)
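Note on the resulting batch workflow, as a sketch. The nbconvert and chmod commands
below are taken from 05.1-Full-convolutions-batch.ipynb; the mkdir and the oarsub
submission line are assumptions and do not appear in this patch:

    jupyter nbconvert --to script --output='./run/full_convolutions_A' '05-Full-convolutions.ipynb'
    chmod 755 ./run/*.sh ./run/*.py
    mkdir -p _batch                                 # the #OAR --stdout/--stderr directives point into _batch/
    oarsub -S ./run/batch_full_convolutions_A.sh    # submit to OAR on the gpu queue (assumed command)

The generated ./run/report_<tag_id>.json is then rendered by 05.2-Full-convolutions-reports.ipynb.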

diff --git a/GTSRB/03-Tracking-and-visualizing.ipynb b/GTSRB/03-Tracking-and-visualizing.ipynb
index b185c9c..1c93d55 100644
--- a/GTSRB/03-Tracking-and-visualizing.ipynb
+++ b/GTSRB/03-Tracking-and-visualizing.ipynb
@@ -270,7 +270,7 @@
     "                      batch_size=batch_size,\n",
     "                      epochs=epochs,\n",
     "                      verbose=1,\n",
-    "                      validation_data=(x_test[:200], y_test[:200]),\n",
+    "                      validation_data=(x_test, y_test),\n",
     "                      callbacks=[tensorboard_callback, bestmodel_callback, savemodel_callback] )\n",
     "\n",
     "model.save('./run/models/last-model.h5')"
diff --git a/GTSRB/04-Data-augmentation.ipynb b/GTSRB/04-Data-augmentation.ipynb
index 6a8bfc9..2b87edc 100644
--- a/GTSRB/04-Data-augmentation.ipynb
+++ b/GTSRB/04-Data-augmentation.ipynb
@@ -203,7 +203,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## 5/ Train the model\n",
+    "## 6/ Train the model\n",
     "**Get the shape of my data :**"
    ]
   },
@@ -265,6 +265,7 @@
     "# ---- Train\n",
     "#\n",
     "history = model.fit(  datagen.flow(x_train, y_train, batch_size=batch_size),\n",
+    "                      steps_per_epoch = int(x_train.shape[0]/batch_size),\n",
     "                      epochs=epochs,\n",
     "                      verbose=1,\n",
     "                      validation_data=(x_test, y_test),\n",
@@ -306,7 +307,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## 6/ History\n",
+    "## 7/ History\n",
     "The return of model.fit() returns us the learning history"
    ]
   },
diff --git a/GTSRB/05-Full-convolutions.ipynb b/GTSRB/05-Full-convolutions.ipynb
index 47bec69..364e330 100644
--- a/GTSRB/05-Full-convolutions.ipynb
+++ b/GTSRB/05-Full-convolutions.ipynb
@@ -31,10 +31,11 @@
     "import numpy as np\n",
     "import h5py\n",
     "import os,time,json\n",
+    "import random\n",
     "\n",
     "from IPython.display import display\n",
     "\n",
-    "VERSION='1.2'"
+    "VERSION='1.6'"
    ]
   },
   {
@@ -50,11 +51,27 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# ---- Where I am ?\n",
+    "now    = time.strftime(\"%A %d %B %Y - %Hh%Mm%Ss\")\n",
+    "here   = os.getcwd()\n",
+    "random.seed(time.time())\n",
+    "tag_id = '{:06}'.format(random.randint(0,99999))\n",
+    "\n",
+    "# ---- Who I am ?\n",
+    "if 'OAR_JOB_ID' in os.environ:\n",
+    "    oar_id=os.environ['OAR_JOB_ID']\n",
+    "else:\n",
+    "    oar_id='???'\n",
+    "\n",
     "print('\\nFull Convolutions Notebook')\n",
     "print('  Version            : {}'.format(VERSION))\n",
-    "print('  Run time           : {}'.format(time.strftime(\"%A %-d %B %Y, %H:%M:%S\")))\n",
+    "print('  Now is             : {}'.format(now))\n",
+    "print('  OAR id             : {}'.format(oar_id))\n",
+    "print('  Tag id             : {}'.format(tag_id))\n",
+    "print('  Working directory  : {}'.format(here))\n",
     "print('  TensorFlow version :',tf.__version__)\n",
-    "print('  Keras version      :',tf.keras.__version__)"
+    "print('  Keras version      :',tf.keras.__version__)\n",
+    "print('  for tensorboard    : --logdir {}/run/logs_{}'.format(here,tag_id))"
    ]
   },
   {
@@ -77,7 +94,7 @@
     "    Returns:    x_train,y_train,x_test,y_test data'''\n",
     "    # ---- Read dataset\n",
     "    filename='./data/'+name+'.h5'\n",
-    "    with  h5py.File(filename) as f:\n",
+    "    with  h5py.File(filename,'r') as f:\n",
     "        x_train = f['x_train'][:]\n",
     "        y_train = f['y_train'][:]\n",
     "        x_test  = f['x_test'][:]\n",
@@ -147,32 +164,27 @@
     "    model.add( keras.layers.Dense(43, activation='softmax'))\n",
     "    return model\n",
     "\n",
-    "# My sphisticated model, but small and fast\n",
-    "#\n",
     "def get_model_v3(lx,ly,lz):\n",
     "    model = keras.models.Sequential()\n",
-    "    model.add( keras.layers.Conv2D(32, (3,3),   activation='relu', input_shape=(lx,ly,lz)))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))\n",
-    "    model.add( keras.layers.MaxPooling2D((2, 2)))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Flatten()) \n",
-    "    model.add( keras.layers.Dense(1152, activation='relu'))\n",
-    "    model.add( keras.layers.Dropout(0.5))\n",
-    "\n",
-    "    model.add( keras.layers.Dense(43, activation='softmax'))\n",
-    "    return model\n"
+    "    model.add(tf.keras.layers.Conv2D(32, (5, 5), padding='same',  activation='relu', input_shape=(lx,ly,lz)))\n",
+    "    model.add(tf.keras.layers.BatchNormalization(axis=-1))      \n",
+    "    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
+    "    model.add(tf.keras.layers.Dropout(0.2))\n",
+    "\n",
+    "    model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same',  activation='relu'))\n",
+    "    model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+    "    model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='relu'))\n",
+    "    model.add(tf.keras.layers.BatchNormalization(axis=-1))\n",
+    "    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n",
+    "    model.add(tf.keras.layers.Dropout(0.2))\n",
+    "\n",
+    "    model.add(tf.keras.layers.Flatten())\n",
+    "    model.add(tf.keras.layers.Dense(512, activation='relu'))\n",
+    "    model.add(tf.keras.layers.BatchNormalization())\n",
+    "    model.add(tf.keras.layers.Dropout(0.4))\n",
+    "\n",
+    "    model.add(tf.keras.layers.Dense(43, activation='softmax'))\n",
+    "    return model"
    ]
   },
   {
@@ -188,16 +200,23 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def multi_run(datasets, models, batch_size=64, epochs=16):\n",
+    "def multi_run(datasets, models, datagen=None,\n",
+    "              train_size=1, test_size=1, batch_size=64, epochs=16, \n",
+    "              verbose=0, extension_dir='last'):\n",
     "\n",
-    "    # ---- Columns of report\n",
+    "    # ---- Logs and models dir\n",
+    "    #\n",
+    "    os.makedirs('./run/logs_{}'.format(extension_dir),   mode=0o750, exist_ok=True)\n",
+    "    os.makedirs('./run/models_{}'.format(extension_dir), mode=0o750, exist_ok=True)\n",
+    "    \n",
+    "    # ---- Columns of output\n",
     "    #\n",
-    "    report={}\n",
-    "    report['Dataset']=[]\n",
-    "    report['Size']   =[]\n",
+    "    output={}\n",
+    "    output['Dataset']=[]\n",
+    "    output['Size']   =[]\n",
     "    for m in models:\n",
-    "        report[m+' Accuracy'] = []\n",
-    "        report[m+' Duration'] = []\n",
+    "        output[m+'_Accuracy'] = []\n",
+    "        output[m+'_Duration'] = []\n",
     "\n",
     "    # ---- Let's go\n",
     "    #\n",
@@ -207,11 +226,13 @@
     "        # ---- Read dataset\n",
     "        x_train,y_train,x_test,y_test = read_dataset(d_name)\n",
     "        d_size=os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)\n",
-    "        report['Dataset'].append(d_name)\n",
-    "        report['Size'].append(d_size)\n",
+    "        output['Dataset'].append(d_name)\n",
+    "        output['Size'].append(d_size)\n",
     "        \n",
     "        # ---- Get the shape\n",
     "        (n,lx,ly,lz) = x_train.shape\n",
+    "        n_train = int(x_train.shape[0]*train_size)\n",
+    "        n_test  = int(x_test.shape[0]*test_size)\n",
     "\n",
     "        # ---- For each model\n",
     "        for m_name,m_function in models.items():\n",
@@ -222,62 +243,51 @@
     "                # ---- Compile it\n",
     "                model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
     "                # ---- Callbacks tensorboard\n",
-    "                log_dir = \"./run/logs/tb_{}_{}\".format(d_name,m_name)\n",
+    "                log_dir = \"./run/logs_{}/tb_{}_{}\".format(extension_dir, d_name, m_name)\n",
     "                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
     "                # ---- Callbacks bestmodel\n",
-    "                save_dir = \"./run/models/model_{}_{}.h5\".format(d_name,m_name)\n",
+    "                save_dir = \"./run/models_{}/model_{}_{}.h5\".format(extension_dir, d_name, m_name)\n",
     "                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)\n",
     "                # ---- Train\n",
     "                start_time = time.time()\n",
-    "                history = model.fit(  x_train, y_train,\n",
-    "                                    batch_size      = batch_size,\n",
-    "                                    epochs          = epochs,\n",
-    "                                    verbose         = 0,\n",
-    "                                    validation_data = (x_test, y_test),\n",
-    "                                    callbacks       = [tensorboard_callback, bestmodel_callback])\n",
+    "                if datagen==None:\n",
+    "                    # ---- No data augmentation (datagen=None) --------------------------------------\n",
+    "                    history = model.fit(x_train[:n_train], y_train[:n_train],\n",
+    "                                        batch_size      = batch_size,\n",
+    "                                        epochs          = epochs,\n",
+    "                                        verbose         = verbose,\n",
+    "                                        validation_data = (x_test[:n_test], y_test[:n_test]),\n",
+    "                                        callbacks       = [tensorboard_callback, bestmodel_callback])\n",
+    "                else:\n",
+    "                    # ---- Data augmentation (datagen given) ----------------------------------------\n",
+    "                    datagen.fit(x_train)\n",
+    "                    history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),\n",
+    "                                        steps_per_epoch = int(n_train/batch_size),\n",
+    "                                        epochs          = epochs,\n",
+    "                                        verbose         = verbose,\n",
+    "                                        validation_data = (x_test[:n_test], y_test[:n_test]),\n",
+    "                                        callbacks       = [tensorboard_callback, bestmodel_callback])\n",
+    "                    \n",
     "                # ---- Result\n",
     "                end_time = time.time()\n",
     "                duration = end_time-start_time\n",
     "                accuracy = max(history.history[\"val_accuracy\"])*100\n",
     "                #\n",
-    "                report[m_name+' Accuracy'].append(accuracy)\n",
-    "                report[m_name+' Duration'].append(duration)\n",
+    "                output[m_name+'_Accuracy'].append(accuracy)\n",
+    "                output[m_name+'_Duration'].append(duration)\n",
     "                print(\"Accuracy={:.2f} and Duration={:.2f})\".format(accuracy,duration))\n",
     "            except:\n",
-    "                report[m_name+' Accuracy'].append('-')\n",
-    "                report[m_name+' Duration'].append('-')\n",
+    "                output[m_name+'_Accuracy'].append('0')\n",
+    "                output[m_name+'_Duration'].append('999')\n",
     "                print('-')\n",
-    "    return report"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## 6/ Run\n",
-    "### 6.1/ Clean"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%bash\n",
-    "\n",
-    "/bin/rm -r ./run/logs   2>/dev/null\n",
-    "/bin/rm -r ./run/models 2>/dev/null\n",
-    "/bin/mkdir -p -m 755 ./run/logs\n",
-    "/bin/mkdir -p -m 755 ./run/models\n",
-    "echo -e \"\\nReset directories : ./run/logs and ./run/models .\""
+    "    return output"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### 6.2/ run and save report"
+    "## 6/ Run !"
    ]
   },
   {
@@ -290,28 +300,72 @@
     "\n",
     "print('\\n---- Run','-'*50)\n",
     "\n",
-    "# ---- Datasets and models list\n",
-    "\n",
-    "# For tests\n",
-    "# datasets = ['set-24x24-L', 'set-24x24-RGB']\n",
-    "# models   = {'v1':get_model_v1, 'v3':get_model_v3}\n",
-    "\n",
-    "# The real one\n",
-    "datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
-    "models   = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
-    "\n",
-    "# ---- Report name\n",
-    "\n",
-    "report_name='./run/report-{}.json'.format(time.strftime(\"%Y-%m-%d_%Hh%Mm%Ss\"))\n",
+    "# --------- Datasets, models, and more.. -----------------------------------\n",
+    "#\n",
+    "# ---- For tests\n",
+    "# datasets   = ['set-24x24-L', 'set-24x24-RGB']\n",
+    "# models     = {'v1':get_model_v1, 'v4':get_model_v2}\n",
+    "# batch_size = 64\n",
+    "# epochs     = 2\n",
+    "# train_size = 0.1\n",
+    "# test_size  = 0.1\n",
+    "# with_datagen = False\n",
+    "# verbose      = 0\n",
+    "#\n",
+    "# ---- All possibilities -> Run A\n",
+    "datasets     = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
+    "models       = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
+    "batch_size   = 64\n",
+    "epochs       = 16\n",
+    "train_size   = 1\n",
+    "test_size    = 1\n",
+    "with_datagen = False\n",
+    "verbose      = 0\n",
+    "#\n",
+    "# ---- Data augmentation -> Run B\n",
+    "# datasets     = ['set-48x48-RGB']\n",
+    "# models       = {'v2':get_model_v2}\n",
+    "# batch_size   = 64\n",
+    "# epochs       = 20\n",
+    "# train_size   = 1\n",
+    "# test_size    = 1\n",
+    "# with_datagen = True\n",
+    "# verbose      = 0\n",
+    "#\n",
+    "# ---------------------------------------------------------------------------\n",
     "\n",
+    "# ---- Data augmentation\n",
+    "#\n",
+    "if with_datagen :\n",
+    "    datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,\n",
+    "                                                           featurewise_std_normalization=False,\n",
+    "                                                           width_shift_range=0.1,\n",
+    "                                                           height_shift_range=0.1,\n",
+    "                                                           zoom_range=0.2,\n",
+    "                                                           shear_range=0.1,\n",
+    "                                                           rotation_range=10.)\n",
+    "else:\n",
+    "    datagen=None\n",
+    "    \n",
     "# ---- Run\n",
-    "\n",
-    "out    = multi_run(datasets, models, batch_size=64, epochs=2)\n",
+    "#\n",
+    "output = multi_run(datasets, models,\n",
+    "                   datagen=datagen,\n",
+    "                   train_size=train_size, test_size=test_size,\n",
+    "                   batch_size=batch_size, epochs=epochs,\n",
+    "                   verbose=verbose,\n",
+    "                   extension_dir=tag_id)\n",
     "\n",
     "# ---- Save report\n",
+    "#\n",
+    "report={}\n",
+    "report['output']=output\n",
+    "report['description']='train_size={} test_size={} batch_size={} epochs={} data_aug={}'.format(train_size,test_size,batch_size,epochs,with_datagen)\n",
+    "\n",
+    "report_name='./run/report_{}.json'.format(tag_id)\n",
     "\n",
-    "with open(report_name, 'w') as outfile:\n",
-    "    json.dump(out, outfile)\n",
+    "with open(report_name, 'w') as file:\n",
+    "    json.dump(report, file)\n",
     "\n",
     "print('\\nReport saved as ',report_name)\n",
     "print('-'*59)\n"
diff --git a/GTSRB/05.1-Full-convolutions-batch.ipynb b/GTSRB/05.1-Full-convolutions-batch.ipynb
index cd78bdb..da29279 100644
--- a/GTSRB/05.1-Full-convolutions-batch.ipynb
+++ b/GTSRB/05.1-Full-convolutions-batch.ipynb
@@ -23,18 +23,8 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": 22,
+   "cell_type": "raw",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Done.\n"
-     ]
-    }
-   ],
    "source": [
     "%%bash\n",
     "\n",
@@ -56,7 +46,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [
     {
@@ -64,7 +54,7 @@
      "output_type": "stream",
      "text": [
       "[NbConvertApp] Converting notebook 05-Full-convolutions.ipynb to script\n",
-      "[NbConvertApp] Writing 8155 bytes to ./run/full_convolutions.py\n"
+      "[NbConvertApp] Writing 11301 bytes to ./run/full_convolutions_A.py\n"
      ]
     }
    ],
@@ -73,19 +63,19 @@
     "\n",
     "# ---- This will convert a notebook to a notebook.py script\n",
     "#\n",
-    "jupyter nbconvert --to script --output='./run/full_convolutions' '05-Full-convolutions.ipynb'"
+    "jupyter nbconvert --to script --output='./run/full_convolutions_A' '05-Full-convolutions.ipynb'"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "-rwxr-xr-x 1 paroutyj l-simap 8155 Jan 19 22:24 ./run/full_convolutions.py\n"
+      "-rwxr-xr-x 1 paroutyj l-simap 11301 Jan 20 22:11 ./run/full_convolutions_A.py\n"
      ]
     }
    ],
@@ -110,18 +100,18 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Overwriting ./run/batch_full_convolutions.sh\n"
+      "Overwriting ./run/batch_full_convolutions_A.sh\n"
      ]
     }
    ],
    "source": [
-    "%%writefile \"./run/batch_full_convolutions.sh\"\n",
+    "%%writefile \"./run/batch_full_convolutions_A.sh\"\n",
     "#!/bin/bash\n",
     "#OAR -n Full convolutions\n",
     "#OAR -t gpu\n",
     "#OAR -l /nodes=1/gpudevice=1,walltime=01:00:00\n",
-    "#OAR --stdout full_convolutions.out\n",
-    "#OAR --stderr full_convolutions.err\n",
+    "#OAR --stdout _batch/full_convolutions_%jobid%.out\n",
+    "#OAR --stderr _batch/full_convolutions_%jobid%.err\n",
     "#OAR --project deeplearningshs\n",
     "\n",
     "#---- For cpu\n",
@@ -141,7 +131,7 @@
     "\n",
     "CONDA_ENV=deeplearning2\n",
     "RUN_DIR=~/fidle/GTSRB\n",
-    "RUN_SCRIPT=./run/full_convolutions.py\n",
+    "RUN_SCRIPT=./run/full_convolutions_A.py\n",
     "\n",
     "# ---- Cuda Conda initialization\n",
     "#\n",
@@ -162,22 +152,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "-rwxr-xr-x 1 paroutyj l-simap  955 Jan 19 22:13 ./run/batch_full_convolutions.sh\n",
-      "-rwxr-xr-x 1 paroutyj l-simap 8155 Jan 19 22:24 ./run/full_convolutions.py\n"
+      "-rwxr-xr-x 1 paroutyj l-simap  1045 Jan 20 22:12 ./run/batch_full_convolutions_A.sh\n",
+      "-rwxr-xr-x 1 paroutyj l-simap 11301 Jan 20 22:11 ./run/full_convolutions_A.py\n"
      ]
     }
    ],
    "source": [
     "%%bash\n",
-    "chmod 755 \"./run/batch_full_convolutions.sh\"\n",
-    "chmod 755 \"./run/full_convolutions.py\"\n",
+    "chmod 755 ./run/*.sh\n",
+    "chmod 755 ./run/*.py\n",
     "ls -l ./run/*full_convolutions*"
    ]
   },
diff --git a/GTSRB/05.2-Full-convolutions-reports.ipynb b/GTSRB/05.2-Full-convolutions-reports.ipynb
index 61e5387..ed87da1 100644
--- a/GTSRB/05.2-Full-convolutions-reports.ipynb
+++ b/GTSRB/05.2-Full-convolutions-reports.ipynb
@@ -19,7 +19,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -38,24 +38,36 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
-    "def show_report(report_name):\n",
+    "def highlight_max(s):\n",
+    "    is_max = (s == s.max())\n",
+    "    return ['background-color: yellow' if v else '' for v in is_max]\n",
+    "\n",
+    "def show_report(file):\n",
     "    # ---- Read json file\n",
-    "    with open(report_name) as infile:\n",
+    "    with open(file) as infile:\n",
     "        dict_report = json.load( infile )\n",
+    "    output      = dict_report['output']\n",
+    "    description = dict_report['description']\n",
+    "    # ---- about\n",
+    "    print(\"\\n\\n\\nReport : \",Path(file).stem)\n",
+    "    print(    \"Desc.  : \",description,'\\n')\n",
     "    # ---- Create a pandas\n",
-    "    report = pd.DataFrame (dict_report)\n",
-    "    models = list(dict_report.keys())[2:]\n",
+    "    report       = pd.DataFrame (output)\n",
+    "    col_accuracy = [ c for c in output.keys() if c.endswith('Accuracy')]\n",
+    "    col_duration = [ c for c in output.keys() if c.endswith('Duration')]\n",
     "    # ---- Build formats\n",
     "    lambda_acc = lambda x : '{:.2f} %'.format(x) if (isinstance(x, float)) else '{:}'.format(x)\n",
     "    lambda_dur = lambda x : '{:.1f} s'.format(x) if (isinstance(x, float)) else '{:}'.format(x)\n",
     "    formats = {'Size':'{:.2f} Mo'}\n",
-    "    for m in models:\n",
-    "        formats[m]=lambda_acc if (m.endswith('Accuracy')) else lambda_dur\n",
-    "    t=report.style.format(formats).hide_index()\n",
+    "    for c in col_accuracy:   \n",
+    "        formats[c]=lambda_acc\n",
+    "    for c in col_duration:\n",
+    "        formats[c]=lambda_dur\n",
+    "    t=report.style.highlight_max(subset=col_accuracy).format(formats).hide_index()\n",
     "    display(t)"
    ]
   },
@@ -68,7 +80,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [
     {
@@ -77,7 +89,9 @@
      "text": [
       "\n",
       "\n",
-      "Report :  report-2020-01-19_22h14m29s \n",
+      "\n",
+      "Report :  report_2020_01_20_17h22m23s\n",
+      "Desc.  :  train_size=1 test_size=1 batch_size=64 epochs=16 data_aug=False \n",
       "\n"
      ]
     },
@@ -85,91 +99,97 @@
      "data": {
       "text/html": [
        "<style  type=\"text/css\" >\n",
-       "</style><table id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1 Accuracy</th>        <th class=\"col_heading level0 col3\" >v1 Duration</th>        <th class=\"col_heading level0 col4\" >v2 Accuracy</th>        <th class=\"col_heading level0 col5\" >v2 Duration</th>        <th class=\"col_heading level0 col6\" >v3 Accuracy</th>        <th class=\"col_heading level0 col7\" >v3 Duration</th>    </tr></thead><tbody>\n",
+       "    #T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col4 {\n",
+       "            background-color:  yellow;\n",
+       "        }    #T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col6 {\n",
+       "            background-color:  yellow;\n",
+       "        }    #T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col2 {\n",
+       "            background-color:  yellow;\n",
+       "        }</style><table id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302\" ><thead>    <tr>        <th class=\"col_heading level0 col0\" >Dataset</th>        <th class=\"col_heading level0 col1\" >Size</th>        <th class=\"col_heading level0 col2\" >v1_Accuracy</th>        <th class=\"col_heading level0 col3\" >v1_Duration</th>        <th class=\"col_heading level0 col4\" >v2_Accuracy</th>        <th class=\"col_heading level0 col5\" >v2_Duration</th>        <th class=\"col_heading level0 col6\" >v3_Accuracy</th>        <th class=\"col_heading level0 col7\" >v3_Duration</th>    </tr></thead><tbody>\n",
        "                <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col2\" class=\"data row0 col2\" >5.72 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col3\" class=\"data row0 col3\" >5.0 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col4\" class=\"data row0 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col5\" class=\"data row0 col5\" >2.4 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col6\" class=\"data row0 col6\" >-</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row0_col7\" class=\"data row0 col7\" >-</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col0\" class=\"data row0 col0\" >set-24x24-L</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col1\" class=\"data row0 col1\" >228.77 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col2\" class=\"data row0 col2\" >95.39 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col3\" class=\"data row0 col3\" >58.2 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col4\" class=\"data row0 col4\" >97.32 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col5\" class=\"data row0 col5\" >52.6 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col6\" class=\"data row0 col6\" >95.16 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row0_col7\" class=\"data row0 col7\" >50.3 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col2\" class=\"data row1 col2\" >16.62 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col3\" class=\"data row1 col3\" >2.9 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col4\" class=\"data row1 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col5\" class=\"data row1 col5\" >2.5 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col6\" class=\"data row1 col6\" >-</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row1_col7\" class=\"data row1 col7\" >-</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col0\" class=\"data row1 col0\" >set-24x24-RGB</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col1\" class=\"data row1 col1\" >684.39 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col2\" class=\"data row1 col2\" >96.11 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col3\" class=\"data row1 col3\" >47.2 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col4\" class=\"data row1 col4\" >97.77 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col5\" class=\"data row1 col5\" >55.2 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col6\" class=\"data row1 col6\" >96.52 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row1_col7\" class=\"data row1 col7\" >52.3 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col0\" class=\"data row2 col0\" >set-48x48-L</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col1\" class=\"data row2 col1\" >913.90 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col2\" class=\"data row2 col2\" >6.99 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col3\" class=\"data row2 col3\" >8.0 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col4\" class=\"data row2 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col5\" class=\"data row2 col5\" >3.7 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col6\" class=\"data row2 col6\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row2_col7\" class=\"data row2 col7\" >2.0 s</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col0\" class=\"data row2 col0\" >set-48x48-L</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col1\" class=\"data row2 col1\" >913.90 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col2\" class=\"data row2 col2\" >95.98 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col3\" class=\"data row2 col3\" >135.2 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col4\" class=\"data row2 col4\" >97.88 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col5\" class=\"data row2 col5\" >118.3 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col6\" class=\"data row2 col6\" >97.38 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row2_col7\" class=\"data row2 col7\" >92.0 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col0\" class=\"data row3 col0\" >set-48x48-RGB</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col1\" class=\"data row3 col1\" >2736.36 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col2\" class=\"data row3 col2\" >14.96 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col3\" class=\"data row3 col3\" >8.5 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col4\" class=\"data row3 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col5\" class=\"data row3 col5\" >3.7 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col6\" class=\"data row3 col6\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row3_col7\" class=\"data row3 col7\" >2.1 s</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col0\" class=\"data row3 col0\" >set-48x48-RGB</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col1\" class=\"data row3 col1\" >2736.36 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col2\" class=\"data row3 col2\" >96.29 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col3\" class=\"data row3 col3\" >138.7 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col4\" class=\"data row3 col4\" >97.95 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col5\" class=\"data row3 col5\" >124.7 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col6\" class=\"data row3 col6\" >97.53 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row3_col7\" class=\"data row3 col7\" >98.5 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col0\" class=\"data row4 col0\" >set-24x24-L-LHE</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col1\" class=\"data row4 col1\" >228.77 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col2\" class=\"data row4 col2\" >8.14 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col3\" class=\"data row4 col3\" >2.7 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col4\" class=\"data row4 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col5\" class=\"data row4 col5\" >2.5 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col6\" class=\"data row4 col6\" >-</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row4_col7\" class=\"data row4 col7\" >-</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col0\" class=\"data row4 col0\" >set-24x24-L-LHE</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col1\" class=\"data row4 col1\" >228.77 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col2\" class=\"data row4 col2\" >95.79 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col3\" class=\"data row4 col3\" >44.3 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col4\" class=\"data row4 col4\" >96.41 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col5\" class=\"data row4 col5\" >53.1 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col6\" class=\"data row4 col6\" >95.72 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row4_col7\" class=\"data row4 col7\" >50.9 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col0\" class=\"data row5 col0\" >set-24x24-RGB-HE</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col1\" class=\"data row5 col1\" >684.39 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col2\" class=\"data row5 col2\" >19.96 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col3\" class=\"data row5 col3\" >2.5 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col4\" class=\"data row5 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col5\" class=\"data row5 col5\" >2.7 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col6\" class=\"data row5 col6\" >-</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row5_col7\" class=\"data row5 col7\" >-</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col0\" class=\"data row5 col0\" >set-24x24-RGB-HE</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col1\" class=\"data row5 col1\" >684.39 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col2\" class=\"data row5 col2\" >95.35 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col3\" class=\"data row5 col3\" >46.0 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col4\" class=\"data row5 col4\" >96.80 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col5\" class=\"data row5 col5\" >54.5 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col6\" class=\"data row5 col6\" >94.29 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row5_col7\" class=\"data row5 col7\" >52.5 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col0\" class=\"data row6 col0\" >set-48x48-L-LHE</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col1\" class=\"data row6 col1\" >913.90 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col2\" class=\"data row6 col2\" >12.69 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col3\" class=\"data row6 col3\" >7.6 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col4\" class=\"data row6 col4\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col5\" class=\"data row6 col5\" >4.0 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col6\" class=\"data row6 col6\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row6_col7\" class=\"data row6 col7\" >2.0 s</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col0\" class=\"data row6 col0\" >set-48x48-L-LHE</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col1\" class=\"data row6 col1\" >913.90 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col2\" class=\"data row6 col2\" >96.72 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col3\" class=\"data row6 col3\" >131.8 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col4\" class=\"data row6 col4\" >97.80 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col5\" class=\"data row6 col5\" >117.9 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col6\" class=\"data row6 col6\" >97.16 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row6_col7\" class=\"data row6 col7\" >92.8 s</td>\n",
        "            </tr>\n",
        "            <tr>\n",
-       "                                <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col0\" class=\"data row7 col0\" >set-48x48-RGB-HE</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col1\" class=\"data row7 col1\" >2736.36 Mo</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col2\" class=\"data row7 col2\" >20.77 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col3\" class=\"data row7 col3\" >9.5 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col4\" class=\"data row7 col4\" >5.56 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col5\" class=\"data row7 col5\" >3.8 s</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col6\" class=\"data row7 col6\" >5.94 %</td>\n",
-       "                        <td id=\"T_deb0ddec_3b01_11ea_a4ca_350a472bdab7row7_col7\" class=\"data row7 col7\" >2.4 s</td>\n",
+       "                                <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col0\" class=\"data row7 col0\" >set-48x48-RGB-HE</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col1\" class=\"data row7 col1\" >2736.36 Mo</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col2\" class=\"data row7 col2\" >94.58 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col3\" class=\"data row7 col3\" >140.0 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col4\" class=\"data row7 col4\" >97.77 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col5\" class=\"data row7 col5\" >124.6 s</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col6\" class=\"data row7 col6\" >97.11 %</td>\n",
+       "                        <td id=\"T_24e98e70_3ba8_11ea_9561_9b700250c302row7_col7\" class=\"data row7 col7\" >100.1 s</td>\n",
        "            </tr>\n",
        "    </tbody></table>"
       ],
       "text/plain": [
-       "<pandas.io.formats.style.Styler at 0x7f5585503390>"
+       "<pandas.io.formats.style.Styler at 0x7f1cc3519dd0>"
       ]
      },
      "metadata": {},
@@ -178,36 +198,22 @@
    ],
    "source": [
     "for file in glob.glob(\"./run/*.json\"):\n",
-    "    print(\"\\n\\nReport : \",Path(file).stem,'\\n') \n",
     "    show_report(file)\n"
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
-   "source": [
-    "---\n",
-    "\n",
-    "\n",
-    "### Some old results :  \n",
-    "\n"
-   ]
+   "outputs": [],
+   "source": []
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {},
-   "source": [
-    "|         Datasets         |       Size      |     Model : v1     |    Model : v2      |     Model : v3     |\n",
-    "|:------------------------:|:---------------:|:------------------:|:------------------:|:------------------:|\n",
-    "| set-24x24-L              |          229 Mo |   95.91%  75.04s   |   96.86% 102.28s   |   -      -         |\n",
-    "| set-24x24-RGB            |          684 Mo |   96.60%  77.24s   |   97.32% 103.93s   |   -      -         |\n",
-    "| set-48x48-L              |          914 Mo | **96.71%** 123.94s |   97.68% 149.57s   |  97.60%  91.53s    |\n",
-    "| set-48x48-RGB            |         2736 Mo |   96.36% 117.74s   | **98.20%** 142.63s |  97.28%  91.29s    |\n",
-    "| set-24x24-L-LHE          |          229 Mo |   95.95%  66.12s   |   96.75%  89.45s   |   -      -         |\n",
-    "| set-24x24-RGB-HE         |          684 Mo |   95.30%  68.89s   |   96.28%  92.15s   |   -      -         |\n",
-    "| set-48x48-L-LHE          |          914 Mo |   96.69% 109.28s   |   97.94% 135.17s   | **97.97%** 83.80s  |\n",
-    "| set-48x48-RGB-HE         |         2736 Mo |   95.29% 117.70s   | **98.13%** 141.56s |  97.00%  89.38s    |"
-   ]
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
diff --git a/GTSRB/99 Scripts-Tensorboard.ipynb b/GTSRB/99 Scripts-Tensorboard.ipynb
index daba57f..2db0f8b 100644
--- a/GTSRB/99 Scripts-Tensorboard.ipynb	
+++ b/GTSRB/99 Scripts-Tensorboard.ipynb	
@@ -21,14 +21,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Tensorbord started - pid is  49306\n"
+      "Tensorbord started - pid is  9934\n",
+      "12399\n"
      ]
     }
    ],
@@ -39,7 +40,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [
     {
@@ -57,14 +58,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "Tensorboard process not found...\n"
+      "Tensorbord stopped - pid was 9934\n"
      ]
     }
    ],
@@ -82,7 +83,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
@@ -112,6 +113,7 @@
     "\n",
     "VERSION='1.2'\n",
     "CONDA_ENV='GPU'\n",
+    "HOST_FRONT='f-dahu'\n",
     "\n",
     "# ---- Usage\n",
     "#\n",
@@ -145,16 +147,26 @@
     "# ---- Start it\n",
     "#\n",
     "tensorboard --port $PORT_TSB --host 0.0.0.0 $@  &>/dev/null &\n",
-    "\n",
+    "                     \n",
     "# ---- Where is it ?\n",
     "#\n",
     "sleep 5\n",
     "p=\"$(/bin/ps ax | /bin/grep \"tensorboard --port $PORT_TSB\" | /bin/grep -v grep | awk '{print $1}')\"\n",
     "if [ -z \"$p\" ]; then\n",
     "        echo \"Tensorboard didn't start... check your parameters !\"\n",
+    "        exit\n",
     "else\n",
     "        echo \"Tensorbord started - pid is  $p\"\n",
-    "fi"
+    "fi\n",
+    "                        \n",
+    "# ---- Not on a node ? we give the tunnel\n",
+    "#\n",
+    "if [ \"$(hostname)\" == $HOST_FRONT ]\n",
+    "then\n",
+    "    SSH_CMD=\"/usr/bin/ssh -NL 6006:$HOST_FRONT:$PORT_TSB dahu.ciment\"\n",
+    "    echo -e \"SSH Tunnel : \\e[93m$SSH_CMD \\e[0m\\n\"\n",
+    "fi\n",
+    "   "
    ]
   },
   {
-- 
GitLab