diff --git a/LinearReg/01-Linear-Regression.ipynb b/LinearReg/01-Linear-Regression.ipynb
index bcab10d0dd0340661e347417b19cfc2e9f746ce7..bcd6815e75d2ba6f8b91bb2c3bbb55efd4900261 100644
--- a/LinearReg/01-Linear-Regression.ipynb
+++ b/LinearReg/01-Linear-Regression.ipynb
@@ -38,11 +38,10 @@
     "import matplotlib\n",
     "import matplotlib.pyplot as plt\n",
     "import sys\n",
+    "import fidle\n",
     "\n",
-    "sys.path.append('..')\n",
-    "import fidle.pwk as pwk\n",
-    "\n",
-    "datasets_dir = pwk.init('LINR1')"
+    "# Init Fidle environment\n",
+    "run_id, run_dir, datasets_dir = fidle.init('LINR1')"
    ]
   },
   {
@@ -107,7 +106,7 @@
     "ax.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n",
     "ax.set_xlabel('x axis')\n",
     "ax.set_ylabel('y axis')\n",
-    "pwk.save_fig('01-set_of_points')\n",
+    "fidle.scrawler.save_fig('01-set_of_points')\n",
     "plt.show()"
    ]
   },
@@ -164,7 +163,7 @@
     "ax.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n",
     "ax.set_xlabel('x axis')\n",
     "ax.set_ylabel('y axis')\n",
-    "pwk.save_fig('02-regression-line')\n",
+    "fidle.scrawler.save_fig('02-regression-line')\n",
     "plt.show()"
    ]
   },
@@ -174,7 +173,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pwk.end()"
+    "fidle.end()"
    ]
   },
   {
@@ -188,7 +187,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3.9.2 ('fidle-env')",
    "language": "python",
    "name": "python3"
   },
@@ -202,7 +201,12 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.9"
+   "version": "3.9.2"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
+   }
   }
  },
  "nbformat": 4,
diff --git a/LinearReg/02-Gradient-descent.ipynb b/LinearReg/02-Gradient-descent.ipynb
index da61c7062366899787f7266846dbcdfafa535670..0cb10ef77789abf14d7826a9467bbd73e5b7aba3 100644
--- a/LinearReg/02-Gradient-descent.ipynb
+++ b/LinearReg/02-Gradient-descent.ipynb
@@ -50,18 +50,17 @@
     "import numpy as np\n",
     "import sys\n",
     "\n",
-    "sys.path.append('..')\n",
-    "import fidle.pwk as pwk\n",
+    "import fidle\n",
     "\n",
     "from modules.RegressionCooker import RegressionCooker \n",
     "\n",
-    "# ---- Init Fidle stuffs\n",
+    "# Init Fidle environment\n",
     "#\n",
-    "datasets_dir = pwk.init('GRAD1')\n",
+    "run_id, run_dir, datasets_dir = fidle.init('GRAD1')\n",
     "\n",
     "# ---- Instanciate a Regression Cooker\n",
     "#\n",
-    "cooker = RegressionCooker(pwk)"
+    "cooker = RegressionCooker(fidle)"
    ]
   },
   {
@@ -140,7 +139,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pwk.end()"
+    "fidle.end()"
    ]
   },
   {
@@ -154,7 +153,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3.9.2 ('fidle-env')",
    "language": "python",
    "name": "python3"
   },
@@ -168,7 +167,12 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.9"
+   "version": "3.9.2"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
+   }
   }
  },
  "nbformat": 4,
diff --git a/LinearReg/03-Polynomial-Regression.ipynb b/LinearReg/03-Polynomial-Regression.ipynb
index 1cf5aa85bacec23d1a1b3722be36b0ad1d862d41..1bf39cb12c2ee257a49eb543837211b5467ae717 100644
--- a/LinearReg/03-Polynomial-Regression.ipynb
+++ b/LinearReg/03-Polynomial-Regression.ipynb
@@ -34,11 +34,10 @@
     "import matplotlib\n",
     "import matplotlib.pyplot as plt\n",
     "import sys\n",
+    "import fidle\n",
     "\n",
-    "sys.path.append('..')\n",
-    "import fidle.pwk as pwk\n",
-    "\n",
-    "datasets_dir = pwk.init('POLR1')"
+    "# Init Fidle environment\n",
+    "run_id, run_dir, datasets_dir = fidle.init('POLR1')"
    ]
   },
   {
@@ -97,24 +96,24 @@
     "    print(\"{:8} :      mean={:+12.4f}  std={:+12.4f}    min={:+12.4f}    max={:+12.4f}\".format(name,m,s,V.min(),V.max()))\n",
     "\n",
     "\n",
-    "pwk.display_md('#### Generator :')\n",
+    "fidle.utils.display_md('#### Generator :')\n",
     "print(f\"Nomber of points={n}  deg={deg} bruit={noise}\")\n",
     "\n",
-    "pwk.display_md('#### Datasets :')\n",
+    "fidle.utils.display_md('#### Datasets :')\n",
     "print(f\"{nb_viz} points visibles sur {n})\")\n",
     "plt.figure(figsize=(width, height))\n",
     "plt.plot(X[:nb_viz], Y[:nb_viz], '.')\n",
     "plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n",
     "plt.xlabel('x axis')\n",
     "plt.ylabel('y axis')\n",
-    "pwk.save_fig(\"01-dataset\")\n",
+    "fidle.scrawler.save_fig(\"01-dataset\")\n",
     "plt.show()\n",
     "\n",
-    "pwk.display_md('#### Before normalization :')\n",
+    "fidle.utils.display_md('#### Before normalization :')\n",
     "vector_infos('X',X)\n",
     "vector_infos('Y',Y)\n",
     "\n",
-    "pwk.display_md('#### After normalization :')         \n",
+    "fidle.utils.display_md('#### After normalization :')         \n",
     "vector_infos('X_norm',X_norm)\n",
     "vector_infos('Y_norm',Y_norm)\n"
    ]
@@ -143,7 +142,7 @@
     "    plt.tick_params(axis='both', which='both', bottom=False, left=False, labelbottom=False, labelleft=False)\n",
     "    plt.xlabel('x axis')\n",
     "    plt.ylabel('y axis')\n",
-    "    pwk.save_fig(save_as)\n",
+    "    fidle.scrawler.save_fig(save_as)\n",
     "    plt.show()"
    ]
   },
@@ -212,7 +211,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pwk.end()"
+    "fidle.end()"
    ]
   },
   {
@@ -226,7 +225,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3.9.2 ('fidle-env')",
    "language": "python",
    "name": "python3"
   },
@@ -240,7 +239,12 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.9"
+   "version": "3.9.2"
+  },
+  "vscode": {
+   "interpreter": {
+    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
+   }
   }
  },
  "nbformat": 4,
diff --git a/LinearReg/modules/RegressionCooker.py b/LinearReg/modules/RegressionCooker.py
index 9640d163f4c433647aba1736804d7ec1bc7e399c..7bf6a11c523d8abfaba8175001cd7739627389c4 100644
--- a/LinearReg/modules/RegressionCooker.py
+++ b/LinearReg/modules/RegressionCooker.py
@@ -20,17 +20,15 @@ import matplotlib
 import matplotlib.pyplot as plt
 from IPython.display import display,Markdown,HTML
 
-sys.path.append('..')
-import fidle.pwk as pwk
 
 class RegressionCooker():
     
-    pwk     = None
+    fidle   = None
     version = '0.1'
     
-    def __init__(self, pwk):
-        self.pwk = pwk
-        pwk.subtitle('FIDLE 2020 - Regression Cooker')
+    def __init__(self, fidle):
+        self.fidle = fidle
+        fidle.utils.subtitle('FIDLE 2020 - Regression Cooker')
         print('Version      :', self.version)
         print('Run time     : {}'.format(time.strftime("%A %d %B %Y, %H:%M:%S")))
         
@@ -101,7 +99,7 @@ class RegressionCooker():
         print(f"X shape : {X.shape}  Y shape : {Y.shape}  plot : {nb_viz} points")
         plt.figure(figsize=(width, height))
         plt.plot(X[:nb_viz], Y[:nb_viz], '.')
-        self.pwk.save_fig('01-dataset')
+        self.fidle.scrawler.save_fig('01-dataset')
         plt.show()
         self.vector_infos('X',X)
         self.vector_infos('Y',Y)
@@ -189,13 +187,13 @@ class RegressionCooker():
 
         # ---- Visualization
 
-        pwk.subtitle('Visualization :')
-        self.pwk.save_fig('02-basic_descent')
+        self.fidle.utils.subtitle('Visualization :')
+        self.fidle.scrawler.save_fig('02-basic_descent')
         plt.show()
 
-        pwk.subtitle('Loss :')
+        self.fidle.utils.subtitle('Loss :')
         self.__plot_loss(loss)
-        self.pwk.save_fig('03-basic_descent_loss')
+        self.fidle.scrawler.save_fig('03-basic_descent_loss')
         plt.show()
         
         return theta
@@ -268,13 +266,13 @@ class RegressionCooker():
         
         # ---- Visualization
 
-        pwk.subtitle('Visualization :')
-        self.pwk.save_fig('04-minibatch_descent')
+        self.fidle.utils.subtitle('Visualization :')
+        self.fidle.scrawler.save_fig('04-minibatch_descent')
         plt.show()
 
-        pwk.subtitle('Loss :')
+        self.fidle.utils.subtitle('Loss :')
         self.__plot_loss(loss)
-        self.pwk.save_fig('05-minibatch_descent_loss')
+        self.fidle.scrawler.save_fig('05-minibatch_descent_loss')
         plt.show()
         
         
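Beyond the renaming, this hunk fixes a latent inconsistency: the old class depended on a module-level import fidle.pwk as pwk and mixed bare pwk.subtitle(...) calls with self.pwk.save_fig(...); the new version drops the module-level import and routes everything through the injected instance. The injection pattern, as the GRAD1 notebook above uses it:

    import fidle
    from modules.RegressionCooker import RegressionCooker

    # Hand the imported fidle module to the cooker; all of its output then
    # goes through self.fidle.utils.* and self.fidle.scrawler.*
    run_id, run_dir, datasets_dir = fidle.init('GRAD1')
    cooker = RegressionCooker(fidle)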
diff --git a/README.ipynb b/README.ipynb
index 08c5a9a7816a238450e823f383a8c9401f649268..775dc5de072c436a2f55470d9cae2724b53a5ee3 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "9e5009a6",
+   "id": "a2ed5c3c",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2022-03-29T20:08:36.649716Z",
-     "iopub.status.busy": "2022-03-29T20:08:36.646330Z",
-     "iopub.status.idle": "2022-03-29T20:08:36.658519Z",
-     "shell.execute_reply": "2022-03-29T20:08:36.658045Z"
+     "iopub.execute_input": "2022-10-07T14:36:11.174082Z",
+     "iopub.status.busy": "2022-10-07T14:36:11.173271Z",
+     "iopub.status.idle": "2022-10-07T14:36:11.184854Z",
+     "shell.execute_reply": "2022-10-07T14:36:11.184061Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -31,8 +31,9 @@
        "## About Fidle\n",
        "\n",
        "This repository contains all the documents and links of the **Fidle Training** .   \n",
-       "Fidle (for Formation Introduction au Deep Learning) is a 2-day training session  \n",
-       "co-organized by the Formation Permanente CNRS and the Resinfo/SARI and DevLOG CNRS networks.  \n",
+       "Fidle (for Formation Introduction au Deep Learning) is a 3-day training session co-organized  \n",
+       "by the 3IA MIAI institute, the CNRS, via the Mission for Transversal and Interdisciplinary  \n",
+       "Initiatives (MITI) and the University of Grenoble Alpes (UGA).  \n",
        "\n",
        "The objectives of this training are :\n",
        " - Understanding the **bases of Deep Learning** neural networks\n",
@@ -51,9 +52,7 @@
        "For more information, you can contact us at :  \n",
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
-       "Current Version : <!-- VERSION_BEGIN -->\n",
-       "**2.0.35**\n",
-       "<!-- VERSION_END -->\n",
+       "Current Version : <!-- VERSION_BEGIN -->2.02b2<!-- VERSION_END -->\n",
        "\n",
        "\n",
        "## Course materials\n",
@@ -227,13 +226,13 @@
     "from IPython.display import display,Markdown\n",
     "display(Markdown(open('README.md', 'r').read()))\n",
     "#\n",
-    "# This README is visible under Jupiter LAb ! :-)"
+    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 07/10/22 16:36:10"
    ]
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -247,7 +246,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.7"
+   "version": "3.9.2"
   }
  },
  "nbformat": 4,
diff --git a/README.md b/README.md
index 2dc61b106b27ef6c545c654393eb154173ce6a55..52214bb22f85a8690d6db9c32c3a57cb82419ddb 100644
--- a/README.md
+++ b/README.md
@@ -10,8 +10,9 @@
 ## About Fidle
 
 This repository contains all the documents and links of the **Fidle Training** .   
-Fidle (for Formation Introduction au Deep Learning) is a 2-day training session  
-co-organized by the Formation Permanente CNRS and the Resinfo/SARI and DevLOG CNRS networks.  
+Fidle (for Formation Introduction au Deep Learning) is a 3-day training session co-organized  
+by the 3IA MIAI institute, the CNRS via its Mission for Transversal and Interdisciplinary  
+Initiatives (MITI), and the University of Grenoble Alpes (UGA).  
 
 The objectives of this training are :
  - Understanding the **bases of Deep Learning** neural networks
@@ -30,9 +31,7 @@ For more information, see **https://fidle.cnrs.fr** :
 For more information, you can contact us at :  
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
-Current Version : <!-- VERSION_BEGIN -->
-**2.0.35**
-<!-- VERSION_END -->
+Current Version : <!-- VERSION_BEGIN -->2.02b2<!-- VERSION_END -->
 
 
 ## Course materials
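Both READMEs keep the version string between <!-- VERSION_BEGIN --> / <!-- VERSION_END --> markers so it can be rewritten in place; the deleted update notebook below did this with cookindex.tag, whose implementation is not part of this diff. A minimal sketch of such a tag replacement, assuming a simple regex-based helper:

    import re

    def tag(name: str, new_content: str, text: str) -> str:
        # Replace whatever sits between <!-- NAME_BEGIN --> and <!-- NAME_END -->
        pattern     = rf'<!-- {name}_BEGIN -->.*?<!-- {name}_END -->'
        replacement = f'<!-- {name}_BEGIN -->{new_content}<!-- {name}_END -->'
        return re.sub(pattern, replacement, text, flags=re.DOTALL)

    with open('README.md') as fp:
        readme = fp.read()
    readme = tag('VERSION', '2.02b2', readme)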
diff --git a/fidle/01-update-index.ipynb b/fidle/01-update-index.ipynb
deleted file mode 100644
index 8994f52eb52cca8004850afb21472ae580fc7764..0000000000000000000000000000000000000000
--- a/fidle/01-update-index.ipynb
+++ /dev/null
@@ -1,302 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
-    "\n",
-    "\n",
-    "## Mise a jour du catalog des notebooks et des READMEs\n",
-    " - Génération du **catalog des notebooks** : [./logs/catalog.json](./logs/catalog.json)  \n",
-    "   Ce fichier comporte une liste détaillée de tous les notebooks et scripts.\n",
-    "   \n",
-    "   \n",
-    " - Génération automatique de la table des matières et mise à jour des **README**\n",
-    "     - [README.md](../README.md)\n",
-    "     - [README.ipynb](../README.ipynb)\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Load modules and init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import nbformat\n",
-    "from nbconvert.preprocessors import ExecutePreprocessor\n",
-    "from IPython.display import display,Markdown\n",
-    "import sys\n",
-    "import datetime\n",
-    "\n",
-    "sys.path.append('..')\n",
-    "import fidle.config as config\n",
-    "import fidle.cookindex as cookindex\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - List of folders containing notebooks to be indexed :\n",
-    "Order wil be index order"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "directories_to_index = {'LinearReg':'Linear and logistic regression', \n",
-    "                        'IRIS':'Perceptron Model 1957', \n",
-    "                        'BHPD':'Basic regression using DNN',\n",
-    "                        'MNIST':'Basic classification using a DNN',\n",
-    "                        'GTSRB':'Images classification with Convolutional Neural Networks (CNN)',\n",
-    "                        'IMDB':'Sentiment analysis with word embedding',\n",
-    "                        'SYNOP':'Time series with Recurrent Neural Network (RNN)',\n",
-    "                        'Transformers': 'Sentiment analysis with transformers',\n",
-    "                        'AE':'Unsupervised learning with an autoencoder neural network (AE)',\n",
-    "                        'VAE':'Generative network with Variational Autoencoder (VAE)',\n",
-    "                        'DCGAN':'Generative Adversarial Networks (GANs)',\n",
-    "                        'DRL':'Deep Reinforcement Learning (DRL)',\n",
-    "                        'Misc':'Miscellaneous'\n",
-    "                        }"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Catalog of notebooks\n",
-    "### 3.1 - Build catalog"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cookindex.build_catalog(directories_to_index)\n",
-    "cookindex.build_default_profile()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.3 Buil index"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "catalog = cookindex. read_catalog()\n",
-    "styles = open('css/readme.css', \"r\").read()\n",
-    "\n",
-    "lines_md=[]\n",
-    "lines_html=[styles]\n",
-    "\n",
-    "for directory,title in directories_to_index.items():\n",
-    "    \n",
-    "    lines_md.append(f'\\n### {title}')\n",
-    "    lines_html.append( f'<div class=\"fid_section\">{title}</div>')\n",
-    "    \n",
-    "    entries = { k:v for k,v in catalog.items() if v['dirname']==directory }\n",
-    "\n",
-    "    for id, about in entries.items():\n",
-    "        id          = about['id']\n",
-    "        dirname     = about['dirname']\n",
-    "        basename    = about['basename']\n",
-    "        title       = about['title']\n",
-    "        description = about['description']\n",
-    "\n",
-    "        link=f'{dirname}/{basename}'.replace(' ','%20')\n",
-    "        md   = f'- **[{id}]({link})** - [{title}]({link})  \\n'\n",
-    "        md  += f'{description}'\n",
-    "        html = f\"\"\"<div class=\"fid_line\">\n",
-    "                       <span class=\"fid_id\">\n",
-    "                           <a href=\"{link}\">{id}</a>\n",
-    "                       </span> <a href=\"{link}\">{title}</a><br>\n",
-    "                       <span class=\"fid_desc\">{description}</span>\n",
-    "                  </div>\n",
-    "                \"\"\"\n",
-    "        lines_md.append(md)\n",
-    "        lines_html.append(html)\n",
-    "\n",
-    "index_md   = '\\n'.join(lines_md)\n",
-    "index_html = '\\n'.join(lines_html)\n",
-    "\n",
-    "display(Markdown('**Index is :**'))\n",
-    "\n",
-    "display(Markdown(index_md))\n",
-    "# display(HTML(index_html))\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Update README.md"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Load README.md\n",
-    "#\n",
-    "with open('../README.md','r') as fp:\n",
-    "    readme=fp.read()\n",
-    "    \n",
-    "# ---- Update index, version\n",
-    "#\n",
-    "readme = cookindex.tag('INDEX',   index_md,                readme)\n",
-    "readme = cookindex.tag('VERSION', f'**{config.VERSION}**', readme)\n",
-    "\n",
-    "# ---- Save it\n",
-    "#\n",
-    "with open('../README.md','wt') as fp:\n",
-    "    fp.write(readme)\n",
-    "\n",
-    "print('README.md is updated.')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - README.ipynb\n",
-    "Just execute README.ipynb"
-   ]
-  },
-  {
-   "cell_type": "raw",
-   "metadata": {},
-   "source": [
-    "# ---- Load notebook\n",
-    "#\n",
-    "notebook = nbformat.read('../README.ipynb', nbformat.NO_CONVERT)\n",
-    "\n",
-    "# new_cell = nbformat.v4.new_markdown_cell(source=readme)\n",
-    "# notebook.cells.append(new_cell)\n",
-    "\n",
-    "# ---- Execute it\n",
-    "#\n",
-    "ep = ExecutePreprocessor(timeout=600, kernel_name=\"python3\")\n",
-    "ep.preprocess(notebook,  {'metadata': {'path': '..'}})\n",
-    "\n",
-    "# ---- Save it\n",
-    "with open('../READMEv2.ipynb', mode=\"w\", encoding='utf-8') as fp:\n",
-    "    nbformat.write(notebook)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - More fun : Create and execute it :-)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Plus rigolo, on va fabriquer le README.ipynb et l'executer :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Create Notebook from scratch\n",
-    "#\n",
-    "notebook = nbformat.v4.new_notebook()\n",
-    "\n",
-    "# ---- Add a code cell\n",
-    "#\n",
-    "code = \"from IPython.display import display,Markdown\\n\"\n",
-    "code+= \"display(Markdown(open('README.md', 'r').read()))\\n\"\n",
-    "code+= \"#\\n\"\n",
-    "code+= \"# This README is visible under Jupiter LAb ! :-)\"\n",
-    "\n",
-    "new_cell = nbformat.v4.new_code_cell(source=code)\n",
-    "new_cell['metadata']= { \"jupyter\": { \"source_hidden\": True} }\n",
-    "notebook.cells.append(new_cell)\n",
-    "\n",
-    "# --- Pour éviter une modification lors de l'ouverture du notebook\n",
-    "#     pas génante, mais nécessite de resauvegarder le document à la fermeture...\n",
-    "notebook['metadata'][\"kernelspec\"] = {\"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\" }\n",
-    "\n",
-    "# ---- Run it\n",
-    "#\n",
-    "ep = ExecutePreprocessor(timeout=600, kernel_name=\"python3\")\n",
-    "ep.preprocess(notebook,  {'metadata': {'path': '..'}})\n",
-    "\n",
-    "# ---- Save it\n",
-    "#\n",
-    "with open('../README.ipynb', mode=\"w\", encoding='utf-8') as fp:\n",
-    "    nbformat.write(notebook, fp)\n",
-    "print('README.ipynb built and saved')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "now = datetime.datetime.now()\n",
-    "print('Completed on : ', now.strftime(\"%A %d %B %Y, %H:%M:%S\"))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/00-Fidle-logo-01.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "interpreter": {
-   "hash": "7822d55dc7294a4f6f06b86d8ad2ca65bd6e1ee5d72628c47c30a06bbf89aef6"
-  },
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
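The useful trick in this deleted notebook, building README.ipynb from scratch and executing it so it always mirrors README.md, presumably moved into the new tooling (the generation timestamp now embedded in README.ipynb above points that way). Condensed from the removed cell, with the French comments translated:

    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor

    # Build a one-cell notebook that renders README.md as Markdown
    notebook = nbformat.v4.new_notebook()
    code  = "from IPython.display import display,Markdown\n"
    code += "display(Markdown(open('README.md', 'r').read()))\n"
    cell = nbformat.v4.new_code_cell(source=code)
    cell['metadata'] = {'jupyter': {'source_hidden': True}}
    notebook.cells.append(cell)

    # Pin the kernelspec to avoid a spurious modification when the notebook is opened
    notebook['metadata']['kernelspec'] = {
        'display_name': 'Python 3', 'language': 'python', 'name': 'python3'}

    # Execute it from the repository root, then save it
    ep = ExecutePreprocessor(timeout=600, kernel_name='python3')
    ep.preprocess(notebook, {'metadata': {'path': '..'}})
    with open('../README.ipynb', mode='w', encoding='utf-8') as fp:
        nbformat.write(notebook, fp)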
diff --git a/fidle/02-running-ci-tests.ipynb b/fidle/02-running-ci-tests.ipynb
deleted file mode 100644
index 4889877d48ff5b0eff2dcadc03cf93ce33b1b03e..0000000000000000000000000000000000000000
--- a/fidle/02-running-ci-tests.ipynb
+++ /dev/null
@@ -1,123 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
-    "\n",
-    "\n",
-    "# Gestion des tests d'intégration continue\n",
-    "\n",
-    "**La liste des notebooks a éxécuter** et de leurs paramètres (override) est définie dans un **profile**.\\\n",
-    "Un **rapport d'éxécution** est généré durant l'éxécution des tests.\n",
-    "\n",
-    "## Step 1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import cookci\n",
-    "import os\n",
-    "import pwk\n",
-    "datasets_dir = pwk.init('RUNCI')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Profile and parameters\n",
-    "`profile_name`: ci profile name - see en ./ci  \n",
-    "`reset`: reset the catalog.json file of results (False)  \n",
-    "`run_if_exist`: run if html output exist (False)  \n",
-    "`filters`: regex to define witch notebook will be run, examples :  \n",
-    "- `.*`\n",
-    "- `Nb_GTSRB.*|Nb_AE.*`\n",
-    "- `Nb_VAE3`"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "profile_name = './ci/small_cpu.yml'\n",
-    "reset        = False\n",
-    "run_if_exist = False\n",
-    "filter       = 'No_one.*'\n",
-    "\n",
-    "pwk.override('profile_name', 'reset', 'run_if_exist', 'filter')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Run it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "cookci.run_profile( profile_name, \n",
-    "                    reset        = reset, \n",
-    "                    run_if_exist = run_if_exist, \n",
-    "                    filter       = filter )\n",
-    "cookci.build_ci_report(profile_name)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "%%bash\n",
-    "# tree ./run/ci\n",
-    "# find .. -name \"*==ci==.*\" -ls | sort -k11\n",
-    "# rm $(find .. -name \"*==ci==.ipynb\")\n",
-    "# rm $(find .. -name \"*==ci==.html\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/00-Fidle-logo-01.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "interpreter": {
-   "hash": "7822d55dc7294a4f6f06b86d8ad2ca65bd6e1ee5d72628c47c30a06bbf89aef6"
-  },
-  "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit ('fidle': conda)",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/fidle/Template.ipynb b/fidle/Template.ipynb
deleted file mode 100644
index 56584b7ee9b5e53d822884c09a39da48dd30ca04..0000000000000000000000000000000000000000
--- a/fidle/Template.ipynb
+++ /dev/null
@@ -1,56 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> Titre_du_notebook\n",
-    "<!-- DESC --> Description_du_notebook_et_de_sa_thématique\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Objectif \n",
-    " - Objectif_pédagogique  \n",
-    "\n",
-    "\n",
-    "A_propos_du_dataset\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Ceci\n",
-    " - Cela\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/00-Fidle-logo-01.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.7.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/fidle/__init__.py b/fidle/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/fidle/about.yml b/fidle/about.yml
new file mode 100644
index 0000000000000000000000000000000000000000..46d8091b5541295ce18171f494f09deee57bb55a
--- /dev/null
+++ b/fidle/about.yml
@@ -0,0 +1,39 @@
+#--------------------------------------------------------------------
+#  ______ _     _ _         _____            _             _       
+# |  ____(_)   | | |       / ____|          | |           | |      
+# | |__   _  __| | | ___  | |     ___  _ __ | |_ ___ _ __ | |_ ___ 
+# |  __| | |/ _` | |/ _ \ | |    / _ \| '_ \| __/ _ \ '_ \| __/ __|
+# | |    | | (_| | |  __/ | |___| (_) | | | | ||  __/ | | | |_\__ \
+# |_|    |_|\__,_|_|\___|  \_____\___/|_| |_|\__\___|_| |_|\__|___/
+#
+#                      Formation Introduction au Deep Learning - 2022
+#--------------------------------------------------------------------
+# Formation Introduction au Deep Learning       https://fidle.cnrs.fr
+# By MIAI/CNRS/UGA 2022
+#
+# This file describes the notebooks used by the Fidle training.
+
+version:          2.02b3
+content:          notebooks
+name:             Notebooks Fidle
+description:      All notebooks used by the Fidle training
+
+
+readme_md:        README.md
+readme_ipynb:     README.ipynb
+default_ci:       fidle/ci/default.yml
+
+toc:
+  LinearReg:      Linear and logistic regression
+  IRIS:           Perceptron Model 1957
+  BHPD:           Basic regression using DNN
+  MNIST:          Basic classification using a DNN
+  GTSRB:          Images classification with Convolutional Neural Networks (CNN)
+  IMDB:           Sentiment analysis with word embedding
+  SYNOP:          Time series with Recurrent Neural Network (RNN)
+  Transformers:   Sentiment analysis with transformers
+  AE:             Unsupervised learning with an autoencoder neural network (AE)
+  VAE:            Generative network with Variational Autoencoder (VAE)
+  DCGAN:          Generative Adversarial Networks (GANs)
+  DRL:            Deep Reinforcement Learning (DRL)
+  Misc:           Miscellaneous
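This new about.yml centralizes what fidle/config.py (deleted below) and the hard-coded directory dictionary of the deleted index notebook used to carry: version, README locations, default CI profile and the table of contents. A minimal sketch of consuming it (assumes PyYAML; the actual loader lives in the fidle package, outside this diff):

    import yaml

    with open('fidle/about.yml') as fp:
        about = yaml.safe_load(fp)

    print('Version :', about['version'])       # e.g. 2.02b3
    print('CI      :', about['default_ci'])    # fidle/ci/default.yml
    for dirname, title in about['toc'].items():
        print(f'  {dirname:15s} {title}')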
diff --git a/fidle/batch-ci.sh b/fidle/batch-ci.sh
deleted file mode 100644
index 4a3cce32d4a4ca96e848bea425786b8d5c668cc4..0000000000000000000000000000000000000000
--- a/fidle/batch-ci.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-# -----------------------------------------------
-#         _           _       _
-#        | |__   __ _| |_ ___| |__
-#        | '_ \ / _` | __/ __| '_ \
-#        | |_) | (_| | || (__| | | |
-#        |_.__/ \__,_|\__\___|_| |_|
-#                              Fidle at IDRIS
-# -----------------------------------------------
-# Full_gpu ci - pjluc 2021
-#
-# Soumission :  sbatch  /(...)/fidle/VAE/batch_slurm.sh
-# Suivi      :  squeue -u $USER
-
-# ==== Job parameters ==============================================
-
-#SBATCH --job-name="Fidle-ci"                          # nom du job
-#SBATCH --ntasks=1                                     # nombre de tâche (un unique processus ici)
-#SBATCH --gres=gpu:1                                   # nombre de GPU à réserver (un unique GPU ici)
-#SBATCH --cpus-per-task=10                             # nombre de coeurs à réserver (un quart du noeud)
-#SBATCH --hint=nomultithread                           # on réserve des coeurs physiques et non logiques
-#SBATCH --time=06:00:00                                # temps exécution maximum demande (HH:MM:SS)
-#SBATCH --output="Fidle_%j.out"                        # nom du fichier de sortie
-#SBATCH --error="Fidle_%j.err"                         # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH --mail-user=Jean-Luc.Parouty@grenoble-inp.fr
-#SBATCH --mail-type=ALL
-
-# ==== Notebook parameters =========================================
-
-#MODULE_ENV="tensorflow-gpu/py3/2.5.0"
-MODULE_ENV="tensorflow-gpu/py3/2.7.0"
-NOTEBOOK_DIR="$WORK/fidle/fidle"
-
-export FIDLE_OVERRIDE_RUNCI_profile_name='./ci/smart_gpu.yml'
-
-# Reset report file (./run/done/report.json)
-export FIDLE_OVERRIDE_RUNCI_reset='False'
-
-# Run if html output still exist
-export FIDLE_OVERRIDE_RUNCI_run_if_exist='False'
-
-# Notebooks to run (regex)
-export FIDLE_OVERRIDE_RUNCI_filter='.*'
-
-NOTEBOOK_SRC1="02-running-ci-tests.ipynb"
-
-# ==================================================================
-
-export FIDLE_OVERRIDE_PROFILE
-
-echo '------------------------------------------------------------'
-echo "Start : $0"
-echo '------------------------------------------------------------'
-echo "Job id        : $SLURM_JOB_ID"
-echo "Job name      : $SLURM_JOB_NAME"
-echo "Job node list : $SLURM_JOB_NODELIST"
-echo '------------------------------------------------------------'
-echo "Notebook dir  : $NOTEBOOK_DIR"
-echo "Notebook src1 : $NOTEBOOK_SRC1"
-echo "Environment   : $MODULE_ENV"
-echo '------------------------------------------------------------'
-env | grep FIDLE_OVERRIDE | awk 'BEGIN { FS = "=" } ; { printf("%-35s : %s\n",$1,$2) }'
-echo '------------------------------------------------------------'
-
-# ---- Module
-
-module purge
-module load "$MODULE_ENV"
-
-# ---- Run it...
-
-cd $NOTEBOOK_DIR
-
-jupyter nbconvert --ExecutePreprocessor.timeout=-1 --to notebook --output "${NOTEBOOK_SRC1%.*}==${SLURM_JOB_ID}==.ipynb" --execute "$NOTEBOOK_SRC1"
-
-echo 'Done.'
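This deleted batch script drove the CI run through FIDLE_OVERRIDE_<ID>_<name> environment variables, the same convention cookci.py (deleted below) sets for each override. A sketch of how a notebook-side helper can pick such values up (the real pwk.override implementation is not shown in this diff, so this is an assumption):

    import os

    def override(notebook_id: str, name: str, current_value):
        # Look for FIDLE_OVERRIDE_<ID>_<name>; keep the current value if unset
        env_name = f'FIDLE_OVERRIDE_{notebook_id.upper()}_{name}'
        return os.environ.get(env_name, current_value)

    profile_name = override('RUNCI', 'profile_name', './ci/small_cpu.yml')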
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index 60267f2d385bfb5ff0e99540a41e0ba0791eb333..d124c1b8d96185844543691698a978a257358e31 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -1,120 +1,92 @@
-_metadata_:
+campain:
   version: '1.0'
-  output_tag: ==ci==
-  save_figs: true
-  description: Default generated profile
-  output_ipynb: <directory for ipynb>
-  output_html: <directory for html>
-  report_json: <report json file>
-  report_error: <error file>
-Nb_LINR1:
-  notebook_id: LINR1
-  notebook_dir: LinearReg
-  notebook_src: 01-Linear-Regression.ipynb
-  notebook_tag: default
-Nb_GRAD1:
-  notebook_id: GRAD1
-  notebook_dir: LinearReg
-  notebook_src: 02-Gradient-descent.ipynb
-  notebook_tag: default
-Nb_POLR1:
-  notebook_id: POLR1
-  notebook_dir: LinearReg
-  notebook_src: 03-Polynomial-Regression.ipynb
-  notebook_tag: default
-Nb_LOGR1:
-  notebook_id: LOGR1
-  notebook_dir: LinearReg
-  notebook_src: 04-Logistic-Regression.ipynb
-  notebook_tag: default
-Nb_PER57:
-  notebook_id: PER57
-  notebook_dir: IRIS
-  notebook_src: 01-Simple-Perceptron.ipynb
-  notebook_tag: default
-Nb_BHPD1:
-  notebook_id: BHPD1
-  notebook_dir: BHPD
-  notebook_src: 01-DNN-Regression.ipynb
-  notebook_tag: default
+  description: Automatically generated ci profile (07/10/22 16:36:10)
+  directory: ./campains/default
+  existing_notebook: remove     # remove|skip
+  report_template: fidle        # fidle|default
+  timeout: 6000
+
+#
+# ------------ LinearReg
+#
+LINR1:
+  notebook: LinearReg/01-Linear-Regression.ipynb
+GRAD1:
+  notebook: LinearReg/02-Gradient-descent.ipynb
+POLR1:
+  notebook: LinearReg/03-Polynomial-Regression.ipynb
+LOGR1:
+  notebook: LinearReg/04-Logistic-Regression.ipynb
+
+#
+# ------------ IRIS
+#
+PER57:
+  notebook: IRIS/01-Simple-Perceptron.ipynb
+
+#
+# ------------ BHPD
+#
+BHPD1:
+  notebook: BHPD/01-DNN-Regression.ipynb
   overrides:
     fit_verbosity: default
-Nb_BHPD2:
-  notebook_id: BHPD2
-  notebook_dir: BHPD
-  notebook_src: 02-DNN-Regression-Premium.ipynb
-  notebook_tag: default
+BHPD2:
+  notebook: BHPD/02-DNN-Regression-Premium.ipynb
   overrides:
     fit_verbosity: default
-Nb_MNIST1:
-  notebook_id: MNIST1
-  notebook_dir: MNIST
-  notebook_src: 01-DNN-MNIST.ipynb
-  notebook_tag: default
+
+#
+# ------------ MNIST
+#
+MNIST1:
+  notebook: MNIST/01-DNN-MNIST.ipynb
   overrides:
     fit_verbosity: default
-Nb_MNIST2:
-  notebook_id: MNIST2
-  notebook_dir: MNIST
-  notebook_src: 02-CNN-MNIST.ipynb
-  notebook_tag: default
+MNIST2:
+  notebook: MNIST/02-CNN-MNIST.ipynb
   overrides:
     fit_verbosity: default
-Nb_GTSRB1:
-  notebook_id: GTSRB1
-  notebook_dir: GTSRB
-  notebook_src: 01-Preparation-of-data.ipynb
-  notebook_tag: default
+
+#
+# ------------ GTSRB
+#
+GTSRB1:
+  notebook: GTSRB/01-Preparation-of-data.ipynb
   overrides:
     scale: default
     output_dir: default
     progress_verbosity: default
-Nb_GTSRB2:
-  notebook_id: GTSRB2
-  notebook_dir: GTSRB
-  notebook_src: 02-First-convolutions.ipynb
-  notebook_tag: default
+GTSRB2:
+  notebook: GTSRB/02-First-convolutions.ipynb
   overrides:
-    run_dir: default
     enhanced_dir: default
     dataset_name: default
     batch_size: default
     epochs: default
     scale: default
     fit_verbosity: default
-Nb_GTSRB3:
-  notebook_id: GTSRB3
-  notebook_dir: GTSRB
-  notebook_src: 03-Tracking-and-visualizing.ipynb
-  notebook_tag: default
+GTSRB3:
+  notebook: GTSRB/03-Tracking-and-visualizing.ipynb
   overrides:
-    run_dir: default
     enhanced_dir: default
     dataset_name: default
     batch_size: default
     epochs: default
     scale: default
     fit_verbosity: default
-Nb_GTSRB4:
-  notebook_id: GTSRB4
-  notebook_dir: GTSRB
-  notebook_src: 04-Data-augmentation.ipynb
-  notebook_tag: default
+GTSRB4:
+  notebook: GTSRB/04-Data-augmentation.ipynb
   overrides:
-    run_dir: default
     enhanced_dir: default
     dataset_name: default
     batch_size: default
     epochs: default
     scale: default
     fit_verbosity: default
-Nb_GTSRB5:
-  notebook_id: GTSRB5
-  notebook_dir: GTSRB
-  notebook_src: 05-Full-convolutions.ipynb
-  notebook_tag: default
+GTSRB5:
+  notebook: GTSRB/05-Full-convolutions.ipynb
   overrides:
-    run_dir: default
     enhanced_dir: default
     datasets: default
     models: default
@@ -123,48 +95,27 @@ Nb_GTSRB5:
     scale: default
     with_datagen: default
     fit_verbosity: default
-Nb_GTSRB6:
-  notebook_id: GTSRB6
-  notebook_dir: GTSRB
-  notebook_src: 06-Notebook-as-a-batch.ipynb
-  notebook_tag: default
-Nb_GTSRB7:
-  notebook_id: GTSRB7
-  notebook_dir: GTSRB
-  notebook_src: 07-Show-report.ipynb
-  notebook_tag: default
-  overrides:
-    run_dir: default
+GTSRB6:
+  notebook: GTSRB/06-Notebook-as-a-batch.ipynb
+GTSRB7:
+  notebook: GTSRB/07-Show-report.ipynb
+  overrides:
     report_dir: default
-Nb_GTSRB10:
-  notebook_id: GTSRB10
-  notebook_dir: GTSRB
-  notebook_src: batch_oar.sh
-  notebook_tag: default
-Nb_GTSRB11:
-  notebook_id: GTSRB11
-  notebook_dir: GTSRB
-  notebook_src: batch_slurm.sh
-  notebook_tag: default
-Nb_IMDB1:
-  notebook_id: IMDB1
-  notebook_dir: IMDB
-  notebook_src: 01-One-hot-encoding.ipynb
-  notebook_tag: default
-  overrides:
-    run_dir: default
+
+#
+# ------------ IMDB
+#
+IMDB1:
+  notebook: IMDB/01-One-hot-encoding.ipynb
+  overrides:
     vocab_size: default
     hide_most_frequently: default
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_IMDB2:
-  notebook_id: IMDB2
-  notebook_dir: IMDB
-  notebook_src: 02-Keras-embedding.ipynb
-  notebook_tag: default
+IMDB2:
+  notebook: IMDB/02-Keras-embedding.ipynb
   overrides:
-    run_dir: default
     vocab_size: default
     hide_most_frequently: default
     review_len: default
@@ -173,33 +124,21 @@ Nb_IMDB2:
     epochs: default
     output_dir: default
     fit_verbosity: default
-Nb_IMDB3:
-  notebook_id: IMDB3
-  notebook_dir: IMDB
-  notebook_src: 03-Prediction.ipynb
-  notebook_tag: default
+IMDB3:
+  notebook: IMDB/03-Prediction.ipynb
   overrides:
-    run_dir: default
     vocab_size: default
     review_len: default
     dictionaries_dir: default
-Nb_IMDB4:
-  notebook_id: IMDB4
-  notebook_dir: IMDB
-  notebook_src: 04-Show-vectors.ipynb
-  notebook_tag: default
+IMDB4:
+  notebook: IMDB/04-Show-vectors.ipynb
   overrides:
-    run_dir: default
     vocab_size: default
     review_len: default
     dictionaries_dir: default
-Nb_IMDB5:
-  notebook_id: IMDB5
-  notebook_dir: IMDB
-  notebook_src: 05-LSTM-Keras.ipynb
-  notebook_tag: default
+IMDB5:
+  notebook: IMDB/05-LSTM-Keras.ipynb
   overrides:
-    run_dir: default
     vocab_size: default
     hide_most_frequently: default
     review_len: default
@@ -208,78 +147,60 @@ Nb_IMDB5:
     epochs: default
     fit_verbosity: default
     scale: default
-Nb_LADYB1:
-  notebook_id: LADYB1
-  notebook_dir: SYNOP
-  notebook_src: LADYB1-Ladybug.ipynb
-  notebook_tag: default
+
+#
+# ------------ SYNOP
+#
+LADYB1:
+  notebook: SYNOP/LADYB1-Ladybug.ipynb
   overrides:
-    run_dir: default
     scale: default
     train_prop: default
     sequence_len: default
     predict_len: default
     batch_size: default
     epochs: default
-Nb_SYNOP1:
-  notebook_id: SYNOP1
-  notebook_dir: SYNOP
-  notebook_src: SYNOP1-Preparation-of-data.ipynb
-  notebook_tag: default
+SYNOP1:
+  notebook: SYNOP/SYNOP1-Preparation-of-data.ipynb
   overrides:
-    run_dir: default
     output_dir: default
-Nb_SYNOP2:
-  notebook_id: SYNOP2
-  notebook_dir: SYNOP
-  notebook_src: SYNOP2-First-predictions.ipynb
-  notebook_tag: default
+SYNOP2:
+  notebook: SYNOP/SYNOP2-First-predictions.ipynb
   overrides:
-    run_dir: default
     scale: default
     train_prop: default
     sequence_len: default
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_SYNOP3:
-  notebook_id: SYNOP3
-  notebook_dir: SYNOP
-  notebook_src: SYNOP3-12h-predictions.ipynb
-  notebook_tag: default
+SYNOP3:
+  notebook: SYNOP/SYNOP3-12h-predictions.ipynb
   overrides:
-    run_dir: default
     iterations: default
     scale: default
     train_prop: default
     sequence_len: default
-Nb_TRANS1:
-  notebook_id: TRANS1
-  notebook_dir: Transformers
-  notebook_src: 01-Distilbert.ipynb
-  notebook_tag: default
-Nb_TRANS2:
-  notebook_id: TRANS2
-  notebook_dir: Transformers
-  notebook_src: 02-distilbert_colab.ipynb
-  notebook_tag: default
-Nb_AE1:
-  notebook_id: AE1
-  notebook_dir: AE
-  notebook_src: 01-Prepare-MNIST-dataset.ipynb
-  notebook_tag: default
-  overrides:
-    run_dir: default
+
+#
+# ------------ Transformers
+#
+TRANS1:
+  notebook: Transformers/01-Distilbert.ipynb
+TRANS2:
+  notebook: Transformers/02-distilbert_colab.ipynb
+
+#
+# ------------ AE
+#
+AE1:
+  notebook: AE/01-Prepare-MNIST-dataset.ipynb
+  overrides:
     prepared_dataset: default
     scale: default
     progress_verbosity: default
-Nb_AE2:
-  notebook_id: AE2
-  notebook_dir: AE
-  notebook_src: 02-AE-with-MNIST.ipynb
-  notebook_tag: default
+AE2:
+  notebook: AE/02-AE-with-MNIST.ipynb
   overrides:
-    run_dir: default
     prepared_dataset: default
     dataset_seed: default
     scale: default
@@ -287,24 +208,16 @@ Nb_AE2:
     train_prop: default
     batch_size: default
     epochs: default
-Nb_AE3:
-  notebook_id: AE3
-  notebook_dir: AE
-  notebook_src: 03-AE-with-MNIST-post.ipynb
-  notebook_tag: default
+AE3:
+  notebook: AE/03-AE-with-MNIST-post.ipynb
   overrides:
-    run_dir: default
     prepared_dataset: default
     dataset_seed: default
     scale: default
     train_prop: default
-Nb_AE4:
-  notebook_id: AE4
-  notebook_dir: AE
-  notebook_src: 04-ExtAE-with-MNIST.ipynb
-  notebook_tag: default
+AE4:
+  notebook: AE/04-ExtAE-with-MNIST.ipynb
   overrides:
-    run_dir: default
     prepared_dataset: default
     dataset_seed: default
     scale: default
@@ -312,13 +225,9 @@ Nb_AE4:
     train_prop: default
     batch_size: default
     epochs: default
-Nb_AE5:
-  notebook_id: AE5
-  notebook_dir: AE
-  notebook_src: 05-ExtAE-with-MNIST.ipynb
-  notebook_tag: default
+AE5:
+  notebook: AE/05-ExtAE-with-MNIST.ipynb
   overrides:
-    run_dir: default
     prepared_dataset: default
     dataset_seed: default
     scale: default
@@ -326,13 +235,13 @@ Nb_AE5:
     train_prop: default
     batch_size: default
     epochs: default
-Nb_VAE1:
-  notebook_id: VAE1
-  notebook_dir: VAE
-  notebook_src: 01-VAE-with-MNIST.ipynb
-  notebook_tag: default
+
+#
+# ------------ VAE
+#
+VAE1:
+  notebook: VAE/01-VAE-with-MNIST.ipynb
   overrides:
-    run_dir: default
     latent_dim: default
     loss_weights: default
     scale: default
@@ -340,13 +249,9 @@ Nb_VAE1:
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_VAE2:
-  notebook_id: VAE2
-  notebook_dir: VAE
-  notebook_src: 02-VAE-with-MNIST.ipynb
-  notebook_tag: default
+VAE2:
+  notebook: VAE/02-VAE-with-MNIST.ipynb
   overrides:
-    run_dir: default
     latent_dim: default
     loss_weights: default
     scale: default
@@ -354,30 +259,18 @@ Nb_VAE2:
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_VAE3:
-  notebook_id: VAE3
-  notebook_dir: VAE
-  notebook_src: 03-VAE-with-MNIST-post.ipynb
-  notebook_tag: default
+VAE3:
+  notebook: VAE/03-VAE-with-MNIST-post.ipynb
   overrides:
-    run_dir: default
     scale: default
     seed: default
-Nb_VAE5:
-  notebook_id: VAE5
-  notebook_dir: VAE
-  notebook_src: 05-About-CelebA.ipynb
-  notebook_tag: default
+VAE5:
+  notebook: VAE/05-About-CelebA.ipynb
   overrides:
-    run_dir: default
     progress_verbosity: default
-Nb_VAE6:
-  notebook_id: VAE6
-  notebook_dir: VAE
-  notebook_src: 06-Prepare-CelebA-datasets.ipynb
-  notebook_tag: default
+VAE6:
+  notebook: VAE/06-Prepare-CelebA-datasets.ipynb
   overrides:
-    run_dir: default
     progress_verbosity: default
     scale: default
     seed: default
@@ -385,23 +278,15 @@ Nb_VAE6:
     image_size: default
     output_dir: default
     exit_if_exist: default
-Nb_VAE7:
-  notebook_id: VAE7
-  notebook_dir: VAE
-  notebook_src: 07-Check-CelebA.ipynb
-  notebook_tag: default
+VAE7:
+  notebook: VAE/07-Check-CelebA.ipynb
   overrides:
-    run_dir: default
     image_size: default
     enhanced_dir: default
     progress_verbosity: default
-Nb_VAE8:
-  notebook_id: VAE8
-  notebook_dir: VAE
-  notebook_src: 08-VAE-with-CelebA.ipynb
-  notebook_tag: default
+VAE8:
+  notebook: VAE/08-VAE-with-CelebA.ipynb
   overrides:
-    run_dir: default
     scale: default
     image_size: default
     enhanced_dir: default
@@ -410,13 +295,9 @@ Nb_VAE8:
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_VAE9:
-  notebook_id: VAE9
-  notebook_dir: VAE
-  notebook_src: 09-VAE-with-CelebA-192x160.ipynb
-  notebook_tag: default
+VAE9:
+  notebook: VAE/09-VAE-with-CelebA-192x160.ipynb
   overrides:
-    run_dir: default
     scale: default
     image_size: default
     enhanced_dir: default
@@ -425,64 +306,51 @@ Nb_VAE9:
     batch_size: default
     epochs: default
     fit_verbosity: default
-Nb_VAE10:
-  notebook_id: VAE10
-  notebook_dir: VAE
-  notebook_src: batch_slurm.sh
-  notebook_tag: default
-Nb_SHEEP1:
-  notebook_id: SHEEP1
-  notebook_dir: DCGAN
-  notebook_src: 01-DCGAN-Draw-me-a-sheep.ipynb
-  notebook_tag: default
-  overrides:
-    run_dir: default
+VAE10:
+  notebook: VAE/10-VAE-with-CelebA-post.ipynb
+  overrides:
+    image_size: default
+    enhanced_dir: default
+
+#
+# ------------ DCGAN
+#
+SHEEP1:
+  notebook: DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
+  overrides:
     scale: default
     latent_dim: default
     epochs: default
     batch_size: default
     num_img: default
     fit_verbosity: default
-Nb_SHEEP2:
-  notebook_id: SHEEP2
-  notebook_dir: DCGAN
-  notebook_src: 02-WGANGP-Draw-me-a-sheep.ipynb
-  notebook_tag: default
+SHEEP2:
+  notebook: DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb
   overrides:
-    run_dir: default
     scale: default
     latent_dim: default
     epochs: default
     batch_size: default
     num_img: default
     fit_verbosity: default
-Nb_DRL1:
-  notebook_id: DRL1
-  notebook_dir: DRL
-  notebook_src: FIDLE_DQNfromScratch.ipynb
-  notebook_tag: default
-Nb_DRL2:
-  notebook_id: DRL2
-  notebook_dir: DRL
-  notebook_src: FIDLE_rl_baselines_zoo.ipynb
-  notebook_tag: default
-Nb_ACTF1:
-  notebook_id: ACTF1
-  notebook_dir: Misc
-  notebook_src: Activation-Functions.ipynb
-  notebook_tag: default
-Nb_NP1:
-  notebook_id: NP1
-  notebook_dir: Misc
-  notebook_src: Numpy.ipynb
-  notebook_tag: default
-Nb_SCRATCH1:
-  notebook_id: SCRATCH1
-  notebook_dir: Misc
-  notebook_src: Scratchbook.ipynb
-  notebook_tag: default
-Nb_TSB1:
-  notebook_id: TSB1
-  notebook_dir: Misc
-  notebook_src: Using-Tensorboard.ipynb
-  notebook_tag: default
+
+#
+# ------------ DRL
+#
+DRL1:
+  notebook: DRL/FIDLE_DQNfromScratch.ipynb
+DRL2:
+  notebook: DRL/FIDLE_rl_baselines_zoo.ipynb
+
+#
+# ------------ Misc
+#
+ACTF1:
+  notebook: Misc/Activation-Functions.ipynb
+NP1:
+  notebook: Misc/Numpy.ipynb
+SCRATCH1:
+  notebook: Misc/Scratchbook.ipynb
+TSB1:
+  notebook: Misc/Using-Tensorboard.ipynb
+  overrides: ??
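The rewritten profile is much flatter: a single campain header carries the global settings, entries are keyed directly by notebook id, the three fields notebook_dir/notebook_src/notebook_tag collapse into one notebook path, and the ubiquitous run_dir: default overrides disappear. A sketch of reading the new layout (assumes PyYAML; the runner that replaced the deleted cookci.py below is not part of this diff):

    import yaml

    with open('fidle/ci/default.yml') as fp:
        profile = yaml.safe_load(fp)

    campain = profile.pop('campain')   # global settings: directory, timeout, ...
    for run_id, about in profile.items():
        notebook  = about['notebook']  # e.g. LinearReg/01-Linear-Regression.ipynb
        overrides = about.get('overrides')
        if not isinstance(overrides, dict):   # guard against placeholder values
            overrides = {}
        print(f'{run_id:10s} {notebook}  overrides={list(overrides)}')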
diff --git a/fidle/config.py b/fidle/config.py
deleted file mode 100644
index 637d15da657b479dbe89aa4f73e0ff8cd90d3cf3..0000000000000000000000000000000000000000
--- a/fidle/config.py
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-# ==================================================================
-#  ____                 _   _           _  __        __         _
-# |  _ \ _ __ __ _  ___| |_(_) ___ __ _| | \ \      / /__  _ __| | __
-# | |_) | '__/ _` |/ __| __| |/ __/ _` | |  \ \ /\ / / _ \| '__| |/ /
-# |  __/| | | (_| | (__| |_| | (_| (_| | |   \ V  V / (_) | |  |   <
-# |_|   |_|  \__,_|\___|\__|_|\___\__,_|_|    \_/\_/ \___/|_|  |_|\_\
-#                                                      Configuration
-# ==================================================================
-# Few configuration stuffs for the Fidle practical work notebooks
-# Jean-Luc Parouty 2020
-
-
-# ---- Version -----------------------------------------------------
-#
-VERSION = '2.0.35'
-
-# ---- Default notebook name ---------------------------------------
-#
-DEFAULT_NOTEBOOK_NAME = "Unknown"
-
-# ---- Styles ------------------------------------------------------
-#
-FIDLE_MPLSTYLE = '../fidle/mplstyles/custom.mplstyle'
-FIDLE_CSSFILE  = '../fidle/css/custom.css'
-
-# ---- Save figs or not (yes|no)
-#      Overided by env : FIDLE_SAVE_FIGS
-#      
-SAVE_FIGS    = False
-
-# ---- Catalog file, a json description of all notebooks ------------
-#
-CATALOG_FILE    = '../fidle/logs/catalog.json'
-PROFILE_FILE    = '../fidle/ci/default.yml'
-
-# ---- CI report files ----------------------------------------------
-#
-CI_REPORT_JSON = '../fidle/logs/ci_report.json'
-CI_REPORT_HTML = '../fidle/logs/ci_report.html'
-CI_ERROR_FILE  = '../fidle/logs/ci_ERROR.txt'
-
-# ---- Used modules -------------------------------------------------
-#
-USED_MODULES   = ['tensorflow','tensorflow.keras','numpy', 'sklearn',
-                  'skimage', 'matplotlib','plotly','pandas','jupyterlab',
-                  'pytorch', 'torchvision']
-
-# -------------------------------------------------------------------
diff --git a/fidle/cookci.py b/fidle/cookci.py
deleted file mode 100644
index 3c7c7ba32e7b1e9d736de2043fc477afb3611094..0000000000000000000000000000000000000000
--- a/fidle/cookci.py
+++ /dev/null
@@ -1,545 +0,0 @@
-
-# -----------------------------------------------------------------------------
-#                         ____                 _       ____ _
-#                       / ___|___   ___   ___| | __  / ___(_)
-#                      | |   / _ \ / _ \ / __| |/ / | |   | |
-#                      | |__| (_) | (_) | (__|   <  | |___| |
-#                      \____\___/ \___/ \___|_|\_\  \____|_|
-#
-#                                           Fidle mod for continous integration
-# -----------------------------------------------------------------------------
-#
-# A simple module to run all notebooks with parameters overriding
-# Jean-Luc Parouty 2021
-
-import sys,os,platform
-import json
-import datetime, time
-import nbformat
-from nbconvert               import HTMLExporter
-from nbconvert.preprocessors import ExecutePreprocessor, CellExecutionError
-from asyncio import CancelledError
-import re
-import yaml
-import base64
-from collections import OrderedDict
-from IPython.display import display,Image,Markdown,HTML
-import pandas as pd
-
-sys.path.append('..')
-import fidle.config as config
-import fidle.cookindex as cookindex
-
-VERSION = '1.4.1'
-
-start_time = {}
-end_time   = {}
-
-_report_json  = None
-_report_error = None
-
-
-       
-def load_profile(filename):
-    '''Load yaml profile'''
-    with open(filename,'r') as fp:
-        profile=yaml.load(fp, Loader=yaml.FullLoader)
-        print(f'\nLoad profile :{filename}')
-        print('  - Entries : ',len(profile)-1)
-        return profile
-    
-    
-def run_profile(profile_name, reset=False, run_if_exist=False, filter=r'.*', top_dir='..'):
-    '''
-    Récupère la liste des notebooks et des paramètres associés,
-    décrit dans le profile, et pour chaque notebook :
-    Positionner les variables d'environnement pour l'override
-    Charge le notebook
-    Exécute celui-ci
-    Sauvegarde le notebook résultat, avec son nom taggé.
-    Params:
-        profile_name : nom du profile d'éxécution
-        top_dir : chemin relatif vers la racine fidle (..)
-    '''
-
-    print(f'\n=== Run profile session - FIDLE 2021')
-    print(f'=== Version : {VERSION}')
-    
-    chrono_start('main')
-    
-    # ---- Retrieve profile ---------------------------------------------------
-    #
-    profile   = load_profile(profile_name)
-    config    = profile['_metadata_']
-    notebooks = profile
-    del notebooks['_metadata_']   
-    
-    # ---- Report file --------------------------------------------------------
-    #
-    metadata = config
-    metadata['host']    = platform.uname()[1]
-    metadata['profile'] = profile_name
-
-    report_json  = top_dir + '/' + config['report_json' ]
-    report_error = top_dir + '/' + config['report_error']
-
-    init_ci_report(report_json, report_error, metadata, reset=reset)
-    
-    # ---- Where I am, me and the output_dir ----------------------------------
-    #
-    home         = os.getcwd()
-    output_ipynb = config['output_ipynb']
-    output_html  = config['output_html']
-        
-    # ---- Environment vars ---------------------------------------------------
-    #
-    print('\nSet environment var:')
-    environment_vars = config['environment_vars']
-    for name,value in environment_vars.items():
-        os.environ[name] = str(value)
-        print(f'  - {name:20s} = {value}')
-
-    # ---- For each notebook --------------------------------------------------
-    #
-    print('\n---- Start running process ------------------------')
-    for run_id,about in notebooks.items():
-
-        # ---- Filtering ------------------------------------------------------
-        #
-        if not re.match(filter, run_id):
-            continue
-        
-        print(f'\n  - Run : {run_id}')
-
-        # ---- Get notebook infos ---------------------------------------------
-        #
-        notebook_id   = about['notebook_id']
-        notebook_dir  = about['notebook_dir']
-        notebook_src  = about['notebook_src']
-        notebook_name = os.path.splitext(notebook_src)[0]
-        notebook_tag  = about['notebook_tag']
-        overrides     = about.get('overrides',None)
-        
-
-        # ---- Output name ----------------------------------------------------
-        #
-        if notebook_tag=='default':
-            output_name  = notebook_name + config['output_tag']
-        else:
-            output_name  = notebook_name + notebook_tag
- 
-        # ---- Run if exist ---------------------------------------------------
-        #
-        done_html = os.path.abspath( f'{top_dir}/{output_html}/{notebook_dir}/{output_name}.html' )
-        if (os.path.isfile(done_html) is True) and (run_if_exist is False) : continue
-
-        # ---- Go to the right place ------------------------------------------
-        #
-        os.chdir(f'{top_dir}/{notebook_dir}')
-
-        # ---- Override ------------------------------------------------------- 
-        
-        to_unset=[]
-        if isinstance(overrides,dict):
-            print('    - Overrides :')
-            for name,value in overrides.items():
-                # ---- Default : no nothing
-                if value=='default' : continue
-                #  ---- Set env
-                env_name  = f'FIDLE_OVERRIDE_{notebook_id.upper()}_{name}'
-                env_value = str(value)
-                os.environ[env_name] = env_value
-                # ---- For cleaning
-                to_unset.append(env_name)
-                # ---- Fine :(-)
-                print(f'      - {env_name:38s} = {env_value}')
-    
-        # ---- Run it ! -------------------------------------------------------
-
-        # ---- Go to the notebook_dir
-        #
-        os.chdir(f'{top_dir}/{notebook_dir}')
-
-        # ---- Top chrono - Start
-        #
-        chrono_start('nb')
-        update_ci_report(run_id, notebook_id, notebook_dir, notebook_src, output_name, start=True)
-        
-        # ---- Try to run...
-        #
-        print('    - Run notebook...',end='')
-        try:
-            notebook = nbformat.read( f'{notebook_src}', nbformat.NO_CONVERT)
-            ep = ExecutePreprocessor(timeout=6000, kernel_name="python3")
-            ep.preprocess(notebook)
-        except CellExecutionError as e:
-            happy_end = False
-            output_name = notebook_name + '==ERROR=='
-            print('\n   ','*'*60)
-            print( '    ** AAARG.. An error occured : ',type(e).__name__)
-            print(f'    ** See notebook :  {output_name} for details.')
-            print('   ','*'*60)
-        else:
-            happy_end = True
-            print('..done.')
-
-        # ---- Top chrono - Stop
-        #
-        chrono_stop('nb')        
-        update_ci_report(run_id, notebook_id, notebook_dir, notebook_src, output_name, end=True, happy_end=happy_end)
-        print('    - Duration : ',chrono_get_delay('nb') )
-
-        # ---- Back to home
-        #
-        os.chdir(home)
-
-        # ---- Check for images to embed --------------------------------------
-        #      We just try to embed <img src="..."> tags in some markdown cells.
-        #      Very fast, and sufficient for the header/ender.
-        #
-        for cell in notebook.cells:
-            if cell['cell_type'] == 'markdown':
-                cell.source = images_embedder(cell.source)
-
-        # ---- Save notebook as ipynb -----------------------------------------
-        #
-        if output_ipynb.lower()!="none":
-            save_dir = os.path.abspath( f'{top_dir}/{output_ipynb}/{notebook_dir}' )
-            os.makedirs(save_dir, mode=0o750, exist_ok=True)
-            with open(  f'{save_dir}/{output_name}.ipynb', mode="w", encoding='utf-8') as fp:
-                nbformat.write(notebook, fp)
-            print(f'    - Saved {save_dir}/{output_name}.ipynb')
-
-        # ---- Save notebook as html ------------------------------------------
-        #
-        if output_html.lower()!="none":
-
-            # ---- Convert notebook to html
-            exporter = HTMLExporter()
-            exporter.template_name = 'classic'
-            (body_html, resources_html) = exporter.from_notebook_node(notebook)
-            
-            # ---- Embed images
-            # body_html = images_embedder(body_html)
-            
-            # ---- Save
-            save_dir = os.path.abspath( f'{top_dir}/{output_html}/{notebook_dir}' )
-            os.makedirs(save_dir, mode=0o750, exist_ok=True)
-            with open(  f'{save_dir}/{output_name}.html', mode='wb') as fp:
-                fp.write(body_html.encode("utf-8"))
-            print(f'    - Saved {save_dir}/{output_name}.html')
-
-        # ---- Clean all ------------------------------------------------------
-        #
-        for env_name in to_unset:
-            del os.environ[env_name]
-
-    # ---- End of running
-    chrono_stop('main')
-    print('\n---- End of running process -----------------------')
-
-    print('\nDuration :', chrono_get_delay('main'))
-    complete_ci_report()
-    
-    
-
-def get_base64_image(filename):
-    '''
-    Read an image file and return as a base64 encoded version
-    params:
-        filename : image filemane
-    return:
-        base 64 encoded image
-    '''
-    with open(filename, "rb") as image_file:
-        img64 = base64.b64encode(image_file.read())
-    src="data:image/svg+xml;base64,"+img64.decode("utf-8") 
-    return src
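-
-# Example (a minimal sketch, reusing the repository logo; note that the
-# data-URI prefix above assumes an SVG file):
-#
-#   src  = get_base64_image('./img/00-Fidle-logo-01-80px.svg')
-#   html = f'<img src="{src}">'    # self-contained, embeddable image tag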
-
-    
-def images_embedder(html):
-    '''
-    Images embedder. Search images src="..." link and replace them
-    by base64 embedded images.
-    params:
-        html: input html
-    return:
-        output html
-    '''
-    for img_tag in re.findall('.*(<img .*>).*', html):
-        for src_tag in re.findall('.*src=[\'\"](.*)[\'\"].*', img_tag):
-            if src_tag.startswith('data:'): continue
-            src_b64 = get_base64_image(src_tag)
-            img_b64 = img_tag.replace(src_tag, src_b64)
-            html = html.replace(img_tag,img_b64)
-    return html
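-
-# Example (a minimal sketch; the input markup is illustrative):
-#
-#   html_in  = '<p><img src="./img/00-Fidle-logo-01-80px.svg"></p>'
-#   html_out = images_embedder(html_in)
-#   # -> '<p><img src="data:image/svg+xml;base64,..."></p>'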
-
-
-
-
-def chrono_start(id='default'):
-    global start_time
-    start_time[id] = datetime.datetime.now()
-        
-def chrono_stop(id='default'):
-    global end_time
-    end_time[id] = datetime.datetime.now()
-
-def chrono_get_delay(id='default', in_seconds=False):
-    global start_time, end_time
-    delta = end_time[id] - start_time[id]
-    if in_seconds:
-        return round(delta.total_seconds(),2)
-    else:
-        delta = delta - datetime.timedelta(microseconds=delta.microseconds)
-        return str(delta)
-
-def chrono_get_start(id='default'):
-    global start_time
-    return start_time[id].strftime("%d/%m/%y %H:%M:%S")
-
-def chrono_get_end(id='default'):
-    global end_time
-    return end_time[id].strftime("%d/%m/%y %H:%M:%S")
-
-def reset_chrono():
-    global start_time, end_time
-    start_time, end_time = {},{}
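-
-# Example usage of the chrono helpers (a minimal sketch; the 'demo' id is arbitrary):
-#
-#   chrono_start('demo')
-#   ...                                               # some long-running work
-#   chrono_stop('demo')
-#   print(chrono_get_delay('demo'))                   # e.g. '0:01:23'
-#   print(chrono_get_delay('demo', in_seconds=True))  # e.g. 83.42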
-    
-
-def init_ci_report(report_json, report_error, metadata, reset=True):
-    
-    global _report_json, _report_error
-    
-    _report_json  = os.path.abspath(report_json)
-    _report_error = os.path.abspath(report_error)
-
-    print('\nInit report :')
-    print(f'  - report file is : {_report_json}')
-    print(f'  - error  file is : {_report_error}')
-
-    # ---- Create directory if it doesn't exist
-    #
-    report_dir=os.path.dirname(report_json)
-    os.makedirs(report_dir, mode=0o750, exist_ok=True)
-    
-    # ---- Reset ?
-    #
-    if reset is False and os.path.isfile(_report_json):
-        with open(_report_json) as fp:
-            report = json.load(fp)
-        print('  - Report is reloaded')
-    else:
-        report={}
-        print('  - Report is reset')
-
-    metadata['reset']=reset
-    metadata['start']=chrono_get_start('main')
-
-    # ---- Create json report
-    #
-    report['_metadata_']=metadata
-    with open(_report_json,'wt') as fp:
-        json.dump(report,fp,indent=4)
-    print('  - Report file saved')
-
-    # ---- Reset error
-    #
-    if os.path.exists(_report_error):
-        os.remove(_report_error)
-        print('  - Error file removed')
-
-    
-def complete_ci_report():
-
-    global _report_json
-
-    with open(_report_json) as fp:
-        report = json.load(fp)
-        
-    report['_metadata_']['end']      = chrono_get_end('main')
-    report['_metadata_']['duration'] = chrono_get_delay('main')
-    
-    with open(_report_json,'wt') as fp:
-        json.dump(report,fp,indent=4)
-        
-    print('\nComplete CI report :')
-    print('  - Report file saved')
-    
-    
-def update_ci_report(run_id, notebook_id, notebook_dir, notebook_src, notebook_out, start=False, end=False, happy_end=True):
-    global start_time, end_time
-    global _report_json, _report_error
-    
-    # ---- Load it
-    with open(_report_json) as fp:
-        report = json.load(fp)
-        
-    # ---- Update as a start
-    if start is True:
-        report[run_id]              = {}
-        report[run_id]['id']        = notebook_id
-        report[run_id]['dir']       = notebook_dir
-        report[run_id]['src']       = notebook_src
-        report[run_id]['out']       = notebook_out
-        report[run_id]['start']     = chrono_get_start('nb')
-        report[run_id]['end']       = ''
-        report[run_id]['duration']  = 'Unfinished...'
-        report[run_id]['state']     = 'Unfinished...'
-        report['_metadata_']['end']      = 'Unfinished...'
-        report['_metadata_']['duration'] = 'Unfinished...'
-
-
-    # ---- Update as an end
-    if end is True:
-        report[run_id]['end']       = chrono_get_end('nb')
-        report[run_id]['duration']  = chrono_get_delay('nb')
-        report[run_id]['state']     = 'ok' if happy_end else 'ERROR'
-        report[run_id]['out']       = notebook_out     # changed in case of error
-
-    # ---- Save report
-    with open(_report_json,'wt') as fp:
-        json.dump(report,fp,indent=4)
-
-    if not happy_end:
-        with open(_report_error, 'a') as fp:
-            print(f"See : {notebook_dir}/{notebook_out} ", file=fp)
-        
-        
-
-
-def build_ci_report(profile_name, top_dir='..'):
-    
-    print('\n== Build CI Report - FIDLE 2021')
-    print(f'== Version : {VERSION}')
-
-
-    profile   = load_profile(profile_name)
-    config    = profile['_metadata_']
-
-    report_json  = top_dir + '/' + config['report_json' ]
-    report_error = top_dir + '/' + config['report_error']
-
-    report_json  = os.path.abspath(report_json)
-    report_error = os.path.abspath(report_error)
-
-    # ---- Load report
-    #
-    print('\nReport in progress:')
-    with open(report_json) as infile:
-        report = json.load( infile )
-    print(f'  - Loaded json report file : {report_json}')
-
-    # ---- metadata
-    #
-    metadata=report['_metadata_']
-    del report['_metadata_']
-
-    output_html = metadata['output_html']
-
-    if output_html.lower()=='none':
-        print('  - No HTML output is specified in profile...')
-        return
-    
-    reportfile = os.path.abspath( f'{top_dir}/{output_html}/index.html' )
-
-    # ---- HTML for metadata
-    #
-    html_metadata = ''
-    for name,value in metadata.items():
-        html_metadata += f'<b>{name.title()}</b> : {value}  <br>\n'
-
-    # ---- HTML for report    
-    #
-    html_report = '<table>'
-    html_report += '<tr><th>Directory</th><th>Id</th><th>Notebook</th><th>Start</th><th>Duration</th><th>State</th></tr>\n'
-    for id,entry in report.items():
-        dir   = entry['dir']
-        src   = entry['src']
-        out   = entry['out']+'.html'
-        start = entry['start']
-        end   = entry['end']
-        dur   = entry['duration']
-        state = entry['state']
-
-        cols = []
-        cols.append( f'<a href="{dir}/{out}" target="_blank">{dir}</a>'       )
-        cols.append( f'<a href="{dir}/{out}" target="_blank">{id}</a>'  )
-        cols.append( f'<a href="{dir}/{out}" target="_blank">{src}</a>' )
-        cols.append( start )
-        cols.append( dur   )
-        cols.append( state )
-
-        html_report+='<tr>'
-        for c in cols:
-            html_report+=f'<td>{c}</td>'
-        html_report+='</tr>\n'
-
-    html_report+='</table>'
-
-    body_html = _get_html_report(html_metadata, html_report)
-    with open(reportfile, "wt") as fp:
-        fp.write(body_html)
-    print(f'  - Saved HTML report : {reportfile}')
-            
-
-    
-
-
-def _get_html_report(html_metadata, html_report):
-
-    with open('./img/00-Fidle-header-01.svg','r') as fp:
-        logo_header = fp.read()
-
-    with open('./img/00-Fidle-logo-01-80px.svg','r') as fp:
-        logo_ender = fp.read()
-
-    html = f"""\
-    <html>
-        <head><title>FIDLE - CI Report</title></head>
-        <body>
-        <style>
-            body{{
-                  font-family: sans-serif;
-            }}
-            div.title{{ 
-                font-size: 1.4em;
-                font-weight: bold;
-                padding: 40px 0px 10px 0px; }}
-            a{{
-                color: SteelBlue;
-                text-decoration:none;
-            }}
-            table{{      
-                  border-collapse : collapse;
-                  font-size : 0.9em;
-            }}
-            td{{
-                  border-style: solid;
-                  border-width:  thin;
-                  border-color:  lightgrey;
-                  padding: 5px 20px 5px 20px;
-            }}
-            .metadata{{ padding: 10px 0px 10px 30px; font-size: 0.9em; }}
-            .result{{ padding: 10px 0px 10px 30px; }}
-        </style>
-
-            {logo_header}
-
-            <div class='title'>Notebooks performed :</div>
-            <div class="result">
-                <p>Here is a "correction" (a fully-executed version) of all the notebooks.</p>
-                <p>These notebooks have been run on Jean-Zay, on GPU (V100), and the results are provided here in HTML format.</p>
-                {html_report}
-            </div>
-            <div class='title'>Metadata :</div>
-            <div class="metadata">
-                {html_metadata}
-            </div>
-
-            {logo_ender}
-
-            </body>
-    </html>
-    """
-    return html
diff --git a/fidle/cookindex.py b/fidle/cookindex.py
deleted file mode 100644
index df1c04895a9dfffbc55270b91b9001a8725c7d52..0000000000000000000000000000000000000000
--- a/fidle/cookindex.py
+++ /dev/null
@@ -1,285 +0,0 @@
-
-# -----------------------------------------------------------------------------
-#                ____                 _      ___           _
-#              / ___|___   ___   ___| | __ |_ _|_ __   __| | _____  __
-#             | |   / _ \ / _ \ / __| |/ /  | || '_ \ / _` |/ _ \ \/ /
-#             | |__| (_) | (_) | (__|   <   | || | | | (_| |  __/>  <
-#             \____\___/ \___/ \___|_|\_\ |___|_| |_|\__,_|\___/_/\_\
-#
-#                                                   Fidle mod for index cooking
-# -----------------------------------------------------------------------------
-#
-# A simple module to build the notebook catalog and update the README.
-# Jean-Luc Parouty 2021
-
-
-import nbformat
-from nbconvert.preprocessors import ExecutePreprocessor
-import pandas as pd
-from IPython.display import display, Markdown, HTML
-
-import re
-import sys, os, glob, yaml
-import json
-from datetime import datetime
-from collections import OrderedDict
-
-sys.path.append('..')
-import fidle.config as config
-
-# -----------------------------------------------------------------------------
-# To build README.md / README.ipynb
-# -----------------------------------------------------------------------------
-#    get_files          :  Get file lists
-#    get_notebook_infos :  Get infos about an entry
-#    get_catalog        :  Get a catalog of all entries
-# -----------------------------------------------------------------------------
-
-def build_catalog(directories):
-
-    # ---- Get the notebook list
-    #
-    files_list = get_files(directories.keys())
-
-    # ---- Get a detailed catalog for this list
-    #
-    catalog = get_catalog(files_list)
-
-    with open(config.CATALOG_FILE,'wt') as fp:
-        n=len(catalog)
-        json.dump(catalog,fp,indent=4)
-        print(f'Catalog saved as         : {config.CATALOG_FILE} ({n} entries)')
-
-
-def get_files(directories, top_dir='..'):
-    '''
-    Return a list of files from a given list of directories
-    args:
-        directories : list of directories
-        top_dir : location of these directories
-    return:
-        files : filenames list (without top_dir prefix)
-    '''
-    files = []
-    regex = re.compile('.*==.+?==.*')
-
-    for d in directories:
-        notebooks = glob.glob( f'{top_dir}/{d}/*.ipynb')
-        notebooks.sort()
-        scripts   = glob.glob( f'{top_dir}/{d}/*.sh')
-        scripts.sort()
-        files.extend(notebooks)
-        files.extend(scripts)
-        
-    files = [x for x in files if not regex.match(x)]
-    files = [ x.replace(f'{top_dir}/','') for x in files]
-    return files
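-
-# Example (a minimal sketch; directory names are from this repository):
-#
-#   get_files(['LinearReg', 'GTSRB'])
-#   # -> ['LinearReg/01-Linear-Regression.ipynb', ..., 'GTSRB/batch_oar.sh', ...]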
-
-
-def get_notebook_infos(filename, top_dir='..'):
-    '''
-    Extract information from a Fidle notebook.
-    The information (dirname, basename, id, title, description) is extracted from comment tags in markdown cells.
-    args:
-        filename : notebook filename
-    return:
-        dict : with infos.
-    '''
-    # print('Read : ',filename)
-    about={}
-    about['id']          = '??'
-    about['dirname']     = os.path.dirname(filename)
-    about['basename']    = os.path.basename(filename)
-    about['title']       = '??'
-    about['description'] = '??'
-    about['overrides']   = None
-    
-    # ---- Read notebook
-    #
-    notebook = nbformat.read(f'{top_dir}/{filename}', nbformat.NO_CONVERT)
-    
-    # ---- Get id, title and desc tags
-    #
-    overrides=[]
-    for cell in notebook.cells:
-     
-        # ---- Find Index informations
-        #
-        if cell['cell_type'] == 'markdown':
-
-            find = re.findall(r'<\!-- TITLE -->\s*\[(.*)\]\s*-\s*(.*)\n',cell.source)
-            if find:
-                about['id']    = find[0][0]
-                about['title'] = find[0][1]
-
-            find = re.findall(r'<\!-- DESC -->\s*(.*)\n',cell.source)
-            if find:
-                about['description']  = find[0]
-
-        # ---- Find override informations
-        #
-        if cell['cell_type'] == 'code':
-            
-            # Try to find : override(...) call
-            for m in re.finditer(r'override\((.+?)\)', cell.source):
-                overrides.extend ( re.findall(r'\w+', m.group(1)) )
-
-            # Try to find : run_dir=
-            if re.search(r"\s*run_dir\s*?=", cell.source):
-                overrides.append('run_dir')
-                
-    about['overrides']=overrides
-    return about
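-
-# Example of the markdown tags this parser expects (matching the regexes above;
-# the id and title follow the catalog convention):
-#
-#   <!-- TITLE --> [MNIST1] - Simple classification with DNN
-#   <!-- DESC -->  An example of classification using a dense neural network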
-
-    
-    
-def get_txtfile_infos(filename, top_dir='..'):
-    '''
-    Extract Fidle information from a text file (script...).
-    The information (dirname, basename, id, title, description) is extracted from comment tags in the document.
-    args:
-        filename : file to analyze
-    return:
-        dict : with infos.
-    '''
-
-    about={}
-    about['id']          = '??'
-    about['dirname']     = os.path.dirname(filename)
-    about['basename']    = os.path.basename(filename)
-    about['title']       = '??'
-    about['description'] = '??'
-    about['overrides']   = []
-    
-    # ---- Read file
-    #
-    with open(f'{top_dir}/{filename}') as fp:
-        text = fp.read()
-
-    find = re.findall(r'<\!-- TITLE -->\s*\[(.*)\]\s*-\s*(.*)\n',text)
-    if find:
-        about['id']    = find[0][0]
-        about['title'] = find[0][1]
-
-    find = re.findall(r'<\!-- DESC -->\s*(.*)\n',text)
-    if find:
-        about['description']  = find[0]
-
-    return about
-
-              
-def get_catalog(files_list=None, top_dir='..'):
-    '''
-    Return an OrderedDict of file attributes.
-    Keys are file id.
-    args:
-        files_list : list of files filenames
-        top_dir : Location of these files
-    return:
-        OrderedDict : {<file id> : { description} }
-    '''
-       
-    catalog = OrderedDict()
-
-    # ---- Build catalog
-    for file in files_list:
-        about=None
-        if file.endswith('.ipynb'): about = get_notebook_infos(file, top_dir=top_dir)
-        if file.endswith('.sh'):    about = get_txtfile_infos(file, top_dir=top_dir)
-        if about is None:
-            print(f'** Warning : File [{file}] has no tag information...')
-            continue
-        id=about['id']
-        catalog[id] = about
-        
-    return catalog
-        
-
-def tag(tag, text, document):
-    '''
-    Insert a text between a pair of begin/end tags
-    args:
-        tag : tag prefix name
-        text : text to insert
-        document : document
-    return:
-        updated document
-    '''
-    debut  = f'<!-- {tag}_BEGIN -->'
-    fin    = f'<!-- {tag}_END -->'
-
-    output = re.sub(f'{debut}.*{fin}',f'{debut}\n{text}\n{fin}',document, flags=re.DOTALL)
-    return output
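-
-# Example (a minimal sketch):
-#
-#   doc = 'intro <!-- TOC_BEGIN -->old<!-- TOC_END --> outro'
-#   tag('TOC', 'new table of contents', doc)
-#   # -> 'intro <!-- TOC_BEGIN -->\nnew table of contents\n<!-- TOC_END --> outro'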
-
-
-def read_catalog():
-    '''
-    Read json catalog file.
-    args:
-        None
-    return:
-        json catalog
-    '''
-    with open(config.CATALOG_FILE) as fp:
-        catalog = json.load(fp)
-    return catalog
-
-
-# -----------------------------------------------------------------------------
-# To build default.yml profile
-# -----------------------------------------------------------------------------
-#    build_default_profile :  Get default profile
-# -----------------------------------------------------------------------------
-
-
-def build_default_profile(output_tag='==ci=='):
-    '''
-    Build and save a default profile for continuous integration.
-    This profile contains the list of notebooks with their overridable parameters.
-    It can be edited and saved, then used to run the notebooks.
-    The notebooks catalog is loaded from config.CATALOG_FILE, and the profile
-    is saved as config.PROFILE_FILE.
-    params:
-        output_tag : tag name of generated notebooks
-    return:
-        None
-    '''
-    
-    catalog = read_catalog()
-
-    metadata   = { 'version'       : '1.0', 
-                   'output_tag'    : output_tag, 
-                   'save_figs'     : True, 
-                   'description'   : 'Default generated profile',
-                   'output_ipynb'  : '<directory for ipynb>',
-                   'output_html'   : '<directory for html>',
-                   'report_json'   : '<report json file>',
-                   'report_error'  : '<error file>'
-                   }
-    profile  = { '_metadata_':metadata }
-    for id, about in catalog.items():
-        
-        id        = about['id']
-        title     = about['title']
-        dirname   = about['dirname']
-        basename  = about['basename']
-        overrides = about.get('overrides',None)
-    
-        notebook = {}
-        notebook['notebook_id']  = id
-        notebook['notebook_dir'] = dirname
-        notebook['notebook_src'] = basename
-        notebook['notebook_tag'] = 'default'
-        if overrides:
-            notebook['overrides']={ name:'default' for name in overrides }
-                    
-        profile[f'Nb_{id}']=notebook
-        
-    # ---- Save profile
-    #
-    with open(config.PROFILE_FILE,'wt') as fp:
-        n=len(profile)-1
-        yaml.dump(profile, fp, sort_keys=False)
-        print(f'default profile saved as : {config.PROFILE_FILE} ({n} entries)')
\ No newline at end of file
diff --git a/fidle/logs/catalog.json b/fidle/logs/catalog.json
deleted file mode 100644
index f7e7c8b39f4389d7c4dbe23405dc407b1985c2ff..0000000000000000000000000000000000000000
--- a/fidle/logs/catalog.json
+++ /dev/null
@@ -1,640 +0,0 @@
-{
-    "LINR1": {
-        "id": "LINR1",
-        "dirname": "LinearReg",
-        "basename": "01-Linear-Regression.ipynb",
-        "title": "Linear regression with direct resolution",
-        "description": "Low-level implementation, using numpy, of a direct resolution for a linear regression",
-        "overrides": []
-    },
-    "GRAD1": {
-        "id": "GRAD1",
-        "dirname": "LinearReg",
-        "basename": "02-Gradient-descent.ipynb",
-        "title": "Linear regression with gradient descent",
-        "description": "Low level implementation of a solution by gradient descent. Basic and stochastic approach.",
-        "overrides": []
-    },
-    "POLR1": {
-        "id": "POLR1",
-        "dirname": "LinearReg",
-        "basename": "03-Polynomial-Regression.ipynb",
-        "title": "Complexity Syndrome",
-        "description": "Illustration of the problem of complexity with the polynomial regression",
-        "overrides": []
-    },
-    "LOGR1": {
-        "id": "LOGR1",
-        "dirname": "LinearReg",
-        "basename": "04-Logistic-Regression.ipynb",
-        "title": "Logistic regression",
-        "description": "Simple example of logistic regression with a sklearn solution",
-        "overrides": []
-    },
-    "PER57": {
-        "id": "PER57",
-        "dirname": "IRIS",
-        "basename": "01-Simple-Perceptron.ipynb",
-        "title": "Perceptron Model 1957",
-        "description": "Example of use of a Perceptron, with sklearn and IRIS dataset of 1936 !",
-        "overrides": []
-    },
-    "BHPD1": {
-        "id": "BHPD1",
-        "dirname": "BHPD",
-        "basename": "01-DNN-Regression.ipynb",
-        "title": "Regression with a Dense Network (DNN)",
-        "description": "Simple example of a regression with the dataset Boston Housing Prices Dataset (BHPD)",
-        "overrides": [
-            "fit_verbosity"
-        ]
-    },
-    "BHPD2": {
-        "id": "BHPD2",
-        "dirname": "BHPD",
-        "basename": "02-DNN-Regression-Premium.ipynb",
-        "title": "Regression with a Dense Network (DNN) - Advanced code",
-        "description": "A more advanced implementation of the precedent example",
-        "overrides": [
-            "fit_verbosity"
-        ]
-    },
-    "MNIST1": {
-        "id": "MNIST1",
-        "dirname": "MNIST",
-        "basename": "01-DNN-MNIST.ipynb",
-        "title": "Simple classification with DNN",
-        "description": "An example of classification using a dense neural network for the famous MNIST dataset",
-        "overrides": [
-            "fit_verbosity"
-        ]
-    },
-    "MNIST2": {
-        "id": "MNIST2",
-        "dirname": "MNIST",
-        "basename": "02-CNN-MNIST.ipynb",
-        "title": "Simple classification with CNN",
-        "description": "An example of classification using a convolutional neural network for the famous MNIST dataset",
-        "overrides": [
-            "fit_verbosity"
-        ]
-    },
-    "GTSRB1": {
-        "id": "GTSRB1",
-        "dirname": "GTSRB",
-        "basename": "01-Preparation-of-data.ipynb",
-        "title": "Dataset analysis and preparation",
-        "description": "Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset",
-        "overrides": [
-            "scale",
-            "output_dir",
-            "progress_verbosity"
-        ]
-    },
-    "GTSRB2": {
-        "id": "GTSRB2",
-        "dirname": "GTSRB",
-        "basename": "02-First-convolutions.ipynb",
-        "title": "First convolutions",
-        "description": "Episode 2 : First convolutions and first classification of our traffic signs",
-        "overrides": [
-            "run_dir",
-            "enhanced_dir",
-            "dataset_name",
-            "batch_size",
-            "epochs",
-            "scale",
-            "fit_verbosity"
-        ]
-    },
-    "GTSRB3": {
-        "id": "GTSRB3",
-        "dirname": "GTSRB",
-        "basename": "03-Tracking-and-visualizing.ipynb",
-        "title": "Training monitoring",
-        "description": "Episode 3 : Monitoring, analysis and check points during a training session",
-        "overrides": [
-            "run_dir",
-            "enhanced_dir",
-            "dataset_name",
-            "batch_size",
-            "epochs",
-            "scale",
-            "fit_verbosity"
-        ]
-    },
-    "GTSRB4": {
-        "id": "GTSRB4",
-        "dirname": "GTSRB",
-        "basename": "04-Data-augmentation.ipynb",
-        "title": "Data augmentation ",
-        "description": "Episode 4 : Adding data by data augmentation when we lack it, to improve our results",
-        "overrides": [
-            "run_dir",
-            "enhanced_dir",
-            "dataset_name",
-            "batch_size",
-            "epochs",
-            "scale",
-            "fit_verbosity"
-        ]
-    },
-    "GTSRB5": {
-        "id": "GTSRB5",
-        "dirname": "GTSRB",
-        "basename": "05-Full-convolutions.ipynb",
-        "title": "Full convolutions",
-        "description": "Episode 5 : A lot of models, a lot of datasets and a lot of results.",
-        "overrides": [
-            "run_dir",
-            "enhanced_dir",
-            "datasets",
-            "models",
-            "batch_size",
-            "epochs",
-            "scale",
-            "with_datagen",
-            "fit_verbosity"
-        ]
-    },
-    "GTSRB6": {
-        "id": "GTSRB6",
-        "dirname": "GTSRB",
-        "basename": "06-Notebook-as-a-batch.ipynb",
-        "title": "Full convolutions as a batch",
-        "description": "Episode 6 : To compute bigger, use your notebook in batch mode",
-        "overrides": []
-    },
-    "GTSRB7": {
-        "id": "GTSRB7",
-        "dirname": "GTSRB",
-        "basename": "07-Show-report.ipynb",
-        "title": "Batch reports",
-        "description": "Episode 7 : Displaying our jobs report, and the winner is...",
-        "overrides": [
-            "run_dir",
-            "report_dir"
-        ]
-    },
-    "GTSRB10": {
-        "id": "GTSRB10",
-        "dirname": "GTSRB",
-        "basename": "batch_oar.sh",
-        "title": "OAR batch script submission",
-        "description": "Bash script for an OAR batch submission of an ipython code",
-        "overrides": []
-    },
-    "GTSRB11": {
-        "id": "GTSRB11",
-        "dirname": "GTSRB",
-        "basename": "batch_slurm.sh",
-        "title": "SLURM batch script",
-        "description": "Bash script for a Slurm batch submission of an ipython code",
-        "overrides": []
-    },
-    "IMDB1": {
-        "id": "IMDB1",
-        "dirname": "IMDB",
-        "basename": "01-One-hot-encoding.ipynb",
-        "title": "Sentiment analysis with hot-one encoding",
-        "description": "A basic example of sentiment analysis with sparse encoding, using a dataset from Internet Movie Database (IMDB)",
-        "overrides": [
-            "run_dir",
-            "vocab_size",
-            "hide_most_frequently",
-            "batch_size",
-            "epochs",
-            "fit_verbosity"
-        ]
-    },
-    "IMDB2": {
-        "id": "IMDB2",
-        "dirname": "IMDB",
-        "basename": "02-Keras-embedding.ipynb",
-        "title": "Sentiment analysis with text embedding",
-        "description": "A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)",
-        "overrides": [
-            "run_dir",
-            "vocab_size",
-            "hide_most_frequently",
-            "review_len",
-            "dense_vector_size",
-            "batch_size",
-            "epochs",
-            "output_dir",
-            "fit_verbosity"
-        ]
-    },
-    "IMDB3": {
-        "id": "IMDB3",
-        "dirname": "IMDB",
-        "basename": "03-Prediction.ipynb",
-        "title": "Reload and reuse a saved model",
-        "description": "Retrieving a saved model to perform a sentiment analysis (movie review)",
-        "overrides": [
-            "run_dir",
-            "vocab_size",
-            "review_len",
-            "dictionaries_dir"
-        ]
-    },
-    "IMDB4": {
-        "id": "IMDB4",
-        "dirname": "IMDB",
-        "basename": "04-Show-vectors.ipynb",
-        "title": "Reload embedded vectors",
-        "description": "Retrieving embedded vectors from our trained model",
-        "overrides": [
-            "run_dir",
-            "vocab_size",
-            "review_len",
-            "dictionaries_dir"
-        ]
-    },
-    "IMDB5": {
-        "id": "IMDB5",
-        "dirname": "IMDB",
-        "basename": "05-LSTM-Keras.ipynb",
-        "title": "Sentiment analysis with a RNN network",
-        "description": "Still the same problem, but with a network combining embedding and RNN",
-        "overrides": [
-            "run_dir",
-            "vocab_size",
-            "hide_most_frequently",
-            "review_len",
-            "dense_vector_size",
-            "batch_size",
-            "epochs",
-            "fit_verbosity",
-            "scale"
-        ]
-    },
-    "LADYB1": {
-        "id": "LADYB1",
-        "dirname": "SYNOP",
-        "basename": "LADYB1-Ladybug.ipynb",
-        "title": "Prediction of a 2D trajectory via RNN",
-        "description": "Artificial dataset generation and prediction attempt via a recurrent network",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "train_prop",
-            "sequence_len",
-            "predict_len",
-            "batch_size",
-            "epochs"
-        ]
-    },
-    "SYNOP1": {
-        "id": "SYNOP1",
-        "dirname": "SYNOP",
-        "basename": "SYNOP1-Preparation-of-data.ipynb",
-        "title": "Preparation of data",
-        "description": "Episode 1 : Data analysis and preparation of a usuable meteorological dataset (SYNOP)",
-        "overrides": [
-            "run_dir",
-            "output_dir"
-        ]
-    },
-    "SYNOP2": {
-        "id": "SYNOP2",
-        "dirname": "SYNOP",
-        "basename": "SYNOP2-First-predictions.ipynb",
-        "title": "First predictions at 3h",
-        "description": "Episode 2 : RNN training session for weather prediction attempt at 3h",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "train_prop",
-            "sequence_len",
-            "batch_size",
-            "epochs",
-            "fit_verbosity"
-        ]
-    },
-    "SYNOP3": {
-        "id": "SYNOP3",
-        "dirname": "SYNOP",
-        "basename": "SYNOP3-12h-predictions.ipynb",
-        "title": "12h predictions",
-        "description": "Episode 3: Attempt to predict in a more longer term ",
-        "overrides": [
-            "run_dir",
-            "iterations",
-            "scale",
-            "train_prop",
-            "sequence_len"
-        ]
-    },
-    "TRANS1": {
-        "id": "TRANS1",
-        "dirname": "Transformers",
-        "basename": "01-Distilbert.ipynb",
-        "title": "IMDB, Sentiment analysis with Transformers ",
-        "description": "Using a Tranformer to perform a sentiment analysis (IMDB) - Jean Zay version",
-        "overrides": []
-    },
-    "TRANS2": {
-        "id": "TRANS2",
-        "dirname": "Transformers",
-        "basename": "02-distilbert_colab.ipynb",
-        "title": "IMDB, Sentiment analysis with Transformers ",
-        "description": "Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version",
-        "overrides": []
-    },
-    "AE1": {
-        "id": "AE1",
-        "dirname": "AE",
-        "basename": "01-Prepare-MNIST-dataset.ipynb",
-        "title": "Prepare a noisy MNIST dataset",
-        "description": "Episode 1: Preparation of a noisy MNIST dataset",
-        "overrides": [
-            "run_dir",
-            "prepared_dataset",
-            "scale",
-            "progress_verbosity"
-        ]
-    },
-    "AE2": {
-        "id": "AE2",
-        "dirname": "AE",
-        "basename": "02-AE-with-MNIST.ipynb",
-        "title": "Building and training an AE denoiser model",
-        "description": "Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset.",
-        "overrides": [
-            "run_dir",
-            "prepared_dataset",
-            "dataset_seed",
-            "scale",
-            "latent_dim",
-            "train_prop",
-            "batch_size",
-            "epochs"
-        ]
-    },
-    "AE3": {
-        "id": "AE3",
-        "dirname": "AE",
-        "basename": "03-AE-with-MNIST-post.ipynb",
-        "title": "Playing with our denoiser model",
-        "description": "Episode 2 : Using the previously trained autoencoder to denoise data",
-        "overrides": [
-            "run_dir",
-            "prepared_dataset",
-            "dataset_seed",
-            "scale",
-            "train_prop"
-        ]
-    },
-    "AE4": {
-        "id": "AE4",
-        "dirname": "AE",
-        "basename": "04-ExtAE-with-MNIST.ipynb",
-        "title": "Denoiser and classifier model",
-        "description": "Episode 4 : Construction of a denoiser and classifier model",
-        "overrides": [
-            "run_dir",
-            "prepared_dataset",
-            "dataset_seed",
-            "scale",
-            "latent_dim",
-            "train_prop",
-            "batch_size",
-            "epochs"
-        ]
-    },
-    "AE5": {
-        "id": "AE5",
-        "dirname": "AE",
-        "basename": "05-ExtAE-with-MNIST.ipynb",
-        "title": "Advanced denoiser and classifier model",
-        "description": "Episode 5 : Construction of an advanced denoiser and classifier model",
-        "overrides": [
-            "run_dir",
-            "prepared_dataset",
-            "dataset_seed",
-            "scale",
-            "latent_dim",
-            "train_prop",
-            "batch_size",
-            "epochs"
-        ]
-    },
-    "VAE1": {
-        "id": "VAE1",
-        "dirname": "VAE",
-        "basename": "01-VAE-with-MNIST.ipynb",
-        "title": "First VAE, using functional API (MNIST dataset)",
-        "description": "Construction and training of a VAE, using functional APPI, with a latent space of small dimension.",
-        "overrides": [
-            "run_dir",
-            "latent_dim",
-            "loss_weights",
-            "scale",
-            "seed",
-            "batch_size",
-            "epochs",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "VAE2": {
-        "id": "VAE2",
-        "dirname": "VAE",
-        "basename": "02-VAE-with-MNIST.ipynb",
-        "title": "VAE, using a custom model class  (MNIST dataset)",
-        "description": "Construction and training of a VAE, using model subclass, with a latent space of small dimension.",
-        "overrides": [
-            "run_dir",
-            "latent_dim",
-            "loss_weights",
-            "scale",
-            "seed",
-            "batch_size",
-            "epochs",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "VAE3": {
-        "id": "VAE3",
-        "dirname": "VAE",
-        "basename": "03-VAE-with-MNIST-post.ipynb",
-        "title": "Analysis of the VAE's latent space of MNIST dataset",
-        "description": "Visualization and analysis of the VAE's latent space of the dataset MNIST",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "seed"
-        ]
-    },
-    "VAE5": {
-        "id": "VAE5",
-        "dirname": "VAE",
-        "basename": "05-About-CelebA.ipynb",
-        "title": "Another game play : About the CelebA dataset",
-        "description": "Episode 1 : Presentation of the CelebA dataset and problems related to its size",
-        "overrides": [
-            "run_dir",
-            "progress_verbosity"
-        ]
-    },
-    "VAE6": {
-        "id": "VAE6",
-        "dirname": "VAE",
-        "basename": "06-Prepare-CelebA-datasets.ipynb",
-        "title": "Generation of a clustered dataset",
-        "description": "Episode 2 : Analysis of the CelebA dataset and creation of an clustered and usable dataset",
-        "overrides": [
-            "run_dir",
-            "progress_verbosity",
-            "scale",
-            "seed",
-            "cluster_size",
-            "image_size",
-            "output_dir",
-            "exit_if_exist"
-        ]
-    },
-    "VAE7": {
-        "id": "VAE7",
-        "dirname": "VAE",
-        "basename": "07-Check-CelebA.ipynb",
-        "title": "Checking the clustered dataset",
-        "description": "Episode : 3 Clustered dataset verification and testing of our datagenerator",
-        "overrides": [
-            "run_dir",
-            "image_size",
-            "enhanced_dir",
-            "progress_verbosity"
-        ]
-    },
-    "VAE8": {
-        "id": "VAE8",
-        "dirname": "VAE",
-        "basename": "08-VAE-with-CelebA.ipynb",
-        "title": "Training session for our VAE",
-        "description": "Episode 4 : Training with our clustered datasets in notebook or batch mode",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "image_size",
-            "enhanced_dir",
-            "latent_dim",
-            "loss_weights",
-            "batch_size",
-            "epochs",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "VAE9": {
-        "id": "VAE9",
-        "dirname": "VAE",
-        "basename": "09-VAE-with-CelebA-192x160.ipynb",
-        "title": "Training session for our VAE with 192x160 images",
-        "description": "Episode 4 : Training with our clustered datasets in notebook or batch mode",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "image_size",
-            "enhanced_dir",
-            "latent_dim",
-            "loss_weights",
-            "batch_size",
-            "epochs",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "VAE10": {
-        "id": "VAE10",
-        "dirname": "VAE",
-        "basename": "batch_slurm.sh",
-        "title": "SLURM batch script",
-        "description": "Bash script for SLURM batch submission of VAE8 notebooks ",
-        "overrides": []
-    },
-    "SHEEP1": {
-        "id": "SHEEP1",
-        "dirname": "DCGAN",
-        "basename": "01-DCGAN-Draw-me-a-sheep.ipynb",
-        "title": "A first DCGAN to Draw a Sheep",
-        "description": "Episode 1 : Draw me a sheep, revisited with a DCGAN",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "latent_dim",
-            "epochs",
-            "batch_size",
-            "num_img",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "SHEEP2": {
-        "id": "SHEEP2",
-        "dirname": "DCGAN",
-        "basename": "02-WGANGP-Draw-me-a-sheep.ipynb",
-        "title": "A WGAN-GP to Draw a Sheep",
-        "description": "Episode 2 : Draw me a sheep, revisited with a WGAN-GP",
-        "overrides": [
-            "run_dir",
-            "scale",
-            "latent_dim",
-            "epochs",
-            "batch_size",
-            "num_img",
-            "fit_verbosity",
-            "run_dir"
-        ]
-    },
-    "DRL1": {
-        "id": "DRL1",
-        "dirname": "DRL",
-        "basename": "FIDLE_DQNfromScratch.ipynb",
-        "title": "Solving CartPole with DQN",
-        "description": "Using a a Deep Q-Network to play CartPole - an inverted pendulum problem (PyTorch)",
-        "overrides": []
-    },
-    "DRL2": {
-        "id": "DRL2",
-        "dirname": "DRL",
-        "basename": "FIDLE_rl_baselines_zoo.ipynb",
-        "title": "RL Baselines3 Zoo: Training in Colab",
-        "description": "Demo of Stable baseline3 with Colab",
-        "overrides": []
-    },
-    "ACTF1": {
-        "id": "ACTF1",
-        "dirname": "Misc",
-        "basename": "Activation-Functions.ipynb",
-        "title": "Activation functions",
-        "description": "Some activation functions, with their derivatives.",
-        "overrides": []
-    },
-    "NP1": {
-        "id": "NP1",
-        "dirname": "Misc",
-        "basename": "Numpy.ipynb",
-        "title": "A short introduction to Numpy",
-        "description": "Numpy is an essential tool for the Scientific Python.",
-        "overrides": []
-    },
-    "SCRATCH1": {
-        "id": "SCRATCH1",
-        "dirname": "Misc",
-        "basename": "Scratchbook.ipynb",
-        "title": "Scratchbook",
-        "description": "A scratchbook for small examples",
-        "overrides": []
-    },
-    "TSB1": {
-        "id": "TSB1",
-        "dirname": "Misc",
-        "basename": "Using-Tensorboard.ipynb",
-        "title": "Tensorboard with/from Jupyter ",
-        "description": "4 ways to use Tensorboard from the Jupyter environment",
-        "overrides": []
-    }
-}
\ No newline at end of file
diff --git a/fidle/mplstyles/custom.mplstyle b/fidle/mplstyles/custom.mplstyle
deleted file mode 100644
index 01e69c7b3139a392f3f892034dcce70afdd0a40b..0000000000000000000000000000000000000000
--- a/fidle/mplstyles/custom.mplstyle
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# See : https://matplotlib.org/users/customizing.html
-
-axes.titlesize : 24
-axes.labelsize : 20
-axes.edgecolor      : dimgrey
-axes.labelcolor     : dimgrey
-axes.linewidth      : 2
-axes.grid           : False
-
-axes.prop_cycle    : cycler('color', ['steelblue', 'tomato', '2ca02c', 'd62728', '9467bd', '8c564b', 'e377c2', '7f7f7f', 'bcbd22', '17becf'])
-
-lines.linewidth     : 3
-lines.markersize    : 10
-
-xtick.color         : black
-xtick.labelsize     : 18
-ytick.color         : black
-ytick.labelsize     : 18
-
-axes.spines.left   : True
-axes.spines.bottom : True
-axes.spines.top    : False
-axes.spines.right  : False
-
-savefig.dpi         : 300      # figure dots per inch or 'figure'
-savefig.facecolor   : white    # figure facecolor when saving
-savefig.edgecolor   : white    # figure edgecolor when saving
-savefig.format      : svg
-savefig.bbox        : tight
-savefig.pad_inches  : 0.1
-savefig.transparent : True
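-
-# To use this style from Python (a minimal sketch; in the notebooks it is
-# normally loaded by pwk.init() via config.FIDLE_MPLSTYLE):
-#
-#   import matplotlib
-#   matplotlib.style.use('fidle/mplstyles/custom.mplstyle')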
diff --git a/fidle/pwk.py b/fidle/pwk.py
deleted file mode 100644
index 6b752ac9b6620c4bb94ab20dcc48f2611eaecf4a..0000000000000000000000000000000000000000
--- a/fidle/pwk.py
+++ /dev/null
@@ -1,891 +0,0 @@
-# ==================================================================
-#  ____                 _   _           _  __        __         _
-# |  _ \ _ __ __ _  ___| |_(_) ___ __ _| | \ \      / /__  _ __| | __
-# | |_) | '__/ _` |/ __| __| |/ __/ _` | |  \ \ /\ / / _ \| '__| |/ /
-# |  __/| | | (_| | (__| |_| | (_| (_| | |   \ V  V / (_) | |  |   <
-# |_|   |_|  \__,_|\___|\__|_|\___\__,_|_|    \_/\_/ \___/|_|  |_|\_\
-#                                                   Fidle module pwk                                   
-# ==================================================================
-# A simple module to host some common functions for practical work
-# Jean-Luc Parouty 2020
-
-import os,sys,platform
-import glob
-import shutil
-import itertools
-import datetime, time
-import json
-
-import math
-import numpy as np
-from collections.abc import Iterable
-
-import tensorflow as tf
-from tensorflow import keras
-from sklearn.metrics import confusion_matrix
-
-import pandas as pd
-import matplotlib
-import matplotlib.pyplot as plt
-
-from IPython.display import display,Image,Markdown,HTML
-
-import fidle.config as config
-
-
-__version__   = config.VERSION
-
-datasets_dir  = None
-notebook_id   = None
-running_mode  = None
-run_dir       = None
-
-_save_figs    = False
-_figs_dir     = './figs'
-_figs_name    = 'fig_'
-_figs_id      = 0
-
-_start_time   = None
-_end_time     = None
-_chrono_start = None
-_chrono_stop  = None
-
-# -------------------------------------------------------------
-# init_all
-# -------------------------------------------------------------
-#
-def init(name=None, run_directory='./run'):
-    global notebook_id
-    global datasets_dir
-    global run_dir
-    global _start_time
-    
-    # ---- Parameters from config.py
-    #
-    notebook_id = config.DEFAULT_NOTEBOOK_NAME if name is None else name
-    mplstyle    = config.FIDLE_MPLSTYLE
-    cssfile     = config.FIDLE_CSSFILE
-    
-    # ---- Load matplotlib style and css
-    #
-    matplotlib.style.use(mplstyle)
-    load_cssfile(cssfile)
-    
-    # ---- datasets location
-    #
-    datasets_dir = os.getenv('FIDLE_DATASETS_DIR', False)
-    if datasets_dir is False:
-        error_datasets_not_found()
-    # Resolve tilde...
-    datasets_dir=os.path.expanduser(datasets_dir)
-        
-    # ---- run_dir
-    #
-    attrs   = override('run_dir', return_attributes=True)
-    run_dir = attrs.get('run_dir', run_directory)
-    mkdir(run_dir)
-    
-    # ---- Update Keras cache
-    #
-    updated = update_keras_cache()
-
-    # ---- Tensorflow log level
-    #
-    log_level = int(os.getenv('TF_CPP_MIN_LOG_LEVEL', 0 ))
-    str_level = ['Info + Warning + Error','Warning + Error','Error only'][log_level]
-    
-    # ---- Today, now and hostname
-    #
-    _start_time = datetime.datetime.now()
-    h = platform.uname()
-    
-    # ---- Hello world
-    #
-    display_md('<br>**FIDLE 2020 - Practical Work Module**')
-    print('Version              :', config.VERSION)
-    print('Notebook id          :', notebook_id)
-    print('Run time             :', _start_time.strftime("%A %d %B %Y, %H:%M:%S"))
-    print('Hostname             :', f'{h[1]} ({h[0]})')
-    print('Tensorflow log level :', str_level,f' (={log_level})')
-    print('Datasets dir         :', datasets_dir)
-    print('Run dir              :', run_dir)
-    print('Update keras cache   :', updated)
-    
-    # ---- Versions catalog
-    #
-    for m in config.USED_MODULES:
-        if m in sys.modules:
-            print(f'{m:21s}:', sys.modules[m].__version__)
-
-    # ---- Save figs or not
-    #
-    save_figs = os.getenv('FIDLE_SAVE_FIGS', str(config.SAVE_FIGS) )
-    if save_figs.lower() == 'true':
-        set_save_fig(save=True, figs_dir=f'{run_dir}/figs', figs_name='fig_', figs_id=0)
-
-    return datasets_dir
-
-# ------------------------------------------------------------------
-# Update keras cache
-# ------------------------------------------------------------------
-# Try to sync ~/.keras/datasets with <datasets_dir>/keras_cache
-# because sometimes we cannot access the internet... (IDRIS..)
-#
-def update_keras_cache():
-    updated = False
-    if os.path.isdir(f'{datasets_dir}/keras_cache'):
-        from_dir = f'{datasets_dir}/keras_cache/*.*'
-        to_dir   = os.path.expanduser('~/.keras/datasets')
-        mkdir(to_dir)
-        for pathname in glob.glob(from_dir):
-            filename=os.path.basename(pathname)
-            destname=f'{to_dir}/{filename}'
-            if not os.path.isfile(destname):
-                shutil.copy(pathname, destname)
-                updated=True
-    return updated
-
-# ------------------------------------------------------------------
-# Where are my datasets ?
-# ------------------------------------------------------------------
-#
-def error_datasets_not_found():        
-    display_md('## ATTENTION !!\n----')
-    print('The folder containing the datasets cannot be found\n')
-    print('For the notebooks to locate them, you must:\n')
-    print('         1/ Retrieve the datasets folder')
-    print('            An archive (datasets.tar) is available via the Fidle repository.\n')
-    print("         2/ Specify the location of this datasets folder via the")
-    print("            environment variable: FIDLE_DATASETS_DIR.\n")
-    print('Example:')
-    print("   In your .bashrc file:")
-    print('   export FIDLE_DATASETS_DIR=~/datasets')
-    display_md('----')
-    assert False, 'datasets folder not found, please set FIDLE_DATASETS_DIR env var.'
-
-    
-    
-def override(*names, module_name='__main__', verbose=True, return_attributes=False):
-    '''
-    Try to override attributes given by name with environment variables.
-    Environment variable names must be : FIDLE_OVERRIDE_<NOTEBOOK-ID>_<NAME>
-    If no env variable is available for a given name, nothing is changed.
-    If type is str, substitution is done with 'notebook_id' and 'datasets_dir'
-    Example : override('image_size','nb_epochs')
-    params:
-       names : list of attribute names, as str
-               if empty, all overridable attributes are considered
-    return :
-       dict {name: new_value}, if return_attributes is True
-    '''
-    # ---- Where to override
-    #
-    module=sys.modules[module_name]
-    
-    # ---- No names : mean all
-    #
-    if len(names)==0:
-        names=[]
-        for name in dir(module):
-            if name.startswith('_'): continue
-            v=getattr(module,name)
-            if type(v) not in [str, int, float, bool, tuple, list, dict]: continue
-            names.append(name)
-            
-    # ---- Search for names
-    #
-    overrides={}
-    for name in names:
-        
-        # ---- Environment variable name
-        #
-        env_name  = f'FIDLE_OVERRIDE_{notebook_id}_{name}'
-        env_value = os.environ.get(env_name) 
-
-        # ---- Environment variable : Doesn't exist
-        #
-        if env_value is None: continue
-
-        # ---- Environment variable : Exist
-        #
-        value_old  = getattr(module,name)
-        value_type = type(value_old)
-        
-        if value_type in [ str ] : 
-            new_value = env_value.format(datasets_dir=datasets_dir, notebook_id=notebook_id)
-
-        if value_type in [ int, float, bool, tuple, list, dict, type(None)]:
-            new_value = eval(env_value)
-    
-        # ---- Override value
-        #
-        setattr(module,name,new_value)
-        overrides[name]=new_value
-
-    if verbose and len(overrides)>0:
-        display_md('**\*\* Overridden parameters : \*\***')
-        for name,value in overrides.items():
-            print(f'{name:20s} : {value}')
-            
-    if return_attributes:
-        return overrides
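-
-# Example (a minimal sketch): overriding a notebook parameter from the shell.
-# For a notebook initialized with init('MNIST1') and declaring `epochs = 10` :
-#
-#   $ export FIDLE_OVERRIDE_MNIST1_epochs=5
-#
-# then, inside the notebook :
-#
-#   override('epochs')    # epochs is now 5 (eval'ed as an int), and is reported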
-       
-    
-# -------------------------------------------------------------
-# Folder cooking
-# -------------------------------------------------------------
-#
-def tag_now():
-    return datetime.datetime.now().strftime("%Y-%m-%d_%Hh%Mm%Ss")
-
-def mkdir(path):
-    os.makedirs(path, mode=0o750, exist_ok=True)
-      
-def get_directory_size(path):
-    """
-    Return the directory size (first level only)
-    args:
-        path : directory path
-    return:
-        size in MB
-    """
-    size=0
-    for f in os.listdir(path):
-        if os.path.isfile(path+'/'+f):
-            size+=os.path.getsize(path+'/'+f)
-    return size/(1024*1024)
-
-
-# -------------------------------------------------------------
-# shuffle_dataset
-# -------------------------------------------------------------
-#
-def shuffle_np_dataset(x, y):
-    """
-    Shuffle a dataset (x,y)
-    args:
-        x,y : dataset
-    return:
-        x,y shuffled
-    """
-    assert (len(x) == len(y)), "x and y must have same size"
-    p = np.random.permutation(len(x))
-    return x[p], y[p]
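-
-# Example (a minimal sketch):
-#
-#   x = np.arange(6).reshape(3, 2)
-#   y = np.array([0, 1, 2])
-#   x, y = shuffle_np_dataset(x, y)   # rows of x and entries of y stay paired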
-
-
-def rescale_dataset(*data, scale=1):
-    '''
-    Rescale numpy array with 'scale' factor
-    args:
-        *data : arrays
-        scale : scale factor
-    return:
-        arrays of rescaled data
-    '''
-    return [ d[:int(scale*len(d))] for d in data ]
-
-def pick_dataset(*data,n=5):
-    '''Return random subsets of n elements'''
-    ii = np.random.choice(range(len(data[0])), n)
-    out = [ d[ii] for d in data ]
-    return out[0] if len(out)==1 else out
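-
-# Examples (a minimal sketch; x_train/y_train are assumed numpy arrays):
-#
-#   x_small, y_small = rescale_dataset(x_train, y_train, scale=0.1)  # first 10%
-#   x_pick,  y_pick  = pick_dataset(x_train, y_train, n=8)           # 8 random pairs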
-
-def update_progress(what,i,imax, redraw=False, verbosity=1):
-    """
-    Display a text progress bar, as :
-    My progress bar : ############# 34%
-
-    Args:
-        what  : Progress bar name
-        i     : Current progress
-        imax  : Max value for i
-        verbosity : progress bar verbosity (0: no bar, 1: progress bar, 2: one line)
-        
-    Returns:
-        nothing
-    """
-    if verbosity==0:   return
-    if verbosity==2 and i<imax: return
-    bar_length = min(40,imax)
-    if (i%int(imax/bar_length))!=0 and i<imax and not redraw:
-        return
-    progress  = float(i/imax)
-    block     = int(round(bar_length * progress))
-    endofline = '\r' if progress<1 else '\n'
-    text = "{:16s} [{}] {:>5.1f}% of {}".format( what, "#"*block+"-"*(bar_length-block), progress*100, imax)
-    print(text, end=endofline)
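-
-# Example (a minimal sketch):
-#
-#   for i in range(1, 101):
-#       update_progress('My progress bar', i, 100)
-#
-#   # -> My progress bar  [##############------...]  35.0% of 100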
-
-    
-def rmax(l):
-    """
-    Recursive max() for a given iterable of iterables
-    Should be np.array of np.array or list of list, etc.
-    args:
-        l : Iterable of iterables
-    return: 
-        max value
-    """
-    maxi = float('-inf')
-    for item in l:
-        if isinstance(item, Iterable):
-            t = rmax(item)
-        else:
-            t = item
-        if t > maxi:
-            maxi = t
-    return maxi
-
-def rmin(l):
-    """
-    Recursive min() for a given iterable of iterables
-    Should be np.array of np.array or list of list, etc.
-    args:
-        l : Iterable of iterables
-    return: 
-        min value
-    """
-    mini = float('inf')
-    for item in l:
-        if isinstance(item, Iterable):
-            t = rmin(item)
-        else:
-            t = item
-        if t < mini:
-            mini = t
-    return mini
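-
-# Examples (a minimal sketch):
-#
-#   rmax([[1, 5], [3, [7, 2]]])   # -> 7
-#   rmin([[1, 5], [3, [7, 2]]])   # -> 1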
-
-# -------------------------------------------------------------
-# show_images
-# -------------------------------------------------------------
-#
-def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1,
-                colorbar=False, y_pred=None, cm='binary', norm=None, y_padding=0.35, spines_alpha=1,
-                fontsize=20, interpolation='lanczos', save_as='auto'):
-    """
-    Show some images in a grid, with legends
-    args:
-        x             : images - Shapes must be (-1,lx,ly) (-1,lx,ly,1) or (-1,lx,ly,3)
-        y             : real classes or labels or None (None)
-        indices       : indices of images to show or 'all' for all ('all')
-        columns       : number of columns (12)
-        x_size,y_size : figure size (1), (1)
-        colorbar      : show colorbar (False)
-        y_pred        : predicted classes (None)
-        cm            : Matplotlib color map (binary)
-        norm          : Matplotlib imshow normalization (None)
-        y_padding     : Padding / rows (0.35)
-        spines_alpha  : Spines alpha (1.)
-        fontsize      : Font size in px (20)
-        save_as       : Filename to use if save figs is enable ('auto')
-    returns: 
-        nothing
-    """
-    if indices=='all': indices=range(len(x))
-    if norm and len(norm) == 2: norm = matplotlib.colors.Normalize(vmin=norm[0], vmax=norm[1])
-    draw_labels = (y is not None)
-    draw_pred   = (y_pred is not None)
-    rows        = math.ceil(len(indices)/columns)
-    fig=plt.figure(figsize=(columns*x_size, rows*(y_size+y_padding)))
-    n=1
-    for i in indices:
-        axs=fig.add_subplot(rows, columns, n)
-        n+=1
-        # ---- Shape is (lx,ly)
-        if len(x[i].shape)==2:
-            xx=x[i]
-        # ---- Shape is (lx,ly,n)
-        if len(x[i].shape)==3:
-            (lx,ly,lz)=x[i].shape
-            if lz==1: 
-                xx=x[i].reshape(lx,ly)
-            else:
-                xx=x[i]
-        img=axs.imshow(xx,   cmap = cm, norm=norm, interpolation=interpolation)
-        for side in ('right','left','top','bottom'):
-            axs.spines[side].set_visible(True)
-            axs.spines[side].set_alpha(spines_alpha)
-        axs.set_yticks([])
-        axs.set_xticks([])
-        if draw_labels and not draw_pred:
-            axs.set_xlabel(y[i],fontsize=fontsize)
-        if draw_labels and draw_pred:
-            if y[i]!=y_pred[i]:
-                axs.set_xlabel(f'{y_pred[i]} ({y[i]})',fontsize=fontsize)
-                axs.xaxis.label.set_color('red')
-            else:
-                axs.set_xlabel(y[i],fontsize=fontsize)
-        if colorbar:
-            fig.colorbar(img,orientation="vertical", shrink=0.65)
-    save_fig(save_as)
-    plt.show()
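-
-# Typical usage (a sketch; x_test, y_test, y_pred are hypothetical MNIST-like data):
-#   x_test.shape==(n,28,28) ; y_test and y_pred are vectors of class ids
-#   plot_images(x_test, y_test, indices=range(24), columns=8, y_pred=y_pred, save_as='03-predictions')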
-
-    
-def plot_image(x,cm='binary', figsize=(4,4),interpolation='lanczos', save_as='auto'):
-    """
-    Draw a single image.
-    Image shape can be (lx,ly), (lx,ly,1) or (lx,ly,n)
-    args:
-        x             : image as np array
-        cm            : color map ('binary')
-        figsize       : fig size ( (4,4) )
-        interpolation : Matplotlib interpolation mode ('lanczos')
-        save_as       : Filename to use if saving figs is enabled ('auto')
-    """
-    # ---- Shape is (lx,ly) : grayscale
-    if len(x.shape)==2:
-        xx=x
-    # ---- Shape is (lx,ly,n) : one or several channels
-    elif len(x.shape)==3:
-        (lx,ly,lz)=x.shape
-        if lz==1:
-            xx=x.reshape(lx,ly)
-        else:
-            xx=x
-    else:
-        raise ValueError(f'Unsupported image shape: {x.shape}')
-    # ---- Draw it
-    plt.figure(figsize=figsize)
-    plt.imshow(xx,   cmap = cm, interpolation=interpolation)
-    save_fig(save_as)
-    plt.show()
-
-
-# -------------------------------------------------------------
-# plot_history
-# -------------------------------------------------------------
-#
-def plot_history(history, figsize=(8,6), 
-                 plot={"Accuracy":['accuracy','val_accuracy'], 'Loss':['loss', 'val_loss']},
-                 save_as='auto'):
-    """
-    Show history
-    args:
-        history: history
-        figsize: fig size
-        plot: list of data to plot : {<title>:[<metrics>,...], ...}
-    """
-    fig_id=0
-    for title,curves in plot.items():
-        plt.figure(figsize=figsize)
-        plt.title(title)
-        plt.ylabel(title)
-        plt.xlabel('Epoch')
-        for c in curves:
-            plt.plot(history.history[c])
-        plt.legend(curves, loc='upper left')
-        if save_as=='auto':
-            figname='auto'
-        else:
-            figname=f'{save_as}_{fig_id}'
-            fig_id+=1
-        save_fig(figname)
-        plt.show()
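-
-# Typical usage (a sketch, assuming a Keras-style training):
-#   history = model.fit(...)
-#   plot_history(history, save_as='04-history')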
-
-    
-    
-def plot_confusion_matrix(y_true,y_pred,
-                          target_names,
-                          title='Confusion matrix',
-                          cmap=None,
-                          normalize=True,
-                          figsize=(10, 8),
-                          digit_format='{:0.2f}',
-                          save_as='auto'):
-    """
-    given a sklearn confusion matrix (cm), make a nice plot
-
-    Arguments
-    ---------
-    cm:           confusion matrix from sklearn.metrics.confusion_matrix
-
-    target_names: given classification classes such as [0, 1, 2]
-                  the class names, for example: ['high', 'medium', 'low']
-
-    title:        the text to display at the top of the matrix
-
-    cmap:         the gradient of the values displayed from matplotlib.pyplot.cm
-                  see http://matplotlib.org/examples/color/colormaps_reference.html
-                  plt.get_cmap('jet') or plt.cm.Blues
-
-    normalize:    If False, plot the raw numbers
-                  If True, plot the proportions
-
-    Citiation
-    ---------
-    http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
-
-    """
-    cm = confusion_matrix( y_true,y_pred, normalize=None, labels=target_names)
-    
-    accuracy = np.trace(cm) / float(np.sum(cm))
-    misclass = 1 - accuracy
-
-    if cmap is None:
-        cmap = plt.get_cmap('Blues')
-
-    plt.figure(figsize=figsize)
-    plt.imshow(cm, interpolation='nearest', cmap=cmap)
-    plt.title(title)
-    plt.colorbar()
-
-    if target_names is not None:
-        tick_marks = np.arange(len(target_names))
-        plt.xticks(tick_marks, target_names, rotation=90)
-        plt.yticks(tick_marks, target_names)
-
-    if normalize:
-        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
-
-
-    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
-    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
-        if normalize:
-            plt.text(j, i, digit_format.format(cm[i, j]),
-                     horizontalalignment="center",
-                     color="white" if cm[i, j] > thresh else "black")
-        else:
-            plt.text(j, i, "{:,}".format(cm[i, j]),
-                     horizontalalignment="center",
-                     color="white" if cm[i, j] > thresh else "black")
-
-    plt.tight_layout()
-    plt.ylabel('True label')
-    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
-    save_fig(save_as)
-    plt.show()
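-
-# Typical usage (a sketch; y_test and y_pred are hypothetical class vectors):
-#   plot_confusion_matrix(y_test, y_pred, target_names=list(range(10)), normalize=True, save_as='05-cm')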
-
-
-    
-def display_confusion_matrix(y_true,y_pred,labels=None,color='green',
-                             font_size='12pt', title="#### Confusion matrix is :"):
-    """
-    Show a confusion matrix for a predictions.
-    see : sklearn.metrics.confusion_matrix
-
-    Args:
-        y_true:       Real classes
-        y_pred:       Predicted classes
-        labels:       List of classes to show in the cm
-        color:        Color for the palette (green)
-        font_size:    Values font size 
-        title:        the text to display at the top of the matrix        
-    """
-    assert labels is not None, "labels must be set"
-
-    if title is not None :  display(Markdown(title))
-    
-    cm = confusion_matrix( y_true,y_pred, normalize="true", labels=labels)
-    df=pd.DataFrame(cm)
-
-    colorsList = ['whitesmoke','bisque']
-    cmap = matplotlib.colors.ListedColormap(colorsList)
-
-    display(df.style.format('{:.2f}')
-            .background_gradient(cmap=cmap)
-            .set_properties(**{'font-size': font_size}))
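-
-# Typical usage (a sketch; same hypothetical vectors as above):
-#   display_confusion_matrix(y_test, y_pred, labels=list(range(10)))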
-    
-    
-def plot_donut(values, labels, colors=["lightsteelblue","coral"], figsize=(6,6), title=None, save_as='auto'):
-    """
-    Draw a donut chart
-    args:
-        values   : list of values
-        labels   : list of labels
-        colors   : list of colors (["lightsteelblue","coral"])
-        figsize  : size of figure ( (6,6) )
-        title    : optional Markdown title (None)
-        save_as  : Filename to use if saving figs is enabled ('auto')
-    return:
-        nothing
-    """
-    # ---- Title or not
-    if title is not None :  display(Markdown(title))
-    # ---- Donut
-    plt.figure(figsize=figsize)
-    # ---- Draw a pie  chart..
-    plt.pie(values, labels=labels, 
-            colors = colors, autopct='%1.1f%%', startangle=70, pctdistance=0.85,
-            textprops={'fontsize': 18},
-            wedgeprops={"edgecolor":"w",'linewidth': 5, 'linestyle': 'solid', 'antialiased': True})
-    # ---- ..with a white circle
-    circle = plt.Circle((0,0),0.70,fc='white')
-    ax = plt.gca()
-    ax.add_artist(circle)
-    # Equal aspect ratio ensures that pie is drawn as a circle
-    plt.axis('equal')  
-    plt.tight_layout()
-    save_fig(save_as)
-    plt.show()
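-
-# Typical usage (a sketch, e.g. to show a train/test split):
-#   plot_donut([80,20], ['train','test'], title='**Dataset split**')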
-    
-
-    
-def plot_multivariate_serie(sequence, labels=None, predictions=None, only_features=None,
-                            columns=3, width=5,height=4,wspace=0.3,hspace=0.2,ms=6,lw=1,
-                            save_as='auto', time_dt=1, hide_ticks=False):
-    """
-    Plot a multivariate series, one subplot per feature,
-    optionally overlaying predictions on the last time steps.
-    args:
-        sequence      : series to plot - Shape must be (time, features)
-        labels        : feature labels or None (None)
-        predictions   : predicted last steps - Shape (dt, features) - or None (None)
-        only_features : indices of features to plot, or None for all (None)
-        columns       : number of columns (3)
-        save_as       : Filename to use if saving figs is enabled ('auto')
-        time_dt       : currently unused (1)
-    """
-    sequence_len = len(sequence)
-    features_len = sequence.shape[1]
-    if only_features is None : only_features=range(features_len)
-    if labels is None        : labels=range(features_len)
-
-    t  = np.arange(sequence_len)
-    if predictions is None:
-        dt   = 0
-        last = None                 # slice [:None] keeps the whole sequence
-    else:
-        dt   = len(predictions)
-        last = -dt
-        sequence_with_pred = sequence.copy()
-        sequence_with_pred[-dt:]=predictions
-
-    rows = math.ceil(features_len/columns)
-    fig  = plt.figure(figsize=(columns*width, rows*height))
-    fig.subplots_adjust(wspace=wspace, hspace=hspace)
-    n=1
-    for i in only_features:
-        ax=fig.add_subplot(rows, columns, n)
-        
-        # ---- Real sequence without prediction
-        #
-        ax.plot( t[:last],sequence[:last,i], 'o',  markersize=ms, color='C0', zorder=2)
-        ax.plot( t,sequence[:,i],            '-',  linewidth=lw,  color='C0', label=labels[i],zorder=1)
-
-        # ---- What we expect (real values over the predicted window)
-        #
-        if dt>0:
-            ax.plot(t[-dt:], sequence[-dt:,i], 'o', markeredgecolor='C0',markerfacecolor='white',ms=ms)
-
-        if predictions is not None:
-            ax.plot(t[-dt-1:], sequence_with_pred[-dt-1:,i], '--',  lw=lw, fillstyle='full',  ms=ms, color='C1',zorder=1)
-            ax.plot(t[-dt:],   predictions[:,i],             'o',   lw=lw, fillstyle='full',  ms=ms, color='C1',zorder=2)
-
-        if hide_ticks:
-            ax.set_yticks([])
-            ax.set_xticks([])
-        
-        ax.legend(loc="upper left")
-        n+=1
-    save_fig(save_as)
-    plt.show()
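-
-# Typical usage (a sketch; sequence is e.g. (100,3), pred holds the last 12 predicted steps):
-#   plot_multivariate_serie(sequence, labels=['x','y','z'], predictions=pred, save_as='06-pred')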
-
-    
-# -------------------------------------------------------------
-# Show 2d series and segments
-# -------------------------------------------------------------
-#
-
-def plot_2d_serie(data, figsize=(10,8), monocolor=False, hide_ticks=True, lw=2, ms=4, save_as='auto'):
-    """
-    Plot a 2d dataset as a trajectory
-    args:
-        data:       Dataset to plot - Shape must be (n,2)
-        figsize:    Figure size ( (10,8) )
-        monocolor:  Monocolor line or not. (False)
-        hide_ticks: Hide axis ticks (True)
-        save_as:    Filename to use if saving figs is enabled ('auto')
-    return:
-        nothing
-    """
-    # ---- Get x,y
-    #
-    n     = len(data)
-    k     = max(1, n//100)     # segment length, at least 1 to avoid empty slices
-    x,y   = data[:,0], data[:,1]
- 
-    # ---- Draw it
-    #
-    fig = plt.figure(figsize=figsize)
-    ax = plt.axes()
-
-    # ---- Monocolor or gray gradient
-    #
-    if monocolor:
-        ax.plot(x,y)
-    else:
-        for i in range(0,100):
-            a= (200-i)/200
-            ax.plot(x[i*k:(i+1)*k+1], y[i*k:(i+1)*k+1], '-', color=(a,a,a),lw=lw,zorder=1)
-
-    # ---- Last point
-    #
-    ax.plot(x[n-1], y[n-1], 'o', color='C1',ms=ms,zorder=2)
-    
-    ax.set_aspect('equal', 'box')
-    ax.set_xlabel('axis=0')
-    ax.set_ylabel('axis=1')
-    
-    if hide_ticks:
-        ax.set_yticks([])
-        ax.set_xticks([])
-
-    save_fig(save_as)
-    plt.show()
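-
-# Typical usage (a sketch; data is a hypothetical (n,2) trajectory):
-#   plot_2d_serie(data, figsize=(10,8), monocolor=False)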
-    
-    
-
-    
-def plot_2d_segment(sequence_real, sequence_pred, figsize=(10,8), ms=6, lw=1, hide_ticks=True, save_as='auto'):
-    """
-    Plot a 2d segment, real and predicted
-    args:
-        sequence_real: Real sequence - Shape must be (n,2)
-        sequence_pred: Predicted sequence - Shape must be (k,2)
-        figsize:       Figure size ( (10,8) )
-        ms:            Marker size (6)
-        lw:            Line width (1)
-        save_as:       Filename to use if saving figs is enabled ('auto')
-    return:
-        nothing
-    """
-    k = len(sequence_pred)
-    x,y = sequence_real[:,0],sequence_real[:,1]
-    u,v = sequence_pred[:,0],sequence_pred[:,1]
-    
-    fig = plt.figure(figsize=figsize)
-
-    ax = plt.axes()
-    
-    # ---- Draw real sequence without prediction
-    #
-    ax.plot(x[:-k], y[:-k],   'o', color='C0', fillstyle='full', zorder=2, ms=ms)
-    ax.plot(x, y,             '-', color='C0', lw=lw, zorder=1)
-    
-    # ---- What we expect
-    #
-    ax.plot(x[-k:], y[-k:], 'o', ms=ms, markeredgecolor='C0', markerfacecolor='white', zorder=2)
-
-    # ---- What we have
-    #
-    ax.plot(u, v,                            'o',  color='C1',fillstyle='full',zorder=2, ms=ms)
-    ax.plot( [x[-1-k],u[0]], [y[-1-k],v[0]], '--', color='C1',lw=lw, zorder=1)
-    ax.plot(u, v,                            '--', color='C1',lw=lw, zorder=1)
-
-    ax.set_aspect('equal', 'box')
-    ax.set_xlabel('axis=0')
-    ax.set_ylabel('axis=1')
-    
-    if hide_ticks:
-        ax.set_yticks([])
-        ax.set_xticks([])
-
-    save_fig(save_as)
-    plt.show()
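-
-# Typical usage (a sketch; compares the k predicted points against the real ones):
-#   plot_2d_segment(sequence_real, sequence_pred)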
-
-
-def set_save_fig(save=True, figs_dir='./run/figs', figs_name='fig_', figs_id=0):
-    """
-    Set save_fig parameters
-    Default figs name is <figs_name><figs_id>.{png|svg}
-    args:
-        save      : Boolean, True to save figs (True)
-        figs_dir  : Path to save figs ('./run/figs')
-        figs_name : Default basename for figs ('fig_')
-        figs_id   : Start id for figs name (0)
-    """
-    global _save_figs, _figs_dir, _figs_name, _figs_id
-    _save_figs = save
-    _figs_dir  = figs_dir
-    _figs_name = figs_name
-    _figs_id   = figs_id
-    print(f'Save figs            : {_save_figs}')
-    print(f'Path figs            : {_figs_dir}')
-    
-    
-def save_fig(filename='auto', png=True, svg=False):
-    """
-    Save current figure
-    args:
-        filename : Image filename ('auto')
-        png      : Boolean. Save as png if True (True)
-        svg      : Boolean. Save as svg if True (False)
-    """
-    global _save_figs, _figs_dir, _figs_name, _figs_id
-    if filename is None : return
-    if not _save_figs   : return
-    mkdir(_figs_dir)
-    if filename=='auto': 
-        path=f'{_figs_dir}/{notebook_id}-{_figs_name}{_figs_id:02d}'
-    else:
-        path=f'{_figs_dir}/{notebook_id}-{filename}'
-    if png : plt.savefig( f'{path}.png')
-    if svg : plt.savefig( f'{path}.svg')
-    if filename=='auto': _figs_id+=1
-    display_html(f'<div class="comment">Saved: {path}</div>')
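-
-# Typical usage (a sketch):
-#   set_save_fig(save=True, figs_dir='./run/figs', figs_name='fig_')
-#   plt.plot([1,2,3])
-#   save_fig('01-my-figure')     # -> ./run/figs/<notebook_id>-01-my-figure.png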
-    
-
-def subtitle(t):
-    display(Markdown(f'<br>**{t}**'))
-    
-def display_md(text):
-    display(Markdown(text))
-
-def display_html(text):
-    display(HTML(text))
-    
-def display_img(img):
-    display(Image(img))
-
-def chrono_start():
-    global _chrono_start, _chrono_stop
-    _chrono_start=time.time()
-
-# Return delay in seconds or in human-readable format
-def chrono_stop(hdelay=False):
-    global _chrono_start, _chrono_stop
-    _chrono_stop = time.time()
-    sec = _chrono_stop - _chrono_start
-    if hdelay : return hdelay_ms(sec)
-    return sec
-    
-def chrono_show():
-    print('\nDuration : ', hdelay_ms(time.time() - _chrono_start))
-    
-def hdelay(sec):
-    return str(datetime.timedelta(seconds=int(sec)))    
-    
-# Return human delay like 01:14:28 543ms
-# delay can be timedelta or seconds
-def hdelay_ms(delay):
-    if type(delay) is not datetime.timedelta:
-        delay=datetime.timedelta(seconds=delay)
-    sec = delay.total_seconds()
-    hh = sec // 3600
-    mm = (sec // 60) - (hh * 60)
-    ss = sec - hh*3600 - mm*60
-    ms = (sec - int(sec))*1000
-    return f'{hh:02.0f}:{mm:02.0f}:{ss:02.0f} {ms:03.0f}ms'
-
-def hsize(num, suffix='o'):
-    for unit in ['','K','M','G','T','P','E','Z']:
-        if abs(num) < 1024.0:
-            return f'{num:3.1f} {unit}{suffix}'
-        num /= 1024.0
-    return f'{num:.1f} Y{suffix}'
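-
-# Typical usage (a sketch):
-#   chrono_start() ; ... ; print(chrono_stop(hdelay=True))
-#   hdelay_ms(3723)       # -> '01:02:03 000ms'
-#   hsize(123456789)      # -> '117.7 Mo'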
-
-def load_cssfile(cssfile):
-    if cssfile is None: return
-    with open(cssfile, "r") as f:
-        styles = f.read()
-    display(HTML(styles))
-    
-     
-def np_print(*args, precision=3, linewidth=120):
-    with np.printoptions(precision=precision, linewidth=linewidth):
-        for a in args:
-            print(a)
-    
-     
-def end():
-    global _end_time
-    _end_time = datetime.datetime.now()
-    end_time = _end_time.strftime("%A %d %B %Y, %H:%M:%S")
-    duration = hdelay_ms(_end_time - _start_time)
-    site_url = "https://fidle.cnrs.fr"
-    md = f'**End time :** {end_time}  \n'
-    md+= f'**Duration :** {duration}  \n'
-    md+= 'This notebook ends here :-)  \n'
-    md+= f'[{site_url}]({site_url})'
-    display_md(md)
-     
-