From 63cc63b6d6806169e1378c229e1f5238479319a8 Mon Sep 17 00:00:00 2001
From: Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>
Date: Tue, 23 Jan 2024 10:57:17 +0100
Subject: [PATCH] Clean notebooks (rm AE, VAE, GAN)  (v3.0.2)

---
 AE.Keras2/01-Prepare-MNIST-dataset.ipynb      | 243 --------
 AE.Keras2/02-AE-with-MNIST.ipynb              | 437 --------------
 AE.Keras2/03-AE-with-MNIST-post.ipynb         | 306 ----------
 AE.Keras2/04-ExtAE-with-MNIST.ipynb           | 533 -----------------
 AE.Keras2/05-ExtAE-with-MNIST.ipynb           | 565 ------------------
 AE.Keras2/modules/ImagesCallback.py           |  44 --
 AE.Keras2/modules/MNIST.py                    | 178 ------
 DCGAN.Lightning/01-DCGAN-PL.ipynb             | 462 --------------
 DCGAN.Lightning/modules/Discriminators.py     | 136 -----
 DCGAN.Lightning/modules/GAN.py                | 182 ------
 DCGAN.Lightning/modules/Generators.py         |  94 ---
 .../modules/QuickDrawDataModule.py            |  71 ---
 DCGAN.Lightning/modules/SmartProgressBar.py   |  70 ---
 DCGAN.Lightning/modules/WGANGP.py             | 229 -------
 GTSRB.Keras3/04-Keras-cv.ipynb                |   6 +-
 README.ipynb                                  |  69 +--
 README.md                                     |  57 +-
 VAE.Keras2/01-VAE-with-MNIST.ipynb            | 410 -------------
 VAE.Keras2/02-VAE-with-MNIST.ipynb            | 505 ----------------
 VAE.Keras2/03-VAE-with-MNIST-post.ipynb       | 339 -----------
 .../modules/callbacks/BestModelCallback.py    |  34 --
 .../modules/callbacks/ImagesCallback.py       | 106 ----
 VAE.Keras2/modules/callbacks/__init__.py      |   2 -
 VAE.Keras2/modules/datagen/DataGenerator.py   | 148 -----
 VAE.Keras2/modules/datagen/MNIST.py           | 114 ----
 VAE.Keras2/modules/datagen/__init__.py        |   4 -
 VAE.Keras2/modules/layers/SamplingLayer.py    |  36 --
 .../modules/layers/VariationalLossLayer.py    |  60 --
 VAE.Keras2/modules/layers/__init__.py         |   2 -
 VAE.Keras2/modules/models/VAE.py              | 152 -----
 VAE.Keras2/modules/models/__init__.py         |   1 -
 .../01-VAE-lightning-with-MNIST.ipynb         | 541 -----------------
 .../02-VAE-with-Lightning-MNIST.ipynb         | 516 ----------------
 .../03-VAE-Lightning-with-MNIST-post.ipynb    | 358 -----------
 VAE.Lightning/modules/.gitkeep                |   0
 VAE.Lightning/modules/callbacks/.gitkeep      |   0
 .../modules/callbacks/BestModelCallback.py    |  29 -
 .../modules/callbacks/ImagesCallback.py       | 110 ----
 VAE.Lightning/modules/callbacks/__init__.py   |   2 -
 VAE.Lightning/modules/datagen/.gitkeep        |   0
 VAE.Lightning/modules/datagen/MNIST.py        | 132 ----
 VAE.Lightning/modules/datagen/__init__.py     |   4 -
 VAE.Lightning/modules/layers/.gitkeep         |   0
 VAE.Lightning/modules/layers/SamplingLayer.py |  35 --
 .../modules/layers/VariationalLossLayer.py    |  48 --
 VAE.Lightning/modules/layers/__init__.py      |   2 -
 VAE.Lightning/modules/models/.gitkeep         |   0
 VAE.Lightning/modules/models/Decoder.py       |  55 --
 VAE.Lightning/modules/models/Encoder.py       |  58 --
 VAE.Lightning/modules/models/VAE.py           |  96 ---
 VAE.Lightning/modules/models/__init__.py      |   3 -
 VAE.Lightning/modules/progressbar.py          |  52 --
 docker/Dockerfile                             |   4 +-
 docker/images/env-fidle.png                   | Bin 2262 -> 0 bytes
 docker/images/env-fidle.svg                   |  86 ---
 docker/images/env-keras3.png                  | Bin 0 -> 2979 bytes
 docker/images/env-keras3.svg                  |  66 ++
 fidle/about.yml                               |  10 +-
 fidle/ci/default.yml                          | 118 +---
 fidle/img/logo-YouTube.png                    | Bin 0 -> 18600 bytes
 60 files changed, 97 insertions(+), 7823 deletions(-)
 delete mode 100644 AE.Keras2/01-Prepare-MNIST-dataset.ipynb
 delete mode 100644 AE.Keras2/02-AE-with-MNIST.ipynb
 delete mode 100644 AE.Keras2/03-AE-with-MNIST-post.ipynb
 delete mode 100644 AE.Keras2/04-ExtAE-with-MNIST.ipynb
 delete mode 100644 AE.Keras2/05-ExtAE-with-MNIST.ipynb
 delete mode 100644 AE.Keras2/modules/ImagesCallback.py
 delete mode 100644 AE.Keras2/modules/MNIST.py
 delete mode 100644 DCGAN.Lightning/01-DCGAN-PL.ipynb
 delete mode 100644 DCGAN.Lightning/modules/Discriminators.py
 delete mode 100644 DCGAN.Lightning/modules/GAN.py
 delete mode 100644 DCGAN.Lightning/modules/Generators.py
 delete mode 100644 DCGAN.Lightning/modules/QuickDrawDataModule.py
 delete mode 100644 DCGAN.Lightning/modules/SmartProgressBar.py
 delete mode 100644 DCGAN.Lightning/modules/WGANGP.py
 delete mode 100644 VAE.Keras2/01-VAE-with-MNIST.ipynb
 delete mode 100644 VAE.Keras2/02-VAE-with-MNIST.ipynb
 delete mode 100644 VAE.Keras2/03-VAE-with-MNIST-post.ipynb
 delete mode 100644 VAE.Keras2/modules/callbacks/BestModelCallback.py
 delete mode 100644 VAE.Keras2/modules/callbacks/ImagesCallback.py
 delete mode 100644 VAE.Keras2/modules/callbacks/__init__.py
 delete mode 100644 VAE.Keras2/modules/datagen/DataGenerator.py
 delete mode 100644 VAE.Keras2/modules/datagen/MNIST.py
 delete mode 100644 VAE.Keras2/modules/datagen/__init__.py
 delete mode 100644 VAE.Keras2/modules/layers/SamplingLayer.py
 delete mode 100644 VAE.Keras2/modules/layers/VariationalLossLayer.py
 delete mode 100644 VAE.Keras2/modules/layers/__init__.py
 delete mode 100644 VAE.Keras2/modules/models/VAE.py
 delete mode 100644 VAE.Keras2/modules/models/__init__.py
 delete mode 100644 VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
 delete mode 100644 VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
 delete mode 100644 VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
 delete mode 100644 VAE.Lightning/modules/.gitkeep
 delete mode 100644 VAE.Lightning/modules/callbacks/.gitkeep
 delete mode 100644 VAE.Lightning/modules/callbacks/BestModelCallback.py
 delete mode 100644 VAE.Lightning/modules/callbacks/ImagesCallback.py
 delete mode 100644 VAE.Lightning/modules/callbacks/__init__.py
 delete mode 100644 VAE.Lightning/modules/datagen/.gitkeep
 delete mode 100644 VAE.Lightning/modules/datagen/MNIST.py
 delete mode 100644 VAE.Lightning/modules/datagen/__init__.py
 delete mode 100644 VAE.Lightning/modules/layers/.gitkeep
 delete mode 100644 VAE.Lightning/modules/layers/SamplingLayer.py
 delete mode 100644 VAE.Lightning/modules/layers/VariationalLossLayer.py
 delete mode 100644 VAE.Lightning/modules/layers/__init__.py
 delete mode 100644 VAE.Lightning/modules/models/.gitkeep
 delete mode 100644 VAE.Lightning/modules/models/Decoder.py
 delete mode 100644 VAE.Lightning/modules/models/Encoder.py
 delete mode 100644 VAE.Lightning/modules/models/VAE.py
 delete mode 100644 VAE.Lightning/modules/models/__init__.py
 delete mode 100644 VAE.Lightning/modules/progressbar.py
 delete mode 100644 docker/images/env-fidle.png
 delete mode 100644 docker/images/env-fidle.svg
 create mode 100644 docker/images/env-keras3.png
 create mode 100644 docker/images/env-keras3.svg
 create mode 100644 fidle/img/logo-YouTube.png

diff --git a/AE.Keras2/01-Prepare-MNIST-dataset.ipynb b/AE.Keras2/01-Prepare-MNIST-dataset.ipynb
deleted file mode 100644
index 37fac4a..0000000
--- a/AE.Keras2/01-Prepare-MNIST-dataset.ipynb
+++ /dev/null
@@ -1,243 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE1] - Prepare a noisy MNIST dataset\n",
-    "<!-- DESC --> Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Prepare a noisy MNIST dataset, usable with our denoising autoencoder (duration : <50s)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load the original MNIST dataset\n",
-    " - Add noise, a lot of it !\n",
-    " - Save it :-)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init and set parameters\n",
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import sys\n",
-    "\n",
-    "from skimage import io\n",
-    "from skimage.util import random_noise\n",
-    "\n",
-    "import modules.MNIST\n",
-    "from modules.MNIST     import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE1')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the future prepared dataset (example : ./data/mnist-noisy.h5)\\\n",
-    "`scale` : Dataset scale. 1 means 100% of the dataset - set 0.1 for tests\\\n",
-    "`progress_verbosity`: Verbosity of progress bar: 0=silent, 1=progress bar, 2=one line"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset   = './data/mnist-noisy.h5'\n",
-    "scale              = 1\n",
-    "progress_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'scale', 'progress_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get original dataset\n",
-    "We load :  \n",
-    "`clean_data` : Original and clean images - This is what we will want to obtain at the **output** of the AE  \n",
-    "`class_data` : Image classes - Useless, because the training will be unsupervised  \n",
-    "We'll build :  \n",
-    "`noisy_data` : Noisy images - These are the images that we will give as **input** to our AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_data, class_data = MNIST.get_origine(scale=scale)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Add noise\n",
-    "We add noise to the original images (clean_data) to obtain noisy images (noisy_data)  \n",
-    "This takes about 30-40 seconds"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def noise_it(data):\n",
-    "    new_data = np.copy(data)\n",
-    "    for i,image in enumerate(new_data):\n",
-    "        fidle.utils.update_progress('Add noise : ',i+1,len(data),verbosity=progress_verbosity)\n",
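-    "        # Apply several kinds of noise in a row : gaussian, salt & pepper, poisson, then speckle\n",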
-    "        image=random_noise(image, mode='gaussian', mean=0, var=0.3)\n",
-    "        image=random_noise(image, mode='s&p',      amount=0.2, salt_vs_pepper=0.5)\n",
-    "        image=random_noise(image, mode='poisson') \n",
-    "        image=random_noise(image, mode='speckle',  mean=0, var=0.1)\n",
-    "        new_data[i]=image\n",
-    "    print('Done.')\n",
-    "    return new_data\n",
-    "\n",
-    "# ---- Add noise to input data : x_data\n",
-    "#\n",
-    "noisy_data = noise_it(clean_data)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Have a look"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print('Clean dataset (clean_data) : ',clean_data.shape)\n",
-    "print('Noisy dataset (noisy_data) : ',noisy_data.shape)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Noisy images we'll have in input (or x)\")\n",
-    "fidle.scrawler.images(noisy_data[:5], None, indices='all', columns=5, x_size=3,y_size=3, interpolation=None, save_as='01-noisy')\n",
-    "fidle.utils.subtitle('Clean images we want to obtain (or y)')\n",
-    "fidle.scrawler.images(clean_data[:5], None, indices='all', columns=5, x_size=3,y_size=3, interpolation=None, save_as='02-original')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Shuffle dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "p = np.random.permutation(len(clean_data))\n",
-    "clean_data, noisy_data, class_data = clean_data[p], noisy_data[p], class_data[p]\n",
-    "print('Shuffled.')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Save our prepared dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "MNIST.save_prepared_dataset( clean_data, noisy_data, class_data, filename=prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/02-AE-with-MNIST.ipynb b/AE.Keras2/02-AE-with-MNIST.ipynb
deleted file mode 100644
index 1898715..0000000
--- a/AE.Keras2/02-AE-with-MNIST.ipynb
+++ /dev/null
@@ -1,437 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE2] - Building and training an AE denoiser model\n",
-    "<!-- DESC --> Episode 1 : Construction of a denoising autoencoder and training it on a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a denoising **autoencoder** neural network (AE)\n",
-    " - First overview or example of Keras procedural syntax\n",
-    "\n",
-    "Since the computation is fairly heavy, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining an AE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process with Tensorboard\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `denoised_test` for denoised images at the output of the model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE2')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (needs about 400 MB, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)  \n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = 123\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 30\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, _,_ = MNIST.reload_prepared_dataset(scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename=prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build models"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
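-    "# Encoder : two strided convolutions, then dense layers, map the 28x28x1 image to a latent vector of size latent_dim\n",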
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
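-    "# Chain the encoder and the decoder to get the full autoencoder\n",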
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name=\"ae\")\n",
-    "\n",
-    "ae.compile(optimizer=keras.optimizers.Adam(), loss='binary_crossentropy')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "20' on a CPU  \n",
-    "1'12 on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = ae.fit(noisy_train, clean_train,\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, clean_test),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'loss':['loss','val_loss']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,2):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`denoised_test` for denoised images at the output of the model\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test = model.predict(noisy_test)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/03-AE-with-MNIST-post.ipynb b/AE.Keras2/03-AE-with-MNIST-post.ipynb
deleted file mode 100644
index 1c77a68..0000000
--- a/AE.Keras2/03-AE-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,306 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE3] - Playing with our denoiser model\n",
-    "<!-- DESC --> Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Retrieve and use our denoiser model\n",
-    "\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Reload our dataset and saved best model\n",
-    " - Encode/decode some test images (never used, never seen by the model)\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `denoised_test` for denoised images at the output of the model\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import sys\n",
-    "import h5py\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "\n",
-    "from modules.MNIST import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "These **parameters must be identical** to those used during the training in order to have the **same dataset**.\\\n",
-    "`prepared_dataset` : Filename of the prepared dataset (needs about 400 MB, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "saved_models     = './run/AE2/models'\n",
-    "dataset_seed     = 123\n",
-    "scale            = 1\n",
-    "train_prop       = .8"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'train_prop')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)  \n",
-    "**Important :** Make sure that the **digest is identical** to the one used during the training !\\\n",
-    "See : [AE2 / Step 2 - Retrieve dataset](./02-AE-with-MNIST.ipynb#Step-2---Retrieve-dataset)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, _,_ = MNIST.reload_prepared_dataset(scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename=prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`denoised_test` for denoised images at the output of the model\n",
-    " \n",
-    "### 3.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{saved_models}/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.2 - Let's make a prediction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test = model.predict(noisy_test,verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Looking at the latent space\n",
-    "### 4.1 - Getting clean data and class"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_data,_, _,_, class_data,_ = MNIST.reload_prepared_dataset(scale      = 1, \n",
-    "                                                                train_prop = 1,\n",
-    "                                                                seed       = dataset_seed,\n",
-    "                                                                shuffle    = False,\n",
-    "                                                                filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 4.2 - Retrieve encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "encoder=model.get_layer('encoder')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 4.3 - Showing latent space\n",
-    "Here is the digit distribution in the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 20000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(clean_data, class_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z = encoder.predict(x_show)\n",
-    "\n",
-    "# ---- Show them\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('08-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/04-ExtAE-with-MNIST.ipynb b/AE.Keras2/04-ExtAE-with-MNIST.ipynb
deleted file mode 100644
index ed99c0c..0000000
--- a/AE.Keras2/04-ExtAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,533 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE4] - Denoiser and classifier model\n",
-    "<!-- DESC --> Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Building a multiple output model, able to **denoise** and **classify**\n",
-    " - Understanding a more **advanced programming model**\n",
-    "\n",
-    "Since the computation is fairly heavy, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a multiple output model using the Keras procedural programming model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `class_train`, `class_test` for the classes to which the images belong \n",
-    "- `denoised_test` for denoised images at the output of the model\n",
-    "- `classcat_test` for the class predictions at the model output (a softmax)\n",
-    "- `classid_test` for the predicted class ids (ie: argmax of classcat_test)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE4')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (needs about 400 MB, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset. 'None' means using /dev/urandom  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)  \n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = None\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 30\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, class_train,class_test = MNIST.reload_prepared_dataset(\n",
-    "                                                                                    scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build models"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name='ae')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
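-    "# Classification branch : a small CNN that predicts the digit class directly from the (noisy) input image\n",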
-    "hidden1     = 100\n",
-    "hidden2     = 100\n",
-    "\n",
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(8, (3,3),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(16, (3,3), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(100, activation='relu')(x)\n",
-    "x         = keras.layers.Dropout(0.5)(x)\n",
-    "\n",
-    "outputs   = keras.layers.Dense(10, activation='softmax')(x)\n",
-    "\n",
-    "cnn       = keras.Model(inputs, outputs, name='cnn')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Final model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "denoised = ae(inputs)\n",
-    "classcat = cnn(inputs)\n",
-    "\n",
-    "model = keras.Model(inputs, [denoised, classcat])\n",
-    "\n",
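-    "# One loss per output, keyed by the sub-model names ('ae' and 'cnn') ; accuracy is tracked on the cnn output only\n",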
-    "model.compile(optimizer='rmsprop', \n",
-    "              loss={'ae':'binary_crossentropy', 'cnn':'sparse_categorical_crossentropy'},\n",
-    "              loss_weights=[1,1],\n",
-    "              metrics={'cnn':'accuracy'} )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "20' on a CPU  \n",
-    "1'12 on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = model.fit(noisy_train, [clean_train, class_train],\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, [clean_test, class_test]),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'Loss':['loss', 'ae_loss', 'cnn_loss'],\n",
-    "                                 'Validation loss':['val_loss','val_ae_loss', 'val_cnn_loss'], \n",
-    "                                 'Accuracy':['cnn_accuracy','val_cnn_accuracy']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,4):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`class_train`, `class_test` for the classes to which the images belong \\\n",
-    "`denoised_test` for denoised images at the output of the model\\\n",
-    "`classcat_test` for class prediction in model output (is a softmax)\\\n",
-    "`classid_test` class prediction (ie: argmax of classcat_test)\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction\n",
-    "Note that our model will return 2 outputs : **denoised images** from output 1 and **class predictions** from output 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test, classcat_test = model.predict(noisy_test, verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)\n",
-    "print('Predicted classes (classcat_test) shape : ',classcat_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Class prediction\n",
-    "Note: The evaluation requires the noisy images as input (noisy_test) and the 2 expected outputs:\n",
-    " - the images without noise (clean_test)\n",
-    " - the classes (class_test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "score = model.evaluate(noisy_test, [clean_test, class_test], verbose=0)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Accuracy :\")\n",
-    "print(f'Classification accuracy : {score[3]:4.4f}')\n",
-    "\n",
-    "fidle.utils.subtitle(\"Few examples :\")\n",
-    "classid_test  = np.argmax(classcat_test, axis=-1)\n",
-    "fidle.scrawler.images(noisy_test, class_test, range(0,200), columns=12, x_size=1, y_size=1, y_pred=classid_test, save_as='04-predictions')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/05-ExtAE-with-MNIST.ipynb b/AE.Keras2/05-ExtAE-with-MNIST.ipynb
deleted file mode 100644
index 21eccfb..0000000
--- a/AE.Keras2/05-ExtAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,565 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE5] - Advanced denoiser and classifier model\n",
-    "<!-- DESC --> Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Building a multiple output model, able to **denoise** and **classify**\n",
-    " - Understanding an even more **advanced programming model**\n",
-    "\n",
-    "Since the computation is fairly heavy, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a multiple output model using the Keras procedural programming model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `class_train`, `class_test` for the classes to which the images belong \n",
-    "- `denoised_test` for denoised images at the output of the model\n",
-    "- `classcat_test` for the class predictions at the model output (a softmax)\n",
-    "- `classid_test` for the predicted class ids (ie: argmax of classcat_test)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (needs about 400 MB, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)  \n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = None\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 20\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, class_train,class_test = MNIST.reload_prepared_dataset(\n",
-    "                                                                                    scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name='ae')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(8, (3,3),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(16, (3,3), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(100, activation='relu')(x)\n",
-    "outputs   = keras.layers.Dropout(0.5)(x)\n",
-    "\n",
-    "cnn1       = keras.Model(inputs, outputs, name='cnn1')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(32, (5,5),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(64, (5,5), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(50, activation='relu')(x)\n",
-    "outputs   = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "cnn2       = keras.Model(inputs, outputs, name='cnn2')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Final model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "denoised = ae(inputs)\n",
-    "\n",
-    "branch_1 = cnn1(inputs)\n",
-    "branch_2 = cnn2(inputs)\n",
-    "\n",
-    "x        = keras.layers.concatenate([branch_1,branch_2], axis=1)\n",
-    "\n",
-    "classcat = keras.layers.Dense(10, activation='softmax', name='cnn')(x)\n",
-    "\n",
-    "\n",
-    "model = keras.Model(inputs, [denoised, classcat])\n",
-    "\n",
-    "model.compile(optimizer='rmsprop', \n",
-    "              loss={'ae':'binary_crossentropy', 'cnn':'sparse_categorical_crossentropy'},\n",
-    "              loss_weights=[1,1],\n",
-    "              metrics={'cnn':'accuracy'} )"
-   ]
-  },
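With two named outputs, Keras optimizes a weighted sum of the two losses; what the training history reports as 'loss' is that combination. A minimal sketch of the combination, purely illustrative (the names 'ae'/'cnn' and the weights [1,1] come from the compile cell above):

def total_loss(ae_loss, cnn_loss, loss_weights=(1, 1)):
    # What Keras reports as 'loss' when loss_weights=[1,1] : the weighted sum of both outputs' losses
    w_ae, w_cnn = loss_weights
    return w_ae * ae_loss + w_cnn * cnn_loss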
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "20' on a CPU  \n",
-    "1'30 on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = model.fit(noisy_train, [clean_train, class_train],\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, [clean_test, class_test]),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'Loss':['loss', 'ae_loss', 'cnn_loss'],\n",
-    "                                 'Validation loss':['val_loss','val_ae_loss', 'val_cnn_loss'], \n",
-    "                                 'Accuracy':['cnn_accuracy','val_cnn_accuracy']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,4):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`class_train`, `class_test` for the classes to which the images belong \\\n",
-    "`denoised_test` for denoised images at the output of the model\\\n",
-    "`classcat_test` for class prediction in model output (is a softmax)\\\n",
-    "`classid_test` class prediction (ie: argmax of classcat_test)\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction\n",
-    "Note that our model will returns 2 outputs : **denoised images** from output 1 and **class prediction** from output 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test, classcat_test = model.predict(noisy_test, verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)\n",
-    "print('Predicted classes (classcat_test) shape : ',classcat_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Class prediction\n",
-    "Note: The evaluation requires the noisy images as input (noisy_test) and the 2 expected outputs:\n",
-    " - the images without noise (clean_test)\n",
-    " - the classes (class_test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "score = model.evaluate(noisy_test, [clean_test, class_test], verbose=0)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Accuracy :\")\n",
-    "print(f'Classification accuracy : {score[3]:4.4f}')\n",
-    "\n",
-    "fidle.utils.subtitle(\"Few examples :\")\n",
-    "classid_test  = np.argmax(classcat_test, axis=-1)\n",
-    "fidle.scrawler.images(noisy_test, class_test, range(0,200), columns=12, x_size=1, y_size=1, y_pred=classid_test, save_as='04-predictions')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/modules/ImagesCallback.py b/AE.Keras2/modules/ImagesCallback.py
deleted file mode 100644
index 5962bb9..0000000
--- a/AE.Keras2/modules/ImagesCallback.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import matplotlib.pyplot as plt
-
-class ImagesCallback(Callback):
-    
-   
-    def __init__(self, filename='image-{epoch:03d}-{i:02d}.jpg', 
-                       x=None,
-                       encoder=None, decoder=None):
-        self.filename  = filename
-        self.x         = x
-        self.encoder   = encoder
-        self.decoder   = decoder
-        if len(x)>100:
-            print('***Warning : The number of images is reduced to 100')
-            self.x=x[:100]
-        
-    def on_epoch_end(self, epoch, logs={}):  
-        
-        # ---- Get latent points
-        #
-        z_new  = self.encoder.predict(self.x)
-        
-        # ---- Predict an image
-        #
-        images = self.decoder.predict(np.array(z_new))
-        
-        # ---- Save images
-        #
-        for i,image in enumerate(images):
-            
-            # ---- Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
-            #
-            image = image.squeeze()
-        
-            # ---- Save it
-            #
-            filename = self.filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filename, image, cmap='gray_r')
-            else:
-                plt.imsave(filename, image)
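A minimal usage sketch for this callback (the names encoder, decoder, noisy_train, clean_train and run_dir are assumptions here; the notebooks above wire it up the same way):

callback_images = ImagesCallback(filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg',
                                 x = clean_test[:5],
                                 encoder = encoder, decoder = decoder)
model.fit(noisy_train, clean_train, epochs=10, callbacks=[callback_images])

One decoded image per monitored sample is written at the end of every epoch, which is what the "Denoising progress" cells of the notebooks re-read afterwards.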
diff --git a/AE.Keras2/modules/MNIST.py b/AE.Keras2/modules/MNIST.py
deleted file mode 100644
index 6e040a6..0000000
--- a/AE.Keras2/modules/MNIST.py
+++ /dev/null
@@ -1,178 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-import h5py
-import os
-import numpy as np
-from hashlib import blake2b
-import tensorflow as tf
-import tensorflow.keras.datasets.mnist as mnist
-
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class makes it easy to manage datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_origine(cls, scale=1, normalize=True, expand=True, concatenate=True):
-        """
-        Return original MNIST dataset
-        args:
-            scale       : Proportion of the requested dataset
-            normalize   : Normalize dataset or not (True)
-            expand      : Reshape images as (28,28,1) instead of (28,28) (True)
-            concatenate : Concatenate train and test sets (True)
-        returns:
-            x_data,y_data                   if concatenate is True
-            x_train,y_train,x_test,y_test   if concatenate is False
-        """
-
-        # ---- Get data
-        #
-        (x_train, y_train), (x_test, y_test) = mnist.load_data()
-        print('Dataset loaded.')
-        
-        # ---- Normalization
-        #
-        if normalize:
-            x_train = x_train.astype('float32') / 255.
-            x_test  = x_test.astype( 'float32') / 255.
-            print('Normalized.')
-            
-        # ---- Reshape : (28,28) -> (28,28,1)
-        #
-        if expand:
-            x_train = np.expand_dims(x_train, axis=-1)
-            x_test  = np.expand_dims(x_test,  axis=-1)
-            print('Reshaped.')
-
-        # ---- scale
-        #
-        n1 = int(len(x_train)*scale)
-        n2 = int(len(x_test)*scale)
-        x_train = x_train[:n1]
-        y_train = y_train[:n1]
-        x_test  = x_test[:n2]
-        y_test  = y_test[:n2]
-
-        # ---- Concatenate
-        #
-        if concatenate:
-            x_data = np.concatenate([x_train, x_test], axis=0)
-            y_data = np.concatenate([y_train, y_test])
-            print('Concatenated.')
-            print('x shape :', x_data.shape)
-            print('y shape :', y_data.shape)
-            return x_data,y_data
-        else:
-            print('x_train shape :', x_train.shape)
-            print('y_train shape :', y_train.shape)
-            print('x_test  shape :', x_test.shape)
-            print('y_test  shape :', y_test.shape)
-            return x_train,y_train,x_test,y_test
-        
-        
-    @classmethod
-    def save_prepared_dataset(cls, clean_data, noisy_data, class_data, filename='./data/mnist-noisy.h5'):
-        """
-        Save a prepared dataset in a h5 file
-        args:
-            clean_data, noisy_data, class_data : clean, noisy and class dataset
-            filename                      : filename
-        return:
-            None
-        """
-        path=os.path.dirname(filename)
-        os.makedirs(path, mode=0o750, exist_ok=True)
-
-        with h5py.File(filename, "w") as f:
-            f.create_dataset("clean_data", data=clean_data)
-            f.create_dataset("noisy_data", data=noisy_data)
-            f.create_dataset("class_data", data=class_data)
-        print('Saved.')
-        print('clean_data shape is : ',clean_data.shape)
-        print('noisy_data shape is : ',noisy_data.shape)
-        print('class_data shape is : ',class_data.shape)
-            
-            
-    @classmethod    
-    def reload_prepared_dataset(cls, scale=1., train_prop=0.8, shuffle=True, seed=False, filename='./data/mnist-noisy.h5'):
-        """
-        Reload a saved dataset
-        args:
-            scale      : Scale of dataset to use. 1. means 100% (1.)
-            train_prop : Ratio of train/test
-            shuffle    : Shuffle data if True
-            seed       : Random seed value. False means no seed, None means using /dev/urandom (None)
-            filename   : filename of the prepared dataset
-        returns:
-            clean_train,clean_test, noisy_train,noisy_test, class_train,class_test
-        """
-        # ---- Load saved dataset
-        #
-        with  h5py.File(filename,'r') as f:
-            clean_data  = f['clean_data'][:]
-            noisy_data  = f['noisy_data'][:]
-            class_data  = f['class_data'][:]
-        print('Loaded.')
-        
-        # ---- Rescale
-        #
-        n = int(scale*len(clean_data))
-        clean_data, noisy_data, class_data = clean_data[:n], noisy_data[:n], class_data[:n]
-        print(f'rescaled ({scale}).') 
-        
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            print(f'Seeded ({seed})')
-        
-        # ---- Shuffle
-        #
-        if shuffle:
-            p = np.random.permutation(len(clean_data))
-            clean_data, noisy_data, class_data = clean_data[p], noisy_data[p], class_data[p]
-            print('Shuffled.')
-        
-        # ---- Split
-        #
-        n=int(len(clean_data)*train_prop)
-        clean_train, clean_test = clean_data[:n], clean_data[n:]
-        noisy_train, noisy_test = noisy_data[:n], noisy_data[n:]
-        class_train, class_test = class_data[:n], class_data[n:]
-        print(f'Split ({train_prop}).')
-
-        # ---- Hash
-        #
-        h = blake2b(digest_size=10)
-        for a in [clean_train,clean_test, noisy_train,noisy_test, class_train,class_test]:
-            h.update(a)
-        
-        print('clean_train shape is : ', clean_train.shape)
-        print('clean_test  shape is : ', clean_test.shape)
-        print('noisy_train shape is : ', noisy_train.shape)
-        print('noisy_test  shape is : ', noisy_test.shape)
-        print('class_train shape is : ', class_train.shape)
-        print('class_test  shape is : ', class_test.shape)
-        print('Blake2b digest is    : ', h.hexdigest())
-        return  clean_train,clean_test, noisy_train,noisy_test, class_train,class_test
\ No newline at end of file
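A minimal round-trip sketch for this class (the Gaussian noise below is only an assumed placeholder; the actual noise model lives in the dataset-preparation notebook):

import numpy as np
clean, classes = MNIST.get_origine(scale=1, concatenate=True)
noisy = np.clip(clean + np.random.normal(0, 0.3, size=clean.shape), 0., 1.).astype('float32')  # assumed noise model
MNIST.save_prepared_dataset(clean, noisy, classes, filename='./data/mnist-noisy.h5')
clean_train, clean_test, noisy_train, noisy_test, class_train, class_test = \
    MNIST.reload_prepared_dataset(scale=0.1, train_prop=0.8, filename='./data/mnist-noisy.h5')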
diff --git a/DCGAN.Lightning/01-DCGAN-PL.ipynb b/DCGAN.Lightning/01-DCGAN-PL.ipynb
deleted file mode 100644
index b0153ec..0000000
--- a/DCGAN.Lightning/01-DCGAN-PL.ipynb
+++ /dev/null
@@ -1,462 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LSHEEP3] - A DCGAN to Draw a Sheep, using Pytorch Lightning\n",
-    "<!-- DESC --> \"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Build and train a DCGAN model with the Quick Draw dataset\n",
-    " - Understanding DCGAN\n",
-    "\n",
-    "The [Quick draw dataset](https://quickdraw.withgoogle.com/data) contains about 50.000.000 drawings, made by real people...  \n",
-    "We are using a subset of 117.555 of Sheep drawings  \n",
-    "To get the dataset : [https://github.com/googlecreativelab/quickdraw-dataset](https://github.com/googlecreativelab/quickdraw-dataset)  \n",
-    "Datasets in numpy bitmap file : [https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap](https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap)   \n",
-    "Sheep dataset : [https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy](https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy) (94.3 Mo)\n",
-    "\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Have a look to the dataset\n",
-    " - Defining a GAN model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look of the results"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init and parameters\n",
-    "#### Python init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import sys\n",
-    "import shutil\n",
-    "\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "import torch.nn.functional as F\n",
-    "import torchvision\n",
-    "import torchvision.transforms as transforms\n",
-    "from lightning import LightningDataModule, LightningModule, Trainer\n",
-    "from lightning.pytorch.callbacks.progress.tqdm_progress import TQDMProgressBar\n",
-    "from lightning.pytorch.callbacks.progress.base          import ProgressBarBase\n",
-    "from lightning.pytorch.callbacks                        import ModelCheckpoint\n",
-    "from lightning.pytorch.loggers.tensorboard              import TensorBoardLogger\n",
-    "\n",
-    "from tqdm import tqdm\n",
-    "from torch.utils.data import DataLoader\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "from modules.SmartProgressBar    import SmartProgressBar\n",
-    "from modules.QuickDrawDataModule import QuickDrawDataModule\n",
-    "\n",
-    "from modules.GAN                 import GAN\n",
-    "from modules.WGANGP              import WGANGP\n",
-    "from modules.Generators          import *\n",
-    "from modules.Discriminators      import *\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LSHEEP3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Few parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "latent_dim          = 128\n",
-    "\n",
-    "gan_class           = 'WGANGP'\n",
-    "generator_class     = 'Generator_2'\n",
-    "discriminator_class = 'Discriminator_3'    \n",
-    "    \n",
-    "scale               = 0.001\n",
-    "epochs              = 3\n",
-    "lr                  = 0.0001\n",
-    "b1                  = 0.5\n",
-    "b2                  = 0.999\n",
-    "batch_size          = 32\n",
-    "num_img             = 48\n",
-    "fit_verbosity       = 2\n",
-    "    \n",
-    "dataset_file        = datasets_dir+'/QuickDraw/origine/sheep.npy' \n",
-    "data_shape          = (28,28,1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Cleaning"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# You can comment these lines to keep each run...\n",
-    "shutil.rmtree(f'{run_dir}/figs', ignore_errors=True)\n",
-    "shutil.rmtree(f'{run_dir}/models', ignore_errors=True)\n",
-    "shutil.rmtree(f'{run_dir}/tb_logs', ignore_errors=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get some nice data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Get a Nice DataModule\n",
-    "Our DataModule is defined in [./modules/QuickDrawDataModule.py](./modules/QuickDrawDataModule.py)   \n",
-    "This is a [LightningDataModule](https://pytorch-lightning.readthedocs.io/en/stable/data/datamodule.html)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "dm = QuickDrawDataModule(dataset_file, scale, batch_size, num_workers=8)\n",
-    "dm.setup()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Have a look"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl         = dm.train_dataloader()\n",
-    "batch_data = next(iter(dl))\n",
-    "\n",
-    "fidle.scrawler.images( batch_data.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Get a nice GAN model\n",
-    "\n",
-    "Our Generators are defined in [./modules/Generators.py](./modules/Generators.py)  \n",
-    "Our Discriminators are defined in [./modules/Discriminators.py](./modules/Discriminators.py)  \n",
-    "\n",
-    "\n",
-    "Our GAN is defined in [./modules/GAN.py](./modules/GAN.py)  \n",
-    "\n",
-    "#### Class loader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def get_class(class_name):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    return class_\n",
-    "    \n",
-    "def get_instance(class_name, **args):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    instance_ = class_(**args)\n",
-    "    return instance_"
-   ]
-  },
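Both helpers look the class up in the __main__ module, so any generator/discriminator class imported (or defined) in the notebook can be selected simply by its name. For example (values taken from the parameter cell above; purely illustrative):

generator      = get_instance(generator_class, latent_dim=latent_dim, data_shape=data_shape)
Discriminator_ = get_class(discriminator_class)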
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Basic test - Just to be sure it (could) works... ;-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "# ---- A little piece of black magic to instantiate a class from its name\n",
-    "#\n",
-    "def get_classByName(class_name, **args):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    instance_ = class_(**args)\n",
-    "    return instance_\n",
-    "\n",
-    "# ----Get it, and play with them\n",
-    "#\n",
-    "print('\\nInstantiation :\\n')\n",
-    "\n",
-    "Generator_     = get_class(generator_class)\n",
-    "Discriminator_ = get_class(discriminator_class)\n",
-    "\n",
-    "generator     = Generator_( latent_dim=latent_dim, data_shape=data_shape)\n",
-    "discriminator = Discriminator_( latent_dim=latent_dim, data_shape=data_shape)\n",
-    "\n",
-    "print('\\nFew tests :\\n')\n",
-    "z = torch.randn(batch_size, latent_dim)\n",
-    "print('z size        : ',z.size())\n",
-    "\n",
-    "fake_img = generator.forward(z)\n",
-    "print('fake_img      : ', fake_img.size())\n",
-    "\n",
-    "p = discriminator.forward(fake_img)\n",
-    "print('pred fake     : ', p.size())\n",
-    "\n",
-    "print('batch_data    : ',batch_data.size())\n",
-    "\n",
-    "p = discriminator.forward(batch_data)\n",
-    "print('pred real     : ', p.size())\n",
-    "\n",
-    "nimg = fake_img.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "print(fake_img.size())\n",
-    "print(batch_data.size())\n",
-    "e = torch.distributions.uniform.Uniform(0, 1).sample([32,1])\n",
-    "e = e[:None,None,None]\n",
-    "i = fake_img * e + (1-e)*batch_data\n",
-    "\n",
-    "\n",
-    "nimg = i.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')\n"
-   ]
-  },
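The interpolated images built above are exactly what the WGAN-GP loss penalizes : the critic's gradient norm is pushed towards 1 on random points sampled between real and fake images. A minimal sketch of that penalty term, assuming the critic takes NHWC tensors as in this notebook (lambda_gp=10 matches the WGANGP default; the autograd wiring is illustrative, the actual implementation lives in ./modules/WGANGP.py):

import torch

def gradient_penalty(critic, real, fake, lambda_gp=10.0):
    # epsilon broadcast over the image dims, as in the cell above : (N,1) -> (N,1,1,1)
    e = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (e * real + (1 - e) * fake).requires_grad_(True)
    crit = critic(interp)
    # gradient of the critic output w.r.t. the interpolated images
    grads = torch.autograd.grad(outputs=crit, inputs=interp,
                                grad_outputs=torch.ones_like(crit),
                                create_graph=True)[0]
    grad_norm = grads.flatten(start_dim=1).norm(2, dim=1)
    return lambda_gp * ((grad_norm - 1) ** 2).mean()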
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### GAN model\n",
-    "To simplify our code, the GAN class is defined separately in the module [./modules/GAN.py](./modules/GAN.py)  \n",
-    "Passing the classe names for generator/discriminator by parameter allows to stay modular and to use the PL checkpoints."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "GAN_           = get_class(gan_class)\n",
-    "\n",
-    "gan = GAN_( data_shape          = data_shape,\n",
-    "            lr                  = lr,\n",
-    "            b1                  = b1,\n",
-    "            b2                  = b2,\n",
-    "            batch_size          = batch_size, \n",
-    "            latent_dim          = latent_dim, \n",
-    "            generator_class     = generator_class, \n",
-    "            discriminator_class = discriminator_class)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train it !\n",
-    "#### Instantiate Callbacks, Logger & co.\n",
-    "More about :\n",
-    "- [Checkpoints](https://pytorch-lightning.readthedocs.io/en/stable/common/checkpointing_basic.html)\n",
-    "- [modelCheckpoint](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html#pytorch_lightning.callbacks.ModelCheckpoint)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "# ---- for tensorboard logs\n",
-    "#\n",
-    "logger       = TensorBoardLogger(       save_dir       = f'{run_dir}',\n",
-    "                                        name           = 'tb_logs'  )\n",
-    "\n",
-    "log_dir = os.path.abspath(f'{run_dir}/tb_logs')\n",
-    "print('To access the logs with tensorboard, use this command line :')\n",
-    "print(f'tensorboard --logdir {log_dir}')\n",
-    "\n",
-    "# ---- To save checkpoints\n",
-    "#\n",
-    "callback_checkpoints = ModelCheckpoint( dirpath        = f'{run_dir}/models', \n",
-    "                                        filename       = 'bestModel', \n",
-    "                                        save_top_k     = 1, \n",
-    "                                        save_last      = True,\n",
-    "                                        every_n_epochs = 1, \n",
-    "                                        monitor        = \"g_loss\")\n",
-    "\n",
-    "# ---- To have a nive progress bar\n",
-    "#\n",
-    "callback_progressBar = SmartProgressBar(verbosity=2)          # Usable evertywhere\n",
-    "# progress_bar = TQDMProgressBar(refresh_rate=1)              # Usable in real jupyter lab (bug in vscode)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Train it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "trainer = Trainer(\n",
-    "    accelerator        = \"auto\",\n",
-    "    max_epochs         = epochs,\n",
-    "    callbacks          = [callback_progressBar, callback_checkpoints],\n",
-    "    log_every_n_steps  = batch_size,\n",
-    "    logger             = logger\n",
-    ")\n",
-    "\n",
-    "trainer.fit(gan, dm)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Reload our best model\n",
-    "Note : "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "gan = WGANGP.load_from_checkpoint('./run/SHEEP3/models/bestModel.ckpt')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "nb_images = 96\n",
-    "\n",
-    "z = torch.randn(nb_images, latent_dim)\n",
-    "print('z size        : ',z.size())\n",
-    "\n",
-    "fake_img = gan.generator.forward(z)\n",
-    "print('fake_img      : ', fake_img.size())\n",
-    "\n",
-    "nimg = fake_img.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(nb_images), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "fidle-env",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/DCGAN.Lightning/modules/Discriminators.py b/DCGAN.Lightning/modules/Discriminators.py
deleted file mode 100644
index bdbaa79..0000000
--- a/DCGAN.Lightning/modules/Discriminators.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     GAN / Discriminators
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-import numpy as np
-import torch.nn as nn
-
-class Discriminator_1(nn.Module):
-    '''
-    A basic DNN discriminator, usable with classic GAN
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 1     : ',data_shape,' to sigmoid')
-
-        self.model = nn.Sequential(
-
-            nn.Flatten(),
-            nn.Linear(int(np.prod(data_shape)), 512),
-            nn.ReLU(),
-            
-            nn.Linear(512, 256),
-            nn.ReLU(),
-
-            nn.Linear(256, 1),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, img):
-        validity = self.model(img)
-
-        return validity
-
-
-
-
-class Discriminator_2(nn.Module):
-    '''
-    A more efficient discriminator, based on a CNN, usable with a classic GAN
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 2     : ',data_shape,' to sigmoid')
-
-        self.model = nn.Sequential(
-
-            nn.Conv2d(1, 32, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(32),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(32, 64, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(64),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(128, 256, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-            nn.Dropout2d(0.25),
-
-            nn.Flatten(),
-            nn.Linear(12544, 1),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, img):
-        img_nchw = img.permute(0, 3, 1, 2) # reformat from NHWC to NCHW
-        validity = self.model(img_nchw)
-
-        return validity
-
-
-        
-class Discriminator_3(nn.Module):
-    '''
-    A CNN discriminator, usable with a WGANGP.
-    This discriminator has no sigmoid and returns a critic value, not a probability
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 3     : ',data_shape,' to critic value (no sigmoid)')
-
-        self.model = nn.Sequential(
-
-            nn.Conv2d(1, 32, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(32),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(32, 64, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(64),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(128, 256, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-            nn.Dropout2d(0.25),
-
-            nn.Flatten(),
-            nn.Linear(12544, 1),
-        )
-
-    def forward(self, img):
-        img_nchw = img.permute(0, 3, 1, 2) # reformat from NHWC to NCHW
-        validity = self.model(img_nchw)
-
-        return validity
\ No newline at end of file
diff --git a/DCGAN.Lightning/modules/GAN.py b/DCGAN.Lightning/modules/GAN.py
deleted file mode 100644
index cf5a569..0000000
--- a/DCGAN.Lightning/modules/GAN.py
+++ /dev/null
@@ -1,182 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                GAN / GAN LightningModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import sys
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision
-from lightning import LightningModule
-
-
-class GAN(LightningModule):
-
-    # -------------------------------------------------------------------------
-    # Init
-    # -------------------------------------------------------------------------
-    #
-    def __init__(
-        self,
-        data_shape          = (None,None,None),
-        latent_dim          = None,
-        lr                  = 0.0002,
-        b1                  = 0.5,
-        b2                  = 0.999,
-        batch_size          = 64,
-        generator_class     = None,
-        discriminator_class = None,
-        **kwargs,
-    ):
-        super().__init__()
-
-        print('\n---- GAN initialization --------------------------------------------')
-
-        # ---- Hyperparameters
-        #
-        # Enable Lightning to store all the provided arguments under the self.hparams attribute.
-        # These hyperparameters will also be stored within the model checkpoint.
-        #
-        self.save_hyperparameters()
-
-        print('Hyperparameters are :')
-        for name,value in self.hparams.items():
-            print(f'{name:24s} : {value}')
-
-        # ---- Generator/Discriminator instantiation
-        #
-        # self.generator     = Generator(latent_dim=self.hparams.latent_dim, img_shape=data_shape)
-        # self.discriminator = Discriminator(img_shape=data_shape)
-
-        print('Submodels :')
-        module=sys.modules['__main__']
-        class_g = getattr(module, generator_class)
-        class_d = getattr(module, discriminator_class)
-        self.generator     = class_g( latent_dim=latent_dim, data_shape=data_shape)
-        self.discriminator = class_d( latent_dim=latent_dim, data_shape=data_shape)
-
-        # ---- Validation and example data
-        #
-        self.validation_z        = torch.randn(8, self.hparams.latent_dim)
-        self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
-
-
-    def forward(self, z):
-        return self.generator(z)
-
-
-    def adversarial_loss(self, y_hat, y):
-        return F.binary_cross_entropy(y_hat, y)
-
-
-    def training_step(self, batch, batch_idx, optimizer_idx):
-        imgs       = batch
-        batch_size = batch.size(0)
-
-        # ---- Get some latent space vectors
-        #      We use type_as() to make sure we initialize z on the right device (GPU/CPU).
-        #
-        z = torch.randn(batch_size, self.hparams.latent_dim)
-        z = z.type_as(imgs)
-
-        # ---- Train generator
-        #      Generator use optimizer #0
-        #      We try to generate false images that could mislead the discriminator
-        #
-        if optimizer_idx == 0:
-
-            # Generate fake images
-            self.fake_imgs = self.generator.forward(z)
-
-            # Assemble labels that say all images are real, yes it's a lie ;-)
-            # put on GPU because we created this tensor inside training_loop
-            misleading_labels = torch.ones(batch_size, 1)
-            misleading_labels = misleading_labels.type_as(imgs)
-
-            # Adversarial loss is binary cross-entropy
-            g_loss = self.adversarial_loss(self.discriminator.forward(self.fake_imgs), misleading_labels)
-            self.log("g_loss", g_loss, prog_bar=True)
-            return g_loss
-
-        # ---- Train discriminator
-        #      Discriminator use optimizer #1
-        #      We try to make the difference between fake images and real ones 
-        #
-        if optimizer_idx == 1:
-            
-            # These images are reals
-            real_labels = torch.ones(batch_size, 1)
-            real_labels = real_labels.type_as(imgs)
-            pred_labels = self.discriminator.forward(imgs)
-
-            real_loss   = self.adversarial_loss(pred_labels, real_labels)
-
-            # These images are fake
-            fake_imgs   = self.generator.forward(z)
-            fake_labels = torch.zeros(batch_size, 1)
-            fake_labels = fake_labels.type_as(imgs)
-
-            fake_loss   = self.adversarial_loss(self.discriminator(fake_imgs.detach()), fake_labels)
-
-            # Discriminator loss is the average
-            d_loss = (real_loss + fake_loss) / 2
-            self.log("d_loss", d_loss, prog_bar=True)
-            return d_loss
-
-
-    def configure_optimizers(self):
-
-        lr = self.hparams.lr
-        b1 = self.hparams.b1
-        b2 = self.hparams.b2
-
-        # With a GAN, we need 2 separate optimizers.
-        # opt_g to optimize the generator      #0
-        # opt_d to optimize the discriminator  #1
-        # opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr, betas=(b1, b2))
-        # opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2),)
-        opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr)
-        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr)
-        return [opt_g, opt_d], []
-
-
-    def training_epoch_end(self, outputs):
-
-        # Get our validation latent vectors as z
-        # z = self.validation_z.type_as(self.generator.model[0].weight)
-
-        # ---- Log Graph
-        #
-        if(self.current_epoch==1):
-            sampleImg=torch.rand((1,28,28,1))
-            sampleImg=sampleImg.type_as(self.generator.model[0].weight)
-            self.logger.experiment.add_graph(self.discriminator,sampleImg)
-
-        # ---- Log d_loss/epoch
-        #
-        g_loss, d_loss = 0,0
-        for metrics in outputs:
-            g_loss+=float( metrics[0]['loss'] )
-            d_loss+=float( metrics[1]['loss'] )
-        g_loss, d_loss = g_loss/len(outputs), d_loss/len(outputs)
-        self.logger.experiment.add_scalar("g_loss/epochs",g_loss, self.current_epoch)
-        self.logger.experiment.add_scalar("d_loss/epochs",d_loss, self.current_epoch)
-
-        # ---- Log some of these images
-        #
-        z = torch.randn(self.hparams.batch_size, self.hparams.latent_dim)
-        z = z.type_as(self.generator.model[0].weight)
-        sample_imgs = self.generator(z)
-        sample_imgs = sample_imgs.permute(0, 3, 1, 2) # from NHWC to NCHW
-        grid = torchvision.utils.make_grid(tensor=sample_imgs, nrow=12, )
-        self.logger.experiment.add_image(f"Generated images", grid,self.current_epoch)
diff --git a/DCGAN.Lightning/modules/Generators.py b/DCGAN.Lightning/modules/Generators.py
deleted file mode 100644
index 9b104d5..0000000
--- a/DCGAN.Lightning/modules/Generators.py
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                         GAN / Generators
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import numpy as np
-import torch.nn as nn
-
-
-class Generator_1(nn.Module):
-
-    def __init__(self, latent_dim=None, data_shape=None):
-        super().__init__()
-        self.latent_dim = latent_dim
-        self.img_shape  = data_shape
-        print('init generator 1         : ',latent_dim,' to ',data_shape)
-
-        self.model = nn.Sequential(
-            
-            nn.Linear(latent_dim, 128),
-            nn.ReLU(),
-
-            nn.Linear(128,256),
-            nn.BatchNorm1d(256, 0.8),
-            nn.ReLU(),
-
-            nn.Linear(256, 512),
-            nn.BatchNorm1d(512, 0.8),
-            nn.ReLU(),
-
-            nn.Linear(512, 1024),
-            nn.BatchNorm1d(1024, 0.8),
-            nn.ReLU(),
-
-            nn.Linear(1024, int(np.prod(data_shape))),
-            nn.Sigmoid()
-
-        )
-
-
-    def forward(self, z):
-        img = self.model(z)
-        img = img.view(img.size(0), *self.img_shape)
-        return img
-
-
-
-class Generator_2(nn.Module):
-
-    def __init__(self, latent_dim=None, data_shape=None):
-        super().__init__()
-        self.latent_dim = latent_dim
-        self.img_shape  = data_shape
-        print('init generator 2         : ',latent_dim,' to ',data_shape)
-
-        self.model = nn.Sequential(
-            
-            nn.Linear(latent_dim, 7*7*64),
-            nn.Unflatten(1, (64,7,7)),
-            
-            # nn.UpsamplingNearest2d( scale_factor=2 ),
-            nn.UpsamplingBilinear2d( scale_factor=2 ),
-            nn.Conv2d( 64,128, (3,3), stride=(1,1), padding=(1,1) ),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-
-            # nn.UpsamplingNearest2d( scale_factor=2 ),
-            nn.UpsamplingBilinear2d( scale_factor=2 ),
-            nn.Conv2d( 128,256, (3,3), stride=(1,1), padding=(1,1)),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-
-            nn.Conv2d( 256,1, (5,5), stride=(1,1), padding=(2,2)),
-            nn.Sigmoid()
-
-        )
-
-    def forward(self, z):
-        img_nchw = self.model(z)
-        img_nhwc = img_nchw.permute(0, 2, 3, 1) # reformat from NCHW to NHWC
-        # img = img.view(img.size(0), *self.img_shape) # reformat from NCHW to NHWC
-        return img_nhwc
-
-
-
diff --git a/DCGAN.Lightning/modules/QuickDrawDataModule.py b/DCGAN.Lightning/modules/QuickDrawDataModule.py
deleted file mode 100644
index 34a4ecf..0000000
--- a/DCGAN.Lightning/modules/QuickDrawDataModule.py
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                GAN / QuickDrawDataModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import numpy as np
-import torch
-from lightning import LightningDataModule
-from torch.utils.data import DataLoader
-
-
-class QuickDrawDataModule(LightningDataModule):
-
-
-    def __init__( self, dataset_file='./sheep.npy', scale=1., batch_size=64, num_workers=4 ):
-
-        super().__init__()
-
-        print('\n---- QuickDrawDataModule initialization ----------------------------')
-        print(f'with : scale={scale}  batch size={batch_size}')
-        
-        self.scale        = scale
-        self.dataset_file = dataset_file
-        self.batch_size   = batch_size
-        self.num_workers  = num_workers
-
-        self.dims         = (28, 28, 1)
-        self.num_classes  = 10
-
-
-
-    def prepare_data(self):
-        pass
-
-
-    def setup(self, stage=None):
-        print('\nDataModule Setup :')
-        # Load dataset
-        # Called at the beginning of each stage (train,val,test)
-        # Here, whatever the stage value, we'll have only one set.
-        data = np.load(self.dataset_file)
-        print('Original dataset shape : ',data.shape)
-
-        # Rescale
-        n=int(self.scale*len(data))
-        data = data[:n]
-        print('Rescaled dataset shape : ',data.shape)
-
-        # Normalize, reshape and shuffle
-        data = data/255
-        data = data.reshape(-1,28,28,1)
-        data = torch.from_numpy(data).float()
-        print('Final dataset shape    : ',data.shape)
-
-        print('Dataset loaded and ready.')
-        self.data_train = data
-
-
-    def train_dataloader(self):
-        # Note : a Numpy ndarray is Dataset compliant
-        # (it has a map-style interface). See https://pytorch.org/docs/stable/data.html
-        return DataLoader( self.data_train, batch_size=self.batch_size, num_workers=self.num_workers )
\ No newline at end of file
diff --git a/DCGAN.Lightning/modules/SmartProgressBar.py b/DCGAN.Lightning/modules/SmartProgressBar.py
deleted file mode 100644
index 3ebe192..0000000
--- a/DCGAN.Lightning/modules/SmartProgressBar.py
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                   GAN / SmartProgressBar
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-from lightning.pytorch.callbacks.progress.base import ProgressBarBase
-from tqdm import tqdm
-import sys
-
-class SmartProgressBar(ProgressBarBase):
-
-    def __init__(self, verbosity=2):
-        super().__init__()
-        self.verbosity = verbosity
-
-    def disable(self):
-        self.enable = False
-
-
-    def setup(self, trainer, pl_module, stage):
-        super().setup(trainer, pl_module, stage)
-        self.stage = stage
-
-
-    def on_train_epoch_start(self, trainer, pl_module):
-        super().on_train_epoch_start(trainer, pl_module)
-        if not self.enable : return
-
-        if self.verbosity==2:
-            self.progress=tqdm( total=trainer.num_training_batches,
-                                desc=f'{self.stage} {trainer.current_epoch+1}/{trainer.max_epochs}', 
-                                ncols=100, ascii= " >", 
-                                bar_format='{l_bar}{bar}| [{elapsed}] {postfix}')
-
-
-
-    def on_train_epoch_end(self, trainer, pl_module):
-        super().on_train_epoch_end(trainer, pl_module)
-
-        if not self.enable : return
-
-        if self.verbosity==2:
-            self.progress.close()
-
-        if self.verbosity==1:
-            print(f'Train {trainer.current_epoch+1}/{trainer.max_epochs} Done.')
-
-
-    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
-        super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
-
-        if not self.enable : return
-        
-        if self.verbosity==2:
-            metrics = {}
-            for name,value in trainer.logged_metrics.items():
-                metrics[name] = f'{float(value):3.3f}'
-            self.progress.set_postfix(metrics)
-            self.progress.update(1)
-
-
-progress_bar = SmartProgressBar(verbosity=2)
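-
-# Usage sketch (an assumption, not shown in this file) : pass it to the Trainer
-# like any other Lightning callback, e.g. Trainer(max_epochs=10, callbacks=[progress_bar])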
diff --git a/DCGAN.Lightning/modules/WGANGP.py b/DCGAN.Lightning/modules/WGANGP.py
deleted file mode 100644
index 030740b..0000000
--- a/DCGAN.Lightning/modules/WGANGP.py
+++ /dev/null
@@ -1,229 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                GAN / GAN LightningModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import sys
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision
-from lightning import LightningModule
-
-
-class WGANGP(LightningModule):
-
-    # -------------------------------------------------------------------------
-    # Init
-    # -------------------------------------------------------------------------
-    #
-    def __init__(
-        self,
-        data_shape          = (None,None,None),
-        latent_dim          = None,
-        lr                  = 0.0002,
-        b1                  = 0.5,
-        b2                  = 0.999,
-        batch_size          = 64,
-        lambda_gp           = 10,
-        generator_class     = None,
-        discriminator_class = None,
-        **kwargs,
-    ):
-        super().__init__()
-
-        print('\n---- WGANGP initialization -----------------------------------------')
-
-        # ---- Hyperparameters
-        #
-        # Enable Lightning to store all the provided arguments under the self.hparams attribute.
-        # These hyperparameters will also be stored within the model checkpoint.
-        #
-        self.save_hyperparameters()
-
-        print('Hyperparameters are :')
-        for name,value in self.hparams.items():
-            print(f'{name:24s} : {value}')
-
-        # ---- Generator/Discriminator instantiation
-        #
-        # self.generator     = Generator(latent_dim=self.hparams.latent_dim, img_shape=data_shape)
-        # self.discriminator = Discriminator(img_shape=data_shape)
-
-        print('Submodels :')
-        module=sys.modules['__main__']
-        class_g = getattr(module, generator_class)
-        class_d = getattr(module, discriminator_class)
-        self.generator     = class_g( latent_dim=latent_dim, data_shape=data_shape)
-        self.discriminator = class_d( latent_dim=latent_dim, data_shape=data_shape)
-
-        # ---- Validation and example data
-        #
-        self.validation_z        = torch.randn(8, self.hparams.latent_dim)
-        self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
-
-
-    def forward(self, z):
-        return self.generator(z)
-
-
-    def adversarial_loss(self, y_hat, y):
-        # Classic GAN adversarial loss ; note that it is not used by the WGAN-GP
-        # objectives in training_step() below.
-        return F.binary_cross_entropy(y_hat, y)
-
-
-
-# ------------------------------------------------------------------------------------ TO DO -------------------
-
-    # See : https://github.com/rosshemsley/gander/blob/main/gander/models/gan.py
-
-    def gradient_penalty(self, real_images, fake_images):
-
-        batch_size = real_images.size(0)
-
-        # ---- Create interpolate images
-        #
-        # Get a random vector : size=([batch_size])
-        epsilon = torch.distributions.uniform.Uniform(0, 1).sample([batch_size])
-        # Add dimensions to match images batch : size=([batch_size,1,1,1])
-        epsilon = epsilon[:, None, None, None]
-        # Put epsilon on the right device/dtype
-        epsilon = epsilon.type_as(real_images)
-        # Do interpolation
-        interpolates = epsilon * fake_images + ((1 - epsilon) * real_images)
-
-        # ---- Use autograd to compute gradient
-        #
-        # The key to making this work is `create_graph` : it adds the computations of this
-        # penalty to the computation graph of the loss function, so that the second partial
-        # derivatives are correctly computed.
-        #
-        interpolates.requires_grad = True
-
-        pred_labels = self.discriminator.forward(interpolates)
-
-        gradients = torch.autograd.grad(  inputs       = interpolates,
-                                          outputs      = pred_labels, 
-                                          grad_outputs = torch.ones_like(pred_labels),
-                                          create_graph = True, 
-                                          only_inputs  = True )[0]
-
-        grad_flat   = gradients.view(batch_size, -1)
-        grad_norm   = torch.linalg.norm(grad_flat, dim=1)
-
-        grad_penalty = (grad_norm - 1) ** 2 
-
-        return grad_penalty
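-
-    # For reference : the penalty above corresponds to the usual WGAN-GP term
-    #     E[ ( || grad_x_hat D(x_hat) ||_2 - 1 )^2 ]
-    # (Gulrajani et al., 2017) ; it is weighted by lambda_gp in training_step() below.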
-
-
-
-# ------------------------------------------------------------------------------------------------------------------
-
-
-    def training_step(self, batch, batch_idx, optimizer_idx):
-
-        real_imgs  = batch
-        batch_size = batch.size(0)
-        lambda_gp  = self.hparams.lambda_gp
-
-        # ---- Get some latent space vectors and fake images
-        #      We use type_as() to make sure we initialize z on the right device (GPU/CPU).
-        #
-        z = torch.randn(batch_size, self.hparams.latent_dim)
-        z = z.type_as(real_imgs)
-        
-        fake_imgs = self.generator.forward(z)
-
-        # ---- Train generator
-        #      The generator uses optimizer #0
-        #      We try to generate fake images that get good critic scores
-        #
-        if optimizer_idx == 0:
-
-            # Get critics
-            critics = self.discriminator.forward(fake_imgs)
-
-            # Loss
-            g_loss = -critics.mean()
-
-            # Log
-            self.log("g_loss", g_loss, prog_bar=True)
-
-            return g_loss
-
-        # ---- Train discriminator
-        #      The discriminator uses optimizer #1
-        #      We try to tell fake images apart from real ones
-        #
-        if optimizer_idx == 1:
-            
-            # Get critics
-            critics_real = self.discriminator.forward(real_imgs)
-            critics_fake = self.discriminator.forward(fake_imgs)
-
-            # Get gradient penalty
-            grad_penalty = self.gradient_penalty(real_imgs, fake_imgs)
-
-            # Loss
-            d_loss = critics_fake.mean() - critics_real.mean() + lambda_gp*grad_penalty.mean()
-
-            # Log loss
-            self.log("d_loss", d_loss, prog_bar=True)
-
-            return d_loss
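-
-    # Summary of the two objectives above (standard WGAN-GP notation) :
-    #   g_loss = -E[ D(G(z)) ]
-    #   d_loss =  E[ D(G(z)) ] - E[ D(x) ] + lambda_gp * gradient_penalty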
-
-
-    def configure_optimizers(self):
-
-        lr = self.hparams.lr
-        b1 = self.hparams.b1
-        b2 = self.hparams.b2
-
-        # With a GAN, we need 2 separate optimizers.
-        # opt_g to optimize the generator      #0
-        # opt_d to optimize the discriminator  #1
-        # opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr, betas=(b1, b2))
-        # opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2),)
-        opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr)
-        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr)
-        return [opt_g, opt_d], []
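-
-    # Note : with two optimizers and the Lightning 1.x automatic-optimization API,
-    # training_step() is called once per optimizer for each batch, receiving
-    # optimizer_idx (0 then 1) ; this is why training_step() above branches on it.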
-
-
-    def training_epoch_end(self, outputs):
-
-        # Get our validation latent vectors as z
-        # z = self.validation_z.type_as(self.generator.model[0].weight)
-
-        # ---- Log Graph
-        #
-        if(self.current_epoch==1):
-            sampleImg=torch.rand((1,28,28,1))
-            sampleImg=sampleImg.type_as(self.generator.model[0].weight)
-            self.logger.experiment.add_graph(self.discriminator,sampleImg)
-
-        # ---- Log d_loss/epoch
-        #
-        g_loss, d_loss = 0,0
-        for metrics in outputs:
-            g_loss+=float( metrics[0]['loss'] )
-            d_loss+=float( metrics[1]['loss'] )
-        g_loss, d_loss = g_loss/len(outputs), d_loss/len(outputs)
-        self.logger.experiment.add_scalar("g_loss/epochs",g_loss, self.current_epoch)
-        self.logger.experiment.add_scalar("d_loss/epochs",d_loss, self.current_epoch)
-
-        # ---- Log some of these images
-        #
-        z = torch.randn(self.hparams.batch_size, self.hparams.latent_dim)
-        z = z.type_as(self.generator.model[0].weight)
-        sample_imgs = self.generator(z)
-        sample_imgs = sample_imgs.permute(0, 3, 1, 2) # from NHWC to NCHW
-        grid = torchvision.utils.make_grid(tensor=sample_imgs, nrow=12, )
-        self.logger.experiment.add_image(f"Generated images", grid,self.current_epoch)
diff --git a/GTSRB.Keras3/04-Keras-cv.ipynb b/GTSRB.Keras3/04-Keras-cv.ipynb
index 6c88dc7..105d485 100644
--- a/GTSRB.Keras3/04-Keras-cv.ipynb
+++ b/GTSRB.Keras3/04-Keras-cv.ipynb
@@ -20,9 +20,7 @@
     " See : https://keras.io/guides/keras_cv/classification_with_keras_cv/  \n",
     " Imagenet classes can be found at : https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json\n",
     "\n",
-    "## Step 1 - Import and init\n",
-    "\n",
-    "**ATTENTION :** A specific environment is required for this example (Which may require 6 GB).  \n",
+    "## ATTENTION : A specific environment is required for this example !\n",
     "This python environment required for this notebook is :\n",
     "```\n",
     "python3 -m venv fidle-kcv\n",
@@ -31,6 +29,8 @@
     "```\n",
     "Note: Tensorflow is not used for interference, and will no longer be required in later versions of Keras 3.\n",
     "\n",
+    "## Step 1 - Import and init\n",
+    "\n",
     "### 1.1 - Python stuffs"
    ]
   },
diff --git a/README.ipynb b/README.ipynb
index 7f16f1a..c51c15f 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "1f828036",
+   "id": "e3301300",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2024-01-21T16:21:09.860108Z",
-     "iopub.status.busy": "2024-01-21T16:21:09.859792Z",
-     "iopub.status.idle": "2024-01-21T16:21:09.870962Z",
-     "shell.execute_reply": "2024-01-21T16:21:09.870075Z"
+     "iopub.execute_input": "2024-01-23T09:53:31.520305Z",
+     "iopub.status.busy": "2024-01-23T09:53:31.519598Z",
+     "iopub.status.idle": "2024-01-23T09:53:31.530907Z",
+     "shell.execute_reply": "2024-01-23T09:53:31.530000Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -45,35 +45,30 @@
        "- **[Fidle site](https://fidle.cnrs.fr)**\n",
        "- **[Presentation of the training](https://fidle.cnrs.fr/presentation)**\n",
        "- **[Detailed program](https://fidle.cnrs.fr/programme)**\n",
-       "- [Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !\n",
-       "- [Find us on youtube](https://fidle.cnrs.fr/youtube)\n",
-       "- [Corrected notebooks](https://fidle.cnrs.fr/done)\n",
+       "- **[Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !**\n",
+       "- **[Corrected notebooks](https://fidle.cnrs.fr/done)**\n",
+       "- **[Follow us on our channel :](https://fidle.cnrs.fr/youtube)**\\\n",
+       "[<img width=\"120px\" style=\"vertical-align:middle\" src=\"fidle/img/logo-YouTube.png\"></img>](https://fidle.cnrs.fr/youtube)\n",
        "\n",
        "For more information, you can contact us at :  \n",
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
-       "Current Version : <!-- VERSION_BEGIN -->2.5.4<!-- VERSION_END -->\n",
+       "Current Version : <!-- VERSION_BEGIN -->3.0.1<!-- VERSION_END -->\n",
        "\n",
        "\n",
        "## Course materials\n",
        "\n",
        "| | | | |\n",
        "|:--:|:--:|:--:|:--:|\n",
-       "| **[<img width=\"50px\" src=\"fidle/img/00-Fidle-pdf.svg\"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width=\"50px\" src=\"fidle/img/00-Notebooks.svg\"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width=\"50px\" src=\"fidle/img/00-Datasets-tar.svg\"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width=\"50px\" src=\"fidle/img/00-Videos.svg\"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;|\n",
+       "| **[<img width=\"50px\" src=\"fidle/img/00-Fidle-pdf.svg\"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width=\"50px\" src=\"fidle/img/00-Notebooks.svg\"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width=\"50px\" src=\"fidle/img/00-Datasets-tar.svg\"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width=\"50px\" src=\"fidle/img/00-Videos.svg\"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|\n",
        "\n",
        "Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.\n",
        "\n",
        "\n",
        "## Jupyter notebooks\n",
        "\n",
-       "**NOTE :** The examples marked **\"obsolete\"** are still functional under Keras2/Tensorflow, \n",
-       "but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  \n",
-       "We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  \n",
-       "For these reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  \n",
-       "The world of Deep Learning is changing very fast !\n",
-       "\n",
        "<!-- TOC_BEGIN -->\n",
-       "<!-- Automatically generated on : 21/01/24 17:21:08 -->\n",
+       "<!-- Automatically generated on : 23/01/24 10:53:30 -->\n",
        "\n",
        "### Linear and logistic regression\n",
        "- **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  \n",
@@ -159,38 +154,6 @@
        "- **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  \n",
        "Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version\n",
        "\n",
-       "### Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)\n",
-       "- **[K2AE1](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)  \n",
-       "Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE2](AE.Keras2/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras2/02-AE-with-MNIST.ipynb)  \n",
-       "Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE3](AE.Keras2/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras2/03-AE-with-MNIST-post.ipynb)  \n",
-       "Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE4](AE.Keras2/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras2/04-ExtAE-with-MNIST.ipynb)  \n",
-       "Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE5](AE.Keras2/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras2/05-ExtAE-with-MNIST.ipynb)  \n",
-       "Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-       "\n",
-       "### Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)\n",
-       "- **[K2VAE1](VAE.Keras2/01-VAE-with-MNIST.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras2/01-VAE-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using functional APPI, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2VAE2](VAE.Keras2/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras2/02-VAE-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2VAE3](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)  \n",
-       "Visualization and analysis of the VAE's latent space of the dataset MNIST, using Keras 2 and Tensorflow (obsolete)\n",
-       "\n",
-       "### Generative network with Variational Autoencoder (VAE), using PyTorch Lightning\n",
-       "- **[LVAE1](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)** - [First VAE, using Lightning API (MNIST dataset)](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using Lightning API, with a latent space of small dimension, using PyTorch Lightning\n",
-       "- **[LVAE2](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using model subclass, with a latent space of small dimension, using PyTorch Lightninh\n",
-       "- **[LVAE3](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)  \n",
-       "Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning\n",
-       "\n",
-       "### Generative Adversarial Networks (GANs), using Lightning\n",
-       "- **[LSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  \n",
-       "\"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
-       "\n",
        "### Diffusion Model (DDPM) using PyTorch\n",
        "- **[DDPM1](DDPM.PyTorch/01-ddpm.ipynb)** - [Fashion MNIST Generation with DDPM](DDPM.PyTorch/01-ddpm.ipynb)  \n",
        "Diffusion Model example, to generate Fashion MNIST images.\n",
@@ -222,12 +185,6 @@
        "A scratchbook for small examples\n",
        "<!-- TOC_END -->\n",
        "\n",
-       "**NOTE :** The examples marked **\"obsolete\"** are still functional under Keras2/Tensorflow, \n",
-       "but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  \n",
-       "We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  \n",
-       "For these resaon, they are kept as examples, while we develop the Keras3/PyTorch versions.  \n",
-       "The world of Deep Learning is changing very fast !\n",
-       "\n",
        "## Installation\n",
        "\n",
        "Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.\n",
@@ -256,7 +213,7 @@
     "from IPython.display import display,Markdown\n",
     "display(Markdown(open('README.md', 'r').read()))\n",
     "#\n",
-    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 21/01/24 17:21:08"
+    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 23/01/24 10:53:30"
    ]
   }
  ],
diff --git a/README.md b/README.md
index 9295f81..b3cd57a 100644
--- a/README.md
+++ b/README.md
@@ -24,35 +24,30 @@ For more information, see **https://fidle.cnrs.fr** :
 - **[Fidle site](https://fidle.cnrs.fr)**
 - **[Presentation of the training](https://fidle.cnrs.fr/presentation)**
 - **[Detailed program](https://fidle.cnrs.fr/programme)**
-- [Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !
-- [Find us on youtube](https://fidle.cnrs.fr/youtube)
-- [Corrected notebooks](https://fidle.cnrs.fr/done)
+- **[Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !**
+- **[Corrected notebooks](https://fidle.cnrs.fr/done)**
+- **[Follow us on our channel :](https://fidle.cnrs.fr/youtube)**\
+[<img width="120px" style="vertical-align:middle" src="fidle/img/logo-YouTube.png"></img>](https://fidle.cnrs.fr/youtube)
 
 For more information, you can contact us at :  
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
-Current Version : <!-- VERSION_BEGIN -->2.5.4<!-- VERSION_END -->
+Current Version : <!-- VERSION_BEGIN -->3.0.1<!-- VERSION_END -->
 
 
 ## Course materials
 
 | | | | |
 |:--:|:--:|:--:|:--:|
-| **[<img width="50px" src="fidle/img/00-Fidle-pdf.svg"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width="50px" src="fidle/img/00-Notebooks.svg"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width="50px" src="fidle/img/00-Datasets-tar.svg"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width="50px" src="fidle/img/00-Videos.svg"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;|
+| **[<img width="50px" src="fidle/img/00-Fidle-pdf.svg"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width="50px" src="fidle/img/00-Notebooks.svg"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width="50px" src="fidle/img/00-Datasets-tar.svg"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width="50px" src="fidle/img/00-Videos.svg"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|
 
 Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.
 
 
 ## Jupyter notebooks
 
-**NOTE :** The examples marked **"obsolete"** are still functional under Keras2/Tensorflow, 
-but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  
-We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  
-For this reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  
-The world of Deep Learning is changing very fast !
-
 <!-- TOC_BEGIN -->
-<!-- Automatically generated on : 21/01/24 17:21:08 -->
+<!-- Automatically generated on : 23/01/24 10:53:30 -->
 
 ### Linear and logistic regression
 - **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  
@@ -138,38 +133,6 @@ Using a Tranformer to perform a sentiment analysis (IMDB) - Jean Zay version
 - **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  
 Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version
 
-### Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
-- **[K2AE1](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)  
-Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE2](AE.Keras2/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras2/02-AE-with-MNIST.ipynb)  
-Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE3](AE.Keras2/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras2/03-AE-with-MNIST-post.ipynb)  
-Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE4](AE.Keras2/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras2/04-ExtAE-with-MNIST.ipynb)  
-Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE5](AE.Keras2/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras2/05-ExtAE-with-MNIST.ipynb)  
-Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)
-
-### Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
-- **[K2VAE1](VAE.Keras2/01-VAE-with-MNIST.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras2/01-VAE-with-MNIST.ipynb)  
-Construction and training of a VAE, using functional API, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)
-- **[K2VAE2](VAE.Keras2/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras2/02-VAE-with-MNIST.ipynb)  
-Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)
-- **[K2VAE3](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)  
-Visualization and analysis of the VAE's latent space of the dataset MNIST, using Keras 2 and Tensorflow (obsolete)
-
-### Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
-- **[LVAE1](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)** - [First VAE, using Lightning API (MNIST dataset)](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)  
-Construction and training of a VAE, using Lightning API, with a latent space of small dimension, using PyTorch Lightning
-- **[LVAE2](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)  
-Construction and training of a VAE, using model subclass, with a latent space of small dimension, using PyTorch Lightning
-- **[LVAE3](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)  
-Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning
-
-### Generative Adversarial Networks (GANs), using Lightning
-- **[LSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  
-"Draw me a sheep", revisited with a DCGAN, using Pytorch Lightning
-
 ### Diffusion Model (DDPM) using PyTorch
 - **[DDPM1](DDPM.PyTorch/01-ddpm.ipynb)** - [Fashion MNIST Generation with DDPM](DDPM.PyTorch/01-ddpm.ipynb)  
 Diffusion Model example, to generate Fashion MNIST images.
@@ -201,12 +164,6 @@ PyTorch est l'un des principaux framework utilisé dans le Deep Learning
 A scratchbook for small examples
 <!-- TOC_END -->
 
-**NOTE :** The examples marked **"obsolete"** are still functional under Keras2/Tensorflow, 
-but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  
-We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  
-For this reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  
-The world of Deep Learning is changing very fast !
-
 ## Installation
 
 Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.
diff --git a/VAE.Keras2/01-VAE-with-MNIST.ipynb b/VAE.Keras2/01-VAE-with-MNIST.ipynb
deleted file mode 100644
index ebddf5a..0000000
--- a/VAE.Keras2/01-VAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,410 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE1] - First VAE, using functional API (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using functional APPI, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding **Keras functional API**, using two custom layers\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n",
-    "\n",
-    "## Acknowledgements :\n",
-    "Thanks to **François Chollet** who is at the base of this example (and the creator of Keras !!).  \n",
-    "See : https://keras.io/examples/generative/vae\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import TensorBoard\n",
-    "\n",
-    "from modules.layers    import SamplingLayer, VariationalLossLayer\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import sys\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE1')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : With scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !\\\n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !\\\n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.001\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 2\n",
-    "loss_weights  = [1,.001]\n",
-    "\n",
-    "scale         = 0.2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 10\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use the **functional API.**  \n",
-    "For this, we will use two custom layers :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_log_var - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VariationalLossLayer`, which allows us to calculate the loss function, loss - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, strides=1, padding=\"same\", activation=\"relu\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "\n",
-    "z_mean    = layers.Dense(latent_dim, name=\"z_mean\")(x)\n",
-    "z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\n",
-    "z         = SamplingLayer()([z_mean, z_log_var])\n",
-    "\n",
-    "encoder = keras.Model(inputs, [z_mean, z_log_var, z], name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
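-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Note : the sampling step above is the classic reparameterization trick. A minimal sketch of what SamplingLayer does (assumed, with K = tensorflow.keras.backend ; the real code is in [SamplingLayer.py](./modules/layers/SamplingLayer.py)) :\n",
-    "```python\n",
-    "epsilon = K.random_normal(shape=K.shape(z_mean))\n",
-    "z       = z_mean + K.exp(0.5 * z_log_var) * epsilon\n",
-    "```"
-   ]
-  },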
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1,  3, padding=\"same\", activation=\"sigmoid\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "\n",
-    "We will calculate the loss with a specific layer: `VariationalLossLayer` - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "inputs = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "z_mean, z_log_var, z = encoder(inputs)\n",
-    "outputs              = decoder(z)\n",
-    "\n",
-    "outputs = VariationalLossLayer(loss_weights=loss_weights)([inputs, z_mean, z_log_var, outputs])\n",
-    "\n",
-    "vae=keras.Model(inputs,outputs)\n",
-    "\n",
-    "vae.compile(optimizer='adam', loss=None)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : qui va sauvegarder des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "callback_images      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "callback_bestmodel   = BestModelCallback( run_dir + '/models/best_model.h5' )\n",
-    "callback_tensorboard = TensorBoard(log_dir=run_dir + '/logs', histogram_freq=1)\n",
-    "\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = vae.fit(x_data, epochs=epochs, batch_size=batch_size, callbacks=callbacks_list, verbose=fit_verbosity)\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={\"Loss\":['loss','r_loss', 'kl_loss']}, save_as='history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = callback_images.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-generated')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Annexe - Model Save and reload "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "filename = run_dir+'/models/my_model.h5'\n",
-    "\n",
-    "# ---- Save our model :\n",
-    "\n",
-    "vae.save(filename)\n",
-    "\n",
-    "# ---- Reload it\n",
-    "\n",
-    "vae = keras.models.load_model(filename, custom_objects={'SamplingLayer': SamplingLayer, 'VariationalLossLayer':VariationalLossLayer})\n",
-    "\n",
-    "# ---- Retrieve a layer\n",
-    "\n",
-    "decoder = vae.get_layer('decoder')\n",
-    "\n",
-    "img = decoder( np.array([[-1,.1]]))\n",
-    "fidle.scrawler.images(np.array(img), x_size=2,y_size=2, save_as='04-example')\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/02-VAE-with-MNIST.ipynb b/VAE.Keras2/02-VAE-with-MNIST.ipynb
deleted file mode 100644
index e1bc1e1..0000000
--- a/VAE.Keras2/02-VAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,505 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE2] - VAE, using a custom model class  (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding a still more **advanced programming model**, using a **custom model**\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n",
-    "\n",
-    "## Acknowledgements :\n",
-    "Thanks to **François Chollet** who is at the base of this example (and the creator of Keras !!).  \n",
-    "See : https://keras.io/examples/generative/vae\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import scipy.stats\n",
-    "import sys\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import TensorBoard\n",
-    "\n",
-    "from modules.models    import VAE\n",
-    "from modules.layers    import SamplingLayer\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE2')\n",
-    "\n",
-    "VAE.about()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : with scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !  \n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !  \n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.01\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 6\n",
-    "loss_weights  = [1,.001]       # [1, .001] give good results\n",
-    "\n",
-    "scale         = .2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 5\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use a **custom model**.\n",
-    "For this, we will use :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_log_var - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VAE`, a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, strides=1, padding=\"same\", activation=\"relu\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "\n",
-    "z_mean    = layers.Dense(latent_dim, name=\"z_mean\")(x)\n",
-    "z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\n",
-    "z         = SamplingLayer()([z_mean, z_log_var])\n",
-    "\n",
-    "encoder = keras.Model(inputs, [z_mean, z_log_var, z], name=\"encoder\")\n",
-    "encoder.compile()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1,  3, padding=\"same\", activation=\"sigmoid\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "decoder.compile()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "`VAE` is a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae = VAE(encoder, decoder, loss_weights)\n",
-    "\n",
-    "vae.compile(optimizer='adam')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : qui va sauvegarder des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "callback_images      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "callback_bestmodel   = BestModelCallback( run_dir + '/models/best_model.h5' )\n",
-    "callback_tensorboard = TensorBoard(log_dir=run_dir + '/logs', histogram_freq=1)\n",
-    "\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = vae.fit(x_data, epochs=epochs, batch_size=batch_size, callbacks=callbacks_list, verbose=fit_verbosity)\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={\"Loss\":['loss','r_loss', 'kl_loss']}, save_as='history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = callback_images.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-original')\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder) during training"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='04-encoded')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Model evaluation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.1 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae=VAE()\n",
-    "vae.reload(f'{run_dir}/models/best_model')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder.predict(x_show)\n",
-    "x_reconst         = vae.decoder.predict(z)\n",
-    "\n",
-    "# ---- Show it\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='05-original')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='06-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Visualization of the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = int(20000*scale)\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder.predict(x_show)\n",
-    "\n",
-    "# ---- Show them\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('07-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Generative latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "    \n",
-    "    grid_size   = 18\n",
-    "    grid_scale  = 1\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Draw latentspoints and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(10, 8))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('08-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "\n",
-    "    x_reconst = vae.decoder.predict([grid])\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='09-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/03-VAE-with-MNIST-post.ipynb b/VAE.Keras2/03-VAE-with-MNIST-post.ipynb
deleted file mode 100644
index 4cf60f2..0000000
--- a/VAE.Keras2/03-VAE-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,339 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE3] - Analysis of the VAE's latent space of the MNIST dataset\n",
-    "<!-- DESC --> Visualization and analysis of the VAE's latent space of the MNIST dataset, using Keras 2 and TensorFlow (obsolete)\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - First data generation from **latent space** \n",
-    " - Understanding of underlying principles\n",
-    " - Model management\n",
-    "\n",
-    "Here, we no longer consume data, we generate it! ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load a saved model\n",
-    " - Reconstruct some images\n",
-    " - Latent space visualization\n",
-    " - Matrix of generated images\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "\n",
-    "from modules.models    import VAE\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import scipy.stats\n",
-    "import matplotlib\n",
-    "import matplotlib.pyplot as plt\n",
-    "from barviz import Simplex\n",
-    "from barviz import Collection\n",
-    "\n",
-    "import sys\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scale      = 1\n",
-    "seed       = 123\n",
-    "models_dir = './run/VAE2'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('scale', 'seed', 'models_dir')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae=VAE()\n",
-    "vae.reload(f'{models_dir}/models/best_model')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select a few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder.predict(x_show, verbose=0)\n",
-    "x_reconst         = vae.decoder.predict(z,      verbose=0)\n",
-    "\n",
-    "latent_dim        = z.shape[1]\n",
-    "\n",
-    "# ---- Show it\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.utils.subtitle('Originals :')\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='01-original')\n",
-    "fidle.utils.subtitle('Reconstructed :')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='02-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Visualizing the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 20000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder.predict(x_show, verbose=0)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.1 - Classic 2D visualisation"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('03-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Simplex visualisation"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim<4:\n",
-    "\n",
-    "    print('Sorry, this part only works if the latent space has more than 3 dimensions')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    # ---- Softmax rescale\n",
-    "    #\n",
-    "    zs = np.exp(z)/np.sum(np.exp(z),axis=1,keepdims=True)\n",
-    "    # zc  = zs * 1/np.max(zs)\n",
-    "\n",
-    "    # ---- Create collection\n",
-    "    #\n",
-    "    c = Collection(zs, colors=y_show, labels=y_show)\n",
-    "    c.attrs.markers_colormap     = {'colorscale':'Rainbow','cmin':0,'cmax':latent_dim}\n",
-    "    c.attrs.markers_size         = 5\n",
-    "    c.attrs.markers_border_width = 0\n",
-    "    c.attrs.markers_opacity      = 0.8\n",
-    "\n",
-    "    s = Simplex.build(latent_dim)\n",
-    "    s.attrs.width  = 1000\n",
-    "    s.attrs.height = 1000\n",
-    "    s.plot(c)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Generate from latent space (latent_dim==2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, this part only works if the latent space has dimension 2')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    grid_size   = 14\n",
-    "    grid_scale  = 1.\n",
-    "\n",
-    "    # ---- Build a grid of points via the normal quantile function (ppf)\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Plot latent points and the grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(12, 10))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('04-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot the images corresponding to the grid points\n",
-    "\n",
-    "    x_reconst = vae.decoder.predict([grid])\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='05-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/modules/callbacks/BestModelCallback.py b/VAE.Keras2/modules/callbacks/BestModelCallback.py
deleted file mode 100644
index 8ec462e..0000000
--- a/VAE.Keras2/modules/callbacks/BestModelCallback.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                        BestModelCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import os
-
-                
-class BestModelCallback(Callback):
-
-    def __init__(self, filename= './run_dir/best-model.h5', verbose=0 ):
-        self.filename = filename
-        self.verbose  = verbose
-        self.loss     = np.Inf
-        os.makedirs( os.path.dirname(filename), mode=0o750, exist_ok=True)
-                
-    def on_train_begin(self, logs=None):
-        self.loss = np.Inf
-        
-    def on_epoch_end(self, epoch, logs=None):
-        current = logs.get("loss")
-        if current < self.loss:
-            self.loss = current
-            self.model.save(self.filename)
-            if self.verbose>0: print(f'Saved - loss={current:.6f}')
diff --git a/VAE.Keras2/modules/callbacks/ImagesCallback.py b/VAE.Keras2/modules/callbacks/ImagesCallback.py
deleted file mode 100644
index 85bdb54..0000000
--- a/VAE.Keras2/modules/callbacks/ImagesCallback.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            ImageCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import matplotlib.pyplot as plt
-from skimage import io
-import os
-
-class ImagesCallback(Callback):
-    '''
-    Save generated (random mode) or encoded/decoded (z mode) images on epoch end.
-    params:
-        x           : input images, for z mode (None)
-        z_dim       : size of the latent space, for random mode (None)
-        nb_images   : number of images to save
-        from_z      : save images from z (False)
-        from_random : save images from random (False)
-        filename    : images filename
-        run_dir     : output directory to save images        
-    '''
-    
-   
-    def __init__(self, x           = None,
-                       z_dim       = None,
-                       nb_images   = 5,
-                       from_z      = False, 
-                       from_random = False,
-                       filename    = 'image-{epoch:03d}-{i:02d}.jpg',
-                       run_dir     = './run'):
-        
-        # ---- Parameters
-        #
-        
-        self.x = None if x is None else x[:nb_images]
-        self.z_dim       = z_dim
-        
-        self.nb_images   = nb_images
-        self.from_z      = from_z
-        self.from_random = from_random
-
-        self.filename_z       = run_dir + '/images-z/'      + filename
-        self.filename_random  = run_dir + '/images-random/' + filename
-        
-        if from_z:      os.makedirs( run_dir + '/images-z/',     mode=0o750, exist_ok=True)
-        if from_random: os.makedirs( run_dir + '/images-random/', mode=0o750, exist_ok=True)
-        
-    
-    
-    def save_images(self, images, filename, epoch):
-        '''Save images as <filename>'''
-        
-        for i,image in enumerate(images):
-            
-            image = image.squeeze()  # Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
-        
-            filenamei = filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filenamei, image, cmap='gray_r')
-            else:
-                plt.imsave(filenamei, image)
-
-    
-    
-    def on_epoch_end(self, epoch, logs={}):
-        '''Called at the end of each epoch'''
-        
-        encoder     = self.model.get_layer('encoder')
-        decoder     = self.model.get_layer('decoder')
-
-        if self.from_random:
-            z      = np.random.normal( size=(self.nb_images,self.z_dim) )
-            images = decoder.predict(z)
-            self.save_images(images, self.filename_random, epoch)
-            
-        if self.from_z:
-            z_mean, z_var, z  = encoder.predict(self.x)
-            images            = decoder.predict(z)
-            self.save_images(images, self.filename_z, epoch)
-
-
-    def get_images(self, epochs=None, from_z=True,from_random=True):
-        '''Read and return saved images. epochs is a range'''
-        if epochs is None : return
-        images_z = []
-        images_r = []
-        for epoch in list(epochs):
-            for i in range(self.nb_images):
-                if from_z:
-                    f = self.filename_z.format(epoch=epoch,i=i)
-                    images_z.append( io.imread(f) )
-                if from_random:
-                    f = self.filename_random.format(epoch=epoch,i=i)
-                    images_r.append( io.imread(f) )
-        return images_z, images_r
-            
diff --git a/VAE.Keras2/modules/callbacks/__init__.py b/VAE.Keras2/modules/callbacks/__init__.py
deleted file mode 100644
index 99114d9..0000000
--- a/VAE.Keras2/modules/callbacks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.callbacks.BestModelCallback import BestModelCallback
-from modules.callbacks.ImagesCallback    import ImagesCallback
\ No newline at end of file
diff --git a/VAE.Keras2/modules/datagen/DataGenerator.py b/VAE.Keras2/modules/datagen/DataGenerator.py
deleted file mode 100644
index b1c7752..0000000
--- a/VAE.Keras2/modules/datagen/DataGenerator.py
+++ /dev/null
@@ -1,148 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/       DataGenerator
-#    |_|   |_|\__,_|_|\___|       for clustered CelebA dataset
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# Initial version by JL Parouty, feb 2020
-
-
-import numpy as np
-import pandas as pd
-import math
-import os,glob
-import tensorflow as tf
-from tensorflow.keras.utils import Sequence
-from IPython.display import display,Markdown
-
-class DataGenerator(Sequence):
-
-    version = '0.4.1'
-    
-    def __init__(self, clusters_dir='./data', batch_size=32, debug=False, scale=1):
-        '''
-        Instantiation of the data generator
-        args:
-            clusters_dir : directory of the cluster files
-            batch_size  : batch size (32)
-            debug       : debug mode (False)
-            scale       : scale of dataset to use. 1. means 100% (1.)
-        '''
-        if debug : self.about()
-        #
-        # ---- Get the list of clusters
-        #      
-        clusters_name = [ os.path.splitext(f)[0] for f in glob.glob( f'{clusters_dir}/*.npy') ]
-        clusters_size = len(clusters_name)
-        #
-        # ---- Read each cluster description
-        #      because we need the full dataset size
-        #
-        dataset_size  = 0
-        for c in clusters_name:
-            df = pd.read_csv(c+'.csv', header=0)
-            dataset_size+=len(df.index)
-        #
-        # ---- If we only want to use a part of the dataset...
-        #
-        dataset_size = int(dataset_size * scale)
-        #
-        if debug: 
-            print(f'\nClusters nb  : {len(clusters_name)} files')
-            print(f'Dataset size : {dataset_size}')
-            print(f'Batch size   : {batch_size}')
-
-        #
-        # ---- Remember all of that
-        #
-        self.clusters_dir  = clusters_dir
-        self.batch_size    = batch_size
-        self.clusters_name = clusters_name
-        self.clusters_size = clusters_size
-        self.dataset_size  = dataset_size
-        self.debug         = debug
-        #
-        # ---- Read a first cluster
-        #
-        self.rewind()
-    
-    
-    def rewind(self):
-        self.cluster_i = self.clusters_size
-        self.read_next_cluster()
-
-        
-    def __len__(self):
-        return math.floor(self.dataset_size / self.batch_size)
-
-    
-    def __getitem__(self, idx):
-        #
-        # ---- Get the next item index
-        #
-        i=self.data_i
-        #
-        # ---- Get a batch
-        #
-        batch = self.data[i:i+self.batch_size]
-        #
-        # ---- Cluster is large enough
-        #
-        if len(batch) == self.batch_size:
-            self.data_i += self.batch_size
-            if self.debug: print(f'({len(batch)}) ',end='')
-            return batch,batch
-        #
-        # ---- Not enough...
-        #
-        if self.debug: print(f'({len(batch)}..) ',end='')
-        #
-        self.read_next_cluster()
-        batch2 = self.data[ 0:self.batch_size-len(batch) ]
-        self.data_i = self.batch_size-len(batch)
-        batch  = np.concatenate( (batch,batch2) )
-        #
-        if self.debug: print(f'(..{len(batch2)}) ',end='')
-        return batch, batch
-    
-    
-    def on_epoch_end(self):
-        self.rewind()
-    
-    
-    def read_next_cluster(self):
-        #
-        # ---- Get the next cluster name
-        #      If we have reached the end of the list, we mix and
-        #      start again from the beginning. 
-        #
-        i = self.cluster_i + 1
-        if i >= self.clusters_size:
-            np.random.shuffle(self.clusters_name)
-            i = 0
-            if self.debug : print(f'\n[shuffle!]')
-        #
-        # ---- Read it (images still normalized)
-        #
-        data = np.load( self.clusters_name[i]+'.npy', mmap_mode='r' )
-        #
-        # ---- Remember all of that
-        #
-        self.data      = data
-        self.data_i    = 0
-        self.cluster_i = i
-        #
-        if self.debug: print(f'\n[Load {self.cluster_i:02d},s={len(self.data):3d}] ',end='')
-          
-        
-    @classmethod
-    def about(cls):
-        display(Markdown('<br>**FIDLE 2020 - DataGenerator**'))
-        print('Version              :', cls.version)
-        print('TensorFlow version   :', tf.__version__)
-        print('Keras version        :', tf.keras.__version__)
diff --git a/VAE.Keras2/modules/datagen/MNIST.py b/VAE.Keras2/modules/datagen/MNIST.py
deleted file mode 100644
index f0ca276..0000000
--- a/VAE.Keras2/modules/datagen/MNIST.py
+++ /dev/null
@@ -1,114 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-import h5py
-import os
-import numpy as np
-from hashlib import blake2b
-import tensorflow as tf
-import tensorflow.keras.datasets.mnist as mnist
-
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class allows to manage datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_data(cls, normalize=True, expand=True, scale=1., train_prop=0.8, shuffle=True, seed=None):
-        """
-        Return original MNIST dataset
-        args:
-            normalize   : Normalize dataset or not (True)
-            expand      : Reshape images as (28,28,1) instead of (28,28) (True)
-            scale       : Scale of dataset to use. 1. means 100% (1.)
-            train_prop  : Ratio of train/test (0.8)
-            shuffle     : Shuffle data if True (True)
-            seed        : Random seed value. False means no seed, None means using /dev/urandom (None)
-        returns:
-            x_train,y_train,x_test,y_test
-        """
-
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            print(f'Seeded ({seed})')
-
-        # ---- Get data
-        #
-        (x_train, y_train), (x_test, y_test) = mnist.load_data()
-        print('Dataset loaded.')
-        
-        # ---- Concatenate
-        #
-        x_data = np.concatenate([x_train, x_test], axis=0)
-        y_data = np.concatenate([y_train, y_test])
-        print('Concatenated.')
-
-        # ---- Shuffle
-        #
-        if shuffle:
-            p = np.random.permutation(len(x_data))
-            x_data, y_data = x_data[p], y_data[p]
-            print('Shuffled.')     
-        
-        # ---- Rescale
-        #
-        n = int(scale*len(x_data))
-        x_data, y_data = x_data[:n], y_data[:n]
-        print(f'Rescaled ({scale}).') 
-
-        # ---- Normalization
-        #
-        if normalize:
-            x_data = x_data.astype('float32') / 255.
-            print('Normalized.')
-            
-        # ---- Reshape : (28,28) -> (28,28,1)
-        #
-        if expand:
-            x_data = np.expand_dims(x_data, axis=-1)
-            print('Reshaped.')
-
-        # ---- Split
-        #
-        n=int(len(x_data)*train_prop)
-        x_train, x_test = x_data[:n], x_data[n:]
-        y_train, y_test = y_data[:n], y_data[n:]
-        print(f'Split ({train_prop}).') 
-
-        # ---- Hash
-        #
-        h = blake2b(digest_size=10)
-        for a in [x_train,x_test, y_train,y_test]:
-            h.update(a)
-            
-        # ---- About and return
-        #
-        print('x_train shape is  : ', x_train.shape)
-        print('x_test  shape is  : ', x_test.shape)
-        print('y_train shape is  : ', y_train.shape)
-        print('y_test  shape is  : ', y_test.shape)
-        print('Blake2b digest is : ', h.hexdigest())
-        return  x_train,y_train, x_test,y_test
-                
-            
diff --git a/VAE.Keras2/modules/datagen/__init__.py b/VAE.Keras2/modules/datagen/__init__.py
deleted file mode 100644
index 74bb0f9..0000000
--- a/VAE.Keras2/modules/datagen/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from modules.datagen.MNIST         import MNIST
-from modules.datagen.DataGenerator import DataGenerator
-
-
diff --git a/VAE.Keras2/modules/layers/SamplingLayer.py b/VAE.Keras2/modules/layers/SamplingLayer.py
deleted file mode 100644
index f0856c6..0000000
--- a/VAE.Keras2/modules/layers/SamplingLayer.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            SamplingLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-class SamplingLayer(keras.layers.Layer):
-    '''A custom layer that receives (z_mean, z_log_var) and samples a z vector'''
-
-    def call(self, inputs):
-        
-        z_mean, z_log_var = inputs
-        
-        batch_size = tf.shape(z_mean)[0]
-        latent_dim = tf.shape(z_mean)[1]
-        
-        epsilon = tf.keras.backend.random_normal(shape=(batch_size, latent_dim))
-        z = z_mean + tf.exp(0.5 * z_log_var) * epsilon
-        
-        return z
\ No newline at end of file
diff --git a/VAE.Keras2/modules/layers/VariationalLossLayer.py b/VAE.Keras2/modules/layers/VariationalLossLayer.py
deleted file mode 100644
index 4de04a3..0000000
--- a/VAE.Keras2/modules/layers/VariationalLossLayer.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     VariationalLossLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-class VariationalLossLayer(keras.layers.Layer):
-   
-    def __init__(self, loss_weights=[3,7]):
-        super().__init__()
-        self.k1 = loss_weights[0]
-        self.k2 = loss_weights[1]
-
-
-    def call(self, inputs):
-        
-        # ---- Retrieve inputs
-        #
-        x, z_mean, z_log_var, y = inputs
-        
-        # ---- Compute : reconstruction loss
-        #
-        r_loss  = tf.reduce_mean( keras.losses.binary_crossentropy(x,y) ) * self.k1
-        #
-        # ---- Compute : kl_loss
-        #
-        kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
-        kl_loss = -tf.reduce_mean(kl_loss) * self.k2
-        
-        # ---- Add loss
-        #
-        loss = r_loss + kl_loss
-        self.add_loss(loss)
-        
-        # ---- Keep metrics
-        #
-        self.add_metric(loss,   aggregation='mean',name='loss')
-        self.add_metric(r_loss, aggregation='mean',name='r_loss')
-        self.add_metric(kl_loss,aggregation='mean',name='kl_loss')
-        return y
-
-    
-    def get_config(self):
-        return {'loss_weights':[self.k1,self.k2]}
\ No newline at end of file
diff --git a/VAE.Keras2/modules/layers/__init__.py b/VAE.Keras2/modules/layers/__init__.py
deleted file mode 100644
index bf47016..0000000
--- a/VAE.Keras2/modules/layers/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.layers.SamplingLayer        import SamplingLayer
-from modules.layers.VariationalLossLayer import VariationalLossLayer
diff --git a/VAE.Keras2/modules/models/VAE.py b/VAE.Keras2/modules/models/VAE.py
deleted file mode 100644
index d4705eb..0000000
--- a/VAE.Keras2/modules/models/VAE.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-from modules.layers    import SamplingLayer
-import os
-
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-
-
-class VAE(keras.Model):
-    '''
-    A VAE model, built from given encoder and decoder
-    '''
-
-    version = '1.4'
-
-    def __init__(self, encoder=None, decoder=None, loss_weights=[1,1], **kwargs):
-        '''
-        VAE instantiation with encoder, decoder and loss weights
-        args :
-            encoder : Encoder model
-            decoder : Decoder model
-            loss_weights : Weights of the two loss terms: reconstruction loss and kl_loss
-        return:
-            None
-        '''
-        super(VAE, self).__init__(**kwargs)
-        self.encoder      = encoder
-        self.decoder      = decoder
-        self.loss_weights = loss_weights
-        print(f'Fidle VAE is ready :-)  loss_weights={list(self.loss_weights)}')
-       
-        
-    def call(self, inputs):
-        '''
-        Model forward pass, when we use our model
-        args:
-            inputs : Model inputs
-        return:
-            output : Output of the model 
-        '''
-        z_mean, z_log_var, z = self.encoder(inputs)
-        output               = self.decoder(z)
-        return output
-                
-        
-    def train_step(self, input):
-        '''
-        Implementation of the training update.
-        Receives an input, computes the loss, gets the gradients, updates the weights and returns the metrics.
-        Here, our metrics are the losses.
-        args:
-            input : Model input
-        return:
-            loss    : Total loss
-            r_loss  : Reconstruction loss
-            kl_loss : KL loss
-        '''
-        
-        # ---- Get the input we need, specified in the .fit()
-        #
-        if isinstance(input, tuple):
-            input = input[0]
-        
-        k1,k2 = self.loss_weights
-        
-        # ---- Forward pass
-        #      Run the forward pass and record 
-        #      operations on the GradientTape.
-        #
-        with tf.GradientTape() as tape:
-            
-            # ---- Get encoder outputs
-            #
-            z_mean, z_log_var, z = self.encoder(input)
-            
-            # ---- Get reconstruction from decoder
-            #
-            reconstruction       = self.decoder(z)
-         
-            # ---- Compute loss
-            #      Reconstruction loss, KL loss and Total loss
-            #
-            reconstruction_loss  = k1 * tf.reduce_mean( keras.losses.binary_crossentropy(input, reconstruction) )
-
-            kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
-            kl_loss = -tf.reduce_mean(kl_loss) * k2
-
-            total_loss = reconstruction_loss + kl_loss
-
-        # ---- Retrieve gradients from gradient_tape
-        #      and run one step of gradient descent
-        #      to optimize trainable weights
-        #
-        grads = tape.gradient(total_loss, self.trainable_weights)
-        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
-        
-        return {
-            "loss":     total_loss,
-            "r_loss":   reconstruction_loss,
-            "kl_loss":  kl_loss,
-        }
-    
-    
-    def predict(self,inputs):
-        '''Our predict function...'''
-        z_mean, z_var, z  = self.encoder.predict(inputs)
-        outputs           = self.decoder.predict(z)
-        return outputs
-
-        
-    def save(self,filename):
-        '''Save the model in 2 parts'''
-        filename, extension = os.path.splitext(filename)
-        self.encoder.save(f'{filename}-encoder.h5')
-        self.decoder.save(f'{filename}-decoder.h5')
-
-    
-    def reload(self,filename):
-        '''Reload a model saved in 2 parts.'''
-        filename, extension = os.path.splitext(filename)
-        self.encoder = keras.models.load_model(f'{filename}-encoder.h5', custom_objects={'SamplingLayer': SamplingLayer})
-        self.decoder = keras.models.load_model(f'{filename}-decoder.h5')
-        print('Reloaded.')
-                
-        
-    @classmethod
-    def about(cls):
-        '''Basic whoami method'''
-        display(Markdown('<br>**FIDLE 2021 - VAE**'))
-        print('Version              :', cls.version)
-        print('TensorFlow version   :', tf.__version__)
-        print('Keras version        :', tf.keras.__version__)
diff --git a/VAE.Keras2/modules/models/__init__.py b/VAE.Keras2/modules/models/__init__.py
deleted file mode 100644
index b8d0a17..0000000
--- a/VAE.Keras2/modules/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from modules.models.VAE import VAE
\ No newline at end of file
diff --git a/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb b/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
deleted file mode 100644
index cb3ae15..0000000
--- a/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
+++ /dev/null
@@ -1,541 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE1] - First VAE, using Lightning API (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE with a small-dimensional latent space, using the PyTorch Lightning API\n",
-    "\n",
-    "<!-- AUTHOR : Achille Mbogol Touye (EFIlIA-MIAI/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neural network (VAE)\n",
-    " - Understanding the **Lightning API**, using two custom layers\n",
-    "\n",
-    "Since the computational requirements are significant, it is preferable to start with a very simple dataset such as MNIST.  \n",
-    "...MNIST at a small scale if you don't have a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Define a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look at the training process\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import torch.nn as nn\n",
-    "import lightning.pytorch as pl\n",
-    "\n",
-    "from modules.datagen     import MNIST\n",
-    "from torch.utils.data    import TensorDataset, DataLoader\n",
-    "from modules.progressbar import CustomTrainProgressBar\n",
-    "from modules.callbacks   import ImagesCallback, BestModelCallback\n",
-    "from modules.layers      import SamplingLayer, VariationalLossLayer\n",
-    "from lightning.pytorch.loggers.tensorboard import TensorBoardLogger\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE1')\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : With scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !\\\n",
-    "`latent_dim` : 2 dimensions is small, but useful to draw !\\\n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss`, which measures the reconstruction error.  \n",
-    " - `kl_loss`, the Kullback-Leibler divergence, which measures how far the latent distribution is from a standard normal prior.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `vae_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.001\\] gives good results here.\n"
-   ]
-  },
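-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To make this weighting concrete, the next cell is a small, self-contained sketch (added for illustration only, with dummy tensors): it uses the standard closed-form KL term, so the exact scaling may differ slightly from the project's `VariationalLossLayer`."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Illustrative sketch of the weighted VAE loss described above\n",
-    "#      (dummy tensors ; during training this computation is done by the project layers)\n",
-    "import torch\n",
-    "import torch.nn.functional as F\n",
-    "\n",
-    "k1, k2   = 1, .001                    # loss_weights\n",
-    "x        = torch.rand(8, 1, 28, 28)   # dummy batch of images in [0,1]\n",
-    "x_hat    = torch.rand(8, 1, 28, 28)   # dummy reconstruction\n",
-    "z_mean   = torch.zeros(8, 2)          # dummy latent parameters (latent_dim=2 assumed)\n",
-    "z_logvar = torch.zeros(8, 2)\n",
-    "\n",
-    "r_loss   = F.binary_cross_entropy(x_hat, x)\n",
-    "kl_loss  = -0.5 * torch.mean(1 + z_logvar - z_mean**2 - torch.exp(z_logvar))\n",
-    "vae_loss = k1*r_loss + k2*kl_loss\n",
-    "\n",
-    "print(f'r_loss={r_loss.item():.4f}  kl_loss={kl_loss.item():.4f}  vae_loss={vae_loss.item():.4f}')"
-   ]
-  },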
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 2\n",
-    "loss_weights  = [1,.001]\n",
-    "\n",
-    "scale         = 0.2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 10\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.1 - Using a DataLoader for training\n",
-    "The Dataset retrieves our dataset’s features and labels one sample at a time. While training a model, we typically want to pass samples in minibatches and reshuffle the data at every epoch to reduce overfitting. `DataLoader` is an iterable that abstracts this complexity behind an easy API."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "train_dataset = TensorDataset(x_data,y_data)\n",
-    "\n",
-    "# batch the training data\n",
-    "train_loader= DataLoader(\n",
-    "  dataset=train_dataset, \n",
-    "  shuffle=False, \n",
-    "  batch_size=batch_size, \n",
-    "  num_workers=2 \n",
-    ")\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use the **PyTorch Lightning API**.  \n",
-    "For this, we will use two custom layers :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_logvar - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VariationalLossLayer`, which computes the loss - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class Encoder(nn.Module):\n",
-    "    def __init__(self, latent_dim):\n",
-    "        super().__init__()\n",
-    "        self.Convblock=nn.Sequential(\n",
-    "            nn.Conv2d(in_channels=1,  out_channels=32, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(32),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Flatten(),\n",
-    "\n",
-    "            nn.Linear(64*7*7, 16),\n",
-    "            nn.BatchNorm1d(16),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "        )\n",
-    "\n",
-    "        self.z_mean   = nn.Linear(16, latent_dim)\n",
-    "        self.z_logvar = nn.Linear(16, latent_dim)\n",
-    "        \n",
-    "\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "       x        = self.Convblock(x)\n",
-    "       z_mean   = self.z_mean(x)\n",
-    "       z_logvar = self.z_logvar(x) \n",
-    "       z        = SamplingLayer()([z_mean, z_logvar]) \n",
-    "         \n",
-    "       return z_mean, z_logvar, z        "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class Decoder(nn.Module):\n",
-    "    def __init__(self, latent_dim):\n",
-    "        super().__init__()\n",
-    "        self.linear=nn.Sequential(\n",
-    "            nn.Linear(latent_dim, 16),\n",
-    "            nn.BatchNorm1d(16),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.Linear(16, 64*7*7),\n",
-    "            nn.BatchNorm1d(64*7*7),\n",
-    "            nn.ReLU()\n",
-    "        )\n",
-    "        \n",
-    "        self.Deconvblock=nn.Sequential(\n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(32),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=32, out_channels=1,  kernel_size=3, stride=1, padding=1),\n",
-    "            nn.Sigmoid()\n",
-    "        )\n",
-    "    \n",
-    "\n",
-    "\n",
-    "    def forward(self, z):\n",
-    "       x        = self.linear(z)\n",
-    "       x        = x.reshape(-1,64,7,7)\n",
-    "       x_hat    = self.Deconvblock(x)\n",
-    "       return x_hat"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "\n",
-    "We will calculate the loss with a specific layer: `VariationalLossLayer` - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class LitVAE(pl.LightningModule):\n",
-    "    \n",
-    "    def __init__(self, encoder, decoder):\n",
-    "        super().__init__()\n",
-    "        self.encoder  = encoder\n",
-    "        self.decoder  = decoder\n",
-    "        \n",
-    "    # forward pass\n",
-    "    def forward(self, x):\n",
-    "       z_mean, z_logvar, z = self.encoder(x)\n",
-    "       x_hat               = self.decoder(z)\n",
-    "       return x_hat\n",
-    "\n",
-    "    def training_step(self, batch, batch_idx):\n",
-    "        # training_step defines the train loop.\n",
-    "        x, _                = batch\n",
-    "        z_mean, z_logvar, z = self.encoder(x)\n",
-    "        x_hat               = self.decoder(z)\n",
-    "\n",
-    "        \n",
-    "        r_loss,kl_loss,loss = VariationalLossLayer(loss_weights=loss_weights)([x, z_mean,z_logvar,x_hat]) \n",
-    "\n",
-    "        metrics = {\"r_loss\"      : r_loss, \n",
-    "                    \"kl_loss\"    : kl_loss,\n",
-    "                    \"vae_loss\"   : loss\n",
-    "                  }\n",
-    "        \n",
-    "        # logs metrics for each training_step\n",
-    "        self.log_dict(metrics,\n",
-    "                      on_step  = False,\n",
-    "                      on_epoch = True, \n",
-    "                      prog_bar = True, \n",
-    "                      logger   = True\n",
-    "                     ) \n",
-    "        \n",
-    "       \n",
-    "        return loss\n",
-    "        \n",
-    "    def configure_optimizers(self):\n",
-    "        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n",
-    "        return optimizer\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# print model\n",
-    "vae=LitVAE(Encoder(latent_dim=latent_dim), Decoder(latent_dim=latent_dim))\n",
-    "print(vae)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : saves some images during training - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : saves the best model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# save best model\n",
-    "save_dir = \"./run/models/\"\n",
-    "BestModelCallback = BestModelCallback(dirpath= save_dir) \n",
-    "CallbackImages    = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "logger= TensorBoardLogger(save_dir='VAE1_logs',name=\"VAE_logs\") # loggers data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, this needs about 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "# train model\n",
-    "trainer= pl.Trainer(accelerator='auto',\n",
-    "                    max_epochs=epochs,\n",
-    "                    logger=logger,\n",
-    "                    num_sanity_val_steps=0,\n",
-    "                   callbacks=[CustomTrainProgressBar(), BestModelCallback, CallbackImages]\n",
-    "                   )\n",
-    "\n",
-    "trainer.fit(model=vae, train_dataloaders=train_loader)\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# launch Tensorboard \n",
-    "%reload_ext tensorboard\n",
-    "%tensorboard --logdir=./VAE1_logs/VAE_logs/ --bind_all"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
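-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As an optional illustration (not part of the original pipeline), the next cell performs this round trip directly with the trained model, assuming `vae` and `x_data` are still defined by the cells above."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Illustrative round trip : image -> encoder -> z -> decoder -> reconstruction\n",
-    "#      (sketch for illustration only)\n",
-    "vae.eval()\n",
-    "with torch.no_grad():\n",
-    "    x_sample            = x_data[:5].to(vae.device)\n",
-    "    z_mean, z_logvar, z = vae.encoder(x_sample)\n",
-    "    x_hat               = vae.decoder(z)\n",
-    "print('z shape     :', tuple(z.shape))\n",
-    "print('x_hat shape :', tuple(x_hat.shape))"
-   ]
-  },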
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = CallbackImages.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-generated')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Annex - Model save and reload"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "loaded_model = LitVAE.load_from_checkpoint(BestModelCallback.best_model_path,\n",
-    "                                           encoder=Encoder(latent_dim=2),\n",
-    "                                           decoder=Decoder(latent_dim=2))\n",
-    "# put the model in evaluation mode\n",
-    "loaded_model.eval()\n",
-    "\n",
-    "# ---- Retrieve the decoder\n",
-    "decoder=loaded_model.decoder\n",
-    "\n",
-    "# example of z\n",
-    "z   = torch.Tensor([[-1,.1]]).to(device)\n",
-    "img = decoder(z)\n",
-    "\n",
-    "fidle.scrawler.images(img.cpu().detach(), x_size=2,y_size=2, save_as='04-example')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb b/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
deleted file mode 100644
index 8d4335c..0000000
--- a/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
+++ /dev/null
@@ -1,516 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE2] - VAE, using a custom model class  (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using a model subclass, with a small-dimensional latent space, using PyTorch Lightning\n",
-    "\n",
-    "<!-- AUTHOR : Achille Mbogol Touye (EFILIA-MIAI/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neural network (VAE)\n",
-    " - Understanding a still more **advanced programming model**, using a **custom model**\n",
-    "\n",
-    "Since the computational requirements are significant, it is preferable to start with a very simple dataset such as MNIST.  \n",
-    "...MNIST at a small scale if you don't have a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Define a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look at the training process\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os,sys\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import scipy.stats\n",
-    "\n",
-    "import torch.nn as nn\n",
-    "import lightning.pytorch as pl\n",
-    "\n",
-    "from torch.utils.data    import TensorDataset, DataLoader\n",
-    "from modules.callbacks   import ImagesCallback,BestModelCallback\n",
-    "from modules.progressbar import CustomTrainProgressBar\n",
-    "from modules.layers      import SamplingLayer\n",
-    "from modules.datagen     import MNIST\n",
-    "from modules.models      import VAE, Encoder, Decoder\n",
-    "from lightning.pytorch.loggers.tensorboard import TensorBoardLogger\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE2')\n",
-    "\n",
-    "VAE.about()\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : with scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !  \n",
-    "`latent_dim` : 2 dimensions is small, but useful to draw !  \n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss`, which measures the reconstruction error.  \n",
-    " - `kl_loss`, the Kullback-Leibler divergence, which measures how far the latent distribution is from a standard normal prior.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \[1,.001\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 6\n",
-    "loss_weights  = [1,.001]       # [1, .001] give good results\n",
-    "\n",
-    "scale         = .2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 5\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.1 - Use a DataLoader for training\n",
-    "The Dataset retrieves our dataset’s features and labels one sample at a time. While training a model, we typically want to pass samples in minibatches and to reshuffle the data at every epoch to reduce overfitting. A DataLoader is an iterable that abstracts this complexity for us behind an easy API"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "train_dataset = TensorDataset(x_data,y_data)\n",
-    "\n",
-    "# Training batch data loader\n",
-    "train_loader= DataLoader(\n",
-    "  dataset=train_dataset, \n",
-    "  shuffle=False, \n",
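-    "  # shuffle=False here: the data was already shuffled by MNIST.get_data()\n",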
-    "  batch_size=batch_size, \n",
-    "  num_workers=2 \n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use a **custom model**.\n",
-    "For this, we will use :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_logvar - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VAE`, a custom model - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "`VAE` is a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
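-    "# Build the VAE from its encoder and decoder; loss_weights=[k1,k2] weight r_loss and kl_loss respectively\n",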
-    "vae=VAE(Encoder(latent_dim = latent_dim),\n",
-    "        Decoder(latent_dim = latent_dim),\n",
-    "        loss_weights\n",
-    "       )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : which saves images during training - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : which saves the best model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "save_dir = \"./run/models_dir/\"\n",
-    "callback_bestmodel  = BestModelCallback(dirpath = save_dir)\n",
-    "CallbackImages      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "logger              = TensorBoardLogger(save_dir='VAE2_logs',name=\"VAE_logs\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, you need about 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "# train model\n",
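-    "# accelerator='auto' lets Lightning pick the available accelerator (GPU or CPU)\n",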
-    "trainer= pl.Trainer(accelerator='auto',\n",
-    "                    max_epochs=epochs,\n",
-    "                    logger=logger,\n",
-    "                    num_sanity_val_steps=0,\n",
-    "                   callbacks=[CustomTrainProgressBar(), callback_bestmodel, CallbackImages]\n",
-    "                   )\n",
-    "\n",
-    "trainer.fit(model=vae, train_dataloaders=train_loader)\n",
-    "\n",
-    "\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Launch TensorBoard\n",
-    "%reload_ext tensorboard\n",
-    "%tensorboard --logdir=./VAE2_logs/VAE_logs/ --bind_all"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = CallbackImages.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-original')\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder) during training"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='04-encoded')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Model evaluation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.1 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "callback_bestmodel.best_model_path"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "vae = VAE.load_from_checkpoint(callback_bestmodel.best_model_path,\n",
-    "                                encoder=Encoder(latent_dim=latent_dim),\n",
-    "                                decoder=Decoder(latent_dim=latent_dim)\n",
-    "                              )\n",
-    "# put model in evaluation mode\n",
-    "vae.eval()\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
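-    "# The encoder returns the latent distribution parameters and a sampled z; the decoder maps z back to images\n",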
-    "z_mean, z_var, z  = vae.encoder(x_show.to(device))\n",
-    "x_reconst         = vae.decoder(z)\n",
-    "\n",
-    "# ---- Show it\n",
-    "z         = z.cpu().detach()\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='05-original')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='06-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Visualization of the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = int(20000*scale)\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder(x_show.to(device))\n",
-    "\n",
-    "# ---- Show them\n",
-    "z         = z.cpu().detach()           # Move the tensor to CPU and detach it\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
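-    "# Note: only the first two of the latent_dim dimensions are plotted here\n",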
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('07-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Generative latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, this part only works if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "    \n",
-    "    grid_size   = 18\n",
-    "    grid_scale  = 1\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
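-    "    # norm.ppf maps evenly spaced probabilities to quantiles of N(0, grid_scale), so the grid follows the Gaussian prior\n",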
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Plot latent points and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(10, 8))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('08-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "    grid=torch.from_numpy(grid).float().to(device)\n",
-    "    x_reconst = vae.decoder(grid)\n",
-    "    x_reconst = x_reconst.cpu().detach()\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='09-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb b/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
deleted file mode 100644
index f35f8a4..0000000
--- a/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,358 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE3] - Analysis of the VAE's latent space of MNIST dataset\n",
-    "<!-- DESC --> Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - First data generation from **latent space** \n",
-    " - Understanding of underlying principles\n",
-    " - Model management\n",
-    "\n",
-    "Here, we no longer consume data, we generate it! ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load a saved model\n",
-    " - Reconstruct some images\n",
-    " - Latent space visualization\n",
-    " - Matrix of generated images\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import sys\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import torch.nn as nn\n",
-    "\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "from modules.models    import Encoder, Decoder, VAE \n",
-    "\n",
-    "\n",
-    "import scipy.stats\n",
-    "import matplotlib\n",
-    "import matplotlib.pyplot as plt\n",
-    "from barviz import Simplex\n",
-    "from barviz import Collection\n",
-    "\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE3')\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scale      = 1\n",
-    "seed       = 123\n",
-    "models_dir = './run/models_dir/best-model-epoch=4-loss=0.00.ckpt'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('scale', 'seed', 'models_dir')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "latent_dim=6\n",
-    "\n",
-    "vae = VAE.load_from_checkpoint(models_dir,\n",
-    "                               encoder=Encoder(latent_dim=latent_dim),\n",
-    "                               decoder=Decoder(latent_dim=latent_dim)\n",
-    "                              )\n",
-    "# put model in evaluation mode\n",
-    "vae.eval()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder(x_show.to(device))\n",
-    "x_reconst         = vae.decoder(z)\n",
-    "\n",
-    "latent_dim        = z.shape[1]\n",
-    "\n",
-    "# ---- Show it\n",
-    "z         = z.cpu().detach()         # Move the tensor to CPU and detach it\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.utils.subtitle('Originals :')\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='01-original')\n",
-    "fidle.utils.subtitle('Reconstructed :')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='02-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Visualizing the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 5000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show   = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder(x_show.to(device))\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.1 - Classic 2D visualisation"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "z   = z.cpu().detach()\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('03-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Simplex visualisation"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim<4:\n",
-    "\n",
-    "    print('Sorry, this part only works if the latent space dimension is greater than 3')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    # ---- Softmax rescale\n",
-    "    #\n",
-    "    zs = torch.exp(z)/torch.sum(torch.exp(z),axis=1,keepdims=True)\n",
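-    "    # softmax: each row of zs now sums to 1, i.e. lies on the probability simplex\n",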
-    "    zs=zs.cpu().detach()\n",
-    "    # zc  = zs * 1/np.max(zs)\n",
-    "\n",
-    "    # ---- Create collection\n",
-    "    #\n",
-    "    c = Collection(zs, colors=y_show, labels=y_show)\n",
-    "    c.attrs.markers_colormap     = {'colorscale':'Rainbow','cmin':0,'cmax':latent_dim}\n",
-    "    c.attrs.markers_size         = 5\n",
-    "    c.attrs.markers_border_width = 0\n",
-    "    c.attrs.markers_opacity      = 0.8\n",
-    "\n",
-    "    s = Simplex.build(latent_dim)\n",
-    "    s.attrs.width  = 1000\n",
-    "    s.attrs.height = 1000\n",
-    "    s.plot(c)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Generate from latent space (latent_dim==2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, this part only works if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    grid_size   = 14\n",
-    "    grid_scale  = 1.\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Plot latent points and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(12, 10))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('04-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "    grid      = torch.from_numpy(grid).float().to(device)\n",
-    "    x_reconst = vae.decoder(grid)\n",
-    "    x_reconst = x_reconst.cpu().detach()\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='05-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/modules/.gitkeep b/VAE.Lightning/modules/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/VAE.Lightning/modules/callbacks/.gitkeep b/VAE.Lightning/modules/callbacks/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/VAE.Lightning/modules/callbacks/BestModelCallback.py b/VAE.Lightning/modules/callbacks/BestModelCallback.py
deleted file mode 100644
index d44bfca..0000000
--- a/VAE.Lightning/modules/callbacks/BestModelCallback.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-import lightning.pytorch as pl
-from lightning.pytorch.callbacks import ModelCheckpoint
-from lightning.pytorch.callbacks import Callback
-
-
-class BestModelCallback(Callback):
-
-    def __init__(self, filename='best-model-{epoch}-{loss:.2f}', dirpath="./run/models/"):
-        super(BestModelCallback, self).__init__()  
-        self.filename = filename
-        self.dirpath  = dirpath
-        os.makedirs(dirpath, exist_ok=True)
-        self.best_model_path = None
-        self.model_checkpoint = ModelCheckpoint(
-            dirpath    = dirpath,
-            filename   = filename,
-            save_top_k = 1,
-            verbose    = False,
-            monitor    = "vae_loss",
-            mode       = "min"
-        )
-
-    def on_train_epoch_end(self, trainer, pl_module):
-        # save the best model
-        self.model_checkpoint.on_train_epoch_end(trainer, pl_module)
-        self.best_model_path = self.model_checkpoint.best_model_path
-
-                
diff --git a/VAE.Lightning/modules/callbacks/ImagesCallback.py b/VAE.Lightning/modules/callbacks/ImagesCallback.py
deleted file mode 100644
index 25e0c12..0000000
--- a/VAE.Lightning/modules/callbacks/ImagesCallback.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            ImageCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye, sep 2023
-
-
-import lightning.pytorch as pl
-import numpy as np
-import matplotlib.pyplot as plt
-from skimage import io
-import os
-import torch
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-class ImagesCallback(pl.Callback):
-    '''
-    Save generated (random mode) or encoded/decoded (z mode) images on epoch end.
-    params:
-        x           : input images, for z mode (None)
-        z_dim       : size of the latent space, for random mode (None)
-        nb_images   : number of images to save
-        from_z      : save images from z (False)
-        from_random : save images from random (False)
-        filename    : images filename
-        run_dir     : output directory to save images        
-    '''
-    
-   
-    def __init__(self, x           = None,
-                       z_dim       = None,
-                       nb_images   = 5,
-                       from_z      = False, 
-                       from_random = False,
-                       filename    = 'image-{epoch:03d}-{i:02d}.jpg',
-                       run_dir     = './run'):
-        
-        # ---- Parameters
-        #import lightning.pytorch as pl
-        super().__init__()
-        self.x = None if x is None else x[:nb_images]
-        self.z_dim       = z_dim
-        
-        self.nb_images   = nb_images
-        self.from_z      = from_z
-        self.from_random = from_random
-
-        self.filename_z       = run_dir + '/images-z/'      + filename
-        self.filename_random  = run_dir + '/images-random/' + filename
-        
-        if from_z:      os.makedirs( run_dir + '/images-z/',      mode=0o750, exist_ok=True)
-        if from_random: os.makedirs( run_dir + '/images-random/', mode=0o750, exist_ok=True)
-        
-    
-    
-    def save_images(self, images, filename, epoch):
-        '''Save images as <filename>'''
-        
-        for i,image in enumerate(images):
-            
-            image = image.squeeze()  # Squeeze it if monochrome : (1,H,W) -> (H,W) 
-        
-            filenamei = filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filenamei, image, cmap='gray_r')
-            else:
-                plt.imsave(filenamei, image)
-
-    
-    
-    def on_train_epoch_end(self, trainer, pl_module):
-        '''Called at the end of each epoch'''
-        
-        encoder     = pl_module.encoder
-        decoder     = pl_module.decoder
-
-        if self.from_random:
-            z      = torch.randn(self.nb_images,self.z_dim).to(device)
-            images = decoder(z)
-            self.save_images(images.cpu().detach(), self.filename_random, trainer.current_epoch)
-            
-        if self.from_z:
-            z_mean, z_logvar, z  = encoder(self.x.to(device))
-            images               = decoder(z)
-            self.save_images(images.cpu().detach(), self.filename_z, trainer.current_epoch)
-
-
-    def get_images(self, epochs=None, from_z=True,from_random=True):
-        '''Read and return saved images. epochs is a range'''
-        if epochs is None : return
-        images_z = []
-        images_r = []
-        for epoch in list(epochs):
-            for i in range(self.nb_images):
-                if from_z:
-                    f = self.filename_z.format(epoch=epoch,i=i)
-                    images_z.append( io.imread(f) )
-                if from_random:
-                    f = self.filename_random.format(epoch=epoch,i=i)
-                    images_r.append( io.imread(f) )
-        return images_z, images_r
-            
diff --git a/VAE.Lightning/modules/callbacks/__init__.py b/VAE.Lightning/modules/callbacks/__init__.py
deleted file mode 100644
index 83b25a9..0000000
--- a/VAE.Lightning/modules/callbacks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.callbacks.ImagesCallback     import ImagesCallback
-from modules.callbacks.BestModelCallback  import BestModelCallback
\ No newline at end of file
diff --git a/VAE.Lightning/modules/datagen/.gitkeep b/VAE.Lightning/modules/datagen/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/VAE.Lightning/modules/datagen/MNIST.py b/VAE.Lightning/modules/datagen/MNIST.py
deleted file mode 100644
index 7b063d0..0000000
--- a/VAE.Lightning/modules/datagen/MNIST.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye, sep 2023
-
-
-import torch
-
-import numpy as np
-import torchvision.transforms as T
-from torchvision import datasets
-from torch.utils.data import DataLoader
-from hashlib import blake2b
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class allows to manage datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_data(cls, normalize=True,  scale=1., train_prop=0.8, shuffle=True, seed=None):
-        """
-        Return original MNIST dataset
-        args:
-            normalize  : Normalize dataset or not (True)
-            scale      : Fraction of the dataset to use; 1. means 100% (1.)
-            train_prop : Ratio of train/test (0.8)
-            shuffle    : Shuffle data if True (True)
-            seed       : Random seed value. False means no seed, None means using /dev/urandom (None)
-        returns:
-            x_train,y_train,x_test,y_test
-        """
-
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            print(f'Seeded ({seed})')
-
-        # ---- Get data
-        #
-        train_dataset = datasets.MNIST(root=".data", train=True,  download=True, transform=T.PILToTensor())
-
-        test_dataset  = datasets.MNIST(root=".data", train=False, download=True, transform=T.PILToTensor())
-        print('Dataset loaded.')
-
-        # ---- Normalization
-        #
-        if normalize:
-            train_dataset = datasets.MNIST(root=".data", train=True,  download=True, transform=T.ToTensor())
-            test_dataset  = datasets.MNIST(root=".data", train=False, download=True, transform=T.ToTensor())
-
-            trainloader   = DataLoader(train_dataset, batch_size=len(train_dataset))
-            testloader    = DataLoader(test_dataset,  batch_size=len(test_dataset) )
-            
-            x_train       = next(iter(trainloader))[0]
-            y_train       = next(iter(trainloader))[1]
-
-            x_test        = next(iter(testloader))[0]
-            y_test        = next(iter(testloader))[1]
-
-            print('Normalized.')
-            
-        else:
-            trainloader   = DataLoader(train_dataset, batch_size=len(train_dataset))
-            testloader    = DataLoader(test_dataset,  batch_size=len(test_dataset) )
-            
-            x_train       = next(iter(trainloader))[0]
-            y_train       = next(iter(trainloader))[1]
-
-            x_test        = next(iter(testloader))[0]
-            y_test        = next(iter(testloader))[1] 
-
-            print('Unnormalized.')
-        
-        # ---- Concatenate
-        #
-        x_data = torch.cat([x_train, x_test], dim=0)
-        y_data = torch.cat([y_train, y_test])
-        print('Concatenated.')
-
-        # ---- Shuffle
-        #
-        if shuffle:
-            p              = torch.randperm(len(x_data))
-            x_data, y_data = x_data[p], y_data[p]
-            print('Shuffled.')     
-        
-        # ---- Rescale
-        #
-        n              = int(scale*len(x_data))
-        x_data, y_data = x_data[:n], y_data[:n]
-        print(f'rescaled ({scale}).') 
-
-        # ---- Split
-        #
-        n               = int(len(x_data)*train_prop)
-        x_train, x_test = x_data[:n], x_data[n:]
-        y_train, y_test = y_data[:n], y_data[n:]
-        print(f'split ({train_prop}).') 
-
-        # ---- Hash
-        #
-        h = blake2b(digest_size=10)
-        for a in [x_train,x_test, y_train,y_test]:
-            h.update(a.numpy().tobytes())
-            
-        # ---- About and return
-        #
-        print('x_train shape is  : ', x_train.shape)
-        print('x_test  shape is  : ', x_test.shape)
-        print('y_train shape is  : ', y_train.shape)
-        print('y_test  shape is  : ', y_test.shape)
-        print('Blake2b digest is : ', h.hexdigest())
-        return  x_train,y_train, x_test,y_test
-                
-            
diff --git a/VAE.Lightning/modules/datagen/__init__.py b/VAE.Lightning/modules/datagen/__init__.py
deleted file mode 100644
index 74bb0f9..0000000
--- a/VAE.Lightning/modules/datagen/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from modules.datagen.MNIST         import MNIST
-from modules.datagen.DataGenerator import DataGenerator
-
-
diff --git a/VAE.Lightning/modules/layers/.gitkeep b/VAE.Lightning/modules/layers/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/VAE.Lightning/modules/layers/SamplingLayer.py b/VAE.Lightning/modules/layers/SamplingLayer.py
deleted file mode 100644
index 324ef34..0000000
--- a/VAE.Lightning/modules/layers/SamplingLayer.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            SamplingLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023), based on https://www.researchgate.net/publication/304163568_Tutorial_on_Variational_Autoencoders
-#
-
-import torch
-import torch.nn as nn
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-class SamplingLayer(nn.Module):
-    '''A custom layer that receives (z_mean, z_logvar) and samples a z vector'''
-
-    def forward(self, inputs):
-        
-        z_mean, z_logvar = inputs
-        
-        batch_size = z_mean.size(0)
-        latent_dim = z_mean.size(1)
-
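-        # Reparameterization trick: z = z_mean + sigma * epsilon, with epsilon ~ N(0, I)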
-        z_sigma    = torch.exp(0.5 * z_logvar)
-        
-        epsilon    = torch.randn(size=(batch_size, latent_dim)).to(device)  
-        
-        z          = z_mean + z_sigma * epsilon
-        
-        return z
\ No newline at end of file
diff --git a/VAE.Lightning/modules/layers/VariationalLossLayer.py b/VAE.Lightning/modules/layers/VariationalLossLayer.py
deleted file mode 100644
index 0d9115c..0000000
--- a/VAE.Lightning/modules/layers/VariationalLossLayer.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     VariationalLossLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2020), based on https://www.researchgate.net/publication/304163568_Tutorial_on_Variational_Autoencoders
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-class VariationalLossLayer(nn.Module):
-   
-    def __init__(self, loss_weights=[3,7]):
-        super().__init__()
-        self.k1 = loss_weights[0]
-        self.k2 = loss_weights[1]
-
-
-    def forward(self, inputs):
-        
-        # ---- Retrieve inputs
-        #
-        x, z_mean, z_logvar, x_hat = inputs
-        
-        # ---- Compute : reconstruction loss
-        #
-        r_loss  = F.mse_loss(x_hat, x)* self.k1
-        
-        #
-        # ---- Compute : kl_loss
-        #
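-        # KL divergence to the N(0, I) prior, averaged over batch and dimensions (the usual 1/2 factor is absorbed into k2)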
-        kl_loss =  - torch.mean(1 + z_logvar - torch.square(z_mean) - torch.exp(z_logvar))* self.k2
-
-        # ---- total loss
-        #
-        loss   = r_loss + kl_loss
-       
-        return r_loss, kl_loss, loss
-
-    
-    def get_config(self):
-        return {'loss_weights':[self.k1,self.k2]}
\ No newline at end of file
diff --git a/VAE.Lightning/modules/layers/__init__.py b/VAE.Lightning/modules/layers/__init__.py
deleted file mode 100644
index bf47016..0000000
--- a/VAE.Lightning/modules/layers/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.layers.SamplingLayer        import SamplingLayer
-from modules.layers.VariationalLossLayer import VariationalLossLayer
diff --git a/VAE.Lightning/modules/models/.gitkeep b/VAE.Lightning/modules/models/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/VAE.Lightning/modules/models/Decoder.py b/VAE.Lightning/modules/models/Decoder.py
deleted file mode 100644
index aa512da..0000000
--- a/VAE.Lightning/modules/models/Decoder.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-
-class Decoder(nn.Module):
-    def __init__(self, latent_dim):
-        super().__init__()
-        self.linear=nn.Sequential(
-            nn.Linear(latent_dim, 16),
-            nn.BatchNorm1d(16),
-            nn.ReLU(),
-            
-            nn.Linear(16, 64*7*7),
-            nn.BatchNorm1d(64*7*7),
-            nn.ReLU()
-        )
-        
-        self.Deconvblock=nn.Sequential(
-            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(32),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=32, out_channels=1,  kernel_size=3, stride=1, padding=1),
-            nn.Sigmoid()
-        )
-    
-
-    def forward(self, z):
-       x        = self.linear(z)
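-       # reshape to (N, 64, 7, 7) feature maps, then deconvolve back to (N, 1, 28, 28) images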
-       x        = x.reshape(-1,64,7,7)
-       x_hat    = self.Deconvblock(x)
-       return x_hat
-        
\ No newline at end of file
diff --git a/VAE.Lightning/modules/models/Encoder.py b/VAE.Lightning/modules/models/Encoder.py
deleted file mode 100644
index 1619160..0000000
--- a/VAE.Lightning/modules/models/Encoder.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-from modules.layers  import SamplingLayer
-
-class Encoder(nn.Module):
-    def __init__(self, latent_dim):
-        super().__init__()
-        self.Convblock=nn.Sequential(
-            nn.Conv2d(in_channels=1,  out_channels=32, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(32),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Flatten(),
-
-            nn.Linear(64*7*7, 16),
-            nn.BatchNorm1d(16),
-            nn.LeakyReLU(0.2),
-        )
-
-        self.z_mean   = nn.Linear(16, latent_dim)
-        self.z_logvar = nn.Linear(16, latent_dim)
-        
-
-
-    def forward(self, x):
-       x        = self.Convblock(x)
-       z_mean   = self.z_mean(x)
-       z_logvar = self.z_logvar(x) 
-       z        = SamplingLayer()([z_mean, z_logvar]) 
-         
-       return z_mean, z_logvar, z 
diff --git a/VAE.Lightning/modules/models/VAE.py b/VAE.Lightning/modules/models/VAE.py
deleted file mode 100644
index 771e11c..0000000
--- a/VAE.Lightning/modules/models/VAE.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-import lightning.pytorch as pl
-
-from IPython.display import display,Markdown
-from modules.layers  import VariationalLossLayer
-
-
-class VAE(pl.LightningModule):
-    '''
-    A VAE model, built from given encoder and decoder
-    '''
-
-    version = '1.4'
-
-    def __init__(self, encoder=None, decoder=None, loss_weights=[1,.001], **kwargs):
-        '''
-        VAE instantiation with encoder, decoder and loss weights
-        args :
-            encoder : Encoder model
-            decoder : Decoder model
-            loss_weights : Weights [k1, k2] of the reconstruction loss and the kl loss ([1, .001])
-        return:
-            None
-        '''
-        super(VAE, self).__init__(**kwargs)
-        self.encoder      = encoder
-        self.decoder      = decoder
-        self.loss_weights = loss_weights
-        print(f'Fidle VAE is ready :-)  loss_weights={list(self.loss_weights)}')
-       
-        
-    def forward(self, inputs):
-        '''
-        args:
-            inputs : Model inputs
-        return:
-            output : Output of the model 
-        '''
-        z_mean, z_logvar, z = self.encoder(inputs)
-        output              = self.decoder(z)
-        return output
-                    
-
-    def training_step(self, batch, batch_idx):
-        # training_step defines the train loop.
-        inputs, _           = batch
-        z_mean, z_logvar, z = self.encoder(inputs)
-        x_hat               = self.decoder(z)
-        
-        r_loss,kl_loss,loss = VariationalLossLayer(loss_weights=self.loss_weights)([inputs, z_mean,z_logvar,x_hat]) 
-
-        metrics = { "r_loss"     : r_loss, 
-                    "kl_loss"    : kl_loss,
-                    "vae_loss"   : loss
-                  }
-        
-        # logs metrics for each training_step
-        self.log_dict(metrics,
-                      on_step  = False,
-                      on_epoch = True, 
-                      prog_bar = True, 
-                      logger   = True
-                     ) 
-        
-        return loss
-
-
-    def configure_optimizers(self):
-        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
-        return optimizer
-        
-
-    
-    @classmethod
-    def about(cls):
-        '''Basic whoami method'''
-        display(Markdown('<br>**FIDLE 2023 - VAE**'))
-        print('Version              :', cls.version)
-        print('Lightning version    :', pl.__version__)
diff --git a/VAE.Lightning/modules/models/__init__.py b/VAE.Lightning/modules/models/__init__.py
deleted file mode 100644
index 336f990..0000000
--- a/VAE.Lightning/modules/models/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from modules.models.VAE     import VAE
-from modules.models.Encoder import Encoder
-from modules.models.Decoder import Decoder
\ No newline at end of file
diff --git a/VAE.Lightning/modules/progressbar.py b/VAE.Lightning/modules/progressbar.py
deleted file mode 100644
index fb25a07..0000000
--- a/VAE.Lightning/modules/progressbar.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2023 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye (EFELIA-MIAI/SIMAP), sep 2023
-
-from tqdm import tqdm as _tqdm
-from lightning.pytorch.callbacks import TQDMProgressBar
-
-# Progress bar callback that displays the training metrics
-class CustomTrainProgressBar(TQDMProgressBar):
-    def __init__(self):
-        super().__init__()
-        self._val_progress_bar     = _tqdm()
-        self._predict_progress_bar = _tqdm()
-        
-    def init_predict_tqdm(self):
-        bar=super().init_predict_tqdm()
-        bar.set_description("Predicting")
-        return bar
-
-    def init_train_tqdm(self):
-        bar=super().init_train_tqdm()
-        bar.set_description("Training")
-        return bar    
-
-    @property
-    def val_progress_bar(self):
-        if self._val_progress_bar is None:
-            raise ValueError("The `_val_progress_bar` reference has not been set yet.")
-        return self._val_progress_bar
-
-    @property
-    def predict_progress_bar(self) -> _tqdm:
-        if self._predict_progress_bar is None:
-            raise TypeError(f"The `{self.__class__.__name__}._predict_progress_bar` reference has not been set yet.")
-        return self._predict_progress_bar    
-    
-
-    def on_validation_start(self, trainer, pl_module):
-        # Disable the display of the validation progress bar
-        self.val_progress_bar.disable = True  
-
-    def on_predict_start(self, trainer, pl_module):
-        # Disable the display of the prediction progress bar
-        self.predict_progress_bar.disable = True 
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
index c375423..2737f21 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -37,8 +37,8 @@ RUN pip3 install --no-cache-dir --upgrade tensorboard tensorboardX jupyter ipywi
 RUN bin/rm /usr/local/share/jupyter/kernels/python3/logo*      
     
 # Change default logo and name kernels
-COPY images/env-fidle.png /usr/local/share/jupyter/kernels/python3/logo-64x64.png
-COPY images/env-fidle.svg /usr/local/share/jupyter/kernels/python3/logo-svg.svg
+COPY images/env-keras3.png /usr/local/share/jupyter/kernels/python3/logo-64x64.png
+COPY images/env-keras3.svg /usr/local/share/jupyter/kernels/python3/logo-svg.svg
 
 # Get Fidle datasets
 RUN mkdir /data && \
diff --git a/docker/images/env-fidle.png b/docker/images/env-fidle.png
deleted file mode 100644
index 5d7548194ce56bbbe9adb7d16bab5cf199051b2b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 2262
zcmV;{2r2i8P)<h;3K|Lk000e1NJLTq002M$002M;1^@s6s%dfF00009a7bBm000G|
z000G|0d{khW&i*Ph)G02RCt{2TYqen<sJXDy}MG{qd>KiLPF8af<Gc%l(LvfQFQx5
zM=6Pkjl*lqqETo0hdB__HdbKxhv3k;K-SV}-55eeO<b|ia~ZZ}hDM`cme}ejgDPw3
zac%E<+%2D{eXsZ9{r$doT*5xdrT4z~Jn!>-zt8jie1AM|vm%j51lKUKuVvv0dlV23
zO?}dcD_@@xAFHsYfD3)VTb00xCg77M;4hWH=s?P~K*4=mY7z1KEI-Sf0!9XaBOYL1
zIq<D_=KjCEz`iAbcOwuOu{=3B%m{zkmggB$z{Cj9)d2iqIq=zj<#wlb1I?vCPlNRt
z#=vgj3H=H<)dIY}9O!BU0++Pg2QLA=jX-ND(BE!tmMIdR&{n|LUBJFd;7~0v@{REk
zeBS^ct^<x#0vG#CO;S$yoF!$*xp8S+yYaTy4fBGs0)_{G10G=SYM|e%*i1M6;|1O+
z2fQ92sGlE{gm-UmMpnK%de6UWD<<ClJ$##ukz=v~#z%mT2H=(Dz^5v9ZWjOA4;&~3
z`n35$ukcb^B0MyR@P$DO3V7QCbTk4JI_%sm$Payu!0{?ugeQ34S+N@KZOuuhp$7RK
zkS-{gf>GfK-Y?ktYMk)F&UZ1k_qVFTIMOGrKG1$Izf@;PcsWpb@&e(waVcE)t(9%3
z-wl?o0-ic$T%xMuK+zq#o0G}IL+7H`W`t*dNqAyx?~b@<#zI>Ga~A{an}Nr>fCbBS
zPa=wimB7P&KxH%FD%6~K;K-ZNYcs+V*~KNu{rRKOYwjj0R<^C`lh)J&b$!5(JccG9
zi!u*TGXUIHr`RNMlKC5(3=1z|_Aj3kijY8)kaLgn3b=JGu(=<&zX6yv*VF`}I9v&Q
zKOF$M<^n5s0T1^B3sxmHo$T(6ey7bEVRJd;)jXB({9{S2k`?es3sBn*%qz7tcogTn
zz+cOO6B}n(zn~QOSr>43OSBo8KbH7hEAO>DG?kc@Ul6^X{QP86n`8ydw@L6se7+m_
zTY}#&m>DaPq#U>3Wy`QROJ;VFT!54@6`@c<al@?bCn8pnSNf9+DA!a;OfHkm7zO0q
zurMXhjV+rwZo4z-_tY!ku6kX~VqF#yk|8WJxYGYsw3nP&rCtFm8h}Upfzq|A^?*du
z3(B}GA~H4>_g4*E{aA8!$`EUMOu$_V{Inf-s7tjOvpEUD&UV?rC2RI<4ApqVUo9jC
zR?Z*am((M@98jblO6RVbag!u0|0~bSw#!+h%@+)MCVP%YuWA2@W=3R`7jUk;H;$+%
zz~nzV5cb8h_q_Tohy;R)&*9eh?}}GmVpJ$0<J=%)cnNurKL_XC4<OLf8@(3^27yor
zS-CERFZ+}FbLA6j2~3QX0;`Nu0ONlqL4@N6OA+WjiSM3x2L40sh)f2N<#Hjrun^xp
z`78Jj9*Xv(*SOsDtZXeoY4ybzDVE0+q~ZlhFt=hg3hSOm=<Fb}oGyfi{|o=SA7K1w
zCmhpU;NQ6k!M}f)G?bon+PDH{6;9Rk^uWE?k`gBW`7wgMA7l2a3OH^m6Zf;{&Bu60
zr%;N!b_0SZj>!to{xm18klMHjY#abqX#_X9g+N6;@QAr{013`J?}78ya)iF<#b{kM
z0w4TY{6gUipM4sWJ;yNi>ejeEBvJRaKkBX}n14Tj=Y#KgfU{oUC-p!UfKM8MAFc&f
z?*iu7n^n>rLVexHUR;Lk`9+vG+=9S+Z$`5{k{nZLqcsHUFG%o&*nb{y6-<4KO_a@K
z5d89tV1L?v@bpYP&j83(b~AFWD?)C?3b<~uRy4_|H$=jtK8zpz2$y!hhR~pV5i=2v
zg(b*ezfSN$_8j9WbjB#a|IT3yzqkWopSd&tY_}UlFK<KM{mJ33YCokPxb^62<S_i=
zc1*+sCsx3%exc?uT>SO(riCZim$8Abxz$iVMFns>@`0RSEI5F#<xlV*w0`8xRUfK(
z41pedQeR@YX@?;tC@MfA7fz7MfV54Uc8KqxLH!4ia=5r@i{({U0_Ui-dlR7=2+^)M
z!SfHU6B(EtD@A2q+_V|vAIazYc7bL}FQG+@s)0~$#Z&@8Mz%}%p_&aT7ar4d)?*5)
z3YfKWh4OC|%D>5hTypN7H3**m%1}Qmu&e+zmtw@-kUP#lRGkugR|Ch>;sR9#Fx!&M
zAnyLc5?vA4l)X=po!xLPE5r3~{7!uMT?vU^%#Z?zIkg*-?&(>5#t9@(=JBb(b#Lqu
z^O)%F6=bJ24>f3}!yJSfj<sDiZs4k4+Pz1_3aJElCz;;rIJ(l|xi4(iY%?YJYl7%f
zEj&TnW)47CZ3Vh>fmm&G(0MQXM#Kh@Q6I)SI>m(2O>%C}FGR+u=2^&LH?%3g?WS7+
zTw&U@-YhLsprrua6(L4cBp$0<F5Pe~Q}<SeN@8?<OgXOxsQA1!#$KAByUu$+-D4T9
z3DPSR?R+WiDsU_+LD9?GMJ-SZ-1dwQire;UMy#gJ1<1bXRsxCcY<Y$FUgAB9{OYPW
za}<@K>M9hswTPnWXmhKsjL=d=$c`$GnZ6X3B{1$r>1==j&*Yb9MEp;p)*?R%mP=v`
zHNSuI8N=Rzt3@G1y}@Sp$-u3+tyNcz(DKn3iE+XEhYb-_s)e7wbGz;WsjWp(+8hHW
zwcWt&OL98@V70h*-m+8>6}Pa}n!^<N?J-mxw)Jd~Hp#?9)D4?kWmlI06{ELBeLX_x
zD`y8W(S1_buUU{wxT8pZMv=0zI0dB=NR+9J9%)V#+@a&82|1+YT!5xwXTBgEhUBlW
zGA+1-v>%KJTkk#@bz6EA!YwXwg-FjW9*4!^s-FzeRD!2Zi|<qnrt=Qxw2-c4%S;`4
k>Bay5dprZ?aP<J-f0-WYlPSqdwEzGB07*qoM6N<$g7U;jy#N3J

diff --git a/docker/images/env-fidle.svg b/docker/images/env-fidle.svg
deleted file mode 100644
index 9f3f0b9..0000000
--- a/docker/images/env-fidle.svg
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 125.8 125.8">
-  <defs>
-    <style>
-      .k {
-        fill: #fff;
-      }
-
-      .l {
-        fill: url(#h);
-      }
-
-      .m {
-        fill: none;
-      }
-
-      .n {
-        fill: #ee4c2c;
-      }
-
-      .o {
-        fill: url(#j);
-      }
-
-      .p {
-        clip-path: url(#i);
-      }
-
-      .q {
-        fill: #e12229;
-      }
-
-      .r {
-        clip-path: url(#g);
-      }
-    </style>
-    <clipPath id="g">
-      <polygon points="62.24 20.43 36.46 5.76 36.46 66.03 46.77 60.07 46.77 43.12 54.56 47.58 54.45 36.01 46.77 31.54 46.77 24.78 62.24 33.83 62.24 20.43" style="fill: none;"/>
-    </clipPath>
-    <linearGradient id="h" data-name="Dégradé sans nom 2" x1="5.98" y1="-5330.53" x2="69.23" y2="-5330.53" gradientTransform="translate(0 -5294.69) scale(1 -1)" gradientUnits="userSpaceOnUse">
-      <stop offset="0" stop-color="#ff6f00"/>
-      <stop offset="1" stop-color="#ffa800"/>
-    </linearGradient>
-    <clipPath id="i">
-      <polygon points="8.39 20.43 34.17 5.76 34.17 66.03 23.86 60.07 23.86 24.78 8.39 33.83 8.39 20.43" style="fill: none;"/>
-    </clipPath>
-    <linearGradient id="j" data-name="Dégradé sans nom 2" x1="5.64" x2="68.89" xlink:href="#h"/>
-  </defs>
-  <g id="a" data-name="Calque 1"/>
-  <g id="b" data-name="Calque 2">
-    <g id="c" data-name="Iconographie">
-      <g>
-        <rect width="125.8" height="125.8" style="fill: #fff;"/>
-        <g>
-          <g id="d" data-name="group">
-            <path id="e" data-name="Path" d="M110.22,22.14l-4.44,4.44c7.27,7.27,7.27,18.97,0,26.1-7.27,7.27-18.97,7.27-26.1,0-7.27-7.27-7.27-18.97,0-26.1l11.5-11.5,1.61-1.61V4.78l-17.36,17.36c-9.69,9.69-9.69,25.3,0,34.99,9.69,9.69,25.3,9.69,34.78,0,9.69-9.76,9.69-25.3,0-34.99Z" style="fill: #ee4c2c;"/>
-            <path id="f" data-name="Path-1" d="M104.77,17.83c0,1.78-1.45,3.23-3.23,3.23s-3.23-1.45-3.23-3.23,1.45-3.23,3.23-3.23,3.23,1.45,3.23,3.23Z" style="fill: #ee4c2c;"/>
-          </g>
-          <g>
-            <g style="clip-path: url(#g);">
-              <path d="M5.98,5.53h63.25v60.62H5.98V5.53Z" style="fill: url(#h);"/>
-            </g>
-            <g style="clip-path: url(#i);">
-              <path d="M5.64,5.53h63.25v60.62H5.64V5.53Z" style="fill: url(#j);"/>
-            </g>
-          </g>
-          <g>
-            <path d="M96.62,104.09c4.29-.4,7.17-1.94,9.48-3.18,1.58-.85,2.9-1.55,4.24-1.66,.76-2.49,1.18-5.09,1.18-7.73,0-8.79-4.27-16.92-11.53-21.81-3.37-2.27-11.13-5.29-21.33-3.6-7.49,1.24-15.26,6.91-18.3,9.45-1.83,1.53-23.6,22.17-32.68,18.06-6.14-2.78,4.83-12.91-.21-22.17-.14-.25-.5-.28-.67-.04-2.56,3.46-5.06,7.6-8.47,5.74-1.52-.83-2.89-3.49-3.95-5.17-.21-.34-.74-.17-.73,.23,.39,11.62,6.02,20.18,10.54,25.6,7.83,9.37,21.55,20.28,43.72,22.49,9.6,.96,32.37-2.07,41.47-18.44-.65,.26-1.37,.65-2.19,1.09-2.36,1.27-5.59,3-10.35,3.44-.32,.03-.63,.04-.95,.04-4.33,0-8.36-2.63-10.32-4.14-.15,.04-.3,.06-.46,.06-1.07,0-1.94-.87-1.94-1.94s.87-1.94,1.94-1.94,1.94,.87,1.94,1.94c0,.03,0,.06,0,.09,1.87,1.43,5.77,3.95,9.59,3.58Zm-29.02,8.57c-4,1.07-6.6,1.51-8.29,1.51-1.87,0-2.65-.53-3.02-1.33-.98-2.1,2.54-5.13,5.66-7.31,.52-.36,1.24-.23,1.61,.29,.36,.52,.24,1.24-.29,1.61-2.21,1.54-4.32,3.49-4.76,4.38,.61,.13,2.59,.22,8.5-1.37,.62-.17,1.25,.2,1.41,.82,.17,.62-.2,1.25-.81,1.41Zm26.71-28.1c2.28,0,4.12,1.88,4.27,4.24-.48-.92-1.43-1.56-2.55-1.56-1.59,0-2.88,1.29-2.88,2.88s1.29,2.88,2.88,2.88c.18,0,.36-.02,.53-.05-.66,.43-1.43,.7-2.26,.7-2.37,0-4.31-2.04-4.31-4.54,0-2.51,1.93-4.54,4.31-4.54Z" style="fill: #e12229;"/>
-            <g>
-              <path d="M85.39,60.57c-.55,0-.84-.41-.94-.54-.38-.54-.84-.73-1.69-.73-.25,0-.49,.02-.72,.03-.26,.02-.51,.03-.74,.03-.37,0-.92-.03-1.41-.3-1.6-.88-2.18-2.89-1.3-4.49,.58-1.06,1.69-1.71,2.9-1.71,.56,0,1.11,.14,1.59,.41,1.21,.67,3.53,5.61,3.29,6.54l-.19,.75-.79,.02Z" style="fill: #e12229;"/>
-              <path d="M81.49,53.85c.38,0,.76,.09,1.11,.29,1.02,.56,3.04,5.43,2.79,5.43-.02,0-.06-.04-.12-.12-.7-.97-1.61-1.15-2.5-1.15-.52,0-1.02,.06-1.46,.06-.36,0-.67-.04-.93-.18-1.11-.61-1.52-2.02-.91-3.13,.42-.76,1.21-1.19,2.02-1.19m0-2c-1.57,0-3.02,.85-3.77,2.23-1.15,2.08-.39,4.7,1.69,5.85,.7,.38,1.41,.43,1.89,.43,.25,0,.52-.02,.8-.03,.21-.01,.44-.03,.66-.03,.65,0,.74,.13,.87,.32,.57,.79,1.27,.96,1.75,.96h1.55l.4-1.52c.12-.46,.27-1.03-1.13-3.98-1.26-2.65-2.04-3.35-2.65-3.68-.64-.35-1.35-.54-2.08-.54h0Z" style="fill: #fff;"/>
-            </g>
-            <g>
-              <path d="M88.42,64.01c-.39,0-.9-.24-1.08-.9-.37-1.36,.54-7.02,3.38-8.69,.49-.29,1.04-.44,1.61-.44,1.2,0,2.32,.66,2.9,1.72,.43,.77,.53,1.67,.28,2.51-.25,.85-.81,1.55-1.58,1.98-.18,.1-.4,.21-.63,.33-1.14,.58-2.87,1.47-3.93,2.95-.33,.46-.73,.52-.95,.52h0Z" style="fill: #e12229;"/>
-              <path d="M92.32,54.99c.81,0,1.61,.44,2.03,1.21,.62,1.11,.21,2.52-.91,3.13-1.12,.61-3.47,1.58-4.89,3.58-.05,.07-.1,.1-.13,.1-.52,0,.12-6.15,2.8-7.72,.35-.2,.73-.3,1.11-.3m0-2c-.74,0-1.48,.2-2.12,.58-3.4,2-4.29,8.13-3.83,9.81,.31,1.13,1.25,1.63,2.05,1.63,.69,0,1.33-.34,1.76-.94,.92-1.28,2.45-2.07,3.57-2.64,.25-.13,.47-.24,.66-.35,1.01-.56,1.74-1.47,2.06-2.58,.32-1.11,.19-2.27-.37-3.28-.76-1.38-2.21-2.24-3.78-2.24h0Z" style="fill: #fff;"/>
-            </g>
-            <g>
-              <path d="M88.02,54.01c-.26,0-.75-.1-1.05-.75-.41-.89-1.01-4.47,.57-6.3,.48-.56,1.18-.88,1.92-.88,.61,0,1.2,.22,1.64,.63,1,.91,1.05,2.5,.1,3.55-.1,.11-.21,.22-.34,.36-.58,.6-1.46,1.52-1.82,2.61-.19,.58-.67,.79-1.03,.79h0Z" style="fill: #e12229;"/>
-              <path d="M89.46,47.08c.35,0,.7,.12,.97,.37,.6,.54,.62,1.5,.04,2.14-.58,.64-1.86,1.77-2.37,3.33-.02,.07-.05,.1-.08,.1-.33,0-1.07-3.84,.28-5.4,.31-.36,.74-.54,1.16-.54m0-2c-1.03,0-2,.45-2.68,1.23-1.88,2.18-1.28,6.15-.72,7.37,.57,1.24,1.64,1.34,1.96,1.34,.91,0,1.69-.58,1.98-1.48,.28-.87,1.04-1.66,1.59-2.23,.13-.14,.26-.27,.36-.38,.62-.69,.96-1.57,.94-2.48-.02-.95-.42-1.85-1.11-2.48-.63-.57-1.45-.89-2.31-.89h0Z" style="fill: #fff;"/>
-            </g>
-          </g>
-        </g>
-      </g>
-    </g>
-  </g>
-</svg>
\ No newline at end of file
diff --git a/docker/images/env-keras3.png b/docker/images/env-keras3.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccf36706c866dcc5e073da920a367482283fcf08
GIT binary patch
literal 2979
zcmV;U3taSxP)<h;3K|Lk000e1NJLTq002M$002M;1^@s7-EKih00009a7bBm000LT
z000LT0ppr#{r~_ARY^oaRCt`#n|n}GS02Z|H-rQXH-u7@1%{@uqlM7{XBJ|2X1n##
z=|8qg^^exEKDOJ(4wcrKwJS>3cGS|^KNerDo!zY#J8u7|5=#HsjcIlNSR2_HakK$<
zN<}6xYM~*yghxnn_K)km$$gxA^MGPMGmN?CoO{mi{O)=Ee!n9_2qDnG0037RM*IVS
zVADp#?hf-M2qC10|3~~+;K#K~2}1}W!kYk4LkLJ{9E>Fo0KllpFj!d)04VSG@-hes
z^54Uct%=u=m{Hty4!56rCN8AFt%@o#{cSI~RZ&H5Ra6l|2vL=Rst19vyE9%w=)a(Y
zAJ;CG=t5B-QAuKfL`MGDED9(JIapbZ@_w(Rs}e=Y^`PiQdA}F=O^sl;Juha6zvVJ$
z5!kUoktS9k^i*wVOrLj>>GMwUdHgSXNu<A*UO{AROj3cMvRF8*NYnk4a>zso%mRz}
zY)s4))Gbd_LntR!aA(t2EIe}x06$J6L&KVq2#qPaKx<K;xa%BZTo5^q`g?uSVXW;K
zxm8g`hJLe}O#JJ-B0ps%$}#8{i8FcT17)JxIMA9ts`E{#fDl5i*=&jm<S!*YE4q-s
ze!W6U%j*Xu&)JTZbR86I+oq@>e96ssHxdB=M6P#>d6JQsxbP%0<M|o@z%UG=-}ixp
z#>Kp(KVx*@JA}U*gfS1uUa?AD0vd^<v+&SCWGCc_Yj@~uhvLQCYa0|pd-RRHxV^SP
z+6C@pJNyUUmCQuFIU$ra006sv3sq$KOW73<OVUIo#)2Gt%N^7f4J?Zo$EiyI00t|I
z;;wVl3St}wcH8qP@Aty;Onk6u?vM-5oWjV<J0vY9whlmhJ(&Ksm)w4C3%ONMMee@T
zs9GkaeIZrCm)yE#Qd&=vCD^nP{->6S&yj20)Ut|iK%0+pYE))muxTTrH*di5`T>|$
zuT#yp9{F67rm2A%LXc&%p@tCT*4AnY<bP@zqBn1#pzE4y;{Xu7c|)QZT>$_f@A1b0
z0FymEdPTv$^RZ@r&U1fIr0EI}a5{07VNgQ|9#C0p1&m971#ZvpBx%u+5%D((ahUqY
zhYIb?>KGs)4kQMQyu1U2@4baPn_oo9MVF}dg05?dcQW3VX^@#2FxlC<<s|z-*xLtF
z^&<d)(Or8``2IV(6->I^_~Dfuy5$Tki?SY%B3+^ZwKFgcU#&y2L?%Po6%PznHZB;g
z!Q3^gASdkY!(e4K1}m$RYJAa|Px!@uLwp02?>L&9L4H#sxRdQ^^;97LwMGOEAD5&h
zc9c2@!`{BQ#g%HKO^pD6z>(vMyn>e9k_}vU7HDnMg@+De^o_kq3s!jOpyYETC4ijJ
z*^ZPN1(X9XBLQZcc^6<#&I16yEYF(&B*bCt*<a6(0I4*t5Mcbm1&N!8_ILZOw{#m9
zcD}J^AH~F9-=hkUlyQ;_YK;Sc?Xyb&ct4NYNs0OiY6uZeLyLQQuykxpQvkKb6A87o
zD2`*A0thVb>4C{^$90>HS|<8^9{|Ai*?*{I^7bAEpqw3=0@$5S<kr+^HVyz&UwjGx
zU@YpZ9S}E4r#eq3F8}~U0|D5bPR%m0J41TaP|*}%VEuYrx7m=tZXL2Tv|w7HJUh_H
zURIa900P51cVemEPt7;4<NbeJ^L8;!eoPyC_E7+UF<AkwG7QEqT;MBfYtua~o1fkc
zCeJE97o9i?GBX2!w}?n+91DK`hT^?hTL%`r_$L5yKU|FC;H!0@q_Z8-n>S#7db6l0
z3A1bvi~P91l@LP6#HYtJ%iUjgLApeILm2+;6RLTpswF7?Qt3hx5C`Fu=dvV2{!GJH
z>!3Cc0O8&n7_6*D*xRQq0Khwb0e&FKkJ55tK`v?g008xbEeeq1sK>OYTh~Db@JDtn
zmlh3Qt@0h}I$)JK-vk1OkEd-fsA)?gK%zO8`^0KR;o-;Ds2UqI%KN=2x#*IlO}gB4
z0g664rdrXI=Q0K>t963`YVXMhsC+OgreS_lBLYW`<M!GHFjgz3J#-J39>K#Oz*ts_
zh>yNosxCyygg$8KnQO&e=R_X_RRAD;`yb%Hy;>)Xr6nl&_jySgbrzKMcwk`ZCmaCO
zd|@gVoj8i}elN=Vy(m0%5N09XlYs7C;NRa0?$l{Tda9N&pzd+eJ&+W<+00wFvfgHu
zm7=`gi-MNjX*Uj}Xq?7^9PVz~f^hGRWM2Wh?RgZu*-XvPXgdy_?ZEh1CrB_T3T1ZG
zA;(b<cKa5pPhi#rm<{G#fca+5{1bBp32_KEZB&oNropTo%I6vq;=na;NAxB?*RoTv
z#k9~=Ky+*@sW3^hnCy0#?Dmx2k#`(`A|MrNYeCrENextKp{an$o*wkCT&YzgB^K@5
zhhmCvNlr2y7sz>fGZ+h-a_50Gr$BCPEwXGj(Qp>Xvf0GQo;)uVxaRGMT>r25toLQJ
zDjvat3jTl<c6TDw)&hVx)&+Z+Mp}#D;wx8B`SsVL|18U9<9%p;KkQB?qB_1bEdnn*
zDRw^Fu~IVxsz&yTRWK|p0RT*Y@u{c`8X4^Zi5)lD?fBpF<+yIMrM+%Ucb*n+?U`1u
zLte{~l<QzDEb{;UPf-bBcc*65JR=3zJ35eCQv-lsFv9}_nN)}HH(!ar&1uZ%L=s2M
z%v>p8prHX*83w~Ucf$10Ls;tft6oA%maK}np(J>H(<SkJgNEZaBLxU^{fD+T^gjGB
z@67G!&~@n2$VqVCP5kXp{1U4S?$l|?g#bBWcW3<Gl_gKzDR_^x6fn@xfIDx$Et%`#
zfdO2%*)Y6wC#LTC^;5;Lutak8BJjeKnE3F3qMX@LkE{nDRHO^1L}B4$EP1dVIjh-b
zTH9d2=|q6?m49e!OQ*`XK+Eb-&~*)5^LFI597S|22;XuCicTDZ*-?ks=v`zl{v{$q
z!-)EZV0P4D=E^0E|M?|JJ*L&`6!-sB+tbQ4+3i@gZ(l|oN<`Q;lYwnL2>>u!@=$ig
zgUHY@VjKs?`T!<6yD++I4|1PajUq=K5S{|Sk08tI2NY2NEq<2PV2tBnU|Enz1dK5=
zbLPG@hGIbuldkia`lcI^p&^BD&{VYqS*4}OSy2zOqh8`>OO^X#1jK?I{QFxGI+xz=
zAz*gYq3Fc1q&zVKs=Gko@NwQR8I<}Zxs&Y}tgOc9u00tEDloch4+bl%;al#=xbpxo
z?dgVZxdXmhho(0;5!Dfpcw;Dc%_<nIR*8USM;#WO`6MkNBfcSwyz**Fj=glrZ)!yT
zYmJ&^RM!Ag^&>Dl>M-r;#^m|0l=)Su&$&sL8>74SWON5CRfI^2mJ+CIfL7d3>lRcZ
zu^@-hUCpXW%oKx_MdACcFt9A9JeM^idL<WKFqW07*F$^BSXL_jKaoTV4;|F)eBkhL
zac<8==K=mB<gQtT@y|QJecX=1*8SA7p|kY$Me`_}<ETf;MVBNh5b-I`=aVis?rhpB
z-m6R{h?g!xxFlyq9kPB=4DM8WTJ<Eo4c^(bHKSNvx)ikRhX2638P!pA;;3rG6-e<M
z5cc-r?xw9j=|W~;Sv>gBQ5aY&MqYU}<+vs7-m*Yi2ch{;1i;5P^IvO(WkUm|JeM*3
zO*g{cUWpvW($agoSaK)Cj81}??CAw9yJ0LVP3rb}Iv7|M#b2JIuKohEb`+j;x#8d6
zsyjsUn;Ky(D-mP6rm7_{JL)m%a;F`Zmt%QmeNxx|fjP1TVnGhv$#&}HcLU49SXMHt
zr$GbDqVWB9sEZwqpUfDF_=Yg)a$~~fo|WCiRJ8<_4GpktXh_NUk#k>IgY(JHE;>Bh
Z{2%(n>MmI!>(>AP002ovPDHLkV1hA4kh1^)

literal 0
HcmV?d00001

diff --git a/docker/images/env-keras3.svg b/docker/images/env-keras3.svg
new file mode 100644
index 0000000..c3e0158
--- /dev/null
+++ b/docker/images/env-keras3.svg
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="Calque_2" data-name="Calque 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
+  <defs>
+    <style>
+      .cls-1 {
+        fill: #d00000;
+      }
+
+      .cls-1, .cls-2, .cls-3, .cls-4, .cls-5 {
+        stroke-width: 0px;
+      }
+
+      .cls-2 {
+        fill: none;
+      }
+
+      .cls-3 {
+        fill: #fff;
+      }
+
+      .cls-4 {
+        fill: #e12229;
+      }
+
+      .cls-5 {
+        fill: #ee4c2c;
+      }
+    </style>
+  </defs>
+  <g id="Mode_Isolation" data-name="Mode Isolation">
+    <g>
+      <rect class="cls-3" width="100" height="100"/>
+      <g id="group">
+        <path id="Path" class="cls-5" d="M84.64,15.79l-3.09,3.09c5.06,5.06,5.06,13.21,0,18.17-5.06,5.06-13.21,5.06-18.17,0-5.06-5.06-5.06-13.21,0-18.17l8.01-8.01,1.12-1.12V3.7l-12.08,12.08c-6.75,6.75-6.75,17.61,0,24.36,6.75,6.75,17.61,6.75,24.22,0,6.75-6.79,6.75-17.61,0-24.36Z"/>
+        <path id="Path-1" class="cls-5" d="M80.85,12.79c0,1.24-1.01,2.25-2.25,2.25s-2.25-1.01-2.25-2.25,1.01-2.25,2.25-2.25,2.25,1.01,2.25,2.25Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-2" d="M52.97,86.43c-4.89,1.33-6.52,1.26-7.02,1.15.37-.75,2.11-2.39,3.93-3.69.43-.31.54-.91.24-1.35-.3-.44-.89-.55-1.33-.24-2.58,1.83-5.48,4.39-4.67,6.16.31.67.95,1.12,2.5,1.12,1.4,0,3.55-.37,6.85-1.27.51-.14.81-.67.67-1.19-.13-.52-.66-.83-1.17-.69Z"/>
+          <g>
+            <path class="cls-4" d="M68.15,44.5c-.34,0-.63-.17-.87-.5-.3-.42-.64-.57-1.3-.57-.2,0-.4.01-.59.03-.22.01-.42.03-.62.03-.32,0-.79-.03-1.23-.27-1.36-.77-1.86-2.52-1.11-3.9.5-.92,1.46-1.5,2.49-1.5.48,0,.96.12,1.38.36,1.06.59,2.99,4.78,2.77,5.62l-.18.7-.74.02Z"/>
+            <path class="cls-3" d="M64.93,38.75c.31,0,.63.08.92.24.85.48,2.51,4.58,2.3,4.58-.02,0-.05-.03-.1-.11-.58-.82-1.33-.97-2.06-.97-.43,0-.84.05-1.21.05-.29,0-.56-.03-.77-.15-.92-.52-1.26-1.7-.75-2.64.35-.64,1-1.01,1.67-1.01M64.93,36.87c-1.38,0-2.66.76-3.32,1.99-.99,1.83-.33,4.15,1.48,5.16.62.35,1.26.39,1.68.39.21,0,.44-.01.68-.03.17-.01.35-.02.53-.02.41,0,.45.05.53.17.55.79,1.26.9,1.64.9h1.45l.38-1.41c.11-.43.24-.93-.94-3.48-1.06-2.29-1.74-2.9-2.27-3.2-.56-.32-1.2-.48-1.84-.48h0Z"/>
+          </g>
+          <path class="cls-4" d="M62.06,75.3c-.39-.47-.34-1.18.12-1.58.46-.4,1.16-.35,1.55.13,5.79,6.92,15.18,8.77,24.52,4.83.95-2.66,1.42-5.45,1.49-8.18,0-7.41-3.53-14.26-9.52-18.38-2.78-1.91-9.2-4.45-17.62-3.04-6.19,1.04-12.61,5.82-15.12,7.97-1.51,1.29-19.5,18.68-27,15.22-5.07-2.35,3.99-10.88-.17-18.68-.11-.21-.41-.23-.55-.04-2.12,2.91-4.18,6.41-7,4.84-1.26-.7-2.39-2.94-3.26-4.36-.18-.28-.61-.14-.6.19.32,9.8,4.97,17.01,8.71,21.57,6.47,7.9,17.8,17.09,36.12,18.95,18.88,1.75,28.93-4.73,33.3-13.21-2.84.96-5.67,1.44-8.4,1.44-6.45,0-12.34-2.63-16.56-7.67ZM53.46,88.31c-3.3.9-5.45,1.27-6.85,1.27-1.55,0-2.19-.45-2.5-1.12-.81-1.77,2.1-4.32,4.67-6.16.43-.3,1.03-.2,1.33.24.3.44.19,1.05-.24,1.35-1.83,1.3-3.56,2.94-3.93,3.69.5.11,2.14.18,7.02-1.15.51-.14,1.03.17,1.17.69.14.52-.16,1.05-.67,1.19Z"/>
+          <g>
+            <path class="cls-4" d="M70.65,47.4c-.36,0-.83-.21-1-.82-.32-1.15.43-5.99,2.83-7.43.42-.25.9-.39,1.39-.39,1.04,0,2,.58,2.5,1.51.75,1.38.25,3.13-1.11,3.9-.15.09-.33.18-.53.28-.93.49-2.34,1.22-3.2,2.45-.3.42-.68.49-.88.49h0Z"/>
+            <path class="cls-3" d="M73.88,39.71c.67,0,1.33.38,1.67,1.02.51.94.17,2.12-.75,2.64s-2.86,1.33-4.04,3.01c-.04.06-.08.09-.11.09-.43,0,.1-5.18,2.31-6.51.29-.17.6-.25.91-.25M73.88,37.83c-.66,0-1.31.18-1.88.52-2.91,1.74-3.65,7.04-3.25,8.48.25.9,1.01,1.5,1.9,1.5.65,0,1.25-.32,1.64-.89.73-1.04,1.97-1.69,2.87-2.16.21-.11.39-.21.55-.29,1.81-1.02,2.47-3.33,1.48-5.17-.67-1.23-1.94-2-3.32-2h0Z"/>
+          </g>
+          <g>
+            <path class="cls-4" d="M70.32,38.97c-.19,0-.68-.07-.96-.67-.34-.73-.85-3.85.48-5.42.42-.5,1.03-.78,1.67-.78.54,0,1.05.2,1.44.56.86.8.91,2.2.09,3.11-.08.09-.17.19-.28.3-.48.5-1.19,1.26-1.48,2.17-.17.54-.62.73-.96.73h0Z"/>
+            <path class="cls-3" d="M71.52,33.04c.29,0,.58.1.8.31.49.46.51,1.26.03,1.8s-1.54,1.5-1.95,2.81c-.02.06-.04.08-.06.08-.28,0-.88-3.23.23-4.55.25-.3.61-.45.96-.45M71.52,31.16c-.92,0-1.79.41-2.39,1.11-1.6,1.89-1.08,5.4-.61,6.42.52,1.13,1.52,1.22,1.81,1.22.85,0,1.58-.54,1.85-1.39.22-.7.83-1.34,1.27-1.81.11-.12.21-.23.3-.32,1.15-1.29,1.08-3.27-.15-4.42-.56-.52-1.3-.81-2.07-.81h0Z"/>
+          </g>
+        </g>
+        <g>
+          <ellipse class="cls-3" cx="75.51" cy="68.45" rx="3.52" ry="3.88"/>
+          <ellipse class="cls-4" cx="76.93" cy="69.31" rx="2.38" ry="2.42"/>
+        </g>
+      </g>
+      <g>
+        <path class="cls-3" d="M43.24,43.2s0,0,0,0H11.89s0,0,0,0V11.85s0,0,0,0h31.35s0,0,0,0v31.35h0Z"/>
+        <path class="cls-1" d="M42.72,42.68s0,0,0,0H12.41s0,0,0,0V12.37s0,0,0,0h30.31s0,0,0,0v30.31h0Z"/>
+        <path class="cls-3" d="M20.68,35.76s.01.05.03.07l.52.52s.05.03.07.03h1.78s.05-.01.07-.03l.52-.52s.03-.05.03-.07v-5.63s.01-.05.03-.07l2.26-2.15s.04-.01.05,0l5.7,8.44s.04.03.06.03h2.52s.05-.02.06-.04l.46-.88s0-.05,0-.07l-6.67-9.66s-.01-.05,0-.06l6.13-6.1s.03-.05.03-.07v-.11s0-.06-.02-.08l-.35-.81s-.04-.04-.06-.04h-2.49s-.05.01-.07.03l-7.62,7.64s-.03.01-.03-.01v-7.01s-.01-.06-.03-.07l-.51-.55s-.05-.03-.07-.03h-1.79s-.05.01-.07.03l-.52.56s-.03.05-.03.07v16.65h0Z"/>
+      </g>
+    </g>
+  </g>
+</svg>
\ No newline at end of file
diff --git a/fidle/about.yml b/fidle/about.yml
index 94c6ed9..3e184c0 100644
--- a/fidle/about.yml
+++ b/fidle/about.yml
@@ -13,7 +13,7 @@
 #
 # This file describes the notebooks used by the Fidle training.
 
-version:                  3.0.0
+version:                  3.0.2
 content:                  notebooks
 name:                     Notebooks Fidle
 description:              All notebooks used by the Fidle training
@@ -37,10 +37,10 @@ toc:
   Embedding.Keras3:       Sentiment analysis with word embedding, using Keras3/PyTorch
   RNN.Keras3:             Time series with Recurrent Neural Network (RNN), using Keras3/PyTorch
   Transformers.PyTorch:   Sentiment analysis with transformer, using PyTorch
-  AE.Keras2:              Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
-  VAE.Keras2:             Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
-  VAE.Lightning:          Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
-  DCGAN.Lightning:        Generative Adversarial Networks (GANs), using Lightning
+#  AE.Keras2:              Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
+#  VAE.Keras2:             Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
+#  VAE.Lightning:          Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
+#  DCGAN.Lightning:        Generative Adversarial Networks (GANs), using Lightning
   DDPM.PyTorch:           Diffusion Model (DDPM) using PyTorch
   Optimization.PyTorch:   Training optimization, using PyTorch
   DRL.PyTorch:            Deep Reinforcement Learning (DRL), using PyTorch
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index f8c8ee2..c8446a8 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -1,6 +1,6 @@
 campain:
   version: '1.0'
-  description: Automatically generated ci profile (21/01/24 17:21:08)
+  description: Automatically generated ci profile (23/01/24 10:53:30)
   directory: ./campains/default
   existing_notebook: 'remove    # remove|skip'
   report_template: 'fidle     # fidle|default'
@@ -184,122 +184,6 @@ TRANS1:
 TRANS2:
   notebook: Transformers.PyTorch/02-distilbert_colab.ipynb
 
-#
-# ------------ AE.Keras2
-#
-K2AE1:
-  notebook: AE.Keras2/01-Prepare-MNIST-dataset.ipynb
-  overrides:
-    prepared_dataset: default
-    scale: default
-    progress_verbosity: default
-K2AE2:
-  notebook: AE.Keras2/02-AE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2AE3:
-  notebook: AE.Keras2/03-AE-with-MNIST-post.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    train_prop: default
-K2AE4:
-  notebook: AE.Keras2/04-ExtAE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2AE5:
-  notebook: AE.Keras2/05-ExtAE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-
-#
-# ------------ VAE.Keras2
-#
-K2VAE1:
-  notebook: VAE.Keras2/01-VAE-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2VAE2:
-  notebook: VAE.Keras2/02-VAE-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2VAE3:
-  notebook: VAE.Keras2/03-VAE-with-MNIST-post.ipynb
-  overrides:
-    scale: default
-    seed: default
-    models_dir: default
-
-#
-# ------------ VAE.Lightning
-#
-LVAE1:
-  notebook: VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-LVAE2:
-  notebook: VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-LVAE3:
-  notebook: VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
-  overrides:
-    scale: default
-    seed: default
-    models_dir: default
-
-#
-# ------------ DCGAN.Lightning
-#
-LSHEEP3:
-  notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
-
 #
 # ------------ DDPM.PyTorch
 #
diff --git a/fidle/img/logo-YouTube.png b/fidle/img/logo-YouTube.png
new file mode 100644
index 0000000000000000000000000000000000000000..63cc69f99c93d39dd4051901dd380bfe79ed1ed5
GIT binary patch
literal 18600
zcmY&=2{_bW*!GzjYuN_L78*jxmOX2xQWVOBvM-S>LUu-qkuAwq_98}EOLo)0ED_T-
z_BBNICHwZB>3zTJyT0kFEBg7JbDs08_j5l*8(q1;e2DiD0D$?T9`-5#H1Mxrf`JbH
zNf&jU`W20n&Sf0{$`cOn+S0<Wh3xgNUIrjg6af4K0Jh<`@Y4YJ$^tNB1Atl@07pGD
zYK_(5KhW9fU%-L`>aU!JS4jX|Xu62iG4b!89}37YnM{{i8u2C)bL-4G*$~F)e&RKg
z2<Mm}&KIIy`tlcjstP|#-O7trOHZHfyh};HXeX+y{O!}=v;#%ub74|*8sYvUNsl)S
zCOt)YI(g#aJKw(!;il9#5{TVgeAD?|0kav~AzFf(7=@|&Meptn)kRFsvsZYa!k~>L
zaRV=4FBzu6t8G8IVK1Q-(M~>$=f-;_Yq^527F!`4c;n=egyLO`?3VU>JF^2;*^L!{
zdzvih<puDHgzN!SP7s@xDYDGFRBIKu34U6yG7YUFdjO5(kwq|?!iuWG?Z0NQ=>Y=+
z<|pZa+fcEEhvUr#q|fqeQ4IX#<uK|Ry*t1CA^H4#b?AY6i9*H>Z_5h_=EdKa?1L>P
z7q<@#tRos1o#+OP3OxVl%l|#WT(z#x{?}w5eslW?+s&O5oZF$dTu+3TzgQWWzk1~o
z8=Cr&L&$26hcMwUCP!{=B6wHE<vdYM2V{22BfF-T+Sd1T!k*Z)hVF+-;xDgUF{GpJ
z1!ITI`OUYMgfKwK9wX%@TPFejt6Pf`?Tf4*g6irxD0Qd6FtV&@W%f<O{(&O61I)xC
z`0{kH)Q<sRD)thj_Y}dukZfWpRaj4$?wJ;B{;1DMxSZ@jas^hUyMeUY2$IA}F%H7N
z4<mk0=`RO}M|{10ds<AgDgJCgusWL3*a@!V*pam07|2Sxn}`4R?2H^DXHsV46+#6~
zVg&K1x+#uxlv$89zcI@b;Iu~jZ~u>OAhp94@J%$H2)uoO9$}_i8n7ZaPvJ*AYp~QO
z@)$(G+g;p264>V=T~uenzD>;p3v`~vKt_)L-!YEs1LuIx^~E5VWzu{zn!qw)3q~HV
zghj($5HXoo{D?DB96RgkcdT-a{L?Rl-@irsbLo!JC@;ta`^j-(7~cDpK<c_pVu~2e
ziGNGpFaYA7YIsM`h_YeVy@LJdqlBM2Qmq>v%|uVVDC0O0z{y=qN<c{I1mtO9@)FrW
zDljXWjMk+tN@#O{huQ*GG)u~b<@t2c;_L;W+Aw_aWa^=R3&<w{qtQ~JPM7m=>;y)T
zv7O_Q<{J|Fe_yW62xzh4cTm-bwS{%{u?STz($y<$depPN4O^B_+_wiF7iv1R(8ga2
z5N3I#Y{oK`x#Z6d^7a-zAvB0LKjran;>R<t&3aY1?;PZ*e|wEiakT9F_H9{8M(a?q
zk6K7RasPxcS{*mvHMQz=njOnoOM_2&zpM|kZB(M$a^%FxhnLU@u7@?4KDT?R?gL&=
zgOs@w4MbTbIS-Zg_!mmkd8XxtY&_CSNj>(k9<9bfDvHG}VI5dsXk$fVr)Qs}w2tmS
zhLR<s<3MOJX$NDJN*|}mFE#t=2Fn%EB-Q_H#s!JnSf%Gf&HC|BU^Ai!{7TwElOuO@
zVWdLtAcxRgM*{C<P&mW*j-Q{rdZlD)To<07(-{i31MCXx>R}b9g+Q2*%~dl=wwUHD
zy0RZX_s_j72m=0DeosE{#k8HZx&?p2<S3NpZ&Y>Mm~bBNJx+Yx&p~XU@RQD4K?o^1
zvj<y;xugg^F2UGO4#-j8R<G+T?7~pz+Qh(H^>5uWAl7b^P}mXzNJr0F>_;&z)CGKu
zWL8v-&CAbs<Yc<?SRBKeA{a%z8nht*)wJ4Aa)t9AZ$3k={%5vyN%zOZzUy6XZFK=H
z7P_RbSyNuST2Ot?X(4h*JV3L#3~fwM0Go!icD9oK(y2FTZQ_dddS<Zr$hw2NW&dC~
z8A+&*k9?5Y9o9EJE(H96swS%{KssV8RZU3ar!y+%Ks+-5>d!lGy<d7lH5NTF04x~q
z?TO_r)f$I09+!ImdEs}uf#~UiZls+*qxQW6G5D0T7*t=@QGKSP=!d-@!2SuhhwJK#
zw&0v@T|!OCz;*p)z5J^`uoIpUtq~muRE2-b0YC{-HgOD-_qgto?md}~sns*MHJ)ob
zr5~b!p<MOisfTBCGgGP7aZU+g#IqAdt9uTM{`z3Wv!G0_d%nI~6se$&7>xiHR_CcV
z*+&EL`oO@pkRXmJF^SM`dw2?g?|;s)^ioZOg(}c+5wIU`3++FhY4ktYBP!YCCI*`q
zxkHAAqDfJdv;6)2{p=wA^xGiCdJGCKo7e<z`fA~jV-A53IeYE6WGVfq?X@!r&31}%
zOg2+pnXO1?h40&@A*!hzZ>I98>XZnB;ihCHfq<EgIBvoI)ojC?yy}8MyWQPwB6zNc
zQH39j92I7Qhce<PZP`kAN^#v}&}LQ)b<LyKW;Os+tK{sY!6SblzBm#H;(2EkS+${B
z1DpKZAwgE5>nK~5zOSKv$(Qo;>m8t`i&roL;DltOE;?htJfhL{({#H0kB^#r#jiJS
z-b5kOZ0$xcE1phHPOLIK)D7H4j)pL?qR;h;fX}i=%&;@}RJD724dve5A+PrCWy5!-
z0U^wxnjim}^xBng(%_{zcCO=IFaq*wY}|n<P83j=tz`tK!g%!M)UljiuOf@eGBYzZ
zFe}x$LwuPGUq@;1TW532B2y3j&-LL~Oy%Ma#PSLc+Z(=$Vw!)inDjYTu6&&KFxMx<
zTOOiv(>@1OiU|(TdRhT%LVxR|r)SzB3yEX{MLqkhsbfxdt$a#uDs<YF(II?Ue&KKf
zg~VSwo2vm#%xDTiC!pp>>wi~AamCjs-3CIBkkC4d1FQ8M2pUZq9qg5>IF7M}iz4g^
z%ksH}E9#4UDUL(8${4JXp6Sr(Ak*xLzsk+NjPvNh0LF%<T}+cb0f076kUg>K^j9`z
zMOJ*#@<GBW>&E(+REBf7T%%qZo1~(>S3mx^M?MJxIq~D1{#<Ov4FO~}Kr4z=iOoHp
zr*;WEh)eYNLuZOqsmc0fK)qHoBp`V)9Y#|U18gNd?d`|yKz`1KBqr+B)uP)v3#Zc!
zu3V+0pBH$X(o6%Wg40CEM%lu5Juw#IDnQEIta9VSPcbV#LLH+M!n`7VAu7zOsi{@e
zt5Yvzh7mlcJ}iHTlz3dq&yoqnhQ2m_AUK*&4bE$ifu6h+*2L+mtz_AU$v^kA7`~by
z@k0kNz7$HcgOiaD;k6gK*H|nBfSXKnftvUE<k7`*GR_{DME?h=wAB5mK{ulUu5oi=
zh^KAmwk966eWY7Ib8mgFFP!N}hl9K~h-Iw3kE!r!estt!fH_P;j9+qq(Vj4vRR|df
z$d_O1@kOQ6=a@+F{Mla&5U4w#-nyj-h`*&rATZ)R#kpb*U!F6?USV?9Drh;7nK<#k
zdslgM93D;KayYj4&`@7r%@yojokdez#50B^2bAs5Q}UGc|9kcc^;u4or$6^$kG#5C
z!k6?jejb(CJ8cf~95w}}{SiRjiXF=26O^*CPbb6en#-}K3DBa(cL&3#0>luHb0lwh
z2Are%KN1Hx&CO2Tzj|$LtpKV_Y<e#+YidIsh}IrR;+yL^sj^?88pCbX`_zSxv<n!`
zcE1@wjb236tt%pOh@zq(Sni*X(1xofLcgxUNpg;SK$!U%%a%HXeebI!hI|XYlqRXL
zLoxGQ1pF<YY{ti)bN#B&a<Iw1@bc<OYr9(m_vlEQIg+2258zu&3>on!VSJ~Qe;r00
zYc)sMWQJSrtR)X)Hx7<nzI<6ffQOm`asVv$o1b%Ahv4RfFXnEZ;ecWyFdG~mwwiFH
zZicGx=SD<m*=<kN=Pt~KUZ!!jqXqxt)N3yR{&wm<I6%_J8~!*;8q#6^_p8gplMWXg
zhU)|9i(J}`cH!}<(Uv3tuqKOVpELOKJDtX_<NeMkC4KIt4BGhlZA2H|y*j3i6-kA+
zH1@|3F1z*=*s$UQ@3o&K&Gp5iD0N0+XD>6M#FH;k1nAiOH^Ok1ZP%!tXhug}LZ(sS
zY%4!%@b2NIq-<clb*Ris*#4{mcn~{V%X%YaEm>&8Je^_b6>jilG$U{g-EDFo;5gOc
z;$`{%9JZiv=6H(Zv0nkhG-#e{TjiqQ1=b;fc+Yg-?F{}hahnT&ImNMo&IVU*W<lxe
z>-(smMG(L7{m<V-5Kk-Hx>_%cFgTL949=tQ#09QI58RLtrnGn_TdLzm_s^ei>l+$E
zl!aBw+P}A6Fi%muhD*3g5$&jG3Cc<>lXA@<#?yUeAaNwlgV6vhWqfCfqdTIEhZNXR
zVbxdZM3!{ze%;qa1JaKspi-?_f`4(4n)W3<XWwWSeX3^T0LSIr``<b#x%HMEgEzlk
zM5N3iguoRB6}O?z&Q8Oh-@P&vUArf+7_Vo<EFp{-NTVOMA+HKyCbgrraU=T&4e_6Y
zP&c<~8k=HT$FtlA!jMa`M9pHN{^5sb&w$kT2u$9RS(%Ma@ZR>C6Kb=Xknx6idcARj
z^{;yGPQZ5xWwO6&?=^A%+iJaa>5X?1rmv1Czev#Rk8ievSvWIk8Qfae&#9kMt>5r|
ze?Xqwv$$t@l$n{i%suEdJ{7k{`!vc^Go3s&!tnAw$eDDbJ*~MmSD8&4x)kAekUkEq
zH<3#uR4gkc<|a+G+Lt3`?mInTUy5W|3tuvxDq}sY_?_8NKE1DG_7;kz?`*4Amtx4^
z)uNxvDUKmnoEFWf3XMAJ+2&j506^M<YTq=KDztJ_Epxww_^ftnT_yhXvkSQk<PTSH
znT~cNSyP=b;gn(B7<pe+C153yTmqKf1D2vdVdII5V1zt*cSC6NVkAm2WczQ6oPA3$
zC+Qp*rnh;LoA(+l<q-FkvdLPO9tEb>)-Wd2(|!e~7)KMrXcn3TtAF#_thuy2$dmr7
zk!!Mc9L*??V4Gc7(BvW|2@e?+nW){)DLoP@|27x{gpxiREUPbG#WgUI=E55X>D}^(
zq3k5Fy4u<*7*wo@GYR~yw@W9zhpXmK0Tv~*2ZxWf){MNWZ2&o)HbeU#yfOwhow@J6
zSm|ni=kS2AE1TL8(*4SNg%0%aF)c10Yu)nH$hJ;5_|4Gc+jLbTS(8>URpvV72gtZJ
zunw;!W-D0JWp&c5$~~K9^ePafmo1@dSD+{Hqqm#}s?Hz=d19J9$hRu_h&PrC>;FDU
zkgf)8llbR%=vw0J)y3P-YVi;WH2e4}n14$oYb-4vl+4lzl$uERK6M{Zt}CfKk85Jg
z;c~J$R!TeoqqGB6*%=x^>*n<uZ0PjU_ARmBs@z9LYdr)=BRhSYBX!0S7}nmZy~^~{
zPVuZk`~xr%t8%wax$?>s>CZ`e<@$2D={*-vt}WSl<UTM``eojTLPK(}y8hNWn#PlY
zo3uqh>-l6C%XrRsIw?1P{55Tv#OO=icfSz~KC}QA^2SJ9MQ2_H##dOtD51@1M<t#{
zjXeBNlr?p#%6&&?pem{UGK~C^nO6jr1OHB^8ZU&{x=gn_LM<d0{P^=im%p_V-i{S=
z=``+g)XUx%2#X@(LwMdFu{@{G2k-8%gCzgk-*OBx4-MYqTbipR-F_t}z^TX~d^JJ4
z?wzSUhGZh4$fcdbYLDKsH>$hz>W8UW3J!Rx;>&IsX#txWD7Rbk>2$-=1j4S<!1|QQ
z+HLUeG5&JV%;`Tq+tsqoWw1a9lt|u=AW9=2GyW<1Dun5Y7tja{kWbIBj-w&z)Ym)f
z_4ngj1?d+W`BYZ)S|aj(3B56hZ>J$W=1;ux>ViV5fg`t04)eHB%K`bBqHBfwK$9uu
zrtQ6qz<068r6$Xs5VdrJu8C6-gf_v|-<@aLs^$+6OAYM}@2?veQKH-ivN$4D3a%E#
zj4y9qLaWMPUbTEnan!gCx0?=fMx((|OBx>^pgaf^l;lM@?|Fev+fuBq?#CAZX*oW$
zTE8Vc_~|nus_5sBAFG7yGkULnyb@!NJ_5QDM&EtEW!}9CkLot81gmW57|oOF>gohh
zOXa+$eqq6eSkmVk3+Dv%^RJGe6A#YgJA~=ii1ede1655?wy$|%ERVCe#^JfQrkx8c
z0&lCzX>(fTHVIx&nFvyA3yJp;fa&Y~EU@$kdQ7(s*YPU%FbmvKOuzRWJe$@_c&dN_
zAq+9zM@>U9QBVDljYr(kHHMCgP{iqu?gi7EH*Y3Ed2uDWVnS<!r8BP_@axX$e5L_7
z^4~usau|!0<)I9J%(#y2k%u1r;WBrW{|kVz7y4>!W3yQ16>G$(zyqfzO^MEIi3d;%
zuhsjqq2ml(U1!IL+uSbRzG>mXvdNb$tz6>{N=iz!Kw-=z2Dg?3C9-1x9-E70F{tk;
zc(y`JCQ4p_Qz@}p@KKvCVsVj%X;o5NK3)9~!tnfT&)w)ElbIyqwg%Q-I{L#S#Ac%V
zfFLYpnDs@Ic&<q#3jj8v`TDtu0tfj3vp9u)4ogeRCPZ0IR@X7y9{oU7sXY?^r9x`-
zi9WuoYh@R_I4>v4$(1;b`1<WzITL9c_^DFk`bQUy7Q0mx3%SKY`u*#EzIX0BXVF_H
zUCPclXkWxz_g00-69~?2^Umbe&{)qdBdy_z*I}g(@B8@rhOkZ^ah>_r14FDTJXnBz
z%hM@(v+_K3TOs55nD#xRa}#4ier6KMk!P54PL1}e15)vCI>}kgNc0N!$r=$&8dB6e
z$%!`Zy!h&ewFyO*{Oa!KxxUKNk@o(yc=FHX+YUuDWT(z&zNDI{ZFWGv`d!%iScp%+
zXVSSCeE0R1h%~@}p9%=?LfM#im$fVx&nzRN3W#3+l0@P@Yo2n)lBWTz^89idBemfr
zy&MayC5E1wpb!GGHjZ37ldTbS?>z2+l`LxD?(D2G8Vu$T+8@+*+(z?L@dF+x(Pd#Y
z<z<@)78(>AZvT4-K62MH5D<t0d^TqFcIDO8&-mu%=NlMF{8nmrvgxO;H-2J6e+!i|
zD-$Xqj<m8ld6gPxJG;AU43a`mK?y$EPV@JZC6C6iyv3I0Px{h4=axQXpw40us`{9W
zGPP!=x<1O2h`g(bU0q#qz>38))%$l>=79Rsu*U|X*D&l3Uj|G|t@f_JQ@YNx+SRfU
zWUbw3C7E;<jiwNlM}k+q30IJ+Fdibq1fYEzBGSl}e=I#dgg&{%wYipUkS>eZT=45y
zMFlTK#L~+lMiYdh5TBrVM*UT^#v<qUONqGWds<1K6+mIn#RC?H3`g@3M{WXvjw_~F
zG$_k2<P&l24=na*l#X7}*Y8k4Z~0|iUJU5D2rHkaL2D%5p6l~#A6~O+w#~i8@buF>
ztT5rOX9Q*`ia%|EyKP+5RDdbaTYl2_%Fq)BFnJ+L-#wVC=`Gk;hRSR|n(NF<2_%dF
z8wqm327BT`T*zN!8Hlwd6pC7SA|7`jlt-9i`C9!SpX@Xy8c1;k@shfLpo$pF@>IlJ
z)qDFdX>w$oy>5|-3(R$7tfc9EICQnK3OO%cn4QG!%?hn2UCJ*&R6lw3t&v>nqqa8|
zassIAXS-a`MNz{KmNc1);tJ|!=K#Wq|9~LIr4L$9lYt%hP0JmBZhn5g*~Lu{yUzoI
z7JT_OR9CVc6wgcqAw%$!EI@Cmw|n<}YaID3Jz|Q?NgvC7cG~?$KtA3noA!!E^0JU9
zIR85rZ1_4re~}xp1p}57J<lC<QTygWRaKSfQt?M4EA9PgW9Y#6IEI5)JFo2y>P)Kc
zZZNGOLP%BfJO;q1nm<f^=@SQO$Paaa|ARkFFF(3&(y6g(-?_8s72(EB2dg_rbKJ!T
z&`9@~@zIKR{gofJX@C-X&t!r{)XH3wK6;e>F}+Hc;HCVl8Is|I!YdFO%gP=YX$abQ
z#jF*j0S+T9Xh;3Ys|9|3e#*!+8lilmAYw@a%rXle(#L7lYzVf%NWu2j3Lhhl?9vk4
z?0%1b?t!eW<2h=Fw0V5DW>8|Fst2U;d^sWM4g|PACD5gVwp*2brp3#$M-p94D1U83
zba74G39XAoOsoE^P!NQ+qqw!xvUOL(32jp#IP3jUOy2REsL>wc_5)(chWZ_Sh?be)
zSUD2C5y4D~N=0>!=1$vN2F8?Prt6E#W>WWjm7ZHFkw$z`y-Yb^v=K^l7S?(tNKauV
zW>dDr1cZOjB}QoDW8Z$#ScGX9iGx(DcB%k%g8+>xla0~ootr5~9FRE@LkHK2e)162
zI+n(Qef~~=OApoz>uKbLIgm<MzlF5lLt@q&;s8_XR_V<fH_k7wpT#OTOKOnMki9a@
z<HvXPHM5&ajl|VQV1?W7mGNqIYgYq)`?9z9s>0Kt(A#e~EpQ32&TceN$Tkq2Lh11m
z`%CT1dsTOwcbhH*1?_mWdU>!XuA)aDd<jxjO1^Xq0tpWX#-dPK2dYccFGzB&vI3uI
zNTYDqqj*L<Jm~{s7c)_t!b^7?Q&5>;n0hJSJN48}rD?hl&FnBvk}6o|mfH9bjs0aY
zY(*fjw4s(xw0iwMa_Avyy`We@zVtdJ0Vz(=JNBz&c3Ykm?`VFMhxnrdRwrTQO`moe
zC+^lXW9-}(gwQp3M(`+-_AvgaHO)dbD(%bX>LM4rL)ZoV{HXv|lsH|v3|ci9hW(ue
zalgf34SLcvp4j9BVK&2fAK~PSQJNkz+{76rTti`HWu;T$0nEjlgfOCY26R;lFdPu(
z*%DdL#IG`g1-8Z2jp#Nf*Fx#mt+LAq8w_F;xjB&dTI6ZR<N~%#(0=<&65I?+2U5C*
zrfn+RwUqXJdDKbVxrqgs8Hc_iYI6MC|Fyj7Meoei#acGDHC^jFdwzE<sHeno3c>BV
zBs=^V)->E7I$9GD>h5K`Q>IVEvDm*>LD-4^ETlK0(1TN9BABM)hwTKtg8qTG?mHsj
z0&ig#Dt`Q}sqGnM9yNZaoDmCcXq5{5K1Km|Ns}Gnhox=QAqEmo`7VpEV(Z@dNS83w
zl9iP;aK4(EcBzzZ_G`8GRM%<$pyQapgginpCbdID^E-1*NzgGD8*v!X-e|gY>&d92
zw`>)w##Or5fjJ77_V`l%4QO)Cmqf!y4g}2JGPF5xL+6?kHfyL@77ZjN5p(2aL`ps{
z2WXL~JoS9##XHxYc>gVqRxbhe#Eha#W+}b`FqYr3?wGtA_PQnV(8K$vQI8)-*wK92
zuiKw%w36z6RKYBu?9uc3)~~C6p$9bD!lkN@GAhY*R)LJwTeoa>{aYd;C7ujDrB<Ez
z%ZPc8Y(_lK>07aMUwMsGt-SbO$$pQ68Ca5$)kx1%>+6e_=MM%(4<8ppieLiYAASD(
zd6U&-(7dIhQR%lL6Jq`iK4=3R$Y8%|!Hx@ZpSHMJme{P^rv2Zp83NC*3g@n25Ei~i
z$^sQV*C^UjEoonE&aE|0BN%2rct3#90R47MOG`@tU`J?F7e9ITUTSuaE@40dV)Z7=
zljF(-5_!bR8#itgta9mMfBN10Nse{6TNBP7IGxLi@bB=M#J+T4z6V<f7n;205Zz;A
zE*^Jgeio6Dy|$y+Gg*v)D99m(9NCjz`tLC5&#Th{CS*n+)EfYP^G_e2P$sVuuUg|2
z@2wWDcU;iSwzgeqmQB>lXuk<DVKBNMta)(uopP<i=O^rVt#0i-qN;#jc=Vyd8GTLu
z=SS#>^7P5asos1qO_|kyu6%_bap73A|JQv5>^r;U0HBZ)(mB+cO+H)*i$wvRKj;5}
z+E?WLzho7ziz3u*%QUHw4`NAuab&l@il7d4<Zbchoa_@oXts-IeXb~n9-U4}N#O}$
z#M4=YnwU~czgr!}bg{=e{J&~ro^!vE5`HhAGX$5>|6GokKPU>;Z1R4|*H_~`RSsIx
z)NWxa2BSKqmHm0M{-Yi$<81Cyy)~bo<;KY$M@%S0rY#sg?8_6Z9%>FehJA+3vY}+r
z_zUU+h8rh9FA6_nXfNzJ{|-M6U{PnrWq;N;5fm~waN`N}Vu&)E5BF8@eTexciM7~f
zhzhG;&xaXRdsJ#g#s{@d*t6Q*)W)tYn1*`P8;MV#2R}8TDx_R*Bxg-|+t}=C;ooUC
zWWSd4(Yp%8aQQhMwOadfnsjnk{M;H`=K4o}wj1R=?Amu4w+fhTeAv$TS-zLx0D)P4
zuRqwoBSO#fs(&D^+3pyf;Rm`N-)r@qY{nXuQm(n=IwK&uxMoDT4PIVa<PgpJaA(_R
z&RW?=?+ILKEnQ2{f92xnLS6VrRws4w?;Fkkko6L9kCJ8}nlM}}7h<4I88i~-B!2JI
zARRx9YEa)=S?TWWHG@9FthDCeLXydsh7g0Uha-#ooCnq)NVv7Ooz}%EZ$33MyEE<2
z-<Jw(l-2WAC=3=pXlm9o2x0{~(_|5B7yk#H_m@qj)I}l8QjVA)ZH^OV`)nPNq}U?+
z6PQo~%?Io2>v;qpUUlYl2>-97JDL}}<Zx5C>1<xo<B8i9c3thslFI)XutOEkLntbv
zdaPMF50Uay<ptJGuJmQQoJvq8q$bn^X&9hxZ{l3jK9*S4-Fl`js7?+VYISe2UCMu|
z3tI=fPPOzvom||4#Y159fus4&-9v}*opeMWtTj^TgKLB{h!SMk-JK!{CjjrQa$?t~
z86m`yk@imC&g@pb(7l^dT!JN6F=Ngjqg-BC^Km7{6-eY0PeBQxw-#{YGA8A{gh24}
z$Dbk~$}O>fMeuz?r*_D*C!Z%oiUq8EqR&r!IUSyAg``Fy_4MUJ*5yJ|PYMxNBV@&d
z6&c#t>F(~<Mey@#uVpRH%?+C6ukc+4n}d4Fs%QR6rKNvMClq>Am+Tb$kuRLF72e%A
zZIBFW+hdv@p>??~atLh&@f2E%yB7ocNHDPQs@xU*HS?>nkf`$)nMV2kUH%Yw)ck7>
zPrTllo*X9%?8?)iH;khzR;q;+J}>O`D%-clhaASGu{u3C|98y*8!wCVL@%F4lyw>;
zwLBxyfNw_p!Lpt|E|XR!NdZ%G0uTzdWLpKF_;`75NU&!5^v>iHD-8sM(97<zIYijc
z6H3(kGVvyo(53}b(R1J?^HKXbhHTuA%SSA5ep{Qt$`llg&Uv;M^YL1{2pA;NG@dWp
zAcdat^6?!-iElLs_V_xP3cS=20aa@8s}@JzD-VqxUlG0hY+_18=R8b%=eaInu@My?
zCc4f!>@p~OJ^1izV{?;fPRZm|G}QmNPkm0Pj+B3SxFFddd!b%HqQ<SS!X0F>j*exs
zNFanTF`ybZI59@YYP|8}gnqF8SF@CN14}2{j>9r~|8ze}8+V}}MKVJQ2_&Qj$HqiE
zg5wyNlquN#!+=OL6cWW0C1-UNZDdF<N3aEd+I3qu7UU@w!?3Q7Yi6jEj$fM0>6uE!
z3w)d!HW_vy5Q6V4&C6Om@Di44D#zAO1%!%@#Y7Te&SXq6$Y2QGs3Uok5sN{B;~!|W
z&1j%!oP(axOGjaoJ^E9gi@T&i!t>p2R`j2>fTInOWDRO#Z1>KmNHqkoc~o@2u4LAU
z6<i~aMstC&d>xZ>t;gsG10_AVqu$FSjCLf(C-)WvdU+FCBg|s_o1t-87ypVF`CFSE
z8g@+f88D*g!sbUKF<bx$9}^BLE4m+)H8tg3?NE4yLkbAUFy~41)gGz-{p>FL=vog?
z!jc5V*wPZP?$omuACdtP0iTP0uY?&<E=DF?ei;M^Z8ZOme6(Kxq#G0<w3CYNL-3`o
z3o*J<E5v)-yPRe)G$_CqqI}W^S`QnHEtIhnI^8cs`3j8%kx=W<#*n8G=kD$vJ+}LC
zVPPR6wvlOD1tycH{oOL4_MihN5?4$Qj!1GJ76oUE%%r4aTXmb3bWCA=k%Sxm{YhL5
zFvV-ldCNi_zf)iutYd20qXKlTx|IYH_(HBIC@5^Ez}R%n`Y;FeUM(^+NzF%5PMOUu
zVSk^_%$h7%h+%3Tn~qb9Tw<4t4CcJGx(TIcsxrHhkM(;mwU-D57CNJ3iT796eC@GY
zZZ;?MLMGybb=}`*H)yFEetdQY=~0h%cBfsaiCQKU)Yi#d_uf+H*yV1?A&v)}+zjef
z@)%T!w}lQc`OmNw>_>gPv>uq{q=qnx9re0xgTy;8y+Mq2oe59j8ja+`X9=`CV8n|v
z`XWgcQd;dJU-Um597RXs32mk=)@G+Vc&{V1ajnm!{H|R~y0aKjAc}B+uPDn3Vo{mo
z9R02z&dkTnDhe>Kii>CEK+xG6yQUR=Zm$H%p;m{ef%;)Ofm;x1V}9K(%~5$fwznb3
zZz2*Dy8L%GGtTz}?~@*Ddg(eQl|Le*=}!M%<T_OA8wAvOzW+M-@M^w-9#2*$^EHb@
z*VyjQioaT<Ft}wED+*c?1Q)m7xucg2eqO|~bA7q2t!+}bK8I-Bx=gAHKeHh2ckn9k
zYUxIE)Y%&z){!y<h9IzcfcX!0d9a#yEqnZt+F16FyRR4pJ`i2Yj8Gi9Y?P-vi}bh1
zbR~5xz3*a{3k}!QKIn2!A*Nxt)ZgtbBPy+2T}qt_c)<gSItQQZ<i;BsG{`Zu87es;
z`ewendHR1nWEvSVLe)IS9+LXs#nN*CNTA3#0=gd-efdZ3Sd+ZgdXzPLc`}~Rwz2ct
z6y?a>-7vhWt|?3`4Tsm;`8?dU1aU<L;s1-VC1M8B2Fg_&Cf6!^&c>$RZ!$^th7#Rs
zTU8%l<&D-;ZwtGy(<}f?S|6PoO}U9TucQLp7SaSA186J}$0ZlfLOqUS1Q<+Mc2Y|a
zVDjV9_RQzDX1Tzh0n?OkznZBY=h(4WS~<km1PIHHBFfGh4X#1_`LEZ`?#ifE;~*NY
z=W9u>_@en;gUr{xYJxk39@BY!kB*8Nt%+pQ!y;H^ieG6znh}psiLd0*4&rWyXgFB%
zj%Ha0OM*evE%)_SzW&OB0Hkg$*wFj<*4H)Q`az9%xYm9m6&qMT$YE)gh4|*Kw3xN}
zu2zrA2abR2g>hYAl1#n^{SwmkIPe>0>{VLoyn}4uUxw{7raa?yBZfzRTE>@~)p4*5
zy#B^SA|MvIc9i~Wr>v2P#<p|7y@SaPp2M&-XAm{as`r6Zl>tO->6o2nDwQB&0=Y;1
z%4p;Bw*IhQ&MJ2wDBV0}CyywLib};yzU#r42GdrLv$Wig3?Y|Jwab`SynK2oU*Je$
zLbs=!;KXehGO(Iz(vOlWX7<jd=KXY-r5dN6Xy2w7G>KtSm6YpoR@lTufBX7rD^EZ7
z)uF*4nI*#mll^T&Sj7^e^-d|5z4!6)sVDt-3Y&$_&M<xaZ+>ek{KplPbYogv7_nez
zO#rldbtBDiFPekc@E!jiB)}w{{!Tq2`1fhjMKOe-R;Ry_;@Tl22kv1H=SmB>Eox(w
zr_9MPKvipi&ZDWJ{)?co@BA$Wm)hJn#9BfWWLprNr&Z{|!`8hGl&K|c{mLPGozPzz
z<$1ZeW8a2{hXue3m%_D!yReLoxDfU4akx8|kqSo=j}tg*)z+dVhhWQ@>`3)P6c))T
zDQc=7r=FF1!4(0gJv$Xi%dMyctu89ia7*39esXM+${)dq0&)5OPh+pFUk?Ap5B3Dv
zQ*ips3nf>J-aftX5UvQ1`RIOgOt^glZAl|<Z)4`NkZ_zq<mfT@=>V@wA65|BenDp{
zAaI1*>K+pV_vD!zEOLzm#>2W)wUs*jA788&YkUsd{O^p|{x3Pi89KZ-Oemg8cRtS{
zU(#UBr1^gImx2G-e%|_c6GzP*F&N%(m3Heo3m_L6fpjwlenYgot6Hc#4bA>Wr}|ph
z>Yu~TZOgb9>*UMwlaS_97I`6pDxpm&1Sg=sdCfCL3K4&XrTXvHe}=&}4bw1&QOA$X
zqbp(5%Mrc|mL8@eSNmN_j;V;nFs-GP4k|9mCarI<V1W}HJN|DI=_=O#pZ)6mP$S!1
zFkRC2Qyz!5{opZS?m8~%T<-@+F8fCUph)0Ghl4uCcdbK`mvhQSKNZ67b0+q9oqEK8
z(pkJnU5q&U@F*TSm#n}?<vC^W3~VsiyH@w;P6T9`+CdV)ZjzqJCtpiCJ8qLTi;fD+
zMur_LAeOAnhVyxV(7!KM3=W`D8TkwDo;&}+8uR@l)hD<5T8>LLMC*z3(Za4+KsEG;
zcNdZHG2g%INUh!DOO#UN85NV%vvC@E)AMKebV=MCFeXd|$aPf2rs&|J){bbrj*ci3
z0o6=G%p+7)v}S%i#y-T!IKYjx8&{BpZU#Fc44>Wr+&ajdPr3e@863f5`L-RCcyBVT
zbUkF%Ju<Mz0%;tSjcg0#CyWMUEz@XAaq)R52eLG;${|S7r3(HxtDcH&K0i%wR{eN6
zM|`Z|ajyz9*Z+E_Vwj_R%v?(LUfz6hhk<lH@;*zU2{LqrW3<DXPRZR)UHs9o9RpOy
zmNbI6go)y<Kpt)KYGfS~f&*xZ0wr0_x7Ms$7W*?zcTd>s7_W=!YP)67J<@<84t6l_
zcK$Iw4O>oSyeq^T3YqnU3DplYV7`Zl`<EYNceZFzP{W5|@1Od5d6ReFe0U<y(7=HF
zLIWt>8Rn1qrTQ(5LAz+=;9A4LyThC7@5Ao|qo&8`dF_z+UX}bA4z#8Xyuk*tQu+eI
zNFw=O)Uv9&%g;g@jEbrcKcHE9n?aM--tn?4K)|!dlGL<&cu6!!_uA0dMMDEXqle?M
zhcRNYGEUJbG7|>HI)jwD+}z_&1Y1uvZhjxg3gP;nVBfd-Di8*opCk8@&ZMewZ*s>p
z-&;`-#;mNFe|M;5($LWOSld%f{6<NKj2e#5G};+5jWl4BQi?y=IN+v-o1H#Zka}i0
zXwEibY<s@_R^`9jeZf187%jQ^-h*5waKF4)smC|&h<c(PwvirK2ha9hmkzr^v;H72
z6#K~sne$<4JN*i98#6zTo!F^--8Zp+;p?7Y{?*&hzeQF!o}e<n9w`me|8dWng3oR4
z{jya<kiMLTEjLl#;e#>`0QFSv8GwlqjDZC4{~nDVy%_jDE^=ehj}`o|R-(ROWrQZ_
z%+W+!G0;CS;D?f}x)JMW&JQyNPTM<-qjF3?7xPbi*8nEIPd~R8Noc7<LkKI)w5etg
zzP!%+{IN1o7pO1_q0x)sZxd+nWABnUG<!-vc<yztN<m>|{y7eWLz-@|+Ehf}PrY=)
zItaXd_wEFYK-cp!CVE+Ho)bNeLdHc^OdAV%75;gzqoOJ(>-{ON8280LfBH1x#m;}s
z)JuhtJx@L~?xo*t7nlB@6iPlYIMv$2wlcq%>S#_)zP1)^sONZpzB;-G@Y_$TPP!s<
z-oN)_!cSN&2kvYTx7QDXtZI^ov$cfQaZk&mRql&LA}sg*o<L(Ail+k3Y(HVxV?T68
zoqW?&oHHwC@rVT`FR(dCs6js{DCk?&{4vnNCvjG|T_3Uij_y16$K$`F=rI@wsDpwu
zDLgTL+DZmfnm+U9_lAWkQSJMbr~PEl)9@#BL1LUz9TGs#e6K5B8axlkWHZj(hAj(}
ztSUMO)|kHrdN1QNK6$#0jQ%-?oydj-?PqYFPr8K=XhtM{W>shuZO8e#k=9gc{5M4K
zpezLv^(ZHY51ezND!+YtOfXp4IY;8`@$Fe+f)Y0@H&d^t%X-0gesl3M%pJFbgP);F
z$lTO=$N%63uci!p^n*K$0GH`d=8}K=M?pjTj~G~Nra1B#f><9GfR;;v<&2kRtLSvD
zzZa<RL*gxMq6y3zd4BbFcc(&?zMtxl??c(l!@}>9v5Jm~VoS#Zm*}Yguk+y&Q~#kA
z9|JSjm63$UBaHYQG3TcSqDe3lCooLYiHW=j7wm$xOYSU`jqigiNyhb83tJX$GRGou
zT@U*rW6I8?cEnBZNu+i(JX(?35dG!%m`vXMqtkQpd1q~q7*G*NAk2Kbfp}`0gP34$
zs^3x#TIGybyGyKAB~QpzNX0OM{+;~cY1NaM7Xh4wzqmi{SjQzcS)ate&UFo_AosX@
z)ldio5Sn^C<$D(EHi1r7R92ocP?CkPet*Jz6;w^|MV`C=2yXCt=~#;Z-_5rhs^46z
z84NC=jk0D>*>Bz`ynd%NIg%*bJhe$PgtGA)iT*YsSs?h5nuQp>alUhB#QiRWzj{O6
z{K#1v7Xjn7wY!#n0T0+g9QCwapG5_b74CJknlkpv1gG8^iOe68%$E95+x*!%Vz&oZ
zvuN<(geNyZQ?ik|+B*S=Hr~Mo%TRUG28735i$W^nAzs3x)OnNE?@%fG47uWU-WX;n
zL!@x#HuPvNYrxDlZx^BPHQ`Y8CDb}(xSoh5%&;y7z18YqS_pE31ccuo^)L6^WFG2B
zm02YIS`TE(7K~bE&uvIsxO^rq3NO{M{rHdkC%bZDa7{w%xXi~d7MCwTquu)cq5y&(
zYWO0Z>*ujbbCL*>j4jsWj{nGuJq_9H)4RyY)}C!uy5hHQ+Xn`0H#u$XV}_eo)qhM)
zeERd_mACVykeH$w^8y*#9^X{hz=P!S!(R+&oPGWL#P)hUVc$n?y1j6wc;^!ES$YP!
zc@^gP_uO>-WYj7Tx5bIGIKww9qO|z(l3dJ+PqK1N^TgJ1Oz6F_iMt-hLrT)5wIN57
zDJy9W@<Or?+1wwq^ZC8Hm5B@H()7q<&zjnLB5O+7q8k?Lo|i<F#il|kYGNukvR|;(
zODTr%dCRM-@OO)Rx&iO4RPg#9D|iui3YQ7F(_*-wI!L`i<|LciY(kWy*@wrjyqA$O
zd<!<|QKQ$?|D&K2T0?H?U~vZ@eD+M%@+GWI6ZnX&J0XEwS#G0SJN=F9;MnLTNV(HW
zk)u%{9L0+<FYGG}REb@8wJ(_s{b^&VbkE0Ug1CFOlCt#YIK{&amvVboAUF*?LE5TA
zP9n|_Kn%-~M*WuRFLLtiUayB*X4aw6@q$?=^X?eRVAfQCAvdvIl`?EInI8Hm+`Gh^
zM1z0Icfd~v@zr0Bk$Af0uNH2e8(u)L_}99$l38jL@W*K?-Fc>|1<^ehnK(#eC`GBn
zn_8*Vl(+_)r4ytIf3M#n!*GZX8~gTVreSsv_EP;c2NN0zyxhd&n0d;e6-<{=BncEw
z)eXi#ia_Z>bbF=7wD-b8m?|9U@Hb~S&KzX}YB^X;Im4|fM0FnTz{UKli}aqcACCS<
zAJRM5&+GqfZ{j}fVd9E>>XlbAh$X}Nng5DSB;LFUxBCKT1z@z!fCzNf)ZS4%cY+6H
zasGXOmE}7cJ4m9?!D?EgaI?4F+$?5v>-TN)<g=zMjhj0e@nqh@mVTJ2-VS2~p=jZ}
zz^}~G2)&51yOZt8w$8_XT|#+Vsby-0O*~JWWsdv?Az@=qNO3QCW8D<`vXvgvIM{J}
zCwtele!>=4<uOHBCQ`-grqZ(!-?<VmAPjyduAM{ScVPFBkUB^0Za|Yc=^|`g`4$ok
z-?I)h^caOmdq7G+Jue^J{Lv2vnEi+E7nyf9!=3y!*o}|*>)_cxYDfI#OFCmpS^ScD
z=6wK-%&}~wXYB_Gb=Z_vHVJwG@sQ*jDyrCtGqBS==UHq!u+B`}KD{LAYV!I`xcsiW
zd^-R7BSA>LayIY&F<Kqg6o*dSfowZdiPbPBb7<!#kU>HxTgN3Kf=SpcAA-m~Cy9@a
zs8DMrweR_oFCB7g9MD_wx%Ml6!`Z<+xVIW8l1B)-e>~?yh19IT$8##wCmpoN3wsUx
zT}KmdGN&G-hPo*{?`&kWabnKETtHO&7Phul%_~9981UTlj6X#YMgMsnH;}S6o?CMD
z+xPEx3GM^$5u3;G5djLQM7G4R*_j!a_wGC6x-~~l*cHh!ZS^;bVy6NUHs9R_O-eqO
zv!NQ49$RS2R`ve9iN!461V5OLrR;1x2W&YEqvlpIkil*Ut;h_PMdsXDBBnSQND3=~
z4n~<}eFWmjm6Dc=e9T-83<tFyM8eNcU(cayM3OJ<8B)9P^xJ)qOLJB&vyp@34X*q?
z!gY#bg><KR0+~^YX&1jh*x44c-QY;cAs|g3m6s+<Pim-g_k%o(JMi}gIsNpM#zoi!
z@Ww)R7o^|_(s~Nwc@#~ej!{d3uGaYx*QMIVlMwp<3DIL0@dN3k&tF`fo&DQ?HdtZr
zLc;buBj<*)Z6^oln()=b8i#11%3wG<uDKf~iaD^Y<_?p)H`lubRj?z)aZmJlCrh=@
z9f{;i<|%9EaC<(?^=%_lDqXh(6FOqRmEwoquqeL)IfFI>>l(!EYDe?Zq8Tn=kMpf1
zWPs5<AWv+Mk*IaKeZTMJRDizwK&TmR@o1uA)|5M2B3(xXi!miYoRu|D5MRwOu%|dG
zj^4uHWw>I_!3Li@rh-`;Npk4%<#-pEVY22&g`=CQ7FD`9s@#tP5$2)s(zw>~u&gVu
znjmu|)H+?*Sx9c6$`G}*Q8K%)%JgM><RI<u!2#+7Og-{k@_&*<E^|s_Rye@HA)B{j
zM_-$R(8<~bQK&rEajg}vPZ9eGEB-?qh%Y$GjR4)<&|`{KAcv9Uh`DQia9)$|`r60U
z`g<M3BJ?rjs2f_94{W!$h?qEMY0x87#F-p%6zlDf0K}%%Wa`P&CTm*y`A_<*GHI8@
zz;oj)i$H$7Fv3BiNm-gJW^coY9n^u?F~<v7^xMbBb8_$UJ5{<5959bo@SglG0tC4d
zS$mdZg1fykT7x~@G_Kjlog#WaJZpPREz`>KVmF!+^9Hur#{lnJEpN+>5k#!Ag*D>q
zjV)ZUK*z1hj_rf+ciH`af!UYUG&t#@8Htb7#U04BTMIdRHa=W=%%{b7lLc@3f**@E
zXU<7_b*)h9k^kPkVu>K8YhfF6u4XqB9V`1RlTTdxL-Od`VP$zLeQ?bFJQn>|6jE6t
zPiB*4jwT-1iqQ2aWy?FZTk6c|;a&{(u2}T!Ndm30lSc&5T4KjB%7{(=1ckNLN<3dx
z1JhRMn@sp{i0)-D7_%Y^6Ke6Thd}zHwy^(l1A2~DHjVD9X{wa@)kDsHtmjqYwN7##
zA{V*PlZi{y^YbCxlSgFprjsExG`RTHt5-nNuxKuz)eE^({RC3Q!w*(F9t@?@2huGn
zBZMYf4oZ2>U>>EN;YRXC$H&LNCJc7z&GlT{3f|Akxz|mh_-f(j(Xr$4{_6Xb_Qhja
z*T>3lbv~O10tCAu?8IT$o|1X1x_j>L39l!=iQ@LIf8F8N2>jcG=>0U!I?dDSMVIl|
zOZN0{*~+t6<CSXA!9&Me1pDu^;I)$ooe7*9xwNUi-qhqf+dZ#CqjjTj#<a3;jt-=$
z`OdgJopR1wBG~;G;3b{9<KE=0(E3qbB#SV3tXysHHtY^JF$TWsNI5kSxzeuww&w-Z
zy5fyBCQTcpHhv1B-lf8Bj17)Cp5tOxd8_jtdJ8EmmvbNvsmgr4i)3N0)}sYiGPffd
z4~l#yEjoWls$a_gz9%lj%5}sNlmwgJ)Payp?%OXWsZ)1rwCI<N9uU{oKvYnJJ|vE?
zB3_TCuSt(cL&?w2%Fx%{>Qd&pfO&*Z2Kyvgqj!*@VSTRbo9b0^i(NV2<+<vL!O>OY
z+s;KZV)Xt+x9lacYG=*_nXJ*HiQkX%x1RiafFiH@UttM%G*3W4l%S4%(s%sh0%V1R
zy17hI4Vt_vjS!=mJ7C&h2_=8{;C2qLh4g>AERy$l=Lt>kiS!v=NC`~_Dyld8xIh^C
zlf2PRuoHSK5No$p6Repnm=o&e_Rv6G;6NImi7sq!h#w!+)Q(7*nqd2TDl@I~)*1AE
z!fOdk4`~Ve_~F?xM|4e!(@~}7@#Pqnzf4t;WglcBWV*1NR)&mAL6PA^_pOylOU#Nf
zxC0$ipvf8s#Ag-F36Q682FOIIr;PaVPdTCX9!>>hFvvPo^v!WS2)pto83xNAsJ-Sw
z;}`I-xgRW~xU#2rvdyH{eIEoBu4UF(l!&%xcT6@G6aBiEYu>>dT{SJS*y;}t_58{R
zPL`&3dc<Qn&z}I<PTXy+M-y<wK+PGyKNf~Kna@+J@4ntNLIiV-Sr#nk{{H<tr0dj&
z(l=v2zkXd(1g^R_cA2xLLPVga_75vI_|(E^O#dT`-RnA}@ZGCWdEU7WuL?SOi3gNF
zR3afU<q&5=>%kSQK;g%0uI~vJ)lFUpA1?uu-P8EE`rv4BhelzNJ`-vS8hKL=QIM|e
zMHg{_I5R?DGcghlQD+le825i>ntU<Mnh?2cB)=Pf{_Gk51LYdy_bHHrpHIwYw*B`0
zW{NKycs29R>4?9uMKW2_u<n2cz=dmIBXzUk@&_tj$4Kg1cE`FY4LsKqkv@CuvP~&*
z0AwgX&EnR*1oQ&9en|Rnu^SV7Ky|on4*m&9K>BOVIH_nS5ckh%W*=N+PMKE&eQeEl
z|4e^-q4l=3^mWKT1i?Sf^rrYC>tVzoZyx3jq|QUs{aD1s9B+ZA9~8T@23u<qH`Es+
zYy<8U&Ll^u%OKweZ>_-Y^Suy;HMzOUj}@;uBsfM{`i+8sHa-Zko0ckzewgcp94)2o
z?KCnnf^2*E^A14fd_8d%WWa%i#W&wty04H|HJNO#G1%3paK(h=d`}LPRbdi(`Rw`g
z12jqaxcCeo;9{!Mm+M8@7(u#_X47}BLl38LYcJV8t>`O0`x`Pf_SR!apw(;N0C#};
zmicj^hZuNOI+br_3GJG8rk<B{*Qy0;GG2nV;rXbo14k2Fp<$n7=;>+9-PFJb#E(n8
zSsRBJMqAbp8MC!_@>)SgP>#%j0l?})06~r`#_|F06}Crh-bA$#uMC~iH(fO+=^Wja
zo`?8`X+esTHNx}8$R&iLzeg~f8MyO6WY_~T%&ucKBc0dUgAS9dAzx}aSX1jY<y>Sk
z%!Uq=qCW5>L>J!M)5Z%zDEJZ_8cY~h$-YLZ#Ju$x2t?QX^vXzp3Zu$I3nzUI%~O1D
zF*jDanNsxmTeo(p)w}NQ``vzCJ6MV7!<h#TlD*<}1=^(s=;AXHshp{+`W?RMK*h+D
zzOT@a-^p7uTZ?0|rf2=9-f|tnA`2Q6M^!a+V<RyBtAfyI&kedIu**bpgHm)A;p6pS
z1q?9!3Zm+|*^TGn)Q2JyilT@5n^;g#U@sx#ZF@Om@has`%B2&q@E_u-N-cz;0<zk^
zpTw+K#BoCGXmw?4S0-|8rKR*uZVx2d*;83v$m)jkkNzIQd&5Bsm-+s8Lm#Wj4dML@
zx9uU7RX07Y%a?y@AZzL!QnsP?k|tM7=A24bSft{5v1w^&hgU`uj1E44{NT+P9C$c|
zX;q0|{o^7s@+NWD9+8DyLO*ToV@Ff&=L2c>oHnC>HN6aMq|ZYY?KGkkqq8PSN{DJS
zDHietEuqkzO;sS~Z87xor_VFO?wKz`g4d(lvQ^_(by~e5Ls*_(IsqFbIr3MwP7}A$
z;0~`u3kz<~1&&%lF$wUGRrQ{B)S|nA7}Y-X@IPKSG()%K1?CY)YmSoMK)<%>rc@_&
zs5J*N4gaWgea5UH;Do%VYjrgoz-=v=t%5~vQ`_WS$>Mjmt}pr6Y~=7U#tXb>u@2vp
zK-U|lYZ&6^qmZZ+7Bj9=iJgE$BeX81VwUJhx&3T7!Sw@*@VL~_9_!17hK4kFp)i^J
z4i?UOcKmr$$dsu^z1-_#C+>0lCs%Wv`7BgOm>s@@(G)#s`ZtcaeFyHa5YnI)KW@!v
z0nvhW_lR+ohqn7SAxRP{thYLz^}|7~ur&UR3FRLDl(%k1wg@L=j$&H9G$#Uv6ESy>
zxi}p;Y>I;!mp<f<y_$f>x%tioM=6yo>W<`8L6Jg0%QCMR1|@)iQ|b5QI4P;09Yi@}
znj;M6!!{O%W_#Y;d@uH>JX13y&>n_R9?V?<JS)&kZ@Wxr(}go5Zm>R7c+fYkdkPK>
zg5VoiO~z4KL0+>aj>K$ut#4U-Cfs^_`SxNBi%bIFRaSbxupTgc_2U}O_er$<9rKi)
zZ{Li)JFs0a^&f>ipdKJiE8Eomd=XJ*h7C2A;tKoG*Jn-<sQ>J+BHK|>u*n1CPL;;<
z&U@EggQzpT8|&+$yP74K?;1Y?j;zCKiAS8Zd0z#eWb`nh7N}t-OiN~mapIiaKL>*1
z-3Qz;cT<R2ETalw9M!)jfi`Ed`3B>f0Q0XcNy=av(i^eMLSkx^<}k*4&@Tm?^fMJc
z^zIVMrjZCsv{=HIs}yq}dfj%l4!oxOdJ>~bUd^IkI;&`HpevDLAZo~!NQ=2khv$&}
z%gW1#oj3vi`8wqU%xq7()CHZSghq&*#5h3`IIJHI*=(~Zmvd;t|9}7;Ft}PD6?Mi{
zg;D}%E-0(@*AQQhB-#Uh)My)Ow-SR-7B7AJ_X$xmg*;XB-@uvIaOnKKiA1Ip=0Z%H
z0WOo#Mil?s)~%GD6bMRV^oCdKCqYW<_;M-?N+~&#VwB+=WD;WV@Mws-AEC{u0qh&&
z1!XxoeNQ`e@n`Fa6cE}zwVVEk#ynw5gj0dTp)AF<V7q1J4zjGT;x(KKbiYw^ck+4c
z#OT|%Z~33-iQEdvQntj37P`D}G=H%(6a<6K&8(@0bUhKZ%k`g(L#P7;-iC%`si#_R
zFB3Bo>_=+;`a&=o_P_Z|)+jLH5Ig@Sl&{SD_ZOa#A3wYyJ|$=^k^F3Y**|r*c7T_c
zx9W{qxgFns8p4fo)N8ISIdcWq8j#fk8wKZ!XZ&&RfHt-&hA>sTunraC0<Nd0Usi3|
z{>7nBMp^TlBS6HT2uZLjVE>Yf|0zdmvoP<5-B!6V!UECAIQoB9GG+I40m0C2?g!Qk
zw{H9Z?%M}W)CEP~jgNW<93)}b*HXSq6S!04_-2N6Qyxq|qzIA+P5pK@oMhO?^uU-w
zN}@_hifMV#{~ucm&PahKD+6=Y8GflU@v(l0WSGxx(AeJ4@$IoIgE{jLeMT?n=@T77
z41a|ffjg!+9uzU8&(s0#f1Z*HI&fn-gTE2<zzu(fH&b~mcn+{mZ8?z1aP0F1fvdc%
z0Sp^B4!mcuQG5C|6MPDWMiy|AJc=2(jV;QPZA)lf@3V(WT`jo`aSR_sPEG!)JM~l4
zQ{c=UaC2-vP@xc0A%i=^jGOPn*$(6|xHENp3=q0HMUG)T^9zOvtQ(fSpFZWkH&{7v
zF5J@6fa|FuOFN^5%+q_!m$mmV3l(CY92nO(p~EeYy`gj}+dpZBxV3-2moltpjL<(Y
zn+Lda83=?KmI<-4FWkd)aLK9lC#(PdSJe1_#$w8I1qt>AGnaF?&R|clJ{9|Ed$`oM
z+X3(T_A?$hcIy4_fAzu*y+QwWfwsbe26<hyG6mMFF!8NYVLkSKs=|xk?+^a|vD=X0
z*rGkMi?VL9E~sA5Ruiwh@BRMT{r7iWpP$dZ!MR~Bk22H=A%9a29h~ZNAYw|pF7T|2
z!wVKp(&-5QH+zK&pH$$y9~<-=3k*&b%l0rQJe)gK=vSTG)cN^q{_M-Y{bc>ziuBrF
zYp4F-HMPx*;S1A?Re#Q$^IBZ%zmze4jmf>cUW@m<PcL8mXKJnc)A!rH{r?{H?>pGd
zF3z9+Gx~}z(Ft9gp3K0&pjzS@QIe8al4_M)lnSI6j0}v7bPddO4NOA}Ev-z<tW3?d
n4GgRd4BiJXJcgnnH$NpatrE9}d3=&pKn)C@u6{1-oD!M<9i&1d

literal 0
HcmV?d00001

-- 
GitLab