diff --git a/AE.Keras2/01-Prepare-MNIST-dataset.ipynb b/AE.Keras2/01-Prepare-MNIST-dataset.ipynb
deleted file mode 100644
index 37fac4a2bb32d76126ac4c18c1a7106c465fcfa5..0000000000000000000000000000000000000000
--- a/AE.Keras2/01-Prepare-MNIST-dataset.ipynb
+++ /dev/null
@@ -1,243 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE1] - Prepare a noisy MNIST dataset\n",
-    "<!-- DESC --> Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Prepare a MNIST noisy dataset, usable with our denoiser autoencoder (duration : <50s)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load original MNIST dataset\n",
-    " - Adding noise, a lot !\n",
-    " - Save it :-)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init and set parameters\n",
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import sys\n",
-    "\n",
-    "from skimage import io\n",
-    "from skimage.util import random_noise\n",
-    "\n",
-    "import modules.MNIST\n",
-    "from modules.MNIST     import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE1')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the future prepared dataset (example : ./data/mnist-noisy.h5)\\\n",
-    "`scale` : Dataset scale. 1 mean 100% of the dataset - set 0.1 for tests\\\n",
-    "`progress_verbosity`: Verbosity of progress bar: 0=silent, 1=progress bar, 2=One line"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset   = './data/mnist-noisy.h5'\n",
-    "scale              = 1\n",
-    "progress_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'scale', 'progress_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get original dataset\n",
-    "We load :  \n",
-    "`clean_data` : Original and clean images - This is what we will want to ontain at the **output** of the AE  \n",
-    "`class_data` : Image classes - Useless, because the training will be unsupervised  \n",
-    "We'll build :  \n",
-    "`noisy_data` : Noisy images - These are the images that we will give as **input** to our AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_data, class_data = MNIST.get_origine(scale=scale)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Add noise\n",
-    "We add noise to the original images (clean_data) to obtain noisy images (noisy_data)  \n",
-    "Need 30-40 seconds"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def noise_it(data):\n",
-    "    new_data = np.copy(data)\n",
-    "    for i,image in enumerate(new_data):\n",
-    "        fidle.utils.update_progress('Add noise : ',i+1,len(data),verbosity=progress_verbosity)\n",
-    "        image=random_noise(image, mode='gaussian', mean=0, var=0.3)\n",
-    "        image=random_noise(image, mode='s&p',      amount=0.2, salt_vs_pepper=0.5)\n",
-    "        image=random_noise(image, mode='poisson') \n",
-    "        image=random_noise(image, mode='speckle',  mean=0, var=0.1)\n",
-    "        new_data[i]=image\n",
-    "    print('Done.')\n",
-    "    return new_data\n",
-    "\n",
-    "# ---- Add noise to input data : x_data\n",
-    "#\n",
-    "noisy_data = noise_it(clean_data)\n"
-   ]
-  },
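-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To see the effect of each noise mode separately, we can apply them one by one to a single image (a small illustrative sketch, reusing the same skimage calls) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "img = clean_data[0]\n",
-    "\n",
-    "# ---- Original image, then each noise mode applied separately\n",
-    "#\n",
-    "variants = [ img,\n",
-    "             random_noise(img, mode='gaussian', mean=0, var=0.3),\n",
-    "             random_noise(img, mode='s&p',      amount=0.2, salt_vs_pepper=0.5),\n",
-    "             random_noise(img, mode='poisson'),\n",
-    "             random_noise(img, mode='speckle',  mean=0, var=0.1) ]\n",
-    "\n",
-    "fidle.scrawler.images(np.array(variants), None, indices='all', columns=5, x_size=3,y_size=3, interpolation=None, save_as=None)"
-   ]
-  },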
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Have a look"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print('Clean dataset (clean_data) : ',clean_data.shape)\n",
-    "print('Noisy dataset (noisy_data) : ',noisy_data.shape)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Noisy images we'll have in input (or x)\")\n",
-    "fidle.scrawler.images(noisy_data[:5], None, indices='all', columns=5, x_size=3,y_size=3, interpolation=None, save_as='01-noisy')\n",
-    "fidle.utils.subtitle('Clean images we want to obtain (or y)')\n",
-    "fidle.scrawler.images(clean_data[:5], None, indices='all', columns=5, x_size=3,y_size=3, interpolation=None, save_as='02-original')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Shuffle dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "p = np.random.permutation(len(clean_data))\n",
-    "clean_data, noisy_data, class_data = clean_data[p], noisy_data[p], class_data[p]\n",
-    "print('Shuffled.')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Save our prepared dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "MNIST.save_prepared_dataset( clean_data, noisy_data, class_data, filename=prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/02-AE-with-MNIST.ipynb b/AE.Keras2/02-AE-with-MNIST.ipynb
deleted file mode 100644
index 1898715a4c98445e310f9193e7142bd8328f94e9..0000000000000000000000000000000000000000
--- a/AE.Keras2/02-AE-with-MNIST.ipynb
+++ /dev/null
@@ -1,437 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE2] - Building and training an AE denoiser model\n",
-    "<!-- DESC --> Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a denoizing **autoencoder** neurals network (AE)\n",
-    " - First overview or example of Keras procedural syntax\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process with Tensorboard\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `denoised_test` for denoised images at the output of the model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE2')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (Need 400 Mo, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)\n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = 123\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 30\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, _,_ = MNIST.reload_prepared_dataset(scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename=prepared_dataset )"
-   ]
-  },
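-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "A quick sanity check of what we just loaded (a minimal sketch ; the actual sizes depend on `scale` and `train_prop`) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Clean and noisy sets must be aligned pairwise\n",
-    "#\n",
-    "print('clean_train :', clean_train.shape)\n",
-    "print('clean_test  :', clean_test.shape)\n",
-    "print('noisy_train :', noisy_train.shape)\n",
-    "print('noisy_test  :', noisy_test.shape)\n",
-    "\n",
-    "assert clean_train.shape == noisy_train.shape\n",
-    "assert clean_test.shape  == noisy_test.shape"
-   ]
-  },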
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build models"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name=\"ae\")\n",
-    "\n",
-    "ae.compile(optimizer=keras.optimizers.Adam(), loss='binary_crossentropy')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "20' on a CPU  \n",
-    "1'12 on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = ae.fit(noisy_train, clean_train,\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, clean_test),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'loss':['loss','val_loss']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,2):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`denoised_test` for denoised images at the output of the model\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test = model.predict(noisy_test)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/03-AE-with-MNIST-post.ipynb b/AE.Keras2/03-AE-with-MNIST-post.ipynb
deleted file mode 100644
index 1c77a689c3b7a60b01662f5e338ea64a187808f1..0000000000000000000000000000000000000000
--- a/AE.Keras2/03-AE-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,306 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE3] - Playing with our denoiser model\n",
-    "<!-- DESC --> Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Retrieve and use our denoiser model\n",
-    "\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Reload our dataset and saved best model\n",
-    " - Encode/decode some test images (neved used, never seen by the model)\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `denoised_test` for denoised images at the output of the model\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import sys\n",
-    "import h5py\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "\n",
-    "from modules.MNIST import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "These **parameters must be identical** to those used during the training in order to have the **same dataset**.\\\n",
-    "`prepared_dataset` : Filename of the prepared dataset (Need 400 Mo, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "saved_models     = './run/AE2/models'\n",
-    "dataset_seed     = 123\n",
-    "scale            = 1\n",
-    "train_prop       = .8"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'train_prop')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)  \n",
-    "**Important :** Make sure that the **digest is identical** to the one used during the training !\\\n",
-    "See : [AE2 / Step 2 - Retrieve dataset](./02-AE-with-MNIST.ipynb#Step-2---Retrieve-dataset)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, _,_ = MNIST.reload_prepared_dataset(scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename=prepared_dataset )"
-   ]
-  },
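-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To compare datasets by hand, we can hash the reloaded arrays (a minimal sketch using hashlib ; note that the MNIST class may compute its own digest differently) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import hashlib\n",
-    "\n",
-    "# ---- Hash the raw bytes of the test arrays (illustrative digest only)\n",
-    "#\n",
-    "h = hashlib.sha256()\n",
-    "h.update(clean_test.tobytes())\n",
-    "h.update(noisy_test.tobytes())\n",
-    "print('Local digest :', h.hexdigest()[:16])"
-   ]
-  },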
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`denoised_test` for denoised images at the output of the model\n",
-    " \n",
-    "### 3.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{saved_models}/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.2 - Let's make a prediction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from tabnanny import verbose\n",
-    "\n",
-    "\n",
-    "denoised_test = model.predict(noisy_test,verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 3.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Looking at the latent space\n",
-    "### 4.1 - Getting clean data and class"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_data,_, _,_, class_data,_ = MNIST.reload_prepared_dataset(scale      = 1, \n",
-    "                                                                train_prop = 1,\n",
-    "                                                                seed       = dataset_seed,\n",
-    "                                                                shuffle    = False,\n",
-    "                                                                filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 4.2 - Retrieve encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "encoder=model.get_layer('encoder')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 4.3 Showing latent space\n",
-    "Here is the digit distribution in the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 20000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(clean_data, class_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z = encoder.predict(x_show)\n",
-    "\n",
-    "# ---- Show them\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('08-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
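-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The scatter above only shows the first two latent components. For a 2D view that accounts for all the latent dimensions, we can project the points with a PCA (a sketch, assuming scikit-learn is installed) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from sklearn.decomposition import PCA\n",
-    "\n",
-    "# ---- Project the full latent space onto its 2 principal axes\n",
-    "#\n",
-    "z2 = PCA(n_components=2).fit_transform(z)\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z2[:, 0], z2[:, 1], c=y_show, cmap='tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "plt.show()"
-   ]
-  },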
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/04-ExtAE-with-MNIST.ipynb b/AE.Keras2/04-ExtAE-with-MNIST.ipynb
deleted file mode 100644
index ed99c0c20a4c4f10b1a8d3b298fe69903e324f8e..0000000000000000000000000000000000000000
--- a/AE.Keras2/04-ExtAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,533 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE4] - Denoiser and classifier model\n",
-    "<!-- DESC --> Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Building a multiple output model, able to **denoise** and **classify**\n",
-    " - Understanding a more **advanced programming model**\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a multiple output model using Keras procedural programing model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `class_train`, `class_test` for the classes to which the images belong \n",
-    "- `denoised_test` for denoised images at the output of the model\n",
-    "- `classcat_test` for class prediction in model output (is a softmax)\n",
-    "- `classid_test` class prediction (ie: argmax of classcat_test)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE4')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (Need 400 Mo, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset. 'None' mean using /dev/urandom  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)\n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = None\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 30\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, class_train,class_test = MNIST.reload_prepared_dataset(\n",
-    "                                                                                    scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build models"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name='ae')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "hidden1     = 100\n",
-    "hidden2     = 100\n",
-    "\n",
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(8, (3,3),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(16, (3,3), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(100, activation='relu')(x)\n",
-    "x         = keras.layers.Dropout(0.5)(x)\n",
-    "\n",
-    "outputs   = keras.layers.Dense(10, activation='softmax')(x)\n",
-    "\n",
-    "cnn       = keras.Model(inputs, outputs, name='cnn')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Final model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "denoised = ae(inputs)\n",
-    "classcat = cnn(inputs)\n",
-    "\n",
-    "model = keras.Model(inputs, [denoised, classcat])\n",
-    "\n",
-    "model.compile(optimizer='rmsprop', \n",
-    "              loss={'ae':'binary_crossentropy', 'cnn':'sparse_categorical_crossentropy'},\n",
-    "              loss_weights=[1,1],\n",
-    "              metrics={'cnn':'accuracy'} )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "20' on a CPU  \n",
-    "1'12 on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = model.fit(noisy_train, [clean_train, class_train],\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, [clean_test, class_test]),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'Loss':['loss', 'ae_loss', 'cnn_loss'],\n",
-    "                                 'Validation loss':['val_loss','val_ae_loss', 'val_cnn_loss'], \n",
-    "                                 'Accuracy':['cnn_accuracy','val_cnn_accuracy']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,4):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`class_train`, `class_test` for the classes to which the images belong \\\n",
-    "`denoised_test` for denoised images at the output of the model\\\n",
-    "`classcat_test` for class prediction in model output (is a softmax)\\\n",
-    "`classid_test` class prediction (ie: argmax of classcat_test)\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction\n",
-    "Note that our model will returns 2 outputs : **denoised images** from output 1 and **class prediction** from output 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test, classcat_test = model.predict(noisy_test, verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)\n",
-    "print('Predicted classes (classcat_test) shape : ',classcat_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Class prediction\n",
-    "Note: The evaluation requires the noisy images as input (noisy_test) and the 2 expected outputs:\n",
-    " - the images without noise (clean_test)\n",
-    " - the classes (class_test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "score = model.evaluate(noisy_test, [clean_test, class_test], verbose=0)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Accuracy :\")\n",
-    "print(f'Classification accuracy : {score[3]:4.4f}')\n",
-    "\n",
-    "fidle.utils.subtitle(\"Few examples :\")\n",
-    "classid_test  = np.argmax(classcat_test, axis=-1)\n",
-    "fidle.scrawler.images(noisy_test, class_test, range(0,200), columns=12, x_size=1, y_size=1, y_pred=classid_test, save_as='04-predictions')"
-   ]
-  },
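-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Rather than relying on the hard-coded index `score[3]`, we can list the metric names to see which score is which (a small sketch based on Keras' `metrics_names`) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Map each returned score to its metric name (illustrative)\n",
-    "#\n",
-    "for name, value in zip(model.metrics_names, score):\n",
-    "    print(f'{name:20s} : {value:.4f}')"
-   ]
-  },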
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/05-ExtAE-with-MNIST.ipynb b/AE.Keras2/05-ExtAE-with-MNIST.ipynb
deleted file mode 100644
index 21eccfb9d618e42246197620821f92ace2c3fc8f..0000000000000000000000000000000000000000
--- a/AE.Keras2/05-ExtAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,565 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2AE5] - Advanced denoiser and classifier model\n",
-    "<!-- DESC --> Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Building a multiple output model, able to **denoise** and **classify**\n",
-    " - Understanding a more complex **advanced programming model**\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST.  \n",
-    "The use of a GPU is often indispensable.\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a multiple output model using Keras procedural programing model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Follow the learning process\n",
-    " \n",
-    "## Data Terminology :\n",
-    "- `clean_train`, `clean_test` for noiseless images \n",
-    "- `noisy_train`, `noisy_test` for noisy images\n",
-    "- `class_train`, `class_test` for the classes to which the images belong \n",
-    "- `denoised_test` for denoised images at the output of the model\n",
-    "- `classcat_test` for class prediction in model output (is a softmax)\n",
-    "- `classid_test` class prediction (ie: argmax of classcat_test)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff\n",
-    "### 1.1 - Init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "from skimage import io\n",
-    "import random\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard\n",
-    "\n",
-    "import os,sys\n",
-    "from importlib import reload\n",
-    "import h5py\n",
-    "\n",
-    "from modules.MNIST          import MNIST\n",
-    "from modules.ImagesCallback import ImagesCallback\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2AE5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters\n",
-    "`prepared_dataset` : Filename of the prepared dataset (Need 400 Mo, but can be in ./data)  \n",
-    "`dataset_seed` : Random seed for shuffling dataset  \n",
-    "`scale` : % of the dataset to use (1. for 100%)  \n",
-    "`latent_dim` : Dimension of the latent space  \n",
-    "`train_prop` : Percentage for train (the rest being for the test)\n",
-    "`batch_size` : Batch size  \n",
-    "`epochs` : Nb of epochs for training\\\n",
-    "`fit_verbosity` is the verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "dataset_seed     = None\n",
-    "\n",
-    "scale            = .1\n",
-    "\n",
-    "latent_dim       = 10\n",
-    "\n",
-    "train_prop       = .8\n",
-    "batch_size       = 128\n",
-    "epochs           = 20\n",
-    "fit_verbosity    = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('prepared_dataset', 'dataset_seed', 'scale', 'latent_dim')\n",
-    "fidle.override('train_prop', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Retrieve dataset\n",
-    "With our MNIST class, in one call, we can reload, rescale, shuffle and split our previously saved dataset :-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "clean_train,clean_test, noisy_train,noisy_test, class_train,class_test = MNIST.reload_prepared_dataset(\n",
-    "                                                                                    scale      = scale, \n",
-    "                                                                                    train_prop = train_prop,\n",
-    "                                                                                    seed       = dataset_seed,\n",
-    "                                                                                    shuffle    = True,\n",
-    "                                                                                    filename   = prepared_dataset )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Build model"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, activation=\"relu\", strides=2, padding=\"same\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "z         = layers.Dense(latent_dim)(x)\n",
-    "\n",
-    "encoder = keras.Model(inputs, z, name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, activation=\"relu\", strides=2, padding=\"same\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1, 3, activation=\"sigmoid\", padding=\"same\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### AE\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "latents   = encoder(inputs)\n",
-    "outputs   = decoder(latents)\n",
-    "\n",
-    "ae = keras.Model(inputs,outputs, name='ae')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN1"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(8, (3,3),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(16, (3,3), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.2)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(100, activation='relu')(x)\n",
-    "outputs   = keras.layers.Dropout(0.5)(x)\n",
-    "\n",
-    "cnn1       = keras.Model(inputs, outputs, name='cnn1')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### CNN2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "x         = keras.layers.Conv2D(32, (5,5),  activation='relu')(inputs)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "x         = keras.layers.Conv2D(64, (5,5), activation='relu')(x)\n",
-    "x         = keras.layers.MaxPooling2D((2,2))(x)\n",
-    "x         = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "x         = keras.layers.Flatten()(x)\n",
-    "x         = keras.layers.Dense(50, activation='relu')(x)\n",
-    "outputs   = keras.layers.Dropout(0.3)(x)\n",
-    "\n",
-    "cnn2       = keras.Model(inputs, outputs, name='cnn2')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Final model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "denoised = ae(inputs)\n",
-    "\n",
-    "branch_1 = cnn1(inputs)\n",
-    "branch_2 = cnn2(inputs)\n",
-    "\n",
-    "x        = keras.layers.concatenate([branch_1,branch_2], axis=1)\n",
-    "\n",
-    "classcat = keras.layers.Dense(10, activation='softmax', name='cnn')(x)\n",
-    "\n",
-    "\n",
-    "model = keras.Model(inputs, [denoised, classcat])\n",
-    "\n",
-    "model.compile(optimizer='rmsprop', \n",
-    "              loss={'ae':'binary_crossentropy', 'cnn':'sparse_categorical_crossentropy'},\n",
-    "              loss_weights=[1,1],\n",
-    "              metrics={'cnn':'accuracy'} )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Train\n",
-    "About 20 minutes on a CPU  \n",
-    "About 1 min 30 s on a GPU (V100, IDRIS)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Callback : Images\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/images')\n",
-    "filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'\n",
-    "callback_images = ImagesCallback(filename, x=clean_test[:5], encoder=encoder,decoder=decoder)\n",
-    "\n",
-    "# ---- Callback : Best model\n",
-    "#\n",
-    "fidle.utils.mkdir( run_dir + '/models')\n",
-    "filename = run_dir + '/models/best_model.h5'\n",
-    "callback_bestmodel = tf.keras.callbacks.ModelCheckpoint(filepath=filename, verbose=0, save_best_only=True)\n",
-    "\n",
-    "# ---- Callback tensorboard\n",
-    "#\n",
-    "logdir = run_dir + '/logs'\n",
-    "callback_tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)\n",
-    "\n",
-    "# callbacks_list = [callback_images, callback_bestmodel, callback_tensorboard]\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono = fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = model.fit(noisy_train, [clean_train, class_train],\n",
-    "                 batch_size      = batch_size,\n",
-    "                 epochs          = epochs,\n",
-    "                 verbose         = fit_verbosity,\n",
-    "                 validation_data = (noisy_test, [clean_test, class_test]),\n",
-    "                 callbacks       = callbacks_list  )\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={'Loss':['loss', 'ae_loss', 'cnn_loss'],\n",
-    "                                 'Validation loss':['val_loss','val_ae_loss', 'val_cnn_loss'], \n",
-    "                                 'Accuracy':['cnn_accuracy','val_cnn_accuracy']}, save_as='01-history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Denoising progress"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "imgs=[]\n",
-    "for epoch in range(0,epochs,4):\n",
-    "    for i in range(5):\n",
-    "        filename = run_dir + '/images/image-{epoch:03d}-{i:02d}.jpg'.format(epoch=epoch, i=i)\n",
-    "        img      = io.imread(filename)\n",
-    "        imgs.append(img)      \n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='02-original-real')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as='03-original-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Evolution during the training period (denoised_test) :')\n",
-    "fidle.scrawler.images(imgs, None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, y_padding=0.1, save_as='04-learning')\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy images (noisy_test) :')\n",
-    "fidle.scrawler.images(noisy_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Real images (clean_test) :')\n",
-    "fidle.scrawler.images(clean_test[:5], None, indices='all', columns=5, x_size=2,y_size=2, interpolation=None, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Evaluation\n",
-    "**Note :** We will use the following data:\\\n",
-    "`clean_train`, `clean_test` for noiseless images \\\n",
-    "`noisy_train`, `noisy_test` for noisy images\\\n",
-    "`class_train`, `class_test` for the classes to which the images belong \\\n",
-    "`denoised_test` for denoised images at the output of the model\\\n",
-    "`classcat_test` for class predictions at the output of the model (a softmax output)\\\n",
-    "`classid_test` for predicted class ids (i.e. argmax of classcat_test)\n",
-    " \n",
-    "### 7.1 - Reload our best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "model = keras.models.load_model(f'{run_dir}/models/best_model.h5')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Let's make a prediction\n",
-    "Note that our model returns 2 outputs : **denoised images** on output 1 and **class predictions** on output 2"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "denoised_test, classcat_test = model.predict(noisy_test, verbose=0)\n",
-    "\n",
-    "print('Denoised images   (denoised_test) shape : ',denoised_test.shape)\n",
-    "print('Predicted classes (classcat_test) shape : ',classcat_test.shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Denoised images "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "i=random.randint(0,len(denoised_test)-8)\n",
-    "j=i+8\n",
-    "\n",
-    "fidle.utils.subtitle('Noisy test images (input):')\n",
-    "fidle.scrawler.images(noisy_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='05-test-noisy')\n",
-    "\n",
-    "fidle.utils.subtitle('Denoised images (output):')\n",
-    "fidle.scrawler.images(denoised_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='06-test-predict')\n",
-    "\n",
-    "fidle.utils.subtitle('Real test images :')\n",
-    "fidle.scrawler.images(clean_test[i:j], None, indices='all', columns=8, x_size=2,y_size=2, interpolation=None, save_as='07-test-real')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Class prediction\n",
-    "Note: The evaluation requires the noisy images as input (noisy_test) and the 2 expected outputs:\n",
-    " - the images without noise (clean_test)\n",
-    " - the classes (class_test)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
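-    "# evaluate() returns : [total_loss, ae_loss, cnn_loss, cnn_accuracy], hence score[3] below\n",
-    "#\n",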
-    "score = model.evaluate(noisy_test, [clean_test, class_test], verbose=0)\n",
-    "\n",
-    "fidle.utils.subtitle(\"Accuracy :\")\n",
-    "print(f'Classification accuracy : {score[3]:4.4f}')\n",
-    "\n",
-    "fidle.utils.subtitle(\"Few examples :\")\n",
-    "classid_test  = np.argmax(classcat_test, axis=-1)\n",
-    "fidle.scrawler.images(noisy_test, class_test, range(0,200), columns=12, x_size=1, y_size=1, y_pred=classid_test, save_as='04-predictions')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/AE.Keras2/modules/ImagesCallback.py b/AE.Keras2/modules/ImagesCallback.py
deleted file mode 100644
index 5962bb9f3e440cc28c2954f35ed20c84ba344fc4..0000000000000000000000000000000000000000
--- a/AE.Keras2/modules/ImagesCallback.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import matplotlib.pyplot as plt
-
-class ImagesCallback(Callback):
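-    """
-    Keras callback which, at the end of each epoch, encodes/decodes a few
-    sample images and saves the resulting reconstructions as image files,
-    in order to visualize the training progress.
-    """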
-    
-   
-    def __init__(self, filename='image-{epoch:03d}-{i:02d}.jpg', 
-                       x=None,
-                       encoder=None, decoder=None):
-        self.filename  = filename
-        self.x         = x
-        self.encoder   = encoder
-        self.decoder   = decoder
-        if len(x)>100:
-            print('***Warning : The number of images is reduced to 100')
-            self.x=x[:100]
-        
-    def on_epoch_end(self, epoch, logs={}):  
-        
-        # ---- Get latent points
-        #
-        z_new  = self.encoder.predict(self.x)
-        
-        # ---- Predict an image
-        #
-        images = self.decoder.predict(np.array(z_new))
-        
-        # ---- Save images
-        #
-        for i,image in enumerate(images):
-            
-            # ---- Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
-            #
-            image = image.squeeze()
-        
-            # ---- Save it
-            #
-            filename = self.filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filename, image, cmap='gray_r')
-            else:
-                plt.imsave(filename, image)
diff --git a/AE.Keras2/modules/MNIST.py b/AE.Keras2/modules/MNIST.py
deleted file mode 100644
index 6e040a60fa0133ef593e25793b5df3c0bc3aec20..0000000000000000000000000000000000000000
--- a/AE.Keras2/modules/MNIST.py
+++ /dev/null
@@ -1,178 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-import h5py
-import os
-import numpy as np
-from hashlib import blake2b
-import tensorflow as tf
-import tensorflow.keras.datasets.mnist as mnist
-
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class allows us to manage datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_origine(cls, scale=1, normalize=True, expand=True, concatenate=True):
-        """
-        Return original MNIST dataset
-        args:
-            scale       : Proportion of the requested dataset
-            normalize   : Normalize dataset or not (True)
-            expand      : Reshape images as (28,28,1) instead of (28,28) (True)
-            concatenate : Concatenate train and test sets (True)
-        returns:
-            x_data,y_data                   if concatenate is True
-            x_train,y_train,x_test,y_test   if concatenate is False
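-        example:
-            x_data, y_data = MNIST.get_origine(scale=0.1)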
-        """
-
-        # ---- Get data
-        #
-        (x_train, y_train), (x_test, y_test) = mnist.load_data()
-        print('Dataset loaded.')
-        
-        # ---- Normalization
-        #
-        if normalize:
-            x_train = x_train.astype('float32') / 255.
-            x_test  = x_test.astype( 'float32') / 255.
-            print('Normalized.')
-            
-        # ---- Reshape : (28,28) -> (28,28,1)
-        #
-        if expand:
-            x_train = np.expand_dims(x_train, axis=-1)
-            x_test  = np.expand_dims(x_test,  axis=-1)
-            print('Reshaped.')
-
-        # ---- scale
-        #
-        n1 = int(len(x_train)*scale)
-        n2 = int(len(x_test)*scale)
-        x_train = x_train[:n1]
-        y_train = y_train[:n1]
-        x_test  = x_test[:n2]
-        y_test  = y_test[:n2]
-
-        # ---- Concatenate
-        #
-        if concatenate:
-            x_data = np.concatenate([x_train, x_test], axis=0)
-            y_data = np.concatenate([y_train, y_test])
-            print('Concatenated.')
-            print('x shape :', x_data.shape)
-            print('y shape :', y_data.shape)
-            return x_data,y_data
-        else:
-            print('x_train shape :', x_train.shape)
-            print('y_train shape :', y_train.shape)
-            print('x_test  shape :', x_test.shape)
-            print('y_test  shape :', y_test.shape)
-            return x_train,y_train,x_test,y_test
-        
-        
-    @classmethod
-    def save_prepared_dataset(cls, clean_data, noisy_data, class_data, filename='./data/mnist-noisy.h5'):
-        """
-        Save a prepared dataset in a h5 file
-        args:
-            clean_data, noisy_data, class_data : clean, noisy and class dataset
-            filename                      : filename
-        return:
-            None
-        """
-        path=os.path.dirname(filename)
-        os.makedirs(path, mode=0o750, exist_ok=True)
-
-        with h5py.File(filename, "w") as f:
-            f.create_dataset("clean_data", data=clean_data)
-            f.create_dataset("noisy_data", data=noisy_data)
-            f.create_dataset("class_data", data=class_data)
-        print('Saved.')
-        print('clean_data shape is : ',clean_data.shape)
-        print('noisy_data shape is : ',noisy_data.shape)
-        print('class_data shape is : ',class_data.shape)
-            
-            
-    @classmethod    
-    def reload_prepared_dataset(cls, scale=1., train_prop=0.8, shuffle=True, seed=False, filename='./data/mnist-noisy.h5'):
-        """
-        Reload a saved dataset
-        args:
-            scale      : Scale of dataset to use. 1. means 100% (1.)
-            train_prop : Ratio of train/test
-            shuffle    : Shuffle data if True
-            seed       : Random seed value. False means no seed, None means using /dev/urandom (False)
-            filename   : filename of the prepared dataset
-        returns:
-            clean_train,clean_test, noisy_train,noisy_test, class_train,class_test
-        """
-        # ---- Load saved dataset
-        #
-        with  h5py.File(filename,'r') as f:
-            clean_data  = f['clean_data'][:]
-            noisy_data  = f['noisy_data'][:]
-            class_data  = f['class_data'][:]
-        print('Loaded.')
-        
-        # ---- Rescale
-        #
-        n = int(scale*len(clean_data))
-        clean_data, noisy_data, class_data = clean_data[:n], noisy_data[:n], class_data[:n]
-        print(f'Rescaled ({scale}).')
-        
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            print(f'Seeded ({seed})')
-        
-        # ---- Shuffle
-        #
-        if shuffle:
-            p = np.random.permutation(len(clean_data))
-            clean_data, noisy_data, class_data = clean_data[p], noisy_data[p], class_data[p]
-            print('Shuffled.')
-        
-        # ---- Split
-        #
-        n=int(len(clean_data)*train_prop)
-        clean_train, clean_test = clean_data[:n], clean_data[n:]
-        noisy_train, noisy_test = noisy_data[:n], noisy_data[n:]
-        class_train, class_test = class_data[:n], class_data[n:]
-        print(f'Split ({train_prop}).')
-
-        # ---- Hash
-        #
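-        #      A short blake2b digest fingerprints the exact content of the splits,
-        #      so that two runs can easily be checked for reproducibility.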
-        h = blake2b(digest_size=10)
-        for a in [clean_train,clean_test, noisy_train,noisy_test, class_train,class_test]:
-            h.update(a)
-        
-        print('clean_train shape is : ', clean_train.shape)
-        print('clean_test  shape is : ', clean_test.shape)
-        print('noisy_train shape is : ', noisy_train.shape)
-        print('noisy_test  shape is : ', noisy_test.shape)
-        print('class_train shape is : ', class_train.shape)
-        print('class_test  shape is : ', class_test.shape)
-        print('Blake2b digest is    : ', h.hexdigest())
-        return  clean_train,clean_test, noisy_train,noisy_test, class_train,class_test
\ No newline at end of file
diff --git a/DCGAN.Lightning/01-DCGAN-PL.ipynb b/DCGAN.Lightning/01-DCGAN-PL.ipynb
deleted file mode 100644
index b0153ec2f9729f0f78486650f464e190cfebd8b5..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/01-DCGAN-PL.ipynb
+++ /dev/null
@@ -1,462 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LSHEEP3] - A DCGAN to Draw a Sheep, using Pytorch Lightning\n",
-    "<!-- DESC --> \"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Build and train a DCGAN model with the Quick Draw dataset\n",
-    " - Understanding DCGAN\n",
-    "\n",
-    "The [Quick draw dataset](https://quickdraw.withgoogle.com/data) contains about 50,000,000 drawings, made by real people...  \n",
-    "We are using a subset of 117,555 sheep drawings.  \n",
-    "To get the dataset : [https://github.com/googlecreativelab/quickdraw-dataset](https://github.com/googlecreativelab/quickdraw-dataset)  \n",
-    "Datasets in numpy bitmap files : [https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap](https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap)   \n",
-    "Sheep dataset : [https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy](https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy) (94.3 MB)\n",
-    "\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Have a look at the dataset\n",
-    " - Define a GAN model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look at the results"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init and parameters\n",
-    "#### Python init"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import sys\n",
-    "import shutil\n",
-    "\n",
-    "import numpy as np\n",
-    "import torch\n",
-    "import torch.nn as nn\n",
-    "import torch.nn.functional as F\n",
-    "import torchvision\n",
-    "import torchvision.transforms as transforms\n",
-    "from lightning import LightningDataModule, LightningModule, Trainer\n",
-    "from lightning.pytorch.callbacks.progress.tqdm_progress import TQDMProgressBar\n",
-    "from lightning.pytorch.callbacks.progress.base          import ProgressBarBase\n",
-    "from lightning.pytorch.callbacks                        import ModelCheckpoint\n",
-    "from lightning.pytorch.loggers.tensorboard              import TensorBoardLogger\n",
-    "\n",
-    "from tqdm import tqdm\n",
-    "from torch.utils.data import DataLoader\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "from modules.SmartProgressBar    import SmartProgressBar\n",
-    "from modules.QuickDrawDataModule import QuickDrawDataModule\n",
-    "\n",
-    "from modules.GAN                 import GAN\n",
-    "from modules.WGANGP              import WGANGP\n",
-    "from modules.Generators          import *\n",
-    "from modules.Discriminators      import *\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LSHEEP3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### A few parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "latent_dim          = 128\n",
-    "\n",
-    "gan_class           = 'WGANGP'\n",
-    "generator_class     = 'Generator_2'\n",
-    "discriminator_class = 'Discriminator_3'    \n",
-    "    \n",
-    "scale               = 0.001\n",
-    "epochs              = 3\n",
-    "lr                  = 0.0001\n",
-    "b1                  = 0.5\n",
-    "b2                  = 0.999\n",
-    "batch_size          = 32\n",
-    "num_img             = 48\n",
-    "fit_verbosity       = 2\n",
-    "    \n",
-    "dataset_file        = datasets_dir+'/QuickDraw/origine/sheep.npy' \n",
-    "data_shape          = (28,28,1)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Cleaning"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# You can comment these lines to keep each run...\n",
-    "shutil.rmtree(f'{run_dir}/figs', ignore_errors=True)\n",
-    "shutil.rmtree(f'{run_dir}/models', ignore_errors=True)\n",
-    "shutil.rmtree(f'{run_dir}/tb_logs', ignore_errors=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get some nice data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Get a Nice DataModule\n",
-    "Our DataModule is defined in [./modules/QuickDrawDataModule.py](./modules/QuickDrawDataModule.py)   \n",
-    "This is a [LightningDataModule](https://pytorch-lightning.readthedocs.io/en/stable/data/datamodule.html)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "dm = QuickDrawDataModule(dataset_file, scale, batch_size, num_workers=8)\n",
-    "dm.setup()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Have a look"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "dl         = dm.train_dataloader()\n",
-    "batch_data = next(iter(dl))\n",
-    "\n",
-    "fidle.scrawler.images( batch_data.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Get a nice GAN model\n",
-    "\n",
-    "Our Generators are defined in [./modules/Generators.py](./modules/Generators.py)  \n",
-    "Our Discriminators are defined in [./modules/Discriminators.py](./modules/Discriminators.py)  \n",
-    "\n",
-    "\n",
-    "Our GAN is defined in [./modules/GAN.py](./modules/GAN.py)  \n",
-    "\n",
-    "#### Class loader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
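-    "# ---- Retrieve a class object, or an instance of it, from its name,\n",
-    "#      looked up in the notebook namespace\n",
-    "#\n",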
-    "def get_class(class_name):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    return class_\n",
-    "    \n",
-    "def get_instance(class_name, **args):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    instance_ = class_(**args)\n",
-    "    return instance_"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Basic test - Just to be sure it (could) work... ;-)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "# ---- A little piece of black magic to instantiate a class from its name\n",
-    "#\n",
-    "def get_classByName(class_name, **args):\n",
-    "    module=sys.modules['__main__']\n",
-    "    class_    = getattr(module, class_name)\n",
-    "    instance_ = class_(**args)\n",
-    "    return instance_\n",
-    "\n",
-    "# ---- Get them, and play with them\n",
-    "#\n",
-    "print('\\nInstantiation :\\n')\n",
-    "\n",
-    "Generator_     = get_class(generator_class)\n",
-    "Discriminator_ = get_class(discriminator_class)\n",
-    "\n",
-    "generator     = Generator_( latent_dim=latent_dim, data_shape=data_shape)\n",
-    "discriminator = Discriminator_( latent_dim=latent_dim, data_shape=data_shape)\n",
-    "\n",
-    "print('\\nFew tests :\\n')\n",
-    "z = torch.randn(batch_size, latent_dim)\n",
-    "print('z size        : ',z.size())\n",
-    "\n",
-    "fake_img = generator.forward(z)\n",
-    "print('fake_img      : ', fake_img.size())\n",
-    "\n",
-    "p = discriminator.forward(fake_img)\n",
-    "print('pred fake     : ', p.size())\n",
-    "\n",
-    "print('batch_data    : ',batch_data.size())\n",
-    "\n",
-    "p = discriminator.forward(batch_data)\n",
-    "print('pred real     : ', p.size())\n",
-    "\n",
-    "nimg = fake_img.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Manual check of the WGAN-GP style interpolation between real and fake images\n",
-    "#\n",
-    "print(fake_img.size())\n",
-    "print(batch_data.size())\n",
-    "e = torch.distributions.uniform.Uniform(0, 1).sample([batch_size, 1])\n",
-    "e = e[:, None, None]    # (batch_size,1) -> (batch_size,1,1,1), broadcastable with NHWC images\n",
-    "i = fake_img * e + (1 - e) * batch_data\n",
-    "\n",
-    "nimg = i.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### GAN model\n",
-    "To simplify our code, the GAN class is defined separately in the module [./modules/GAN.py](./modules/GAN.py)  \n",
-    "Passing the class names for the generator/discriminator as parameters allows us to stay modular and to use the PL checkpoints."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "GAN_           = get_class(gan_class)\n",
-    "\n",
-    "gan = GAN_( data_shape          = data_shape,\n",
-    "            lr                  = lr,\n",
-    "            b1                  = b1,\n",
-    "            b2                  = b2,\n",
-    "            batch_size          = batch_size, \n",
-    "            latent_dim          = latent_dim, \n",
-    "            generator_class     = generator_class, \n",
-    "            discriminator_class = discriminator_class)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train it !\n",
-    "#### Instantiate Callbacks, Logger & co.\n",
-    "More about :\n",
-    "- [Checkpoints](https://pytorch-lightning.readthedocs.io/en/stable/common/checkpointing_basic.html)\n",
-    "- [modelCheckpoint](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html#pytorch_lightning.callbacks.ModelCheckpoint)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "# ---- for tensorboard logs\n",
-    "#\n",
-    "logger       = TensorBoardLogger(       save_dir       = f'{run_dir}',\n",
-    "                                        name           = 'tb_logs'  )\n",
-    "\n",
-    "log_dir = os.path.abspath(f'{run_dir}/tb_logs')\n",
-    "print('To access the logs with tensorboard, use this command line :')\n",
-    "print(f'tensorboard --logdir {log_dir}')\n",
-    "\n",
-    "# ---- To save checkpoints\n",
-    "#\n",
-    "callback_checkpoints = ModelCheckpoint( dirpath        = f'{run_dir}/models', \n",
-    "                                        filename       = 'bestModel', \n",
-    "                                        save_top_k     = 1, \n",
-    "                                        save_last      = True,\n",
-    "                                        every_n_epochs = 1, \n",
-    "                                        monitor        = \"g_loss\")\n",
-    "\n",
-    "# ---- To have a nice progress bar\n",
-    "#\n",
-    "callback_progressBar = SmartProgressBar(verbosity=2)          # Usable everywhere\n",
-    "# progress_bar = TQDMProgressBar(refresh_rate=1)              # Usable in real jupyter lab (bug in vscode)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Train it"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "\n",
-    "trainer = Trainer(\n",
-    "    accelerator        = \"auto\",\n",
-    "    max_epochs         = epochs,\n",
-    "    callbacks          = [callback_progressBar, callback_checkpoints],\n",
-    "    log_every_n_steps  = batch_size,\n",
-    "    logger             = logger\n",
-    ")\n",
-    "\n",
-    "trainer.fit(gan, dm)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Reload our best model\n",
-    "Note : thanks to `save_hyperparameters()`, the hyperparameters are stored within the checkpoint, so `load_from_checkpoint()` can re-instantiate the model directly."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "gan = WGANGP.load_from_checkpoint(f'{run_dir}/models/bestModel.ckpt')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "nb_images = 96\n",
-    "\n",
-    "z = torch.randn(nb_images, latent_dim)\n",
-    "print('z size        : ',z.size())\n",
-    "\n",
-    "fake_img = gan.generator.forward(z)\n",
-    "print('fake_img      : ', fake_img.size())\n",
-    "\n",
-    "nimg = fake_img.detach().numpy()\n",
-    "fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(nb_images), columns=12, x_size=1, y_size=1, \n",
-    "                       y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "fidle-env",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/DCGAN.Lightning/modules/Discriminators.py b/DCGAN.Lightning/modules/Discriminators.py
deleted file mode 100644
index bdbaa79c08332bfcdd6c6a6e8ad3a4cee62f02e9..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/Discriminators.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     GAN / Discriminators
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-import numpy as np
-import torch.nn as nn
-
-class Discriminator_1(nn.Module):
-    '''
-    A basic DNN discriminator, usable with classic GAN
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 1     : ',data_shape,' to sigmoid')
-
-        self.model = nn.Sequential(
-
-            nn.Flatten(),
-            nn.Linear(int(np.prod(data_shape)), 512),
-            nn.ReLU(),
-            
-            nn.Linear(512, 256),
-            nn.ReLU(),
-
-            nn.Linear(256, 1),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, img):
-        validity = self.model(img)
-
-        return validity
-
-
-
-
-class Discriminator_2(nn.Module):
-    '''
-    A more efficient discriminator, based on a CNN, usable with a classic GAN
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 2     : ',data_shape,' to sigmoid')
-
-        self.model = nn.Sequential(
-
-            nn.Conv2d(1, 32, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(32),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(32, 64, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(64),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(128, 256, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-            nn.Dropout2d(0.25),
-
-            nn.Flatten(),
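-            # After the two stride-2 convs : 28x28 -> 14x14 -> 7x7, so 256*7*7 = 12544 features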
-            nn.Linear(12544, 1),
-            nn.Sigmoid(),
-        )
-
-    def forward(self, img):
-        img_nchw = img.permute(0, 3, 1, 2) # reformat from NHWC to NCHW
-        validity = self.model(img_nchw)
-
-        return validity
-
-
-        
-class Discriminator_3(nn.Module):
-    '''
-    A CNN discriminator, usable with a WGANGP.
-    This discriminator has no sigmoid and returns a critic (a raw score), not a probability
-    '''
-
-    def __init__(self, latent_dim=None, data_shape=None):
-    
-        super().__init__()
-        self.img_shape = data_shape
-        print('init discriminator 3     : ',data_shape,' to critic')
-
-        self.model = nn.Sequential(
-
-            nn.Conv2d(1, 32, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(32),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(32, 64, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(64),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(64, 128, kernel_size = 3, stride = 1, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-            nn.Dropout2d(0.25),
-
-            nn.Conv2d(128, 256, kernel_size = 3, stride = 2, padding = 1),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-            nn.Dropout2d(0.25),
-
-            nn.Flatten(),
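-            # 256*7*7 = 12544 features, as in Discriminator_2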
-            nn.Linear(12544, 1),
-        )
-
-    def forward(self, img):
-        img_nchw = img.permute(0, 3, 1, 2) # reformat from NHWC to NCHW
-        validity = self.model(img_nchw)
-
-        return validity
\ No newline at end of file
diff --git a/DCGAN.Lightning/modules/GAN.py b/DCGAN.Lightning/modules/GAN.py
deleted file mode 100644
index cf5a5697f5e259178411706c52decdef6f176eea..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/GAN.py
+++ /dev/null
@@ -1,182 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                GAN / GAN LightningModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import sys
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision
-from lightning import LightningModule
-
-
-class GAN(LightningModule):
-
-    # -------------------------------------------------------------------------
-    # Init
-    # -------------------------------------------------------------------------
-    #
-    def __init__(
-        self,
-        data_shape          = (None,None,None),
-        latent_dim          = None,
-        lr                  = 0.0002,
-        b1                  = 0.5,
-        b2                  = 0.999,
-        batch_size          = 64,
-        generator_class     = None,
-        discriminator_class = None,
-        **kwargs,
-    ):
-        super().__init__()
-
-        print('\n---- GAN initialization --------------------------------------------')
-
-        # ---- Hyperparameters
-        #
-        # Enable Lightning to store all the provided arguments under the self.hparams attribute.
-        # These hyperparameters will also be stored within the model checkpoint.
-        #
-        self.save_hyperparameters()
-
-        print('Hyperparameters are :')
-        for name,value in self.hparams.items():
-            print(f'{name:24s} : {value}')
-
-        # ---- Generator/Discriminator instantiation
-        #
-        # self.generator     = Generator(latent_dim=self.hparams.latent_dim, img_shape=data_shape)
-        # self.discriminator = Discriminator(img_shape=data_shape)
-
-        print('Submodels :')
-        module=sys.modules['__main__']
-        class_g = getattr(module, generator_class)
-        class_d = getattr(module, discriminator_class)
-        self.generator     = class_g( latent_dim=latent_dim, data_shape=data_shape)
-        self.discriminator = class_d( latent_dim=latent_dim, data_shape=data_shape)
-
-        # ---- Validation and example data
-        #
-        self.validation_z        = torch.randn(8, self.hparams.latent_dim)
-        self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
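-        # (example_input_array lets Lightning trace the model, e.g. to log its graph)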
-
-
-    def forward(self, z):
-        return self.generator(z)
-
-
-    def adversarial_loss(self, y_hat, y):
-        return F.binary_cross_entropy(y_hat, y)
-
-
-    def training_step(self, batch, batch_idx, optimizer_idx):
-        imgs       = batch
-        batch_size = batch.size(0)
-
-        # ---- Get some latent space vectors
-        #      We use type_as() to make sure we initialize z on the right device (GPU/CPU).
-        #
-        z = torch.randn(batch_size, self.hparams.latent_dim)
-        z = z.type_as(imgs)
-
-        # ---- Train generator
-        #      Generator use optimizer #0
-        #      We try to generate false images that could mislead the discriminator
-        #
-        if optimizer_idx == 0:
-
-            # Generate fake images
-            self.fake_imgs = self.generator.forward(z)
-
-            # Assemble labels that say all images are real, yes it's a lie ;-)
-            # put on GPU because we created this tensor inside training_loop
-            misleading_labels = torch.ones(batch_size, 1)
-            misleading_labels = misleading_labels.type_as(imgs)
-
-            # Adversarial loss is binary cross-entropy
-            g_loss = self.adversarial_loss(self.discriminator.forward(self.fake_imgs), misleading_labels)
-            self.log("g_loss", g_loss, prog_bar=True)
-            return g_loss
-
-        # ---- Train discriminator
-        #      Discriminator use optimizer #1
-        #      We try to make the difference between fake images and real ones 
-        #
-        if optimizer_idx == 1:
-            
-            # These images are reals
-            real_labels = torch.ones(batch_size, 1)
-            real_labels = real_labels.type_as(imgs)
-            pred_labels = self.discriminator.forward(imgs)
-
-            real_loss   = self.adversarial_loss(pred_labels, real_labels)
-
-            # These images are fake
-            fake_imgs   = self.generator.forward(z)
-            fake_labels = torch.zeros(batch_size, 1)
-            fake_labels = fake_labels.type_as(imgs)
-
-            fake_loss   = self.adversarial_loss(self.discriminator(fake_imgs.detach()), fake_labels)
-
-            # Discriminator loss is the average
-            d_loss = (real_loss + fake_loss) / 2
-            self.log("d_loss", d_loss, prog_bar=True)
-            return d_loss
-
-
-    def configure_optimizers(self):
-
-        lr = self.hparams.lr
-        b1 = self.hparams.b1
-        b2 = self.hparams.b2
-
-        # With a GAN, we need 2 separate optimizers.
-        # opt_g to optimize the generator      #0
-        # opt_d to optimize the discriminator  #1
-        # opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr, betas=(b1, b2))
-        # opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2),)
-        opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr)
-        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr)
-        return [opt_g, opt_d], []
-
-
-    def training_epoch_end(self, outputs):
-
-        # Get our validation latent vectors as z
-        # z = self.validation_z.type_as(self.generator.model[0].weight)
-
-        # ---- Log Graph
-        #
-        if(self.current_epoch==1):
-            sampleImg=torch.rand((1,28,28,1))
-            sampleImg=sampleImg.type_as(self.generator.model[0].weight)
-            self.logger.experiment.add_graph(self.discriminator,sampleImg)
-
-        # ---- Log d_loss/epoch
-        #
-        g_loss, d_loss = 0,0
-        for metrics in outputs:
-            g_loss+=float( metrics[0]['loss'] )
-            d_loss+=float( metrics[1]['loss'] )
-        g_loss, d_loss = g_loss/len(outputs), d_loss/len(outputs)
-        self.logger.experiment.add_scalar("g_loss/epochs",g_loss, self.current_epoch)
-        self.logger.experiment.add_scalar("d_loss/epochs",d_loss, self.current_epoch)
-
-        # ---- Log some of these images
-        #
-        z = torch.randn(self.hparams.batch_size, self.hparams.latent_dim)
-        z = z.type_as(self.generator.model[0].weight)
-        sample_imgs = self.generator(z)
-        sample_imgs = sample_imgs.permute(0, 3, 1, 2) # from NHWC to NCHW
-        grid = torchvision.utils.make_grid(tensor=sample_imgs, nrow=12, )
-        self.logger.experiment.add_image(f"Generated images", grid,self.current_epoch)
diff --git a/DCGAN.Lightning/modules/Generators.py b/DCGAN.Lightning/modules/Generators.py
deleted file mode 100644
index 9b104d579469f51dfda08b1332c9b100b6fddaa4..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/Generators.py
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                         GAN / Generators
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import numpy as np
-import torch.nn as nn
-
-
-class Generator_1(nn.Module):
-
-    def __init__(self, latent_dim=None, data_shape=None):
-        super().__init__()
-        self.latent_dim = latent_dim
-        self.img_shape  = data_shape
-        print('init generator 1         : ',latent_dim,' to ',data_shape)
-
-        self.model = nn.Sequential(
-            
-            nn.Linear(latent_dim, 128),
-            nn.ReLU(),
-
-            nn.Linear(128, 256),
-            nn.BatchNorm1d(256, momentum=0.8),   # momentum passed by keyword : positionally, 0.8 would set eps
-            nn.ReLU(),
-
-            nn.Linear(256, 512),
-            nn.BatchNorm1d(512, momentum=0.8),
-            nn.ReLU(),
-
-            nn.Linear(512, 1024),
-            nn.BatchNorm1d(1024, momentum=0.8),
-            nn.ReLU(),
-
-            nn.Linear(1024, int(np.prod(data_shape))),
-            nn.Sigmoid()
-
-        )
-
-
-    def forward(self, z):
-        img = self.model(z)
-        img = img.view(img.size(0), *self.img_shape)
-        return img
-
-
-
-class Generator_2(nn.Module):
-
-    def __init__(self, latent_dim=None, data_shape=None):
-        super().__init__()
-        self.latent_dim = latent_dim
-        self.img_shape  = data_shape
-        print('init generator 2         : ',latent_dim,' to ',data_shape)
-
-        self.model = nn.Sequential(
-            
-            nn.Linear(latent_dim, 7*7*64),
-            nn.Unflatten(1, (64,7,7)),
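-            # 7x7 -> 14x14 -> 28x28 via the two bilinear upsamplings below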
-            
-            # nn.UpsamplingNearest2d( scale_factor=2 ),
-            nn.UpsamplingBilinear2d( scale_factor=2 ),
-            nn.Conv2d( 64,128, (3,3), stride=(1,1), padding=(1,1) ),
-            nn.ReLU(),
-            nn.BatchNorm2d(128),
-
-            # nn.UpsamplingNearest2d( scale_factor=2 ),
-            nn.UpsamplingBilinear2d( scale_factor=2 ),
-            nn.Conv2d( 128,256, (3,3), stride=(1,1), padding=(1,1)),
-            nn.ReLU(),
-            nn.BatchNorm2d(256),
-
-            nn.Conv2d( 256,1, (5,5), stride=(1,1), padding=(2,2)),
-            nn.Sigmoid()
-
-        )
-
-    def forward(self, z):
-        img_nchw = self.model(z)
-        img_nhwc = img_nchw.permute(0, 2, 3, 1) # reformat from NCHW to NHWC
-        # img = img.view(img.size(0), *self.img_shape) # reformat from NCHW to NHWC
-        return img_nhwc
-
-
-
diff --git a/DCGAN.Lightning/modules/QuickDrawDataModule.py b/DCGAN.Lightning/modules/QuickDrawDataModule.py
deleted file mode 100644
index 34a4ecfba7e5d123a833e5a6e58d14e4d4903d53..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/QuickDrawDataModule.py
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                GAN / QuickDrawDataModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import numpy as np
-import torch
-from lightning import LightningDataModule
-from torch.utils.data import DataLoader
-
-
-class QuickDrawDataModule(LightningDataModule):
-
-
-    def __init__( self, dataset_file='./sheep.npy', scale=1., batch_size=64, num_workers=4 ):
-
-        super().__init__()
-
-        print('\n---- QuickDrawDataModule initialization ----------------------------')
-        print(f'with : scale={scale}  batch size={batch_size}')
-        
-        self.scale        = scale
-        self.dataset_file = dataset_file
-        self.batch_size   = batch_size
-        self.num_workers  = num_workers
-
-        self.dims         = (28, 28, 1)
-        self.num_classes  = 10
-
-
-
-    def prepare_data(self):
-        pass
-
-
-    def setup(self, stage=None):
-        print('\nDataModule Setup :')
-        # Load dataset
-        # Called at the beginning of each stage (train,val,test)
-        # Here, whatever the stage value, we'll have only one set.
-        data = np.load(self.dataset_file)
-        print('Original dataset shape : ',data.shape)
-
-        # Rescale
-        n=int(self.scale*len(data))
-        data = data[:n]
-        print('Rescaled dataset shape : ',data.shape)
-
-        # Normalize, reshape and shuffle
-        data = data/255
-        data = data.reshape(-1,28,28,1)
-        data = torch.from_numpy(data).float()
-        print('Final dataset shape    : ',data.shape)
-
-        print('Dataset loaded and ready.')
-        self.data_train = data
-
-
-    def train_dataloader(self):
-        # Note : a Numpy ndarray (or a torch tensor, as here) is Dataset compliant :
-        # it has a map-style interface. See https://pytorch.org/docs/stable/data.html
-        return DataLoader( self.data_train, batch_size=self.batch_size, num_workers=self.num_workers )
\ No newline at end of file
diff --git a/DCGAN.Lightning/modules/SmartProgressBar.py b/DCGAN.Lightning/modules/SmartProgressBar.py
deleted file mode 100644
index 3ebe192d0d9732da08125fa0503b2b6f7a59cf02..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/SmartProgressBar.py
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                   GAN / SmartProgressBar
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-from lightning.pytorch.callbacks.progress.base import ProgressBarBase
-from tqdm import tqdm
-import sys
-
-class SmartProgressBar(ProgressBarBase):
-
-    def __init__(self, verbosity=2):
-        super().__init__()
-        self.verbosity = verbosity
-
-    def disable(self):
-        self.enable = False
-
-
-    def setup(self, trainer, pl_module, stage):
-        super().setup(trainer, pl_module, stage)
-        self.stage = stage
-
-
-    def on_train_epoch_start(self, trainer, pl_module):
-        super().on_train_epoch_start(trainer, pl_module)
-        if not self.enable : return
-
-        if self.verbosity==2:
-            self.progress=tqdm( total=trainer.num_training_batches,
-                                desc=f'{self.stage} {trainer.current_epoch+1}/{trainer.max_epochs}', 
-                                ncols=100, ascii= " >", 
-                                bar_format='{l_bar}{bar}| [{elapsed}] {postfix}')
-
-
-
-    def on_train_epoch_end(self, trainer, pl_module):
-        super().on_train_epoch_end(trainer, pl_module)
-
-        if not self.enable : return
-
-        if self.verbosity==2:
-            self.progress.close()
-
-        if self.verbosity==1:
-            print(f'Train {trainer.current_epoch+1}/{trainer.max_epochs} Done.')
-
-
-    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
-        super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
-
-        if not self.enable : return
-        
-        if self.verbosity==2:
-            metrics = {}
-            for name, value in trainer.logged_metrics.items():
-                metrics[name] = f'{float(value):3.3f}'
-            self.progress.set_postfix(metrics)
-            self.progress.update(1)
-
-
-progress_bar = SmartProgressBar(verbosity=2)
diff --git a/DCGAN.Lightning/modules/WGANGP.py b/DCGAN.Lightning/modules/WGANGP.py
deleted file mode 100644
index 030740b562d2bbac62f17fc671e530fc9383ed7d..0000000000000000000000000000000000000000
--- a/DCGAN.Lightning/modules/WGANGP.py
+++ /dev/null
@@ -1,229 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|             GAN / WGANGP LightningModule
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG MIAI/EFELIA 2023 - https://fidle.cnrs.fr
-# ------------------------------------------------------------------
-# by JL Parouty (feb 2023) - PyTorch Lightning example
-
-
-import sys
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torchvision
-from lightning import LightningModule
-
-
-class WGANGP(LightningModule):
-
-    # -------------------------------------------------------------------------
-    # Init
-    # -------------------------------------------------------------------------
-    #
-    def __init__(
-        self,
-        data_shape          = (None,None,None),
-        latent_dim          = None,
-        lr                  = 0.0002,
-        b1                  = 0.5,
-        b2                  = 0.999,
-        batch_size          = 64,
-        lambda_gp           = 10,
-        generator_class     = None,
-        discriminator_class = None,
-        **kwargs,
-    ):
-        super().__init__()
-
-        print('\n---- WGANGP initialization -----------------------------------------')
-
-        # ---- Hyperparameters
-        #
-        # Enable Lightning to store all the provided arguments under the self.hparams attribute.
-        # These hyperparameters will also be stored within the model checkpoint.
-        #
-        self.save_hyperparameters()
-
-        print('Hyperparameters are :')
-        for name,value in self.hparams.items():
-            print(f'{name:24s} : {value}')
-
-        # ---- Generator/Discriminator instantiation
-        #
-        # self.generator     = Generator(latent_dim=self.hparams.latent_dim, img_shape=data_shape)
-        # self.discriminator = Discriminator(img_shape=data_shape)
-
-        print('Submodels :')
-        module=sys.modules['__main__']
-        class_g = getattr(module, generator_class)
-        class_d = getattr(module, discriminator_class)
-        self.generator     = class_g( latent_dim=latent_dim, data_shape=data_shape)
-        self.discriminator = class_d( latent_dim=latent_dim, data_shape=data_shape)
-
-        # ---- Validation and example data
-        #
-        self.validation_z        = torch.randn(8, self.hparams.latent_dim)
-        self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
-
-
-    def forward(self, z):
-        return self.generator(z)
-
-
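-    # Note : apparently not used by the WGAN-GP training step below,
-    # where the losses are built directly from the critic outputs.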
-    def adversarial_loss(self, y_hat, y):
-        return F.binary_cross_entropy(y_hat, y)
-
-
-
-# ------------------------------------------------------------------------------------ TO DO -------------------
-
-    # see : https://github.com/rosshemsley/gander/blob/main/gander/models/gan.py
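-    #
-    # As a reminder, the WGAN-GP discriminator loss (Gulrajani et al., 2017) is :
-    #     d_loss = E[D(fake)] - E[D(real)] + lambda_gp * E[ (||grad D(x^)||_2 - 1)^2 ]
-    # where x^ is a random interpolation between a real and a fake image ;
-    # gradient_penalty() below computes the per-sample penalty term of this loss.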
-
-    def gradient_penalty(self, real_images, fake_images):
-
-        batch_size = real_images.size(0)
-
-        # ---- Create interpolate images
-        #
-        # Get a random vector : size=([batch_size])
-        epsilon = torch.distributions.uniform.Uniform(0, 1).sample([batch_size])
-        # Add dimensions to match images batch : size=([batch_size,1,1,1])
-        epsilon = epsilon[:, None, None, None]
-        # Put epsilon on the right device, with the right dtype
-        epsilon = epsilon.type_as(real_images)
-        # Do interpolation
-        interpolates = epsilon * fake_images + ((1 - epsilon) * real_images)
-
-        # ---- Use autograd to compute gradient
-        #
-        # The key to making this work is including `create_graph`, this means that the computations
-        # in this penalty will be added to the computation graph for the loss function, so that the
-        # second partial derivatives will be correctly computed.
-        #
-        interpolates.requires_grad = True
-
-        pred_labels = self.discriminator.forward(interpolates)
-
-        gradients = torch.autograd.grad(  inputs       = interpolates,
-                                          outputs      = pred_labels, 
-                                          grad_outputs = torch.ones_like(pred_labels),
-                                          create_graph = True, 
-                                          only_inputs  = True )[0]
-
-        grad_flat   = gradients.view(batch_size, -1)
-        grad_norm   = torch.linalg.norm(grad_flat, dim=1)
-
-        grad_penalty = (grad_norm - 1) ** 2 
-
-        return grad_penalty
-
-
-
-# ------------------------------------------------------------------------------------------------------------------
-
-
-    def training_step(self, batch, batch_idx, optimizer_idx):
-
-        real_imgs  = batch
-        batch_size = batch.size(0)
-        lambda_gp  = self.hparams.lambda_gp
-
-        # ---- Get some latent space vectors and fake images
-        #      We use type_as() to make sure we initialize z on the right device (GPU/CPU).
-        #
-        z = torch.randn(batch_size, self.hparams.latent_dim)
-        z = z.type_as(real_imgs)
-        
-        fake_imgs = self.generator.forward(z)
-
-        # ---- Train generator
-        #      The generator uses optimizer #0
-        #      We try to generate fake images that get high critic scores
-        #
-        if optimizer_idx == 0:
-
-            # Get critics
-            critics = self.discriminator.forward(fake_imgs)
-
-            # Loss
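-            # Maximizing the critic score on fake images == minimizing its negation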
-            g_loss = -critics.mean()
-
-            # Log
-            self.log("g_loss", g_loss, prog_bar=True)
-
-            return g_loss
-
-        # ---- Train discriminator
-        #      The discriminator uses optimizer #1
-        #      We try to tell fake images apart from real ones
-        #
-        if optimizer_idx == 1:
-            
-            # Get critics
-            critics_real = self.discriminator.forward(real_imgs)
-            critics_fake = self.discriminator.forward(fake_imgs)
-
-            # Get gradient penalty
-            grad_penalty = self.gradient_penalty(real_imgs, fake_imgs)
-
-            # Loss
-            d_loss = critics_fake.mean() - critics_real.mean() + lambda_gp*grad_penalty.mean()
-
-            # Log loss
-            self.log("d_loss", d_loss, prog_bar=True)
-
-            return d_loss
-
-
-    def configure_optimizers(self):
-
-        lr = self.hparams.lr
-        b1 = self.hparams.b1
-        b2 = self.hparams.b2
-
-        # With a GAN, we need 2 separate optimizers.
-        # opt_g to optimize the generator      #0
-        # opt_d to optimize the discriminator  #1
-        # opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr, betas=(b1, b2))
-        # opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2),)
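-        # Note: the WGAN-GP paper uses Adam with betas=(0, 0.9); the default betas are kept here for simplicity.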
-        opt_g = torch.optim.Adam(self.generator.parameters(),     lr=lr)
-        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr)
-        return [opt_g, opt_d], []
-
-
-    def training_epoch_end(self, outputs):
-
-        # Get our validation latent vectors as z
-        # z = self.validation_z.type_as(self.generator.model[0].weight)
-
-        # ---- Log Graph
-        #
-        if self.current_epoch == 1:
-            sampleImg=torch.rand((1,28,28,1))
-            sampleImg=sampleImg.type_as(self.generator.model[0].weight)
-            self.logger.experiment.add_graph(self.discriminator,sampleImg)
-
-        # ---- Log d_loss/epoch
-        #
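-        # With two optimizers, each element of `outputs` corresponds to one batch and
-        # holds the dict returned for each optimizer (0: generator, 1: discriminator).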
-        g_loss, d_loss = 0,0
-        for metrics in outputs:
-            g_loss+=float( metrics[0]['loss'] )
-            d_loss+=float( metrics[1]['loss'] )
-        g_loss, d_loss = g_loss/len(outputs), d_loss/len(outputs)
-        self.logger.experiment.add_scalar("g_loss/epochs",g_loss, self.current_epoch)
-        self.logger.experiment.add_scalar("d_loss/epochs",d_loss, self.current_epoch)
-
-        # ---- Log some of these images
-        #
-        z = torch.randn(self.hparams.batch_size, self.hparams.latent_dim)
-        z = z.type_as(self.generator.model[0].weight)
-        sample_imgs = self.generator(z)
-        sample_imgs = sample_imgs.permute(0, 3, 1, 2) # from NHWC to NCHW
-        grid = torchvision.utils.make_grid(tensor=sample_imgs, nrow=12, )
-        self.logger.experiment.add_image("Generated images", grid, self.current_epoch)
diff --git a/GTSRB.Keras3/04-Keras-cv.ipynb b/GTSRB.Keras3/04-Keras-cv.ipynb
index 6c88dc768d1356d4c945d2b18e8516d5519990e4..105d48545bd5ace0d7f5e4a6bc538a66d9e37a81 100644
--- a/GTSRB.Keras3/04-Keras-cv.ipynb
+++ b/GTSRB.Keras3/04-Keras-cv.ipynb
@@ -20,9 +20,7 @@
     " See : https://keras.io/guides/keras_cv/classification_with_keras_cv/  \n",
     " Imagenet classes can be found at : https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json\n",
     "\n",
-    "## Step 1 - Import and init\n",
-    "\n",
-    "**ATTENTION :** A specific environment is required for this example (Which may require 6 GB).  \n",
+    "## ATTENTION : A specific environment is required for this example !\n",
     "This python environment required for this notebook is :\n",
     "```\n",
     "python3 -m venv fidle-kcv\n",
@@ -31,6 +29,8 @@
     "```\n",
     "Note: Tensorflow is not used for interference, and will no longer be required in later versions of Keras 3.\n",
     "\n",
+    "## Step 1 - Import and init\n",
+    "\n",
     "### 1.1 - Python stuffs"
    ]
   },
diff --git a/README.ipynb b/README.ipynb
index 7f16f1a9afd93eccf0d8e3e5edfb884f8ffa6beb..c51c15fc13d6cd103ea87626f3aebb6d75b67d6f 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "1f828036",
+   "id": "e3301300",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2024-01-21T16:21:09.860108Z",
-     "iopub.status.busy": "2024-01-21T16:21:09.859792Z",
-     "iopub.status.idle": "2024-01-21T16:21:09.870962Z",
-     "shell.execute_reply": "2024-01-21T16:21:09.870075Z"
+     "iopub.execute_input": "2024-01-23T09:53:31.520305Z",
+     "iopub.status.busy": "2024-01-23T09:53:31.519598Z",
+     "iopub.status.idle": "2024-01-23T09:53:31.530907Z",
+     "shell.execute_reply": "2024-01-23T09:53:31.530000Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -45,35 +45,30 @@
        "- **[Fidle site](https://fidle.cnrs.fr)**\n",
        "- **[Presentation of the training](https://fidle.cnrs.fr/presentation)**\n",
        "- **[Detailed program](https://fidle.cnrs.fr/programme)**\n",
-       "- [Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !\n",
-       "- [Find us on youtube](https://fidle.cnrs.fr/youtube)\n",
-       "- [Corrected notebooks](https://fidle.cnrs.fr/done)\n",
+       "- **[Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !**\n",
+       "- **[Corrected notebooks](https://fidle.cnrs.fr/done)**\n",
+       "- **[Follow us on our channel :](https://fidle.cnrs.fr/youtube)**\\\n",
+       "[<img width=\"120px\" style=\"vertical-align:middle\" src=\"fidle/img/logo-YouTube.png\"></img>](https://fidle.cnrs.fr/youtube)\n",
        "\n",
        "For more information, you can contact us at :  \n",
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
-       "Current Version : <!-- VERSION_BEGIN -->2.5.4<!-- VERSION_END -->\n",
+       "Current Version : <!-- VERSION_BEGIN -->3.0.1<!-- VERSION_END -->\n",
        "\n",
        "\n",
        "## Course materials\n",
        "\n",
        "| | | | |\n",
        "|:--:|:--:|:--:|:--:|\n",
-       "| **[<img width=\"50px\" src=\"fidle/img/00-Fidle-pdf.svg\"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width=\"50px\" src=\"fidle/img/00-Notebooks.svg\"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width=\"50px\" src=\"fidle/img/00-Datasets-tar.svg\"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width=\"50px\" src=\"fidle/img/00-Videos.svg\"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;|\n",
+       "| **[<img width=\"50px\" src=\"fidle/img/00-Fidle-pdf.svg\"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width=\"50px\" src=\"fidle/img/00-Notebooks.svg\"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width=\"50px\" src=\"fidle/img/00-Datasets-tar.svg\"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width=\"50px\" src=\"fidle/img/00-Videos.svg\"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|\n",
        "\n",
        "Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.\n",
        "\n",
        "\n",
        "## Jupyter notebooks\n",
        "\n",
-       "**NOTE :** The examples marked **\"obsolete\"** are still functional under Keras2/Tensorflow, \n",
-       "but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  \n",
-       "We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  \n",
-       "For these reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  \n",
-       "The world of Deep Learning is changing very fast !\n",
-       "\n",
        "<!-- TOC_BEGIN -->\n",
-       "<!-- Automatically generated on : 21/01/24 17:21:08 -->\n",
+       "<!-- Automatically generated on : 23/01/24 10:53:30 -->\n",
        "\n",
        "### Linear and logistic regression\n",
        "- **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  \n",
@@ -159,38 +154,6 @@
        "- **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  \n",
        "Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version\n",
        "\n",
-       "### Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)\n",
-       "- **[K2AE1](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)  \n",
-       "Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE2](AE.Keras2/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras2/02-AE-with-MNIST.ipynb)  \n",
-       "Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE3](AE.Keras2/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras2/03-AE-with-MNIST-post.ipynb)  \n",
-       "Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE4](AE.Keras2/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras2/04-ExtAE-with-MNIST.ipynb)  \n",
-       "Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2AE5](AE.Keras2/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras2/05-ExtAE-with-MNIST.ipynb)  \n",
-       "Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)\n",
-       "\n",
-       "### Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)\n",
-       "- **[K2VAE1](VAE.Keras2/01-VAE-with-MNIST.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras2/01-VAE-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using functional APPI, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2VAE2](VAE.Keras2/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras2/02-VAE-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-       "- **[K2VAE3](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)  \n",
-       "Visualization and analysis of the VAE's latent space of the dataset MNIST, using Keras 2 and Tensorflow (obsolete)\n",
-       "\n",
-       "### Generative network with Variational Autoencoder (VAE), using PyTorch Lightning\n",
-       "- **[LVAE1](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)** - [First VAE, using Lightning API (MNIST dataset)](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using Lightning API, with a latent space of small dimension, using PyTorch Lightning\n",
-       "- **[LVAE2](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)  \n",
-       "Construction and training of a VAE, using model subclass, with a latent space of small dimension, using PyTorch Lightninh\n",
-       "- **[LVAE3](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)  \n",
-       "Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning\n",
-       "\n",
-       "### Generative Adversarial Networks (GANs), using Lightning\n",
-       "- **[LSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  \n",
-       "\"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
-       "\n",
        "### Diffusion Model (DDPM) using PyTorch\n",
        "- **[DDPM1](DDPM.PyTorch/01-ddpm.ipynb)** - [Fashion MNIST Generation with DDPM](DDPM.PyTorch/01-ddpm.ipynb)  \n",
        "Diffusion Model example, to generate Fashion MNIST images.\n",
@@ -222,12 +185,6 @@
        "A scratchbook for small examples\n",
        "<!-- TOC_END -->\n",
        "\n",
-       "**NOTE :** The examples marked **\"obsolete\"** are still functional under Keras2/Tensorflow, \n",
-       "but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  \n",
-       "We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  \n",
-       "For these resaon, they are kept as examples, while we develop the Keras3/PyTorch versions.  \n",
-       "The world of Deep Learning is changing very fast !\n",
-       "\n",
        "## Installation\n",
        "\n",
        "Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.\n",
@@ -256,7 +213,7 @@
     "from IPython.display import display,Markdown\n",
     "display(Markdown(open('README.md', 'r').read()))\n",
     "#\n",
-    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 21/01/24 17:21:08"
+    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 23/01/24 10:53:30"
    ]
   }
  ],
diff --git a/README.md b/README.md
index 9295f816a0521507969df52c9b933e9f919f58c8..b3cd57aa6cb3e571c2f67a7f6204512ee8779f46 100644
--- a/README.md
+++ b/README.md
@@ -24,35 +24,30 @@ For more information, see **https://fidle.cnrs.fr** :
 - **[Fidle site](https://fidle.cnrs.fr)**
 - **[Presentation of the training](https://fidle.cnrs.fr/presentation)**
 - **[Detailed program](https://fidle.cnrs.fr/programme)**
-- [Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !
-- [Find us on youtube](https://fidle.cnrs.fr/youtube)
-- [Corrected notebooks](https://fidle.cnrs.fr/done)
+- **[Subscribe to the list](https://fidle.cnrs.fr/listeinfo), to stay informed !**
+- **[Corrected notebooks](https://fidle.cnrs.fr/done)**
+- **[Follow us on our channel :](https://fidle.cnrs.fr/youtube)**\
+[<img width="120px" style="vertical-align:middle" src="fidle/img/logo-YouTube.png"></img>](https://fidle.cnrs.fr/youtube)
 
 For more information, you can contact us at :  
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
-Current Version : <!-- VERSION_BEGIN -->2.5.4<!-- VERSION_END -->
+Current Version : <!-- VERSION_BEGIN -->3.0.1<!-- VERSION_END -->
 
 
 ## Course materials
 
 | | | | |
 |:--:|:--:|:--:|:--:|
-| **[<img width="50px" src="fidle/img/00-Fidle-pdf.svg"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width="50px" src="fidle/img/00-Notebooks.svg"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width="50px" src="fidle/img/00-Datasets-tar.svg"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width="50px" src="fidle/img/00-Videos.svg"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;<br>&nbsp;|
+| **[<img width="50px" src="fidle/img/00-Fidle-pdf.svg"></img><br>Course slides](https://fidle.cnrs.fr/supports)**<br>The course in pdf format<br>| **[<img width="50px" src="fidle/img/00-Notebooks.svg"></img><br>Notebooks](https://fidle.cnrs.fr/notebooks)**<br> &nbsp;&nbsp;&nbsp;&nbsp;Get a Zip or clone this repository &nbsp;&nbsp;&nbsp;&nbsp;<br>| **[<img width="50px" src="fidle/img/00-Datasets-tar.svg"></img><br>Datasets](https://fidle.cnrs.fr/datasets-fidle.tar)**<br>All the needed datasets<br>|**[<img width="50px" src="fidle/img/00-Videos.svg"></img><br>Videos](https://fidle.cnrs.fr/youtube)**<br>&nbsp;&nbsp;&nbsp;&nbsp;Our Youtube channel&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|
 
 Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.
 
 
 ## Jupyter notebooks
 
-**NOTE :** The examples marked **"obsolete"** are still functional under Keras2/Tensorflow, 
-but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  
-We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  
-For this reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  
-The world of Deep Learning is changing very fast !
-
 <!-- TOC_BEGIN -->
-<!-- Automatically generated on : 21/01/24 17:21:08 -->
+<!-- Automatically generated on : 23/01/24 10:53:30 -->
 
 ### Linear and logistic regression
 - **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  
@@ -138,38 +133,6 @@ Using a Tranformer to perform a sentiment analysis (IMDB) - Jean Zay version
 - **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  
Using a Transformer to perform a sentiment analysis (IMDB) - Colab version
 
-### Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
-- **[K2AE1](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras2/01-Prepare-MNIST-dataset.ipynb)  
-Episode 1: Preparation of a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE2](AE.Keras2/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras2/02-AE-with-MNIST.ipynb)  
-Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE3](AE.Keras2/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras2/03-AE-with-MNIST-post.ipynb)  
-Episode 2 : Using the previously trained autoencoder to denoise data, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE4](AE.Keras2/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras2/04-ExtAE-with-MNIST.ipynb)  
-Episode 4 : Construction of a denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)
-- **[K2AE5](AE.Keras2/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras2/05-ExtAE-with-MNIST.ipynb)  
-Episode 5 : Construction of an advanced denoiser and classifier model, using Keras 2 and Tensorflow (obsolete)
-
-### Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
-- **[K2VAE1](VAE.Keras2/01-VAE-with-MNIST.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras2/01-VAE-with-MNIST.ipynb)  
-Construction and training of a VAE, using functional API, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)  
-- **[K2VAE2](VAE.Keras2/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras2/02-VAE-with-MNIST.ipynb)  
-Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)
-- **[K2VAE3](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras2/03-VAE-with-MNIST-post.ipynb)  
-Visualization and analysis of the VAE's latent space of the dataset MNIST, using Keras 2 and Tensorflow (obsolete)
-
-### Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
-- **[LVAE1](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)** - [First VAE, using Lightning API (MNIST dataset)](VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb)  
-Construction and training of a VAE, using Lightning API, with a latent space of small dimension, using PyTorch Lightning
-- **[LVAE2](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb)  
-Construction and training of a VAE, using model subclass, with a latent space of small dimension, using PyTorch Lightning  
-- **[LVAE3](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb)  
-Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning
-
-### Generative Adversarial Networks (GANs), using Lightning
-- **[LSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  
-"Draw me a sheep", revisited with a DCGAN, using Pytorch Lightning
-
 ### Diffusion Model (DDPM) using PyTorch
 - **[DDPM1](DDPM.PyTorch/01-ddpm.ipynb)** - [Fashion MNIST Generation with DDPM](DDPM.PyTorch/01-ddpm.ipynb)  
 Diffusion Model example, to generate Fashion MNIST images.
@@ -201,12 +164,6 @@ PyTorch est l'un des principaux framework utilisé dans le Deep Learning
 A scratchbook for small examples
 <!-- TOC_END -->
 
-**NOTE :** The examples marked **"obsolete"** are still functional under Keras2/Tensorflow, 
-but cannot be run in the proposed environment, now based on Keras3, PyTorch and Lightning.  
-We have decided to consider Keras2/Tensorflow as pedagogically obsolete, although Keras2 and Tensorflow are still perfectly usable (January 2024).  
-For this reason, they are kept as examples, while we develop the Keras3/PyTorch versions.  
-The world of Deep Learning is changing very fast !
-
 ## Installation
 
 Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)** these notebooks and datasets.
diff --git a/VAE.Keras2/01-VAE-with-MNIST.ipynb b/VAE.Keras2/01-VAE-with-MNIST.ipynb
deleted file mode 100644
index ebddf5aa95fce835cbfd926708094ed4c8f18ba7..0000000000000000000000000000000000000000
--- a/VAE.Keras2/01-VAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,410 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE1] - First VAE, using functional API (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using functional APPI, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding **Keras functional API**, using two custom layers\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n",
-    "\n",
-    "## Acknowledgements :\n",
-    "Thanks to **François Chollet** who is at the base of this example (and the creator of Keras !!).  \n",
-    "See : https://keras.io/examples/generative/vae\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import TensorBoard\n",
-    "\n",
-    "from modules.layers    import SamplingLayer, VariationalLossLayer\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import sys\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE1')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : With scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !\\\n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !\\\n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.001\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 2\n",
-    "loss_weights  = [1,.001]\n",
-    "\n",
-    "scale         = 0.2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 10\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use the **functional API.**  \n",
-    "For this, we will use two custom layers :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_log_var - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VariationalLossLayer`, which allows us to calculate the loss function, loss - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
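-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As an illustration, here is a minimal sketch of what such a sampling layer can look like, based on the classic reparameterization trick (an assumption, for illustration only - the real implementation is in [SamplingLayer.py](./modules/layers/SamplingLayer.py)) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# A minimal sketch, for illustration only - see SamplingLayer.py for the real layer\n",
-    "class MySamplingLayer(keras.layers.Layer):\n",
-    "    def call(self, inputs):\n",
-    "        z_mean, z_log_var = inputs\n",
-    "        # Reparameterization trick : z = mean + std*epsilon, with epsilon ~ N(0,1),\n",
-    "        # so that sampling stays differentiable w.r.t. z_mean and z_log_var\n",
-    "        epsilon = tf.random.normal(shape=tf.shape(z_mean))\n",
-    "        return z_mean + tf.exp(0.5 * z_log_var) * epsilon"
-   ]
-  },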
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, strides=1, padding=\"same\", activation=\"relu\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "\n",
-    "z_mean    = layers.Dense(latent_dim, name=\"z_mean\")(x)\n",
-    "z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\n",
-    "z         = SamplingLayer()([z_mean, z_log_var])\n",
-    "\n",
-    "encoder = keras.Model(inputs, [z_mean, z_log_var, z], name=\"encoder\")\n",
-    "# encoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1,  3, padding=\"same\", activation=\"sigmoid\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "\n",
-    "# decoder.summary()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "\n",
-    "We will calculate the loss with a specific layer: `VariationalLossLayer` - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
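-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As an illustration, such a loss layer can be sketched as below, using the standard VAE reconstruction and KL terms (an assumption, for illustration only - the real implementation is in [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# A minimal sketch, for illustration only - see VariationalLossLayer.py for the real layer\n",
-    "class MyVariationalLossLayer(keras.layers.Layer):\n",
-    "    def __init__(self, loss_weights=(1,.001), **kwargs):\n",
-    "        super().__init__(**kwargs)\n",
-    "        self.k1, self.k2 = loss_weights\n",
-    "    def call(self, inputs):\n",
-    "        x, z_mean, z_log_var, y = inputs\n",
-    "        # Reconstruction loss : pixel-wise binary cross-entropy\n",
-    "        r_loss  = self.k1 * tf.reduce_mean( keras.losses.binary_crossentropy(x, y) )\n",
-    "        # KL divergence between N(z_mean, exp(z_log_var)) and N(0,1)\n",
-    "        kl_loss = -0.5 * self.k2 * tf.reduce_mean( 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var) )\n",
-    "        self.add_loss(r_loss + kl_loss)\n",
-    "        return y"
-   ]
-  },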
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\n",
-    "inputs = keras.Input(shape=(28, 28, 1))\n",
-    "\n",
-    "z_mean, z_log_var, z = encoder(inputs)\n",
-    "outputs              = decoder(z)\n",
-    "\n",
-    "outputs = VariationalLossLayer(loss_weights=loss_weights)([inputs, z_mean, z_log_var, outputs])\n",
-    "\n",
-    "vae=keras.Model(inputs,outputs)\n",
-    "\n",
-    "vae.compile(optimizer='adam', loss=None)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : qui va sauvegarder des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "callback_images      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "callback_bestmodel   = BestModelCallback( run_dir + '/models/best_model.h5' )\n",
-    "callback_tensorboard = TensorBoard(log_dir=run_dir + '/logs', histogram_freq=1)\n",
-    "\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = vae.fit(x_data, epochs=epochs, batch_size=batch_size, callbacks=callbacks_list, verbose=fit_verbosity)\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={\"Loss\":['loss','r_loss', 'kl_loss']}, save_as='history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = callback_images.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-generated')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Annexe - Model Save and reload "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "filename = run_dir+'/models/my_model.h5'\n",
-    "\n",
-    "# ---- Save our model :\n",
-    "\n",
-    "vae.save(filename)\n",
-    "\n",
-    "# ---- Reload it\n",
-    "\n",
-    "vae = keras.models.load_model(filename, custom_objects={'SamplingLayer': SamplingLayer, 'VariationalLossLayer':VariationalLossLayer})\n",
-    "\n",
-    "# ---- Retrieve a layer\n",
-    "\n",
-    "decoder = vae.get_layer('decoder')\n",
-    "\n",
-    "img = decoder( np.array([[-1,.1]]))\n",
-    "fidle.scrawler.images(np.array(img), x_size=2,y_size=2, save_as='04-example')\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/02-VAE-with-MNIST.ipynb b/VAE.Keras2/02-VAE-with-MNIST.ipynb
deleted file mode 100644
index e1bc1e1a4d380ecfb8de4578232ceb52f8186a94..0000000000000000000000000000000000000000
--- a/VAE.Keras2/02-VAE-with-MNIST.ipynb
+++ /dev/null
@@ -1,505 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE2] - VAE, using a custom model class  (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using model subclass, with a latent space of small dimension, using Keras 2 and Tensorflow (obsolete)\n",
-    "\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding a still more **advanced programming model**, using a **custom model**\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n",
-    "\n",
-    "## Acknowledgements :\n",
-    "Thanks to **François Chollet** who is at the base of this example (and the creator of Keras !!).  \n",
-    "See : https://keras.io/examples/generative/vae\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import scipy.stats\n",
-    "import sys\n",
-    "\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "from tensorflow.keras import layers\n",
-    "from tensorflow.keras.callbacks import TensorBoard\n",
-    "\n",
-    "from modules.models    import VAE\n",
-    "from modules.layers    import SamplingLayer\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE2')\n",
-    "\n",
-    "VAE.about()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : with scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !  \n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !  \n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.01\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 6\n",
-    "loss_weights  = [1,.001]       # [1, .001] give good results\n",
-    "\n",
-    "scale         = .2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 5\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use a **custom model**.\n",
-    "For this, we will use :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_log_var - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VAE`, a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
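-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "As an illustration, the `train_step` of such a custom model can be sketched as below, reusing the weighted `r_loss`/`kl_loss` described above (an assumption, for illustration only - the real implementation is in [VAE.py](./modules/models/VAE.py)) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# A minimal sketch, for illustration only - see VAE.py for the real model\n",
-    "class MyVAE(keras.Model):\n",
-    "    def __init__(self, encoder=None, decoder=None, loss_weights=(1,.001), **kwargs):\n",
-    "        super().__init__(**kwargs)\n",
-    "        self.encoder, self.decoder = encoder, decoder\n",
-    "        self.k1, self.k2 = loss_weights\n",
-    "    def train_step(self, x):\n",
-    "        with tf.GradientTape() as tape:\n",
-    "            z_mean, z_log_var, z = self.encoder(x)\n",
-    "            y = self.decoder(z)\n",
-    "            r_loss  = self.k1 * tf.reduce_mean( keras.losses.binary_crossentropy(x, y) )\n",
-    "            kl_loss = -0.5 * self.k2 * tf.reduce_mean( 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var) )\n",
-    "            loss    = r_loss + kl_loss\n",
-    "        grads = tape.gradient(loss, self.trainable_weights)\n",
-    "        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n",
-    "        return {'loss':loss, 'r_loss':r_loss, 'kl_loss':kl_loss}"
-   ]
-  },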
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs    = keras.Input(shape=(28, 28, 1))\n",
-    "x         = layers.Conv2D(32, 3, strides=1, padding=\"same\", activation=\"relu\")(inputs)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Conv2D(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x         = layers.Flatten()(x)\n",
-    "x         = layers.Dense(16, activation=\"relu\")(x)\n",
-    "\n",
-    "z_mean    = layers.Dense(latent_dim, name=\"z_mean\")(x)\n",
-    "z_log_var = layers.Dense(latent_dim, name=\"z_log_var\")(x)\n",
-    "z         = SamplingLayer()([z_mean, z_log_var])\n",
-    "\n",
-    "encoder = keras.Model(inputs, [z_mean, z_log_var, z], name=\"encoder\")\n",
-    "encoder.compile()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "inputs  = keras.Input(shape=(latent_dim,))\n",
-    "x       = layers.Dense(7 * 7 * 64, activation=\"relu\")(inputs)\n",
-    "x       = layers.Reshape((7, 7, 64))(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=1, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(64, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "x       = layers.Conv2DTranspose(32, 3, strides=2, padding=\"same\", activation=\"relu\")(x)\n",
-    "outputs = layers.Conv2DTranspose(1,  3, padding=\"same\", activation=\"sigmoid\")(x)\n",
-    "\n",
-    "decoder = keras.Model(inputs, outputs, name=\"decoder\")\n",
-    "decoder.compile()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "`VAE` is a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae = VAE(encoder, decoder, loss_weights)\n",
-    "\n",
-    "vae.compile(optimizer='adam')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : qui va sauvegarder des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "callback_images      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "callback_bestmodel   = BestModelCallback( run_dir + '/models/best_model.h5' )\n",
-    "callback_tensorboard = TensorBoard(log_dir=run_dir + '/logs', histogram_freq=1)\n",
-    "\n",
-    "callbacks_list = [callback_images, callback_bestmodel]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "history = vae.fit(x_data, epochs=epochs, batch_size=batch_size, callbacks=callbacks_list, verbose=fit_verbosity)\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.scrawler.history(history,  plot={\"Loss\":['loss','r_loss', 'kl_loss']}, save_as='history')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = callback_images.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-original')\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder) during training"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='04-encoded')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Model evaluation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.1 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae=VAE()\n",
-    "vae.reload(f'{run_dir}/models/best_model')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder.predict(x_show)\n",
-    "x_reconst         = vae.decoder.predict(z)\n",
-    "\n",
-    "# ---- Show it\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='05-original')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='06-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Visualization of the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = int(20000*scale)\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder.predict(x_show)\n",
-    "\n",
-    "# ---- Show them\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('07-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Generative latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "    \n",
-    "    grid_size   = 18\n",
-    "    grid_scale  = 1\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Draw latentspoints and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(10, 8))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('08-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "\n",
-    "    x_reconst = vae.decoder.predict([grid])\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='09-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/03-VAE-with-MNIST-post.ipynb b/VAE.Keras2/03-VAE-with-MNIST-post.ipynb
deleted file mode 100644
index 4cf60f2e75a7bc67e90fdac0fd9970385b95b8c9..0000000000000000000000000000000000000000
--- a/VAE.Keras2/03-VAE-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,339 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K2VAE3] - Analysis of the VAE's latent space of MNIST dataset\n",
-    "<!-- DESC --> Visualization and analysis of the VAE's latent space of the dataset MNIST, using Keras 2 and Tensorflow (obsolete)\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - First data generation from **latent space** \n",
-    " - Understanding of underlying principles\n",
-    " - Model management\n",
-    "\n",
-    "Here, we don't consume data anymore, but we generate them ! ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load a saved model\n",
-    " - Reconstruct some images\n",
-    " - Latent space visualization\n",
-    " - Matrix of generated images\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import numpy as np\n",
-    "import tensorflow as tf\n",
-    "from tensorflow import keras\n",
-    "\n",
-    "from modules.models    import VAE\n",
-    "from modules.datagen   import MNIST\n",
-    "\n",
-    "import scipy.stats\n",
-    "import matplotlib\n",
-    "import matplotlib.pyplot as plt\n",
-    "from barviz import Simplex\n",
-    "from barviz import Collection\n",
-    "\n",
-    "import sys\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('K2VAE3')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scale      = 1\n",
-    "seed       = 123\n",
-    "models_dir = './run/VAE2'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('scale', 'seed', 'models_dir')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae=VAE()\n",
-    "vae.reload(f'{models_dir}/models/best_model')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder.predict(x_show, verbose=0)\n",
-    "x_reconst         = vae.decoder.predict(z,      verbose=0)\n",
-    "\n",
-    "latent_dim        = z.shape[1]\n",
-    "\n",
-    "# ---- Show it\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.utils.subtitle('Originals :')\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='01-original')\n",
-    "fidle.utils.subtitle('Reconstructed :')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='02-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Visualizing the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 20000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder.predict(x_show, verbose=0)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.1 - Classic 2d visualisaton"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('03-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Simplex visualisaton"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim<4:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is greater than 3')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    # ---- Softmax rescale\n",
-    "    #\n",
-    "    zs = np.exp(z)/np.sum(np.exp(z),axis=1,keepdims=True)\n",
-    "    # zc  = zs * 1/np.max(zs)\n",
-    "\n",
-    "    # ---- Create collection\n",
-    "    #\n",
-    "    c = Collection(zs, colors=y_show, labels=y_show)\n",
-    "    c.attrs.markers_colormap     = {'colorscale':'Rainbow','cmin':0,'cmax':latent_dim}\n",
-    "    c.attrs.markers_size         = 5\n",
-    "    c.attrs.markers_border_width = 0\n",
-    "    c.attrs.markers_opacity      = 0.8\n",
-    "\n",
-    "    s = Simplex.build(latent_dim)\n",
-    "    s.attrs.width  = 1000\n",
-    "    s.attrs.height = 1000\n",
-    "    s.plot(c)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Generate from latent space (latent_dim==2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    grid_size   = 14\n",
-    "    grid_scale  = 1.\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Draw latentspoints and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(12, 10))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('04-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "\n",
-    "    x_reconst = vae.decoder.predict([grid])\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='05-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3.9.2 ('fidle-env')",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Keras2/modules/callbacks/BestModelCallback.py b/VAE.Keras2/modules/callbacks/BestModelCallback.py
deleted file mode 100644
index 8ec462eba8a2cca7e129b0c42a8a76fad3ebb84a..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/callbacks/BestModelCallback.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                        BestModelCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import os
-
-                
-class BestModelCallback(Callback):
-
-    def __init__(self, filename= './run_dir/best-model.h5', verbose=0 ):
-        self.filename = filename
-        self.verbose  = verbose
-        self.loss     = np.Inf
-        os.makedirs( os.path.dirname(filename), mode=0o750, exist_ok=True)
-                
-    def on_train_begin(self, logs=None):
-        self.loss = np.Inf
-        
-    def on_epoch_end(self, epoch, logs=None):
-        current = logs.get("loss")
-        if current < self.loss:
-            self.loss = current
-            self.model.save(self.filename)
-            if self.verbose>0: print(f'Saved - loss={current:.6f}')
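-
-
-# Minimal usage sketch (hypothetical names, not part of the original module) :
-#
-#   bestmodel_callback = BestModelCallback(filename=f'{run_dir}/models/best_model.h5', verbose=1)
-#   model.fit(x_train, x_train, epochs=epochs, callbacks=[bestmodel_callback])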
diff --git a/VAE.Keras2/modules/callbacks/ImagesCallback.py b/VAE.Keras2/modules/callbacks/ImagesCallback.py
deleted file mode 100644
index 85bdb546b1093cc129d5b61b33c1c99579c91adf..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/callbacks/ImagesCallback.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            ImageCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-from tensorflow.keras.callbacks import Callback
-import numpy as np
-import matplotlib.pyplot as plt
-from skimage import io
-import os
-
-class ImagesCallback(Callback):
-    '''
-    Save generated (random mode) or encoded/decoded (z mode) images on epoch end.
-    params:
-        x           : input images, for z mode (None)
-        z_dim       : size of the latent space, for random mode (None)
-        nb_images   : number of images to save
-        from_z      : save images from z (False)
-        from_random : save images from random (False)
-        filename    : images filename
-        run_dir     : output directory to save images        
-    '''
-    
-   
-    def __init__(self, x           = None,
-                       z_dim       = None,
-                       nb_images   = 5,
-                       from_z      = False, 
-                       from_random = False,
-                       filename    = 'image-{epoch:03d}-{i:02d}.jpg',
-                       run_dir     = './run'):
-        
-        # ---- Parameters
-        #
-        
-        self.x = None if x is None else x[:nb_images]
-        self.z_dim       = z_dim
-        
-        self.nb_images   = nb_images
-        self.from_z      = from_z
-        self.from_random = from_random
-
-        self.filename_z       = run_dir + '/images-z/'      + filename
-        self.filename_random  = run_dir + '/images-random/' + filename
-        
-        if from_z:      os.makedirs( run_dir + '/images-z/',     mode=0o750, exist_ok=True)
-        if from_random: os.makedirs( run_dir + '/images-random/', mode=0o750, exist_ok=True)
-        
-    
-    
-    def save_images(self, images, filename, epoch):
-        '''Save images as <filename>'''
-        
-        for i,image in enumerate(images):
-            
-            image = image.squeeze()  # Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
-        
-            filenamei = filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filenamei, image, cmap='gray_r')
-            else:
-                plt.imsave(filenamei, image)
-
-    
-    
-    def on_epoch_end(self, epoch, logs={}):
-        '''Called at the end of each epoch'''
-        
-        encoder     = self.model.get_layer('encoder')
-        decoder     = self.model.get_layer('decoder')
-
-        if self.from_random:
-            z      = np.random.normal( size=(self.nb_images,self.z_dim) )
-            images = decoder.predict(z)
-            self.save_images(images, self.filename_random, epoch)
-            
-        if self.from_z:
-            z_mean, z_var, z  = encoder.predict(self.x)
-            images            = decoder.predict(z)
-            self.save_images(images, self.filename_z, epoch)
-
-
-    def get_images(self, epochs=None, from_z=True,from_random=True):
-        '''Read and return saved images. epochs is a range'''
-        if epochs is None : return
-        images_z = []
-        images_r = []
-        for epoch in list(epochs):
-            for i in range(self.nb_images):
-                if from_z:
-                    f = self.filename_z.format(epoch=epoch,i=i)
-                    images_z.append( io.imread(f) )
-                if from_random:
-                    f = self.filename_random.format(epoch=epoch,i=i)
-                    images_r.append( io.imread(f) )
-        return images_z, images_r
-            
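-
-# Sketch of typical wiring (hypothetical names, mirroring how the notebooks use this callback) :
-#
-#   callback_images = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5,
-#                                    from_z=True, from_random=True, run_dir=run_dir)
-#   history = model.fit(x_data, x_data, epochs=epochs, callbacks=[callback_images])
-#   images_z, images_r = callback_images.get_images( range(0,epochs,2) )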
diff --git a/VAE.Keras2/modules/callbacks/__init__.py b/VAE.Keras2/modules/callbacks/__init__.py
deleted file mode 100644
index 99114d967cb8c577cf75c3a701e85ac3786dd100..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/callbacks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.callbacks.BestModelCallback import BestModelCallback
-from modules.callbacks.ImagesCallback    import ImagesCallback
\ No newline at end of file
diff --git a/VAE.Keras2/modules/datagen/DataGenerator.py b/VAE.Keras2/modules/datagen/DataGenerator.py
deleted file mode 100644
index b1c7752ded90decee3f2ae7f1bfca070c0a4f29e..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/datagen/DataGenerator.py
+++ /dev/null
@@ -1,148 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/       DataGenerator
-#    |_|   |_|\__,_|_|\___|       for clustered CelebA dataset
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# Initial version by JL Parouty, feb 2020
-
-
-import numpy as np
-import pandas as pd
-import math
-import os,glob
-import tensorflow as tf
-from tensorflow.keras.utils import Sequence
-from IPython.display import display,Markdown
-
-class DataGenerator(Sequence):
-
-    version = '0.4.1'
-    
-    def __init__(self, clusters_dir='./data', batch_size=32, debug=False, scale=1):
-        '''
-        Instantiation of the data generator
-        args:
-            clusters_dir : directory of the cluster files
-            batch_size   : batch size (32)
-            debug        : debug mode (False)
-            scale        : scale of dataset to use. 1. means 100% (1.)
-        '''
-        if debug : self.about()
-        #
-        # ---- Get the list of clusters
-        #      
-        clusters_name = [ os.path.splitext(f)[0] for f in glob.glob( f'{clusters_dir}/*.npy') ]
-        clusters_size = len(clusters_name)
-        #
-        # ---- Read each cluster description
-        #      because we need the full dataset size
-        #
-        dataset_size  = 0
-        for c in clusters_name:
-            df = pd.read_csv(c+'.csv', header=0)
-            dataset_size+=len(df.index)
-        #
-        # ---- If we only want to use a part of the dataset...
-        #
-        dataset_size = int(dataset_size * scale)
-        #
-        if debug: 
-            print(f'\nClusters nb  : {len(clusters_name)} files')
-            print(f'Dataset size : {dataset_size}')
-            print(f'Batch size   : {batch_size}')
-
-        #
-        # ---- Remember all of that
-        #
-        self.clusters_dir  = clusters_dir
-        self.batch_size    = batch_size
-        self.clusters_name = clusters_name
-        self.clusters_size = clusters_size
-        self.dataset_size  = dataset_size
-        self.debug         = debug
-        #
-        # ---- Read a first cluster
-        #
-        self.rewind()
-    
-    
-    def rewind(self):
-        self.cluster_i = self.clusters_size
-        self.read_next_cluster()
-
-        
-    def __len__(self):
-        return math.floor(self.dataset_size / self.batch_size)
-
-    
-    def __getitem__(self, idx):
-        #
-        # ---- Get the next item index
-        #
-        i=self.data_i
-        #
-        # ---- Get a batch
-        #
-        batch = self.data[i:i+self.batch_size]
-        #
-        # ---- Cluster is large enough
-        #
-        if len(batch) == self.batch_size:
-            self.data_i += self.batch_size
-            if self.debug: print(f'({len(batch)}) ',end='')
-            return batch,batch
-        #
-        # ---- Not enough...
-        #
-        if self.debug: print(f'({len(batch)}..) ',end='')
-        #
-        self.read_next_cluster()
-        batch2 = self.data[ 0:self.batch_size-len(batch) ]
-        self.data_i = self.batch_size-len(batch)
-        batch  = np.concatenate( (batch,batch2) )
-        #
-        if self.debug: print(f'(..{len(batch2)}) ',end='')
-        return batch, batch
-    
-    
-    def on_epoch_end(self):
-        self.rewind()
-    
-    
-    def read_next_cluster(self):
-        #
-        # ---- Get the next cluster name
-    #      If we have reached the end of the list, we shuffle and
-    #      start again from the beginning.
-        #
-        i = self.cluster_i + 1
-        if i >= self.clusters_size:
-            np.random.shuffle(self.clusters_name)
-            i = 0
-            if self.debug : print(f'\n[shuffle!]')
-        #
-        # ---- Read it (images still normalized)
-        #
-        data = np.load( self.clusters_name[i]+'.npy', mmap_mode='r' )
-        #
-        # ---- Remember all of that
-        #
-        self.data      = data
-        self.data_i    = 0
-        self.cluster_i = i
-        #
-        if self.debug: print(f'\n[Load {self.cluster_i:02d},s={len(self.data):3d}] ',end='')
-          
-        
-    @classmethod
-    def about(cls):
-        display(Markdown('<br>**FIDLE 2020 - DataGenerator**'))
-        print('Version              :', cls.version)
-        print('TensorFlow version   :', tf.__version__)
-        print('Keras version        :', tf.keras.__version__)
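-
-
-# Minimal usage sketch (hypothetical paths, not part of the original module) :
-#
-#   datagen = DataGenerator(clusters_dir='./data/clusters', batch_size=32, scale=0.1)
-#   model.fit(datagen, epochs=10)   # the Sequence yields (batch,batch) pairs, as expected by an autoencoder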
diff --git a/VAE.Keras2/modules/datagen/MNIST.py b/VAE.Keras2/modules/datagen/MNIST.py
deleted file mode 100644
index f0ca276a7521a546df4ad768ac3ec1c2254b0a5c..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/datagen/MNIST.py
+++ /dev/null
@@ -1,114 +0,0 @@
-
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# 2.0 version by JL Parouty, feb 2021
-
-import h5py
-import os
-import numpy as np
-from hashlib import blake2b
-import tensorflow as tf
-import tensorflow.keras.datasets.mnist as mnist
-
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class also handles datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_data(cls, normalize=True, expand=True, scale=1., train_prop=0.8, shuffle=True, seed=None):
-        """
-        Return original MNIST dataset
-        args:
-            normalize  : Normalize dataset or not (True)
-            expand     : Reshape images as (28,28,1) instead of (28,28) (True)
-            scale      : Scale of dataset to use. 1. means 100% (1.)
-            train_prop : Ratio of train/test (0.8)
-            shuffle    : Shuffle data if True (True)
-            seed       : Random seed value. False means no seed, None means using /dev/urandom (None)
-        returns:
-            x_train,y_train,x_test,y_test
-        """
-
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            print(f'Seeded ({seed})')
-
-        # ---- Get data
-        #
-        (x_train, y_train), (x_test, y_test) = mnist.load_data()
-        print('Dataset loaded.')
-        
-        # ---- Concatenate
-        #
-        x_data = np.concatenate([x_train, x_test], axis=0)
-        y_data = np.concatenate([y_train, y_test])
-        print('Concatenated.')
-
-        # ---- Shuffle
-        #
-        if shuffle:
-            p = np.random.permutation(len(x_data))
-            x_data, y_data = x_data[p], y_data[p]
-            print('Shuffled.')     
-        
-        # ---- Rescale
-        #
-        n = int(scale*len(x_data))
-        x_data, y_data = x_data[:n], y_data[:n]
-        print(f'Rescaled ({scale}).') 
-
-        # ---- Normalization
-        #
-        if normalize:
-            x_data = x_data.astype('float32') / 255.
-            print('Normalized.')
-            
-        # ---- Reshape : (28,28) -> (28,28,1)
-        #
-        if expand:
-            x_data = np.expand_dims(x_data, axis=-1)
-            print('Reshaped.')
-
-        # ---- Split
-        #
-        n=int(len(x_data)*train_prop)
-        x_train, x_test = x_data[:n], x_data[n:]
-        y_train, y_test = y_data[:n], y_data[n:]
-        print(f'Split ({train_prop}).') 
-
-        # ---- Hash
-        #
-        h = blake2b(digest_size=10)
-        for a in [x_train,x_test, y_train,y_test]:
-            h.update(a)
-            
-        # ---- About and return
-        #
-        print('x_train shape is  : ', x_train.shape)
-        print('x_test  shape is  : ', x_test.shape)
-        print('y_train shape is  : ', y_train.shape)
-        print('y_test  shape is  : ', y_test.shape)
-        print('Blake2b digest is : ', h.hexdigest())
-        return  x_train,y_train, x_test,y_test
-                
-            
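-
-# Typical call, as done in the notebooks :
-#
-#   x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1)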
diff --git a/VAE.Keras2/modules/datagen/__init__.py b/VAE.Keras2/modules/datagen/__init__.py
deleted file mode 100644
index 74bb0f9b2c772833524423ac4a64444c96b9d69b..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/datagen/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from modules.datagen.MNIST         import MNIST
-from modules.datagen.DataGenerator import DataGenerator
-
-
diff --git a/VAE.Keras2/modules/layers/SamplingLayer.py b/VAE.Keras2/modules/layers/SamplingLayer.py
deleted file mode 100644
index f0856c68d2d1ecbd21538609b2b23d4fca005824..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/layers/SamplingLayer.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            SamplingLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-class SamplingLayer(keras.layers.Layer):
-    '''A custom layer that receives (z_mean, z_log_var) and samples a z vector'''
-
-    def call(self, inputs):
-        
-        z_mean, z_log_var = inputs
-        
-        batch_size = tf.shape(z_mean)[0]
-        latent_dim = tf.shape(z_mean)[1]
-        
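-        # Reparameterization trick : z = mu + sigma * epsilon, with epsilon ~ N(0,1),
-        # so sampling stays differentiable w.r.t. z_mean and z_log_var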
-        epsilon = tf.keras.backend.random_normal(shape=(batch_size, latent_dim))
-        z = z_mean + tf.exp(0.5 * z_log_var) * epsilon
-        
-        return z
\ No newline at end of file
diff --git a/VAE.Keras2/modules/layers/VariationalLossLayer.py b/VAE.Keras2/modules/layers/VariationalLossLayer.py
deleted file mode 100644
index 4de04a3f157375a92c2c4e5a976d02c1ae35dff7..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/layers/VariationalLossLayer.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     VariationalLossLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-class VariationalLossLayer(keras.layers.Layer):
-   
-    def __init__(self, loss_weights=[3,7]):
-        super().__init__()
-        self.k1 = loss_weights[0]
-        self.k2 = loss_weights[1]
-
-
-    def call(self, inputs):
-        
-        # ---- Retrieve inputs
-        #
-        x, z_mean, z_log_var, y = inputs
-        
-        # ---- Compute : reconstruction loss
-        #
-        r_loss  = tf.reduce_mean( keras.losses.binary_crossentropy(x,y) ) * self.k1
-        #
-        # ---- Compute : kl_loss
-        #
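-        # Closed form : KL( N(mu,sigma^2) || N(0,1) ) = -0.5 * sum( 1 + log(sigma^2) - mu^2 - sigma^2 )
-        # (here averaged instead of summed, with the 0.5 factor absorbed into the k2 weight)
-        #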
-        kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
-        kl_loss = -tf.reduce_mean(kl_loss) * self.k2
-        
-        # ---- Add loss
-        #
-        loss = r_loss + kl_loss
-        self.add_loss(loss)
-        
-        # ---- Keep metrics
-        #
-        self.add_metric(loss,   aggregation='mean',name='loss')
-        self.add_metric(r_loss, aggregation='mean',name='r_loss')
-        self.add_metric(kl_loss,aggregation='mean',name='kl_loss')
-        return y
-
-    
-    def get_config(self):
-        return {'loss_weights':[self.k1,self.k2]}
\ No newline at end of file
diff --git a/VAE.Keras2/modules/layers/__init__.py b/VAE.Keras2/modules/layers/__init__.py
deleted file mode 100644
index bf4701637121a0180ec939d8713c7c5bbaca6caa..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/layers/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.layers.SamplingLayer        import SamplingLayer
-from modules.layers.VariationalLossLayer import VariationalLossLayer
diff --git a/VAE.Keras2/modules/models/VAE.py b/VAE.Keras2/modules/models/VAE.py
deleted file mode 100644
index d4705eb3adf3613287bd5cbb1dc26cf41c60b2f8..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/models/VAE.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
-# ------------------------------------------------------------------
-# by JL Parouty (dec 2020), based on François Chollet example
-#
-# Thanks to François Chollet example : https://keras.io/examples/generative/vae
-
-import numpy as np
-import tensorflow as tf
-from tensorflow import keras
-from tensorflow.keras import layers
-from IPython.display import display,Markdown
-from modules.layers    import SamplingLayer
-import os
-
-
-# Note : https://keras.io/guides/making_new_layers_and_models_via_subclassing/
-
-
-
-class VAE(keras.Model):
-    '''
-    A VAE model, built from given encoder and decoder
-    '''
-
-    version = '1.4'
-
-    def __init__(self, encoder=None, decoder=None, loss_weights=[1,1], **kwargs):
-        '''
-        VAE instantiation with encoder, decoder and loss weights
-        args :
-            encoder      : Encoder model
-            decoder      : Decoder model
-            loss_weights : Weights [k1,k2] of the two losses, reconstruction_loss and kl_loss ([1,1])
-        return:
-            None
-        '''
-        super(VAE, self).__init__(**kwargs)
-        self.encoder      = encoder
-        self.decoder      = decoder
-        self.loss_weights = loss_weights
-        print(f'Fidle VAE is ready :-)  loss_weights={list(self.loss_weights)}')
-       
-        
-    def call(self, inputs):
-        '''
-        Model forward pass, when we use our model
-        args:
-            inputs : Model inputs
-        return:
-            output : Output of the model 
-        '''
-        z_mean, z_log_var, z = self.encoder(inputs)
-        output               = self.decoder(z)
-        return output
-                
-        
-    def train_step(self, input):
-        '''
-        Implementation of the training update.
-        Receives an input, computes the loss, gets the gradients, updates the weights and returns the metrics.
-        Here, our metrics are the losses.
-        args:
-            input : Model input
-        return:
-            loss    : Total loss
-            r_loss  : Reconstruction loss
-            kl_loss : KL loss
-        '''
-        
-        # ---- Get the input we need, specified in the .fit()
-        #
-        if isinstance(input, tuple):
-            input = input[0]
-        
-        k1,k2 = self.loss_weights
-        
-        # ---- Forward pass
-        #      Run the forward pass and record 
-        #      operations on the GradientTape.
-        #
-        with tf.GradientTape() as tape:
-            
-            # ---- Get encoder outputs
-            #
-            z_mean, z_log_var, z = self.encoder(input)
-            
-            # ---- Get reconstruction from decoder
-            #
-            reconstruction       = self.decoder(z)
-         
-            # ---- Compute loss
-            #      Reconstruction loss, KL loss and Total loss
-            #
-            reconstruction_loss  = k1 * tf.reduce_mean( keras.losses.binary_crossentropy(input, reconstruction) )
-
-            kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
-            kl_loss = -tf.reduce_mean(kl_loss) * k2
-
-            total_loss = reconstruction_loss + kl_loss
-
-        # ---- Retrieve gradients from gradient_tape
-        #      and run one step of gradient descent
-        #      to optimize trainable weights
-        #
-        grads = tape.gradient(total_loss, self.trainable_weights)
-        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
-        
-        return {
-            "loss":     total_loss,
-            "r_loss":   reconstruction_loss,
-            "kl_loss":  kl_loss,
-        }
-    
-    
-    def predict(self,inputs):
-        '''Our predict function...'''
-        z_mean, z_var, z  = self.encoder.predict(inputs)
-        outputs           = self.decoder.predict(z)
-        return outputs
-
-        
-    def save(self,filename):
-        '''Save model in two parts (encoder and decoder)'''
-        filename, extension = os.path.splitext(filename)
-        self.encoder.save(f'{filename}-encoder.h5')
-        self.decoder.save(f'{filename}-decoder.h5')
-
-    
-    def reload(self,filename):
-        '''Reload a model saved in two parts.'''
-        filename, extension = os.path.splitext(filename)
-        self.encoder = keras.models.load_model(f'{filename}-encoder.h5', custom_objects={'SamplingLayer': SamplingLayer})
-        self.decoder = keras.models.load_model(f'{filename}-decoder.h5')
-        print('Reloaded.')
-                
-        
-    @classmethod
-    def about(cls):
-        '''Basic whoami method'''
-        display(Markdown('<br>**FIDLE 2021 - VAE**'))
-        print('Version              :', cls.version)
-        print('TensorFlow version   :', tf.__version__)
-        print('Keras version        :', tf.keras.__version__)
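-
-
-# Minimal training sketch (encoder and decoder are Keras models built in the notebooks) :
-#
-#   vae = VAE(encoder, decoder, loss_weights=[1,.001])
-#   vae.compile(optimizer='adam')
-#   vae.fit(x_train, epochs=epochs, batch_size=batch_size)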
diff --git a/VAE.Keras2/modules/models/__init__.py b/VAE.Keras2/modules/models/__init__.py
deleted file mode 100644
index b8d0a1727e74707706090552278ebbd9b28894ff..0000000000000000000000000000000000000000
--- a/VAE.Keras2/modules/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from modules.models.VAE import VAE
\ No newline at end of file
diff --git a/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb b/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
deleted file mode 100644
index cb3ae1508f44f53b9b0fe1506da26ca1b5199755..0000000000000000000000000000000000000000
--- a/VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
+++ /dev/null
@@ -1,541 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE1] - First VAE, using Lightning API (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using Lightning API, with a latent space of small dimension, using PyTorch Lightning\n",
-    "\n",
-    "<!-- AUTHOR : Achille Mbogol Touye (EFIlIA-MIAI/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding **Ligthning API**, using two custom layers\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import torch.nn as nn\n",
-    "import lightning.pytorch as pl\n",
-    "\n",
-    "from modules.datagen     import MNIST\n",
-    "from torch.utils.data    import TensorDataset, DataLoader\n",
-    "from modules.progressbar import CustomTrainProgressBar\n",
-    "from modules.callbacks   import ImagesCallback, BestModelCallback\n",
-    "from modules.layers      import SamplingLayer, VariationalLossLayer\n",
-    "from lightning.pytorch.loggers.tensorboard import TensorBoardLogger\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE1')\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : With scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !\\\n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !\\\n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `vae_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.001\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 2\n",
-    "loss_weights  = [1,.001]\n",
-    "\n",
-    "scale         = 0.2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 10\n",
-    "fit_verbosity = 1"
-   ]
-  },
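-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "A quick sanity check of the weighting above (illustrative loss values, not taken from a real run) :"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "k1, k2 = loss_weights\n",
-    "r_loss_example, kl_loss_example = 0.25, 3.0   # hypothetical values\n",
-    "print('vae_loss =', k1*r_loss_example + k2*kl_loss_example)   # 1*0.25 + 0.001*3.0 = 0.253"
-   ]
-  },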
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    " ## 3.1 -  For Training model use Dataloader\n",
-    "The Dataset retrieves our dataset’s features and labels one sample at a time. While training a model, we typically want to pass samples in minibatches, reshuffle the data at every epoch to reduce model overfitting. DataLoader is an iterable that abstracts this complexity for us in an easy API"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "train_dataset = TensorDataset(x_data,y_data)\n",
-    "\n",
-    "# train bacth data\n",
-    "train_loader= DataLoader(\n",
-    "  dataset=train_dataset, \n",
-    "  shuffle=False, \n",
-    "  batch_size=batch_size, \n",
-    "  num_workers=2 \n",
-    ")\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use the **pytorch ligthning API.**  \n",
-    "For this, we will use two custom layers :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_logvar - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VariationalLossLayer`, which allows us to calculate the loss function, loss - See         : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Encoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class Encoder(nn.Module):\n",
-    "    def __init__(self, latent_dim):\n",
-    "        super().__init__()\n",
-    "        self.Convblock=nn.Sequential(\n",
-    "            nn.Conv2d(in_channels=1,  out_channels=32, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(32),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "            \n",
-    "            nn.Flatten(),\n",
-    "\n",
-    "            nn.Linear(64*7*7, 16),\n",
-    "            nn.BatchNorm1d(16),\n",
-    "            nn.LeakyReLU(0.2),\n",
-    "        )\n",
-    "\n",
-    "        self.z_mean   = nn.Linear(16, latent_dim)\n",
-    "        self.z_logvar = nn.Linear(16, latent_dim)\n",
-    "        \n",
-    "\n",
-    "\n",
-    "    def forward(self, x):\n",
-    "       x        = self.Convblock(x)\n",
-    "       z_mean   = self.z_mean(x)\n",
-    "       z_logvar = self.z_logvar(x) \n",
-    "       z        = SamplingLayer()([z_mean, z_logvar]) \n",
-    "         \n",
-    "       return z_mean, z_logvar, z        "
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Decoder"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class Decoder(nn.Module):\n",
-    "    def __init__(self, latent_dim):\n",
-    "        super().__init__()\n",
-    "        self.linear=nn.Sequential(\n",
-    "            nn.Linear(latent_dim, 16),\n",
-    "            nn.BatchNorm1d(16),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.Linear(16, 64*7*7),\n",
-    "            nn.BatchNorm1d(64*7*7),\n",
-    "            nn.ReLU()\n",
-    "        )\n",
-    "        \n",
-    "        self.Deconvblock=nn.Sequential(\n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(64),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),\n",
-    "            nn.BatchNorm2d(32),\n",
-    "            nn.ReLU(),\n",
-    "            \n",
-    "            nn.ConvTranspose2d(in_channels=32, out_channels=1,  kernel_size=3, stride=1, padding=1),\n",
-    "            nn.Sigmoid()\n",
-    "        )\n",
-    "    \n",
-    "\n",
-    "\n",
-    "    def forward(self, z):\n",
-    "       x        = self.linear(z)\n",
-    "       x        = x.reshape(-1,64,7,7)\n",
-    "       x_hat    = self.Deconvblock(x)\n",
-    "       return x_hat"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "\n",
-    "We will calculate the loss with a specific layer: `VariationalLossLayer` - See : [VariationalLossLayer.py](./modules/layers/VariationalLossLayer.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class LitVAE(pl.LightningModule):\n",
-    "    \n",
-    "    def __init__(self, encoder, decoder):\n",
-    "        super().__init__()\n",
-    "        self.encoder  = encoder\n",
-    "        self.decoder  = decoder\n",
-    "        \n",
-    "    # forward pass\n",
-    "    def forward(self, x):\n",
-    "       z_mean, z_logvar, z = self.encoder(x)\n",
-    "       x_hat               = self.decoder(z)\n",
-    "       return x_hat\n",
-    "\n",
-    "    def training_step(self, batch, batch_idx):\n",
-    "        # training_step defines the train loop.\n",
-    "        x, _                = batch\n",
-    "        z_mean, z_logvar, z = self.encoder(x)\n",
-    "        x_hat               = self.decoder(z)\n",
-    "\n",
-    "        \n",
-    "        r_loss,kl_loss,loss = VariationalLossLayer(loss_weights=loss_weights)([x, z_mean,z_logvar,x_hat]) \n",
-    "\n",
-    "        metrics = {\"r_loss\"      : r_loss, \n",
-    "                    \"kl_loss\"    : kl_loss,\n",
-    "                    \"vae_loss\"   : loss\n",
-    "                  }\n",
-    "        \n",
-    "        # logs metrics for each training_step\n",
-    "        self.log_dict(metrics,\n",
-    "                      on_step  = False,\n",
-    "                      on_epoch = True, \n",
-    "                      prog_bar = True, \n",
-    "                      logger   = True\n",
-    "                     ) \n",
-    "        \n",
-    "       \n",
-    "        return loss\n",
-    "        \n",
-    "    def configure_optimizers(self):\n",
-    "        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n",
-    "        return optimizer\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# print model\n",
-    "vae=LitVAE(Encoder(latent_dim=2),Decoder(latent_dim=2))\n",
-    "print(vae)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback`    : sauvegardera des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " -  `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# save best model\n",
-    "save_dir = \"./run/models/\"\n",
-    "BestModelCallback = BestModelCallback(dirpath= save_dir) \n",
-    "CallbackImages    = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "logger= TensorBoardLogger(save_dir='VAE1_logs',name=\"VAE_logs\") # loggers data"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "# train model\n",
-    "trainer= pl.Trainer(accelerator='auto',\n",
-    "                    max_epochs=epochs,\n",
-    "                    logger=logger,\n",
-    "                    num_sanity_val_steps=0,\n",
-    "                   callbacks=[CustomTrainProgressBar(), BestModelCallback, CallbackImages]\n",
-    "                   )\n",
-    "\n",
-    "trainer.fit(model=vae, train_dataloaders=train_loader)\n",
-    "\n",
-    "\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# launch Tensorboard \n",
-    "%reload_ext tensorboard\n",
-    "%tensorboard --logdir=./VAE1_logs/VAE_logs/ --bind_all"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = CallbackImages.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-generated')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Annexe - Model Save and reload "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "loaded_model = LitVAE.load_from_checkpoint(BestModelCallback.best_model_path,\n",
-    "                                           encoder=Encoder(latent_dim=2),\n",
-    "                                           decoder=Decoder(latent_dim=2))\n",
-    "# put model in evaluation modecnrs\n",
-    "loaded_model.eval()\n",
-    "\n",
-    "# ---- Retrieve a layer decoder\n",
-    "decoder=loaded_model.decoder\n",
-    "\n",
-    "# example of z\n",
-    "z   = torch.Tensor([[-1,.1]]).to(device)\n",
-    "img = decoder(z)\n",
-    "\n",
-    "fidle.scrawler.images(img.cpu().detach(), x_size=2,y_size=2, save_as='04-example')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb b/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
deleted file mode 100644
index 8d4335c8bcf2b1ae34082315c725a158fd470af7..0000000000000000000000000000000000000000
--- a/VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
+++ /dev/null
@@ -1,516 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE2] - VAE, using a custom model class  (MNIST dataset)\n",
-    "<!-- DESC --> Construction and training of a VAE, using model subclass, with a latent space of small dimension, using PyTorch Lightninh\n",
-    "\n",
-    "<!-- AUTHOR : Achille Mbogol Touye (EFILIA-MIAI/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - Understanding and implementing a **variational autoencoder** neurals network (VAE)\n",
-    " - Understanding a still more **advanced programming model**, using a **custom model**\n",
-    "\n",
-    "The calculation needs being important, it is preferable to use a very simple dataset such as MNIST to start with.  \n",
-    "...MNIST with a small scale if you haven't a GPU ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Defining a VAE model\n",
-    " - Build the model\n",
-    " - Train it\n",
-    " - Have a look on the train process\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os,sys\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import matplotlib.pyplot as plt\n",
-    "import scipy.stats\n",
-    "\n",
-    "import torch.nn as nn\n",
-    "import lightning.pytorch as pl\n",
-    "\n",
-    "from torch.utils.data    import TensorDataset, DataLoader\n",
-    "from modules.callbacks   import ImagesCallback,BestModelCallback\n",
-    "from modules.progressbar import CustomTrainProgressBar\n",
-    "from modules.layers      import SamplingLayer\n",
-    "from modules.datagen     import MNIST\n",
-    "from modules.models      import VAE, Encoder, Decoder\n",
-    "from lightning.pytorch.loggers.tensorboard import TensorBoardLogger\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE2')\n",
-    "\n",
-    "VAE.about()\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Parameters\n",
-    "`scale` : with scale=1, we need 1'30s on a GPU V100 ...and >20' on a CPU !  \n",
-    "`latent_dim` : 2 dimensions is small, but usefull to draw !  \n",
-    "`fit_verbosity`: Verbosity of training progress bar: 0=silent, 1=progress bar, 2=One line  \n",
-    "\n",
-    "`loss_weights` : Our **loss function** is the weighted sum of two loss:\n",
-    " - `r_loss` which measures the loss during reconstruction.  \n",
-    " - `kl_loss` which measures the dispersion.  \n",
-    "\n",
-    "The weights are defined by: `loss_weights=[k1,k2]` where : `total_loss = k1*r_loss + k2*kl_loss`  \n",
-    "In practice, a value of \\[1,.01\\] gives good results here.\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "latent_dim    = 6\n",
-    "loss_weights  = [1,.001]       # [1, .001] give good results\n",
-    "\n",
-    "scale         = .2\n",
-    "seed          = 123\n",
-    "\n",
-    "batch_size    = 64\n",
-    "epochs        = 5\n",
-    "fit_verbosity = 1"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('latent_dim', 'loss_weights', 'scale', 'seed', 'batch_size', 'epochs', 'fit_verbosity')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Prepare data\n",
-    "`MNIST.get_data()` return : `x_train,y_train, x_test,y_test`,  \\\n",
-    "but we only need x_train for our training."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )\n",
-    "\n",
-    "fidle.scrawler.images(x_data[:20], None, indices='all', columns=10, x_size=1,y_size=1,y_padding=0, save_as='01-original')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    " ## 3.1 -  For Training model use Dataloader\n",
-    "The Dataset retrieves our dataset’s features and labels one sample at a time. While training a model, we typically want to pass samples in minibatches, reshuffle the data at every epoch to reduce model overfitting. DataLoader is an iterable that abstracts this complexity for us in an easy API"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "train_dataset = TensorDataset(x_data,y_data)\n",
-    "\n",
-    "# train bacth data\n",
-    "train_loader= DataLoader(\n",
-    "  dataset=train_dataset, \n",
-    "  shuffle=False, \n",
-    "  batch_size=batch_size, \n",
-    "  num_workers=2 \n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Build model\n",
-    "In this example, we will use a **custom model**.\n",
-    "For this, we will use :\n",
-    " - `SamplingLayer`, which generates a vector z from the parameters z_mean and z_logvar - See : [SamplingLayer.py](./modules/layers/SamplingLayer.py)\n",
-    " - `VAE`, a custom model- See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### VAE\n",
-    "`VAE` is a custom model with a specific train_step - See : [VAE.py](./modules/models/VAE.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "vae=VAE(Encoder(latent_dim = latent_dim),\n",
-    "        Decoder(latent_dim = latent_dim),\n",
-    "        loss_weights\n",
-    "       )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Train\n",
-    "### 5.1 - Using two nice custom callbacks :-)\n",
-    "Two custom callbacks are used:\n",
-    " - `ImagesCallback` : qui va sauvegarder des images durant l'apprentissage - See [ImagesCallback.py](./modules/callbacks/ImagesCallback.py)\n",
-    " - `BestModelCallback` : qui sauvegardera le meilleur model - See [BestModelCallback.py](./modules/callbacks/BestModelCallback.py)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "save_dir = \"./run/models_dir/\"\n",
-    "BestModelCallback   = BestModelCallback(dirpath = save_dir)\n",
-    "CallbackImages      = ImagesCallback(x=x_data, z_dim=latent_dim, nb_images=5, from_z=True, from_random=True, run_dir=run_dir)\n",
-    "logger              = TensorBoardLogger(save_dir='VAE2_logs',name=\"VAE_logs\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Let's train !\n",
-    "With `scale=1`, need 1'15 on a GPU (V100 at IDRIS) ...or 20' on a CPU  "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chrono=fidle.Chrono()\n",
-    "chrono.start()\n",
-    "\n",
-    "# train model\n",
-    "trainer= pl.Trainer(accelerator='auto',\n",
-    "                    max_epochs=epochs,\n",
-    "                    logger=logger,\n",
-    "                    num_sanity_val_steps=0,\n",
-    "                   callbacks=[CustomTrainProgressBar(), BestModelCallback, CallbackImages]\n",
-    "                   )\n",
-    "\n",
-    "trainer.fit(model=vae, train_dataloaders=train_loader)\n",
-    "\n",
-    "\n",
-    "\n",
-    "chrono.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Training review\n",
-    "### 6.1 - History"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# launch Tensorboard \n",
-    "%reload_ext tensorboard\n",
-    "%tensorboard --logdir=./VAE1_logs/VAE_logs/ --bind_all"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.2 - Reconstruction during training\n",
-    "At the end of each epoch, our callback saved some reconstructed images.  \n",
-    "Where :  \n",
-    "Original image -> encoder -> z -> decoder -> Reconstructed image"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_z, images_r = CallbackImages.get_images( range(0,epochs,2) )\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as='02-original')\n",
-    "\n",
-    "fidle.utils.subtitle('Encoded/decoded images')\n",
-    "fidle.scrawler.images(images_z, None, indices='all', columns=5, x_size=2,y_size=2, save_as='03-reconstruct')\n",
-    "\n",
-    "fidle.utils.subtitle('Original images :')\n",
-    "fidle.scrawler.images(x_data[:5], None, indices='all', columns=5, x_size=2,y_size=2, save_as=None)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 6.3 - Generation (latent -> decoder) during training"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.utils.subtitle('Generated images from latent space')\n",
-    "fidle.scrawler.images(images_r, None, indices='all', columns=5, x_size=2,y_size=2, save_as='04-encoded')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 7 - Model evaluation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.1 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "BestModelCallback.best_model_path"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "vae = VAE.load_from_checkpoint(BestModelCallback.best_model_path,\n",
-    "                                encoder=Encoder(latent_dim=latent_dim),\n",
-    "                                decoder=Decoder(latent_dim=latent_dim)\n",
-    "                              )\n",
-    "# put model in evaluation mode\n",
-    "vae.eval()\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.2 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder(x_show.to(device))\n",
-    "x_reconst         = vae.decoder(z)\n",
-    "\n",
-    "# ---- Show it\n",
-    "z         = z.cpu().detach()\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='05-original')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='06-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.3 - Visualization of the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = int(20000*scale)\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder(x_show.to(device))\n",
-    "\n",
-    "# ---- Show them\n",
-    "z         = z.cpu().detach()           # Move the tensor to CPU and detach it\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('07-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 7.4 - Generative latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "    \n",
-    "    grid_size   = 18\n",
-    "    grid_scale  = 1\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Draw latentspoints and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(10, 8))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('08-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "    grid=torch.from_numpy(grid).to(device)\n",
-    "    x_reconst = vae.decoder([grid])\n",
-    "    x_reconst = x_reconst.cpu().detach()\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='09-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb b/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
deleted file mode 100644
index f35f8a4267418f2bcdf71a18b50b75b61cf6db66..0000000000000000000000000000000000000000
--- a/VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
+++ /dev/null
@@ -1,358 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [LVAE3] - Analysis of the VAE's latent space of MNIST dataset\n",
-    "<!-- DESC --> Visualization and analysis of the VAE's latent space of the dataset MNIST, using PyTorch Lightning\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    " - First data generation from **latent space** \n",
-    " - Understanding of underlying principles\n",
-    " - Model management\n",
-    "\n",
-    "Here, we don't consume data anymore, but we generate them ! ;-)\n",
-    "\n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load a saved model\n",
-    " - Reconstruct some images\n",
-    " - Latent space visualization\n",
-    " - Matrix of generated images\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 1 - Init python stuff"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.1 - Init python"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import sys\n",
-    "import torch\n",
-    "import pandas as pd\n",
-    "import numpy  as np\n",
-    "import torch.nn as nn\n",
-    "\n",
-    "from modules.callbacks import ImagesCallback, BestModelCallback\n",
-    "from modules.datagen   import MNIST\n",
-    "from modules.models    import Encoder, Decoder, VAE \n",
-    "\n",
-    "\n",
-    "import scipy.stats\n",
-    "import matplotlib\n",
-    "import matplotlib.pyplot as plt\n",
-    "from barviz import Simplex\n",
-    "from barviz import Collection\n",
-    "\n",
-    "\n",
-    "import fidle\n",
-    "\n",
-    "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('LVAE3')\n",
-    "\n",
-    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 1.2 - Parameters"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scale      = 1\n",
-    "seed       = 123\n",
-    "models_dir = './run/models_dir/best-model-epoch=4-loss=0.00.ckpt'"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Override parameters (batch mode) - Just forget this cell"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.override('scale', 'seed', 'models_dir')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get data"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "x_data, y_data, _,_ = MNIST.get_data(seed=seed, scale=scale, train_prop=1 )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Reload best model"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "#---- Load the model from a checkpoint\n",
-    "latent_dim=6\n",
-    "\n",
-    "vae = VAE.load_from_checkpoint(models_dir,\n",
-    "                               encoder=Encoder(latent_dim=latent_dim),\n",
-    "                               decoder=Decoder(latent_dim=latent_dim)\n",
-    "                              )\n",
-    "# put model in evaluation mode\n",
-    "vae.eval()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Image reconstruction"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# ---- Select few images\n",
-    "\n",
-    "x_show = fidle.utils.pick_dataset(x_data, n=10)\n",
-    "\n",
-    "# ---- Get latent points and reconstructed images\n",
-    "\n",
-    "z_mean, z_var, z  = vae.encoder(x_show.to(device))\n",
-    "x_reconst         = vae.decoder(z)\n",
-    "\n",
-    "latent_dim        = z.shape[1]\n",
-    "\n",
-    "# ---- Show it\n",
-    "z         = z.cpu().detach()         # Move the tensor to CPU and detach it\n",
-    "x_reconst = x_reconst.cpu().detach()\n",
-    "\n",
-    "labels=[ str(np.round(z[i],1)) for i in range(10) ]\n",
-    "fidle.utils.subtitle('Originals :')\n",
-    "fidle.scrawler.images(x_show,    None, indices='all', columns=10, x_size=2,y_size=2, save_as='01-original')\n",
-    "fidle.utils.subtitle('Reconstructed :')\n",
-    "fidle.scrawler.images(x_reconst, None, indices='all', columns=10, x_size=2,y_size=2, save_as='02-reconstruct')\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Visualizing the latent space"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "n_show = 5000\n",
-    "\n",
-    "# ---- Select images\n",
-    "\n",
-    "x_show, y_show   = fidle.utils.pick_dataset(x_data,y_data, n=n_show)\n",
-    "\n",
-    "# ---- Get latent points\n",
-    "\n",
-    "z_mean, z_var, z = vae.encoder(x_show.to(device))\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.1 - Classic 2d visualisaton"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "z   = z.cpu().detach()\n",
-    "fig = plt.figure(figsize=(14, 10))\n",
-    "plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=30)\n",
-    "plt.colorbar()\n",
-    "fidle.scrawler.save_fig('03-Latent-space')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 5.2 - Simplex visualisaton"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim<4:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is greater than 3')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    # ---- Softmax rescale\n",
-    "    #\n",
-    "    zs = torch.exp(z)/torch.sum(torch.exp(z),axis=1,keepdims=True)\n",
-    "    zs=zs.cpu().detach()\n",
-    "    # zc  = zs * 1/np.max(zs)\n",
-    "\n",
-    "    # ---- Create collection\n",
-    "    #\n",
-    "    c = Collection(zs, colors=y_show, labels=y_show)\n",
-    "    c.attrs.markers_colormap     = {'colorscale':'Rainbow','cmin':0,'cmax':latent_dim}\n",
-    "    c.attrs.markers_size         = 5\n",
-    "    c.attrs.markers_border_width = 0\n",
-    "    c.attrs.markers_opacity      = 0.8\n",
-    "\n",
-    "    s = Simplex.build(latent_dim)\n",
-    "    s.attrs.width  = 1000\n",
-    "    s.attrs.height = 1000\n",
-    "    s.plot(c)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 6 - Generate from latent space (latent_dim==2)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "if latent_dim>2:\n",
-    "\n",
-    "    print('Sorry, This part can only work if the latent space is of dimension 2')\n",
-    "\n",
-    "else:\n",
-    "\n",
-    "    grid_size   = 14\n",
-    "    grid_scale  = 1.\n",
-    "\n",
-    "    # ---- Draw a ppf grid\n",
-    "\n",
-    "    grid=[]\n",
-    "    for y in scipy.stats.norm.ppf(np.linspace(0.99, 0.01, grid_size),scale=grid_scale):\n",
-    "        for x in scipy.stats.norm.ppf(np.linspace(0.01, 0.99, grid_size),scale=grid_scale):\n",
-    "            grid.append( (x,y) )\n",
-    "    grid=np.array(grid)\n",
-    "\n",
-    "    # ---- Draw latentspoints and grid\n",
-    "\n",
-    "    fig = plt.figure(figsize=(12, 10))\n",
-    "    plt.scatter(z[:, 0] , z[:, 1], c=y_show, cmap= 'tab10', alpha=0.5, s=20)\n",
-    "    plt.scatter(grid[:, 0] , grid[:, 1], c = 'black', s=60, linewidth=2, marker='+', alpha=1)\n",
-    "    fidle.scrawler.save_fig('04-Latent-grid')\n",
-    "    plt.show()\n",
-    "\n",
-    "    # ---- Plot grid corresponding images\n",
-    "    grid      = torch.from_numpy(grid).to(device)\n",
-    "    x_reconst = vae.decoder([grid])\n",
-    "    x_reconst = x_reconst.cpu().detach()\n",
-    "    fidle.scrawler.images(x_reconst, indices='all', columns=grid_size, x_size=0.5,y_size=0.5, y_padding=0,spines_alpha=0.1, save_as='05-Latent-morphing')\n",
-    "\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fidle.end()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.8.10"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/VAE.Lightning/modules/.gitkeep b/VAE.Lightning/modules/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/VAE.Lightning/modules/callbacks/.gitkeep b/VAE.Lightning/modules/callbacks/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/VAE.Lightning/modules/callbacks/BestModelCallback.py b/VAE.Lightning/modules/callbacks/BestModelCallback.py
deleted file mode 100644
index d44bfca2a79547f736f26f2233db61860526f6de..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/callbacks/BestModelCallback.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-import lightning.pytorch as pl
-from lightning.pytorch.callbacks import ModelCheckpoint
-from lightning.pytorch.callbacks import Callback
-
-
-class BestModelCallback(Callback):
-
-    def __init__(self, filename='best-model-{epoch}-{loss:.2f}', dirpath="./run/models/"):
-        super(BestModelCallback, self).__init__()  
-        self.filename = filename
-        self.dirpath  = dirpath
-        os.makedirs(dirpath, exist_ok=True)
-        self.best_model_path = None
-        self.model_checkpoint = ModelCheckpoint(
-            dirpath    = dirpath,
-            filename   = filename,
-            save_top_k = 1,
-            verbose    = False,
-            monitor    = "vae_loss",
-            mode       = "min"
-        )
-
-    def on_train_epoch_end(self, trainer, pl_module):
-        # save the best model
-        self.model_checkpoint.on_train_epoch_end(trainer, pl_module)
-        self.best_model_path = self.model_checkpoint.best_model_path
-
-                
diff --git a/VAE.Lightning/modules/callbacks/ImagesCallback.py b/VAE.Lightning/modules/callbacks/ImagesCallback.py
deleted file mode 100644
index 25e0c123f1218af88542f64a261e17cad3fac656..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/callbacks/ImagesCallback.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            ImageCallback
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye, sep 2023
-
-
-import lightning.pytorch as pl
-import numpy as np
-import matplotlib.pyplot as plt
-from skimage import io
-import os
-import torch
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-class ImagesCallback(pl.Callback):
-    '''
-    Save generated (random mode) or encoded/decoded (z mode) images on epoch end.
-    params:
-        x           : input images, for z mode (None)
-        z_dim       : size of the latent space, for random mode (None)
-        nb_images   : number of images to save
-        from_z      : save images from z (False)
-        from_random : save images from random (False)
-        filename    : images filename
-        run_dir     : output directory to save images        
-    '''
-    
-   
-    def __init__(self, x           = None,
-                       z_dim       = None,
-                       nb_images   = 5,
-                       from_z      = False, 
-                       from_random = False,
-                       filename    = 'image-{epoch:03d}-{i:02d}.jpg',
-                       run_dir     = './run'):
-        
-        # ---- Parameters
-        #
-        super().__init__()
-        self.x = None if x is None else x[:nb_images]
-        self.z_dim       = z_dim
-        
-        self.nb_images   = nb_images
-        self.from_z      = from_z
-        self.from_random = from_random
-
-        self.filename_z       = run_dir + '/images-z/'      + filename
-        self.filename_random  = run_dir + '/images-random/' + filename
-        
-        if from_z:      os.makedirs( run_dir + '/images-z/',      mode=0o750, exist_ok=True)
-        if from_random: os.makedirs( run_dir + '/images-random/', mode=0o750, exist_ok=True)
-        
-    
-    
-    def save_images(self, images, filename, epoch):
-        '''Save images as <filename>'''
-        
-        for i,image in enumerate(images):
-            
-            image = image.squeeze()  # Squeeze it if monochrome : (1,H,W) -> (H,W) 
-        
-            filenamei = filename.format(epoch=epoch,i=i)
-            
-            if len(image.shape) == 2:
-                plt.imsave(filenamei, image, cmap='gray_r')
-            else:
-                plt.imsave(filenamei, image)
-
-    
-    
-    def on_train_epoch_end(self, trainer, pl_module):
-        '''Called at the end of each epoch'''
-        
-        encoder     = pl_module.encoder
-        decoder     = pl_module.decoder
-
-        if self.from_random:
-            z      = torch.randn(self.nb_images,self.z_dim).to(device)
-            images = decoder(z)
-            self.save_images(images.cpu().detach(), self.filename_random, trainer.current_epoch)
-            
-        if self.from_z:
-            z_mean, z_logvar, z  = encoder(self.x.to(device))
-            images               = decoder(z)
-            self.save_images(images.cpu().detach(), self.filename_z, trainer.current_epoch)
-
-
-    def get_images(self, epochs=None, from_z=True,from_random=True):
-        '''Read and return saved images. epochs is a range'''
-        if epochs is None: return
-        images_z = []
-        images_r = []
-        for epoch in list(epochs):
-            for i in range(self.nb_images):
-                if from_z:
-                    f = self.filename_z.format(epoch=epoch,i=i)
-                    images_z.append( io.imread(f) )
-                if from_random:
-                    f = self.filename_random.format(epoch=epoch,i=i)
-                    images_r.append( io.imread(f) )
-        return images_z, images_r
-            
diff --git a/VAE.Lightning/modules/callbacks/__init__.py b/VAE.Lightning/modules/callbacks/__init__.py
deleted file mode 100644
index 83b25a9615bdd2753b3047eb49473b86f3e6cb99..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/callbacks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.callbacks.ImagesCallback     import ImagesCallback
-from modules.callbacks.BestModelCallback  import BestModelCallback
\ No newline at end of file
diff --git a/VAE.Lightning/modules/datagen/.gitkeep b/VAE.Lightning/modules/datagen/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/VAE.Lightning/modules/datagen/MNIST.py b/VAE.Lightning/modules/datagen/MNIST.py
deleted file mode 100644
index 7b063d0820fe0a10ace79ddf288206f4219e6439..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/datagen/MNIST.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye, sep 2023
-
-
-import torch
-
-import numpy as np
-import torchvision.transforms as T
-from torchvision import datasets
-from torch.utils.data import DataLoader
-from hashlib import blake2b
-
-# ------------------------------------------------------------------
-#   A useful class to manage our MNIST dataset
-#   This class manages datasets derived from the original MNIST
-# ------------------------------------------------------------------
-
-
-class MNIST():
-    
-    version = '0.1'
-    
-    def __init__(self):
-        pass
-   
-    @classmethod
-    def get_data(cls, normalize=True,  scale=1., train_prop=0.8, shuffle=True, seed=None):
-        """
-        Return original MNIST dataset
-        args:
-            normalize  : Normalize dataset or not (True)
-            scale      : Scale of dataset to use. 1. means 100% (1.)
-            train_prop : Ratio of train/test (0.8)
-            shuffle    : Shuffle data if True (True)
-            seed       : Random seed value. False means no seed, None means using /dev/urandom (None)
-        returns:
-            x_train,y_train,x_test,y_test
-        """
-
-        # ---- Seed
-        #
-        if seed is not False:
-            np.random.seed(seed)
-            if seed is not None:
-                torch.manual_seed(seed)    # torch must be seeded too: shuffling below uses torch.randperm
-            print(f'Seeded ({seed})')
-
-        # ---- Get data
-        #      ToTensor() normalizes to [0,1], PILToTensor() keeps raw uint8 values
-        #
-        transform     = T.ToTensor() if normalize else T.PILToTensor()
-        train_dataset = datasets.MNIST(root=".data", train=True,  download=True, transform=transform)
-        test_dataset  = datasets.MNIST(root=".data", train=False, download=True, transform=transform)
-        print('Dataset loaded.')
-
-        # ---- Retrieve as single tensors
-        #
-        trainloader = DataLoader(train_dataset, batch_size=len(train_dataset))
-        testloader  = DataLoader(test_dataset,  batch_size=len(test_dataset) )
-
-        x_train, y_train = next(iter(trainloader))
-        x_test,  y_test  = next(iter(testloader))
-
-        print('Normalized.' if normalize else 'Unnormalized.')
-        
-        # ---- Concatenate
-        #
-        x_data = torch.cat([x_train, x_test], dim=0)
-        y_data = torch.cat([y_train, y_test])
-        print('Concatenated.')
-
-        # ---- Shuffle
-        #
-        if shuffle:
-            p              = torch.randperm(len(x_data))
-            x_data, y_data = x_data[p], y_data[p]
-            print('Shuffled.')     
-        
-        # ---- Rescale
-        #
-        n              = int(scale*len(x_data))
-        x_data, y_data = x_data[:n], y_data[:n]
-        print(f'Rescaled ({scale}).')
-
-        # ---- Split
-        #
-        n               = int(len(x_data)*train_prop)
-        x_train, x_test = x_data[:n], x_data[n:]
-        y_train, y_test = y_data[:n], y_data[n:]
-        print(f'Split ({train_prop}).')
-
-        # ---- Hash
-        #
-        h = blake2b(digest_size=10)
-        for a in [x_train,x_test, y_train,y_test]:
-            h.update(a.numpy().tobytes())
-            
-        # ---- About and return
-        #
-        print('x_train shape is  : ', x_train.shape)
-        print('x_test  shape is  : ', x_test.shape)
-        print('y_train shape is  : ', y_train.shape)
-        print('y_test  shape is  : ', y_test.shape)
-        print('Blake2b digest is : ', h.hexdigest())
-        return  x_train,y_train, x_test,y_test
-                
-            
diff --git a/VAE.Lightning/modules/datagen/__init__.py b/VAE.Lightning/modules/datagen/__init__.py
deleted file mode 100644
index 74bb0f9b2c772833524423ac4a64444c96b9d69b..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/datagen/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from modules.datagen.MNIST         import MNIST
-from modules.datagen.DataGenerator import DataGenerator
-
-
diff --git a/VAE.Lightning/modules/layers/.gitkeep b/VAE.Lightning/modules/layers/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/VAE.Lightning/modules/layers/SamplingLayer.py b/VAE.Lightning/modules/layers/SamplingLayer.py
deleted file mode 100644
index 324ef344072fb60eb7aedf9c0d8c7d711db1d83d..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/layers/SamplingLayer.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                            SamplingLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023), based on https://www.researchgate.net/publication/304163568_Tutorial_on_Variational_Autoencoders
-#
-
-import torch
-import torch.nn as nn
-
-class SamplingLayer(nn.Module):
-    '''A custom layer that receives (z_mean, z_logvar) and samples a z vector'''
-
-    def forward(self, inputs):
-
-        z_mean, z_logvar = inputs
-
-        # ---- Reparametrization trick : z = mean + sigma * epsilon, with epsilon ~ N(0,I)
-        #
-        z_sigma = torch.exp(0.5 * z_logvar)     # logvar -> standard deviation
-        epsilon = torch.randn_like(z_mean)      # noise, on the same device/dtype as z_mean
-        z       = z_mean + z_sigma * epsilon
-
-        return z
\ No newline at end of file
diff --git a/VAE.Lightning/modules/layers/VariationalLossLayer.py b/VAE.Lightning/modules/layers/VariationalLossLayer.py
deleted file mode 100644
index 0d9115c0fad1ebc09b1407601bfa86dfcad7c79e..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/layers/VariationalLossLayer.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                     VariationalLossLayer
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023), based on https://www.researchgate.net/publication/304163568_Tutorial_on_Variational_Autoencoders
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-class VariationalLossLayer(nn.Module):
-   
-    def __init__(self, loss_weights=[3,7]):
-        super().__init__()
-        self.k1 = loss_weights[0]
-        self.k2 = loss_weights[1]
-
-
-    def forward(self, inputs):
-        
-        # ---- Retrieve inputs
-        #
-        x, z_mean, z_logvar, x_hat = inputs
-        
-        # ---- Reconstruction loss : weighted MSE between x_hat and x
-        #
-        r_loss  = F.mse_loss(x_hat, x) * self.k1
-
-        # ---- KL loss : -mean(1 + log(sigma^2) - mu^2 - sigma^2), weighted
-        #
-        kl_loss = - torch.mean(1 + z_logvar - torch.square(z_mean) - torch.exp(z_logvar)) * self.k2
-
-        # ---- Total loss
-        #
-        loss    = r_loss + kl_loss
-       
-        return r_loss, kl_loss, loss
-
-    
-    def get_config(self):
-        return {'loss_weights':[self.k1,self.k2]}
\ No newline at end of file
diff --git a/VAE.Lightning/modules/layers/__init__.py b/VAE.Lightning/modules/layers/__init__.py
deleted file mode 100644
index bf4701637121a0180ec939d8713c7c5bbaca6caa..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/layers/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from modules.layers.SamplingLayer        import SamplingLayer
-from modules.layers.VariationalLossLayer import VariationalLossLayer
diff --git a/VAE.Lightning/modules/models/.gitkeep b/VAE.Lightning/modules/models/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/VAE.Lightning/modules/models/Decoder.py b/VAE.Lightning/modules/models/Decoder.py
deleted file mode 100644
index aa512da7edbcd51c2bf15935edb11ff5fe900dab..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/models/Decoder.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-
-class Decoder(nn.Module):
-    def __init__(self, latent_dim):
-        super().__init__()
-        self.linear=nn.Sequential(
-            nn.Linear(latent_dim, 16),
-            nn.BatchNorm1d(16),
-            nn.ReLU(),
-            
-            nn.Linear(16, 64*7*7),
-            nn.BatchNorm1d(64*7*7),
-            nn.ReLU()
-        )
-        
-        self.Deconvblock=nn.Sequential(
-            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(32),
-            nn.ReLU(),
-            
-            nn.ConvTranspose2d(in_channels=32, out_channels=1,  kernel_size=3, stride=1, padding=1),
-            nn.Sigmoid()
-        )
-    
-
-    def forward(self, z):
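-       # Shapes : (batch, latent_dim) -> (batch, 64*7*7) -> (batch, 64, 7, 7) -> (batch, 1, 28, 28)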
-       x        = self.linear(z)
-       x        = x.reshape(-1,64,7,7)
-       x_hat    = self.Deconvblock(x)
-       return x_hat
-        
\ No newline at end of file
diff --git a/VAE.Lightning/modules/models/Encoder.py b/VAE.Lightning/modules/models/Encoder.py
deleted file mode 100644
index 16191608115834a4c9af452ba23744bf0bb90626..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/models/Encoder.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-from modules.layers  import SamplingLayer
-
-class Encoder(nn.Module):
-    def __init__(self, latent_dim):
-        super().__init__()
-        self.Convblock=nn.Sequential(
-            nn.Conv2d(in_channels=1,  out_channels=32, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(32),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2d(64),
-            nn.LeakyReLU(0.2),
-            
-            nn.Flatten(),
-
-            nn.Linear(64*7*7, 16),
-            nn.BatchNorm1d(16),
-            nn.LeakyReLU(0.2),
-        )
-
-        self.z_mean   = nn.Linear(16, latent_dim)
-        self.z_logvar = nn.Linear(16, latent_dim)
-        
-
-
-    def forward(self, x):
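-       # Shapes : (batch, 1, 28, 28) -> conv stack -> (batch, 16) -> z_mean / z_logvar of size latent_dim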
-       x        = self.Convblock(x)
-       z_mean   = self.z_mean(x)
-       z_logvar = self.z_logvar(x) 
-       z        = SamplingLayer()([z_mean, z_logvar]) 
-         
-       return z_mean, z_logvar, z 
diff --git a/VAE.Lightning/modules/models/VAE.py b/VAE.Lightning/modules/models/VAE.py
deleted file mode 100644
index 771e11cea40501bdb99f270698a396c350f2306c..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/models/VAE.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|                              VAE Example
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2020 
-# ------------------------------------------------------------------
-# by Achille Mbogol Touye (sep 2023)
-#
-
-import os
-import torch
-import numpy as np
-import torch.nn as nn
-import lightning.pytorch as pl
-
-from IPython.display import display,Markdown
-from modules.layers  import VariationalLossLayer
-
-
-class VAE(pl.LightningModule):
-    '''
-    A VAE model, built from given encoder and decoder
-    '''
-
-    version = '1.4'
-
-    def __init__(self, encoder=None, decoder=None, loss_weights=[1,.001], **kwargs):
-        '''
-        VAE instantiation with encoder, decoder and loss weights
-        args :
-            encoder : Encoder model
-            decoder : Decoder model
-            loss_weights : Weights of the loss functions: reconstruction_loss and kl_loss ([1, .001])
-        return:
-            None
-        '''
-        super(VAE, self).__init__(**kwargs)
-        self.encoder      = encoder
-        self.decoder      = decoder
-        self.loss_weights = loss_weights
-        print(f'Fidle VAE is ready :-)  loss_weights={list(self.loss_weights)}')
-       
-        
-    def forward(self, inputs):
-        '''
-        args:
-            inputs : Model inputs
-        return:
-            output : Output of the model 
-        '''
-        z_mean, z_logvar, z = self.encoder(inputs)
-        output              = self.decoder(z)
-        return output
-                    
-
-    def training_step(self, batch, batch_idx):
-        # training_step defines the train loop.
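-        # Forward pass: encode to (z_mean, z_logvar, z), decode z into x_hat,
-        # then combine the weighted reconstruction and KL losses (VariationalLossLayer).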
-        inputs, _           = batch
-        z_mean, z_logvar, z = self.encoder(inputs)
-        x_hat               = self.decoder(z)
-        
-        r_loss,kl_loss,loss = VariationalLossLayer(loss_weights=self.loss_weights)([inputs, z_mean,z_logvar,x_hat]) 
-
-        metrics = { "r_loss"     : r_loss, 
-                    "kl_loss"    : kl_loss,
-                    "vae_loss"   : loss
-                  }
-        
-        # logs metrics for each training_step
-        self.log_dict(metrics,
-                      on_step  = False,
-                      on_epoch = True, 
-                      prog_bar = True, 
-                      logger   = True
-                     ) 
-        
-        return loss
-
-
-    def configure_optimizers(self):
-        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
-        return optimizer
-        
-
-    
-    @classmethod
-    def about(cls):
-        '''Basic whoami method'''
-        display(Markdown('<br>**FIDLE 2023 - VAE**'))
-        print('Version              :', cls.version)
-        print('Lightning version    :', pl.__version__)
diff --git a/VAE.Lightning/modules/models/__init__.py b/VAE.Lightning/modules/models/__init__.py
deleted file mode 100644
index 336f99008e502418d903f34a02d1a3fd4ca98f5a..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/models/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from modules.models.VAE     import VAE
-from modules.models.Encoder import Encoder
-from modules.models.Decoder import Decoder
\ No newline at end of file
diff --git a/VAE.Lightning/modules/progressbar.py b/VAE.Lightning/modules/progressbar.py
deleted file mode 100644
index fb25a079b603ff54dcbcff4a252c0fda9f288416..0000000000000000000000000000000000000000
--- a/VAE.Lightning/modules/progressbar.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# ------------------------------------------------------------------
-#     _____ _     _ _
-#    |  ___(_) __| | | ___
-#    | |_  | |/ _` | |/ _ \
-#    |  _| | | (_| | |  __/
-#    |_|   |_|\__,_|_|\___|
-# ------------------------------------------------------------------
-# Formation Introduction au Deep Learning  (FIDLE)
-# CNRS/SARI/DEVLOG 2023 
-# ------------------------------------------------------------------
-# 2.0 version by Achille Mbogol Touye (EFELIA-MIAI/SIMAP¨), sep 2023
-
-from tqdm import tqdm as _tqdm
-from lightning.pytorch.callbacks import TQDMProgressBar
-
-# A progress bar callback that displays training metrics
-class CustomTrainProgressBar(TQDMProgressBar):
-    def __init__(self):
-        super().__init__()
-        self._val_progress_bar     = _tqdm()
-        self._predict_progress_bar = _tqdm()
-        
-    def init_predict_tqdm(self):
-        bar = super().init_predict_tqdm()
-        bar.set_description("Predicting")
-        return bar
-
-    def init_train_tqdm(self):
-        bar=super().init_train_tqdm()
-        bar.set_description("Training")
-        return bar    
-
-    @property
-    def val_progress_bar(self):
-        if self._val_progress_bar is None:
-            raise ValueError("The `_val_progress_bar` reference has not been set yet.")
-        return self._val_progress_bar
-
-    @property
-    def predict_progress_bar(self) -> _tqdm:
-        if self._predict_progress_bar is None:
-            raise TypeError(f"The `{self.__class__.__name__}._predict_progress_bar` reference has not been set yet.")
-        return self._predict_progress_bar    
-    
-
-    def on_validation_start(self, trainer, pl_module):
-        # Disable the validation progress bar display
-        self.val_progress_bar.disable = True
-
-    def on_predict_start(self, trainer, pl_module):
-        # Disable the predict progress bar display
-        self.predict_progress_bar.disable = True
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
index c3754230633fa3f421619e7afa10fc7e6a34e6e7..2737f21f6a293a31940976bb6a516e30bb855a19 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -37,8 +37,8 @@ RUN pip3 install --no-cache-dir --upgrade tensorboard tensorboardX jupyter ipywi
 RUN bin/rm /usr/local/share/jupyter/kernels/python3/logo*      
     
 # Change default logo and name kernels
-COPY images/env-fidle.png /usr/local/share/jupyter/kernels/python3/logo-64x64.png
-COPY images/env-fidle.svg /usr/local/share/jupyter/kernels/python3/logo-svg.svg
+COPY images/env-keras3.png /usr/local/share/jupyter/kernels/python3/logo-64x64.png
+COPY images/env-keras3.svg /usr/local/share/jupyter/kernels/python3/logo-svg.svg
 
 # Get Fidle datasets
 RUN mkdir /data && \
diff --git a/docker/images/env-fidle.png b/docker/images/env-fidle.png
deleted file mode 100644
index 5d7548194ce56bbbe9adb7d16bab5cf199051b2b..0000000000000000000000000000000000000000
Binary files a/docker/images/env-fidle.png and /dev/null differ
diff --git a/docker/images/env-fidle.svg b/docker/images/env-fidle.svg
deleted file mode 100644
index 9f3f0b984e82e1afb4a5c49c75e7622334a2a2a6..0000000000000000000000000000000000000000
--- a/docker/images/env-fidle.svg
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 125.8 125.8">
-  <defs>
-    <style>
-      .k {
-        fill: #fff;
-      }
-
-      .l {
-        fill: url(#h);
-      }
-
-      .m {
-        fill: none;
-      }
-
-      .n {
-        fill: #ee4c2c;
-      }
-
-      .o {
-        fill: url(#j);
-      }
-
-      .p {
-        clip-path: url(#i);
-      }
-
-      .q {
-        fill: #e12229;
-      }
-
-      .r {
-        clip-path: url(#g);
-      }
-    </style>
-    <clipPath id="g">
-      <polygon points="62.24 20.43 36.46 5.76 36.46 66.03 46.77 60.07 46.77 43.12 54.56 47.58 54.45 36.01 46.77 31.54 46.77 24.78 62.24 33.83 62.24 20.43" style="fill: none;"/>
-    </clipPath>
-    <linearGradient id="h" data-name="Dégradé sans nom 2" x1="5.98" y1="-5330.53" x2="69.23" y2="-5330.53" gradientTransform="translate(0 -5294.69) scale(1 -1)" gradientUnits="userSpaceOnUse">
-      <stop offset="0" stop-color="#ff6f00"/>
-      <stop offset="1" stop-color="#ffa800"/>
-    </linearGradient>
-    <clipPath id="i">
-      <polygon points="8.39 20.43 34.17 5.76 34.17 66.03 23.86 60.07 23.86 24.78 8.39 33.83 8.39 20.43" style="fill: none;"/>
-    </clipPath>
-    <linearGradient id="j" data-name="Dégradé sans nom 2" x1="5.64" x2="68.89" xlink:href="#h"/>
-  </defs>
-  <g id="a" data-name="Calque 1"/>
-  <g id="b" data-name="Calque 2">
-    <g id="c" data-name="Iconographie">
-      <g>
-        <rect width="125.8" height="125.8" style="fill: #fff;"/>
-        <g>
-          <g id="d" data-name="group">
-            <path id="e" data-name="Path" d="M110.22,22.14l-4.44,4.44c7.27,7.27,7.27,18.97,0,26.1-7.27,7.27-18.97,7.27-26.1,0-7.27-7.27-7.27-18.97,0-26.1l11.5-11.5,1.61-1.61V4.78l-17.36,17.36c-9.69,9.69-9.69,25.3,0,34.99,9.69,9.69,25.3,9.69,34.78,0,9.69-9.76,9.69-25.3,0-34.99Z" style="fill: #ee4c2c;"/>
-            <path id="f" data-name="Path-1" d="M104.77,17.83c0,1.78-1.45,3.23-3.23,3.23s-3.23-1.45-3.23-3.23,1.45-3.23,3.23-3.23,3.23,1.45,3.23,3.23Z" style="fill: #ee4c2c;"/>
-          </g>
-          <g>
-            <g style="clip-path: url(#g);">
-              <path d="M5.98,5.53h63.25v60.62H5.98V5.53Z" style="fill: url(#h);"/>
-            </g>
-            <g style="clip-path: url(#i);">
-              <path d="M5.64,5.53h63.25v60.62H5.64V5.53Z" style="fill: url(#j);"/>
-            </g>
-          </g>
-          <g>
-            <path d="M96.62,104.09c4.29-.4,7.17-1.94,9.48-3.18,1.58-.85,2.9-1.55,4.24-1.66,.76-2.49,1.18-5.09,1.18-7.73,0-8.79-4.27-16.92-11.53-21.81-3.37-2.27-11.13-5.29-21.33-3.6-7.49,1.24-15.26,6.91-18.3,9.45-1.83,1.53-23.6,22.17-32.68,18.06-6.14-2.78,4.83-12.91-.21-22.17-.14-.25-.5-.28-.67-.04-2.56,3.46-5.06,7.6-8.47,5.74-1.52-.83-2.89-3.49-3.95-5.17-.21-.34-.74-.17-.73,.23,.39,11.62,6.02,20.18,10.54,25.6,7.83,9.37,21.55,20.28,43.72,22.49,9.6,.96,32.37-2.07,41.47-18.44-.65,.26-1.37,.65-2.19,1.09-2.36,1.27-5.59,3-10.35,3.44-.32,.03-.63,.04-.95,.04-4.33,0-8.36-2.63-10.32-4.14-.15,.04-.3,.06-.46,.06-1.07,0-1.94-.87-1.94-1.94s.87-1.94,1.94-1.94,1.94,.87,1.94,1.94c0,.03,0,.06,0,.09,1.87,1.43,5.77,3.95,9.59,3.58Zm-29.02,8.57c-4,1.07-6.6,1.51-8.29,1.51-1.87,0-2.65-.53-3.02-1.33-.98-2.1,2.54-5.13,5.66-7.31,.52-.36,1.24-.23,1.61,.29,.36,.52,.24,1.24-.29,1.61-2.21,1.54-4.32,3.49-4.76,4.38,.61,.13,2.59,.22,8.5-1.37,.62-.17,1.25,.2,1.41,.82,.17,.62-.2,1.25-.81,1.41Zm26.71-28.1c2.28,0,4.12,1.88,4.27,4.24-.48-.92-1.43-1.56-2.55-1.56-1.59,0-2.88,1.29-2.88,2.88s1.29,2.88,2.88,2.88c.18,0,.36-.02,.53-.05-.66,.43-1.43,.7-2.26,.7-2.37,0-4.31-2.04-4.31-4.54,0-2.51,1.93-4.54,4.31-4.54Z" style="fill: #e12229;"/>
-            <g>
-              <path d="M85.39,60.57c-.55,0-.84-.41-.94-.54-.38-.54-.84-.73-1.69-.73-.25,0-.49,.02-.72,.03-.26,.02-.51,.03-.74,.03-.37,0-.92-.03-1.41-.3-1.6-.88-2.18-2.89-1.3-4.49,.58-1.06,1.69-1.71,2.9-1.71,.56,0,1.11,.14,1.59,.41,1.21,.67,3.53,5.61,3.29,6.54l-.19,.75-.79,.02Z" style="fill: #e12229;"/>
-              <path d="M81.49,53.85c.38,0,.76,.09,1.11,.29,1.02,.56,3.04,5.43,2.79,5.43-.02,0-.06-.04-.12-.12-.7-.97-1.61-1.15-2.5-1.15-.52,0-1.02,.06-1.46,.06-.36,0-.67-.04-.93-.18-1.11-.61-1.52-2.02-.91-3.13,.42-.76,1.21-1.19,2.02-1.19m0-2c-1.57,0-3.02,.85-3.77,2.23-1.15,2.08-.39,4.7,1.69,5.85,.7,.38,1.41,.43,1.89,.43,.25,0,.52-.02,.8-.03,.21-.01,.44-.03,.66-.03,.65,0,.74,.13,.87,.32,.57,.79,1.27,.96,1.75,.96h1.55l.4-1.52c.12-.46,.27-1.03-1.13-3.98-1.26-2.65-2.04-3.35-2.65-3.68-.64-.35-1.35-.54-2.08-.54h0Z" style="fill: #fff;"/>
-            </g>
-            <g>
-              <path d="M88.42,64.01c-.39,0-.9-.24-1.08-.9-.37-1.36,.54-7.02,3.38-8.69,.49-.29,1.04-.44,1.61-.44,1.2,0,2.32,.66,2.9,1.72,.43,.77,.53,1.67,.28,2.51-.25,.85-.81,1.55-1.58,1.98-.18,.1-.4,.21-.63,.33-1.14,.58-2.87,1.47-3.93,2.95-.33,.46-.73,.52-.95,.52h0Z" style="fill: #e12229;"/>
-              <path d="M92.32,54.99c.81,0,1.61,.44,2.03,1.21,.62,1.11,.21,2.52-.91,3.13-1.12,.61-3.47,1.58-4.89,3.58-.05,.07-.1,.1-.13,.1-.52,0,.12-6.15,2.8-7.72,.35-.2,.73-.3,1.11-.3m0-2c-.74,0-1.48,.2-2.12,.58-3.4,2-4.29,8.13-3.83,9.81,.31,1.13,1.25,1.63,2.05,1.63,.69,0,1.33-.34,1.76-.94,.92-1.28,2.45-2.07,3.57-2.64,.25-.13,.47-.24,.66-.35,1.01-.56,1.74-1.47,2.06-2.58,.32-1.11,.19-2.27-.37-3.28-.76-1.38-2.21-2.24-3.78-2.24h0Z" style="fill: #fff;"/>
-            </g>
-            <g>
-              <path d="M88.02,54.01c-.26,0-.75-.1-1.05-.75-.41-.89-1.01-4.47,.57-6.3,.48-.56,1.18-.88,1.92-.88,.61,0,1.2,.22,1.64,.63,1,.91,1.05,2.5,.1,3.55-.1,.11-.21,.22-.34,.36-.58,.6-1.46,1.52-1.82,2.61-.19,.58-.67,.79-1.03,.79h0Z" style="fill: #e12229;"/>
-              <path d="M89.46,47.08c.35,0,.7,.12,.97,.37,.6,.54,.62,1.5,.04,2.14-.58,.64-1.86,1.77-2.37,3.33-.02,.07-.05,.1-.08,.1-.33,0-1.07-3.84,.28-5.4,.31-.36,.74-.54,1.16-.54m0-2c-1.03,0-2,.45-2.68,1.23-1.88,2.18-1.28,6.15-.72,7.37,.57,1.24,1.64,1.34,1.96,1.34,.91,0,1.69-.58,1.98-1.48,.28-.87,1.04-1.66,1.59-2.23,.13-.14,.26-.27,.36-.38,.62-.69,.96-1.57,.94-2.48-.02-.95-.42-1.85-1.11-2.48-.63-.57-1.45-.89-2.31-.89h0Z" style="fill: #fff;"/>
-            </g>
-          </g>
-        </g>
-      </g>
-    </g>
-  </g>
-</svg>
\ No newline at end of file
diff --git a/docker/images/env-keras3.png b/docker/images/env-keras3.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccf36706c866dcc5e073da920a367482283fcf08
Binary files /dev/null and b/docker/images/env-keras3.png differ
diff --git a/docker/images/env-keras3.svg b/docker/images/env-keras3.svg
new file mode 100644
index 0000000000000000000000000000000000000000..c3e015857fad12afead9abe53d5f70f5041f53d5
--- /dev/null
+++ b/docker/images/env-keras3.svg
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="Calque_2" data-name="Calque 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
+  <defs>
+    <style>
+      .cls-1 {
+        fill: #d00000;
+      }
+
+      .cls-1, .cls-2, .cls-3, .cls-4, .cls-5 {
+        stroke-width: 0px;
+      }
+
+      .cls-2 {
+        fill: none;
+      }
+
+      .cls-3 {
+        fill: #fff;
+      }
+
+      .cls-4 {
+        fill: #e12229;
+      }
+
+      .cls-5 {
+        fill: #ee4c2c;
+      }
+    </style>
+  </defs>
+  <g id="Mode_Isolation" data-name="Mode Isolation">
+    <g>
+      <rect class="cls-3" width="100" height="100"/>
+      <g id="group">
+        <path id="Path" class="cls-5" d="M84.64,15.79l-3.09,3.09c5.06,5.06,5.06,13.21,0,18.17-5.06,5.06-13.21,5.06-18.17,0-5.06-5.06-5.06-13.21,0-18.17l8.01-8.01,1.12-1.12V3.7l-12.08,12.08c-6.75,6.75-6.75,17.61,0,24.36,6.75,6.75,17.61,6.75,24.22,0,6.75-6.79,6.75-17.61,0-24.36Z"/>
+        <path id="Path-1" class="cls-5" d="M80.85,12.79c0,1.24-1.01,2.25-2.25,2.25s-2.25-1.01-2.25-2.25,1.01-2.25,2.25-2.25,2.25,1.01,2.25,2.25Z"/>
+      </g>
+      <g>
+        <g>
+          <path class="cls-2" d="M52.97,86.43c-4.89,1.33-6.52,1.26-7.02,1.15.37-.75,2.11-2.39,3.93-3.69.43-.31.54-.91.24-1.35-.3-.44-.89-.55-1.33-.24-2.58,1.83-5.48,4.39-4.67,6.16.31.67.95,1.12,2.5,1.12,1.4,0,3.55-.37,6.85-1.27.51-.14.81-.67.67-1.19-.13-.52-.66-.83-1.17-.69Z"/>
+          <g>
+            <path class="cls-4" d="M68.15,44.5c-.34,0-.63-.17-.87-.5-.3-.42-.64-.57-1.3-.57-.2,0-.4.01-.59.03-.22.01-.42.03-.62.03-.32,0-.79-.03-1.23-.27-1.36-.77-1.86-2.52-1.11-3.9.5-.92,1.46-1.5,2.49-1.5.48,0,.96.12,1.38.36,1.06.59,2.99,4.78,2.77,5.62l-.18.7-.74.02Z"/>
+            <path class="cls-3" d="M64.93,38.75c.31,0,.63.08.92.24.85.48,2.51,4.58,2.3,4.58-.02,0-.05-.03-.1-.11-.58-.82-1.33-.97-2.06-.97-.43,0-.84.05-1.21.05-.29,0-.56-.03-.77-.15-.92-.52-1.26-1.7-.75-2.64.35-.64,1-1.01,1.67-1.01M64.93,36.87c-1.38,0-2.66.76-3.32,1.99-.99,1.83-.33,4.15,1.48,5.16.62.35,1.26.39,1.68.39.21,0,.44-.01.68-.03.17-.01.35-.02.53-.02.41,0,.45.05.53.17.55.79,1.26.9,1.64.9h1.45l.38-1.41c.11-.43.24-.93-.94-3.48-1.06-2.29-1.74-2.9-2.27-3.2-.56-.32-1.2-.48-1.84-.48h0Z"/>
+          </g>
+          <path class="cls-4" d="M62.06,75.3c-.39-.47-.34-1.18.12-1.58.46-.4,1.16-.35,1.55.13,5.79,6.92,15.18,8.77,24.52,4.83.95-2.66,1.42-5.45,1.49-8.18,0-7.41-3.53-14.26-9.52-18.38-2.78-1.91-9.2-4.45-17.62-3.04-6.19,1.04-12.61,5.82-15.12,7.97-1.51,1.29-19.5,18.68-27,15.22-5.07-2.35,3.99-10.88-.17-18.68-.11-.21-.41-.23-.55-.04-2.12,2.91-4.18,6.41-7,4.84-1.26-.7-2.39-2.94-3.26-4.36-.18-.28-.61-.14-.6.19.32,9.8,4.97,17.01,8.71,21.57,6.47,7.9,17.8,17.09,36.12,18.95,18.88,1.75,28.93-4.73,33.3-13.21-2.84.96-5.67,1.44-8.4,1.44-6.45,0-12.34-2.63-16.56-7.67ZM53.46,88.31c-3.3.9-5.45,1.27-6.85,1.27-1.55,0-2.19-.45-2.5-1.12-.81-1.77,2.1-4.32,4.67-6.16.43-.3,1.03-.2,1.33.24.3.44.19,1.05-.24,1.35-1.83,1.3-3.56,2.94-3.93,3.69.5.11,2.14.18,7.02-1.15.51-.14,1.03.17,1.17.69.14.52-.16,1.05-.67,1.19Z"/>
+          <g>
+            <path class="cls-4" d="M70.65,47.4c-.36,0-.83-.21-1-.82-.32-1.15.43-5.99,2.83-7.43.42-.25.9-.39,1.39-.39,1.04,0,2,.58,2.5,1.51.75,1.38.25,3.13-1.11,3.9-.15.09-.33.18-.53.28-.93.49-2.34,1.22-3.2,2.45-.3.42-.68.49-.88.49h0Z"/>
+            <path class="cls-3" d="M73.88,39.71c.67,0,1.33.38,1.67,1.02.51.94.17,2.12-.75,2.64s-2.86,1.33-4.04,3.01c-.04.06-.08.09-.11.09-.43,0,.1-5.18,2.31-6.51.29-.17.6-.25.91-.25M73.88,37.83c-.66,0-1.31.18-1.88.52-2.91,1.74-3.65,7.04-3.25,8.48.25.9,1.01,1.5,1.9,1.5.65,0,1.25-.32,1.64-.89.73-1.04,1.97-1.69,2.87-2.16.21-.11.39-.21.55-.29,1.81-1.02,2.47-3.33,1.48-5.17-.67-1.23-1.94-2-3.32-2h0Z"/>
+          </g>
+          <g>
+            <path class="cls-4" d="M70.32,38.97c-.19,0-.68-.07-.96-.67-.34-.73-.85-3.85.48-5.42.42-.5,1.03-.78,1.67-.78.54,0,1.05.2,1.44.56.86.8.91,2.2.09,3.11-.08.09-.17.19-.28.3-.48.5-1.19,1.26-1.48,2.17-.17.54-.62.73-.96.73h0Z"/>
+            <path class="cls-3" d="M71.52,33.04c.29,0,.58.1.8.31.49.46.51,1.26.03,1.8s-1.54,1.5-1.95,2.81c-.02.06-.04.08-.06.08-.28,0-.88-3.23.23-4.55.25-.3.61-.45.96-.45M71.52,31.16c-.92,0-1.79.41-2.39,1.11-1.6,1.89-1.08,5.4-.61,6.42.52,1.13,1.52,1.22,1.81,1.22.85,0,1.58-.54,1.85-1.39.22-.7.83-1.34,1.27-1.81.11-.12.21-.23.3-.32,1.15-1.29,1.08-3.27-.15-4.42-.56-.52-1.3-.81-2.07-.81h0Z"/>
+          </g>
+        </g>
+        <g>
+          <ellipse class="cls-3" cx="75.51" cy="68.45" rx="3.52" ry="3.88"/>
+          <ellipse class="cls-4" cx="76.93" cy="69.31" rx="2.38" ry="2.42"/>
+        </g>
+      </g>
+      <g>
+        <path class="cls-3" d="M43.24,43.2s0,0,0,0H11.89s0,0,0,0V11.85s0,0,0,0h31.35s0,0,0,0v31.35h0Z"/>
+        <path class="cls-1" d="M42.72,42.68s0,0,0,0H12.41s0,0,0,0V12.37s0,0,0,0h30.31s0,0,0,0v30.31h0Z"/>
+        <path class="cls-3" d="M20.68,35.76s.01.05.03.07l.52.52s.05.03.07.03h1.78s.05-.01.07-.03l.52-.52s.03-.05.03-.07v-5.63s.01-.05.03-.07l2.26-2.15s.04-.01.05,0l5.7,8.44s.04.03.06.03h2.52s.05-.02.06-.04l.46-.88s0-.05,0-.07l-6.67-9.66s-.01-.05,0-.06l6.13-6.1s.03-.05.03-.07v-.11s0-.06-.02-.08l-.35-.81s-.04-.04-.06-.04h-2.49s-.05.01-.07.03l-7.62,7.64s-.03.01-.03-.01v-7.01s-.01-.06-.03-.07l-.51-.55s-.05-.03-.07-.03h-1.79s-.05.01-.07.03l-.52.56s-.03.05-.03.07v16.65h0Z"/>
+      </g>
+    </g>
+  </g>
+</svg>
\ No newline at end of file
diff --git a/fidle/about.yml b/fidle/about.yml
index 94c6ed9a3a1b58cb871846fe9303c4948de1548a..3e184c02f199dd9657b99dd12c024d170e3a0db4 100644
--- a/fidle/about.yml
+++ b/fidle/about.yml
@@ -13,7 +13,7 @@
 #
 # This file describes the notebooks used by the Fidle training.
 
-version:                  3.0.0
+version:                  3.0.2
 content:                  notebooks
 name:                     Notebooks Fidle
 description:              All notebooks used by the Fidle training
@@ -37,10 +37,10 @@ toc:
   Embedding.Keras3:       Sentiment analysis with word embedding, using Keras3/PyTorch
   RNN.Keras3:             Time series with Recurrent Neural Network (RNN), using Keras3/PyTorch
   Transformers.PyTorch:   Sentiment analysis with transformer, using PyTorch
-  AE.Keras2:              Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
-  VAE.Keras2:             Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
-  VAE.Lightning:          Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
-  DCGAN.Lightning:        Generative Adversarial Networks (GANs), using Lightning
+#  AE.Keras2:              Unsupervised learning with an autoencoder neural network (AE), using Keras2 (obsolete)
+#  VAE.Keras2:             Generative network with Variational Autoencoder (VAE), using Keras2 (obsolete)
+#  VAE.Lightning:          Generative network with Variational Autoencoder (VAE), using PyTorch Lightning
+#  DCGAN.Lightning:        Generative Adversarial Networks (GANs), using Lightning
   DDPM.PyTorch:           Diffusion Model (DDPM) using PyTorch
   Optimization.PyTorch:   Training optimization, using PyTorch
   DRL.PyTorch:            Deep Reinforcement Learning (DRL), using PyTorch
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index f8c8ee262b73969cd942d874ae066cc5751674e5..c8446a860e59e65c2a59de9bb45ca46a2b64c3ac 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -1,6 +1,6 @@
 campain:
   version: '1.0'
-  description: Automatically generated ci profile (21/01/24 17:21:08)
+  description: Automatically generated ci profile (23/01/24 10:53:30)
   directory: ./campains/default
   existing_notebook: 'remove    # remove|skip'
   report_template: 'fidle     # fidle|default'
@@ -184,122 +184,6 @@ TRANS1:
 TRANS2:
   notebook: Transformers.PyTorch/02-distilbert_colab.ipynb
 
-#
-# ------------ AE.Keras2
-#
-K2AE1:
-  notebook: AE.Keras2/01-Prepare-MNIST-dataset.ipynb
-  overrides:
-    prepared_dataset: default
-    scale: default
-    progress_verbosity: default
-K2AE2:
-  notebook: AE.Keras2/02-AE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2AE3:
-  notebook: AE.Keras2/03-AE-with-MNIST-post.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    train_prop: default
-K2AE4:
-  notebook: AE.Keras2/04-ExtAE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2AE5:
-  notebook: AE.Keras2/05-ExtAE-with-MNIST.ipynb
-  overrides:
-    prepared_dataset: default
-    dataset_seed: default
-    scale: default
-    latent_dim: default
-    train_prop: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-
-#
-# ------------ VAE.Keras2
-#
-K2VAE1:
-  notebook: VAE.Keras2/01-VAE-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2VAE2:
-  notebook: VAE.Keras2/02-VAE-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-K2VAE3:
-  notebook: VAE.Keras2/03-VAE-with-MNIST-post.ipynb
-  overrides:
-    scale: default
-    seed: default
-    models_dir: default
-
-#
-# ------------ VAE.Lightning
-#
-LVAE1:
-  notebook: VAE.Lightning/01-VAE-lightning-with-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-LVAE2:
-  notebook: VAE.Lightning/02-VAE-with-Lightning-MNIST.ipynb
-  overrides:
-    latent_dim: default
-    loss_weights: default
-    scale: default
-    seed: default
-    batch_size: default
-    epochs: default
-    fit_verbosity: default
-LVAE3:
-  notebook: VAE.Lightning/03-VAE-Lightning-with-MNIST-post.ipynb
-  overrides:
-    scale: default
-    seed: default
-    models_dir: default
-
-#
-# ------------ DCGAN.Lightning
-#
-LSHEEP3:
-  notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
-
 #
 # ------------ DDPM.PyTorch
 #
diff --git a/fidle/img/logo-YouTube.png b/fidle/img/logo-YouTube.png
new file mode 100644
index 0000000000000000000000000000000000000000..63cc69f99c93d39dd4051901dd380bfe79ed1ed5
Binary files /dev/null and b/fidle/img/logo-YouTube.png differ