From cb776ade458f773cb537d9ee4ae449a1ac8bfe39 Mon Sep 17 00:00:00 2001
From: Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>
Date: Sun, 3 Mar 2024 20:56:31 +0100
Subject: [PATCH] Update notebook names, TOC and CI (3.0.9)

---
 AE.Keras3/01-Prepare-MNIST-dataset.ipynb     |   4 +-
 AE.Keras3/02-AE-with-MNIST.ipynb             |   4 +-
 AE.Keras3/03-AE-with-MNIST-post.ipynb        |   6 +-
 AE.Keras3/04-ExtAE-with-MNIST.ipynb          |   4 +-
 AE.Keras3/05-ExtAE-with-MNIST.ipynb          |   4 +-
 DCGAN.Lightning/01-DCGAN-PL.ipynb            |  12 +-
 README.ipynb                                 |  34 ++---
 README.md                                    |  22 ++--
 VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb |  15 +--
 VAE.Keras3/02-VAE-with-MNIST.ipynb           |   4 +-
 VAE.Keras3/03-VAE-with-MNIST-post.ipynb      |   6 +-
 fidle/about.yml                              |   2 +-
 fidle/ci/default.yml                         |  21 +--
 fidle/ci/gpu-scale1.yml                      | 130 +++++++++++++++++++
 14 files changed, 197 insertions(+), 71 deletions(-)

diff --git a/AE.Keras3/01-Prepare-MNIST-dataset.ipynb b/AE.Keras3/01-Prepare-MNIST-dataset.ipynb
index be56312..8886e25 100644
--- a/AE.Keras3/01-Prepare-MNIST-dataset.ipynb
+++ b/AE.Keras3/01-Prepare-MNIST-dataset.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [AE1] - Prepare a noisy MNIST dataset\n",
+    "# <!-- TITLE --> [K3AE1] - Prepare a noisy MNIST dataset\n",
     "<!-- DESC --> Episode 1: Preparation of a noisy MNIST dataset\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -52,7 +52,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('AE1')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3AE1')"
    ]
   },
   {
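The only functional change in this notebook (and in the sibling AE/VAE notebooks below) is the run id passed to fidle.init(), which now carries a framework prefix: K3 for Keras 3, PL for PyTorch Lightning further down. What fidle does with that id is not shown in this patch; the sketch below is only an assumption, suggested by the './run/K3AE2/models' paths that appear later, that the id names a per-notebook run directory. init_run() and its signature are hypothetical, not the fidle API.

# Hypothetical sketch (not fidle.init): map a run id such as 'K3AE1' to a run directory.
from pathlib import Path

def init_run(run_id: str, base: str = './run') -> tuple[str, str]:
    """Create <base>/<run_id>/models if needed and return (run_id, run_dir)."""
    run_dir = Path(base) / run_id
    (run_dir / 'models').mkdir(parents=True, exist_ok=True)
    return run_id, str(run_dir)

run_id, run_dir = init_run('K3AE1')   # -> ('K3AE1', 'run/K3AE1')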
diff --git a/AE.Keras3/02-AE-with-MNIST.ipynb b/AE.Keras3/02-AE-with-MNIST.ipynb
index e593173..be3b4ff 100644
--- a/AE.Keras3/02-AE-with-MNIST.ipynb
+++ b/AE.Keras3/02-AE-with-MNIST.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [AE2] - Building and training an AE denoiser model\n",
+    "# <!-- TITLE --> [K3AE2] - Building and training an AE denoiser model\n",
     "<!-- DESC --> Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset.\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -66,7 +66,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('AE2')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3AE2')"
    ]
   },
   {
diff --git a/AE.Keras3/03-AE-with-MNIST-post.ipynb b/AE.Keras3/03-AE-with-MNIST-post.ipynb
index e9b3f78..3ecc33f 100644
--- a/AE.Keras3/03-AE-with-MNIST-post.ipynb
+++ b/AE.Keras3/03-AE-with-MNIST-post.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [AE3] - Playing with our denoiser model\n",
+    "# <!-- TITLE --> [K3AE3] - Playing with our denoiser model\n",
     "<!-- DESC --> Episode 2 : Using the previously trained autoencoder to denoise data\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
@@ -53,7 +53,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('AE3')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3AE3')"
    ]
   },
   {
@@ -75,7 +75,7 @@
    "outputs": [],
    "source": [
     "prepared_dataset = './data/mnist-noisy.h5'\n",
-    "saved_models     = './run/AE2/models'\n",
+    "saved_models     = './run/K3AE2/models'\n",
     "dataset_seed     = 123\n",
     "scale            = 1\n",
     "train_prop       = .8"
diff --git a/AE.Keras3/04-ExtAE-with-MNIST.ipynb b/AE.Keras3/04-ExtAE-with-MNIST.ipynb
index 2e74e09..2a26788 100644
--- a/AE.Keras3/04-ExtAE-with-MNIST.ipynb
+++ b/AE.Keras3/04-ExtAE-with-MNIST.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [AE4] - Denoiser and classifier model\n",
+    "# <!-- TITLE --> [K3AE4] - Denoiser and classifier model\n",
     "<!-- DESC --> Episode 4 : Construction of a denoiser and classifier model\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -64,7 +64,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('AE4')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3AE4')"
    ]
   },
   {
diff --git a/AE.Keras3/05-ExtAE-with-MNIST.ipynb b/AE.Keras3/05-ExtAE-with-MNIST.ipynb
index aa5cc5b..589ace8 100644
--- a/AE.Keras3/05-ExtAE-with-MNIST.ipynb
+++ b/AE.Keras3/05-ExtAE-with-MNIST.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [AE5] - Advanced denoiser and classifier model\n",
+    "# <!-- TITLE --> [K3AE5] - Advanced denoiser and classifier model\n",
     "<!-- DESC --> Episode 5 : Construction of an advanced denoiser and classifier model\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -65,7 +65,7 @@
     "\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('AE5')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3AE5')"
    ]
   },
   {
diff --git a/DCGAN.Lightning/01-DCGAN-PL.ipynb b/DCGAN.Lightning/01-DCGAN-PL.ipynb
index 99e9ab5..cfb550e 100644
--- a/DCGAN.Lightning/01-DCGAN-PL.ipynb
+++ b/DCGAN.Lightning/01-DCGAN-PL.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [SHEEP3] - A DCGAN to Draw a Sheep, using Pytorch Lightning\n",
+    "# <!-- TITLE --> [PLSHEEP3] - A DCGAN to Draw a Sheep, using Pytorch Lightning\n",
     "<!-- DESC --> \"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
@@ -66,7 +66,7 @@
     "from modules.Discriminators      import *\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('SHEEP3')"
+    "run_id, run_dir, datasets_dir = fidle.init('PLSHEEP3')"
    ]
   },
   {
@@ -390,7 +390,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "gan = GAN.load_from_checkpoint('./run/SHEEP3/models/bestModel.ckpt')"
+    "gan = GAN.load_from_checkpoint(f'{run_dir}/models/bestModel.ckpt')"
    ]
   },
   {
@@ -445,9 +445,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Fidle-keras 3",
+   "display_name": "fidle-env",
    "language": "python",
-   "name": "fidle-k3"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -459,7 +459,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.6"
+   "version": "3.9.2"
   }
  },
  "nbformat": 4,
diff --git a/README.ipynb b/README.ipynb
index 311cc15..8b92b21 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "42b2ce5d",
+   "id": "60a4d6df",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2024-03-03T13:46:52.756661Z",
-     "iopub.status.busy": "2024-03-03T13:46:52.756452Z",
-     "iopub.status.idle": "2024-03-03T13:46:52.770894Z",
-     "shell.execute_reply": "2024-03-03T13:46:52.769696Z"
+     "iopub.execute_input": "2024-03-03T19:38:38.844681Z",
+     "iopub.status.busy": "2024-03-03T19:38:38.844023Z",
+     "iopub.status.idle": "2024-03-03T19:38:38.854300Z",
+     "shell.execute_reply": "2024-03-03T19:38:38.853434Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -53,7 +53,7 @@
        "For more information, you can contact us at :  \n",
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
-       "Current Version : <!-- VERSION_BEGIN -->3.0.8<!-- VERSION_END -->\n",
+       "Current Version : <!-- VERSION_BEGIN -->3.0.9<!-- VERSION_END -->\n",
        "\n",
        "\n",
        "## Course materials\n",
@@ -68,7 +68,7 @@
        "## Jupyter notebooks\n",
        "\n",
        "<!-- TOC_BEGIN -->\n",
-       "<!-- Automatically generated on : 03/03/24 14:46:51 -->\n",
+       "<!-- Automatically generated on : 03/03/24 20:38:37 -->\n",
        "\n",
        "### Linear and logistic regression\n",
        "- **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  \n",
@@ -155,27 +155,27 @@
        "Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version\n",
        "\n",
        "### Unsupervised learning with an autoencoder neural network (AE), using Keras3\n",
-       "- **[AE1](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)  \n",
+       "- **[K3AE1](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)  \n",
        "Episode 1: Preparation of a noisy MNIST dataset\n",
-       "- **[AE2](AE.Keras3/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras3/02-AE-with-MNIST.ipynb)  \n",
+       "- **[K3AE2](AE.Keras3/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras3/02-AE-with-MNIST.ipynb)  \n",
        "Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset.\n",
-       "- **[AE3](AE.Keras3/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras3/03-AE-with-MNIST-post.ipynb)  \n",
+       "- **[K3AE3](AE.Keras3/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras3/03-AE-with-MNIST-post.ipynb)  \n",
        "Episode 2 : Using the previously trained autoencoder to denoise data\n",
-       "- **[AE4](AE.Keras3/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras3/04-ExtAE-with-MNIST.ipynb)  \n",
+       "- **[K3AE4](AE.Keras3/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras3/04-ExtAE-with-MNIST.ipynb)  \n",
        "Episode 4 : Construction of a denoiser and classifier model\n",
-       "- **[AE5](AE.Keras3/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras3/05-ExtAE-with-MNIST.ipynb)  \n",
+       "- **[K3AE5](AE.Keras3/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras3/05-ExtAE-with-MNIST.ipynb)  \n",
        "Episode 5 : Construction of an advanced denoiser and classifier model\n",
        "\n",
        "### Generative network with Variational Autoencoder (VAE), using Keras3\n",
-       "- **[VAE1](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)  \n",
+       "- **[K3VAE1](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)  \n",
        "Construction and training of a VAE, using functional APPI, with a latent space of small dimension.\n",
-       "- **[VAE2](VAE.Keras3/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras3/02-VAE-with-MNIST.ipynb)  \n",
+       "- **[K3VAE2](VAE.Keras3/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras3/02-VAE-with-MNIST.ipynb)  \n",
        "Construction and training of a VAE, using model subclass, with a latent space of small dimension.\n",
-       "- **[VAE3](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)  \n",
+       "- **[K3VAE3](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)  \n",
        "Visualization and analysis of the VAE's latent space of the dataset MNIST\n",
        "\n",
        "### Generative Adversarial Networks (GANs), using Lightning\n",
-       "- **[SHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  \n",
+       "- **[PLSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  \n",
        "\"Draw me a sheep\", revisited with a DCGAN, using Pytorch Lightning\n",
        "\n",
        "### Diffusion Model (DDPM) using PyTorch\n",
@@ -237,7 +237,7 @@
     "from IPython.display import display,Markdown\n",
     "display(Markdown(open('README.md', 'r').read()))\n",
     "#\n",
-    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 03/03/24 14:46:51"
+    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 03/03/24 20:38:37"
    ]
   }
  ],
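The README cells above are not edited by hand: the listing under the TOC_BEGIN marker is regenerated from the '<!-- TITLE -->' and '<!-- DESC -->' tags carried by each notebook, which is why the new K3*/PL* ids show up here automatically. The reader below only illustrates that tag convention; the actual generator belongs to the fidle tooling and is not part of this patch.

# Illustration of the TITLE/DESC tag convention used to build the TOC (not the fidle generator).
import json
import re

def read_title_desc(path: str):
    """Return (run_id, title, desc) extracted from a notebook's markdown cells."""
    with open(path, encoding='utf-8') as f:
        nb = json.load(f)
    text = ''.join(''.join(c['source']) for c in nb['cells'] if c['cell_type'] == 'markdown')
    run_id, title = re.search(r'<!-- TITLE -->\s*\[(.+?)\]\s*-\s*(.+)', text).groups()
    desc = re.search(r'<!-- DESC -->\s*(.+)', text).group(1)
    return run_id, title.strip(), desc.strip()

print(read_title_desc('AE.Keras3/01-Prepare-MNIST-dataset.ipynb'))
# -> ('K3AE1', 'Prepare a noisy MNIST dataset', 'Episode 1: Preparation of a noisy MNIST dataset')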
diff --git a/README.md b/README.md
index cbcd47c..320b9d2 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ For more information, see **https://fidle.cnrs.fr** :
 For more information, you can contact us at :  
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
-Current Version : <!-- VERSION_BEGIN -->3.0.8<!-- VERSION_END -->
+Current Version : <!-- VERSION_BEGIN -->3.0.9<!-- VERSION_END -->
 
 
 ## Course materials
@@ -47,7 +47,7 @@ Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)
 ## Jupyter notebooks
 
 <!-- TOC_BEGIN -->
-<!-- Automatically generated on : 03/03/24 14:46:51 -->
+<!-- Automatically generated on : 03/03/24 20:38:37 -->
 
 ### Linear and logistic regression
 - **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  
@@ -134,27 +134,27 @@ Using a Transformer to perform a sentiment analysis (IMDB) - Jean Zay version
 Using a Transformer to perform a sentiment analysis (IMDB) - Colab version
 
 ### Unsupervised learning with an autoencoder neural network (AE), using Keras3
-- **[AE1](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)  
+- **[K3AE1](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)** - [Prepare a noisy MNIST dataset](AE.Keras3/01-Prepare-MNIST-dataset.ipynb)  
 Episode 1: Preparation of a noisy MNIST dataset
-- **[AE2](AE.Keras3/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras3/02-AE-with-MNIST.ipynb)  
+- **[K3AE2](AE.Keras3/02-AE-with-MNIST.ipynb)** - [Building and training an AE denoiser model](AE.Keras3/02-AE-with-MNIST.ipynb)  
 Episode 1 : Construction of a denoising autoencoder and training of it with a noisy MNIST dataset.
-- **[AE3](AE.Keras3/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras3/03-AE-with-MNIST-post.ipynb)  
+- **[K3AE3](AE.Keras3/03-AE-with-MNIST-post.ipynb)** - [Playing with our denoiser model](AE.Keras3/03-AE-with-MNIST-post.ipynb)  
 Episode 2 : Using the previously trained autoencoder to denoise data
-- **[AE4](AE.Keras3/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras3/04-ExtAE-with-MNIST.ipynb)  
+- **[K3AE4](AE.Keras3/04-ExtAE-with-MNIST.ipynb)** - [Denoiser and classifier model](AE.Keras3/04-ExtAE-with-MNIST.ipynb)  
 Episode 4 : Construction of a denoiser and classifier model
-- **[AE5](AE.Keras3/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras3/05-ExtAE-with-MNIST.ipynb)  
+- **[K3AE5](AE.Keras3/05-ExtAE-with-MNIST.ipynb)** - [Advanced denoiser and classifier model](AE.Keras3/05-ExtAE-with-MNIST.ipynb)  
 Episode 5 : Construction of an advanced denoiser and classifier model
 
 ### Generative network with Variational Autoencoder (VAE), using Keras3
-- **[VAE1](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)  
+- **[K3VAE1](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)** - [First VAE, using functional API (MNIST dataset)](VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb)  
 Construction and training of a VAE, using functional API, with a latent space of small dimension.
-- **[VAE2](VAE.Keras3/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras3/02-VAE-with-MNIST.ipynb)  
+- **[K3VAE2](VAE.Keras3/02-VAE-with-MNIST.ipynb)** - [VAE, using a custom model class  (MNIST dataset)](VAE.Keras3/02-VAE-with-MNIST.ipynb)  
 Construction and training of a VAE, using model subclass, with a latent space of small dimension.
-- **[VAE3](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)  
+- **[K3VAE3](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)** - [Analysis of the VAE's latent space of MNIST dataset](VAE.Keras3/03-VAE-with-MNIST-post.ipynb)  
 Visualization and analysis of the VAE's latent space of the dataset MNIST
 
 ### Generative Adversarial Networks (GANs), using Lightning
-- **[SHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  
+- **[PLSHEEP3](DCGAN.Lightning/01-DCGAN-PL.ipynb)** - [A DCGAN to Draw a Sheep, using Pytorch Lightning](DCGAN.Lightning/01-DCGAN-PL.ipynb)  
 "Draw me a sheep", revisited with a DCGAN, using Pytorch Lightning
 
 ### Diffusion Model (DDPM) using PyTorch
diff --git a/VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb b/VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
index d0206c4..104c0b4 100644
--- a/VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
+++ b/VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [VAE1] - First VAE, using functional API (MNIST dataset)\n",
+    "# <!-- TITLE --> [K3VAE1] - First VAE, using functional API (MNIST dataset)\n",
     "<!-- DESC --> Construction and training of a VAE, using functional APPI, with a latent space of small dimension.\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -59,7 +59,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('VAE1')\n"
+    "run_id, run_dir, datasets_dir = fidle.init('K3VAE1')\n"
    ]
   },
   {
@@ -413,9 +413,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Fidle-keras 3",
+   "display_name": "fidle-env",
    "language": "python",
-   "name": "fidle-k3"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -427,12 +427,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.6"
-  },
-  "vscode": {
-   "interpreter": {
-    "hash": "b3929042cc22c1274d74e3e946c52b845b57cb6d84f2d591ffe0519b38e4896d"
-   }
+   "version": "3.9.2"
   }
  },
  "nbformat": 4,
diff --git a/VAE.Keras3/02-VAE-with-MNIST.ipynb b/VAE.Keras3/02-VAE-with-MNIST.ipynb
index 2c1b740..0cc8c11 100644
--- a/VAE.Keras3/02-VAE-with-MNIST.ipynb
+++ b/VAE.Keras3/02-VAE-with-MNIST.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [VAE2] - VAE, using a custom model class  (MNIST dataset)\n",
+    "# <!-- TITLE --> [K3VAE2] - VAE, using a custom model class  (MNIST dataset)\n",
     "<!-- DESC --> Construction and training of a VAE, using model subclass, with a latent space of small dimension.\n",
     "\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
@@ -64,7 +64,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('VAE2')\n",
+    "run_id, run_dir, datasets_dir = fidle.init('K3VAE2')\n",
     "\n",
     "VAE.about()"
    ]
diff --git a/VAE.Keras3/03-VAE-with-MNIST-post.ipynb b/VAE.Keras3/03-VAE-with-MNIST-post.ipynb
index 8833ca1..1db7d95 100644
--- a/VAE.Keras3/03-VAE-with-MNIST-post.ipynb
+++ b/VAE.Keras3/03-VAE-with-MNIST-post.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [VAE3] - Analysis of the VAE's latent space of MNIST dataset\n",
+    "# <!-- TITLE --> [K3VAE3] - Analysis of the VAE's latent space of MNIST dataset\n",
     "<!-- DESC --> Visualization and analysis of the VAE's latent space of the dataset MNIST\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
@@ -65,7 +65,7 @@
     "import fidle\n",
     "\n",
     "# Init Fidle environment\n",
-    "run_id, run_dir, datasets_dir = fidle.init('VAE3')"
+    "run_id, run_dir, datasets_dir = fidle.init('K3VAE3')"
    ]
   },
   {
@@ -83,7 +83,7 @@
    "source": [
     "scale      = 1\n",
     "seed       = 123\n",
-    "models_dir = './run/VAE2'"
+    "models_dir = './run/K3VAE2'"
    ]
   },
   {
diff --git a/fidle/about.yml b/fidle/about.yml
index 39c16fa..74bff6a 100644
--- a/fidle/about.yml
+++ b/fidle/about.yml
@@ -13,7 +13,7 @@
 #
 # This file describes the notebooks used by the Fidle training.
 
-version:                  3.0.8
+version:                  3.0.9
 content:                  notebooks
 name:                     Notebooks Fidle
 description:              All notebooks used by the Fidle training
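This version field is what the VERSION_BEGIN/VERSION_END markers in both READMEs mirror. How fidle itself consumes about.yml is not shown here; the read-back below is just a sanity check with PyYAML.

# Read the bumped version back from about.yml (PyYAML keeps '3.0.9' as a string).
import yaml

with open('fidle/about.yml', encoding='utf-8') as f:
    about = yaml.safe_load(f)
print(about['name'], about['version'])   # -> Notebooks Fidle 3.0.9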
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index f84337a..31d363e 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -1,6 +1,6 @@
 campain:
   version: '1.0'
-  description: Automatically generated ci profile (03/03/24 14:46:51)
+  description: Automatically generated ci profile (03/03/24 20:38:37)
   directory: ./campains/default
   existing_notebook: 'remove    # remove|skip'
   report_template: 'fidle     # fidle|default'
@@ -189,13 +189,13 @@ TRANS2:
 #
 # ------------ AE.Keras3
 #
-AE1:
+K3AE1:
   notebook: AE.Keras3/01-Prepare-MNIST-dataset.ipynb
   overrides:
     prepared_dataset: default
     scale: default
     progress_verbosity: default
-AE2:
+K3AE2:
   notebook: AE.Keras3/02-AE-with-MNIST.ipynb
   overrides:
     prepared_dataset: default
@@ -206,14 +206,14 @@ AE2:
     batch_size: default
     epochs: default
     fit_verbosity: default
-AE3:
+K3AE3:
   notebook: AE.Keras3/03-AE-with-MNIST-post.ipynb
   overrides:
     prepared_dataset: default
     dataset_seed: default
     scale: default
     train_prop: default
-AE4:
+K3AE4:
   notebook: AE.Keras3/04-ExtAE-with-MNIST.ipynb
   overrides:
     prepared_dataset: default
@@ -223,7 +223,7 @@ AE4:
     batch_size: default
     epochs: default
     fit_verbosity: default
-AE5:
+K3AE5:
   notebook: AE.Keras3/05-ExtAE-with-MNIST.ipynb
   overrides:
     prepared_dataset: default
@@ -237,7 +237,7 @@ AE5:
 #
 # ------------ VAE.Keras3
 #
-VAE1:
+K3VAE1:
   notebook: VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
   overrides:
     latent_dim: default
@@ -247,7 +247,7 @@ VAE1:
     batch_size: default
     epochs: default
     fit_verbosity: default
-VAE2:
+K3VAE2:
   notebook: VAE.Keras3/02-VAE-with-MNIST.ipynb
   overrides:
     latent_dim: default
@@ -257,7 +257,7 @@ VAE2:
     batch_size: default
     epochs: default
     fit_verbosity: default
-VAE3:
+K3VAE3:
   notebook: VAE.Keras3/03-VAE-with-MNIST-post.ipynb
   overrides:
     scale: default
@@ -267,7 +267,7 @@ VAE3:
 #
 # ------------ DCGAN.Lightning
 #
-SHEEP3:
+PLSHEEP3:
   notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
   overrides:
     latent_dim: default
@@ -284,6 +284,7 @@ SHEEP3:
     dataset_file: default
     data_shape: default
     scale: default
+    num_workers: default
 
 #
 # ------------ DDPM.PyTorch
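In the ci profiles each top-level key is a run id that points at a notebook plus a set of parameter overrides, so the renaming has to be mirrored in these keys as well (note the extra num_workers override added to PLSHEEP3). The snippet below only shows how such a profile could be inspected; the real runner belongs to the fidle tooling and is not part of this patch. The top-level 'campain' entry holds profile metadata and is skipped.

# Inspect a ci profile: which notebook each run id points at and how many overrides it sets.
import yaml

with open('fidle/ci/default.yml', encoding='utf-8') as f:
    profile = yaml.safe_load(f)

for run_id, entry in profile.items():
    if run_id == 'campain' or not isinstance(entry, dict) or 'notebook' not in entry:
        continue
    overrides = entry.get('overrides') or {}
    print(f"{run_id:10s} -> {entry['notebook']}  ({len(overrides)} overrides)")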
diff --git a/fidle/ci/gpu-scale1.yml b/fidle/ci/gpu-scale1.yml
index 090c84a..254f875 100644
--- a/fidle/ci/gpu-scale1.yml
+++ b/fidle/ci/gpu-scale1.yml
@@ -205,6 +205,136 @@ K3LADYB1:
     batch_size: 32
     epochs: 10
 
+#
+# ------------ AE.Keras3
+#
+K3AE1:
+  notebook: AE.Keras3/01-Prepare-MNIST-dataset.ipynb
+  overrides:
+    prepared_dataset: default
+    scale: 1
+    progress_verbosity: 2
+
+K3AE2:
+  notebook: AE.Keras3/02-AE-with-MNIST.ipynb
+  after: K3AE1
+  overrides:
+    prepared_dataset: default
+    dataset_seed: default
+    scale: 1
+    latent_dim: 10
+    train_prop: default
+    batch_size: default
+    epochs: 20
+    fit_verbosity: default
+
+K3AE3:
+  notebook: AE.Keras3/03-AE-with-MNIST-post.ipynb
+  after: K3AE2
+  overrides:
+    prepared_dataset: default
+    dataset_seed: default
+    scale: default
+    train_prop: default
+
+K3AE4:
+  notebook: AE.Keras3/04-ExtAE-with-MNIST.ipynb
+  after: K3AE1
+  overrides:
+    prepared_dataset: default
+    dataset_seed: default
+    scale: 1
+    train_prop: default
+    batch_size: default
+    epochs: 20
+    fit_verbosity: default
+
+K3AE5:
+  notebook: AE.Keras3/05-ExtAE-with-MNIST.ipynb
+  after: K3AE1
+  overrides:
+    prepared_dataset: default
+    dataset_seed: default
+    scale: 1
+    train_prop: default
+    batch_size: default
+    epochs: 30
+    fit_verbosity: default
+
+#
+# ------------ VAE.Keras3
+#
+K3VAE1:
+  notebook: VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
+  overrides:
+    latent_dim: default
+    loss_weights: default
+    scale: 1
+    seed: default
+    batch_size: default
+    epochs: 20
+    fit_verbosity: default
+
+K3VAE2:
+  notebook: VAE.Keras3/02-VAE-with-MNIST.ipynb
+  overrides:
+    latent_dim: default
+    loss_weights: default
+    scale: 1
+    seed: default
+    batch_size: default
+    epochs: 20
+    fit_verbosity: default
+
+K3VAE3:
+  notebook: VAE.Keras3/03-VAE-with-MNIST-post.ipynb
+  after: K3VAE2
+  overrides:
+    scale: 1
+    seed: default
+    models_dir: default
+
+#
+# ------------ DCGAN.Lightning
+#
+PLSHEEP3_1:
+  notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
+  overrides:
+    latent_dim: default
+    gan_name:           GAN
+    generator_name:     Generator_2
+    discriminator_name: Discriminator_3
+    epochs: 30
+    lr: default
+    b1: default
+    b2: default
+    batch_size: 64
+    num_img: default
+    fit_verbosity: default
+    dataset_file: default
+    data_shape: default
+    scale: 1
+    num_workers: 2
+
+PLSHEEP3_2:
+  notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
+  overrides:
+    latent_dim: default
+    gan_name:           WGANGP
+    generator_name:     Generator_2
+    discriminator_name: Discriminator_3
+    epochs: 30
+    lr: default
+    b1: default
+    b2: default
+    batch_size: 64
+    num_img: default
+    fit_verbosity: default
+    dataset_file: default
+    data_shape: default
+    scale: 1
+    num_workers: 2
+
 #
 # ------------ Misc
 #
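The gpu-scale1 profile gains the same entries at scale 1, with two details worth noting: several jobs declare an 'after:' dependency on the notebook that produces their input (K3AE2, K3AE4 and K3AE5 after K3AE1, K3AE3 after K3AE2, K3VAE3 after K3VAE2), and the DCGAN notebook is scheduled twice against the same file, once as PLSHEEP3_1 with gan_name GAN and once as PLSHEEP3_2 with gan_name WGANGP. The ordering sketch below is an assumption about what 'after:' implies; the actual scheduling is done by the Fidle CI tooling.

# Assumed semantics of 'after:': run a job only once its predecessor has completed.
import yaml
from graphlib import TopologicalSorter

with open('fidle/ci/gpu-scale1.yml', encoding='utf-8') as f:
    profile = yaml.safe_load(f)

jobs = {k: v for k, v in profile.items() if isinstance(v, dict) and 'notebook' in v}
deps = {k: ({v['after']} if 'after' in v else set()) for k, v in jobs.items()}

for run_id in TopologicalSorter(deps).static_order():   # predecessors come out first
    if run_id in jobs:
        print(f"run {run_id:12s} -> {jobs[run_id]['notebook']}")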
-- 
GitLab