diff --git a/GTSRB/03-Tracking-and-visualizing.ipynb b/GTSRB/03-Tracking-and-visualizing.ipynb
index 2aa572ff0c0cae4f81d24bffb8f899e5300d1536..c211ca36f6ccbe6843c878fd7b4572f537dc8bd8 100644
--- a/GTSRB/03-Tracking-and-visualizing.ipynb
+++ b/GTSRB/03-Tracking-and-visualizing.ipynb
@@ -165,10 +165,8 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "raw",
    "metadata": {},
-   "outputs": [],
    "source": [
     "%%bash\n",
     "# To clean old logs and saved model, run this cell\n",
diff --git a/GTSRB/README.ipynb b/GTSRB/README.ipynb
index b5fba4ba692e6222ee88b61e49deafddd7dc7093..163fe70440ce929dd5fa2fbcb7f9e3ae99e20f70 100644
--- a/GTSRB/README.ipynb
+++ b/GTSRB/README.ipynb
@@ -18,24 +18,6 @@
     "Traffic sign classification with **CNN**, using Tensorflow and **Keras**  \n",
     "\n",
     "\n",
-    "Prerequisite\n",
-    "------------\n",
-    "\n",
-    "Environment, with the following packages :\n",
-    " - Python 3.6\n",
-    " - numpy\n",
-    " - Tensorflow 2.0\n",
-    " - scikit-image\n",
-    " - scikit-learn\n",
-    " - Matplotlib\n",
-    " - seaborn\n",
-    " \n",
-    "You can create it from the `environment.yml` file :\n",
-    "```\n",
-    "# conda env create -f environment.yml\n",
-    "```\n",
-    "To manage conda environment see [there](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#)  \n",
-    "\n",
     "About the dataset\n",
     "-----------------\n",
     "\n",
diff --git a/README.md b/README.md
index 146861fcd8998f71db32de2aece738cb039453e3..eb7f172abaae82952ec4f9532a2544bbb2038dda 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,30 @@
 
 
-German Traffic Sign Recognition Benchmark (GTSRB)
-=================================================
+FIDLE - Formation Introduction au Deep Learning
+===============================================
 ---
-Introduction au Deep Learning  (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  
+S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  
 
 ## 1/ Environment
-To install your conda environment :  
+To run these examples, you need an environment with the following packages :
+ - Python 3.6
+ - numpy
+ - Tensorflow 2.0
+ - scikit-image
+ - scikit-learn
+ - Matplotlib
+ - seaborn
+ - pyplot (included with Matplotlib)
+
+You can create such an environment from the provided `environment.yml` file :
 ```
 conda env create -f environment.yml
 ```
 
+To manage conda environments, see [the conda documentation](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#)  
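+
+For example, assuming the environment defined in `environment.yml` is named `fidle`, typical commands are :
+```
+conda activate fidle   # activate the environment (name assumed here)
+conda deactivate       # leave it
+conda env list         # list the available environments
+```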
+
+
+
 ## 4/ Misc
 To update an existing environment :  
 ```
diff --git a/VAE/01-VAE with MNIST.ipynb b/VAE/01-VAE with MNIST.ipynb
index 4a2a0dba8571dc9c3f28e029ef06f5bdad5b1e63..5c0c2b9dc4d1a5701459445e2b8ed00134d1c9f5 100644
--- a/VAE/01-VAE with MNIST.ipynb	
+++ b/VAE/01-VAE with MNIST.ipynb	
@@ -4,8 +4,8 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Text Embedding - IMDB dataset\n",
-    "=============================\n",
+    "Variational AutoEncoder\n",
+    "=======================\n",
     "---\n",
     "Formation Introduction au Deep Learning  (FIDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020  \n",
     "\n",
@@ -22,31 +22,18 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "IDLE 2020 - Practical Work Module\n",
-      "  Version            : 0.2.4\n",
-      "  Run time           : Sunday 2 February 2020, 19:30:36\n",
-      "  Matplotlib style   : ../fidle/talk.mplstyle\n",
-      "  TensorFlow version : 2.0.0\n",
-      "  Keras version      : 2.2.4-tf\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import numpy as np\n",
     "\n",
     "import tensorflow as tf\n",
     "import tensorflow.keras as keras\n",
-    "import tensorflow.keras.datasets.imdb as imdb\n",
+    "import tensorflow.keras.datasets.mnist as mnist\n",
     "\n",
-    "import models.VAE\n",
-    "from models.VAE import VariationalAutoencoder\n",
+    "import modules.vae\n",
+    "# from modules.vae import VariationalAutoencoder\n",
     "\n",
     "import matplotlib.pyplot as plt\n",
     "import matplotlib\n",
@@ -56,23 +43,57 @@
     "\n",
     "from importlib import reload\n",
     "\n",
-    "\n",
-    "\n",
     "sys.path.append('..')\n",
     "import fidle.pwk as ooo\n",
     "\n",
+    "reload(ooo)\n",
     "ooo.init()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 2 - Get data"
+   ]
+  },
   {
    "cell_type": "code",
-   "execution_count": 20,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
-    "reload(models.VAE)\n",
+    "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
+    "\n",
+    "x_train = x_train.astype('float32') / 255.\n",
+    "x_train = np.expand_dims(x_train, axis=3)\n",
+    "x_test  = x_test.astype('float32') / 255.\n",
+    "x_test  = np.expand_dims(x_test, axis=3)\n",
+    "print(x_train.shape)\n",
+    "print(x_test.shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 3 - Get VAE model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "reload(modules.vae)\n",
+    "reload(modules.callbacks)\n",
+    "\n",
+    "tag = '000'\n",
+    "\n",
     "input_shape = (28,28,1)\n",
     "z_dim       = 2\n",
+    "verbose     = 0\n",
     "\n",
     "encoder= [ {'type':'Conv2D', 'filters':32, 'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'relu'},\n",
     "           {'type':'Conv2D', 'filters':64, 'kernel_size':(3,3), 'strides':2, 'padding':'same', 'activation':'relu'},\n",
@@ -86,112 +107,77 @@
     "           {'type':'Conv2DT', 'filters':1,  'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'sigmoid'}\n",
     "         ]\n",
     "\n",
-    "vae = models.VAE.VariationalAutoencoder(input_shape, encoder, decoder, z_dim)\n"
+    "vae = modules.vae.VariationalAutoencoder(input_shape    = input_shape, \n",
+    "                                         encoder_layers = encoder, \n",
+    "                                         decoder_layers = decoder,\n",
+    "                                         z_dim          = z_dim, \n",
+    "                                         verbose        = verbose,\n",
+    "                                         run_tag        = tag)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 4 - Compile it"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 21,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Model: \"model_29\"\n",
-      "__________________________________________________________________________________________________\n",
-      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
-      "==================================================================================================\n",
-      "encoder_input (InputLayer)      [(None, 28, 28, 1)]  0                                            \n",
-      "__________________________________________________________________________________________________\n",
-      "Layer_1 (Conv2D)                (None, 28, 28, 32)   320         encoder_input[0][0]              \n",
-      "__________________________________________________________________________________________________\n",
-      "Layer_2 (Conv2D)                (None, 14, 14, 64)   18496       Layer_1[0][0]                    \n",
-      "__________________________________________________________________________________________________\n",
-      "Layer_3 (Conv2D)                (None, 7, 7, 64)     36928       Layer_2[0][0]                    \n",
-      "__________________________________________________________________________________________________\n",
-      "Layer_4 (Conv2D)                (None, 7, 7, 64)     36928       Layer_3[0][0]                    \n",
-      "__________________________________________________________________________________________________\n",
-      "flatten_7 (Flatten)             (None, 3136)         0           Layer_4[0][0]                    \n",
-      "__________________________________________________________________________________________________\n",
-      "mu (Dense)                      (None, 2)            6274        flatten_7[0][0]                  \n",
-      "__________________________________________________________________________________________________\n",
-      "log_var (Dense)                 (None, 2)            6274        flatten_7[0][0]                  \n",
-      "__________________________________________________________________________________________________\n",
-      "encoder_output (Lambda)         (None, 2)            0           mu[0][0]                         \n",
-      "                                                                 log_var[0][0]                    \n",
-      "==================================================================================================\n",
-      "Total params: 105,220\n",
-      "Trainable params: 105,220\n",
-      "Non-trainable params: 0\n",
-      "__________________________________________________________________________________________________\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "vae.encoder.summary()"
+    "learning_rate = 0.0005\n",
+    "r_loss_factor = 1000\n",
+    "\n",
+    "vae.compile(learning_rate, r_loss_factor)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 5 - Train"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 22,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Model: \"model_30\"\n",
-      "_________________________________________________________________\n",
-      "Layer (type)                 Output Shape              Param #   \n",
-      "=================================================================\n",
-      "decoder_input (InputLayer)   [(None, 2)]               0         \n",
-      "_________________________________________________________________\n",
-      "dense_7 (Dense)              (None, 3136)              9408      \n",
-      "_________________________________________________________________\n",
-      "reshape_7 (Reshape)          (None, 7, 7, 64)          0         \n",
-      "_________________________________________________________________\n",
-      "Layer_1 (Conv2DTranspose)    (None, 7, 7, 64)          36928     \n",
-      "_________________________________________________________________\n",
-      "Layer_2 (Conv2DTranspose)    (None, 14, 14, 64)        36928     \n",
-      "_________________________________________________________________\n",
-      "Layer_3 (Conv2DTranspose)    (None, 28, 28, 32)        18464     \n",
-      "_________________________________________________________________\n",
-      "Layer_4 (Conv2DTranspose)    (None, 28, 28, 1)         289       \n",
-      "=================================================================\n",
-      "Total params: 102,017\n",
-      "Trainable params: 102,017\n",
-      "Non-trainable params: 0\n",
-      "_________________________________________________________________\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "vae.decoder.summary()"
+    "batch_size        = 100\n",
+    "epochs            = 200\n",
+    "batch_periodicity = 1000\n",
+    "initial_epoch     = 0\n",
+    "dataset_size      = 0.1"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "{'b': 2}"
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
-    "a={'a':1, 'b':2}\n",
-    "del a['a']\n",
-    "a"
+    "vae.train(x_train,\n",
+    "          x_test,\n",
+    "          batch_size        = batch_size, \n",
+    "          epochs            = epochs,\n",
+    "          batch_periodicity = batch_periodicity,\n",
+    "          initial_epoch     = initial_epoch,\n",
+    "          dataset_size      = dataset_size,\n",
+    "          lr_decay          = 1\n",
+    "         )"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/VAE/modules/callbacks.py b/VAE/modules/callbacks.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7bbe1a178d4f98616a9213dc3794207e813fad3
--- /dev/null
+++ b/VAE/modules/callbacks.py
@@ -0,0 +1,32 @@
+from tensorflow.keras.callbacks import Callback
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+
+class ImagesCallback(Callback):
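+    """
+    Keras callback : every `batch_periodicity` batches, decode one random latent
+    point with the VAE decoder and save the resulting image in <run_dir>/images.
+    """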
+    
+    def __init__(self, initial_epoch=0, batch_periodicity=1000, vae=None):
+        super().__init__()
+        self.epoch             = initial_epoch
+        self.batch_periodicity = batch_periodicity
+        self.vae               = vae
+        self.images_dir        = vae.run_directory+'/images'
+
+    def on_train_batch_end(self, batch, logs={}):  
+        
+        if batch % self.batch_periodicity == 0:
+            # ---- Get a random latent point
+            z_new   = np.random.normal(size = (1,self.vae.z_dim))
+            # ---- Predict an image
+            image = self.vae.decoder.predict(np.array(z_new))[0]
+            # ---- Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
+            image = image.squeeze()
+            # ---- Save it
+            filename=f'{self.images_dir}/img_{self.epoch:05d}_{batch:06d}.jpg'
+            if len(image.shape) == 2:
+                plt.imsave(filename, image, cmap='gray_r')
+            else:
+                plt.imsave(filename, image)
+
+    def on_epoch_begin(self, epoch, logs={}):
+        self.epoch += 1
+
diff --git a/VAE/models/VAE.py b/VAE/modules/vae.py
similarity index 51%
rename from VAE/models/VAE.py
rename to VAE/modules/vae.py
index 380c3469b0978078132315eff05e6e3a52276b0a..8f09c47cc91cd039774f9222bc5c2bec67b87446 100644
--- a/VAE/models/VAE.py
+++ b/VAE/modules/vae.py
@@ -3,24 +3,36 @@ import numpy as np
 import tensorflow as tf
 import tensorflow.keras as keras
 from tensorflow.keras import backend as K
-from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda, Activation, BatchNormalization, LeakyReLU, Dropout
+from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda
+from tensorflow.keras.layers import Activation, BatchNormalization, LeakyReLU, Dropout
 from tensorflow.keras.models import Model
+from tensorflow.keras.callbacks import ModelCheckpoint 
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.utils import plot_model
 
 import tensorflow.keras.datasets.imdb as imdb
 
+import modules.callbacks
+import os
 
 
 
 class VariationalAutoencoder():
 
     
-    def __init__(self, input_shape, encoder_layers, decoder_layers, z_dim):
+    def __init__(self, input_shape=None, encoder_layers=None, decoder_layers=None, z_dim=None, run_tag='default', verbose=0):
         
         self.name           = 'Variational AutoEncoder'
         self.input_shape    = input_shape
         self.encoder_layers = encoder_layers
         self.decoder_layers = decoder_layers
         self.z_dim          = z_dim
+        self.verbose        = verbose
+        self.run_directory  = f'./run/{run_tag}'
+        
+        # ---- Create run directories
+        for d in ('','/models','/figs','/logs','/images'):
+            os.makedirs(self.run_directory+d, mode=0o750, exist_ok=True)
         
         # ==== Encoder ================================================================
         
@@ -44,7 +56,7 @@ class VariationalAutoencoder():
         shape_before_flattening = K.int_shape(x)[1:]
         x = Flatten()(x)
         
-        # ---- mu    /   log_var
+        # ---- mu <-> log_var
         self.mu      = Dense(self.z_dim, name='mu')(x)
         self.log_var = Dense(self.z_dim, name='log_var')(x)
 
@@ -92,6 +104,19 @@ class VariationalAutoencoder():
 
         self.model = Model(model_input, model_output)
 
+        # ==== Verbosity ==============================================================
+
+        print('Model initialized.')
+        print('Outputs will be in : ',self.run_directory)
+        
+        if verbose>0 :
+            print('\n','-'*10,'Encoder','-'*50,'\n')
+            self.encoder.summary()
+            print('\n','-'*10,'Decoder','-'*50,'\n')
+            self.decoder.summary()
+            self.plot_model()
+        
+        
         
     def compile(self, learning_rate, r_loss_factor):
         self.learning_rate = learning_rate
@@ -111,4 +136,50 @@ class VariationalAutoencoder():
             return  r_loss + kl_loss
 
         optimizer = Adam(lr=learning_rate)
-        self.model.compile(optimizer=optimizer, loss = vae_loss,  metrics = [vae_r_loss, vae_kl_loss], experimental_run_tf_function=False)
+        self.model.compile(optimizer=optimizer, 
+                           loss = vae_loss,
+                           metrics = [vae_r_loss, vae_kl_loss], 
+                           experimental_run_tf_function=False)
+    
+    
+    def train(self, 
+              x_train,x_test,
+              batch_size=32, epochs=200, 
+              batch_periodicity=100, 
+              initial_epoch=0,
+              dataset_size=1,
+              lr_decay=1):
+
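+        # ---- dataset_size (0..1) : fraction of x_train / x_test actually used,
+        #      handy for quick tests on a subset of the data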
+        # ---- Dataset size
+        n_train = int(x_train.shape[0] * dataset_size)
+        n_test  = int(x_test.shape[0]  * dataset_size)
+
+        # ---- Callbacks
+        images_callback = modules.callbacks.ImagesCallback(initial_epoch, batch_periodicity, self)
+        
+#         lr_sched = step_decay_schedule(initial_lr=self.learning_rate, decay_factor=lr_decay, step_size=1)
+        
+        filename1 = self.run_directory+"/models/model-{epoch:03d}-{loss:.2f}.h5"
+        batch_per_epoch = int(n_train/batch_size)   # only n_train samples are actually used
+        checkpoint1 = ModelCheckpoint(filename1, save_freq=batch_per_epoch*5,verbose=0)
+
+        filename2 = self.run_directory+"/models/best_model.h5"
+        checkpoint2 = ModelCheckpoint(filename2, save_best_only=True, mode='min',monitor='val_loss',verbose=0)
+
+        callbacks_list = [checkpoint1, checkpoint2, images_callback]
+
+        self.model.fit(x_train[:n_train], x_train[:n_train],
+                       batch_size = batch_size,
+                       shuffle = True,
+                       epochs = epochs,
+                       initial_epoch = initial_epoch,
+                       callbacks = callbacks_list,
+                       validation_data = (x_test[:n_test], x_test[:n_test])
+                        )
+        
+        
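+    # Note : plot_model() requires the pydot package and the Graphviz tool to be installed.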
+    def plot_model(self):
+        d=self.run_directory+'/figs'
+        plot_model(self.model,   to_file=f'{d}/model.png',   show_shapes = True, show_layer_names = True, expand_nested=True)
+        plot_model(self.encoder, to_file=f'{d}/encoder.png', show_shapes = True, show_layer_names = True)
+        plot_model(self.decoder, to_file=f'{d}/decoder.png', show_shapes = True, show_layer_names = True)
diff --git a/fidle/pwk.py b/fidle/pwk.py
index fd2939d33d14ecf0fbb1fa3a73854388be62b46f..13cc6dfdca25dc3d8b89e43a65859f23f1289468 100644
--- a/fidle/pwk.py
+++ b/fidle/pwk.py
@@ -31,7 +31,7 @@ import seaborn as sn
 
 from IPython.display import display, Markdown
 
-VERSION='0.2.4'
+VERSION='0.2.5'
 
 
 # -------------------------------------------------------------
@@ -351,4 +351,7 @@ def plot_donut(values, labels, colors=["lightsteelblue","coral"], figsize=(6,6),
     # Equal aspect ratio ensures that pie is drawn as a circle
     plt.axis('equal')  
     plt.tight_layout()
-    plt.show()
\ No newline at end of file
+    plt.show()
+    
+def display_md(md_text):
+    display(Markdown(md_text))
\ No newline at end of file