diff --git a/AE/01-Prepare-MNIST-dataset.ipynb b/AE/01-Prepare-MNIST-dataset.ipynb
index eb901bb69e563a8d3c1032bf149e8639adf137b7..b54b84cf29340344e9d7dfb0db28476c963a2b82 100644
--- a/AE/01-Prepare-MNIST-dataset.ipynb
+++ b/AE/01-Prepare-MNIST-dataset.ipynb
@@ -68,7 +68,7 @@
    "outputs": [],
    "source": [
     "prepared_dataset   = './data/mnist-noisy.h5'\n",
-    "scale              = .1\n",
+    "scale              = 1\n",
     "progress_verbosity = 1"
    ]
   },
@@ -220,7 +220,8 @@
    "hash": "8e38643e33497db9a306e3f311fa98cb1e65371278ca73ee4ea0c76aa5a4f387"
   },
   "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit ('fidle-cpu': conda)",
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
    "name": "python3"
   },
   "language_info": {
@@ -233,7 +234,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.7"
+   "version": "3.7.10"
   }
  },
  "nbformat": 4,
diff --git a/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb b/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
index b6470ecb05283ea17da249eea4f7055017c73748..91ce1ca22478fc3106090c00d0055d43200d8520 100644
--- a/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
+++ b/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
@@ -63,8 +63,8 @@
     "sys.path.append('..')\n",
     "import fidle.pwk as pwk\n",
     "\n",
-    "run_dir = './run/DCGAN.001'                  # Output directory\n",
-    "datasets_dir = pwk.init('DCGAN01', run_dir)"
+    "run_dir = './run/SHEEP1.001'                  # Output directory\n",
+    "datasets_dir = pwk.init('SHEEP1', run_dir)"
    ]
   },
   {
@@ -86,7 +86,7 @@
    "source": [
     "latent_dim    = 128\n",
     "\n",
-    "scale         = .01\n",
+    "scale         = 1\n",
     "epochs        = 10\n",
     "batch_size    = 32\n",
     "num_img       = 12\n",
@@ -125,18 +125,18 @@
    "source": [
     "# Load dataset\n",
     "x_data = np.load(datasets_dir+'/QuickDraw/origine/sheep.npy')\n",
-    "print(x_data.shape)\n",
+    "print('Original dataset shape : ',x_data.shape)\n",
     "\n",
     "# Rescale\n",
     "n=int(scale*len(x_data))\n",
     "x_data = x_data[:n]\n",
-    "print(x_data.shape)\n",
+    "print('Rescaled dataset shape : ',x_data.shape)\n",
     "\n",
     "# Normalize, reshape and shuffle\n",
     "x_data = x_data/255\n",
     "x_data = x_data.reshape(-1,28,28,1)\n",
     "np.random.shuffle(x_data)\n",
-    "print(x_data.shape)\n"
+    "print('Final dataset shape    : ',x_data.shape)\n"
    ]
   },
   {
@@ -388,7 +388,8 @@
    "hash": "8e38643e33497db9a306e3f311fa98cb1e65371278ca73ee4ea0c76aa5a4f387"
   },
   "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit ('fidle-cpu': conda)",
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
    "name": "python3"
   },
   "language_info": {
diff --git a/DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb b/DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..b1a7bb5b14b47a1acc2589e8fef30d40e075d897
--- /dev/null
+++ b/DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb
@@ -0,0 +1,413 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
+    "\n",
+    "# <!-- TITLE --> [SHEEP2] - A WGAN-GP to Draw a Sheep\n",
+    "<!-- DESC --> Episode 2 : Draw me a sheep, revisited with a WGAN-GP\n",
+    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
+    "\n",
+    "## Objectives :\n",
+    " - Build and train a WGAN-GP model with the Quick Draw dataset\n",
+    " - Understanding WGAN-GP\n",
+    "\n",
+    "The [Quick draw dataset](https://quickdraw.withgoogle.com/data) contains about 50.000.000 drawings, made by real people...  \n",
+    "We are using a subset of 117.555 of Sheep drawings  \n",
+    "To get the dataset : [https://github.com/googlecreativelab/quickdraw-dataset](https://github.com/googlecreativelab/quickdraw-dataset)  \n",
+    "Datasets in numpy bitmap file : [https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap](https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap)   \n",
+    "Sheep dataset : [https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy](https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/sheep.npy) (94.3 Mo)\n",
+    "\n",
+    "\n",
+    "## What we're going to do :\n",
+    "\n",
+    " - Have a look to the dataset\n",
+    " - Defining a GAN model\n",
+    " - Build the model\n",
+    " - Train it\n",
+    " - Analyze the results\n",
+    "\n",
+    "## Acknowledgements :\n",
+    "Thanks to **François Chollet** who is at the base of this example.  \n",
+    "See : [https://keras.io/examples/](https://keras.io/examples/)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 1 - Init python stuff"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "from skimage import io\n",
+    "import sys\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import layers\n",
+    "from tensorflow.keras.callbacks import TensorBoard\n",
+    "\n",
+    "from modules.models    import WGANGP\n",
+    "from modules.callbacks import ImagesCallback\n",
+    "\n",
+    "sys.path.append('..')\n",
+    "import fidle.pwk as pwk\n",
+    "\n",
+    "run_dir = './run/SHEEP2.001'                  # Output directory\n",
+    "datasets_dir = pwk.init('SHEEP2', run_dir)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 2 - Parameters\n",
+    "`scale` : With scale=1, we need 5-6 minutes on a GPU V100 ...and >2h on a CPU !  \n",
+    "`latent_dim` : Latent space dimension, 128 for example !  \n",
+    "`fit_verbosity` : verbosity during training : 0 = silent, 1 = progress bar, 2 = one line per epoch  \n",
+    "`num_img` : Number of images to visualize"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "latent_dim    = 128\n",
+    "\n",
+    "scale         = 1\n",
+    "epochs        = 3\n",
+    "n_critic      = 2\n",
+    "batch_size    = 64\n",
+    "num_img       = 12\n",
+    "fit_verbosity = 1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Override parameters (batch mode) - Just forget this cell"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pwk.override('scale', 'latent_dim', 'epochs', 'batch_size', 'num_img', 'fit_verbosity')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 3 - Load dataset and have a look \n",
+    "Load sheeps as numpy bitmaps..."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load dataset\n",
+    "x_data = np.load(datasets_dir+'/QuickDraw/origine/sheep.npy')\n",
+    "print('Original dataset shape : ',x_data.shape)\n",
+    "\n",
+    "# Rescale\n",
+    "n=int(scale*len(x_data))\n",
+    "x_data = x_data[:n]\n",
+    "print('Rescaled dataset shape : ',x_data.shape)\n",
+    "\n",
+    "# Normalize, reshape and shuffle\n",
+    "x_data = x_data/255\n",
+    "x_data = x_data.reshape(-1,28,28,1)\n",
+    "np.random.shuffle(x_data)\n",
+    "print('Final dataset shape    : ',x_data.shape)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "...and have a look :  \n",
+    "Note : These sheep are sheep drawn ... by real humans!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pwk.plot_images(x_data.reshape(-1,28,28), indices=range(72), columns=12, x_size=1, y_size=1, \n",
+    "                y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 4 - Create a discriminator"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "inputs  = keras.Input(shape=(28, 28, 1))\n",
+    "x       = layers.Conv2D(64, kernel_size=4, strides=2, padding=\"same\")(inputs)\n",
+    "x       = layers.LeakyReLU(alpha=0.2)(x)\n",
+    "x       = layers.Conv2D(128, kernel_size=4, strides=2, padding=\"same\")(x)\n",
+    "x       = layers.LeakyReLU(alpha=0.2)(x)\n",
+    "x       = layers.Conv2D(128, kernel_size=4, strides=2, padding=\"same\")(x)\n",
+    "x       = layers.LeakyReLU(alpha=0.2)(x)\n",
+    "x       = layers.Flatten()(x)\n",
+    "x       = layers.Dropout(0.2)(x)\n",
+    "c       = layers.Dense(1)(x)\n",
+    "\n",
+    "discriminator = keras.Model(inputs, c, name=\"discriminator\")\n",
+    "discriminator.summary()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 5 - Create a generator"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "inputs  = keras.Input(shape=(latent_dim,))\n",
+    "x       = layers.Dense(7 * 7 * 64)(inputs)\n",
+    "x       = layers.Reshape((7, 7, 64))(x)\n",
+    "x       = layers.UpSampling2D()(x)\n",
+    "x       = layers.Conv2D(128,  kernel_size=3, strides=1, padding='same', activation='relu')(x)\n",
+    "x       = layers.UpSampling2D()(x)\n",
+    "x       = layers.Conv2D(256,  kernel_size=3, strides=1, padding='same', activation='relu')(x)\n",
+    "outputs = layers.Conv2D(1,    kernel_size=5, strides=1, padding=\"same\", activation='sigmoid')(x)\n",
+    "\n",
+    "generator = keras.Model(inputs, outputs, name=\"generator\")\n",
+    "generator.summary()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 6 - Build, compile and train our DCGAN \n",
+    "Duration : 5' on a V100, with : scale=0.5, epochs=10, n_critic=2\n",
+    "First, clean saved images :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!rm $run_dir/images/*.jpg >/dev/null 2>&1 "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Build our model :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "gan = WGANGP(discriminator=discriminator, generator=generator, latent_dim=latent_dim, n_critic=n_critic)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "gan.compile(\n",
+    "#     discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0001),\n",
+    "#     generator_optimizer     = keras.optimizers.Adam(learning_rate=0.0001)\n",
+    "    discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9),\n",
+    "    generator_optimizer     = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Add a callback to save images, train our DCGAN model and save it :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "imagesCallback = ImagesCallback(num_img=num_img, latent_dim=latent_dim, run_dir=f'{run_dir}/images')\n",
+    "\n",
+    "history = gan.fit( x_data, \n",
+    "                   epochs=epochs, \n",
+    "                   batch_size=batch_size, \n",
+    "                   callbacks=[imagesCallback], \n",
+    "                   verbose=fit_verbosity )\n",
+    "\n",
+    "gan.save(f'{run_dir}/models/model.h5')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 7 - History"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pwk.plot_history(history,  plot={'loss':['d_loss','g_loss']}, save_as='01-history')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "images=[]\n",
+    "for epoch in range(0,epochs,1):\n",
+    "    for i in range(num_img):\n",
+    "        filename = f'{run_dir}/images/image-{epoch:03d}-{i:02d}.jpg'\n",
+    "        image    = io.imread(filename)\n",
+    "        images.append(image)      \n",
+    "\n",
+    "pwk.plot_images(images, None, indices='all', columns=num_img, x_size=1,y_size=1, interpolation=None, y_padding=0, spines_alpha=0, save_as='04-learning')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Step 8 - Generation\n",
+    "Reload our saved model :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "gan.reload(f'{run_dir}/models/model.h5')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Generate somes images from latent space :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "nb_images = 12*15\n",
+    "\n",
+    "z = np.random.normal(size=(nb_images,latent_dim))\n",
+    "images = gan.predict(z)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Plot it :"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pwk.plot_images(images, None, indices='all', columns=num_img, x_size=1,y_size=1, interpolation=None, y_padding=0, spines_alpha=0, save_as='04-learning')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pwk.end()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "<img width=\"80px\" src=\"../fidle/img/00-Fidle-logo-01.svg\"></img>"
+   ]
+  }
+ ],
+ "metadata": {
+  "interpreter": {
+   "hash": "8e38643e33497db9a306e3f311fa98cb1e65371278ca73ee4ea0c76aa5a4f387"
+  },
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/DCGAN/modules/models/DCGAN.py b/DCGAN/modules/models/DCGAN.py
index 17a1beff149830f5db3e9a56fb61f5474791160a..03f6734b3c7ef5a533db603160a35c2dbd6279d0 100644
--- a/DCGAN/modules/models/DCGAN.py
+++ b/DCGAN/modules/models/DCGAN.py
@@ -63,7 +63,17 @@ class DCGAN(keras.Model):
                 discriminator_optimizer = keras.optimizers.Adam(), 
                 generator_optimizer     = keras.optimizers.Adam(), 
                 loss_function           = keras.losses.BinaryCrossentropy() ):
+        '''
+        Compile the model
+        args:
+            discriminator_optimizer : Discriminator optimizer (Adam by default)
+            generator_optimizer : Generator optimizer (Adam by default)
+            loss_function : Loss function (BinaryCrossentropy by default)
+        '''
         super(DCGAN, self).compile()
+        self.discriminator.compile(optimizer=discriminator_optimizer, loss=loss_function)
+        self.generator.compile(optimizer=generator_optimizer, loss=loss_function)
+        
         self.d_optimizer   = discriminator_optimizer
         self.g_optimizer   = generator_optimizer
         self.loss_fn       = loss_function
@@ -114,11 +124,11 @@ class DCGAN(keras.Model):
         combined_images = tf.concat( [generated_images, real_images], axis=0)
 
         # Creation of labels corresponding to real or fake images
-        # 1 is generated, 0 is real
-        labels = tf.concat( [tf.ones((batch_size, 1)),  tf.zeros((batch_size, 1))], axis=0)
+        # 0 is generated, 1 is real
+        labels = tf.concat( [tf.zeros((batch_size, 1)),  tf.ones((batch_size, 1))], axis=0)
 
-        # Add random noise to the labels - important trick !
-        labels += 0.05 * tf.random.uniform(tf.shape(labels))
+        # Add random noise to the labels - a classic trick, disabled here
+#         labels += 0.05 * tf.random.uniform(tf.shape(labels))
 
         # ---- Train the discriminator -----------------------------
         # ----------------------------------------------------------
@@ -147,8 +157,8 @@ class DCGAN(keras.Model):
         # Sample random points in the latent space
         random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
 
-        # Assemble labels that say "all real images"
-        misleading_labels = tf.zeros((batch_size, 1))
+        # Assemble labels that say all images are real, yes it's a lie ;-)
+        misleading_labels = tf.ones((batch_size, 1))
 
         # ---- Train the generator ---------------------------------
         # ----------------------------------------------------------
diff --git a/DCGAN/modules/models/WGANGP.py b/DCGAN/modules/models/WGANGP.py
new file mode 100644
index 0000000000000000000000000000000000000000..35cdab43ba8aad460e1617f7617944270b9f4448
--- /dev/null
+++ b/DCGAN/modules/models/WGANGP.py
@@ -0,0 +1,256 @@
+# ------------------------------------------------------------------
+#     _____ _     _ _
+#    |  ___(_) __| | | ___
+#    | |_  | |/ _` | |/ _ \
+#    |  _| | | (_| | |  __/
+#    |_|   |_|\__,_|_|\___|                           WGANGP Example
+# ------------------------------------------------------------------
+# Formation Introduction au Deep Learning  (FIDLE)
+# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
+# ------------------------------------------------------------------
+# by JL Parouty (dec 2020), based on François Chollet example
+#
+# Thanks to François Chollet example : https://keras.io/examples
+
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from IPython.display import display,Markdown
+import os
+
+class WGANGP(keras.Model):
+    '''
+    A WGAN-GP model, built from a given generator and discriminator (critic)
+    '''
+
+    version = '1.0'
+
+
+
+    def __init__(self, discriminator=None, generator=None, latent_dim=100, n_critic=3, lambda_gp=10, **kwargs):
+        '''
+        WGANGP instantiation with a given discriminator and generator
+        args :
+            discriminator : discriminator (critic) model
+            generator : generator model
+            latent_dim : latent space dimension
+            n_critic : number of critic updates per generator update
+            lambda_gp : gradient penalty coefficient
+        return:
+            None
+        '''
+        super(WGANGP, self).__init__(**kwargs)
+        self.discriminator = discriminator
+        self.generator     = generator
+        self.latent_dim    = latent_dim
+        self.n_critic      = n_critic
+        self.lambda_gp     = lambda_gp 
+        print(f'Fidle WGANGP is ready :-)  latent dim = {latent_dim}')
+
+
+        
+    def call(self, inputs):
+        '''
+        Implementation of the model forward pass
+        args:
+            inputs : vectors from latent space
+        return:
+            output : Output of the generator
+        '''
+        outputs = self.generator(inputs)
+        return outputs
+                
+
+
+    def compile(self, 
+                discriminator_optimizer = keras.optimizers.Adam(), 
+                generator_optimizer     = keras.optimizers.Adam()
+                ):
+        '''
+        Compile the model
+        args:
+            discriminator_optimizer : Discriminator optimizer (Adam by default)
+            generator_optimizer : Generator optimizer (Adam by default)
+        '''
+        super(WGANGP, self).compile()
+        self.discriminator.compile(optimizer=discriminator_optimizer)
+        self.generator.compile(optimizer=generator_optimizer)
+        
+        self.d_optimizer   = discriminator_optimizer
+        self.g_optimizer   = generator_optimizer
+        self.d_loss_metric = keras.metrics.Mean(name="d_loss")
+        self.g_loss_metric = keras.metrics.Mean(name="g_loss")
+
+
+
+    @property
+    def metrics(self):
+        return [self.d_loss_metric, self.g_loss_metric]
+
+
+    def gradient_penalty(self, batch_size, real_images, fake_images):
+        """ Calculates the gradient penalty.
+
+        This loss is calculated on an interpolated image
+        and added to the discriminator loss.
+        """
+        # Create interpolated images between fake and real ones
+        epsilon = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
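+        # (note : the WGAN-GP paper samples epsilon uniformly in [0,1] ;
+        #  a normal draw is used here, following the Keras example)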
+        interpolated = fake_images + epsilon * (real_images - fake_images)
+
+        # Calculate interpolated critics, in gradient tape mode
+        with tf.GradientTape() as gp_tape:
+            gp_tape.watch(interpolated)
+            # Get the critics for the interpolated image.
+            interpolated_critics = self.discriminator(interpolated, training=True)
+
+        # Retrieve the gradients of the critics w.r.t. the interpolated images
+        gradients = gp_tape.gradient(interpolated_critics, [interpolated])[0]
+        # Calculate the norm of the gradients.
+        norm = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
+        # Calculate the final gp
+        gp = self.lambda_gp * tf.reduce_mean((norm - 1.0) ** 2)
+        return gp
+
+
+
+    def train_step(self, inputs):
+        '''
+        Implementation of the training update.
+        Receives a batch of real images, computes the losses, then updates
+        the weights of the critic and of the generator, and returns the metrics.
+        args:
+            inputs : real images (a tensor, or a tuple whose first element is the images)
+        return:
+            d_loss  : discriminator loss
+            g_loss  : generator loss
+        '''
+
+        # ---- Get the input we need, specified in the .fit()
+        #      inputs is tensor or a tuple of tensors
+        #
+        if isinstance(inputs, tuple):
+            real_images = inputs[0]
+        else:
+            real_images = inputs
+
+        batch_size=tf.shape(real_images)[0]
+
+        # ---- Train the discriminator ----------------------------------------
+        # ---------------------------------------------------------------------
+        #  d_loss = D(fake) - D(real) + lambda * ( ||grad D(interpolated)|| - 1 )^2
+        #         =       w_loss      + gp
+        #
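+        #  The critic is updated n_critic times for each generator update,
+        #  as in the WGAN-GP training procedure.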
+        for i in range(self.n_critic):
+            
+            # ---- Forward pass
+            #
+            # Get some random points in the latent space : z
+            random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
+
+            # Generate fake images with the generator : G(z)
+            fake_images = self.generator(random_latent_vectors, training=True)
+
+            # Record operations with the GradientTape.
+            with tf.GradientTape() as tape:
+
+                # Get critics for the fake images : D(G(z))
+                fake_critics = self.discriminator(fake_images, training=True)
+                
+                # Get critics for the real images : D(x)
+                real_critics = self.discriminator(real_images, training=True)
+
+                # Calculate the wasserstein discriminator loss L = D(fake) - D(real)
+                w_loss = tf.reduce_mean(fake_critics) - tf.reduce_mean(real_critics)
+
+                # Calculate the gradient penalty
+                gp = self.gradient_penalty(batch_size, real_images, fake_images)
+
+                # Calculate the full discriminator loss : loss = w_loss + gp
+                d_loss = w_loss + gp
+
+            # ---- Backward pass
+            #
+            # Retrieve gradients from gradient_tape and run one step 
+            # of gradient descent to optimize trainable weights
+            gradients = tape.gradient(d_loss, self.discriminator.trainable_weights)
+
+            # Update discriminator weights
+            self.d_optimizer.apply_gradients( zip(gradients, self.discriminator.trainable_weights) )
+
+        # ---- Train the generator --------------------------------------------
+        # ---------------------------------------------------------------------
+        # g_loss = -D(fake)
+        #
+        # ---- Forward pass
+        #
+        # Get some random points in the latent space : z
+        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
+
+        # Record operations with the GradientTape.
+        with tf.GradientTape() as tape:
+
+            # Generate fake images using the generator
+            fake_images = self.generator(random_latent_vectors, training=True)
+
+            # Get critics for fake images
+            fake_critics = self.discriminator(fake_images, training=True)
+
+            # Calculate the generator loss
+            g_loss = -tf.reduce_mean(fake_critics)
+
+        # ---- Backward pass
+        #
+        # Retrieve gradients from gradient_tape and run one step 
+        # of gradient descent to optimize trainable weights
+        gradients = tape.gradient(g_loss, self.generator.trainable_variables)
+
+        # Update generator weights
+        self.g_optimizer.apply_gradients( zip(gradients, self.generator.trainable_variables) )
+
+        # ---- Update and return metrics --------------------------------------
+        # ---------------------------------------------------------------------
+        #
+
+        self.d_loss_metric.update_state(d_loss)
+        self.g_loss_metric.update_state(g_loss)
+
+        return {
+            "d_loss": self.d_loss_metric.result(),
+            "g_loss": self.g_loss_metric.result(),
+        }
+
+
+            
+    def save(self,filename):
+        '''Save the model in two parts (discriminator and generator)'''
+        save_dir             = os.path.dirname(filename)
+        filename, _extension = os.path.splitext(filename)
+        # ---- Create directory if needed
+        os.makedirs(save_dir, mode=0o750, exist_ok=True)
+        # ---- Save models
+        self.discriminator.save( f'{filename}-discriminator.h5' )
+        self.generator.save(     f'{filename}-generator.h5'     )
+
+    
+    def reload(self,filename):
+        '''Reload a two-part saved model.
+        Note : to train it again, you need to .compile() it first'''
+        filename, extension = os.path.splitext(filename)
+        self.discriminator = keras.models.load_model(f'{filename}-discriminator.h5', compile=False)
+        self.generator     = keras.models.load_model(f'{filename}-generator.h5'    , compile=False)
+        print('Reloaded.')
+                
+        
+    @classmethod
+    def about(cls):
+        '''Basic whoami method'''
+        display(Markdown('<br>**FIDLE 2022 - WGANGP**'))
+        print('Version              :', cls.version)
+        print('TensorFlow version   :', tf.__version__)
+        print('Keras version        :', tf.keras.__version__)
diff --git a/DCGAN/modules/models/__init__.py b/DCGAN/modules/models/__init__.py
index e679ea642affb99ad58b02d5f4f0af915daf0cf0..c213c03d7b822cd2b00152f0b3de3fe68a5d4fcc 100644
--- a/DCGAN/modules/models/__init__.py
+++ b/DCGAN/modules/models/__init__.py
@@ -1 +1,2 @@
-from modules.models.DCGAN import DCGAN
\ No newline at end of file
+from modules.models.DCGAN  import DCGAN
+from modules.models.WGANGP import WGANGP
\ No newline at end of file
diff --git a/Misc/Scratchbook.ipynb b/Misc/Scratchbook.ipynb
index f39fc5beb97f540d8545da916ac85448c795ac55..88b144ce32b24f09e5f066943639384d7d20562a 100644
--- a/Misc/Scratchbook.ipynb
+++ b/Misc/Scratchbook.ipynb
@@ -29,7 +29,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "floppy-organic",
    "metadata": {},
    "outputs": [],
@@ -53,19 +53,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "id": "opposite-plasma",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Inputs shape is :  (32, 20, 8)\n",
-      "Output shape is :  (32, 16)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "inputs  = tf.random.normal([32, 20, 8])\n",
     "\n",
@@ -78,20 +69,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "id": "forbidden-murray",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Output shape :  (32, 20, 18)\n",
-      "Memory state :  (32, 18)\n",
-      "Carry  state :  (32, 18)\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "lstm = tf.keras.layers.LSTM(18, return_sequences=True, return_state=True)\n",
     "\n",
@@ -104,25 +85,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "id": "verified-fruit",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<tf.Tensor: shape=(18,), dtype=float32, numpy=\n",
-       "array([-0.20923303,  0.00193496,  0.05929745,  0.0429938 , -0.02835345,\n",
-       "        0.14096233,  0.07420755,  0.1777523 ,  0.1205566 , -0.03841979,\n",
-       "       -0.02402029,  0.16098973,  0.10468155, -0.06480312, -0.02497844,\n",
-       "        0.09700071, -0.24351674,  0.04884451], dtype=float32)>"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "# --- See the last vector of the output\n",
     "output[-1,-1]"
@@ -130,25 +96,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "id": "homeless-library",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<tf.Tensor: shape=(18,), dtype=float32, numpy=\n",
-       "array([-0.20923303,  0.00193496,  0.05929745,  0.0429938 , -0.02835345,\n",
-       "        0.14096233,  0.07420755,  0.1777523 ,  0.1205566 , -0.03841979,\n",
-       "       -0.02402029,  0.16098973,  0.10468155, -0.06480312, -0.02497844,\n",
-       "        0.09700071, -0.24351674,  0.04884451], dtype=float32)>"
-      ]
-     },
-     "execution_count": 5,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "# ---- Memory state is the last output\n",
     "memory_state[-1]"
@@ -156,25 +107,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "id": "preliminary-psychiatry",
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "<tf.Tensor: shape=(18,), dtype=float32, numpy=\n",
-       "array([-0.3245376 ,  0.00296011,  0.13041827,  0.10711877, -0.05223516,\n",
-       "        0.4009896 ,  0.21599025,  0.4260387 ,  0.30799934, -0.0799172 ,\n",
-       "       -0.06359857,  0.29457492,  0.18084048, -0.14462015, -0.04707906,\n",
-       "        0.15726675, -0.38622206,  0.09004797], dtype=float32)>"
-      ]
-     },
-     "execution_count": 6,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "carry_state[-1]"
    ]
@@ -189,29 +125,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 27,
+   "execution_count": null,
    "id": "42276389-4ea6-42d1-93bc-6650062ef86a",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Number of batch : 10\n",
-      "\n",
-      "#0 : [[1 2 3 4 5]] => [6]\n",
-      "#1 : [[2 3 4 5 6]] => [7]\n",
-      "#2 : [[3 4 5 6 7]] => [8]\n",
-      "#3 : [[4 5 6 7 8]] => [9]\n",
-      "#4 : [[5 6 7 8 9]] => [10]\n",
-      "#5 : [[ 6  7  8  9 10]] => [11]\n",
-      "#6 : [[ 7  8  9 10 11]] => [12]\n",
-      "#7 : [[ 8  9 10 11 12]] => [13]\n",
-      "#8 : [[ 9 10 11 12 13]] => [14]\n",
-      "#9 : [[10 11 12 13 14]] => [15]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from keras.preprocessing.sequence import TimeseriesGenerator\n",
     "\n",
@@ -233,10 +150,139 @@
     "    print(f'#{i} : {x} => {y}')"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "67e3c888-aaa4-4166-90a1-cdb63920fd7d",
+   "metadata": {},
+   "source": [
+    "## 3 - Upsampling"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "20f12cf0-1fdb-4b53-92c6-d03b140e42d1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "x = np.array([1,2,3,4])\n",
+    "x = x.reshape(2,2)\n",
+    "print('\\nInitial : ', x.shape)\n",
+    "print(x)\n",
+    "\n",
+    "x = x.reshape((1,2,2,1))\n",
+    "print('\\nReshape as a batch of (2,2) vectors : ', x.shape)\n",
+    "print(x)\n",
+    "\n",
+    "y = tf.keras.layers.UpSampling2D( size=(2, 2), interpolation=\"nearest\" )(x)\n",
+    "\n",
+    "y = np.array(y)\n",
+    "print('\\ny shape : ',y.shape)\n",
+    "\n",
+    "y = y.reshape(4,4)\n",
+    "print('\\n After a (4,4) reshape :')\n",
+    "print(y)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e8fb1472",
+   "metadata": {},
+   "source": [
+    "### 4 - Reduce mean"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "09ac4e52-8953-41d9-b712-e6a83a9ae860",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "c = np.array([[3.,4], [5.,6], [7.,8]])\n",
+    "print(np.mean(c,1))\n",
+    "\n",
+    "print(tf.reduce_mean(c,1))\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "72be9368",
+   "metadata": {},
+   "source": [
+    "## 5 - Gradient_tape()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "13fcc722",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# ---- My function f\n",
+    "#\n",
+    "def f(x):\n",
+    "  y = x**2 + 4*x - 5\n",
+    "  return y\n",
+    "\n",
+    "# ---- Examples :\n",
+    "#\n",
+    "print('f(1) is : ', f(1))\n",
+    "print('f(2) is : ', f(2))\n",
+    "\n",
+    "# ---- With a tensor :\n",
+    "#\n",
+    "x = tf.Variable(2.0)\n",
+    "\n",
+    "print('f(x) is : ', f(x))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6fab93ce",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ---- Derivative function of f(x)\n",
+    "#\n",
+    "def g(x):\n",
+    "    y = 2*x + 4\n",
+    "    return y\n",
+    "\n",
+    "print('Derivative of f(x) for x=3 : ', g(3))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "3f3f0c4c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ---- Derivative using tf\n",
+    "#\n",
+    "with tf.GradientTape() as tape:\n",
+    "    x = tf.Variable(3.0)\n",
+    "    y = f(x)\n",
+    "\n",
+    "dy = tape.gradient(y, x)  # dy/dx\n",
+    "\n",
+    "print(dy)\n"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4d94892b-d3a5-448d-aa2b-28c3a01a4b72",
+   "id": "43f9a625",
    "metadata": {},
    "outputs": [],
    "source": []
diff --git a/README.ipynb b/README.ipynb
index f7e40540284bc8c0586916ec9f253f94770d317e..9e0dd8b336ef007f626025a77467a3602a75cab2 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "3f8b217d",
+   "id": "7f8c4a8a",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2022-01-26T14:19:05.708967Z",
-     "iopub.status.busy": "2022-01-26T14:19:05.705058Z",
-     "iopub.status.idle": "2022-01-26T14:19:05.717400Z",
-     "shell.execute_reply": "2022-01-26T14:19:05.717043Z"
+     "iopub.execute_input": "2022-03-08T10:11:20.520072Z",
+     "iopub.status.busy": "2022-03-08T10:11:20.519675Z",
+     "iopub.status.idle": "2022-03-08T10:11:20.606245Z",
+     "shell.execute_reply": "2022-03-08T10:11:20.606632Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -52,7 +52,7 @@
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
        "Current Version : <!-- VERSION_BEGIN -->\n",
-       "**2.0.33**\n",
+       "**2.0.34**\n",
        "<!-- VERSION_END -->\n",
        "\n",
        "\n",
@@ -178,6 +178,8 @@
        "### Generative Adversarial Networks (GANs)\n",
        "- **[SHEEP1](DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb)** - [A first DCGAN to Draw a Sheep](DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb)  \n",
        "Episode 1 : Draw me a sheep, revisited with a DCGAN\n",
+       "- **[SHEEP2](DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb)** - [A WGAN-GP to Draw a Sheep](DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb)  \n",
+       "Episode 2 : Draw me a sheep, revisited with a WGAN-GP\n",
        "\n",
        "### Miscellaneous\n",
        "- **[ACTF1](Misc/Activation-Functions.ipynb)** - [Activation functions](Misc/Activation-Functions.ipynb)  \n",
diff --git a/README.md b/README.md
index 5c48bf6ac7e561c73071296b9e716f863068333b..68f097aeee55cf26b882560ddeee67298e9ef742 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ For more information, you can contact us at :
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
 Current Version : <!-- VERSION_BEGIN -->
-**2.0.33**
+**2.0.34**
 <!-- VERSION_END -->
 
 
@@ -157,6 +157,8 @@ Bash script for SLURM batch submission of VAE8 notebooks
 ### Generative Adversarial Networks (GANs)
 - **[SHEEP1](DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb)** - [A first DCGAN to Draw a Sheep](DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb)  
 Episode 1 : Draw me a sheep, revisited with a DCGAN
+- **[SHEEP2](DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb)** - [A WGAN-GP to Draw a Sheep](DCGAN/02-WGANGP-Draw-me-a-sheep.ipynb)  
+Episode 2 : Draw me a sheep, revisited with a WGAN-GP
 
 ### Miscellaneous
 - **[ACTF1](Misc/Activation-Functions.ipynb)** - [Activation functions](Misc/Activation-Functions.ipynb)  
diff --git a/VAE/01-VAE-with-MNIST.ipynb b/VAE/01-VAE-with-MNIST.ipynb
index 1c6ad1a61e78c461878c76e1b9d411981255d1c0..317eda6b5969a14e7af2d5f37860e058ff527943 100644
--- a/VAE/01-VAE-with-MNIST.ipynb
+++ b/VAE/01-VAE-with-MNIST.ipynb
@@ -387,7 +387,8 @@
    "hash": "8e38643e33497db9a306e3f311fa98cb1e65371278ca73ee4ea0c76aa5a4f387"
   },
   "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit ('fidle-cpu': conda)",
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
    "name": "python3"
   },
   "language_info": {
@@ -400,7 +401,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.7"
+   "version": "3.7.10"
   }
  },
  "nbformat": 4,
diff --git a/VAE/02-VAE-with-MNIST.ipynb b/VAE/02-VAE-with-MNIST.ipynb
index a7d9edf840fb3a0e269721e13a8705557db1e609..920a611e057ebb338cb3fad758ecf66f0eef5933 100644
--- a/VAE/02-VAE-with-MNIST.ipynb
+++ b/VAE/02-VAE-with-MNIST.ipynb
@@ -92,9 +92,9 @@
    "outputs": [],
    "source": [
     "latent_dim    = 2\n",
-    "loss_weights  = [1,.01]\n",
+    "loss_weights  = [1,.001]       # [1, .001] give good results\n",
     "\n",
-    "scale         = 0.01\n",
+    "scale         = 1\n",
     "seed          = 123\n",
     "\n",
     "batch_size    = 64\n",
@@ -476,7 +476,8 @@
    "hash": "8e38643e33497db9a306e3f311fa98cb1e65371278ca73ee4ea0c76aa5a4f387"
   },
   "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit ('fidle-cpu': conda)",
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
    "name": "python3"
   },
   "language_info": {
@@ -489,7 +490,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.7"
+   "version": "3.7.10"
   }
  },
  "nbformat": 4,
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index be75d8389e1afdbd6299d4533d512c8a18c451df..9c7ebc5cadf8ef5e072824ec90284563b2bef286 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -437,6 +437,19 @@ Nb_SHEEP1:
     batch_size: default
     num_img: default
     fit_verbosity: default
+Nb_SHEEP2:
+  notebook_id: SHEEP2
+  notebook_dir: DCGAN
+  notebook_src: 02-WGANGP-Draw-me-a-sheep.ipynb
+  notebook_tag: default
+  overrides:
+    run_dir: default
+    scale: default
+    latent_dim: default
+    epochs: default
+    batch_size: default
+    num_img: default
+    fit_verbosity: default
 Nb_ACTF1:
   notebook_id: ACTF1
   notebook_dir: Misc
diff --git a/fidle/ci/small_cpu.yml b/fidle/ci/small_cpu.yml
index a344315f492c47a05504138c8ac19e2737900ef7..135c785cca62e4fe2b34d208aad6ef7db45615aa 100644
--- a/fidle/ci/small_cpu.yml
+++ b/fidle/ci/small_cpu.yml
@@ -448,8 +448,8 @@ Nb_VAE9:
 
 # ------ DCGAN -----------------------------------------------------
 #
-Nb_DCGAN01:
-  notebook_id: DCGAN01
+Nb_SHEEP1:
+  notebook_id: SHEEP1
   notebook_dir: DCGAN
   notebook_src: 01-DCGAN-Draw-me-a-sheep.ipynb
   notebook_tag: default
diff --git a/fidle/ci/smart_gpu.yml b/fidle/ci/smart_gpu.yml
index c3e0e998b7d3cc90cc1112242ae29f7b800a158f..e65f551810365debe62ec0db08be9d60de840ea1 100644
--- a/fidle/ci/smart_gpu.yml
+++ b/fidle/ci/smart_gpu.yml
@@ -528,8 +528,8 @@ Nb_VAE10:
 
 # ------ DCGAN -----------------------------------------------------
 #
-Nb_DCGAN01:
-  notebook_id: DCGAN01
+Nb_SHEEP1:
+  notebook_id: SHEEP1
   notebook_dir: DCGAN
   notebook_src: 01-DCGAN-Draw-me-a-sheep.ipynb
   notebook_tag: default
diff --git a/fidle/config.py b/fidle/config.py
index 76e4e4dd96a4ba9ef6da39dbbb63532f71cdbf76..8423d93fcbf8485ea59f589d4ced15e5918cb853 100644
--- a/fidle/config.py
+++ b/fidle/config.py
@@ -14,7 +14,7 @@
 
 # ---- Version -----------------------------------------------------
 #
-VERSION = '2.0.33'
+VERSION = '2.0.34'
 
 # ---- Default notebook name ---------------------------------------
 #
diff --git a/fidle/logs/catalog.json b/fidle/logs/catalog.json
index 73af6234cd18b9eb43e470d87206d8324123a18d..ab65f18c8a0b0eab6917580734ede7adeed09421 100644
--- a/fidle/logs/catalog.json
+++ b/fidle/logs/catalog.json
@@ -565,6 +565,23 @@
             "run_dir"
         ]
     },
+    "SHEEP2": {
+        "id": "SHEEP2",
+        "dirname": "DCGAN",
+        "basename": "02-WGANGP-Draw-me-a-sheep.ipynb",
+        "title": "A WGAN-GP to Draw a Sheep",
+        "description": "Episode 2 : Draw me a sheep, revisited with a WGAN-GP",
+        "overrides": [
+            "run_dir",
+            "scale",
+            "latent_dim",
+            "epochs",
+            "batch_size",
+            "num_img",
+            "fit_verbosity",
+            "run_dir"
+        ]
+    },
     "ACTF1": {
         "id": "ACTF1",
         "dirname": "Misc",