From 622e54df5e5feab55a2806b7b4e0d946762251ad Mon Sep 17 00:00:00 2001
From: Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>
Date: Wed, 6 Oct 2021 17:31:24 +0200
Subject: [PATCH] Work in progress on DCGAN...

---
 .gitignore                                |   1 +
 DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb      | 263 ++++++++++++++++++++++
 DCGAN/modules/callbacks/ImagesCallback.py |  46 ++++
 DCGAN/modules/callbacks/__init__.py       |   1 +
 DCGAN/modules/models/DCGAN.py             | 247 ++++++++++++++++++++
 DCGAN/modules/models/__init__.py          |   1 +
 fidle/config.py                           |   2 +-
 7 files changed, 560 insertions(+), 1 deletion(-)
 create mode 100644 DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
 create mode 100644 DCGAN/modules/callbacks/ImagesCallback.py
 create mode 100644 DCGAN/modules/callbacks/__init__.py
 create mode 100644 DCGAN/modules/models/DCGAN.py
 create mode 100644 DCGAN/modules/models/__init__.py

diff --git a/.gitignore b/.gitignore
index 504a663..4daa354 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 .ipynb_checkpoints
+.vscode
 */.ipynb_checkpoints/*
 __pycache__
 */__pycache__/*
diff --git a/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb b/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
new file mode 100644
index 0000000..efb7ed4
--- /dev/null
+++ b/DCGAN/01-DCGAN-Draw-me-a-sheep.ipynb
@@ -0,0 +1,263 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
+    "\n",
+    "# <!-- TITLE --> [DCGAN01] - A first DCGAN to Draw a Sheep\n",
+    "<!-- DESC --> Episode 1 : Draw me a sheep, revisited with a DCGAN\n",
+    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
+    "\n",
+    "## Objectives :\n",
+    " - Build and train a DCGAN model with the Quick Draw dataset\n",
+    " - Understanding DCGAN\n",
+    "\n",
+    "The [Quick draw dataset](https://quickdraw.withgoogle.com/data) contains about 50.000.000 drawings, made by real people...  \n",
+    "We are using a subset of 117.555 of Sheep drawings  \n",
+    "To get the dataset : [https://github.com/googlecreativelab/quickdraw-dataset](https://github.com/googlecreativelab/quickdraw-dataset)  \n",
+    "To get the subdataset in numpy bitmap file : [https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap](https://console.cloud.google.com/storage/quickdraw_dataset/full/numpy_bitmap) (94.3 Mo)\n",
+    "\n",
+    "\n",
+    "## What we're going to do :\n",
+    "\n",
+    " - Have a look to the dataset\n",
+    " - Defining a VAE model\n",
+    " - Build the model\n",
+    " - Train it\n",
+    " - Follow the learning process with Tensorboard\n",
+    "\n",
+    "## Acknowledgements :\n",
+    "Thanks to **François Chollet** who is at the base of this example.  \n",
+    "See : [https://keras.io/examples/](https://keras.io/examples/)\n"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 1 - Init python stuff"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import scipy.stats\n",
+    "import sys\n",
+    "\n",
+    "import tensorflow as tf\n",
+    "from tensorflow import keras\n",
+    "from tensorflow.keras import layers\n",
+    "from tensorflow.keras.callbacks import TensorBoard\n",
+    "\n",
+    "from modules.models    import DCGAN\n",
+    "from modules.callbacks import ImagesCallback\n",
+    "\n",
+    "sys.path.append('..')\n",
+    "import fidle.pwk as pwk\n",
+    "\n",
+    "run_dir = './run/DCGAN.001'                  # Output directory\n",
+    "datasets_dir = pwk.init('DCGAN01', run_dir)"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 2 - Parameters"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "latent_dim = 128"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 3 - Load dataset and have a look \n",
+    "Load sheeps as numpy bitmaps..."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "x_data = np.load(datasets_dir+'/QuickDraw/origine/full_numpy_bitmap_sheep.npy')\n",
+    "x_data = x_data/255\n",
+    "x_data=x_data.reshape(-1,28,28,1)\n",
+    "x_data.shape"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "...and have a look :  \n",
+    "Note : These sheep are sheep drawn ... by real humans!"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "pwk.plot_images(x_data.reshape(-1,28,28), indices=range(72), columns=12, x_size=1, y_size=1, y_padding=0,spines_alpha=0, save_as='01-Sheeps')"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
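+  {
+   "cell_type": "markdown",
+   "source": [
+    "A quick check (an illustrative sketch, not strictly needed) : after normalization, the values should be floats in [0,1] :"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "# Illustrative check : value range and dtype after normalization\n",
+    "print('min :', x_data.min(), '  max :', x_data.max(), '  dtype :', x_data.dtype)"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },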
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 4 - Create a discriminator"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "discriminator = keras.Sequential(\n",
+    "    [\n",
+    "        keras.Input(shape=(28, 28, 1)),\n",
+    "        layers.Conv2D(64, kernel_size=4, strides=2, padding=\"same\"),\n",
+    "        layers.LeakyReLU(alpha=0.2),\n",
+    "        layers.Conv2D(128, kernel_size=4, strides=2, padding=\"same\"),\n",
+    "        layers.LeakyReLU(alpha=0.2),\n",
+    "        layers.Conv2D(128, kernel_size=4, strides=2, padding=\"same\"),\n",
+    "        layers.LeakyReLU(alpha=0.2),\n",
+    "        layers.Flatten(),\n",
+    "        layers.Dropout(0.2),\n",
+    "        layers.Dense(1, activation=\"sigmoid\"),\n",
+    "    ],\n",
+    "    name=\"discriminator\",\n",
+    ")\n",
+    "discriminator.summary()"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
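+  {
+   "cell_type": "markdown",
+   "source": [
+    "A quick sanity check (a minimal sketch, using the x_data loaded above) : the discriminator maps any batch of 28x28x1 images to probabilities in [0,1] :"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "# Illustrative check : the (untrained) discriminator returns one\n",
+    "# probability per image, i.e. a tensor of shape (batch_size, 1)\n",
+    "p = discriminator(x_data[:4])\n",
+    "print(p.numpy())"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },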
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 5 - Create a generator"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "generator = keras.Sequential(\n",
+    "    [\n",
+    "        keras.Input(shape=(latent_dim,)),\n",
+    "        layers.Dense(7 * 7 * 64),\n",
+    "        layers.Reshape((7, 7, 64)),\n",
+    "        layers.Conv2DTranspose(128, kernel_size=3, strides=2, padding=\"same\"),\n",
+    "        layers.LeakyReLU(alpha=0.2),\n",
+    "        layers.Conv2DTranspose(256, kernel_size=3, strides=2, padding=\"same\"),\n",
+    "        layers.LeakyReLU(alpha=0.2),\n",
+    "        layers.Conv2D(1, kernel_size=5, padding=\"same\", activation=\"sigmoid\"),\n",
+    "    ],\n",
+    "    name=\"generator\",\n",
+    ")\n",
+    "generator.summary()\n"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
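+  {
+   "cell_type": "markdown",
+   "source": [
+    "Same kind of check (illustrative sketch) : the generator maps latent vectors of dimension latent_dim to 28x28x1 images :"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "# Illustrative check : random latent vectors in, image-shaped tensors out\n",
+    "z = tf.random.normal(shape=(4, latent_dim))\n",
+    "print(generator(z).shape)   # expected : (4, 28, 28, 1)"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },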
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 6 - Create our DCGAN !"
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "gan = DCGAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "gan.compile(\n",
+    "    discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0001),\n",
+    "    generator_optimizer     = keras.optimizers.Adam(learning_rate=0.0001),\n",
+    "    loss_function           = keras.losses.BinaryCrossentropy(),\n",
+    ")"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "gan.fit( x_data, epochs=1, batch_size=32, callbacks=[ImagesCallback(num_img=10, latent_dim=latent_dim)]\n",
+    ")"
+   ],
+   "outputs": [],
+   "metadata": {}
+  },
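+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Step 7 - Generate a few sheep\n",
+    "A minimal sketch to close the loop : sample some points in the latent space and see what the generator draws after this (very short) training. The save_as label is arbitrary."
+   ],
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "source": [
+    "# Illustrative sketch : sample latent vectors and plot the generated drawings\n",
+    "z = tf.random.normal(shape=(12, latent_dim))\n",
+    "generated = gan.generator(z).numpy()\n",
+    "pwk.plot_images(generated.reshape(-1,28,28), indices=range(12), columns=12, x_size=1, y_size=1, y_padding=0, spines_alpha=0, save_as='02-Generated')"
+   ],
+   "outputs": [],
+   "metadata": {}
+  }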
+ ],
+ "metadata": {
+  "orig_nbformat": 4,
+  "language_info": {
+   "name": "python",
+   "version": "3.8.10",
+   "mimetype": "text/x-python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "pygments_lexer": "ipython3",
+   "nbconvert_exporter": "python",
+   "file_extension": ".py"
+  },
+  "kernelspec": {
+   "name": "python3",
+   "display_name": "Python 3.8.10 64-bit ('fidle': conda)"
+  },
+  "interpreter": {
+   "hash": "0d4eaa39e9afca7e4a329c71e6d11fd895be2163d24367e6af7052bf3ebf4932"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/DCGAN/modules/callbacks/ImagesCallback.py b/DCGAN/modules/callbacks/ImagesCallback.py
new file mode 100644
index 0000000..d418a0e
--- /dev/null
+++ b/DCGAN/modules/callbacks/ImagesCallback.py
@@ -0,0 +1,46 @@
+
+import os
+
+import tensorflow as tf
+from tensorflow import keras
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+
+class ImagesCallback(keras.callbacks.Callback):
+
+    def __init__(self, 
+                 num_img    = 3, 
+                 latent_dim = 100,
+                 filename   = 'image-{epoch:03d}-{i:02d}.jpg',
+                 run_dir    = './run'):
+        self.num_img    = num_img
+        self.latent_dim = latent_dim
+        self.filename   = filename
+        self.run_dir    = run_dir
+        os.makedirs(run_dir, exist_ok=True)
+
+
+
+    def save_images(self, images, epoch):
+        '''Save images as <run_dir>/<filename>'''
+        
+        for i,image in enumerate(images):
+            
+            image = image.squeeze()  # Squeeze it if monochrome : (lx,ly,1) -> (lx,ly) 
+        
+            filenamei = os.path.join(self.run_dir, self.filename.format(epoch=epoch, i=i))
+            
+            if len(image.shape) == 2:
+                plt.imsave(filenamei, image, cmap='gray_r')
+            else:
+                plt.imsave(filenamei, image)
+
+
+
+    def on_epoch_end(self, epoch, logs=None):
+
+        # ---- Get some points from latent space
+        random_latent_vectors = tf.random.normal(shape=(self.num_img, self.latent_dim))
+
+        # ---- Get fake images from generator
+        generated_images = self.model.generator(random_latent_vectors).numpy()
+
+        # ---- Save them
+        self.save_images(generated_images, epoch)
diff --git a/DCGAN/modules/callbacks/__init__.py b/DCGAN/modules/callbacks/__init__.py
new file mode 100644
index 0000000..fa10321
--- /dev/null
+++ b/DCGAN/modules/callbacks/__init__.py
@@ -0,0 +1 @@
+from modules.callbacks.ImagesCallback    import ImagesCallback
\ No newline at end of file
diff --git a/DCGAN/modules/models/DCGAN.py b/DCGAN/modules/models/DCGAN.py
new file mode 100644
index 0000000..027e05d
--- /dev/null
+++ b/DCGAN/modules/models/DCGAN.py
@@ -0,0 +1,247 @@
+# ------------------------------------------------------------------
+#     _____ _     _ _
+#    |  ___(_) __| | | ___
+#    | |_  | |/ _` | |/ _ \
+#    |  _| | | (_| | |  __/
+#    |_|   |_|\__,_|_|\___|                            DCGAN Example
+# ------------------------------------------------------------------
+# Formation Introduction au Deep Learning  (FIDLE)
+# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
+# ------------------------------------------------------------------
+# by JL Parouty (dec 2020), based on a François Chollet example
+#
+# Thanks to François Chollet for his example : https://keras.io/examples
+
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from IPython.display import display,Markdown
+import os
+
+class DCGAN(keras.Model):
+    '''
+    A DCGAN model, built from given generator and discriminator
+    '''
+
+    version = '1.0'
+
+    def __init__(self, discriminator=None, generator=None, latent_dim=100, **kwargs):
+        '''
+        DCGAN instantiation with a given discriminator and generator
+        args :
+            discriminator : discriminator model
+            generator : generator model
+            latent_dim : latent space dimension
+        return:
+            None
+        '''
+        super(DCGAN, self).__init__(**kwargs)
+        self.discriminator = discriminator
+        self.generator     = generator
+        self.latent_dim    = latent_dim
+        print(f'Fidle DCGAN is ready :-)  latent dim = {latent_dim}')
+
+
+    def compile(self,
+                discriminator_optimizer = None,
+                generator_optimizer     = None,
+                loss_function           = None ):
+        super(DCGAN, self).compile()
+        # Create defaults here (rather than as default arguments) so that each
+        # instance gets its own optimizer and loss objects
+        self.d_optimizer   = discriminator_optimizer or keras.optimizers.Adam()
+        self.g_optimizer   = generator_optimizer     or keras.optimizers.Adam()
+        self.loss_fn       = loss_function           or keras.losses.BinaryCrossentropy()
+        self.d_loss_metric = keras.metrics.Mean(name="d_loss")
+        self.g_loss_metric = keras.metrics.Mean(name="g_loss")
+
+
+    @property
+    def metrics(self):
+        return [self.d_loss_metric, self.g_loss_metric]
+
+
+
+    def train_step(self, inputs):
+        '''
+        Implementation of the training update.
+        Receive some real images, compute the losses, get the gradients,
+        and update the weights of the discriminator and the generator.
+        Return metrics.
+        args:
+            inputs : real images (or a tuple whose first element is the real images)
+        return:
+            d_loss  : discriminator loss
+            g_loss  : generator loss
+        '''
+
+        # ---- Prepare data for discriminator ----------------------
+        # ----------------------------------------------------------
+        #
+        # ---- Get the real images, as specified in the .fit()
+        #
+        real_images = inputs[0] if isinstance(inputs, tuple) else inputs
+
+        batch_size = tf.shape(real_images)[0]
+
+        # Get some random points in the latent space
+        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
+
+
+        # Generate fake images with the generator
+        generated_images = self.generator(random_latent_vectors)
+
+        # Combine them with real images
+        combined_images = tf.concat([generated_images, real_images], axis=0)
+
+        # Create the labels : 1 for the fake images, 0 for the real ones
+        labels = tf.concat(
+            [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0
+        )
+        # Add random noise to the labels - important trick!
+        labels += 0.05 * tf.random.uniform(tf.shape(labels))
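+        # (Adding a little noise to the labels is a form of label smoothing :
+        #  it keeps the discriminator from becoming overconfident and
+        #  empirically stabilizes GAN training)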
+
+        # ---- Train the discriminator -----------------------------
+        # ----------------------------------------------------------
+        #
+        # ---- Forward pass
+        #      Run the forward pass and record operations with the GradientTape.
+        #
+        with tf.GradientTape() as tape:
+
+            # Get predictions from discriminator 
+            predictions = self.discriminator(combined_images)
+
+            # Get loss
+            d_loss = self.loss_fn(labels, predictions)
+
+        # ---- Backward pass
+        #      Retrieve gradients from gradient_tape and run one step
+        #      of gradient descent to optimize trainable weights
+        #
+        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
+        self.d_optimizer.apply_gradients( zip(grads, self.discriminator.trainable_weights) )
+
+        # ---- Prepare data for generator ----------------------
+        # ----------------------------------------------------------
+        #
+        # Sample random points in the latent space
+        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
+
+        # Assemble labels that say "all real images"
+        misleading_labels = tf.zeros((batch_size, 1))
+
+        # ---- Train the generator ---------------------------------
+        # ----------------------------------------------------------
+        # We should *not* update the weights of the discriminator!
+        #
+        # ---- Forward pass
+        #      Run the forward pass and record operations with the GradientTape.
+        #
+        with tf.GradientTape() as tape:
+
+            # Get fake images from generator
+            fake_images = self.generator(random_latent_vectors)
+
+            # Get predictions from discriminator 
+            predictions = self.discriminator(fake_images)
+
+            # Get loss
+            g_loss = self.loss_fn(misleading_labels, predictions)
+        
+        # ---- Backward pass (only for generator)
+        #      Retrieve gradients from gradient_tape and run one step
+        #      of gradient descent to optimize trainable weights
+        #
+        grads = tape.gradient(g_loss, self.generator.trainable_weights)
+        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
+
+        # ---- Update and return metrics ---------------------------
+        # ----------------------------------------------------------
+        #
+        self.d_loss_metric.update_state(d_loss)
+        self.g_loss_metric.update_state(g_loss)
+
+        return {
+            "d_loss": self.d_loss_metric.result(),
+            "g_loss": self.g_loss_metric.result(),
+        }
+
+
+    def predict(self, inputs):
+        '''Generate images from latent vectors'''
+        return self.generator.predict(inputs)
+
+
+    def save(self, filename):
+        '''Save model in 2 parts'''
+        filename, extension = os.path.splitext(filename)
+        self.discriminator.save(f'{filename}-discriminator.h5')
+        self.generator.save(f'{filename}-generator.h5')
+
+
+    def reload(self, filename):
+        '''Reload a 2-part saved model'''
+        filename, extension = os.path.splitext(filename)
+        self.discriminator = keras.models.load_model(f'{filename}-discriminator.h5')
+        self.generator     = keras.models.load_model(f'{filename}-generator.h5')
+        print('Reloaded.')
+
+
+    @classmethod
+    def about(cls):
+        '''Basic whoami method'''
+        display(Markdown('<br>**FIDLE 2021 - DCGAN**'))
+        print('Version              :', cls.version)
+        print('TensorFlow version   :', tf.__version__)
+        print('Keras version        :', tf.keras.__version__)
diff --git a/DCGAN/modules/models/__init__.py b/DCGAN/modules/models/__init__.py
new file mode 100644
index 0000000..e679ea6
--- /dev/null
+++ b/DCGAN/modules/models/__init__.py
@@ -0,0 +1 @@
+from modules.models.DCGAN import DCGAN
\ No newline at end of file
diff --git a/fidle/config.py b/fidle/config.py
index 9bc2e51..8b11294 100644
--- a/fidle/config.py
+++ b/fidle/config.py
@@ -14,7 +14,7 @@
 
 # ---- Version -----------------------------------------------------
 #
-VERSION = '2.0.23'
+VERSION = '2.0.24'
 
 # ---- Default notebook name ---------------------------------------
 #
-- 
GitLab