From cd008ed29b9b80044926b9dc27460a656dfe378d Mon Sep 17 00:00:00 2001
From: Jean-Luc Parouty <Jean-Luc.Parouty@grenoble-inp.fr>
Date: Wed, 4 Mar 2020 19:30:10 +0100
Subject: [PATCH] Update README.md/ipynb and details

Former-commit-id: c689dc0563c92b212a195d2c1fe4717591090acd
---
 README.ipynb                                  | 25 +++++++------------
 README.md                                     |  2 +-
 VAE/07-VAE-with-CelebA-m.ipynb                |  3 ++-
 ...07-VAE-with-CelebA-m.nbconvert-done.ipynb} |  3 ++-
 VAE/08-VAE-withCelebA-post.ipynb              |  2 +-
 5 files changed, 15 insertions(+), 20 deletions(-)
 rename VAE/{07-VAE-with-CelebA-m.nbconvert.ipynb => 07-VAE-with-CelebA-m.nbconvert-done.ipynb} (98%)

diff --git a/README.ipynb b/README.ipynb
index c1ec1d2..ba1081d 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -94,21 +94,21 @@
        "[[SYNOP3] - Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb)  \n",
        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 3: Attempt to predict in the longer term   \n",
        "[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;First generative network experience with the MNIST dataset  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 1 : Model construction and Training  \n",
        "[[VAE2] - Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Use of the previously trained model, analysis of the results  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 2 : Exploring our latent space  \n",
        "[[VAE3] - About the CelebA dataset](VAE/03-About-CelebA.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;New VAE experience, but with a larger and more fun dataset  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 3 : About the CelebA dataset, a more fun dataset !  \n",
        "[[VAE4] - Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-batch.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Preparation of a clustered dataset, batchable  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 4 : Preparation of a clustered dataset, batchable  \n",
        "[[VAE5] - Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Verification of prepared data from CelebA dataset  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 5 :\\tChecking the clustered dataset  \n",
        "[[VAE6] - Variational AutoEncoder (VAE) with CelebA (small)](VAE/06-VAE-with-CelebA-s.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;VAE with a more fun and realistic dataset - small resolution and batchable  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)  \n",
        "[[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;VAE with a more fun and realistic dataset - medium resolution and batchable  \n",
-       "[[VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/12-VAE-withCelebA-post.ipynb)  \n",
-       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Use of the previously trained model with CelebA, analysis of the results  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)  \n",
+       "[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)  \n",
+       "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 8 : Exploring latent space of our trained models  \n",
        "[[BASH1] - OAR batch script](VAE/batch-oar.sh)  \n",
        "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Bash script for OAR batch submission of a notebook  \n",
        "[[BASH2] - SLURM batch script](VAE/batch-slurm.sh)  \n",
@@ -142,13 +142,6 @@
      },
      "metadata": {},
      "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed')).History will not be written to the database.\n"
-     ]
     }
    ],
    "source": [
diff --git a/README.md b/README.md
index 9b97450..b87ec08 100644
--- a/README.md
+++ b/README.md
@@ -93,7 +93,7 @@ Useful information is also available in the [wiki](https://gricad-gitlab.univ-gr
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)  
 [[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb)  
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)  
-[[VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)  
+[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)  
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Episode 8 : Exploring latent space of our trained models  
 [[BASH1] - OAR batch script](VAE/batch-oar.sh)  
 &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Bash script for OAR batch submission of a notebook  
diff --git a/VAE/07-VAE-with-CelebA-m.ipynb b/VAE/07-VAE-with-CelebA-m.ipynb
index 3943556..bfe64fd 100644
--- a/VAE/07-VAE-with-CelebA-m.ipynb
+++ b/VAE/07-VAE-with-CelebA-m.ipynb
@@ -179,7 +179,8 @@
     "## Step 5 - Train\n",
     "For 10 epochs, adam optimizer :  \n",
     "- Run time at IDRIS : 1299.77 sec. - 0:21:39\n",
-    "- Run time at GRICAD : 2092.77 sec. - 0:34:52"
+    "- Run time at GRICAD : 2092.77 sec. - 0:34:52\n",
+    "- Run time at IDRIS with medium resolution : Train duration : 6638.61 sec. - 1:50:38"
    ]
   },
   {
diff --git a/VAE/07-VAE-with-CelebA-m.nbconvert.ipynb b/VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
similarity index 98%
rename from VAE/07-VAE-with-CelebA-m.nbconvert.ipynb
rename to VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
index 13c392d..46f4b4e 100644
--- a/VAE/07-VAE-with-CelebA-m.nbconvert.ipynb
+++ b/VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
@@ -262,7 +262,8 @@
     "## Step 5 - Train\n",
     "For 10 epochs, adam optimizer :  \n",
     "- Run time at IDRIS : 1299.77 sec. - 0:21:39\n",
-    "- Run time at GRICAD : 2092.77 sec. - 0:34:52"
+    "- Run time at GRICAD : 2092.77 sec. - 0:34:52\n",
+    "- At IDRIS with medium resolution : Train duration : 6638.61 sec. - 1:50:38"
    ]
   },
   {
diff --git a/VAE/08-VAE-withCelebA-post.ipynb b/VAE/08-VAE-withCelebA-post.ipynb
index 3ed0684..0f9c5a6 100644
--- a/VAE/08-VAE-withCelebA-post.ipynb
+++ b/VAE/08-VAE-withCelebA-post.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis\n",
+    "# <!-- TITLE --> [VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis\n",
     "<!-- DESC --> Episode 8 : Exploring latent space of our trained models\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
-- 
GitLab