diff --git a/README.ipynb b/README.ipynb
index c1ec1d289646b695e700f1c3bbbd8e99aff5a2c3..ba1081d3cc64cf9a6fa182fca4e3c047c5015efa 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -94,21 +94,21 @@
 "[[SYNOP3] - Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb) \n",
 " Episode 3: Attempt to predict in the longer term \n",
 "[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.ipynb) \n",
- " First generative network experience with the MNIST dataset \n",
+ " Episode 1 : Model construction and Training \n",
 "[[VAE2] - Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb) \n",
- " Use of the previously trained model, analysis of the results \n",
+ " Episode 2 : Exploring our latent space \n",
 "[[VAE3] - About the CelebA dataset](VAE/03-About-CelebA.ipynb) \n",
- " New VAE experience, but with a larger and more fun dataset \n",
+ " Episode 3 : About the CelebA dataset, a more fun dataset ! \n",
 "[[VAE4] - Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-batch.ipynb) \n",
- " Preparation of a clustered dataset, batchable \n",
+ " Episode 4 : Preparation of a clustered dataset, batchable \n",
 "[[VAE5] - Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb) \n",
- " Verification of prepared data from CelebA dataset \n",
+ " Episode 5 :\tChecking the clustered dataset \n",
 "[[VAE6] - Variational AutoEncoder (VAE) with CelebA (small)](VAE/06-VAE-with-CelebA-s.ipynb) \n",
- " VAE with a more fun and realistic dataset - small resolution and batchable \n",
+ " Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.) \n",
 "[[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb) \n",
- " VAE with a more fun and realistic dataset - medium resolution and batchable \n",
- "[[VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/12-VAE-withCelebA-post.ipynb) \n",
- " Use of the previously trained model with CelebA, analysis of the results \n",
+ " Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.) \n",
+ "[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb) \n",
+ " Episode 8 : Exploring latent space of our trained models \n",
 "[[BASH1] - OAR batch script](VAE/batch-oar.sh) \n",
 " Bash script for OAR batch submission of a notebook \n",
 "[[BASH2] - SLURM batch script](VAE/batch-slurm.sh) \n",
@@ -142,13 +142,6 @@
 },
 "metadata": {},
 "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed')).History will not be written to the database.\n"
- ]
 }
 ],
 "source": [
diff --git a/README.md b/README.md
index 9b97450162534925baf754cd8fefadb533455c2a..b87ec08fdf5a719ab3638a7f78950fb3c6e5d546 100644
--- a/README.md
+++ b/README.md
@@ -93,7 +93,7 @@ Useful information is also available in the [wiki](https://gricad-gitlab.univ-gr
 Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)
 [[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb)
 Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)
-[[VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)
+[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)
 Episode 8 : Exploring latent space of our trained models
 [[BASH1] - OAR batch script](VAE/batch-oar.sh)
 Bash script for OAR batch submission of a notebook
diff --git a/VAE/07-VAE-with-CelebA-m.ipynb b/VAE/07-VAE-with-CelebA-m.ipynb
index 394355680c142600e2d2d7c2d942f4f49b570185..bfe64fd69b0e8a73da56f639effaf240c38517f6 100644
--- a/VAE/07-VAE-with-CelebA-m.ipynb
+++ b/VAE/07-VAE-with-CelebA-m.ipynb
@@ -179,7 +179,8 @@
 "## Step 5 - Train\n",
 "For 10 epochs, adam optimizer : \n",
 "- Run time at IDRIS : 1299.77 sec. - 0:21:39\n",
- "- Run time at GRICAD : 2092.77 sec. - 0:34:52"
+ "- Run time at GRICAD : 2092.77 sec. - 0:34:52\n",
+ "- Run time at IDRIS with medium resolution : Train duration : 6638.61 sec. - 1:50:38"
 ]
 },
 {
diff --git a/VAE/07-VAE-with-CelebA-m.nbconvert.ipynb b/VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
similarity index 98%
rename from VAE/07-VAE-with-CelebA-m.nbconvert.ipynb
rename to VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
index 13c392d4706e62967a83a3bfca894d816d21a256..46f4b4e41c372deff9b605b37b11c758ca21bcb6 100644
--- a/VAE/07-VAE-with-CelebA-m.nbconvert.ipynb
+++ b/VAE/07-VAE-with-CelebA-m.nbconvert-done.ipynb
@@ -262,7 +262,8 @@
 "## Step 5 - Train\n",
 "For 10 epochs, adam optimizer : \n",
 "- Run time at IDRIS : 1299.77 sec. - 0:21:39\n",
- "- Run time at GRICAD : 2092.77 sec. - 0:34:52"
+ "- Run time at GRICAD : 2092.77 sec. - 0:34:52\n",
+ "- At IDRIS with medium resolution : Train duration : 6638.61 sec. - 1:50:38"
 ]
 },
 {
diff --git a/VAE/08-VAE-withCelebA-post.ipynb b/VAE/08-VAE-withCelebA-post.ipynb
index 3ed0684598d722a851281f53240542e242437011..0f9c5a6850db8b3f51679c0327ab80716e2f5703 100644
--- a/VAE/08-VAE-withCelebA-post.ipynb
+++ b/VAE/08-VAE-withCelebA-post.ipynb
@@ -6,7 +6,7 @@
 "source": [
 "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
 "\n",
- "# <!-- TITLE --> [VAE12] - Variational AutoEncoder (VAE) with CelebA - Analysis\n",
+ "# <!-- TITLE --> [VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis\n",
 "<!-- DESC --> Episode 8 : Exploring latent space of our trained models\n",
 "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
 "\n",