From bb4e6b0dde321ef16af0b376bad8a627e891ac39 Mon Sep 17 00:00:00 2001 From: Jean-Luc Parouty <Jean-Luc.Parouty@grenoble-inp.fr> Date: Sat, 12 Dec 2020 00:48:22 +0100 Subject: [PATCH] Rewrite index generator for README --- LinearReg/01-Linear-Regression.ipynb | 2 +- README.ipynb | 124 +++++--------- README.md | 115 +++++-------- fidle/Finished.ipynb | 152 +++++++++++++++++ fidle/Update_index.ipynb | 242 +++++++++++++++++++++++++++ fidle/config.py | 10 +- fidle/log/catalog_file.json | 226 +++++++++++++++++++++++++ 7 files changed, 712 insertions(+), 159 deletions(-) create mode 100644 fidle/Finished.ipynb create mode 100644 fidle/Update_index.ipynb create mode 100644 fidle/log/catalog_file.json diff --git a/LinearReg/01-Linear-Regression.ipynb b/LinearReg/01-Linear-Regression.ipynb index 5cc1809..f032d30 100644 --- a/LinearReg/01-Linear-Regression.ipynb +++ b/LinearReg/01-Linear-Regression.ipynb @@ -111,7 +111,7 @@ "sys.path.append('..')\n", "import fidle.pwk as pwk\n", "\n", - "datasets_dir = pwk.init('01-Linear-Regression')" + "datasets_dir = pwk.init('LINR1')" ] }, { diff --git a/README.ipynb b/README.ipynb index 9158125..9b86b75 100644 --- a/README.ipynb +++ b/README.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 7, "metadata": { "jupyter": { "source_hidden": true @@ -21,6 +21,7 @@ "<!-- Open the notebook: README.ipynb! -->\n", "<!-- --------------------------------------------------- -->\n", "\n", + "\n", "## A propos\n", "\n", "This repository contains all the documents and links of the **Fidle Training** . \n", @@ -35,7 +36,9 @@ "\n", "For more information, you can contact us at : \n", "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top) \n", - "Current Version : 0.5.9\n", + "Current Version : <!-- VERSION_BEGIN -->\n", + "0.6.0 DEV\n", + "<!-- VERSION_END -->\n", "\n", "\n", "## Course materials\n", @@ -48,83 +51,41 @@ "\n", "## Jupyter notebooks\n", "\n", - "<!-- DO NOT REMOVE THIS TAG !!! -->\n", - "<!-- INDEX -->\n", "<!-- INDEX_BEGIN -->\n", - "[[LINR1] - Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb) \n", - " Direct determination of linear regression \n", - "[[GRAD1] - Linear regression with gradient descent](LinearReg/02-Gradient-descent.ipynb) \n", - " An example of gradient descent in the simple case of a linear regression. \n", - "[[POLR1] - Complexity Syndrome](LinearReg/03-Polynomial-Regression.ipynb) \n", - " Illustration of the problem of complexity with the polynomial regression \n", - "[[LOGR1] - Logistic regression, in pure Tensorflow](LinearReg/04-Logistic-Regression.ipynb) \n", - " Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. \n", - "[[PER57] - Perceptron Model 1957](IRIS/01-Simple-Perceptron.ipynb) \n", - " A simple perceptron, with the IRIS dataset. 
 \n",
-    "[[BHP1] - Regression with a Dense Network (DNN)](BHPD/01-DNN-Regression.ipynb)  \n",
-    "     A Simple regression with a Dense Neural Network (DNN) - BHPD dataset  \n",
-    "[[BHP2] - Regression with a Dense Network (DNN) - Advanced code](BHPD/02-DNN-Regression-Premium.ipynb)  \n",
-    "     More advanced example of DNN network code - BHPD dataset  \n",
-    "[[MNIST1] - Simple classification with DNN](MNIST/01-DNN-MNIST.ipynb)  \n",
-    "     Example of classification with a fully connected neural network  \n",
-    "[[GTS1] - CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  \n",
-    "     Episode 1 : Data analysis and creation of a usable dataset  \n",
-    "[[GTS2] - CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)  \n",
-    "     Episode 2 : First convolutions and first results  \n",
-    "[[GTS3] - CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)  \n",
-    "     Episode 3 : Monitoring and analysing training, managing checkpoints  \n",
-    "[[GTS4] - CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  \n",
-    "     Episode 4 : Improving the results with data augmentation  \n",
-    "[[GTS5] - CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)  \n",
-    "     Episode 5 : A lot of models, a lot of datasets and a lot of results.  \n",
-    "[[GTS6] - CNN with GTSRB dataset - Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)  \n",
-    "     Episode 6 : Run Full convolution notebook as a batch  \n",
-    "[[GTS7] - CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)  \n",
-    "     Episode 7 : Displaying the reports of the different jobs  \n",
-    "[[TSB1] - Tensorboard with/from Jupyter ](GTSRB/99-Scripts-Tensorboard.ipynb)  \n",
-    "     4 ways to use Tensorboard from the Jupyter environment  \n",
-    "[[BASH1] - OAR batch script](GTSRB/batch_oar.sh)  \n",
-    "     Bash script for OAR batch submission of GTSRB notebook  \n",
-    "[[BASH2] - SLURM batch script](GTSRB/batch_slurm.sh)  \n",
-    "     Bash script for SLURM batch submission of GTSRB notebooks  \n",
-    "[[IMDB1] - Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)  \n",
-    "     A very classical example of word embedding for text classification (sentiment analysis)  \n",
-    "[[IMDB2] - Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)  \n",
-    "     Example of reusing a previously saved model  \n",
-    "[[IMDB3] - Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)  \n",
-    "     Still the same problem, but with a network combining embedding and LSTM  \n",
-    "[[SYNOP1] - Time series with RNN - Preparation of data](SYNOP/01-Preparation-of-data.ipynb)  \n",
-    "     Episode 1 : Data analysis and creation of a usable dataset  \n",
-    "[[SYNOP2] - Time series with RNN - Try a prediction](SYNOP/02-First-predictions.ipynb)  \n",
-    "     Episode 2 : Training session and first predictions  \n",
-    "[[SYNOP3] - Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb)  \n",
-    "     Episode 3: Attempt to predict in the longer term  \n",
-    "[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.ipynb)  \n",
-    "     Episode 1 : Model construction and Training  \n",
-    "[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.nbconvert.ipynb)  \n",
-    "     Episode 1 : Model construction and Training  \n",
-    "[[VAE2] - Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb)  \n",
-    "     Episode 2 : Exploring our latent space  \n",
-    "[[VAE3] - About the CelebA dataset](VAE/03-About-CelebA.ipynb)  \n",
-    "     Episode 3 : About the CelebA dataset, a more fun dataset ;-)  \n",
-    "[[VAE4] - Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-datasets.ipynb)  \n",
-    "     Episode 4 : Preparation of a clustered dataset, batchable  \n",
-    "[[VAE5] - Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb)  \n",
-    "     Episode 5 :\tChecking the clustered dataset  \n",
-    "[[VAE6] - Variational AutoEncoder (VAE) with CelebA (small)](VAE/06-VAE-with-CelebA-s.ipynb)  \n",
-    "     Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)  \n",
-    "[[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb)  \n",
-    "     Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)  \n",
-    "[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)  \n",
-    "     Episode 8 : Exploring latent space of our trained models  \n",
-    "[[BASH1] - OAR batch script](VAE/batch_oar.sh)  \n",
-    "     Bash script for OAR batch submission of VAE notebook  \n",
-    "[[BASH2] - SLURM batch script](VAE/batch_slurm.sh)  \n",
-    "     Bash script for SLURM batch submission of VAE notebooks  \n",
-    "[[ACTF1] - Activation functions](Misc/Activation-Functions.ipynb)  \n",
-    "     Some activation functions, with their derivatives.  \n",
-    "[[NP1] - A short introduction to Numpy](Misc/Numpy.ipynb)  \n",
-    "     Numpy is an essential tool for the Scientific Python.  \n",
+    "| | |\n",
+    "|--|--|\n",
+    "|LINR1| [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)<br>Direct determination of linear regression |\n",
+    "|GRAD1| [Linear regression with gradient descent](LinearReg/02-Gradient-descent.ipynb)<br>An example of gradient descent in the simple case of a linear regression.|\n",
+    "|POLR1| [Complexity Syndrome](LinearReg/03-Polynomial-Regression.ipynb)<br>Illustration of the problem of complexity with the polynomial regression|\n",
+    "|LOGR1| [Logistic regression, in pure Tensorflow](LinearReg/04-Logistic-Regression.ipynb)<br>Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. 
|\n",
+    "|PER57| [Perceptron Model 1957](IRIS/01-Simple-Perceptron.ipynb)<br>A simple perceptron, with the IRIS dataset.|\n",
+    "|BHP1| [Regression with a Dense Network (DNN)](BHPD/01-DNN-Regression.ipynb)<br>A Simple regression with a Dense Neural Network (DNN) - BHPD dataset|\n",
+    "|BHP2| [Regression with a Dense Network (DNN) - Advanced code](BHPD/02-DNN-Regression-Premium.ipynb)<br>More advanced example of DNN network code - BHPD dataset|\n",
+    "|MNIST1| [Simple classification with DNN](MNIST/01-DNN-MNIST.ipynb)<br>Example of classification with a fully connected neural network|\n",
+    "|GTS1| [CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)<br>Episode 1 : Data analysis and creation of a usable dataset|\n",
+    "|GTS2| [CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)<br>Episode 2 : First convolutions and first results|\n",
+    "|GTS3| [CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)<br>Episode 3 : Monitoring and analysing training, managing checkpoints|\n",
+    "|GTS4| [CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)<br>Episode 4 : Improving the results with data augmentation|\n",
+    "|GTS5| [CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)<br>Episode 5 : A lot of models, a lot of datasets and a lot of results.|\n",
+    "|GTS6| [CNN with GTSRB dataset - Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)<br>Episode 6 : Run Full convolution notebook as a batch|\n",
+    "|GTS7| [CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)<br>Episode 7 : Displaying the reports of the different jobs|\n",
+    "|TSB1| [Tensorboard with/from Jupyter ](GTSRB/99-Scripts-Tensorboard.ipynb)<br>4 ways to use Tensorboard from the Jupyter environment|\n",
+    "|IMDB1| [Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)<br>A very classical example of word embedding for text classification (sentiment analysis)|\n",
+    "|IMDB2| [Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)<br>Example of reusing a previously saved model|\n",
+    "|IMDB3| [Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)<br>Still the same problem, but with a network combining embedding and LSTM|\n",
+    "|SYNOP1| [Time series with RNN - Preparation of data](SYNOP/01-Preparation-of-data.ipynb)<br>Episode 1 : Data analysis and creation of a usable dataset|\n",
+    "|SYNOP2| [Time series with RNN - Try a prediction](SYNOP/02-First-predictions.ipynb)<br>Episode 2 : Training session and first predictions|\n",
+    "|SYNOP3| [Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb)<br>Episode 3: Attempt to predict in the longer term |\n",
+    "|VAE1| [Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.nbconvert.ipynb)<br>Episode 1 : Model construction and Training|\n",
+    "|VAE2| [Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb)<br>Episode 2 : Exploring our latent space|\n",
+    "|VAE3| [About the CelebA dataset](VAE/03-About-CelebA.ipynb)<br>Episode 3 : About the CelebA dataset, a more fun dataset ;-)|\n",
+    "|VAE4| [Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-datasets.ipynb)<br>Episode 4 : Preparation of a clustered dataset, batchable|\n",
+    "|VAE5| [Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb)<br>Episode 5 :\tChecking the clustered dataset|\n",
+    "|VAE6| [Variational AutoEncoder (VAE) with CelebA 
(small)](VAE/06-VAE-with-CelebA-s.nbconvert.ipynb)<br>Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)|\n",
+    "|VAE7| [Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.nbconvert.ipynb)<br>Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)|\n",
+    "|VAE8| [Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)<br>Episode 8 : Exploring latent space of our trained models|\n",
+    "|ACTF1| [Activation functions](Misc/Activation-Functions.ipynb)<br>Some activation functions, with their derivatives.|\n",
+    "|NP1| [A short introduction to Numpy](Misc/Numpy.ipynb)<br>Numpy is an essential tool for the Scientific Python.|\n",
     "<!-- INDEX_END -->\n",
     "\n",
     "\n",
@@ -158,6 +119,13 @@
     "#\n",
     "# This README is visible under Jupiter LAb ! :-)"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
diff --git a/README.md b/README.md
index b14c76a..91016f9 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,7 @@
 <!-- Open the notebook: README.ipynb! -->
 <!-- --------------------------------------------------- -->
 
+
 ## A propos
 
 This repository contains all the documents and links of the **Fidle Training** .
@@ -21,7 +22,9 @@ The objectives of this training are :
 
 For more information, you can contact us at :
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
-Current Version : 0.5.9
+Current Version : <!-- VERSION_BEGIN -->
+0.6.0 DEV
+<!-- VERSION_END -->
 
 
 ## Course materials
@@ -34,83 +37,41 @@ Some other useful informations are also available in the [wiki](https://gricad-g
 
 ## Jupyter notebooks
 
-<!-- DO NOT REMOVE THIS TAG !!! -->
-<!-- INDEX -->
 <!-- INDEX_BEGIN -->
-[[LINR1] - Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  
-     Direct determination of linear regression  
-[[GRAD1] - Linear regression with gradient descent](LinearReg/02-Gradient-descent.ipynb)  
-     An example of gradient descent in the simple case of a linear regression.  
-[[POLR1] - Complexity Syndrome](LinearReg/03-Polynomial-Regression.ipynb)  
-     Illustration of the problem of complexity with the polynomial regression  
-[[LOGR1] - Logistic regression, in pure Tensorflow](LinearReg/04-Logistic-Regression.ipynb)  
-     Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow.  
-[[PER57] - Perceptron Model 1957](IRIS/01-Simple-Perceptron.ipynb)  
-     A simple perceptron, with the IRIS dataset.  
-[[BHP1] - Regression with a Dense Network (DNN)](BHPD/01-DNN-Regression.ipynb)  
-     A Simple regression with a Dense Neural Network (DNN) - BHPD dataset  
-[[BHP2] - Regression with a Dense Network (DNN) - Advanced code](BHPD/02-DNN-Regression-Premium.ipynb)  
-     More advanced example of DNN network code - BHPD dataset  
-[[MNIST1] - Simple classification with DNN](MNIST/01-DNN-MNIST.ipynb)  
-     Example of classification with a fully connected neural network  
-[[GTS1] - CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  
-     Episode 1 : Data analysis and creation of a usable dataset  
-[[GTS2] - CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)  
-     Episode 2 : First convolutions and first results  
-[[GTS3] - CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)  
-     Episode 3 : Monitoring and analysing training, managing checkpoints  
-[[GTS4] - CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  
-     Episode 4 : Improving the results with data augmentation  
-[[GTS5] - CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)  
-     Episode 5 : A lot of models, a lot of datasets and a lot of results.  
-[[GTS6] - CNN with GTSRB dataset - Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)  
-     Episode 6 : Run Full convolution notebook as a batch  
-[[GTS7] - CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)  
-     Episode 7 : Displaying the reports of the different jobs  
-[[TSB1] - Tensorboard with/from Jupyter ](GTSRB/99-Scripts-Tensorboard.ipynb)  
-     4 ways to use Tensorboard from the Jupyter environment  
-[[BASH1] - OAR batch script](GTSRB/batch_oar.sh)  
-     Bash script for OAR batch submission of GTSRB notebook  
-[[BASH2] - SLURM batch script](GTSRB/batch_slurm.sh)  
-     Bash script for SLURM batch submission of GTSRB notebooks  
-[[IMDB1] - Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)  
-     A very classical example of word embedding for text classification (sentiment analysis)  
-[[IMDB2] - Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)  
-     Example of reusing a previously saved model  
-[[IMDB3] - Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)  
-     Still the same problem, but with a network combining embedding and LSTM  
-[[SYNOP1] - Time series with RNN - Preparation of data](SYNOP/01-Preparation-of-data.ipynb)  
-     Episode 1 : Data analysis and creation of a usable dataset  
-[[SYNOP2] - Time series with RNN - Try a prediction](SYNOP/02-First-predictions.ipynb)  
-     Episode 2 : Training session and first predictions  
-[[SYNOP3] - Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb)  
-     Episode 3: Attempt to predict in the longer term  
-[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.ipynb)  
-     Episode 1 : Model construction and Training  
-[[VAE1] - Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.nbconvert.ipynb)  
-     Episode 1 : Model construction and Training  
-[[VAE2] - Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb)  
-     Episode 2 : Exploring our latent space  
-[[VAE3] - About the CelebA dataset](VAE/03-About-CelebA.ipynb)  
-     Episode 3 : About the CelebA dataset, a more fun dataset ;-)  
-[[VAE4] - Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-datasets.ipynb)  
-     Episode 4 : Preparation of a clustered dataset, batchable  
-[[VAE5] - Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb)  
-     Episode 5 :\tChecking the clustered dataset  
-[[VAE6] - Variational AutoEncoder (VAE) with CelebA (small)](VAE/06-VAE-with-CelebA-s.ipynb)  
-     Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)  
-[[VAE7] - Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.ipynb)  
-     Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)  
-[[VAE8] - Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)  
-     Episode 8 : Exploring latent space of our trained models  
-[[BASH1] - OAR batch script](VAE/batch_oar.sh)  
-     Bash script for OAR batch submission of VAE notebook  
-[[BASH2] - SLURM batch script](VAE/batch_slurm.sh)  
-     Bash script for SLURM batch submission of VAE notebooks  
-[[ACTF1] - Activation functions](Misc/Activation-Functions.ipynb)  
-     Some activation functions, with their derivatives.  
-[[NP1] - A short introduction to Numpy](Misc/Numpy.ipynb)  
-     Numpy is an essential tool for the Scientific Python.  
+| | |
+|--|--|
+|LINR1| [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)<br>Direct determination of linear regression |
+|GRAD1| [Linear regression with gradient descent](LinearReg/02-Gradient-descent.ipynb)<br>An example of gradient descent in the simple case of a linear regression.|
+|POLR1| [Complexity Syndrome](LinearReg/03-Polynomial-Regression.ipynb)<br>Illustration of the problem of complexity with the polynomial regression|
+|LOGR1| [Logistic regression, in pure Tensorflow](LinearReg/04-Logistic-Regression.ipynb)<br>Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. |
+|PER57| [Perceptron Model 1957](IRIS/01-Simple-Perceptron.ipynb)<br>A simple perceptron, with the IRIS dataset.|
+|BHP1| [Regression with a Dense Network (DNN)](BHPD/01-DNN-Regression.ipynb)<br>A Simple regression with a Dense Neural Network (DNN) - BHPD dataset|
+|BHP2| [Regression with a Dense Network (DNN) - Advanced code](BHPD/02-DNN-Regression-Premium.ipynb)<br>More advanced example of DNN network code - BHPD dataset|
+|MNIST1| [Simple classification with DNN](MNIST/01-DNN-MNIST.ipynb)<br>Example of classification with a fully connected neural network|
+|GTS1| [CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)<br>Episode 1 : Data analysis and creation of a usable dataset|
+|GTS2| [CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)<br>Episode 2 : First convolutions and first results|
+|GTS3| [CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)<br>Episode 3 : Monitoring and analysing training, managing checkpoints|
+|GTS4| [CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)<br>Episode 4 : Improving the results with data augmentation|
+|GTS5| [CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)<br>Episode 5 : A lot of models, a lot of datasets and a lot of results.|
+|GTS6| [CNN with GTSRB dataset - Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)<br>Episode 6 : Run Full convolution notebook as a batch|
+|GTS7| [CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)<br>Episode 7 : Displaying the reports of the different jobs|
+|TSB1| [Tensorboard with/from Jupyter ](GTSRB/99-Scripts-Tensorboard.ipynb)<br>4 ways to use Tensorboard from the Jupyter environment|
+|IMDB1| [Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)<br>A very classical example of word embedding for text classification (sentiment analysis)|
+|IMDB2| [Text embedding with IMDB - 
Reloaded](IMDB/02-Prediction.ipynb)<br>Example of reusing a previously saved model|
+|IMDB3| [Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)<br>Still the same problem, but with a network combining embedding and LSTM|
+|SYNOP1| [Time series with RNN - Preparation of data](SYNOP/01-Preparation-of-data.ipynb)<br>Episode 1 : Data analysis and creation of a usable dataset|
+|SYNOP2| [Time series with RNN - Try a prediction](SYNOP/02-First-predictions.ipynb)<br>Episode 2 : Training session and first predictions|
+|SYNOP3| [Time series with RNN - 12h predictions](SYNOP/03-12h-predictions.ipynb)<br>Episode 3: Attempt to predict in the longer term |
+|VAE1| [Variational AutoEncoder (VAE) with MNIST](VAE/01-VAE-with-MNIST.nbconvert.ipynb)<br>Episode 1 : Model construction and Training|
+|VAE2| [Variational AutoEncoder (VAE) with MNIST - Analysis](VAE/02-VAE-with-MNIST-post.ipynb)<br>Episode 2 : Exploring our latent space|
+|VAE3| [About the CelebA dataset](VAE/03-About-CelebA.ipynb)<br>Episode 3 : About the CelebA dataset, a more fun dataset ;-)|
+|VAE4| [Preparation of the CelebA dataset](VAE/04-Prepare-CelebA-datasets.ipynb)<br>Episode 4 : Preparation of a clustered dataset, batchable|
+|VAE5| [Checking the clustered CelebA dataset](VAE/05-Check-CelebA.ipynb)<br>Episode 5 : Checking the clustered dataset|
+|VAE6| [Variational AutoEncoder (VAE) with CelebA (small)](VAE/06-VAE-with-CelebA-s.nbconvert.ipynb)<br>Episode 6 : Variational AutoEncoder (VAE) with CelebA (small res.)|
+|VAE7| [Variational AutoEncoder (VAE) with CelebA (medium)](VAE/07-VAE-with-CelebA-m.nbconvert.ipynb)<br>Episode 7 : Variational AutoEncoder (VAE) with CelebA (medium res.)|
+|VAE8| [Variational AutoEncoder (VAE) with CelebA - Analysis](VAE/08-VAE-withCelebA-post.ipynb)<br>Episode 8 : Exploring latent space of our trained models|
+|ACTF1| [Activation functions](Misc/Activation-Functions.ipynb)<br>Some activation functions, with their derivatives.|
+|NP1| [A short introduction to Numpy](Misc/Numpy.ipynb)<br>Numpy is an essential tool for the Scientific Python.|
 <!-- INDEX_END -->
diff --git a/fidle/Finished.ipynb b/fidle/Finished.ipynb
new file mode 100644
index 0000000..8faeb77
--- /dev/null
+++ b/fidle/Finished.ipynb
@@ -0,0 +1,152 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
+    "\n",
+    "\n",
+    "## Notebook Performance Statistics (Continuous integration)\n",
+    " - List of executed notebooks  \n",
+    " - Runtime and status  \n",
+    "\n",
+    "Each notebook records its start and end of execution:\n",
+    " - at the beginning, during `pwk.init()`\n",
+    " - at the end, with `pwk.end()`\n",
+    "\n",
+    "This information is saved in a JSON file (FINISHED_FILE, cf. config.py)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import os,sys,glob,json\n",
+    "from pathlib import Path\n",
+    "from IPython.display import display, Markdown, HTML\n",
+    "\n",
+    "sys.path.append('..')\n",
+    "import fidle.pwk as pwk\n",
+    "import fidle.config as config"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>Notebook</th>\n",
+       "      <th>path</th>\n",
+       "      <th>start</th>\n",
+       "      <th>end</th>\n",
+       "      <th>duration</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td><a href=\"/home/pjluc/dev/fidle/LinearReg/01-Linear-Regression.ipynb\">01-Linear-Regression</a></td>\n",
+       "      <td>/home/pjluc/dev/fidle/LinearReg</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:16:41</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:16:41</td>\n",
+       "      <td>00:00:00 256ms</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td><a href=\"/home/pjluc/dev/fidle/LinearReg/02-Gradient-descent.ipynb\">02-Gradient-descent</a></td>\n",
+       "      <td>/home/pjluc/dev/fidle/LinearReg</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:16:56</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:17:00</td>\n",
+       "      <td>00:00:03 279ms</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td><a href=\"/home/pjluc/dev/fidle/LinearReg/03-Polynomial-Regression.ipynb\">03-Polynomial-Regression</a></td>\n",
+       "      <td>/home/pjluc/dev/fidle/LinearReg</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:20:32</td>\n",
+       "      <td>Wednesday 9 December 2020, 20:20:33</td>\n",
+       "      <td>00:00:01 682ms</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>"
+      ],
+      "text/plain": [
+       "<IPython.core.display.HTML object>"
+      ]
+     },
+     "execution_count": 32,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# ---- Load the finished file : one entry per executed notebook\n",
+    "#\n",
+    "with open(config.FINISHED_FILE) as infile:\n",
+    "    dict_finished = json.load( infile )\n",
+    "\n",
+    "df = pd.DataFrame(dict_finished).transpose()\n",
+    "\n",
+    "df.reset_index(inplace=True)\n",
+    "df.rename(columns = {'index':'Notebook'}, inplace=True)\n",
+    "\n",
+    "# ---- Turn each notebook name into a link to the notebook itself\n",
+    "#\n",
+    "df['Notebook'] = df.apply(\n",
+    "    lambda x: '<a href=\"{}/{}.ipynb\">{}</a>'.format(x['path'],x['Notebook'],x['Notebook']), axis=1\n",
+    "    )\n",
+    "\n",
+    "HTML(df.to_html(\n",
+    "    render_links=True,\n",
+    "    escape=False))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "<img width=\"80px\" src=\"../fidle/img/00-Fidle-logo-01.svg\"></img>"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/fidle/Update_index.ipynb b/fidle/Update_index.ipynb
new file mode 100644
index 0000000..a53d231
--- /dev/null
+++ b/fidle/Update_index.ipynb
@@ -0,0 +1,242 @@
+{
+ "cells": 
[
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import nbformat\n",
+    "import re\n",
+    "import sys, os, glob\n",
+    "import json\n",
+    "from collections import OrderedDict\n",
+    "\n",
+    "sys.path.append('..')\n",
+    "# import fidle.pwk as pwk\n",
+    "import fidle.config as config\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "directories_to_index = ['LinearReg', 'IRIS', 'BHPD', 'MNIST', 'GTSRB', 'IMDB', 'SYNOP', 'VAE', 'Misc']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "def get_notebooks(directories, top_dir='..'):\n",
+    "    '''\n",
+    "    Return a list of notebooks from a given list of directories\n",
+    "    args:\n",
+    "        directories : list of directories\n",
+    "        top_dir : location of these directories\n",
+    "    return:\n",
+    "        notebooks : notebooks filename list (without top_dir prefix)\n",
+    "    '''\n",
+    "    notebooks = []\n",
+    "    \n",
+    "    for d in directories:\n",
+    "        filenames = glob.glob( f'{top_dir}/{d}/*.ipynb')\n",
+    "        filenames.sort()\n",
+    "        notebooks.extend(filenames)\n",
+    "\n",
+    "    notebooks = [ x.replace(f'{top_dir}/','') for x in notebooks]\n",
+    "    return notebooks\n",
+    "\n",
+    "\n",
+    "def get_infos(filename, top_dir='..'):\n",
+    "    '''\n",
+    "    Extract information from a fidle notebook.\n",
+    "    Extracted fields are dirname, basename, id, title and description, read from comment tags in markdown cells.\n",
+    "    args:\n",
+    "        filename : Notebook filename\n",
+    "        top_dir : location of this notebook\n",
+    "    return:\n",
+    "        about : dict with the extracted fields\n",
+    "    '''\n",
+    "\n",
+    "    about={}\n",
+    "    about['dirname']     = os.path.dirname(filename)\n",
+    "    about['basename']    = os.path.basename(filename)\n",
+    "    about['id']          = '??'\n",
+    "    about['title']       = '??'\n",
+    "    about['description'] = '??'\n",
+    "    \n",
+    "    # ---- Read notebook\n",
+    "    #\n",
+    "    notebook = nbformat.read(f'{top_dir}/{filename}', nbformat.NO_CONVERT)\n",
+    "    \n",
+    "    # ---- Get id, title and desc tags\n",
+    "    #\n",
+    "    for cell in notebook.cells:\n",
+    "\n",
+    "        if cell['cell_type'] == 'markdown':\n",
+    "\n",
+    "            find = re.findall(r'<\\!-- TITLE -->\\s*\\[(.*)\\]\\s*-\\s*(.*)\\n',cell.source)\n",
+    "            if find:\n",
+    "                about['id']    = find[0][0]\n",
+    "                about['title'] = find[0][1]\n",
+    "\n",
+    "            find = re.findall(r'<\\!-- DESC -->\\s*(.*)\\n',cell.source)\n",
+    "            if find:\n",
+    "                about['description'] = find[0]\n",
+    "\n",
+    "    return about\n",
+    "    \n",
+    "\n",
+    "def get_catalog(notebooks_list, top_dir='..'):\n",
+    "    '''\n",
+    "    Return an OrderedDict of notebook attributes.\n",
+    "    Keys are notebook ids.\n",
+    "    args:\n",
+    "        notebooks_list : list of notebooks filenames\n",
+    "        top_dir : location of these notebooks\n",
+    "    return:\n",
+    "        OrderedDict : {<notebook id> : {infos}}\n",
+    "    '''\n",
+    "    \n",
+    "    catalog = OrderedDict()\n",
+    "\n",
+    "    for nb in notebooks_list:\n",
+    "        about = get_infos(nb, top_dir=top_dir)\n",
+    "        id=about['id']\n",
+    "        catalog[id] = about\n",
+    "\n",
+    "    return catalog\n",
+    "    \n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "README.md is updated.\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "# ---- Get the notebook list\n",
+    "#\n",
+    "notebooks_list = get_notebooks(directories_to_index)\n",
+    "\n",
+    "# ---- Get a detailed catalog for this list\n",
+    "#\n",
+    "catalog = get_catalog(notebooks_list)\n",
+    "\n",
+    "with 
open(config.CATALOG_FILE,'wt') as fp:\n",
+    "    json.dump(catalog,fp,indent=4)\n",
+    "\n",
+    "# ---- Create a markdown index\n",
+    "#\n",
+    "lines=['| | |','|--|--|']\n",
+    "tab=' '*5\n",
+    "for id, about in catalog.items():\n",
+    "    id          = about['id']\n",
+    "    dirname     = about['dirname']\n",
+    "    basename    = about['basename']\n",
+    "    title       = about['title']\n",
+    "    description = about['description']\n",
+    "    \n",
+    "#     lines.append( f'[[{id}] - {title}]({dirname}/{basename})  ' )\n",
+    "#     lines.append( f'{tab}{description}  ')\n",
+    "    lines.append( f'|{id}| [{title}]({dirname}/{basename})<br>{description}|')\n",
+    "\n",
+    "index = '\\n'.join(lines)\n",
+    "    \n",
+    "# ---- Load README.md\n",
+    "#\n",
+    "with open('../README.md','r') as fp:\n",
+    "    readme=fp.read()\n",
+    "    \n",
+    "# ---- Update index\n",
+    "#\n",
+    "debut = '<!-- INDEX_BEGIN -->'\n",
+    "fin   = '<!-- INDEX_END -->'\n",
+    "\n",
+    "readme = re.sub(f'{debut}.*{fin}',f'{debut}\\n{index}\\n{fin}',readme, flags=re.DOTALL)\n",
+    "\n",
+    "# ---- Update version\n",
+    "#\n",
+    "debut = '<!-- VERSION_BEGIN -->'\n",
+    "fin   = '<!-- VERSION_END -->'\n",
+    "\n",
+    "readme = re.sub(f'{debut}.*{fin}',f'{debut}\\n{config.VERSION}\\n{fin}',readme, flags=re.DOTALL)\n",
+    "\n",
+    "# ---- Save it\n",
+    "#\n",
+    "with open('../README.md','wt') as fp:\n",
+    "    fp.write(readme)\n",
+    "\n",
+    "print('README.md is updated.')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# ---- Save the updated README as a one-cell notebook\n",
+    "#\n",
+    "nnb = nbformat.v4.new_notebook()\n",
+    "nnb.cells.append( nbformat.v4.new_markdown_cell(source=readme) )\n",
+    "with open('../test.ipynb', 'w') as f:\n",
+    "    nbformat.write(nnb, f)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git 
a/fidle/config.py b/fidle/config.py index 4181215..425ddf9 100644 --- a/fidle/config.py +++ b/fidle/config.py @@ -12,9 +12,9 @@ # Jean-Luc Parouty 2020 -# ---- Current version --------------------------------------------- +# ---- Version ----------------------------------------------------- # -VERSION = '0.7 DEV' +VERSION = '0.6.0 DEV' # ---- Default notebook name --------------------------------------- # @@ -27,4 +27,8 @@ FIDLE_CSSFILE = '../fidle/css/custom.css' # ---- Done file, to keep track of finished notebooks -------------- # -FINISHED_FILE = '../fidle/log/finished_file.json' \ No newline at end of file +FINISHED_FILE = '../fidle/log/finished_file.json' + +# ---- Catalog file, a json description of all notebooks +# +CATALOG_FILE = '../fidle/log/catalog_file.json' \ No newline at end of file diff --git a/fidle/log/catalog_file.json b/fidle/log/catalog_file.json new file mode 100644 index 0000000..8a6b68e --- /dev/null +++ b/fidle/log/catalog_file.json @@ -0,0 +1,226 @@ +{ + "LINR1": { + "dirname": "LinearReg", + "basename": "01-Linear-Regression.ipynb", + "id": "LINR1", + "title": "Linear regression with direct resolution", + "description": "Direct determination of linear regression " + }, + "GRAD1": { + "dirname": "LinearReg", + "basename": "02-Gradient-descent.ipynb", + "id": "GRAD1", + "title": "Linear regression with gradient descent", + "description": "An example of gradient descent in the simple case of a linear regression." + }, + "POLR1": { + "dirname": "LinearReg", + "basename": "03-Polynomial-Regression.ipynb", + "id": "POLR1", + "title": "Complexity Syndrome", + "description": "Illustration of the problem of complexity with the polynomial regression" + }, + "LOGR1": { + "dirname": "LinearReg", + "basename": "04-Logistic-Regression.ipynb", + "id": "LOGR1", + "title": "Logistic regression, in pure Tensorflow", + "description": "Logistic Regression with Mini-Batch Gradient Descent using pure TensorFlow. " + }, + "PER57": { + "dirname": "IRIS", + "basename": "01-Simple-Perceptron.ipynb", + "id": "PER57", + "title": "Perceptron Model 1957", + "description": "A simple perceptron, with the IRIS dataset." 
+ }, + "BHP1": { + "dirname": "BHPD", + "basename": "01-DNN-Regression.ipynb", + "id": "BHP1", + "title": "Regression with a Dense Network (DNN)", + "description": "A Simple regression with a Dense Neural Network (DNN) - BHPD dataset" + }, + "BHP2": { + "dirname": "BHPD", + "basename": "02-DNN-Regression-Premium.ipynb", + "id": "BHP2", + "title": "Regression with a Dense Network (DNN) - Advanced code", + "description": "More advanced example of DNN network code - BHPD dataset" + }, + "MNIST1": { + "dirname": "MNIST", + "basename": "01-DNN-MNIST.ipynb", + "id": "MNIST1", + "title": "Simple classification with DNN", + "description": "Example of classification with a fully connected neural network" + }, + "GTS1": { + "dirname": "GTSRB", + "basename": "01-Preparation-of-data.ipynb", + "id": "GTS1", + "title": "CNN with GTSRB dataset - Data analysis and preparation", + "description": "Episode 1 : Data analysis and creation of a usable dataset" + }, + "GTS2": { + "dirname": "GTSRB", + "basename": "02-First-convolutions.ipynb", + "id": "GTS2", + "title": "CNN with GTSRB dataset - First convolutions", + "description": "Episode 2 : First convolutions and first results" + }, + "GTS3": { + "dirname": "GTSRB", + "basename": "03-Tracking-and-visualizing.ipynb", + "id": "GTS3", + "title": "CNN with GTSRB dataset - Monitoring ", + "description": "Episode 3 : Monitoring and analysing training, managing checkpoints" + }, + "GTS4": { + "dirname": "GTSRB", + "basename": "04-Data-augmentation.ipynb", + "id": "GTS4", + "title": "CNN with GTSRB dataset - Data augmentation ", + "description": "Episode 4 : Improving the results with data augmentation" + }, + "GTS5": { + "dirname": "GTSRB", + "basename": "05-Full-convolutions.ipynb", + "id": "GTS5", + "title": "CNN with GTSRB dataset - Full convolutions ", + "description": "Episode 5 : A lot of models, a lot of datasets and a lot of results." 
+ }, + "GTS6": { + "dirname": "GTSRB", + "basename": "06-Notebook-as-a-batch.ipynb", + "id": "GTS6", + "title": "CNN with GTSRB dataset - Full convolutions as a batch", + "description": "Episode 6 : Run Full convolution notebook as a batch" + }, + "GTS7": { + "dirname": "GTSRB", + "basename": "07-Show-report.ipynb", + "id": "GTS7", + "title": "CNN with GTSRB dataset - Show reports", + "description": "Episode 7 : Displaying the reports of the different jobs" + }, + "TSB1": { + "dirname": "GTSRB", + "basename": "99-Scripts-Tensorboard.ipynb", + "id": "TSB1", + "title": "Tensorboard with/from Jupyter ", + "description": "4 ways to use Tensorboard from the Jupyter environment" + }, + "IMDB1": { + "dirname": "IMDB", + "basename": "01-Embedding-Keras.ipynb", + "id": "IMDB1", + "title": "Text embedding with IMDB", + "description": "A very classical example of word embedding for text classification (sentiment analysis)" + }, + "IMDB2": { + "dirname": "IMDB", + "basename": "02-Prediction.ipynb", + "id": "IMDB2", + "title": "Text embedding with IMDB - Reloaded", + "description": "Example of reusing a previously saved model" + }, + "IMDB3": { + "dirname": "IMDB", + "basename": "03-LSTM-Keras.ipynb", + "id": "IMDB3", + "title": "Text embedding/LSTM model with IMDB", + "description": "Still the same problem, but with a network combining embedding and LSTM" + }, + "SYNOP1": { + "dirname": "SYNOP", + "basename": "01-Preparation-of-data.ipynb", + "id": "SYNOP1", + "title": "Time series with RNN - Preparation of data", + "description": "Episode 1 : Data analysis and creation of a usable dataset" + }, + "SYNOP2": { + "dirname": "SYNOP", + "basename": "02-First-predictions.ipynb", + "id": "SYNOP2", + "title": "Time series with RNN - Try a prediction", + "description": "Episode 2 : Training session and first predictions" + }, + "SYNOP3": { + "dirname": "SYNOP", + "basename": "03-12h-predictions.ipynb", + "id": "SYNOP3", + "title": "Time series with RNN - 12h predictions", + "description": "Episode 3: Attempt to predict in the longer term " + }, + "VAE1": { + "dirname": "VAE", + "basename": "01-VAE-with-MNIST.nbconvert.ipynb", + "id": "VAE1", + "title": "Variational AutoEncoder (VAE) with MNIST", + "description": "Episode 1 : Model construction and Training" + }, + "VAE2": { + "dirname": "VAE", + "basename": "02-VAE-with-MNIST-post.ipynb", + "id": "VAE2", + "title": "Variational AutoEncoder (VAE) with MNIST - Analysis", + "description": "Episode 2 : Exploring our latent space" + }, + "VAE3": { + "dirname": "VAE", + "basename": "03-About-CelebA.ipynb", + "id": "VAE3", + "title": "About the CelebA dataset", + "description": "Episode 3\u00a0: About the CelebA dataset, a more fun dataset ;-)" + }, + "VAE4": { + "dirname": "VAE", + "basename": "04-Prepare-CelebA-datasets.ipynb", + "id": "VAE4", + "title": "Preparation of the CelebA dataset", + "description": "Episode 4\u00a0: Preparation of a clustered dataset, batchable" + }, + "VAE5": { + "dirname": "VAE", + "basename": "05-Check-CelebA.ipynb", + "id": "VAE5", + "title": "Checking the clustered CelebA dataset", + "description": "Episode 5\u00a0:\tChecking the clustered dataset" + }, + "VAE6": { + "dirname": "VAE", + "basename": "06-VAE-with-CelebA-s.nbconvert.ipynb", + "id": "VAE6", + "title": "Variational AutoEncoder (VAE) with CelebA (small)", + "description": "Episode 6\u00a0: Variational AutoEncoder (VAE) with CelebA (small res.)" + }, + "VAE7": { + "dirname": "VAE", + "basename": "07-VAE-with-CelebA-m.nbconvert.ipynb", + "id": "VAE7", + "title": "Variational 
AutoEncoder (VAE) with CelebA (medium)", + "description": "Episode 7\u00a0: Variational AutoEncoder (VAE) with CelebA (medium res.)" + }, + "VAE8": { + "dirname": "VAE", + "basename": "08-VAE-withCelebA-post.ipynb", + "id": "VAE8", + "title": "Variational AutoEncoder (VAE) with CelebA - Analysis", + "description": "Episode 8\u00a0: Exploring latent space of our trained models" + }, + "ACTF1": { + "dirname": "Misc", + "basename": "Activation-Functions.ipynb", + "id": "ACTF1", + "title": "Activation functions", + "description": "Some activation functions, with their derivatives." + }, + "NP1": { + "dirname": "Misc", + "basename": "Numpy.ipynb", + "id": "NP1", + "title": "A short introduction to Numpy", + "description": "Numpy is an essential tool for the Scientific Python." + } +} \ No newline at end of file -- GitLab
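
For reference, two conventions assumed by the new notebooks above (a sketch only; the notebook id DEMO1 below is hypothetical). Update_index.ipynb indexes a notebook only if one of its markdown cells carries the comment tags matched by the regexes in get_infos():

    <!-- TITLE --> [DEMO1] - A demonstration notebook
    <!-- DESC -->  One-line description picked up by the index generator

Finished.ipynb assumes that each notebook brackets its execution with the pwk helpers, which feed FINISHED_FILE (cf. config.py):

    import sys
    sys.path.append('..')
    import fidle.pwk as pwk

    datasets_dir = pwk.init('DEMO1')   # records the start time for this notebook id
    # ... notebook body ...
    pwk.end()                          # records the end time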