diff --git a/GTSRB/01-Preparation-of-data.ipynb b/GTSRB/01-Preparation-of-data.ipynb
index b2ce2a710e32185767151dbfd0e92fd01ab0c046..ddad62e1c7b0b61f745e40c3635409e16822bf0c 100644
--- a/GTSRB/01-Preparation-of-data.ipynb
+++ b/GTSRB/01-Preparation-of-data.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB1] - CNN with GTSRB dataset - Data analysis and preparation\n",
-    "<!-- DESC --> Episode 1 : Data analysis and creation of a usable dataset\n",
+    "# <!-- TITLE --> [GTSRB1] - Dataset analysis and preparation\n",
+    "<!-- DESC --> Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/02-First-convolutions.ipynb b/GTSRB/02-First-convolutions.ipynb
index 4d478af0e1e57781ab19ba4786dba0720e9da108..37e259b016f0809ab8d657c86930c790bc1b7261 100644
--- a/GTSRB/02-First-convolutions.ipynb
+++ b/GTSRB/02-First-convolutions.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB2] - CNN with GTSRB dataset - First convolutions\n",
-    "<!-- DESC --> Episode 2 : First convolutions and first results\n",
+    "# <!-- TITLE --> [GTSRB2] - First convolutions\n",
+    "<!-- DESC --> Episode 2 : First convolutions and first classification of our traffic signs\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/03-Tracking-and-visualizing.ipynb b/GTSRB/03-Tracking-and-visualizing.ipynb
index 1ada67a096827b3a1372531402cc493dc6557049..8d8f089113829c1fb03a6636a45417fc4f0f641d 100644
--- a/GTSRB/03-Tracking-and-visualizing.ipynb
+++ b/GTSRB/03-Tracking-and-visualizing.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB3] - CNN with GTSRB dataset - Monitoring \n",
-    "<!-- DESC --> Episode 3 : Monitoring and analysing training, managing checkpoints\n",
+    "# <!-- TITLE --> [GTSRB3] - Training monitoring\n",
+    "<!-- DESC --> Episode 3 : Monitoring, analysis and check points during a training session\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/04-Data-augmentation.ipynb b/GTSRB/04-Data-augmentation.ipynb
index 5fea43a9d56f55a711e7f859ce30c8e5e86d2baf..fd336e5d0ce0d656ec42245ee1eaeeebbd401c85 100644
--- a/GTSRB/04-Data-augmentation.ipynb
+++ b/GTSRB/04-Data-augmentation.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB4] - CNN with GTSRB dataset - Data augmentation \n",
-    "<!-- DESC --> Episode 4 : Improving the results with data augmentation\n",
+    "# <!-- TITLE --> [GTSRB4] - Data augmentation \n",
+    "<!-- DESC --> Episode 4 : Adding data by data augmentation when we lack it, to improve our results\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/05-Full-convolutions.ipynb b/GTSRB/05-Full-convolutions.ipynb
index 2b7274f8d2725cd2a12b7136e0808d8f275f4c92..1c169c9e27e95b351b6a81edbc0b361b971b8bea 100644
--- a/GTSRB/05-Full-convolutions.ipynb
+++ b/GTSRB/05-Full-convolutions.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB5] - CNN with GTSRB dataset - Full convolutions \n",
+    "# <!-- TITLE --> [GTSRB5] - Full convolutions\n",
     "<!-- DESC --> Episode 5 : A lot of models, a lot of datasets and a lot of results.\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
diff --git a/GTSRB/06-Notebook-as-a-batch.ipynb b/GTSRB/06-Notebook-as-a-batch.ipynb
index 9ef2ef37126a5963d57fcbfd4e30d0bca7946719..8902e0ed24ba52bd29ea61831e7618d9f609fc86 100644
--- a/GTSRB/06-Notebook-as-a-batch.ipynb
+++ b/GTSRB/06-Notebook-as-a-batch.ipynb
@@ -7,7 +7,7 @@
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
     "# <!-- TITLE --> [GTSRB6] - Full convolutions as a batch\n",
-    "<!-- DESC --> Episode 6 : Run Full convolution notebook as a batch\n",
+    "<!-- DESC --> Episode 6 : To compute bigger, use your notebook in batch mode\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/07-Show-report.ipynb b/GTSRB/07-Show-report.ipynb
index 0b1b34764cfd18e844dcbbbe165dc881bd741f2d..09999292c70b73dc81223e57b44478e4d2d241e3 100644
--- a/GTSRB/07-Show-report.ipynb
+++ b/GTSRB/07-Show-report.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [GTSRB7] - CNN with GTSRB dataset - Show reports\n",
-    "<!-- DESC -->  Episode 7 : Displaying a jobs report\n",
+    "# <!-- TITLE --> [GTSRB7] - Batch reports\n",
+    "<!-- DESC -->  Episode 7 : Displaying our jobs report, and the winner is...\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/GTSRB/batch_oar.sh b/GTSRB/batch_oar.sh
index 272acfe4f38e8a2ad871804e76a8934162041c33..08197413936e41b23250333f340e1283a7948cde 100755
--- a/GTSRB/batch_oar.sh
+++ b/GTSRB/batch_oar.sh
@@ -19,8 +19,8 @@
 #                             Fidle at GRICAD
 # -----------------------------------------------
 #
-# <!-- TITLE --> [GTSRB10] - OAR batch submission
-# <!-- DESC -->  Bash script for OAR batch submission of GTSRB notebook 
+# <!-- TITLE --> [GTSRB10] - OAR batch script submission
+# <!-- DESC -->  Bash script for an OAR batch submission of an ipython code
 # <!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->
 
 # ==== Notebook parameters =========================================
diff --git a/GTSRB/batch_slurm.sh b/GTSRB/batch_slurm.sh
index 708fd53ee1c54b037fa38a622228e0777636eada..b81c7a52a83e29cd72c78afaed14acb75c981233 100755
--- a/GTSRB/batch_slurm.sh
+++ b/GTSRB/batch_slurm.sh
@@ -9,7 +9,7 @@
 # -----------------------------------------------
 #
 # <!-- TITLE --> [GTSRB11] - SLURM batch script
-# <!-- DESC --> Bash script for SLURM batch submission of GTSRB notebooks 
+# <!-- DESC --> Bash script for a Slurm batch submission of an ipython code
 # <!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->
 #
 # Soumission :  sbatch  /(...)/fidle/GTSRB/batch_slurm.sh
diff --git a/IMDB/01-Embedding-Keras.ipynb b/IMDB/01-Embedding-Keras.ipynb
index 275fb98aa0ce66ae0a3c940fc6d817942c9967e0..93627a972c9ad463fef5ddf112eed360e0e3412d 100644
--- a/IMDB/01-Embedding-Keras.ipynb
+++ b/IMDB/01-Embedding-Keras.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [IMDB1] - Text embedding with IMDB\n",
-    "<!-- DESC --> A very classical example of word embedding for text classification (sentiment analysis)\n",
+    "# <!-- TITLE --> [IMDB1] - Sentiment analysis with text embedding\n",
+    "<!-- DESC --> A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
@@ -1021,7 +1021,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.7.9"
   }
  },
  "nbformat": 4,
diff --git a/IMDB/02-Prediction.ipynb b/IMDB/02-Prediction.ipynb
index c8033a810ae8c28d076bd7bd214df86d54c90f38..353619724b9ce879b872838cc9d71442b95b865e 100644
--- a/IMDB/02-Prediction.ipynb
+++ b/IMDB/02-Prediction.ipynb
@@ -6,8 +6,8 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [IMDB2] - Text embedding with IMDB - Reloaded\n",
-    "<!-- DESC --> Example of reusing a previously saved model\n",
+    "# <!-- TITLE --> [IMDB2] - Reload and reuse a saved model\n",
+    "<!-- DESC --> Retrieving a saved model to perform a sentiment analysis (movie review)\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
     "## Objectives :\n",
diff --git a/IMDB/03-LSTM-Keras.ipynb b/IMDB/03-LSTM-Keras.ipynb
index 9f902e052d9dcc45ce645e2efda256bc06e433b8..430dfd68424882cea24f61dbbc1e620c0abb75b3 100644
--- a/IMDB/03-LSTM-Keras.ipynb
+++ b/IMDB/03-LSTM-Keras.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "<img width=\"800px\" src=\"../fidle/img/00-Fidle-header-01.svg\"></img>\n",
     "\n",
-    "# <!-- TITLE --> [IMDB3] - Text embedding/LSTM model with IMDB\n",
+    "# <!-- TITLE --> [IMDB3] - Sentiment analysis with a LSTM network\n",
     "<!-- DESC --> Still the same problem, but with a network combining embedding and LSTM\n",
     "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
     "\n",
@@ -980,7 +980,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.7.9"
   }
  },
  "nbformat": 4,
diff --git a/README.ipynb b/README.ipynb
index b7d638024f03481fe65f8e5f7f9d2e36713c092f..2adbfbf320ec5aca0066e6d6a808d9299271f028 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -5,10 +5,10 @@
    "execution_count": 1,
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2021-01-08T10:27:08.480920Z",
-     "iopub.status.busy": "2021-01-08T10:27:08.480319Z",
-     "iopub.status.idle": "2021-01-08T10:27:08.483733Z",
-     "shell.execute_reply": "2021-01-08T10:27:08.483336Z"
+     "iopub.execute_input": "2021-01-08T21:53:24.359601Z",
+     "iopub.status.busy": "2021-01-08T21:53:24.359126Z",
+     "iopub.status.idle": "2021-01-08T21:53:24.367752Z",
+     "shell.execute_reply": "2021-01-08T21:53:24.368077Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -85,31 +85,31 @@
        "An example of classification using a dense neural network for the famous MNIST dataset\n",
        "\n",
        "### Images classification with Convolutional Neural Networks (CNN)\n",
-       "- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  \n",
-       "Episode 1 : Data analysis and creation of a usable dataset\n",
-       "- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)  \n",
-       "Episode 2 : First convolutions and first results\n",
-       "- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)  \n",
-       "Episode 3 : Monitoring and analysing training, managing checkpoints\n",
-       "- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  \n",
-       "Episode 4 : Improving the results with data augmentation\n",
-       "- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)  \n",
+       "- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [Dataset analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  \n",
+       "Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset\n",
+       "- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [First convolutions](GTSRB/02-First-convolutions.ipynb)  \n",
+       "Episode 2 : First convolutions and first classification of our traffic signs\n",
+       "- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [Training monitoring](GTSRB/03-Tracking-and-visualizing.ipynb)  \n",
+       "Episode 3 : Monitoring, analysis and check points during a training session\n",
+       "- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  \n",
+       "Episode 4 : Adding data by data augmentation when we lack it, to improve our results\n",
+       "- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [Full convolutions](GTSRB/05-Full-convolutions.ipynb)  \n",
        "Episode 5 : A lot of models, a lot of datasets and a lot of results.\n",
        "- **[GTSRB6](GTSRB/06-Notebook-as-a-batch.ipynb)** - [Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)  \n",
-       "Episode 6 : Run Full convolution notebook as a batch\n",
-       "- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)  \n",
-       "Episode 7 : Displaying a jobs report\n",
-       "- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch submission](GTSRB/batch_oar.sh)  \n",
-       "Bash script for OAR batch submission of GTSRB notebook \n",
+       "Episode 6 : To compute bigger, use your notebook in batch mode\n",
+       "- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [Batch reports](GTSRB/07-Show-report.ipynb)  \n",
+       "Episode 7 : Displaying our jobs report, and the winner is...\n",
+       "- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch script submission](GTSRB/batch_oar.sh)  \n",
+       "Bash script for an OAR batch submission of an ipython code\n",
        "- **[GTSRB11](GTSRB/batch_slurm.sh)** - [SLURM batch script](GTSRB/batch_slurm.sh)  \n",
-       "Bash script for SLURM batch submission of GTSRB notebooks \n",
+       "Bash script for a Slurm batch submission of an ipython code\n",
        "\n",
        "### Sentiment analysis with word embedding\n",
-       "- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)  \n",
-       "A very classical example of word embedding for text classification (sentiment analysis)\n",
-       "- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)  \n",
-       "Example of reusing a previously saved model\n",
-       "- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)  \n",
+       "- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Sentiment analysis with text embedding](IMDB/01-Embedding-Keras.ipynb)  \n",
+       "A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)\n",
+       "- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Reload and reuse a saved model](IMDB/02-Prediction.ipynb)  \n",
+       "Retrieving a saved model to perform a sentiment analysis (movie review)\n",
+       "- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Sentiment analysis with a LSTM network](IMDB/03-LSTM-Keras.ipynb)  \n",
        "Still the same problem, but with a network combining embedding and LSTM\n",
        "\n",
        "### Time series with Recurrent Neural Network (RNN)\n",
@@ -187,6 +187,11 @@
   }
  ],
  "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
diff --git a/README.md b/README.md
index 57c65ab316b627d6391d6499c7434abb936d7a93..457cc4e345126d4d41f8cdc0fffc1ee8c8ea43ea 100644
--- a/README.md
+++ b/README.md
@@ -65,31 +65,31 @@ A more advanced implementation of the precedent example
 An example of classification using a dense neural network for the famous MNIST dataset
 
 ### Images classification with Convolutional Neural Networks (CNN)
-- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  
-Episode 1 : Data analysis and creation of a usable dataset
-- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)  
-Episode 2 : First convolutions and first results
-- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)  
-Episode 3 : Monitoring and analysing training, managing checkpoints
-- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  
-Episode 4 : Improving the results with data augmentation
-- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)  
+- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [Dataset analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  
+Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset
+- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [First convolutions](GTSRB/02-First-convolutions.ipynb)  
+Episode 2 : First convolutions and first classification of our traffic signs
+- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [Training monitoring](GTSRB/03-Tracking-and-visualizing.ipynb)  
+Episode 3 : Monitoring, analysis and check points during a training session
+- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  
+Episode 4 : Adding data by data augmentation when we lack it, to improve our results
+- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [Full convolutions](GTSRB/05-Full-convolutions.ipynb)  
 Episode 5 : A lot of models, a lot of datasets and a lot of results.
 - **[GTSRB6](GTSRB/06-Notebook-as-a-batch.ipynb)** - [Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)  
-Episode 6 : Run Full convolution notebook as a batch
-- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)  
-Episode 7 : Displaying a jobs report
-- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch submission](GTSRB/batch_oar.sh)  
-Bash script for OAR batch submission of GTSRB notebook 
+Episode 6 : To compute bigger, use your notebook in batch mode
+- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [Batch reports](GTSRB/07-Show-report.ipynb)  
+Episode 7 : Displaying our jobs report, and the winner is...
+- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch script submission](GTSRB/batch_oar.sh)  
+Bash script for an OAR batch submission of an ipython code
 - **[GTSRB11](GTSRB/batch_slurm.sh)** - [SLURM batch script](GTSRB/batch_slurm.sh)  
-Bash script for SLURM batch submission of GTSRB notebooks 
+Bash script for a Slurm batch submission of an ipython code
 
 ### Sentiment analysis with word embedding
-- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)  
-A very classical example of word embedding for text classification (sentiment analysis)
-- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)  
-Example of reusing a previously saved model
-- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)  
+- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Sentiment analysis with text embedding](IMDB/01-Embedding-Keras.ipynb)  
+A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)
+- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Reload and reuse a saved model](IMDB/02-Prediction.ipynb)  
+Retrieving a saved model to perform a sentiment analysis (movie review)
+- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Sentiment analysis with a LSTM network](IMDB/03-LSTM-Keras.ipynb)  
 Still the same problem, but with a network combining embedding and LSTM
 
 ### Time series with Recurrent Neural Network (RNN)
diff --git a/fidle/01 - Set and reset.ipynb b/fidle/01 - Set and reset.ipynb
index 53a5a2fc8e027b25e836aa4585ff8a9f11121b1a..7fbc4dfcd52f7edf8f4289e65afc357d29958a48 100644
--- a/fidle/01 - Set and reset.ipynb	
+++ b/fidle/01 - Set and reset.ipynb	
@@ -159,31 +159,31 @@
        "An example of classification using a dense neural network for the famous MNIST dataset\n",
        "\n",
        "### Images classification with Convolutional Neural Networks (CNN)\n",
-       "- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [CNN with GTSRB dataset - Data analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  \n",
-       "Episode 1 : Data analysis and creation of a usable dataset\n",
-       "- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [CNN with GTSRB dataset - First convolutions](GTSRB/02-First-convolutions.ipynb)  \n",
-       "Episode 2 : First convolutions and first results\n",
-       "- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [CNN with GTSRB dataset - Monitoring ](GTSRB/03-Tracking-and-visualizing.ipynb)  \n",
-       "Episode 3 : Monitoring and analysing training, managing checkpoints\n",
-       "- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [CNN with GTSRB dataset - Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  \n",
-       "Episode 4 : Improving the results with data augmentation\n",
-       "- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [CNN with GTSRB dataset - Full convolutions ](GTSRB/05-Full-convolutions.ipynb)  \n",
+       "- **[GTSRB1](GTSRB/01-Preparation-of-data.ipynb)** - [Dataset analysis and preparation](GTSRB/01-Preparation-of-data.ipynb)  \n",
+       "Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset\n",
+       "- **[GTSRB2](GTSRB/02-First-convolutions.ipynb)** - [First convolutions](GTSRB/02-First-convolutions.ipynb)  \n",
+       "Episode 2 : First convolutions and first classification of our traffic signs\n",
+       "- **[GTSRB3](GTSRB/03-Tracking-and-visualizing.ipynb)** - [Training monitoring](GTSRB/03-Tracking-and-visualizing.ipynb)  \n",
+       "Episode 3 : Monitoring, analysis and check points during a training session\n",
+       "- **[GTSRB4](GTSRB/04-Data-augmentation.ipynb)** - [Data augmentation ](GTSRB/04-Data-augmentation.ipynb)  \n",
+       "Episode 4 : Adding data by data augmentation when we lack it, to improve our results\n",
+       "- **[GTSRB5](GTSRB/05-Full-convolutions.ipynb)** - [Full convolutions](GTSRB/05-Full-convolutions.ipynb)  \n",
        "Episode 5 : A lot of models, a lot of datasets and a lot of results.\n",
        "- **[GTSRB6](GTSRB/06-Notebook-as-a-batch.ipynb)** - [Full convolutions as a batch](GTSRB/06-Notebook-as-a-batch.ipynb)  \n",
-       "Episode 6 : Run Full convolution notebook as a batch\n",
-       "- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [CNN with GTSRB dataset - Show reports](GTSRB/07-Show-report.ipynb)  \n",
-       "Episode 7 : Displaying a jobs report\n",
-       "- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch submission](GTSRB/batch_oar.sh)  \n",
-       "Bash script for OAR batch submission of GTSRB notebook \n",
+       "Episode 6 : To compute bigger, use your notebook in batch mode\n",
+       "- **[GTSRB7](GTSRB/07-Show-report.ipynb)** - [Batch reports](GTSRB/07-Show-report.ipynb)  \n",
+       "Episode 7 : Displaying our jobs report, and the winner is...\n",
+       "- **[GTSRB10](GTSRB/batch_oar.sh)** - [OAR batch script submission](GTSRB/batch_oar.sh)  \n",
+       "Bash script for an OAR batch submission of an ipython code\n",
        "- **[GTSRB11](GTSRB/batch_slurm.sh)** - [SLURM batch script](GTSRB/batch_slurm.sh)  \n",
-       "Bash script for SLURM batch submission of GTSRB notebooks \n",
+       "Bash script for a Slurm batch submission of an ipython code\n",
        "\n",
        "### Sentiment analysis with word embedding\n",
-       "- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Text embedding with IMDB](IMDB/01-Embedding-Keras.ipynb)  \n",
-       "A very classical example of word embedding for text classification (sentiment analysis)\n",
-       "- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Text embedding with IMDB - Reloaded](IMDB/02-Prediction.ipynb)  \n",
-       "Example of reusing a previously saved model\n",
-       "- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Text embedding/LSTM model with IMDB](IMDB/03-LSTM-Keras.ipynb)  \n",
+       "- **[IMDB1](IMDB/01-Embedding-Keras.ipynb)** - [Sentiment analysis with text embedding](IMDB/01-Embedding-Keras.ipynb)  \n",
+       "A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)\n",
+       "- **[IMDB2](IMDB/02-Prediction.ipynb)** - [Reload and reuse a saved model](IMDB/02-Prediction.ipynb)  \n",
+       "Retrieving a saved model to perform a sentiment analysis (movie review)\n",
+       "- **[IMDB3](IMDB/03-LSTM-Keras.ipynb)** - [Sentiment analysis with a LSTM network](IMDB/03-LSTM-Keras.ipynb)  \n",
        "Still the same problem, but with a network combining embedding and LSTM\n",
        "\n",
        "### Time series with Recurrent Neural Network (RNN)\n",
@@ -388,6 +388,10 @@
     "new_cell['metadata']= { \"jupyter\": { \"source_hidden\": True} }\n",
     "notebook.cells.append(new_cell)\n",
     "\n",
+    "# --- Pour éviter une modification lors de l'ouverture du notebook\n",
+    "#     pas gênante, mais nécessite de resauvegarder le document à la fermeture...\n",
+    "notebook['metadata'][\"kernelspec\"] = {\"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\" }\n",
+    "\n",
     "# ---- Run it\n",
     "#\n",
     "ep = ExecutePreprocessor(timeout=600, kernel_name=\"python3\")\n",
diff --git a/fidle/log/catalog.json b/fidle/log/catalog.json
index fbb2733b7e1b5c8ea06af896a816a46d7a5de6d3..7d6b0abbf3a4cad142f855bc9a16c5b7d20b0063 100644
--- a/fidle/log/catalog.json
+++ b/fidle/log/catalog.json
@@ -59,35 +59,35 @@
         "id": "GTSRB1",
         "dirname": "GTSRB",
         "basename": "01-Preparation-of-data.ipynb",
-        "title": "CNN with GTSRB dataset - Data analysis and preparation",
-        "description": "Episode 1 : Data analysis and creation of a usable dataset"
+        "title": "Dataset analysis and preparation",
+        "description": "Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset"
     },
     "GTSRB2": {
         "id": "GTSRB2",
         "dirname": "GTSRB",
         "basename": "02-First-convolutions.ipynb",
-        "title": "CNN with GTSRB dataset - First convolutions",
-        "description": "Episode 2 : First convolutions and first results"
+        "title": "First convolutions",
+        "description": "Episode 2 : First convolutions and first classification of our traffic signs"
     },
     "GTSRB3": {
         "id": "GTSRB3",
         "dirname": "GTSRB",
         "basename": "03-Tracking-and-visualizing.ipynb",
-        "title": "CNN with GTSRB dataset - Monitoring ",
-        "description": "Episode 3 : Monitoring and analysing training, managing checkpoints"
+        "title": "Training monitoring",
+        "description": "Episode 3 : Monitoring, analysis and check points during a training session"
     },
     "GTSRB4": {
         "id": "GTSRB4",
         "dirname": "GTSRB",
         "basename": "04-Data-augmentation.ipynb",
-        "title": "CNN with GTSRB dataset - Data augmentation ",
-        "description": "Episode 4 : Improving the results with data augmentation"
+        "title": "Data augmentation ",
+        "description": "Episode 4 : Adding data by data augmentation when we lack it, to improve our results"
     },
     "GTSRB5": {
         "id": "GTSRB5",
         "dirname": "GTSRB",
         "basename": "05-Full-convolutions.ipynb",
-        "title": "CNN with GTSRB dataset - Full convolutions ",
+        "title": "Full convolutions",
         "description": "Episode 5 : A lot of models, a lot of datasets and a lot of results."
     },
     "GTSRB6": {
@@ -95,48 +95,48 @@
         "dirname": "GTSRB",
         "basename": "06-Notebook-as-a-batch.ipynb",
         "title": "Full convolutions as a batch",
-        "description": "Episode 6 : Run Full convolution notebook as a batch"
+        "description": "Episode 6 : To compute bigger, use your notebook in batch mode"
     },
     "GTSRB7": {
         "id": "GTSRB7",
         "dirname": "GTSRB",
         "basename": "07-Show-report.ipynb",
-        "title": "CNN with GTSRB dataset - Show reports",
-        "description": "Episode 7 : Displaying a jobs report"
+        "title": "Batch reports",
+        "description": "Episode 7 : Displaying our jobs report, and the winner is..."
     },
     "GTSRB10": {
         "id": "GTSRB10",
         "dirname": "GTSRB",
         "basename": "batch_oar.sh",
-        "title": "OAR batch submission",
-        "description": "Bash script for OAR batch submission of GTSRB notebook "
+        "title": "OAR batch script submission",
+        "description": "Bash script for an OAR batch submission of an ipython code"
     },
     "GTSRB11": {
         "id": "GTSRB11",
         "dirname": "GTSRB",
         "basename": "batch_slurm.sh",
         "title": "SLURM batch script",
-        "description": "Bash script for SLURM batch submission of GTSRB notebooks "
+        "description": "Bash script for a Slurm batch submission of an ipython code"
     },
     "IMDB1": {
         "id": "IMDB1",
         "dirname": "IMDB",
         "basename": "01-Embedding-Keras.ipynb",
-        "title": "Text embedding with IMDB",
-        "description": "A very classical example of word embedding for text classification (sentiment analysis)"
+        "title": "Sentiment analysis with text embedding",
+        "description": "A very classical example of word embedding with a dataset from Internet Movie Database (IMDB)"
     },
     "IMDB2": {
         "id": "IMDB2",
         "dirname": "IMDB",
         "basename": "02-Prediction.ipynb",
-        "title": "Text embedding with IMDB - Reloaded",
-        "description": "Example of reusing a previously saved model"
+        "title": "Reload and reuse a saved model",
+        "description": "Retrieving a saved model to perform a sentiment analysis (movie review)"
     },
     "IMDB3": {
         "id": "IMDB3",
         "dirname": "IMDB",
         "basename": "03-LSTM-Keras.ipynb",
-        "title": "Text embedding/LSTM model with IMDB",
+        "title": "Sentiment analysis with a LSTM network",
         "description": "Still the same problem, but with a network combining embedding and LSTM"
     },
     "SYNOP1": {