From 853282dd0101bfa45241c774e46aaf48f272b2be Mon Sep 17 00:00:00 2001
From: Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>
Date: Mon, 6 Jan 2025 16:33:44 +0100
Subject: [PATCH] Update to 3.0.14

---
 GTSRB.Keras3/04-Keras-cv.ipynb | 172 ---------------------------------
 README.ipynb                   |  26 ++---
 README.md                      |  12 +--
 fidle/about.yml                |   3 +-
 fidle/ci/default.yml           |  13 +--
 5 files changed, 14 insertions(+), 212 deletions(-)
 delete mode 100644 GTSRB.Keras3/04-Keras-cv.ipynb

diff --git a/GTSRB.Keras3/04-Keras-cv.ipynb b/GTSRB.Keras3/04-Keras-cv.ipynb
deleted file mode 100644
index 105d485..0000000
--- a/GTSRB.Keras3/04-Keras-cv.ipynb
+++ /dev/null
@@ -1,172 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "<img width=\"800px\" src=\"../fidle/img/header.svg\"></img>\n",
-    "\n",
-    "# <!-- TITLE --> [K3GTSRB4] - Hight level example (Keras-cv)\n",
-    "<!-- DESC --> An example of using a pre-trained model with Keras-cv\n",
-    "<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->\n",
-    "\n",
-    "## Objectives :\n",
-    "  - Using a pre-trained model\n",
-    "  \n",
-    "## What we're going to do :\n",
-    "\n",
-    " - Load and use a pre-trained model\n",
-    "\n",
-    " See : https://keras.io/guides/keras_cv/classification_with_keras_cv/  \n",
-    " Imagenet classes can be found at : https://gist.githubusercontent.com/LukeWood/62eebcd5c5c4a4d0e0b7845780f76d55/raw/fde63e5e4c09e2fa0a3436680f436bdcb8325aac/ImagenetClassnames.json\n",
-    "\n",
-    "## ATTENTION : A specific environment is required for this example !\n",
-    "This python environment required for this notebook is :\n",
-    "```\n",
-    "python3 -m venv fidle-kcv\n",
-    "pip install --upgrade keras-cv tensorflow torch torchvision torchaudio Matplotlib Jupyterlab\n",
-    "pip install --upgrade keras jupyterlab\n",
-    "```\n",
-    "Note: Tensorflow is not used for interference, and will no longer be required in later versions of Keras 3.\n",
-    "\n",
-    "## Step 1 - Import and init\n",
-    "\n",
-    "### 1.1 - Python stuffs"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "os.environ[\"KERAS_BACKEND\"] = \"torch\"  # @param [\"tensorflow\", \"jax\", \"torch\"]\n",
-    "\n",
-    "import json\n",
-    "import numpy as np\n",
-    "\n",
-    "import keras\n",
-    "import keras_cv\n",
-    "\n",
-    "from  modules.ImagenetClassnames import ImagenetClassnames"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 2 - Get some images"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "images_url=['https://i.imgur.com/2eOWImx.jpeg', 'https://i.imgur.com/YB8sG8R.jpeg', 'https://i.imgur.com/orZEMlv.jpeg']\n",
-    "\n",
-    "images=[]\n",
-    "for img_url in images_url:\n",
-    "    \n",
-    "    # Get images from urls in ~/.keras cache\n",
-    "    img_path = keras.utils.get_file(origin=img_url)\n",
-    "\n",
-    "    # Get image\n",
-    "    img = keras.utils.load_img(img_path, target_size=(256,256))\n",
-    "    images.append(img)\n",
-    "images=np.array(images)\n",
-    "\n",
-    "keras_cv.visualization.plot_image_gallery( images, rows=1, cols=3, value_range=(0, 255), show=True, scale=2)\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 3 - Get a nice pretrained classifier (and classes)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "classifier = keras_cv.models.ImageClassifier.from_preset( \"efficientnetv2_b0_imagenet_classifier\" )"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 4 - Try some predictions"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "predictions = classifier.predict(images)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Step 5 - Show result"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Get classes name \n",
-    "imc = ImagenetClassnames()\n",
-    "\n",
-    "for i,img in enumerate(images):\n",
-    "    # Get classes id instead classes probabilities\n",
-    "    classes_id   = predictions[i].argsort(axis=-1)\n",
-    "    # Get classes name instead classes id\n",
-    "    classes_name = imc.get(classes_id, top_n=2)\n",
-    "    # Plot it\n",
-    "    keras_cv.visualization.plot_image_gallery( np.array([img]), rows=1, cols=1, value_range=(0, 255), show=True, scale=2)\n",
-    "    print(classes_name)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "---\n",
-    "<img width=\"80px\" src=\"../fidle/img/logo-paysage.svg\"></img>"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "fidle-kcv",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/README.ipynb b/README.ipynb
index 1ac248a..0c2e410 100644
--- a/README.ipynb
+++ b/README.ipynb
@@ -3,13 +3,13 @@
   {
    "cell_type": "code",
    "execution_count": 1,
-   "id": "632b4ee8",
+   "id": "f31b5632",
    "metadata": {
     "execution": {
-     "iopub.execute_input": "2024-12-22T17:56:42.379486Z",
-     "iopub.status.busy": "2024-12-22T17:56:42.379188Z",
-     "iopub.status.idle": "2024-12-22T17:56:42.384886Z",
-     "shell.execute_reply": "2024-12-22T17:56:42.384583Z"
+     "iopub.execute_input": "2025-01-06T15:33:06.393183Z",
+     "iopub.status.busy": "2025-01-06T15:33:06.392990Z",
+     "iopub.status.idle": "2025-01-06T15:33:06.401877Z",
+     "shell.execute_reply": "2025-01-06T15:33:06.401059Z"
     },
     "jupyter": {
      "source_hidden": true
@@ -53,7 +53,7 @@
        "For more information, you can contact us at :  \n",
        "[<img width=\"200px\" style=\"vertical-align:middle\" src=\"fidle/img/00-Mail_contact.svg\"></img>](#top)\n",
        "\n",
-       "Current Version : <!-- VERSION_BEGIN -->3.0.12<!-- VERSION_END -->\n",
+       "Current Version : <!-- VERSION_BEGIN -->3.0.14<!-- VERSION_END -->\n",
        "\n",
        "\n",
        "## Course materials\n",
@@ -68,7 +68,7 @@
        "## Jupyter notebooks\n",
        "\n",
        "<!-- TOC_BEGIN -->\n",
-       "<!-- Automatically generated on : 22/12/24 18:56:39 -->\n",
+       "<!-- Automatically generated on : 06/01/25 16:33:05 -->\n",
        "\n",
        "### Linear and logistic regression\n",
        "- **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  \n",
@@ -125,8 +125,6 @@
        "Episode 2 : First convolutions and first classification of our traffic signs, using Keras3\n",
        "- **[K3GTSRB3](GTSRB.Keras3/03-Better-convolutions.ipynb)** - [Training monitoring](GTSRB.Keras3/03-Better-convolutions.ipynb)  \n",
        "Episode 3 : Monitoring, analysis and check points during a training session, using Keras3\n",
-       "- **[K3GTSRB4](GTSRB.Keras3/04-Keras-cv.ipynb)** - [Hight level example (Keras-cv)](GTSRB.Keras3/04-Keras-cv.ipynb)  \n",
-       "An example of using a pre-trained model with Keras-cv\n",
        "- **[K3GTSRB10](GTSRB.Keras3/batch_oar.sh)** - [OAR batch script submission](GTSRB.Keras3/batch_oar.sh)  \n",
        "Bash script for an OAR batch submission of an ipython code\n",
        "- **[K3GTSRB11](GTSRB.Keras3/batch_slurm.sh)** - [SLURM batch script](GTSRB.Keras3/batch_slurm.sh)  \n",
@@ -148,12 +146,6 @@
        "- **[K3LADYB1](RNN.Keras3/01-Ladybug.ipynb)** - [Prediction of a 2D trajectory via RNN](RNN.Keras3/01-Ladybug.ipynb)  \n",
        "Artificial dataset generation and prediction attempt via a recurrent network, using Keras 3 and PyTorch\n",
        "\n",
-       "### Sentiment analysis with transformer, using PyTorch\n",
-       "- **[TRANS1](Transformers.PyTorch/01-Distilbert.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/01-Distilbert.ipynb)  \n",
-       "Using a Tranformer to perform a sentiment analysis (IMDB) - Jean Zay version\n",
-       "- **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  \n",
-       "Using a Tranformer to perform a sentiment analysis (IMDB) - Colab version\n",
-       "\n",
        "### Graph Neural Networks\n",
        "\n",
        "### Unsupervised learning with an autoencoder neural network (AE), using Keras3\n",
@@ -243,7 +235,7 @@
     "from IPython.display import display,Markdown\n",
     "display(Markdown(open('README.md', 'r').read()))\n",
     "#\n",
-    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 22/12/24 18:56:40"
+    "# This README is visible under Jupiter Lab ;-)# Automatically generated on : 06/01/25 16:33:05"
    ]
   }
  ],
@@ -263,7 +255,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.12.7"
+   "version": "3.11.2"
   }
  },
  "nbformat": 4,
diff --git a/README.md b/README.md
index 0e4f31b..add0f1b 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ For more information, see **https://fidle.cnrs.fr** :
 For more information, you can contact us at :  
 [<img width="200px" style="vertical-align:middle" src="fidle/img/00-Mail_contact.svg"></img>](#top)
 
-Current Version : <!-- VERSION_BEGIN -->3.0.12<!-- VERSION_END -->
+Current Version : <!-- VERSION_BEGIN -->3.0.14<!-- VERSION_END -->
 
 
 ## Course materials
@@ -47,7 +47,7 @@ Have a look about **[How to get and install](https://fidle.cnrs.fr/installation)
 ## Jupyter notebooks
 
 <!-- TOC_BEGIN -->
-<!-- Automatically generated on : 22/12/24 18:56:39 -->
+<!-- Automatically generated on : 06/01/25 16:33:05 -->
 
 ### Linear and logistic regression
 - **[LINR1](LinearReg/01-Linear-Regression.ipynb)** - [Linear regression with direct resolution](LinearReg/01-Linear-Regression.ipynb)  
@@ -104,8 +104,6 @@ Episode 1 : Analysis of the GTSRB dataset and creation of an enhanced dataset
 Episode 2 : First convolutions and first classification of our traffic signs, using Keras3
 - **[K3GTSRB3](GTSRB.Keras3/03-Better-convolutions.ipynb)** - [Training monitoring](GTSRB.Keras3/03-Better-convolutions.ipynb)  
 Episode 3 : Monitoring, analysis and check points during a training session, using Keras3
-- **[K3GTSRB4](GTSRB.Keras3/04-Keras-cv.ipynb)** - [High level example (Keras-cv)](GTSRB.Keras3/04-Keras-cv.ipynb)  
-An example of using a pre-trained model with Keras-cv
 - **[K3GTSRB10](GTSRB.Keras3/batch_oar.sh)** - [OAR batch script submission](GTSRB.Keras3/batch_oar.sh)  
 Bash script for an OAR batch submission of an ipython code
 - **[K3GTSRB11](GTSRB.Keras3/batch_slurm.sh)** - [SLURM batch script](GTSRB.Keras3/batch_slurm.sh)  
@@ -127,12 +125,6 @@ Still the same problem, but with a network combining embedding and RNN, using Ke
 - **[K3LADYB1](RNN.Keras3/01-Ladybug.ipynb)** - [Prediction of a 2D trajectory via RNN](RNN.Keras3/01-Ladybug.ipynb)  
 Artificial dataset generation and prediction attempt via a recurrent network, using Keras 3 and PyTorch
 
-### Sentiment analysis with transformer, using PyTorch
-- **[TRANS1](Transformers.PyTorch/01-Distilbert.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/01-Distilbert.ipynb)  
-Using a Transformer to perform sentiment analysis (IMDB) - Jean Zay version
-- **[TRANS2](Transformers.PyTorch/02-distilbert_colab.ipynb)** - [IMDB, Sentiment analysis with Transformers ](Transformers.PyTorch/02-distilbert_colab.ipynb)  
-Using a Transformer to perform sentiment analysis (IMDB) - Colab version
-
 ### Graph Neural Networks
 
 ### Unsupervised learning with an autoencoder neural network (AE), using Keras3
diff --git a/fidle/about.yml b/fidle/about.yml
index 366f0bd..ab119fe 100644
--- a/fidle/about.yml
+++ b/fidle/about.yml
@@ -13,7 +13,7 @@
 #
 # This file describes the notebooks used by the Fidle training.
 
-version:                  3.0.12
+version:                  3.0.14
 content:                  notebooks
 name:                     Notebooks Fidle
 description:              All notebooks used by the Fidle training
@@ -36,7 +36,6 @@ toc:
   GTSRB.Keras3:           Images classification GTSRB with Convolutional Neural Networks (CNN), using Keras3/PyTorch
   Embedding.Keras3:       Sentiment analysis with word embedding, using Keras3/PyTorch
   RNN.Keras3:             Time series with Recurrent Neural Network (RNN), using Keras3/PyTorch
-  Transformers.PyTorch:   Sentiment analysis with transformer, using PyTorch
   GNN.PyTorch:            Graph Neural Networks
   AE.Keras3:              Unsupervised learning with an autoencoder neural network (AE), using Keras3
   VAE.Keras3:             Generative network with Variational Autoencoder (VAE), using Keras3
diff --git a/fidle/ci/default.yml b/fidle/ci/default.yml
index b70e0a7..bc46050 100644
--- a/fidle/ci/default.yml
+++ b/fidle/ci/default.yml
@@ -1,6 +1,6 @@
 campain:
   version: '1.0'
-  description: Automatically generated ci profile (22/12/24 18:56:39)
+  description: Automatically generated ci profile (06/01/25 16:33:05)
   directory: ./campains/default
   existing_notebook: 'remove    # remove|skip'
   report_template: 'fidle     # fidle|default'
@@ -114,8 +114,6 @@ K3GTSRB3:
     epochs: default
     scale: default
     fit_verbosity: default
-K3GTSRB4:
-  notebook: GTSRB.Keras3/04-Keras-cv.ipynb
 
 #
 # ------------ Embedding.Keras3
@@ -177,14 +175,7 @@ K3LADYB1:
     predict_len: default
     batch_size: default
     epochs: default
-
-#
-# ------------ Transformers.PyTorch
-#
-TRANS1:
-  notebook: Transformers.PyTorch/01-Distilbert.ipynb
-TRANS2:
-  notebook: Transformers.PyTorch/02-distilbert_colab.ipynb
+    fit_verbosity: default
 
 #
 # ------------ GNN.PyTorch
-- 
GitLab