Commit 26ceb97d
authored 5 years ago by Jean-Luc Parouty

update GTSRB

parent 9f16240a

Showing 2 changed files with 87 additions and 62 deletions:

  .gitignore                                 +1 −1
  GTSRB/03-Tracking-and-visualizing.ipynb    +86 −61

.gitignore  +1 −1

@@ -2,6 +2,6 @@
 */.ipynb_checkpoints/*
 __pycache__
 */__pycache__/*
-/run/**
+run/
 */data/*
 !/GTSRB/data/dataset.tar.gz

GTSRB/03-Tracking-and-visualizing.ipynb  +86 −61

@@ -32,7 +32,7 @@
     "text": [
      "IDLE 2020 - Practical Work Module\n",
      " Version : 0.1.1\n",
-     " Run time : Monday 6 January 2020, 20:52:54\n",
+     " Run time : Monday 6 January 2020, 23:42:11\n",
      " Matplotlib style : idle/talk.mplstyle\n",
      " TensorFlow version : 2.0.0\n",
      " Keras version : 2.2.4-tf\n"

@@ -76,8 +76,8 @@
     "text": [
      "Dataset loaded, size=247.6 Mo\n",
      "\n",
-     "CPU times: user 0 ns, sys: 297 ms, total: 297 ms\n",
-     "Wall time: 330 ms\n"
+     "CPU times: user 0 ns, sys: 344 ms, total: 344 ms\n",
+     "Wall time: 498 ms\n"
     ]
    }
   ],

@@ -172,18 +172,18 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 4,
+    "execution_count": 23,
     "metadata": {},
     "outputs": [],
     "source": [
      "batch_size = 64\n",
      "num_classes = 43\n",
-     "epochs = 20"
+     "epochs = 10"
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": 5,
+    "execution_count": 24,
     "metadata": {},
     "outputs": [
      {

@@ -220,6 +220,8 @@
     }
    ],
    "source": [
+    "tf.keras.backend.clear_session()\n",
+    "\n",
     "model = keras.models.Sequential()\n",
     "model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(img_lx, img_ly, img_lz)))\n",
     "model.add( keras.layers.MaxPooling2D((2, 2)))\n",

@@ -246,19 +248,23 @@
      " - **TensorBoard** \n",
      "Training logs, which can be visualised with Tensorboard. \n",
      "`#tensorboard --logdir ./run/logs` \n",
-     " - model backup"
+     "IMPORTANT : Relancer tensorboard à chaque run\n",
+     " - **model backup**"
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": 7,
+    "execution_count": 25,
     "metadata": {},
     "outputs": [],
     "source": [
-     "# reload(ooo)\n",
-     "# ---- Callback tensorboard\n",
-     "log_dir = \"./run/logs/\" + ooo.tag_now()\n",
-     "tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)"
+     "# ---- Callback for tensorboard\n",
+     "log_dir=\"./run/logs/\" + ooo.tag_now()\n",
+     "tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n",
+     "\n",
+     "# ---- Callback ModelCheckpoint\n",
+     "save_dir = \"./run/models\"\n",
+     "checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)"
     ]
    },
    {

@@ -270,56 +276,46 @@
    },
    {
     "cell_type": "code",
-    "execution_count": 8,
+    "execution_count": 26,
     "metadata": {},
     "outputs": [
      {
       "name": "stdout",
       "output_type": "stream",
       "text": [
-      "Train on 3000 samples, validate on 12630 samples\n",
-      "Epoch 1/20\n",
-      "3000/3000 [==============================] - 12s 4ms/sample - loss: 3.4839 - accuracy: 0.0740 - val_loss: 3.1409 - val_accuracy: 0.1995\n",
-      "Epoch 2/20\n",
-      "3000/3000 [==============================] - 12s 4ms/sample - loss: 2.4676 - accuracy: 0.3067 - val_loss: 2.1162 - val_accuracy: 0.3968\n",
-      "Epoch 3/20\n",
-      "3000/3000 [==============================] - 13s 4ms/sample - loss: 1.4458 - accuracy: 0.5543 - val_loss: 1.4468 - val_accuracy: 0.5862\n",
-      "Epoch 4/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.9291 - accuracy: 0.7067 - val_loss: 1.1903 - val_accuracy: 0.6622\n",
-      "Epoch 5/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.6027 - accuracy: 0.8030 - val_loss: 0.8067 - val_accuracy: 0.7900\n",
-      "Epoch 6/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.3835 - accuracy: 0.8670 - val_loss: 0.8453 - val_accuracy: 0.7925\n",
-      "Epoch 7/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.2666 - accuracy: 0.9157 - val_loss: 0.7578 - val_accuracy: 0.8256\n",
-      "Epoch 8/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.2045 - accuracy: 0.9307 - val_loss: 0.8074 - val_accuracy: 0.8300\n",
-      "Epoch 9/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.1370 - accuracy: 0.9567 - val_loss: 0.7071 - val_accuracy: 0.8588\n",
-      "Epoch 10/20\n",
-      "3000/3000 [==============================] - 12s 4ms/sample - loss: 0.0964 - accuracy: 0.9707 - val_loss: 0.7275 - val_accuracy: 0.8622\n",
-      "Epoch 11/20\n",
-      "3000/3000 [==============================] - 13s 4ms/sample - loss: 0.0654 - accuracy: 0.9803 - val_loss: 0.7073 - val_accuracy: 0.8661\n",
-      "Epoch 12/20\n",
-      "3000/3000 [==============================] - 13s 4ms/sample - loss: 0.0806 - accuracy: 0.9757 - val_loss: 0.8265 - val_accuracy: 0.8637\n",
-      "Epoch 13/20\n",
-      "3000/3000 [==============================] - 13s 4ms/sample - loss: 0.0854 - accuracy: 0.9743 - val_loss: 0.7547 - val_accuracy: 0.8635\n",
-      "Epoch 14/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0463 - accuracy: 0.9860 - val_loss: 0.7248 - val_accuracy: 0.8846\n",
-      "Epoch 15/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0425 - accuracy: 0.9917 - val_loss: 0.7266 - val_accuracy: 0.8785\n",
-      "Epoch 16/20\n",
-      "3000/3000 [==============================] - 12s 4ms/sample - loss: 0.0322 - accuracy: 0.9923 - val_loss: 0.7501 - val_accuracy: 0.8777\n",
-      "Epoch 17/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0375 - accuracy: 0.9870 - val_loss: 0.7782 - val_accuracy: 0.8811\n",
-      "Epoch 18/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0557 - accuracy: 0.9857 - val_loss: 0.6846 - val_accuracy: 0.8760\n",
-      "Epoch 19/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0100 - accuracy: 0.9987 - val_loss: 0.7676 - val_accuracy: 0.8873\n",
-      "Epoch 20/20\n",
-      "3000/3000 [==============================] - 14s 5ms/sample - loss: 0.0025 - accuracy: 1.0000 - val_loss: 0.7481 - val_accuracy: 0.8993\n",
-      "CPU times: user 17min 9s, sys: 5min 55s, total: 23min 4s\n",
-      "Wall time: 4min 29s\n"
+      "Train on 3000 samples, validate on 500 samples\n",
+      "Epoch 1/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 3.4893 - accuracy: 0.0676INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 3.4847 - accuracy: 0.0690 - val_loss: 3.2739 - val_accuracy: 0.1640\n",
+      "Epoch 2/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 2.4907 - accuracy: 0.3227INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 7s 2ms/sample - loss: 2.4802 - accuracy: 0.3250 - val_loss: 2.0143 - val_accuracy: 0.3900\n",
+      "Epoch 3/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 1.3810 - accuracy: 0.5591INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 7s 2ms/sample - loss: 1.3794 - accuracy: 0.5593 - val_loss: 1.3322 - val_accuracy: 0.6200\n",
+      "Epoch 4/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.8501 - accuracy: 0.7286INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 7s 2ms/sample - loss: 0.8426 - accuracy: 0.7323 - val_loss: 1.1705 - val_accuracy: 0.6580\n",
+      "Epoch 5/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.5132 - accuracy: 0.8400INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.5087 - accuracy: 0.8413 - val_loss: 0.9281 - val_accuracy: 0.7360\n",
+      "Epoch 6/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.3740 - accuracy: 0.8787INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.3763 - accuracy: 0.8783 - val_loss: 0.9252 - val_accuracy: 0.7520\n",
+      "Epoch 7/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.2671 - accuracy: 0.9141INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.2677 - accuracy: 0.9140 - val_loss: 0.8153 - val_accuracy: 0.8000\n",
+      "Epoch 8/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.1599 - accuracy: 0.9535INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.1588 - accuracy: 0.9543 - val_loss: 0.7000 - val_accuracy: 0.8500\n",
+      "Epoch 9/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.1282 - accuracy: 0.9626INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.1271 - accuracy: 0.9633 - val_loss: 0.7833 - val_accuracy: 0.8080\n",
+      "Epoch 10/10\n",
+      "2944/3000 [============================>.] - ETA: 0s - loss: 0.0979 - accuracy: 0.9715INFO:tensorflow:Assets written to: ./run/models/assets\n",
+      "3000/3000 [==============================] - 8s 3ms/sample - loss: 0.0971 - accuracy: 0.9720 - val_loss: 0.7634 - val_accuracy: 0.8420\n",
+      "CPU times: user 4min 26s, sys: 1min 31s, total: 5min 58s\n",
+      "Wall time: 1min 15s\n"
      ]
     }
    ],

@@ -330,8 +326,8 @@
      "                    batch_size=batch_size,\n",
      "                    epochs=epochs,\n",
      "                    verbose=1,\n",
-     "                    validation_data=(x_test, y_test),\n",
-     "                    callbacks=[tensorboard_callback] )"
+     "                    validation_data=(x_test[:500], y_test[:500]),\n",
+     "                    callbacks=[tensorboard_callback, checkpoint_callback] )"
     ]
    },
    {

@@ -382,10 +378,39 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 29,
     "metadata": {},
-    "outputs": [],
-    "source": []
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "total 176\n",
+       "drwxr-xr-x 1 pjluc pjluc 512 Jan 7 00:16 assets\n",
+       "-rw-r--r-- 1 pjluc pjluc 168427 Jan 7 00:17 saved_model.pb\n",
+       "drwxr-xr-x 1 pjluc pjluc 512 Jan 7 00:17 variables\n",
+       "\u001b[01;34m./run/models\u001b[00m\n",
+       "├── \u001b[01;34massets\u001b[00m\n",
+       "├── saved_model.pb\n",
+       "└── \u001b[01;34mvariables\u001b[00m\n",
+       "    ├── variables.data-00000-of-00001\n",
+       "    └── variables.index\n",
+       "\n",
+       "2 directories, 3 files\n"
+      ]
+     }
+    ],
+    "source": [
+     "!ls -l {save_dir}\n",
+     "!tree {save_dir}"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "A suivre : https://www.tensorflow.org/tutorials/keras/save_and_load"
+    ]
    }
   ],
   "metadata": {

%% Cell type:markdown id: tags:

German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Aria, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020

## Episode 3 : Tracking and visualizing

Our main steps :
 - Monitoring and understanding our model training
 - Analyze the results
 - Improving our model
 - Add recovery points

## 1/ Import and init

%% Cell type:code id: tags:

``` python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import time

from importlib import reload

import idle.pwk as ooo

ooo.init()
```

%% Output

IDLE 2020 - Practical Work Module
 Version : 0.1.1
 Run time : Monday 6 January 2020, 23:42:11
 Matplotlib style : idle/talk.mplstyle
 TensorFlow version : 2.0.0
 Keras version : 2.2.4-tf

%% Cell type:markdown id: tags:

## 2/ Reload dataset
The dataset is one of the saved datasets : RGB25, RGB35, L25, L35, etc.
First of all, we're going to use the dataset : **L25**
(with a GPU, it only takes 35'' compared to more than 5' with a CPU !)

%% Cell type:code id: tags:

``` python
%%time

dataset = 'L25'
img_lx  = 25
img_ly  = 25
img_lz  = 1

# ---- Read dataset
x_train = np.load('./data/{}/x_train.npy'.format(dataset))
y_train = np.load('./data/{}/y_train.npy'.format(dataset))
x_test  = np.load('./data/{}/x_test.npy'.format(dataset))
y_test  = np.load('./data/{}/y_test.npy'.format(dataset))

# ---- Reshape data
x_train = x_train.reshape( x_train.shape[0], img_lx, img_ly, img_lz)
x_test  = x_test.reshape(  x_test.shape[0], img_lx, img_ly, img_lz)
input_shape = (img_lx, img_ly, img_lz)

print("Dataset loaded, size={:.1f} Mo\n".format(ooo.get_directory_size('./data/'+dataset)))
```

%% Output

Dataset loaded, size=247.6 Mo

CPU times: user 0 ns, sys: 344 ms, total: 344 ms
Wall time: 498 ms

%% Cell type:markdown id: tags:

## 3/ Have a look to the dataset
Note : Data must be reshaped for matplotlib

%% Cell type:code id: tags:

``` python
print("x_train : ", x_train.shape)
print("y_train : ", y_train.shape)
print("x_test  : ", x_test.shape)
print("y_test  : ", y_test.shape)

if img_lz>1:
    ooo.plot_images(x_train.reshape(-1,img_lx,img_ly,img_lz), y_train, range(6),  columns=3,  x_size=4, y_size=3)
    ooo.plot_images(x_train.reshape(-1,img_lx,img_ly,img_lz), y_train, range(36), columns=12, x_size=1, y_size=1)
else:
    ooo.plot_images(x_train.reshape(-1,img_lx,img_ly), y_train, range(6),  columns=6,  x_size=2, y_size=2)
    ooo.plot_images(x_train.reshape(-1,img_lx,img_ly), y_train, range(36), columns=12, x_size=1, y_size=1)
```

%% Output

x_train : (39209, 25, 25, 1)
y_train : (39209,)
x_test  : (12630, 25, 25, 1)
y_test  : (12630,)

%% Cell type:markdown id: tags:

## 4/ Create model

%% Cell type:code id: tags:

``` python
batch_size  = 64
num_classes = 43
epochs      = 10
```

%% Cell type:code id: tags:

``` python
tf.keras.backend.clear_session()

model = keras.models.Sequential()
model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(img_lx, img_ly, img_lz)))
model.add( keras.layers.MaxPooling2D((2, 2)))
model.add( keras.layers.Conv2D(192, (3, 3), activation='relu'))
model.add( keras.layers.MaxPooling2D((2, 2)))
model.add( keras.layers.Flatten())
model.add( keras.layers.Dense(3072, activation='relu'))
model.add( keras.layers.Dense(500, activation='relu'))
model.add( keras.layers.Dense(500, activation='relu'))
model.add( keras.layers.Dense(43, activation='softmax'))
model.summary()

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```

%% Output

Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 23, 23, 96)        960
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 11, 11, 96)        0
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 9, 9, 192)         166080
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 4, 4, 192)         0
_________________________________________________________________
flatten (Flatten)            (None, 3072)              0
_________________________________________________________________
dense (Dense)                (None, 3072)              9440256
_________________________________________________________________
dense_1 (Dense)              (None, 500)               1536500
_________________________________________________________________
dense_2 (Dense)              (None, 500)               250500
_________________________________________________________________
dense_3 (Dense)              (None, 43)                21543
=================================================================
Total params: 11,415,839
Trainable params: 11,415,839
Non-trainable params: 0
_________________________________________________________________

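For reference, the parameter counts in this summary can be reproduced by hand: each layer stores its kernel weights plus one bias per output unit. The quick check below is not a cell of the notebook, only a verification of the table above.

``` python
# Checking the "Param #" column: kernel weights + biases per layer.
conv2d   = (3*3*1)*96   + 96      # 960
conv2d_1 = (3*3*96)*192 + 192     # 166080
dense    = 3072*3072    + 3072    # 9440256  (Flatten output: 4*4*192 = 3072)
dense_1  = 3072*500     + 500     # 1536500
dense_2  = 500*500      + 500     # 250500
dense_3  = 500*43       + 43      # 21543
print(conv2d + conv2d_1 + dense + dense_1 + dense_2 + dense_3)   # 11415839
```
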
%% Cell type:markdown id: tags:

## 5/ Add callbacks
We will add 2 callbacks :
 - **TensorBoard**
Training logs, which can be visualised with Tensorboard.
`#tensorboard --logdir ./run/logs`
IMPORTANT : Restart tensorboard for each run
 - **model backup**

%% Cell type:code id: tags:

``` python
# ---- Callback for tensorboard
log_dir="./run/logs/" + ooo.tag_now()
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# ---- Callback ModelCheckpoint
save_dir = "./run/models"
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)
```

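With `filepath=save_dir`, `monitor='accuracy'` and `save_best_only=True`, this `checkpoint_callback` rewrites a single SavedModel under `./run/models` every time the training accuracy improves, which is why `Assets written to` appears repeatedly in the log below. A variation, not used in this notebook and with a purely illustrative filename pattern, would keep one weights file per improving epoch instead:

``` python
# Hypothetical variant (not in this notebook): one weights file per improving
# epoch instead of a single, repeatedly overwritten SavedModel directory.
import tensorflow as tf

best_weights_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath="./run/models/best-{epoch:02d}.h5",   # illustrative path pattern
    save_weights_only=True,     # write only the weights, not the full model
    monitor='accuracy',         # same metric as the notebook's callback
    save_best_only=True,        # write only when 'accuracy' improves
    verbose=0)
```
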
%% Cell type:markdown id: tags:

## 5/ Run model

%% Cell type:code id: tags:

``` python
%%time

history = model.fit( x_train[:3000], y_train[:3000],
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test[:500], y_test[:500]),
                     callbacks=[tensorboard_callback, checkpoint_callback] )
```

%% Output

Train on 3000 samples, validate on 500 samples
Epoch 1/10
2944/3000 [============================>.] - ETA: 0s - loss: 3.4893 - accuracy: 0.0676INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 3.4847 - accuracy: 0.0690 - val_loss: 3.2739 - val_accuracy: 0.1640
Epoch 2/10
2944/3000 [============================>.] - ETA: 0s - loss: 2.4907 - accuracy: 0.3227INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 7s 2ms/sample - loss: 2.4802 - accuracy: 0.3250 - val_loss: 2.0143 - val_accuracy: 0.3900
Epoch 3/10
2944/3000 [============================>.] - ETA: 0s - loss: 1.3810 - accuracy: 0.5591INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 7s 2ms/sample - loss: 1.3794 - accuracy: 0.5593 - val_loss: 1.3322 - val_accuracy: 0.6200
Epoch 4/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.8501 - accuracy: 0.7286INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 7s 2ms/sample - loss: 0.8426 - accuracy: 0.7323 - val_loss: 1.1705 - val_accuracy: 0.6580
Epoch 5/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.5132 - accuracy: 0.8400INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.5087 - accuracy: 0.8413 - val_loss: 0.9281 - val_accuracy: 0.7360
Epoch 6/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.3740 - accuracy: 0.8787INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.3763 - accuracy: 0.8783 - val_loss: 0.9252 - val_accuracy: 0.7520
Epoch 7/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.2671 - accuracy: 0.9141INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.2677 - accuracy: 0.9140 - val_loss: 0.8153 - val_accuracy: 0.8000
Epoch 8/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.1599 - accuracy: 0.9535INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.1588 - accuracy: 0.9543 - val_loss: 0.7000 - val_accuracy: 0.8500
Epoch 9/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.1282 - accuracy: 0.9626INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.1271 - accuracy: 0.9633 - val_loss: 0.7833 - val_accuracy: 0.8080
Epoch 10/10
2944/3000 [============================>.] - ETA: 0s - loss: 0.0979 - accuracy: 0.9715INFO:tensorflow:Assets written to: ./run/models/assets
3000/3000 [==============================] - 8s 3ms/sample - loss: 0.0971 - accuracy: 0.9720 - val_loss: 0.7634 - val_accuracy: 0.8420
CPU times: user 4min 26s, sys: 1min 31s, total: 5min 58s
Wall time: 1min 15s

%% Cell type:markdown id: tags:

## 6/ Evaluation

%% Cell type:code id: tags:

``` python
score = model.evaluate(x_test, y_test, verbose=0)

print('Test loss     : {:5.4f}'.format(score[0]))
print('Test accuracy : {:5.4f}'.format(score[1]))
```

%% Cell type:markdown id: tags:

## 7/ History
model.fit() returns the learning history

%% Cell type:code id: tags:

``` python
ooo.plot_history(history)
```

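`ooo.plot_history` comes from the course helper module `idle.pwk`. Assuming it draws the accuracy and loss curves, a rough stand-in using the `history.history` dict directly (its keys follow the metrics declared in `compile()` above) could look like this sketch:

``` python
# Rough stand-in for ooo.plot_history (assumption about what it draws).
# history.history maps each metric name to one value per epoch.
import matplotlib.pyplot as plt

def plot_history_sketch(history):
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
    ax1.plot(history.history['accuracy'],     label='train')
    ax1.plot(history.history['val_accuracy'], label='validation')
    ax1.set_title('Accuracy'); ax1.legend()
    ax2.plot(history.history['loss'],     label='train')
    ax2.plot(history.history['val_loss'], label='validation')
    ax2.set_title('Loss'); ax2.legend()
    plt.show()

plot_history_sketch(history)
```
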
%% Cell type:markdown id: tags:

---
### Results :
L25 : size=250 Mo, 93.15%

...

%% Cell type:code id: tags:

``` python
!ls -l {save_dir}
!tree {save_dir}
```

%% Output

total 176
drwxr-xr-x 1 pjluc pjluc 512 Jan 7 00:16 assets
-rw-r--r-- 1 pjluc pjluc 168427 Jan 7 00:17 saved_model.pb
drwxr-xr-x 1 pjluc pjluc 512 Jan 7 00:17 variables
./run/models
├── assets
├── saved_model.pb
└── variables
    ├── variables.data-00000-of-00001
    └── variables.index

2 directories, 3 files

%% Cell type:markdown id: tags:

To be continued : https://www.tensorflow.org/tutorials/keras/save_and_load

...

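The linked tutorial covers reloading what `checkpoint_callback` saved. A minimal sketch, assuming the `./run/models` SavedModel directory listed above and the `x_test` / `y_test` arrays still in memory:

``` python
# Minimal sketch: reload the checkpointed model and re-score it on the test set.
from tensorflow import keras

best_model = keras.models.load_model('./run/models')
loss, acc  = best_model.evaluate(x_test, y_test, verbose=0)
print('Restored model accuracy : {:5.4f}'.format(acc))
```
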