Fidle · Commits · 3e43760f

Commit 3e43760f, authored 5 years ago by Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>

Merge branch 'master' of gricad-gitlab.univ-grenoble-alpes.fr:talks/fidle

Former-commit-id: a38078d4
Parents: 7815c869, 300cffcc
Changes: 2 changed files, with 50 additions and 29 deletions

  GTSRB/05-Full-convolutions.ipynb          +41 −21
  GTSRB/05.1-Full-convolutions-batch.ipynb   +9  −8
GTSRB/05-Full-convolutions.ipynb  +41 −21  (view file @ 3e43760f)

@@ -21,7 +21,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 2,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -47,9 +47,26 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 4,
     "metadata": {},
-    "outputs": [],
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "\n",
+       "Full Convolutions Notebook\n",
+       " Version : 1.6\n",
+       " Now is : Tuesday 21 January 2020 - 00h11m24s\n",
+       " OAR id : ???\n",
+       " Tag id : 077605\n",
+       " Working directory : /home/pjluc/dev/fidle/GTSRB\n",
+       " TensorFlow version : 2.0.0\n",
+       " Keras version : 2.2.4-tf\n",
+       " for tensorboard : --logdir /home/pjluc/dev/fidle/GTSRB/run/logs_077605\n"
+      ]
+     }
+    ],
     "source": [
      "# ---- Where I am ?\n",
      "now = time.strftime(\"%A %d %B %Y - %Hh%Mm%Ss\")\n",
@@ -83,7 +100,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 5,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -112,7 +129,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 6,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -196,7 +213,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": null,
+    "execution_count": 7,
     "metadata": {},
     "outputs": [],
     "source": [
@@ -296,7 +313,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "%%time\n",
+    "start_time = time.time()\n",
     "\n",
     "print('\\n---- Run','-'*50)\n",
     "\n",
@@ -313,25 +330,25 @@
     "# verbose = 0\n",
     "#\n",
     "# ---- All possibilities -> Run A\n",
-    "datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
-    "models = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
-    "batch_size = 64\n",
-    "epochs = 16\n",
-    "train_size = 1\n",
-    "test_size = 1\n",
-    "with_datagen = False\n",
-    "verbose = 0\n",
+    "# datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']\n",
+    "# models = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}\n",
+    "# batch_size = 64\n",
+    "# epochs = 16\n",
+    "# train_size = 1\n",
+    "# test_size = 1\n",
+    "# with_datagen = False\n",
+    "# verbose = 0\n",
     "#\n",
     "# ---- Data augmentation -> Run B\n",
-    "# datasets = ['set-48x48-RGB']\n",
-    "# models = {'v2':get_model_v2}\n",
-    "# batch_size = 64\n",
-    "# epochs = 20\n",
-    "# train_size = 1\n",
-    "# test_size = 1\n",
-    "# with_datagen = True\n",
-    "# verbose = 0\n",
+    "datasets = ['set-48x48-RGB']\n",
+    "models = {'v2':get_model_v2}\n",
+    "batch_size = 64\n",
+    "epochs = 20\n",
+    "train_size = 1\n",
+    "test_size = 1\n",
+    "with_datagen = True\n",
+    "verbose = 0\n",
     "#\n",
     "# ---------------------------------------------------------------------------\n",
     "\n",
     "# ---- Data augmentation\n",
@@ -368,6 +385,9 @@
     " json.dump(report, file)\n",
     "\n",
     "print('\\nReport saved as ',report_name)\n",
+    "end_time = time.time()\n",
+    "duration = end_time-start_time\n",
+    "print('Duration : {} s'.format(duration))\n",
     "print('-'*59)\n"
    ]
   },
%% Cell type:markdown id: tags:

German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020

## Episode 5 : Full Convolutions

Our main steps:
- Try n models with n datasets
- Save a Pandas/h5 report
- Write it so it can be run in batch mode

## 1/ Import
%% Cell type:code id: tags:

``` python
import tensorflow as tf
from tensorflow import keras

import numpy as np
import h5py
import os, time, json
import random
from IPython.display import display

VERSION = '1.6'
```
%% Cell type:markdown id: tags:
## 2/ Init and start
%% Cell type:code id: tags:

``` python
# ---- Where I am ?
now    = time.strftime("%A %d %B %Y - %Hh%Mm%Ss")
here   = os.getcwd()
random.seed(time.time())
tag_id = '{:06}'.format(random.randint(0, 99999))

# ---- Who I am ?
if 'OAR_JOB_ID' in os.environ:
    oar_id = os.environ['OAR_JOB_ID']
else:
    oar_id = '???'

print('\nFull Convolutions Notebook')
print('Version : {}'.format(VERSION))
print('Now is : {}'.format(now))
print('OAR id : {}'.format(oar_id))
print('Tag id : {}'.format(tag_id))
print('Working directory : {}'.format(here))
print('TensorFlow version :', tf.__version__)
print('Keras version :', tf.keras.__version__)
print('for tensorboard : --logdir {}/run/logs_{}'.format(here, tag_id))
```
%% Output
Full Convolutions Notebook
Version : 1.6
Now is : Tuesday 21 January 2020 - 00h11m24s
OAR id : ???
Tag id : 077605
Working directory : /home/pjluc/dev/fidle/GTSRB
TensorFlow version : 2.0.0
Keras version : 2.2.4-tf
for tensorboard : --logdir /home/pjluc/dev/fidle/GTSRB/run/logs_077605
%% Cell type:markdown id: tags:
## 3/ Dataset loading
%% Cell type:code id: tags:

``` python
def read_dataset(name):
    '''Reads h5 dataset from ./data

    Arguments:  dataset name, without .h5
    Returns:    x_train, y_train, x_test, y_test data'''
    # ---- Read dataset
    filename = './data/' + name + '.h5'
    with h5py.File(filename, 'r') as f:
        x_train = f['x_train'][:]
        y_train = f['y_train'][:]
        x_test  = f['x_test'][:]
        y_test  = f['y_test'][:]
    return x_train, y_train, x_test, y_test
```
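A quick way to check a dataset is to load it and look at the shapes. A minimal sketch, assuming a dataset such as set-24x24-L.h5 was already built in ./data by an earlier episode (the dataset name is illustrative, not taken from this commit):

``` python
# Illustrative usage of read_dataset(); 'set-24x24-L' is assumed to exist in ./data
x_train, y_train, x_test, y_test = read_dataset('set-24x24-L')

# Image arrays follow (n_images, lx, ly, lz); lz is 1 for L (grayscale), 3 for RGB
print('x_train :', x_train.shape, '  y_train :', y_train.shape)
print('x_test  :', x_test.shape,  '  y_test  :', y_test.shape)
```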
%% Cell type:markdown id: tags:
## 4/ Models collection
%% Cell type:code id: tags:

``` python
# A basic model
#
def get_model_v1(lx, ly, lz):

    model = keras.models.Sequential()

    model.add(keras.layers.Conv2D(96, (3, 3), activation='relu', input_shape=(lx, ly, lz)))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Conv2D(192, (3, 3), activation='relu'))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(1500, activation='relu'))
    model.add(keras.layers.Dropout(0.5))

    model.add(keras.layers.Dense(43, activation='softmax'))
    return model

# A more sophisticated model
#
def get_model_v2(lx, ly, lz):

    model = keras.models.Sequential()

    model.add(keras.layers.Conv2D(64, (3, 3), padding='same', input_shape=(lx, ly, lz), activation='relu'))
    model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(keras.layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add(keras.layers.Conv2D(256, (3, 3), activation='relu'))
    model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(keras.layers.Dropout(0.2))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(512, activation='relu'))
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(43, activation='softmax'))
    return model

def get_model_v3(lx, ly, lz):

    model = keras.models.Sequential()

    model.add(tf.keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu', input_shape=(lx, ly, lz)))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))

    model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))

    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Dropout(0.4))
    model.add(tf.keras.layers.Dense(43, activation='softmax'))
    return model
```
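To sanity-check one of these builders outside of the full run, you can instantiate it and print the layer stack. A small sketch (the 24x24x1 input shape is just an example; 43 is the number of GTSRB classes):

``` python
# Illustrative: build the basic model for 24x24 grayscale images
model = get_model_v1(24, 24, 1)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()   # prints layers, output shapes and parameter counts
```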
%% Cell type:markdown id: tags:
## 5/ Multiple datasets, multiple models ;-)
%% Cell type:code id: tags:

``` python
def multi_run(datasets, models, datagen=None,
              train_size=1, test_size=1, batch_size=64, epochs=16,
              verbose=0, extension_dir='last'):

    # ---- Logs and models dir
    #
    os.makedirs('./run/logs_{}'.format(extension_dir),   mode=0o750, exist_ok=True)
    os.makedirs('./run/models_{}'.format(extension_dir), mode=0o750, exist_ok=True)

    # ---- Columns of output
    #
    output = {}
    output['Dataset'] = []
    output['Size']    = []
    for m in models:
        output[m+'_Accuracy'] = []
        output[m+'_Duration'] = []

    # ---- Let's go
    #
    for d_name in datasets:
        print("\nDataset : ", d_name)

        # ---- Read dataset
        x_train, y_train, x_test, y_test = read_dataset(d_name)
        d_size = os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)
        output['Dataset'].append(d_name)
        output['Size'].append(d_size)

        # ---- Get the shape
        (n, lx, ly, lz) = x_train.shape
        n_train = int(x_train.shape[0] * train_size)
        n_test  = int(x_test.shape[0]  * test_size)

        # ---- For each model
        for m_name, m_function in models.items():
            print("Run model {} : ".format(m_name), end='')
            # ---- get model
            try:
                model = m_function(lx, ly, lz)
                # ---- Compile it
                model.compile(optimizer='adam',
                              loss='sparse_categorical_crossentropy',
                              metrics=['accuracy'])
                # ---- Callbacks tensorboard
                log_dir = "./run/logs_{}/tb_{}_{}".format(extension_dir, d_name, m_name)
                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
                # ---- Callbacks bestmodel
                save_dir = "./run/models_{}/model_{}_{}.h5".format(extension_dir, d_name, m_name)
                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0,
                                                                        monitor='accuracy', save_best_only=True)
                # ---- Train
                start_time = time.time()
                if datagen is None:
                    # ---- No data augmentation (datagen=None) --------------------------------------
                    history = model.fit(x_train[:n_train], y_train[:n_train],
                                        batch_size=batch_size,
                                        epochs=epochs,
                                        verbose=verbose,
                                        validation_data=(x_test[:n_test], y_test[:n_test]),
                                        callbacks=[tensorboard_callback, bestmodel_callback])
                else:
                    # ---- Data augmentation (datagen given) ----------------------------------------
                    datagen.fit(x_train)
                    history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                                        steps_per_epoch=int(n_train/batch_size),
                                        epochs=epochs,
                                        verbose=verbose,
                                        validation_data=(x_test[:n_test], y_test[:n_test]),
                                        callbacks=[tensorboard_callback, bestmodel_callback])
                # ---- Result
                end_time = time.time()
                duration = end_time - start_time
                accuracy = max(history.history["val_accuracy"])*100
                #
                output[m_name+'_Accuracy'].append(accuracy)
                output[m_name+'_Duration'].append(duration)
                print("Accuracy={:.2f} and Duration={:.2f}".format(accuracy, duration))
            except:
                output[m_name+'_Accuracy'].append('0')
                output[m_name+'_Duration'].append('999')
                print('-')
    return output
```
%% Cell type:markdown id: tags:
## 6/ Run !
%% Cell type:code id: tags:

``` python
start_time = time.time()

print('\n---- Run','-'*50)

# --------- Datasets, models, and more.. -----------------------------------
#
# ---- For tests
# datasets = ['set-24x24-L', 'set-24x24-RGB']
# models = {'v1':get_model_v1, 'v4':get_model_v2}
# batch_size = 64
# epochs = 2
# train_size = 0.1
# test_size = 0.1
# with_datagen = False
# verbose = 0
#
# ---- All possibilities -> Run A
# datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']
# models = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}
# batch_size = 64
# epochs = 16
# train_size = 1
# test_size = 1
# with_datagen = False
# verbose = 0
#
# ---- Data augmentation -> Run B  (the active configuration in this commit)
datasets = ['set-48x48-RGB']
models = {'v2':get_model_v2}
batch_size = 64
epochs = 20
train_size = 1
test_size = 1
with_datagen = True
verbose = 0
#
# ---------------------------------------------------------------------------

# ---- Data augmentation
#
if with_datagen:
    datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
                                                           featurewise_std_normalization=False,
                                                           width_shift_range=0.1,
                                                           height_shift_range=0.1,
                                                           zoom_range=0.2,
                                                           shear_range=0.1,
                                                           rotation_range=10.)
else:
    datagen = None

# ---- Run
#
output = multi_run(datasets, models,
                   datagen=datagen,
                   train_size=train_size, test_size=test_size,
                   batch_size=batch_size, epochs=epochs,
                   verbose=verbose,
                   extension_dir=tag_id)

# ---- Save report
#
report = {}
report['output'] = output
report['description'] = 'train_size={} test_size={} batch_size={} epochs={} data_aug={}'.format(train_size, test_size, batch_size, epochs, with_datagen)

report_name = './run/report_{}.json'.format(tag_id)
with open(report_name, 'w') as file:
    json.dump(report, file)

print('\nReport saved as ', report_name)
end_time = time.time()
duration = end_time - start_time
print('Duration : {} s'.format(duration))
print('-'*59)
```
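The report is plain JSON, so it can be reloaded later and turned into the promised Pandas table. A minimal sketch, assuming pandas is installed; the tag id in the file name below is an example, not a real run:

``` python
import json
import pandas as pd

# Illustrative: reload a saved report (077605 is an example tag id)
with open('./run/report_077605.json') as f:
    report = json.load(f)

print(report['description'])
df = pd.DataFrame(report['output'])   # one row per dataset, one column per model metric
display(df)
```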
%% Cell type:markdown id: tags:
## 7/ That's all folks..
%% Cell type:code id: tags:

``` python
print('\n{}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
print("The work is done.\n")
```

%% Cell type:code id: tags:

``` python

```
GTSRB/05.1-Full-convolutions-batch.ipynb  +9 −8  (view file @ 3e43760f)
@@ -54,7 +54,7 @@
     "output_type": "stream",
     "text": [
      "[NbConvertApp] Converting notebook 05-Full-convolutions.ipynb to script\n",
-     "[NbConvertApp] Writing 11301 bytes to ./run/full_convolutions_A.py\n"
+     "[NbConvertApp] Writing 11305 bytes to ./run/full_convolutions_B.py\n"
     ]
    }
   ],
@@ -63,7 +63,7 @@
     "\n",
     "# ---- This will convert a notebook to a notebook.py script\n",
     "#\n",
-    "jupyter nbconvert --to script --output='./run/full_convolutions_A' '05-Full-convolutions.ipynb'"
+    "jupyter nbconvert --to script --output='./run/full_convolutions_B' '05-Full-convolutions.ipynb'"
    ]
   },
   {
@@ -75,7 +75,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "-rwxr-xr-x 1 paroutyj l-simap 11301 Jan 20 22:11 ./run/full_convolutions_A.py\n"
+     "-rw-r--r-- 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py\n"
     ]
    }
   ],
@@ -100,12 +100,12 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Overwriting ./run/batch_full_convolutions_A.sh\n"
+     "Writing ./run/batch_full_convolutions_B.sh\n"
     ]
    }
   ],
   "source": [
-    "%%writefile \"./run/batch_full_convolutions_A.sh\"\n",
+    "%%writefile \"./run/batch_full_convolutions_B.sh\"\n",
     "#!/bin/bash\n",
     "#OAR -n Full convolutions\n",
     "#OAR -t gpu\n",
@@ -131,7 +131,7 @@
     "\n",
     "CONDA_ENV=deeplearning2\n",
     "RUN_DIR=~/fidle/GTSRB\n",
-    "RUN_SCRIPT=./run/full_convolutions_A.py\n",
+    "RUN_SCRIPT=./run/full_convolutions_B.py\n",
     "\n",
     "# ---- Cuda Conda initialization\n",
     "#\n",
@@ -159,8 +159,9 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "-rwxr-xr-x 1 paroutyj l-simap  1045 Jan 20 22:12 ./run/batch_full_convolutions_A.sh\n",
-     "-rwxr-xr-x 1 paroutyj l-simap 11301 Jan 20 22:11 ./run/full_convolutions_A.py\n"
+     "-rwxr-xr-x 1 pjluc pjluc  1045 Jan 21 00:15 ./run/batch_full_convolutions_B.sh\n",
+     "-rwxr-xr-x 1 pjluc pjluc   611 Jan 19 15:53 ./run/batch_full_convolutions.sh\n",
+     "-rwxr-xr-x 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py\n"
     ]
    }
   ],
%% Cell type:markdown id: tags:

German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020

## Episode 5.1 : Full Convolutions / run

Our main steps:
- Run 05-Full-convolutions.ipynb as a batch:
  - Notebook mode
  - Script mode
- Tensorboard follow up

## 1/ Run a notebook as a batch

To run a notebook:

```
jupyter nbconvert --to notebook --execute <notebook>
```
%% Cell type:raw id: tags:
%%bash
# ---- This will execute and save a notebook
#
jupyter nbconvert --ExecutePreprocessor.timeout=-1 --to notebook --output='./run/full_convolutions' --execute '05-Full-convolutions.ipynb'
%% Cell type:markdown id: tags:

## 2/ Export as a script (better choice)

To export a notebook as a script:

```
jupyter nbconvert --to script <notebook>
```

To run the script:

```
ipython <script>
```
%% Cell type:code id: tags:

``` python
%%bash

# ---- This will convert a notebook to a notebook.py script
#
jupyter nbconvert --to script --output='./run/full_convolutions_B' '05-Full-convolutions.ipynb'
```
%% Output

[NbConvertApp] Converting notebook 05-Full-convolutions.ipynb to script
[NbConvertApp] Writing 11305 bytes to ./run/full_convolutions_B.py
%% Cell type:code id: tags:

``` python
!ls -l ./run/*.py
```
%% Output

-rw-r--r-- 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py
%% Cell type:markdown id: tags:

## 3/ Batch submission

Create batch script:
%% Cell type:code id: tags:

``` python
%%writefile "./run/batch_full_convolutions_B.sh"
#!/bin/bash
#OAR -n Full convolutions
#OAR -t gpu
#OAR -l /nodes=1/gpudevice=1,walltime=01:00:00
#OAR --stdout _batch/full_convolutions_%jobid%.out
#OAR --stderr _batch/full_convolutions_%jobid%.err
#OAR --project deeplearningshs

#---- For cpu
# use :
# OAR -l /nodes=1/core=32,walltime=01:00:00
# and add a 2>/dev/null to ipython xxx

# ----------------------------------
#      _           _       _
#     | |__   __ _| |_ ___| |__
#     | '_ \ / _` | __/ __| '_ \
#     | |_) | (_| | || (__| | | |
#     |_.__/ \__,_|\__\___|_| |_|
#                 Full convolutions
# ----------------------------------
#
CONDA_ENV=deeplearning2
RUN_DIR=~/fidle/GTSRB
RUN_SCRIPT=./run/full_convolutions_B.py

# ---- Cuda Conda initialization
#
echo '------------------------------------------------------------'
echo "Start : $0"
echo '------------------------------------------------------------'
#
source /applis/environments/cuda_env.sh dahu 10.0
source /applis/environments/conda.sh
#
conda activate "$CONDA_ENV"

# ---- Run it...
#
cd $RUN_DIR
ipython $RUN_SCRIPT
```
%% Output

Writing ./run/batch_full_convolutions_B.sh
%% Cell type:code id: tags:

``` python
%%bash
chmod 755 ./run/*.sh
chmod 755 ./run/*.py
ls -l ./run/*full_convolutions*
```
%% Output

-rwxr-xr-x 1 pjluc pjluc  1045 Jan 21 00:15 ./run/batch_full_convolutions_B.sh
-rwxr-xr-x 1 pjluc pjluc   611 Jan 19 15:53 ./run/batch_full_convolutions.sh
-rwxr-xr-x 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py
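The #OAR comment lines in the script are read by the scheduler at submission time, so on an OAR-managed cluster the script would normally be submitted rather than executed directly. A hedged sketch, assuming the OAR client tools are available on the front-end (exact options depend on the site; -S tells oarsub to read the embedded #OAR directives):

``` python
# Illustrative only: submit the batch script to the OAR scheduler
!oarsub -S ./run/batch_full_convolutions_B.sh
```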
%% Cell type:raw id: tags:
%%bash
./run/batch_full_convolutions.sh
%% Cell type:code id: tags:

``` python

```