Commit a38078d4 authored by Jean-Luc Parouty <Jean-Luc.Parouty@simap.grenoble-inp.fr>

Merge branch 'master' of gricad-gitlab.univ-grenoble-alpes.fr:talks/fidle

parents d2328bc8 ae150f13
%% Cell type:markdown id: tags:
German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020
## Episode 5 : Full Convolutions
Our main steps:
- Try n models with n datasets
- Save a Pandas/h5 report
- Write to be run in batch mode
## 1/ Import
%% Cell type:code id: tags:
``` python
import tensorflow as tf
from tensorflow import keras
import numpy as np
import h5py
import os, time, json
import random
from IPython.display import display
VERSION = '1.6'
```
%% Cell type:markdown id: tags:
## 2/ Init and start
%% Cell type:code id: tags:
``` python
# ---- Where am I?
now  = time.strftime("%A %d %B %Y - %Hh%Mm%Ss")
here = os.getcwd()
random.seed(time.time())
tag_id = '{:06}'.format(random.randint(0,99999))
# ---- Who am I?
if 'OAR_JOB_ID' in os.environ:
    oar_id = os.environ['OAR_JOB_ID']
else:
    oar_id = '???'
print('\nFull Convolutions Notebook')
print('  Version            : {}'.format(VERSION))
print('  Now is             : {}'.format(now))
print('  OAR id             : {}'.format(oar_id))
print('  Tag id             : {}'.format(tag_id))
print('  Working directory  : {}'.format(here))
print('  TensorFlow version :', tf.__version__)
print('  Keras version      :', tf.keras.__version__)
print('  for tensorboard    : --logdir {}/run/logs_{}'.format(here, tag_id))
```
%% Output
Full Convolutions Notebook
Version : 1.6
Now is : Tuesday 21 January 2020 - 00h11m24s
OAR id : ???
Tag id : 077605
Working directory : /home/pjluc/dev/fidle/GTSRB
TensorFlow version : 2.0.0
Keras version : 2.2.4-tf
for tensorboard : --logdir /home/pjluc/dev/fidle/GTSRB/run/logs_077605
%% Cell type:markdown id: tags:
## 3/ Dataset loading
%% Cell type:code id: tags:
``` python
def read_dataset(name):
    '''Reads h5 dataset from ./data
    Arguments: dataset name, without .h5
    Returns:   x_train,y_train,x_test,y_test data'''
    # ---- Read dataset
    filename = './data/'+name+'.h5'
    with h5py.File(filename,'r') as f:
        x_train = f['x_train'][:]
        y_train = f['y_train'][:]
        x_test  = f['x_test'][:]
        y_test  = f['y_test'][:]
    return x_train, y_train, x_test, y_test
```
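%% Cell type:markdown id: tags:
A quick sanity check of a dataset can look like this (a minimal sketch, assuming ./data/set-24x24-RGB.h5 is present; it is one of the dataset names used in the runs below):
%% Cell type:code id: tags:
``` python
# Minimal check of one dataset (assumes ./data/set-24x24-RGB.h5 exists)
x_train, y_train, x_test, y_test = read_dataset('set-24x24-RGB')
# Expected shapes: (n,24,24,3) for images, (n,) for labels
print('x_train :', x_train.shape, '  y_train :', y_train.shape)
print('x_test  :', x_test.shape,  '  y_test  :', y_test.shape)
```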
%% Cell type:markdown id: tags:
## 4/ Models collection
%% Cell type:code id: tags:
``` python
# A basic model
#
def get_model_v1(lx,ly,lz):
    model = keras.models.Sequential()
    model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(lx,ly,lz)))
    model.add( keras.layers.MaxPooling2D((2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(192, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D((2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Flatten())
    model.add( keras.layers.Dense(1500, activation='relu'))
    model.add( keras.layers.Dropout(0.5))
    model.add( keras.layers.Dense(43, activation='softmax'))
    return model

# A more sophisticated model
#
def get_model_v2(lx,ly,lz):
    model = keras.models.Sequential()
    model.add( keras.layers.Conv2D(64, (3, 3), padding='same', input_shape=(lx,ly,lz), activation='relu'))
    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'))
    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Flatten())
    model.add( keras.layers.Dense(512, activation='relu'))
    model.add( keras.layers.Dropout(0.5))
    model.add( keras.layers.Dense(43, activation='softmax'))
    return model

# A model with batch normalization
#
def get_model_v3(lx,ly,lz):
    model = keras.models.Sequential()
    model.add(tf.keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu', input_shape=(lx,ly,lz)))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Dropout(0.4))
    model.add(tf.keras.layers.Dense(43, activation='softmax'))
    return model
```
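%% Cell type:markdown id: tags:
Before launching long runs, a model can be inspected with its Keras summary (a minimal sketch; the 24x24x3 input shape is just an illustrative choice matching the set-24x24-RGB datasets):
%% Cell type:code id: tags:
``` python
# Build one model instance and show its layer-by-layer summary
# (24,24,3 mirrors the set-24x24-RGB datasets; any lx,ly,lz works)
model = get_model_v1(24, 24, 3)
model.summary()
```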
%% Cell type:markdown id: tags:
## 5/ Multiple datasets, multiple models ;-)
%% Cell type:code id: tags:
``` python
def multi_run(datasets, models, datagen=None,
              train_size=1, test_size=1, batch_size=64, epochs=16,
              verbose=0, extension_dir='last'):
    # ---- Logs and models dir
    #
    os.makedirs('./run/logs_{}'.format(extension_dir),   mode=0o750, exist_ok=True)
    os.makedirs('./run/models_{}'.format(extension_dir), mode=0o750, exist_ok=True)
    # ---- Columns of output
    #
    output={}
    output['Dataset'] = []
    output['Size']    = []
    for m in models:
        output[m+'_Accuracy'] = []
        output[m+'_Duration'] = []
    # ---- Let's go
    #
    for d_name in datasets:
        print("\nDataset : ", d_name)
        # ---- Read dataset
        x_train,y_train,x_test,y_test = read_dataset(d_name)
        d_size = os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)
        output['Dataset'].append(d_name)
        output['Size'].append(d_size)
        # ---- Get the shape
        (n,lx,ly,lz) = x_train.shape
        n_train = int(x_train.shape[0]*train_size)
        n_test  = int(x_test.shape[0]*test_size)
        # ---- For each model
        for m_name,m_function in models.items():
            print("    Run model {} : ".format(m_name), end='')
            # ---- Get model
            try:
                model = m_function(lx,ly,lz)
                # ---- Compile it
                model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
                # ---- Callbacks tensorboard
                log_dir = "./run/logs_{}/tb_{}_{}".format(extension_dir, d_name, m_name)
                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
                # ---- Callbacks bestmodel
                save_dir = "./run/models_{}/model_{}_{}.h5".format(extension_dir, d_name, m_name)
                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)
                # ---- Train
                start_time = time.time()
                if datagen is None:
                    # ---- No data augmentation (datagen=None) --------------------------------------
                    history = model.fit(x_train[:n_train], y_train[:n_train],
                                        batch_size      = batch_size,
                                        epochs          = epochs,
                                        verbose         = verbose,
                                        validation_data = (x_test[:n_test], y_test[:n_test]),
                                        callbacks       = [tensorboard_callback, bestmodel_callback])
                else:
                    # ---- Data augmentation (datagen given) ----------------------------------------
                    datagen.fit(x_train)
                    history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                                        steps_per_epoch = int(n_train/batch_size),
                                        epochs          = epochs,
                                        verbose         = verbose,
                                        validation_data = (x_test[:n_test], y_test[:n_test]),
                                        callbacks       = [tensorboard_callback, bestmodel_callback])
                # ---- Result
                end_time = time.time()
                duration = end_time-start_time
                accuracy = max(history.history["val_accuracy"])*100
                #
                output[m_name+'_Accuracy'].append(accuracy)
                output[m_name+'_Duration'].append(duration)
                print("Accuracy={:.2f} and Duration={:.2f}".format(accuracy,duration))
            except Exception:
                # ---- A failed run is recorded with sentinel values
                output[m_name+'_Accuracy'].append('0')
                output[m_name+'_Duration'].append('999')
                print('-')
    return output
```
%% Cell type:markdown id: tags:
## 6/ Run !
%% Cell type:code id: tags:
``` python
start_time = time.time()
print('\n---- Run','-'*50)
# --------- Datasets, models, and more.. -----------------------------------
#
# ---- For tests
# datasets     = ['set-24x24-L', 'set-24x24-RGB']
# models       = {'v1':get_model_v1, 'v4':get_model_v2}
# batch_size   = 64
# epochs       = 2
# train_size   = 0.1
# test_size    = 0.1
# with_datagen = False
# verbose      = 0
#
# ---- All possibilities -> Run A
# datasets     = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']
# models       = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}
# batch_size   = 64
# epochs       = 16
# train_size   = 1
# test_size    = 1
# with_datagen = False
# verbose      = 0
#
# ---- Data augmentation -> Run B
datasets     = ['set-48x48-RGB']
models       = {'v2':get_model_v2}
batch_size   = 64
epochs       = 20
train_size   = 1
test_size    = 1
with_datagen = True
verbose      = 0
#
# ---------------------------------------------------------------------------
# ---- Data augmentation
#
if with_datagen :
    datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
                                                           featurewise_std_normalization=False,
                                                           width_shift_range=0.1,
                                                           height_shift_range=0.1,
                                                           zoom_range=0.2,
                                                           shear_range=0.1,
                                                           rotation_range=10.)
else:
    datagen=None
# ---- Run
#
output = multi_run(datasets, models,
                   datagen=datagen,
                   train_size=train_size, test_size=test_size,
                   batch_size=batch_size, epochs=epochs,
                   verbose=verbose,
                   extension_dir=tag_id)
# ---- Save report
#
report={}
report['output']      = output
report['description'] = 'train_size={} test_size={} batch_size={} epochs={} data_aug={}'.format(train_size,test_size,batch_size,epochs,with_datagen)
report_name = './run/report_{}.json'.format(tag_id)
with open(report_name, 'w') as file:
    json.dump(report, file)
print('\nReport saved as ', report_name)
end_time = time.time()
duration = end_time-start_time
print('Duration : {} s'.format(duration))
print('-'*59)
```
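%% Cell type:markdown id: tags:
The saved report can later be reloaded for analysis (a minimal sketch, assuming a report_<tag_id>.json file written by the cell above; Pandas is not imported at the top of this notebook, hence the local import):
%% Cell type:code id: tags:
``` python
# Reload the saved report and view it as a Pandas DataFrame
# (assumes a report produced by the run cell above)
import pandas as pd
with open(report_name) as f:
    report = json.load(f)
df = pd.DataFrame(report['output'])
display(df)
```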
%% Cell type:markdown id: tags:
## 7/ That's all folks..
%% Cell type:code id: tags:
``` python
print('\n{}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
print("The work is done.\n")
```
%% Cell type:code id: tags:
``` python
```
%% Cell type:markdown id: tags:
German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020
## Episode 5.1 : Full Convolutions / run
Our main steps:
- Run 05-Full-convolutions.ipynb as a batch :
  - Notebook mode
  - Script mode
- Tensorboard follow up (see the sketch in section 4 below)
## 1/ Run a notebook as a batch
To run a notebook :
```jupyter nbconvert --to notebook --execute <notebook>```
%% Cell type:raw id: tags:
%%bash
# ---- This will execute and save a notebook
#
jupyter nbconvert --ExecutePreprocessor.timeout=-1 --to notebook --output='./run/full_convolutions' --execute '05-Full-convolutions.ipynb'
%% Cell type:markdown id: tags:
## 2/ Export as a script (better choice)
To export a notebook as a script :
```jupyter nbconvert --to script <notebook>```
To run the script :
```ipython <script>```
%% Cell type:code id: tags:
``` python
%%bash
# ---- This will convert a notebook to a notebook.py script
#
jupyter nbconvert --to script --output='./run/full_convolutions_B' '05-Full-convolutions.ipynb'
```
%% Output
[NbConvertApp] Converting notebook 05-Full-convolutions.ipynb to script
[NbConvertApp] Writing 11305 bytes to ./run/full_convolutions_B.py
%% Cell type:code id: tags:
``` python
!ls -l ./run/*.py
```
%% Output
-rw-r--r-- 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py
%% Cell type:markdown id: tags:
## 3/ Batch submission
Create the batch script :
%% Cell type:code id: tags:
``` python
%%writefile "./run/batch_full_convolutions_B.sh"
#!/bin/bash
#OAR -n Full convolutions
#OAR -t gpu
#OAR -l /nodes=1/gpudevice=1,walltime=01:00:00
#OAR --stdout _batch/full_convolutions_%jobid%.out
#OAR --stderr _batch/full_convolutions_%jobid%.err
#OAR --project deeplearningshs
#---- For cpu
# use :
# OAR -l /nodes=1/core=32,walltime=01:00:00
# and add a 2>/dev/null to ipython xxx
# ----------------------------------
#  _           _       _
# | |__   __ _| |_ ___| |__
# | '_ \ / _` | __/ __| '_ \
# | |_) | (_| | || (__| | | |
# |_.__/ \__,_|\__\___|_| |_|
#               Full convolutions
# ----------------------------------
#
CONDA_ENV=deeplearning2
RUN_DIR=~/fidle/GTSRB
RUN_SCRIPT=./run/full_convolutions_B.py
# ---- Cuda Conda initialization
#
echo '------------------------------------------------------------'
echo "Start : $0"
echo '------------------------------------------------------------'
#
source /applis/environments/cuda_env.sh dahu 10.0
source /applis/environments/conda.sh
#
conda activate "$CONDA_ENV"
# ---- Run it...
#
cd $RUN_DIR
ipython $RUN_SCRIPT
```
%% Output
Writing ./run/batch_full_convolutions_B.sh
%% Cell type:code id: tags:
``` python
%%bash
chmod 755 ./run/*.sh
chmod 755 ./run/*.py
ls -l ./run/*full_convolutions*
```
%% Output
-rwxr-xr-x 1 pjluc pjluc  1045 Jan 21 00:15 ./run/batch_full_convolutions_B.sh
-rwxr-xr-x 1 pjluc pjluc   611 Jan 19 15:53 ./run/batch_full_convolutions.sh
-rwxr-xr-x 1 pjluc pjluc 11305 Jan 21 00:13 ./run/full_convolutions_B.py
%% Cell type:raw id: tags:
%%bash
./run/batch_full_convolutions.sh
%% Cell type:code id: tags:
``` python
```
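%% Cell type:markdown id: tags:
## 4/ Tensorboard follow up
Training can be followed live with Tensorboard (a minimal sketch; the exact logdir is printed at notebook startup, with <tag_id> standing for the run's tag id):
```tensorboard --logdir ./run/logs_<tag_id>```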