# ------------------------------------------------------------------
# _____ _ _ _
# | ___(_) __| | | ___
# | |_ | |/ _` | |/ _ \
# | _| | | (_| | | __/ DataGenerator
# |_| |_|\__,_|_|\___| for clustered CelebA dataset
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# Initial version by JL Parouty, feb 2020
import numpy as np
import pandas as pd
import math
import os, glob

from tensorflow.keras.utils import Sequence


class DataGenerator(Sequence):

    version = '0.4.1'

    def __init__(self, clusters_dir='./data', batch_size=32, debug=False, k_size=1):
        '''
        Instantiation of the data generator
        args:
            clusters_dir : Directory of the cluster files
            batch_size   : Batch size (32)
            debug        : Debug mode (False)
            k_size       : Fraction of the dataset to use, 1 means 100% (1)
        '''
        if debug: self.about()
        #
        # ---- Get the list of clusters
        #
        clusters_name = [ os.path.splitext(f)[0] for f in glob.glob( f'{clusters_dir}/*.npy') ]
        clusters_size = len(clusters_name)
        #
        # ---- Read each cluster description,
        #      because we need the full dataset size
        #
        dataset_size = 0
        for c in clusters_name:
            df = pd.read_csv(c+'.csv', header=0)
            dataset_size += len(df.index)
        #
        # ---- If we only want to use a part of the dataset...
        #
        dataset_size = int(dataset_size * k_size)
        #
        if debug:
            print(f'\nClusters nb  : {len(clusters_name)} files')
            print(f'Dataset size : {dataset_size}')
            print(f'Batch size   : {batch_size}')
        #
        # ---- Remember all of that
        #
        self.clusters_dir  = clusters_dir
        self.batch_size    = batch_size
        self.clusters_name = clusters_name
        self.clusters_size = clusters_size
        self.dataset_size  = dataset_size
        self.debug         = debug
        #
        # ---- Read a first cluster
        #
        self.cluster_i = clusters_size
        self.read_next_cluster()

    def __len__(self):
        return math.floor(self.dataset_size / self.batch_size)

    def __getitem__(self, idx):
        # (idx is not used : batches are served sequentially from the current cluster)
        #
        # ---- Get the next item index
        #
        i = self.data_i
        #
        # ---- Get a batch
        #
        batch = self.data[i:i+self.batch_size]
        #
        # ---- Cluster is large enough
        #
        if len(batch) == self.batch_size:
            self.data_i += self.batch_size
            if self.debug: print(f'({len(batch)}) ', end='')
            return batch, batch
        #
        # ---- Not enough... : complete with the beginning of the next cluster
        #
        if self.debug: print(f'({len(batch)}..) ', end='')
        #
        self.read_next_cluster()
        batch2 = self.data[ 0:self.batch_size-len(batch) ]
        self.data_i = self.batch_size - len(batch)
        batch = np.concatenate( (batch, batch2) )
        #
        if self.debug: print(f'(..{len(batch2)}) ', end='')
        return batch, batch

    def on_epoch_end(self):
        self.cluster_i = self.clusters_size
        self.read_next_cluster()

    def read_next_cluster(self):
        #
        # ---- Get the next cluster name
        #      If we have reached the end of the list, we shuffle and
        #      start again from the beginning.
        #
        i = self.cluster_i + 1
        if i >= self.clusters_size:
            np.random.shuffle(self.clusters_name)
            i = 0
            if self.debug: print('\n[shuffle!]')
        #
        # ---- Read it (images still normalized)
        #
        data = np.load( self.clusters_name[i]+'.npy', mmap_mode='r' )
        #
        # ---- Remember all of that
        #
        self.data      = data
        self.data_i    = 0
        self.cluster_i = i
        #
        if self.debug: print(f'\n[Load {self.cluster_i:02d},s={len(self.data):3d}] ', end='')

    @classmethod
    def about(cls):
        print('\nFIDLE 2020 - DataGenerator')
        print('Version :', cls.version)
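# --- Usage sketch (added for illustration ; the path and sizes below are assumptions) ---
#
#   datagen = DataGenerator(clusters_dir='./data/clusters', batch_size=32, k_size=1)
#   x, y    = datagen[0]       # autoencoder-style batch : inputs == targets
#   n_batch = len(datagen)     # number of full batches per epoch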
# ------------------------------------------------------------------
# _____ _ _ _
# | ___(_) __| | | ___
# | |_ | |/ _` | |/ _ \
# | _| | | (_| | | __/
# |_| |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# Initial version by JL Parouty, feb 2020
import numpy as np
import tensorflow as tf
import tensorflow.keras.datasets.mnist as mnist


class Loader_MNIST():

    version = '0.1'

    def __init__(self):
        pass

    @classmethod
    def about(cls):
        print('\nFIDLE 2020 - Very basic MNIST dataset loader')
        print('TensorFlow version :', tf.__version__)
        print('Loader version :', cls.version)

    @classmethod
    def load(cls, normalize=True, expand=True, verbose=1):
        # ---- Get data
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        if verbose>0: print('Dataset loaded.')
        # ---- Normalization
        if normalize:
            x_train = x_train.astype('float32') / 255.
            x_test  = x_test.astype('float32') / 255.
            if verbose>0: print('Normalized.')
        # ---- Reshape : (28,28) -> (28,28,1)
        if expand:
            x_train = np.expand_dims(x_train, axis=3)
            x_test  = np.expand_dims(x_test, axis=3)
            if verbose>0: print(f'Reshaped to {x_train.shape}')
        return (x_train, y_train), (x_test, y_test)
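# --- Usage sketch (added for illustration) ---
#
#   (x_train, y_train), (x_test, y_test) = Loader_MNIST.load(normalize=True, expand=True)
#   # x_train.shape == (60000, 28, 28, 1) after normalization and expansion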
# ------------------------------------------------------------------
# _____ _ _ _
# | ___(_) __| | | ___
# | |_ | |/ _` | |/ _ \
# | _| | | (_| | | __/
# |_| |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# by JL Parouty (feb 2020), based on David Foster examples.
import numpy as np
import math
import tensorflow as tf
import tensorflow.keras as keras

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Reshape, Lambda
from tensorflow.keras.layers import Activation, BatchNormalization, LeakyReLU, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model

from modules.callbacks import ImagesCallback
from modules.data_generator import DataGenerator

import os, json, time, datetime


class VariationalAutoencoder():

    version = '1.28'

    def __init__(self, input_shape=None, encoder_layers=None, decoder_layers=None, z_dim=None, run_tag='000', verbose=0):

        self.name           = 'Variational AutoEncoder'
        self.input_shape    = list(input_shape)
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.z_dim          = z_dim
        self.run_tag        = str(run_tag)
        self.verbose        = verbose
        self.run_directory  = f'./run/{run_tag}'

        # ---- Create run directories
        for d in ('', '/models', '/figs', '/logs', '/images'):
            os.makedirs(self.run_directory+d, mode=0o750, exist_ok=True)

        # ==== Encoder ================================================================

        # ---- Input layer
        encoder_input = Input(shape=self.input_shape, name='encoder_input')
        x = encoder_input

        # ---- Add next layers
        i = 1
        for l_config in encoder_layers:
            l_type   = l_config['type']
            l_params = l_config.copy()
            l_params.pop('type')
            if l_type == 'Conv2D':
                layer = Conv2D(**l_params)
            if l_type == 'Dropout':
                layer = Dropout(**l_params)
            x = layer(x)
            i += 1

        # ---- Flatten
        shape_before_flattening = K.int_shape(x)[1:]
        x = Flatten()(x)

        # ---- mu <-> log_var
        self.mu      = Dense(self.z_dim, name='mu')(x)
        self.log_var = Dense(self.z_dim, name='log_var')(x)

        self.encoder_mu_log_var = Model(encoder_input, (self.mu, self.log_var))

        # ---- Output layer : sample z from (mu, log_var) - reparameterization trick
        def sampling(args):
            mu, log_var = args
            epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
            return mu + K.exp(log_var / 2) * epsilon

        encoder_output = Lambda(sampling, name='encoder_output')([self.mu, self.log_var])

        self.encoder = Model(encoder_input, encoder_output)

        # ==== Decoder ================================================================

        # ---- Input layer
        decoder_input = Input(shape=(self.z_dim,), name='decoder_input')

        # ---- First dense layer
        x = Dense(np.prod(shape_before_flattening))(decoder_input)
        x = Reshape(shape_before_flattening)(x)

        # ---- Add next layers
        i = 1
        for l_config in decoder_layers:
            l_type   = l_config['type']
            l_params = l_config.copy()
            l_params.pop('type')
            if l_type == 'Conv2DTranspose':
                layer = Conv2DTranspose(**l_params)
            if l_type == 'Dropout':
                layer = Dropout(**l_params)
            x = layer(x)
            i += 1

        decoder_output = x

        self.decoder = Model(decoder_input, decoder_output)

        # ==== Encoder-Decoder ========================================================

        model_input  = encoder_input
        model_output = self.decoder(encoder_output)

        self.model = Model(model_input, model_output)

        # ==== Verbosity ==============================================================

        print('Model initialized.')
        print(f'Outputs will be in : {self.run_directory}')

        if verbose > 0:
            print('\n', '-'*10, 'Encoder', '-'*50, '\n')
            self.encoder.summary()
            print('\n', '-'*10, 'Decoder', '-'*50, '\n')
            self.decoder.summary()
            self.plot_model()

    def compile(self, optimizer='adam', r_loss_factor=1000):
        self.r_loss_factor = r_loss_factor

        def vae_r_loss(y_true, y_pred):
            # Reconstruction loss : pixel-wise MSE, weighted by r_loss_factor
            r_loss = K.mean(K.square(y_true - y_pred), axis=[1, 2, 3])
            return r_loss_factor * r_loss

        def vae_kl_loss(y_true, y_pred):
            # Kullback-Leibler divergence between N(mu, var) and N(0, 1)
            kl_loss = -0.5 * K.sum(1 + self.log_var - K.square(self.mu) - K.exp(self.log_var), axis=1)
            return kl_loss

        def vae_loss(y_true, y_pred):
            r_loss  = vae_r_loss(y_true, y_pred)
            kl_loss = vae_kl_loss(y_true, y_pred)
            return r_loss + kl_loss

        self.model.compile(optimizer=optimizer,
                           loss=vae_loss,
                           metrics=[vae_r_loss, vae_kl_loss],
                           experimental_run_tf_function=False)
        print('Compiled.')

    def train(self,
              x_train=None,
              x_test=None,
              data_generator=None,
              batch_size=32,
              epochs=20,
              initial_epoch=0,
              k_size=1
              ):

        # ---- Data given directly or via generator
        mode_data = (data_generator is None)

        # ---- Size of the dataset we are going to use
        #      k_size == 1 means 100%
        #      Unused with a data generator
        #
        if mode_data:
            n_train = int(x_train.shape[0] * k_size)
            n_test  = int(x_test.shape[0] * k_size)

        # ---- Callback : Images
        filename = self.run_directory+"/images/image-{epoch:03d}-{i:02d}.jpg"
        callbacks_images = ImagesCallback(filename, z_dim=self.z_dim, decoder=self.decoder)

        # ---- Callback : Checkpoint
        filename = self.run_directory+"/models/model-{epoch:03d}.h5"
        callback_chkpts = ModelCheckpoint(filename, save_freq='epoch', verbose=0)

        # ---- Callback : Best model
        filename = self.run_directory+"/models/best_model.h5"
        callback_bestmodel = ModelCheckpoint(filename, save_best_only=True, mode='min', monitor='val_loss', verbose=0)

        # ---- Callback : Tensorboard
        dirname = self.run_directory+"/logs"
        callback_tensorboard = TensorBoard(log_dir=dirname, histogram_freq=1)

        callbacks_list = [callbacks_images, callback_chkpts, callback_bestmodel, callback_tensorboard]
        # callbacks_list = [callback_chkpts, callback_bestmodel, callback_tensorboard]

        # ---- Let's go...
        start_time = time.time()

        if mode_data:
            #
            # ---- With pure data (x_train) -----------------------------------------
            #
            self.history = self.model.fit(x_train[:n_train], x_train[:n_train],
                                          batch_size      = batch_size,
                                          shuffle         = True,
                                          epochs          = epochs,
                                          initial_epoch   = initial_epoch,
                                          callbacks       = callbacks_list,
                                          validation_data = (x_test[:n_test], x_test[:n_test])
                                          )
        else:
            #
            # ---- With Data Generator ----------------------------------------------
            #
            self.history = self.model.fit(data_generator,
                                          shuffle         = True,
                                          epochs          = epochs,
                                          initial_epoch   = initial_epoch,
                                          callbacks       = callbacks_list,
                                          validation_data = (x_test, x_test)
                                          )

        end_time = time.time()
        dt  = end_time - start_time
        dth = str(datetime.timedelta(seconds=int(dt)))
        self.duration = dt
        print(f'\nTrain duration : {dt:.2f} sec. - {dth:}')

    def plot_model(self):
        d = self.run_directory+'/figs'
        plot_model(self.model,   to_file=f'{d}/model.png',   show_shapes=True, show_layer_names=True, expand_nested=True)
        plot_model(self.encoder, to_file=f'{d}/encoder.png', show_shapes=True, show_layer_names=True)
        plot_model(self.decoder, to_file=f'{d}/decoder.png', show_shapes=True, show_layer_names=True)

    def save(self, config='vae_config.json', model='model.h5', force=False):

        # ---- Check if the place is already used
        if os.path.isfile(self.run_directory+'/models/best_model.h5') and not force:
            print('\n*** Oops. There is already stuff in the target folder !\n')
            assert False, f'Tag directory {self.run_directory} is not empty...'

        # ---- Save config in json
        if config is not None:
            to_save  = ['input_shape', 'encoder_layers', 'decoder_layers', 'z_dim', 'run_tag', 'verbose']
            data     = { i: self.__dict__[i] for i in to_save }
            filename = self.run_directory+'/models/'+config
            with open(filename, 'w') as outfile:
                json.dump(data, outfile)
            print(f'Config saved in : {filename}')

        # ---- Save model
        if model is not None:
            filename = self.run_directory+'/models/'+model
            self.model.save(filename)
            print(f'Model saved in : {filename}')

    def load_weights(self, model='model.h5'):
        filename = self.run_directory+'/models/'+model
        self.model.load_weights(filename)
        print(f'Weights loaded from : {filename}')

    @classmethod
    def load(cls, run_tag='000', config='vae_config.json', weights='model.h5'):

        # ---- Instantiate a new vae from the saved config
        filename = f'./run/{run_tag}/models/{config}'
        with open(filename, 'r') as infile:
            params = json.load(infile)
        vae = cls(**params)

        # ---- weights is None : just return the untrained instance
        if weights is None: return vae

        # ---- Otherwise, load the saved weights
        vae.load_weights(weights)
        return vae

    @classmethod
    def about(cls):
        print('\nFIDLE 2020 - Variational AutoEncoder (VAE)')
        print('TensorFlow version :', tf.__version__)
        print('VAE version :', cls.version)
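# --- Construction sketch (added for illustration ; the layer parameters are assumptions) ---
#
#   vae = VariationalAutoencoder(input_shape    = (28, 28, 1),
#                                encoder_layers = [ {'type':'Conv2D', 'filters':32, 'kernel_size':3, 'strides':2, 'padding':'same', 'activation':'relu'} ],
#                                decoder_layers = [ {'type':'Conv2DTranspose', 'filters':1, 'kernel_size':3, 'strides':2, 'padding':'same', 'activation':'sigmoid'} ],
#                                z_dim          = 2,
#                                run_tag        = '001')
#   vae.compile(optimizer='adam', r_loss_factor=1000)
#   vae.train(x_train=x_train, x_test=x_test, batch_size=32, epochs=20)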
%% Cell type:markdown id: tags:
<img width="800px" src="../fidle/img/header.svg"></img>
# <!-- TITLE --> [K3WINE1] - Wine quality prediction with a Dense Network (DNN)
<!-- DESC --> Another example of regression, with a wine quality prediction, using Keras 3 and PyTorch
<!-- AUTHOR : Jean-Luc Parouty (CNRS/SIMaP) -->
## Objectives :
- Predict the **quality of wines**, based on their analysis
- Understand the principle and the architecture of a regression with a dense neural network, with backup and restore of the trained model.
The **[Wine Quality datasets](https://archive.ics.uci.edu/ml/datasets/wine+Quality)** are made up of analyses of a large number of wines, with an associated quality (between 0 and 10)
This dataset is provided by :
Paulo Cortez, University of Minho, Guimarães, Portugal, http://www3.dsi.uminho.pt/pcortez
A. Cerdeira, F. Almeida, T. Matos and J. Reis, Viticulture Commission of the Vinho Verde Region (CVRVV), Porto, Portugal, @2009
This dataset can be retrieved from [University of California Irvine (UCI)](https://archive.ics.uci.edu/dataset/186/wine+quality)
Due to privacy and logistic issues, only physicochemical and sensory variables are available
There is no data about grape types, wine brand, wine selling price, etc. The available attributes are :
- fixed acidity
- volatile acidity
- citric acid
- residual sugar
- chlorides
- free sulfur dioxide
- total sulfur dioxide
- density
- pH
- sulphates
- alcohol
- quality (score between 0 and 10)
## What we're going to do :
- (Retrieve data)
- (Preparing the data)
- (Build a model)
- Train and save the model
- Restore saved model
- Evaluate the model
- Make some predictions
%% Cell type:markdown id: tags:
## Step 1 - Import and init
%% Cell type:code id: tags:
``` python
import os
os.environ['KERAS_BACKEND'] = 'torch'
import keras
import numpy as np
import pandas as pd
import fidle
# Init Fidle environment
run_id, run_dir, datasets_dir = fidle.init('K3WINE1')
```
%% Cell type:markdown id: tags:
Verbosity during training :
- 0 = silent
- 1 = progress bar
- 2 = one line per epoch
%% Cell type:code id: tags:
``` python
fit_verbosity = 1
dataset_name = 'winequality-red.csv'
```
%% Cell type:markdown id: tags:
Override parameters (batch mode) - Just forget this cell
%% Cell type:code id: tags:
``` python
fidle.override('fit_verbosity', 'dataset_name')
```
%% Cell type:markdown id: tags:
## Step 2 - Retrieve data
%% Cell type:code id: tags:
``` python
data = pd.read_csv(f'{datasets_dir}/WineQuality/origine/{dataset_name}', header=0,sep=';')
display(data.head(5).style.format("{0:.2f}"))
print('Missing Data : ',data.isna().sum().sum(), ' Shape is : ', data.shape)
```
%% Cell type:markdown id: tags:
## Step 3 - Preparing the data
### 3.1 - Split data
We will use 80% of the data for training and 20% for validation.
x will be the analysis data and y the quality
%% Cell type:code id: tags:
``` python
# ---- Split => train, test
#
data = data.sample(frac=1., axis=0) # Shuffle
data_train = data.sample(frac=0.8, axis=0) # get 80 %
data_test = data.drop(data_train.index) # test = all - train
# ---- Split => x,y (quality is the target)
#
x_train = data_train.drop('quality', axis=1)
y_train = data_train['quality']
x_test = data_test.drop('quality', axis=1)
y_test = data_test['quality']
print('Original data shape was : ',data.shape)
print('x_train : ',x_train.shape, 'y_train : ',y_train.shape)
print('x_test : ',x_test.shape, 'y_test : ',y_test.shape)
```
%% Cell type:markdown id: tags:
### 3.2 - Data normalization
**Note :**
- All input data must be normalized, train and test.
- To do this we will subtract the mean and divide by the standard deviation.
- But test data should not be used in any way, even for normalization.
- The mean and the standard deviation will therefore only be calculated with the train data.
%% Cell type:code id: tags:
``` python
display(x_train.describe().style.format("{0:.2f}").set_caption("Before normalization :"))
mean = x_train.mean()
std = x_train.std()
x_train = (x_train - mean) / std
x_test = (x_test - mean) / std
display(x_train.describe().style.format("{0:.2f}").set_caption("After normalization :"))
# Convert our DataFrames to numpy arrays
x_train, y_train = np.array(x_train), np.array(y_train)
x_test, y_test = np.array(x_test), np.array(y_test)
```
%% Cell type:markdown id: tags:
## Step 4 - Build a model
More information about :
- [Optimizer](https://keras.io/api/optimizers)
- [Activation](https://keras.io/api/layers/activations)
- [Loss](https://keras.io/api/losses)
- [Metrics](https://keras.io/api/metrics)
%% Cell type:code id: tags:
``` python
def get_model_v1(shape):
    model = keras.models.Sequential()
    model.add(keras.layers.Input(shape, name="InputLayer"))
    model.add(keras.layers.Dense(64, activation='relu', name='Dense_n1'))
    model.add(keras.layers.Dense(64, activation='relu', name='Dense_n2'))
    model.add(keras.layers.Dense(1, name='Output'))
    model.compile(optimizer = 'rmsprop',
                  loss      = 'mse',
                  metrics   = ['mae', 'mse'] )
    return model
```
%% Cell type:markdown id: tags:
## Step 5 - Train the model
### 5.1 - Get it
%% Cell type:code id: tags:
``` python
model=get_model_v1( (11,) )
model.summary()
```
%% Cell type:markdown id: tags:
### 5.2 - Add callback
%% Cell type:code id: tags:
``` python
os.makedirs('./run/models', mode=0o750, exist_ok=True)
save_dir = "./run/models/best_model.keras"
savemodel_callback = keras.callbacks.ModelCheckpoint( filepath=save_dir, monitor='val_mae', mode='min', save_best_only=True)
```
%% Cell type:markdown id: tags:
### 5.3 - Train it
%% Cell type:code id: tags:
``` python
history = model.fit(x_train,
                    y_train,
                    epochs          = 100,
                    batch_size      = 10,
                    verbose         = fit_verbosity,
                    validation_data = (x_test, y_test),
                    callbacks       = [savemodel_callback])
```
%% Cell type:markdown id: tags:
## Step 6 - Evaluate
### 6.1 - Model evaluation
MAE = Mean Absolute Error (between the labels and predictions)
A MAE equal to 0.5 represents an average prediction error of 0.5 points on the quality scale (0 to 10).
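%% Cell type:markdown id: tags:
For intuition, here is a minimal sketch of the metric itself (added for illustration, with made-up values) :
%% Cell type:code id: tags:
``` python
# Minimal MAE sketch on made-up quality scores (illustration only)
y_true = np.array([5., 6., 7.])
y_pred = np.array([5.5, 5.5, 6.5])
print('MAE :', np.mean(np.abs(y_true - y_pred)))   # -> 0.5 quality points
```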
%% Cell type:code id: tags:
``` python
score = model.evaluate(x_test, y_test, verbose=0)
print('x_test / loss : {:5.4f}'.format(score[0]))
print('x_test / mae : {:5.4f}'.format(score[1]))
print('x_test / mse : {:5.4f}'.format(score[2]))
```
%% Cell type:markdown id: tags:
### 6.2 - Training history
What was the best result during our training ?
%% Cell type:code id: tags:
``` python
print("min( val_mae ) : {:.4f}".format( min(history.history["val_mae"]) ) )
```
%% Cell type:code id: tags:
``` python
fidle.scrawler.history( history, plot={'MSE' :['mse', 'val_mse'],
                                       'MAE' :['mae', 'val_mae'],
                                       'LOSS':['loss','val_loss']}, save_as='01-history')
```
%% Cell type:markdown id: tags:
## Step 7 - Restore a model :
%% Cell type:markdown id: tags:
### 7.1 - Reload model
%% Cell type:code id: tags:
``` python
loaded_model = keras.models.load_model('./run/models/best_model.keras')
loaded_model.summary()
print("Loaded.")
```
%% Cell type:markdown id: tags:
### 7.2 - Evaluate it :
%% Cell type:code id: tags:
``` python
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('x_test / loss : {:5.4f}'.format(score[0]))
print('x_test / mae : {:5.4f}'.format(score[1]))
print('x_test / mse : {:5.4f}'.format(score[2]))
```
%% Cell type:markdown id: tags:
### 7.3 - Make a prediction
%% Cell type:code id: tags:
``` python
# ---- Pick n entries from our test set
n = 200
ii = np.random.randint(0,len(x_test),n)
x_sample = x_test[ii]
y_sample = y_test[ii]
```
%% Cell type:code id: tags:
``` python
# ---- Make some predictions
y_pred = loaded_model.predict( x_sample, verbose=2 )
```
%% Cell type:code id: tags:
``` python
# ---- Show it
print('Wine Prediction Real Delta')
for i in range(n):
    pred  = y_pred[i][0]
    real  = y_sample[i]
    delta = real - pred
    print(f'{i:03d} {pred:.2f} {real} {delta:+.2f} ')
```
%% Cell type:markdown id: tags:
### A few questions :
- Can this model be used for red wines from Bordeaux and/or Beaujolais?
- What are the limitations of this model?
- What are the limitations of this dataset?
%% Cell type:code id: tags:
``` python
fidle.end()
```
%% Cell type:markdown id: tags:
---
<img width="80px" src="../fidle/img/logo-paysage.svg"></img>
%% Cell type:markdown id: tags:
<img width="800px" src="../fidle/img/header.svg"></img>
# <!-- TITLE --> [LWINE1] - Wine quality prediction with a Dense Network (DNN)
<!-- DESC --> Another example of regression, with a wine quality prediction, using PyTorch Lightning
<!-- AUTHOR : Achille Mbogol Touye (EFFILIA-MIAI/SIMaP) -->
## Objectives :
- Predict the **quality of wines**, based on their analysis
- Understand the principle and the architecture of a regression with a dense neural network, with backup and restore of the trained model.
The **[Wine Quality datasets](https://archive.ics.uci.edu/ml/datasets/wine+Quality)** are made up of analyses of a large number of wines, with an associated quality (between 0 and 10)
This dataset is provided by :
Paulo Cortez, University of Minho, Guimarães, Portugal, http://www3.dsi.uminho.pt/pcortez
A. Cerdeira, F. Almeida, T. Matos and J. Reis, Viticulture Commission of the Vinho Verde Region (CVRVV), Porto, Portugal, @2009
This dataset can be retrieved from [University of California Irvine (UCI)](https://archive-beta.ics.uci.edu/ml/datasets/wine+quality)
Due to privacy and logistic issues, only physicochemical and sensory variables are available
There is no data about grape types, wine brand, wine selling price, etc. The available attributes are :
- fixed acidity
- volatile acidity
- citric acid
- residual sugar
- chlorides
- free sulfur dioxide
- total sulfur dioxide
- density
- pH
- sulphates
- alcohol
- quality (score between 0 and 10)
## What we're going to do :
- (Retrieve data)
- (Preparing the data)
- (Build a model)
- Train and save the model
- Restore saved model
- Evaluate the model
- Make some predictions
%% Cell type:markdown id: tags:
## Step 1 - Import and init
%% Cell type:code id: tags:
``` python
# Import some packages
import os
import sys
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import lightning.pytorch as pl
import torch.nn.functional as F
import torchvision.transforms as T
from importlib import reload
from IPython.display import Markdown
from torch.utils.data import Dataset, DataLoader, random_split
from modules.progressbar import CustomTrainProgressBar
from modules.data_load import WineQualityDataset, Normalize, ToTensor
from lightning.pytorch.loggers.tensorboard import TensorBoardLogger
from torchmetrics.functional.regression import mean_absolute_error, mean_squared_error
import fidle
# Init Fidle environment
run_id, run_dir, datasets_dir = fidle.init('LWINE1')
```
%% Cell type:markdown id: tags:
Verbosity during training :
- 0 = silent
- 1 = progress bar
- 2 = one line per epoch
%% Cell type:code id: tags:
``` python
fit_verbosity = 1
dataset_name = 'winequality-red.csv'
```
%% Cell type:markdown id: tags:
Override parameters (batch mode) - Just forget this cell
%% Cell type:code id: tags:
``` python
fidle.override('fit_verbosity', 'dataset_name')
```
%% Cell type:markdown id: tags:
## Step 2 - Retrieve data
%% Cell type:code id: tags:
``` python
csv_file_path=f'{datasets_dir}/WineQuality/origine/{dataset_name}'
datasets=WineQualityDataset(csv_file_path)
display(datasets.data.head(5).style.format("{0:.2f}"))
print('Missing Data : ',datasets.data.isna().sum().sum(), ' Shape is : ', datasets.data.shape)
```
%% Cell type:markdown id: tags:
## Step 3 - Preparing the data
%% Cell type:markdown id: tags:
### 3.1 - Data normalization
**Note :**
- All input features must be normalized.
- To do this we will subtract the mean and divide by the standard deviation for each input feature.
- Then we convert the numpy features and the target **(quality)** to torch tensors
%% Cell type:code id: tags:
``` python
transforms=T.Compose([Normalize(csv_file_path), ToTensor()])
dataset=WineQualityDataset(csv_file_path,transform=transforms)
```
%% Cell type:code id: tags:
``` python
display(Markdown("before normalization :"))
display(datasets[:]["features"])
print()
display(Markdown("After normalization :"))
display(dataset[:]["features"])
```
%% Cell type:markdown id: tags:
### 3.2 - Split data
We will use 80% of the data for training and 20% for validation.
x will be the features of the analysis and y the target (quality)
%% Cell type:code id: tags:
``` python
# ---- Split => train, test
#
data_train_len = int(len(dataset)*0.8) # get 80 %
data_test_len = len(dataset) -data_train_len # test = all - train
# ---- Split => x,y with random_split
#
data_train_subset, data_test_subset=random_split(dataset, [data_train_len, data_test_len])
x_train = data_train_subset[:]["features"]
y_train = data_train_subset[:]["quality" ]
x_test = data_test_subset [:]["features"]
y_test = data_test_subset [:]["quality" ]
print('Original data shape was : ',dataset.data.shape)
print('x_train : ',x_train.shape, 'y_train : ',y_train.shape)
print('x_test : ',x_test.shape, 'y_test : ',y_test.shape)
```
%% Cell type:markdown id: tags:
### 3.3 - Use a DataLoader for training
The Dataset retrieves our dataset's features and labels one sample at a time. While training a model, we typically want to pass samples in minibatches and to reshuffle the data at every epoch to reduce overfitting. DataLoader is an iterable that abstracts this complexity for us behind an easy API.
%% Cell type:code id: tags:
``` python
# training batch data
train_loader = DataLoader(
    dataset     = data_train_subset,
    shuffle     = True,
    batch_size  = 20,
    num_workers = 2
)

# test batch data
test_loader = DataLoader(
    dataset     = data_test_subset,
    shuffle     = False,
    batch_size  = 20,
    num_workers = 2
)
```
%% Cell type:markdown id: tags:
## Step 4 - Build a model
More information about :
- [Optimizer](https://pytorch.org/docs/stable/optim.html)
- [Activation](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity)
- [Loss](https://pytorch.org/docs/stable/nn.html#loss-functions)
- [Metrics](https://torchmetrics.readthedocs.io/en/stable/)
%% Cell type:code id: tags:
``` python
class LitRegression(pl.LightningModule):

    def __init__(self, in_features=11):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(in_features, 128),   # hidden layer 1
            nn.ReLU(),                     # activation function
            nn.Linear(128, 128),           # hidden layer 2
            nn.ReLU(),                     # activation function
            nn.Linear(128, 1))             # output layer

    def forward(self, x):                  # forward pass
        x = self.model(x)
        return x

    # optimizer
    def configure_optimizers(self):
        optimizer = torch.optim.RMSprop(self.parameters(), lr=1e-4)
        return optimizer

    def training_step(self, batch, batch_idx):
        # defines the train loop.
        x_features, y_target = batch["features"], batch["quality"]

        # forward pass
        y_pred = self.model(x_features)

        # loss function MSE
        loss = F.mse_loss(y_pred, y_target)

        # metrics mae
        mae = mean_absolute_error(y_pred, y_target)

        # metrics mse
        mse = mean_squared_error(y_pred, y_target)

        metrics = {"train_loss": loss,
                   "train_mae" : mae,
                   "train_mse" : mse
                   }

        # logs metrics for each training_step
        self.log_dict(metrics,
                      on_step  = False,
                      on_epoch = True,
                      logger   = True,
                      prog_bar = True,
                      )
        return loss

    def validation_step(self, batch, batch_idx):
        # defines the validation loop.
        x_features, y_target = batch["features"], batch["quality"]

        # forward pass
        y_pred = self.model(x_features)

        # loss function MSE
        loss = F.mse_loss(y_pred, y_target)

        # metrics mae
        mae = mean_absolute_error(y_pred, y_target)

        # metrics mse
        mse = mean_squared_error(y_pred, y_target)

        metrics = {"val_loss": loss,
                   "val_mae" : mae,
                   "val_mse" : mse
                   }

        # logs metrics for each validation_step
        self.log_dict(metrics,
                      on_step  = False,
                      on_epoch = True,
                      logger   = True,
                      prog_bar = True,
                      )
        return metrics
%% Cell type:markdown id: tags:
## Step 5 - Train the model
### 5.1 - Get it
%% Cell type:code id: tags:
``` python
reg=LitRegression(in_features=11)
print(reg)
```
%% Cell type:markdown id: tags:
### 5.2 - Add callback
%% Cell type:code id: tags:
``` python
os.makedirs('./run/models', exist_ok=True)
save_dir = "./run/models/"
filename ='best-model-{epoch}-{val_loss:.2f}'
savemodel_callback = pl.callbacks.ModelCheckpoint(dirpath    = save_dir,
                                                  filename   = filename,
                                                  save_top_k = 1,
                                                  verbose    = False,
                                                  monitor    = "val_loss"
                                                  )
```
%% Cell type:markdown id: tags:
### 5.3 - Train it
%% Cell type:code id: tags:
``` python
# loggers data
os.makedirs(f'{run_dir}/logs', mode=0o750, exist_ok=True)
logger= TensorBoardLogger(save_dir=f'{run_dir}/logs',name="reg_logs")
```
%% Cell type:code id: tags:
``` python
# train model
trainer = pl.Trainer(accelerator          = 'auto',
                     max_epochs           = 100,
                     logger               = logger,
                     num_sanity_val_steps = 0,
                     callbacks            = [savemodel_callback, CustomTrainProgressBar()])
trainer.fit(model=reg, train_dataloaders=train_loader, val_dataloaders=test_loader)
```
%% Cell type:markdown id: tags:
## Step 6 - Evaluate
### 6.1 - Model evaluation
MAE = Mean Absolute Error (between the labels and predictions)
A MAE equal to 0.5 represents an average prediction error of 0.5 points on the quality scale (0 to 10).
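%% Cell type:markdown id: tags:
For intuition, a minimal sketch of the metric itself (added for illustration, with made-up values), using the same torchmetrics function as the training loop :
%% Cell type:code id: tags:
``` python
# Minimal MAE sketch on made-up quality scores (illustration only)
y_true = torch.tensor([5., 6., 7.])
y_pred = torch.tensor([5.5, 5.5, 6.5])
print('MAE :', mean_absolute_error(y_pred, y_true))   # -> tensor(0.5000), i.e. 0.5 quality points
```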
%% Cell type:code id: tags:
``` python
score=trainer.validate(model=reg, dataloaders=test_loader, verbose=False)
print('x_test / loss : {:5.4f}'.format(score[0]['val_loss']))
print('x_test / mae : {:5.4f}'.format(score[0]['val_mae']))
print('x_test / mse : {:5.4f}'.format(score[0]['val_mse']))
```
%% Cell type:markdown id: tags:
### 6.2 - Training history
To access logs with tensorboad :
- Under **Docker**, from a terminal launched via the jupyterlab launcher, use the following command:<br>
```tensorboard --logdir <path-to-logs> --host 0.0.0.0```
- If you're **not using Docker**, from a terminal :<br>
```tensorboard --logdir <path-to-logs>```
**Note:** Only one TensorBoard instance can be used at a time.
%% Cell type:markdown id: tags:
## Step 7 - Restore a model :
%% Cell type:markdown id: tags:
### 7.1 - Reload model
%% Cell type:code id: tags:
``` python
# Load the model from a checkpoint
loaded_model = LitRegression.load_from_checkpoint(savemodel_callback.best_model_path)
print("Loaded:")
print(loaded_model)
```
%% Cell type:markdown id: tags:
### 7.2 - Evaluate it :
%% Cell type:code id: tags:
``` python
score=trainer.validate(model=loaded_model, dataloaders=test_loader, verbose=False)
print('x_test / loss : {:5.4f}'.format(score[0]['val_loss']))
print('x_test / mae : {:5.4f}'.format(score[0]['val_mae']))
print('x_test / mse : {:5.4f}'.format(score[0]['val_mse']))
```
%% Cell type:markdown id: tags:
### 7.3 - Make a prediction
%% Cell type:code id: tags:
``` python
# ---- Pick n entries from our test set
n = 200
ii = np.random.randint(0,len(x_test),n)
x_sample = x_test[ii]
y_sample = y_test[ii]
```
%% Cell type:code id: tags:
``` python
# ---- Make some predictions :
# Sets the model in evaluation mode.
loaded_model.eval()
# Perform inference using the loaded model, without tracking gradients
with torch.no_grad():
    y_pred = loaded_model( x_sample )
```
%% Cell type:code id: tags:
``` python
# ---- Show it
print('Wine Prediction Real Delta')
for i in range(n):
    pred  = y_pred[i][0].item()
    real  = y_sample[i][0].item()
    delta = real - pred
    print(f'{i:03d} {pred:.2f} {real} {delta:+.2f} ')
```
%% Cell type:code id: tags:
``` python
fidle.end()
```
%% Cell type:markdown id: tags:
---
<img width="80px" src="../fidle/img/logo-paysage.svg"></img>
%% Cell type:code id: tags:
``` python
```
# ------------------------------------------------------------------
# _____ _ _ _
# | ___(_) __| | | ___
# | |_ | |/ _` | |/ _ \
# | _| | | (_| | | __/
# |_| |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2023
# ------------------------------------------------------------------
# 2.0 version by Achille Mbogol Touye (EFELIA-MIAI/SIMAP), sep 2023
import torch
import pandas as pd
import lightning.pytorch as pl


class WineQualityDataset(pl.LightningDataModule):
    """Wine Quality dataset."""

    def __init__(self, csv_file, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file.
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        super().__init__()
        self.csv_file  = csv_file
        self.data      = pd.read_csv(self.csv_file, header=0, sep=';')
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        features = self.data.iloc[idx, :-1].values.astype('float32')
        target   = self.data.iloc[idx, -1:].values.astype('float32')
        sample   = {'features': features, 'quality': target}

        if self.transform:
            sample = self.transform(sample)
        return sample


class Normalize(WineQualityDataset):
    """Normalize data."""

    def __init__(self, csv_file):
        mean, std = self.compute_mean_and_std(csv_file)
        self.mean = mean
        self.std  = std

    def compute_mean_and_std(self, csv_file):
        """Compute the mean and std for each feature."""
        dataset = WineQualityDataset(csv_file)
        mean = dataset.data.iloc[:, :-1].mean(axis=0).values.astype('float32')
        std  = dataset.data.iloc[:, :-1].std(axis=0).values.astype('float32')
        return mean, std

    def __call__(self, sample):
        features, target = sample['features'], sample['quality']
        norm_features = (features - self.mean) / self.std   # normalize features
        return {'features': norm_features,
                'quality' : target
                }


class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        features, target = sample['features'], sample['quality']
        return {'features': torch.from_numpy(features),
                'quality' : torch.from_numpy(target)
                }
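# --- Usage sketch (added for illustration ; the csv path is an assumption) ---
#
#   import torchvision.transforms as T
#   transforms = T.Compose([ Normalize('winequality-red.csv'), ToTensor() ])
#   dataset    = WineQualityDataset('winequality-red.csv', transform=transforms)
#   sample     = dataset[0]   # {'features': tensor of shape (11,), 'quality': tensor of shape (1,)}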
# ------------------------------------------------------------------
# _____ _ _ _
# | ___(_) __| | | ___
# | |_ | |/ _` | |/ _ \
# | _| | | (_| | | __/
# |_| |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2023
# ------------------------------------------------------------------
# 2.0 version by Achille Mbogol Touye (EFELIA-MIAI/SIMAP), sep 2023
from tqdm import tqdm as _tqdm
from lightning.pytorch.callbacks import TQDMProgressBar


# Progress-bar callback used to display the training metrics
class CustomTrainProgressBar(TQDMProgressBar):

    def __init__(self):
        super().__init__()
        self._val_progress_bar = _tqdm()

    def init_train_tqdm(self):
        bar = super().init_train_tqdm()
        bar.set_description("Training")
        return bar

    @property
    def val_progress_bar(self):
        if self._val_progress_bar is None:
            raise ValueError("The `_val_progress_bar` reference has not been set yet.")
        return self._val_progress_bar

    def on_validation_start(self, trainer, pl_module):
        # Disable the display of the validation progress bar
        self.val_progress_bar.disable = True
# base image
ARG PYTHON_VERSION=3.9
ARG docker_image_base=python:${PYTHON_VERSION}-slim
FROM ${docker_image_base}
# maintainers
LABEL maintainer1=soraya.arias@inria.fr maintainer2=jean-luc.parouty@simap.grenoble-inp.fr
ARG ARCH_VERSION=cpu
ARG BRANCH=pre-master
# Ensure a sane environment
ENV TZ=Europe/Paris LANG=C.UTF-8 LC_ALL=C.UTF-8 DEBIAN_FRONTEND=noninteractive
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \
    apt update --fix-missing && \
    apt install -y --no-install-recommends apt-utils \
                                           procps \
                                           python3-venv \
                                           python3-pip && \
    apt -y dist-upgrade && \
    apt clean && \
    rm -fr /var/lib/apt/lists/*
# copy Python requirement packages list in docker image
COPY requirements-${ARCH_VERSION}.txt /root/requirements-${ARCH_VERSION}.txt
# Update Python tools and install requirements packages for Fidle
RUN python3 -m pip install --upgrade pip && \
    pip3 install --no-cache-dir --upgrade -r /root/requirements-${ARCH_VERSION}.txt
# Install tensorboard & update jupyter
RUN pip3 install --no-cache-dir --upgrade tensorboard tensorboardX jupyter ipywidgets
# Remove the default python kernel logos
RUN rm /usr/local/share/jupyter/kernels/python3/logo*
# Change default logo and name kernels
COPY images/env-keras3.png /usr/local/share/jupyter/kernels/python3/logo-64x64.png
COPY images/env-keras3.svg /usr/local/share/jupyter/kernels/python3/logo-svg.svg
# Get Fidle datasets
RUN mkdir /data && \
    fid install_datasets --quiet --install_dir /data
# Get Fidle notebooks and create link
RUN mkdir /notebooks/ && \
    fid install_notebooks --notebooks fidle-${BRANCH} --quiet --install_dir /notebooks && \
    ln -s $(ls -1td /notebooks/* | head -1) /notebooks/last
# Add Jupyter configuration (no browser, listen all interfaces, ...)
COPY jupyter_lab_config.py /root/.jupyter/jupyter_lab_config.py
COPY notebook.json /root/.jupyter/nbconfig/notebook.json
# Jupyter notebook uses 8888
EXPOSE 8888
# tensorboard uses 6006
EXPOSE 6006
VOLUME /notebooks
WORKDIR /notebooks
# Set Keras backend
ENV KERAS_BACKEND=torch
# Set Python path to add fidle path
ENV PYTHONPATH=/notebooks/fidle-master/:$PYTHONPATH
# Set default shell (useful in the notebooks)
ENV SHELL=/bin/bash
# Set Fidle dataset directory variable
ENV FIDLE_DATASETS_DIR=/data/datasets-fidle
# Run a notebook by default
CMD ["jupyter", "lab"]
docker/images/env-keras3.png (binary image, 2.91 KiB)
<?xml version="1.0" encoding="UTF-8"?>
<svg id="Calque_2" data-name="Calque 2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<defs>
<style>
.cls-1 {
fill: #d00000;
}
.cls-1, .cls-2, .cls-3, .cls-4, .cls-5 {
stroke-width: 0px;
}
.cls-2 {
fill: none;
}
.cls-3 {
fill: #fff;
}
.cls-4 {
fill: #e12229;
}
.cls-5 {
fill: #ee4c2c;
}
</style>
</defs>
<g id="Mode_Isolation" data-name="Mode Isolation">
<g>
<rect class="cls-3" width="100" height="100"/>
<g id="group">
<path id="Path" class="cls-5" d="M84.64,15.79l-3.09,3.09c5.06,5.06,5.06,13.21,0,18.17-5.06,5.06-13.21,5.06-18.17,0-5.06-5.06-5.06-13.21,0-18.17l8.01-8.01,1.12-1.12V3.7l-12.08,12.08c-6.75,6.75-6.75,17.61,0,24.36,6.75,6.75,17.61,6.75,24.22,0,6.75-6.79,6.75-17.61,0-24.36Z"/>
<path id="Path-1" class="cls-5" d="M80.85,12.79c0,1.24-1.01,2.25-2.25,2.25s-2.25-1.01-2.25-2.25,1.01-2.25,2.25-2.25,2.25,1.01,2.25,2.25Z"/>
</g>
<g>
<g>
<path class="cls-2" d="M52.97,86.43c-4.89,1.33-6.52,1.26-7.02,1.15.37-.75,2.11-2.39,3.93-3.69.43-.31.54-.91.24-1.35-.3-.44-.89-.55-1.33-.24-2.58,1.83-5.48,4.39-4.67,6.16.31.67.95,1.12,2.5,1.12,1.4,0,3.55-.37,6.85-1.27.51-.14.81-.67.67-1.19-.13-.52-.66-.83-1.17-.69Z"/>
<g>
<path class="cls-4" d="M68.15,44.5c-.34,0-.63-.17-.87-.5-.3-.42-.64-.57-1.3-.57-.2,0-.4.01-.59.03-.22.01-.42.03-.62.03-.32,0-.79-.03-1.23-.27-1.36-.77-1.86-2.52-1.11-3.9.5-.92,1.46-1.5,2.49-1.5.48,0,.96.12,1.38.36,1.06.59,2.99,4.78,2.77,5.62l-.18.7-.74.02Z"/>
<path class="cls-3" d="M64.93,38.75c.31,0,.63.08.92.24.85.48,2.51,4.58,2.3,4.58-.02,0-.05-.03-.1-.11-.58-.82-1.33-.97-2.06-.97-.43,0-.84.05-1.21.05-.29,0-.56-.03-.77-.15-.92-.52-1.26-1.7-.75-2.64.35-.64,1-1.01,1.67-1.01M64.93,36.87c-1.38,0-2.66.76-3.32,1.99-.99,1.83-.33,4.15,1.48,5.16.62.35,1.26.39,1.68.39.21,0,.44-.01.68-.03.17-.01.35-.02.53-.02.41,0,.45.05.53.17.55.79,1.26.9,1.64.9h1.45l.38-1.41c.11-.43.24-.93-.94-3.48-1.06-2.29-1.74-2.9-2.27-3.2-.56-.32-1.2-.48-1.84-.48h0Z"/>
</g>
<path class="cls-4" d="M62.06,75.3c-.39-.47-.34-1.18.12-1.58.46-.4,1.16-.35,1.55.13,5.79,6.92,15.18,8.77,24.52,4.83.95-2.66,1.42-5.45,1.49-8.18,0-7.41-3.53-14.26-9.52-18.38-2.78-1.91-9.2-4.45-17.62-3.04-6.19,1.04-12.61,5.82-15.12,7.97-1.51,1.29-19.5,18.68-27,15.22-5.07-2.35,3.99-10.88-.17-18.68-.11-.21-.41-.23-.55-.04-2.12,2.91-4.18,6.41-7,4.84-1.26-.7-2.39-2.94-3.26-4.36-.18-.28-.61-.14-.6.19.32,9.8,4.97,17.01,8.71,21.57,6.47,7.9,17.8,17.09,36.12,18.95,18.88,1.75,28.93-4.73,33.3-13.21-2.84.96-5.67,1.44-8.4,1.44-6.45,0-12.34-2.63-16.56-7.67ZM53.46,88.31c-3.3.9-5.45,1.27-6.85,1.27-1.55,0-2.19-.45-2.5-1.12-.81-1.77,2.1-4.32,4.67-6.16.43-.3,1.03-.2,1.33.24.3.44.19,1.05-.24,1.35-1.83,1.3-3.56,2.94-3.93,3.69.5.11,2.14.18,7.02-1.15.51-.14,1.03.17,1.17.69.14.52-.16,1.05-.67,1.19Z"/>
<g>
<path class="cls-4" d="M70.65,47.4c-.36,0-.83-.21-1-.82-.32-1.15.43-5.99,2.83-7.43.42-.25.9-.39,1.39-.39,1.04,0,2,.58,2.5,1.51.75,1.38.25,3.13-1.11,3.9-.15.09-.33.18-.53.28-.93.49-2.34,1.22-3.2,2.45-.3.42-.68.49-.88.49h0Z"/>
<path class="cls-3" d="M73.88,39.71c.67,0,1.33.38,1.67,1.02.51.94.17,2.12-.75,2.64s-2.86,1.33-4.04,3.01c-.04.06-.08.09-.11.09-.43,0,.1-5.18,2.31-6.51.29-.17.6-.25.91-.25M73.88,37.83c-.66,0-1.31.18-1.88.52-2.91,1.74-3.65,7.04-3.25,8.48.25.9,1.01,1.5,1.9,1.5.65,0,1.25-.32,1.64-.89.73-1.04,1.97-1.69,2.87-2.16.21-.11.39-.21.55-.29,1.81-1.02,2.47-3.33,1.48-5.17-.67-1.23-1.94-2-3.32-2h0Z"/>
</g>
<g>
<path class="cls-4" d="M70.32,38.97c-.19,0-.68-.07-.96-.67-.34-.73-.85-3.85.48-5.42.42-.5,1.03-.78,1.67-.78.54,0,1.05.2,1.44.56.86.8.91,2.2.09,3.11-.08.09-.17.19-.28.3-.48.5-1.19,1.26-1.48,2.17-.17.54-.62.73-.96.73h0Z"/>
<path class="cls-3" d="M71.52,33.04c.29,0,.58.1.8.31.49.46.51,1.26.03,1.8s-1.54,1.5-1.95,2.81c-.02.06-.04.08-.06.08-.28,0-.88-3.23.23-4.55.25-.3.61-.45.96-.45M71.52,31.16c-.92,0-1.79.41-2.39,1.11-1.6,1.89-1.08,5.4-.61,6.42.52,1.13,1.52,1.22,1.81,1.22.85,0,1.58-.54,1.85-1.39.22-.7.83-1.34,1.27-1.81.11-.12.21-.23.3-.32,1.15-1.29,1.08-3.27-.15-4.42-.56-.52-1.3-.81-2.07-.81h0Z"/>
</g>
</g>
<g>
<ellipse class="cls-3" cx="75.51" cy="68.45" rx="3.52" ry="3.88"/>
<ellipse class="cls-4" cx="76.93" cy="69.31" rx="2.38" ry="2.42"/>
</g>
</g>
<g>
<path class="cls-3" d="M43.24,43.2s0,0,0,0H11.89s0,0,0,0V11.85s0,0,0,0h31.35s0,0,0,0v31.35h0Z"/>
<path class="cls-1" d="M42.72,42.68s0,0,0,0H12.41s0,0,0,0V12.37s0,0,0,0h30.31s0,0,0,0v30.31h0Z"/>
<path class="cls-3" d="M20.68,35.76s.01.05.03.07l.52.52s.05.03.07.03h1.78s.05-.01.07-.03l.52-.52s.03-.05.03-.07v-5.63s.01-.05.03-.07l2.26-2.15s.04-.01.05,0l5.7,8.44s.04.03.06.03h2.52s.05-.02.06-.04l.46-.88s0-.05,0-.07l-6.67-9.66s-.01-.05,0-.06l6.13-6.1s.03-.05.03-.07v-.11s0-.06-.02-.08l-.35-.81s-.04-.04-.06-.04h-2.49s-.05.01-.07.03l-7.62,7.64s-.03.01-.03-.01v-7.01s-.01-.06-.03-.07l-.51-.55s-.05-.03-.07-.03h-1.79s-.05.01-.07.03l-.52.56s-.03.05-.03.07v16.65h0Z"/>
</g>
</g>
</g>
</svg>
# Configuration file for lab.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## Set the log level by value or name.
# Choices: any of [0, 10, 20, 30, 40, 50, 'DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL']
# Default: 30
c.Application.log_level = 'INFO'
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
# Default: False
# c.JupyterApp.answer_yes = False
## Full path of a config file.
# Default: ''
# c.JupyterApp.config_file = ''
## Specify a config file to load.
# Default: ''
# c.JupyterApp.config_file_name = ''
## Generate default config file.
# Default: False
# c.JupyterApp.generate_config = False
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.JupyterApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.JupyterApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.JupyterApp.log_level = 30
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.JupyterApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.JupyterApp.show_config_json = False
#------------------------------------------------------------------------------
# ExtensionApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Base class for configurable Jupyter Server Extension Applications.
#
# ExtensionApp subclasses can be initialized two ways:
# 1. Extension is listed as a jpserver_extension, and ServerApp calls
# its load_jupyter_server_extension classmethod. This is the
# classic way of loading a server extension.
# 2. Extension is launched directly by calling its `launch_instance`
# class method. This method can be set as a entry_point in
# the extensions setup.py
## Answer yes to any prompts.
# See also: JupyterApp.answer_yes
# c.ExtensionApp.answer_yes = False
## Full path of a config file.
# See also: JupyterApp.config_file
# c.ExtensionApp.config_file = ''
## Specify a config file to load.
# See also: JupyterApp.config_file_name
# c.ExtensionApp.config_file_name = ''
# Default: ''
# c.ExtensionApp.default_url = ''
## Generate default config file.
# See also: JupyterApp.generate_config
# c.ExtensionApp.generate_config = False
## Handlers appended to the server.
# Default: []
# c.ExtensionApp.handlers = []
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.ExtensionApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.ExtensionApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.ExtensionApp.log_level = 30
## Whether to open in a browser after starting.
# The specific browser used is platform dependent and
# determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser
# (ServerApp.browser) configuration option.
# Default: False
# c.ExtensionApp.open_browser = False
## Settings that will be passed to the server.
# Default: {}
# c.ExtensionApp.settings = {}
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.ExtensionApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.ExtensionApp.show_config_json = False
## paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server machine,
# or overriding individual files in the IPython
# Default: []
# c.ExtensionApp.static_paths = []
## Url where the static assets for the extension are served.
# Default: ''
# c.ExtensionApp.static_url_prefix = ''
## Paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# Default: []
# c.ExtensionApp.template_paths = []
#------------------------------------------------------------------------------
# LabServerApp(ExtensionApp) configuration
#------------------------------------------------------------------------------
## A Lab Server Application that runs out-of-the-box
## "A list of comma-separated URIs to get the allowed extensions list
#
# .. versionchanged:: 2.0.0
# `LabServerApp.whitelist_uris` renamed to `allowed_extensions_uris`
# Default: ''
# c.LabServerApp.allowed_extensions_uris = ''
## Answer yes to any prompts.
# See also: JupyterApp.answer_yes
# c.LabServerApp.answer_yes = False
## The application settings directory.
# Default: ''
# c.LabServerApp.app_settings_dir = ''
## The url path for the application.
# Default: '/lab'
# c.LabServerApp.app_url = '/lab'
## Deprecated, use `LabServerApp.blocked_extensions_uris`
# Default: ''
# c.LabServerApp.blacklist_uris = ''
## A list of comma-separated URIs to get the blocked extensions list
#
# .. versionchanged:: 2.0.0
# `LabServerApp.blacklist_uris` renamed to `blocked_extensions_uris`
# Default: ''
# c.LabServerApp.blocked_extensions_uris = ''
## Whether to cache files on the server. This should be `True` except in dev
# mode.
# Default: True
# c.LabServerApp.cache_files = True
## Full path of a config file.
# See also: JupyterApp.config_file
# c.LabServerApp.config_file = ''
## Specify a config file to load.
# See also: JupyterApp.config_file_name
# c.LabServerApp.config_file_name = ''
## Extra paths to look for federated JupyterLab extensions
# Default: []
# c.LabServerApp.extra_labextensions_path = []
## Generate default config file.
# See also: JupyterApp.generate_config
# c.LabServerApp.generate_config = False
## Handlers appended to the server.
# See also: ExtensionApp.handlers
# c.LabServerApp.handlers = []
## Options to pass to the jinja2 environment for this
# Default: {}
# c.LabServerApp.jinja2_options = {}
## The standard paths to look in for federated JupyterLab extensions
# Default: []
# c.LabServerApp.labextensions_path = []
## The url for federated JupyterLab extensions
# Default: ''
# c.LabServerApp.labextensions_url = ''
## The interval delay in seconds to refresh the lists
# Default: 3600
# c.LabServerApp.listings_refresh_seconds = 3600
## The optional kwargs to use for the listings HTTP requests as
# described on https://2.python-requests.org/en/v2.7.0/api/#requests.request
# Default: {}
# c.LabServerApp.listings_request_options = {}
## The listings url.
# Default: ''
# c.LabServerApp.listings_url = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.LabServerApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.LabServerApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.LabServerApp.log_level = 30
## Whether to open in a browser after starting.
# See also: ExtensionApp.open_browser
# c.LabServerApp.open_browser = False
## The optional location of the settings schemas directory. If given, a handler
# will be added for settings.
# Default: ''
# c.LabServerApp.schemas_dir = ''
## Settings that will be passed to the server.
# See also: ExtensionApp.settings
# c.LabServerApp.settings = {}
## The url path of the settings handler.
# Default: ''
# c.LabServerApp.settings_url = ''
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.LabServerApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.LabServerApp.show_config_json = False
## The optional location of local static files. If given, a static file handler
# will be added.
# Default: ''
# c.LabServerApp.static_dir = ''
## paths to search for serving static files.
# See also: ExtensionApp.static_paths
# c.LabServerApp.static_paths = []
## Url where the static assets for the extension are served.
# See also: ExtensionApp.static_url_prefix
# c.LabServerApp.static_url_prefix = ''
## Paths to search for serving jinja templates.
# See also: ExtensionApp.template_paths
# c.LabServerApp.template_paths = []
## The application templates directory.
# Default: ''
# c.LabServerApp.templates_dir = ''
## The optional location of the themes directory. If given, a handler will be
# added for themes.
# Default: ''
# c.LabServerApp.themes_dir = ''
## The theme url.
# Default: ''
# c.LabServerApp.themes_url = ''
## The url path of the translations handler.
# Default: ''
# c.LabServerApp.translations_api_url = ''
## The url path of the tree handler.
# Default: ''
# c.LabServerApp.tree_url = ''
## The optional location of the user settings directory.
# Default: ''
# c.LabServerApp.user_settings_dir = ''
## Deprecated, use `LabServerApp.allowed_extensions_uris`
# Default: ''
# c.LabServerApp.whitelist_uris = ''
## The url path of the workspaces API.
# Default: ''
# c.LabServerApp.workspaces_api_url = ''
## The optional location of the saved workspaces directory. If given, a handler
# will be added for workspaces.
# Default: ''
# c.LabServerApp.workspaces_dir = ''
#------------------------------------------------------------------------------
# LabApp(LabServerApp) configuration
#------------------------------------------------------------------------------
##
# See also: LabServerApp.allowed_extensions_uris
# c.LabApp.allowed_extensions_uris = ''
## Answer yes to any prompts.
# See also: JupyterApp.answer_yes
# c.LabApp.answer_yes = False
## The app directory to launch JupyterLab from.
# Default: None
# c.LabApp.app_dir = None
## The application settings directory.
# Default: ''
# c.LabApp.app_settings_dir = ''
## The url path for the application.
# Default: '/lab'
# c.LabApp.app_url = '/lab'
## Deprecated, use `LabServerApp.blocked_extensions_uris`
# See also: LabServerApp.blacklist_uris
# c.LabApp.blacklist_uris = ''
##
# See also: LabServerApp.blocked_extensions_uris
# c.LabApp.blocked_extensions_uris = ''
## Whether to cache files on the server. This should be `True` except in dev
# mode.
# Default: True
# c.LabApp.cache_files = True
## Whether to enable collaborative mode (experimental).
# Default: False
# c.LabApp.collaborative = False
## Full path of a config file.
# See also: JupyterApp.config_file
# c.LabApp.config_file = ''
## Specify a config file to load.
# See also: JupyterApp.config_file_name
# c.LabApp.config_file_name = ''
## Whether to start the app in core mode. In this mode, JupyterLab
# will run using the JavaScript assets that are within the installed
# JupyterLab Python package. In core mode, third party extensions are disabled.
# The `--dev-mode` flag is an alias to this to be used when the Python package
# itself is installed in development mode (`pip install -e .`).
# Default: False
# c.LabApp.core_mode = False
## The default URL to redirect to from `/`
# Default: '/lab'
c.LabApp.default_url = '/lab/tree/README.ipynb'
## Whether to start the app in dev mode. Uses the unpublished local
# JavaScript packages in the `dev_mode` folder. In this case JupyterLab will
# show a red stripe at the top of the page. It can only be used if JupyterLab
# is installed as `pip install -e .`.
# Default: False
# c.LabApp.dev_mode = False
## Whether to expose the global app instance to browser via window.jupyterlab
# Default: False
# c.LabApp.expose_app_in_browser = False
## Whether to load prebuilt extensions in dev mode. This may be
# useful to run and test prebuilt extensions in development installs of
# JupyterLab. APIs in a JupyterLab development install may be
# incompatible with published packages, so prebuilt extensions compiled
# against published packages may not work correctly.
# Default: False
# c.LabApp.extensions_in_dev_mode = False
## Extra paths to look for federated JupyterLab extensions
# Default: []
# c.LabApp.extra_labextensions_path = []
## Generate default config file.
# See also: JupyterApp.generate_config
# c.LabApp.generate_config = False
## Handlers appended to the server.
# See also: ExtensionApp.handlers
# c.LabApp.handlers = []
## Options to pass to the jinja2 environment for this
# Default: {}
# c.LabApp.jinja2_options = {}
## The standard paths to look in for federated JupyterLab extensions
# Default: []
# c.LabApp.labextensions_path = []
## The url for federated JupyterLab extensions
# Default: ''
# c.LabApp.labextensions_url = ''
## The interval delay in seconds to refresh the lists
# See also: LabServerApp.listings_refresh_seconds
# c.LabApp.listings_refresh_seconds = 3600
## The optional kwargs to use for the listings HTTP requests as
# described on https://2.python-requests.org/en/v2.7.0/api/#requests.request
# See also: LabServerApp.listings_request_options
# c.LabApp.listings_request_options = {}
## The listings url.
# Default: ''
# c.LabApp.listings_url = ''
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.LabApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.LabApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.LabApp.log_level = 30
## Whether to open in a browser after starting.
# See also: ExtensionApp.open_browser
# c.LabApp.open_browser = False
## The override url for static lab assets, typically a CDN.
# Default: ''
# c.LabApp.override_static_url = ''
## The override url for static lab theme assets, typically a CDN.
# Default: ''
# c.LabApp.override_theme_url = ''
## The optional location of the settings schemas directory. If given, a handler
# will be added for settings.
# Default: ''
# c.LabApp.schemas_dir = ''
## Settings that will be passed to the server.
# See also: ExtensionApp.settings
# c.LabApp.settings = {}
## The url path of the settings handler.
# Default: ''
# c.LabApp.settings_url = ''
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.LabApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.LabApp.show_config_json = False
## Splice source packages into app directory.
# Default: False
# c.LabApp.splice_source = False
## The optional location of local static files. If given, a static file handler
# will be added.
# Default: ''
# c.LabApp.static_dir = ''
## Paths to search for serving static files.
# See also: ExtensionApp.static_paths
# c.LabApp.static_paths = []
## Url where the static assets for the extension are served.
# See also: ExtensionApp.static_url_prefix
# c.LabApp.static_url_prefix = ''
## Paths to search for serving jinja templates.
# See also: ExtensionApp.template_paths
# c.LabApp.template_paths = []
## The application templates directory.
# Default: ''
# c.LabApp.templates_dir = ''
## The optional location of the themes directory. If given, a handler will be
# added for themes.
# Default: ''
# c.LabApp.themes_dir = ''
## The theme url.
# Default: ''
# c.LabApp.themes_url = ''
## The url path of the translations handler.
# Default: ''
# c.LabApp.translations_api_url = ''
## The url path of the tree handler.
# Default: ''
# c.LabApp.tree_url = ''
## The directory for user settings.
# Default: '/root/.jupyter/lab/user-settings'
# c.LabApp.user_settings_dir = '/root/.jupyter/lab/user-settings'
## Whether to serve the app in watch mode
# Default: False
# c.LabApp.watch = False
## Deprecated, use `LabServerApp.allowed_extensions_uris`
# See also: LabServerApp.whitelist_uris
# c.LabApp.whitelist_uris = ''
## The url path of the workspaces API.
# Default: ''
# c.LabApp.workspaces_api_url = ''
## The directory for workspaces
# Default: '/root/.jupyter/lab/workspaces'
# c.LabApp.workspaces_dir = '/root/.jupyter/lab/workspaces'
#------------------------------------------------------------------------------
# ServerApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
# Default: False
# c.ServerApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# Default: ''
# c.ServerApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# Default: ''
# c.ServerApp.allow_origin_pat = ''
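# For example, to accept any HTTPS subdomain of a given domain (the domain
# below is illustrative only):
#
# c.ServerApp.allow_origin_pat = r'^https://.*\.example\.org$'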
## Allow password to be changed at login for the Jupyter server.
#
# While logging in with a token, the Jupyter server UI will give the user the
# opportunity to enter a new password at the same time, which will then replace
# the token login mechanism.
#
# This can be set to False to prevent changing the password from
# the UI/API.
# Default: True
c.ServerApp.allow_password_change = False
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header
# shows that the browser thinks it's on a non-local domain.
# Setting this option to True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a
# local IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
# along with hostnames configured in local_hostnames.
# Default: False
# c.ServerApp.allow_remote_access = False
## Whether to allow the user to run the server as root.
# Default: False
c.ServerApp.allow_root = True
## Answer yes to any prompts.
# See also: JupyterApp.answer_yes
# c.ServerApp.answer_yes = False
## "
# Require authentication to access prometheus metrics.
# Default: True
# c.ServerApp.authenticate_prometheus = True
## Reload the webapp when changes are made to any Python src files.
# Default: False
# c.ServerApp.autoreload = False
## The base URL for the Jupyter server.
#
# Leading and trailing slashes can be omitted,
# and will automatically be added.
# Default: '/'
# c.ServerApp.base_url = '/'
## Specify what command to use to invoke a web
# browser when starting the server. If not specified, the
# default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the
# BROWSER environment variable to override it.
# Default: ''
# c.ServerApp.browser = ''
## The full path to an SSL/TLS certificate file.
# Default: ''
# c.ServerApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
# Default: ''
# c.ServerApp.client_ca = ''
## Full path of a config file.
# See also: JupyterApp.config_file
# c.ServerApp.config_file = ''
## Specify a config file to load.
# See also: JupyterApp.config_file_name
# c.ServerApp.config_file_name = ''
## The config manager class to use
# Default: 'jupyter_server.services.config.manager.ConfigManager'
# c.ServerApp.config_manager_class = 'jupyter_server.services.config.manager.ConfigManager'
## The content manager class to use.
# Default: 'jupyter_server.services.contents.largefilemanager.LargeFileManager'
# c.ServerApp.contents_manager_class = 'jupyter_server.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
# Default: {}
# c.ServerApp.cookie_options = {}
## The random bytes used to secure cookies.
# By default this is a new random number every time you start the server.
# Set it to a value in a config file to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# Default: b''
# c.ServerApp.cookie_secret = b''
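# A minimal sketch of reading a persistent secret from a file, as the note
# above suggests (the path below is an assumption, not a Fidle convention):
#
# from pathlib import Path
# c.ServerApp.cookie_secret = Path('/etc/jupyter/cookie_secret').read_bytes()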
## The file where the cookie secret is stored.
# Default: ''
# c.ServerApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL,
# with the given value when displaying URL to the users. Do not change
# the actual connection URL. If authentication token is enabled, the
# token is added to the custom URL automatically.
#
# This option is intended to be used when the URL to display to the user
# cannot be determined reliably by the Jupyter server (proxified
# or containerized setups for example).
# Default: ''
# c.ServerApp.custom_display_url = ''
## The default URL to redirect to from `/`
# Default: '/'
c.ServerApp.default_url = '/lab/tree/README.ipynb'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
# requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and token), or
# - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication.
# These services can disable all authentication and security checks,
# with the full knowledge of what that implies.
# Default: False
# c.ServerApp.disable_check_xsrf = False
## Handlers that should be loaded at higher priority than the default services.
# Default: []
# c.ServerApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the Jupyter server machine,
# or overriding individual static files shipped with IPython/Jupyter.
# Default: []
# c.ServerApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from jupyter_server.templates.
# Default: []
# c.ServerApp.extra_template_paths = []
## Open the named file when the application is launched.
# Default: ''
# c.ServerApp.file_to_run = ''
## The URL prefix where files are opened directly.
# Default: 'notebooks'
# c.ServerApp.file_url_prefix = 'notebooks'
## Generate default config file.
# See also: JupyterApp.generate_config
# c.ServerApp.generate_config = False
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
# Default: {}
# c.ServerApp.get_secure_cookie_kwargs = {}
## (bytes/sec)
# Maximum rate at which stream output can be sent on iopub before it is
# limited.
# Default: 1000000
# c.ServerApp.iopub_data_rate_limit = 1000000
## (msgs/sec)
# Maximum rate at which messages can be sent on iopub before they are
# limited.
# Default: 1000
# c.ServerApp.iopub_msg_rate_limit = 1000
## The IP address the Jupyter server will listen on.
# Default: 'localhost'
c.ServerApp.ip = '0.0.0.0'
## Supply extra arguments that will be passed to Jinja environment.
# Default: {}
# c.ServerApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
# Default: {}
# c.ServerApp.jinja_template_vars = {}
## Dict of Python modules to load as Jupyter server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
# Default: {}
# c.ServerApp.jpserver_extensions = {}
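# For example, to force-enable the JupyterLab server extension (assuming the
# jupyterlab package is installed in this environment):
#
# c.ServerApp.jpserver_extensions = {'jupyterlab': True}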
## The kernel manager class to use.
# Default: 'jupyter_server.services.kernels.kernelmanager.AsyncMappingKernelManager'
# c.ServerApp.kernel_manager_class = 'jupyter_server.services.kernels.kernelmanager.AsyncMappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The API of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# Default: 'jupyter_client.kernelspec.KernelSpecManager'
# c.ServerApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## Preferred kernel message protocol over websocket to use (default: None). If an
# empty string is passed, select the legacy protocol. If None, the selected
# protocol will depend on what the front-end supports (usually the most recent
# protocol supported by the back-end and the front-end).
# Default: None
# c.ServerApp.kernel_ws_protocol = None
## The full path to a private key file for usage with SSL/TLS.
# Default: ''
# c.ServerApp.keyfile = ''
## Whether to limit the rate of IOPub messages (default: True). If True, use
# iopub_msg_rate_limit, iopub_data_rate_limit and/or rate_limit_window to tune
# the rate.
# Default: True
# c.ServerApp.limit_rate = True
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
# as local as well.
# Default: ['localhost']
# c.ServerApp.local_hostnames = ['localhost']
## The date format used by logging formatters for %(asctime)s
# See also: Application.log_datefmt
# c.ServerApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
# See also: Application.log_format
# c.ServerApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
# See also: Application.log_level
# c.ServerApp.log_level = 30
## The login handler class to use.
# Default: 'jupyter_server.auth.login.LoginHandler'
# c.ServerApp.login_handler_class = 'jupyter_server.auth.login.LoginHandler'
## The logout handler class to use.
# Default: 'jupyter_server.auth.logout.LogoutHandler'
# c.ServerApp.logout_handler_class = 'jupyter_server.auth.logout.LogoutHandler'
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
# Default: 536870912
# c.ServerApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for use
# by the buffer manager.
# Default: 536870912
# c.ServerApp.max_buffer_size = 536870912
## Gets or sets a lower bound on the open file handles process resource limit.
# This may need to be increased if you run into an OSError: [Errno 24] Too many
# open files. This is not applicable when running on Windows.
# Default: 0
# c.ServerApp.min_open_files_limit = 0
## DEPRECATED, use root_dir.
# Default: ''
# c.ServerApp.notebook_dir = ''
## Whether to open in a browser after starting.
# The specific browser used is platform dependent and
# determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser
# (ServerApp.browser) configuration option.
# Default: False
c.ServerApp.open_browser = False
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from jupyter_server.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# Default: ''
c.ServerApp.password = ''
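# A minimal sketch of setting a hashed password non-interactively, following
# the help text above (the passphrase below is illustrative only):
#
# from jupyter_server.auth import passwd
# c.ServerApp.password = passwd('change-me')  # e.g. 'argon2:$argon2id$...'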
## Forces users to use a password for the Jupyter server.
# This is useful in a multi user environment, for instance when
# everybody in the LAN can access each other's machine through ssh.
#
# In such a case, serving on localhost is not secure since
# any user can connect to the Jupyter server via ssh.
# Default: False
# c.ServerApp.password_required = False
## The port the server will listen on (env: JUPYTER_PORT).
# Default: 0
c.ServerApp.port = 8888
## The number of additional ports to try if the specified port is not available
# (env: JUPYTER_PORT_RETRIES).
# Default: 50
# c.ServerApp.port_retries = 50
## Preferred starting directory to use for notebooks and kernels.
# Default: ''
# c.ServerApp.preferred_dir = ''
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# Default: 'disabled'
# c.ServerApp.pylab = 'disabled'
## If True, display controls to shut down the Jupyter server, such as menu items
# or buttons.
# Default: True
c.ServerApp.quit_button = True
## (sec) Time window used to
# check the message and data rate limits.
# Default: 3
# c.ServerApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
# Default: False
# c.ServerApp.reraise_server_extension_failures = False
## The directory to use for notebooks and kernels.
# Default: ''
c.ServerApp.root_dir = '/notebooks/last'
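# A sketch of the version-driven variant hinted at in earlier revisions of this
# file (assumes the deployment exports FIDLE_MASTER_VERSION, e.g. '2.4.1'):
#
# import os
# fidle_master_version = os.environ.get('FIDLE_MASTER_VERSION', 'last')
# c.ServerApp.root_dir = f'/notebooks/{fidle_master_version}'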
## The session manager class to use.
# Default: 'jupyter_server.services.sessions.sessionmanager.SessionManager'
# c.ServerApp.session_manager_class = 'jupyter_server.services.sessions.sessionmanager.SessionManager'
## Instead of starting the Application, dump configuration to stdout
# See also: Application.show_config
# c.ServerApp.show_config = False
## Instead of starting the Application, dump configuration to stdout (as JSON)
# See also: Application.show_config_json
# c.ServerApp.show_config_json = False
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the Jupyter server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
# Default: 0
# c.ServerApp.shutdown_no_activity_timeout = 0
## The UNIX socket the Jupyter server will listen on.
# Default: ''
# c.ServerApp.sock = ''
## The permissions mode for UNIX socket creation (default: 0600).
# Default: '0600'
# c.ServerApp.sock_mode = '0600'
## Supply SSL options for the tornado HTTPServer.
# See the tornado docs for details.
# Default: {}
# c.ServerApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
# Default: {}
# c.ServerApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the server more secure by itself.
# Anything the user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package
# is not available.
# Default: True
# c.ServerApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
# with the JUPYTER_TOKEN environment variable.
#
# When no password is enabled,
# the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which
# is NOT RECOMMENDED.
# Default: '<generated>'
# c.ServerApp.token = '<generated>'
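# A hedged example of pinning the token instead of letting the server generate
# one (the literal value below is illustrative only):
#
# c.ServerApp.token = 'my-fixed-token'  # equivalently: export JUPYTER_TOKEN=my-fixed-token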
## Supply overrides for the tornado.web.Application that the Jupyter server uses.
# Default: {}
# c.ServerApp.tornado_settings = {}
## Whether or not to trust the X-Scheme/X-Forwarded-Proto and
# X-Real-Ip/X-Forwarded-For headers sent by the upstream reverse proxy.
# Necessary if the proxy handles SSL.
# Default: False
# c.ServerApp.trust_xheaders = False
## Disable launching browser by redirect file.
#
# For versions of notebook > 5.7.2, a security measure was added that
# prevents the authentication token used to launch the browser from being
# visible. This feature makes it difficult for other users on a multi-user
# system to run code in your Jupyter session as you.
# However, in some environments (like Windows Subsystem for Linux (WSL) and
# Chromebooks), launching a browser using a redirect file can lead to the
# browser failing to load. This is because of the difference in file
# structures/paths between the runtime and the browser.
#
# Setting this to False disables this behavior, allowing the browser
# to launch using a URL and visible token (as before).
# Default: True
# c.ServerApp.use_redirect_file = True
## Specify where to open the server on startup. This is the
# `new` argument passed to the standard library method `webbrowser.open`.
# The behaviour is not guaranteed, but depends on browser support. Valid
# values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
# Default: 2
# c.ServerApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
# Default: None
# c.ServerApp.websocket_compression_options = None
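# For example, per the help text above, an empty dict is enough to enable
# compression with tornado's defaults:
#
# c.ServerApp.websocket_compression_options = {}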
## The base URL for websockets,
# if it differs from the HTTP server (hint: it almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# Default: ''
# c.ServerApp.websocket_url = ''
# ----------------------------------------------------
# ______ _ _ _ __ __
# | ____(_) | | | \ \ / /
# | |__ _ __| | | ___ \ \ / /__ _ ____ __
# | __| | |/ _` | |/ _ \ \ \/ / _ \ '_ \ \ / /
# | | | | (_| | | __/ \ / __/ | | \ V /
# |_| |_|\__,_|_|\___| \/ \___|_| |_|\_/
# Fidle pip virtual env
# ----------------------------------------------------
#
# To install your Fidle env, see https://fidle.cnrs.fr/installation
#
# Keras 3 / PyTorch version (Python 3.9.2)
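#
# A minimal installation sketch (the env name and file name are assumptions):
#
#   python3 -m venv fidle-env
#   source fidle-env/bin/activate
#   pip install -r requirements.txt
#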
--extra-index-url https://download.pytorch.org/whl/cpu
torch
torchvision
torch-geometric
torchtext
torchdata
lightning
tensorboard
keras
transformers
numpy
Scikit-image
Scikit-learn
Matplotlib
plotly
seaborn
einops
datasets
barviz
pyarrow
Pandas
Pandoc
pyyaml
Jupyterlab
fidle
# ----------------------------------------------------
# ______ _ _ _ __ __
# | ____(_) | | | \ \ / /
# | |__ _ __| | | ___ \ \ / /__ _ ____ __
# | __| | |/ _` | |/ _ \ \ \/ / _ \ '_ \ \ / /
# | | | | (_| | | __/ \ / __/ | | \ V /
# |_| |_|\__,_|_|\___| \/ \___|_| |_|\_/
# Fidle pip virtual env
# ----------------------------------------------------
#
# To install your Fidle env, see https://fidle.cnrs.fr/installation
#
# Keras 3 / PyTorch version (Python 3.9.2)
torch
torchvision
torch-geometric
torchtext
torchdata
lightning
tensorboard
keras
transformers
numpy
Scikit-image
Scikit-learn
Matplotlib
plotly
seaborn
einops
datasets
barviz
pyarrow
Pandas
Pandoc
pyyaml
Jupyterlab
fidle
name: fidle
channels:
- defaults
dependencies:
- _libgcc_mutex=0.1=main
- _tflow_select=2.1.0=gpu
- absl-py=0.9.0=py37_0
- astor=0.8.0=py37_0
- attrs=19.3.0=py_0
- backcall=0.1.0=py37_0
- blas=1.0=mkl
- bleach=3.1.0=py37_0
- blosc=1.16.3=hd408876_0
- bzip2=1.0.8=h7b6447c_0
- c-ares=1.15.0=h7b6447c_1001
- ca-certificates=2020.1.1=0
- cairo=1.14.12=h8948797_3
- certifi=2019.11.28=py37_0
- cloudpickle=1.3.0=py_0
- cudatoolkit=10.0.130=0
- cudnn=7.6.5=cuda10.0_0
- cupti=10.0.130=0
- cycler=0.10.0=py37_0
- cytoolz=0.10.1=py37h7b6447c_0
- dask-core=2.10.1=py_0
- dbus=1.13.12=h746ee38_0
- decorator=4.4.1=py_0
- defusedxml=0.6.0=py_0
- entrypoints=0.3=py37_0
- expat=2.2.6=he6710b0_0
- fontconfig=2.13.0=h9420a91_0
- freetype=2.9.1=h8a8886c_1
- fribidi=1.0.5=h7b6447c_0
- gast=0.2.2=py37_0
- glib=2.63.1=h5a9c865_0
- gmp=6.1.2=h6c8ec71_1
- google-pasta=0.1.8=py_0
- graphite2=1.3.13=h23475e2_0
- graphviz=2.40.1=h21bd128_2
- grpcio=1.16.1=py37hf8bcb03_1
- gst-plugins-base=1.14.0=hbbd80ab_1
- gstreamer=1.14.0=hb453b48_1
- h5py=2.10.0=py37h7918eee_0
- harfbuzz=1.8.8=hffaf4a1_0
- hdf5=1.10.4=hb1b8bf9_0
- icu=58.2=h9c2bf20_1
- imageio=2.6.1=py37_0
- importlib_metadata=1.5.0=py37_0
- intel-openmp=2020.0=166
- ipykernel=5.1.4=py37h39e3cac_0
- ipython=7.12.0=py37h5ca1d4c_0
- ipython_genutils=0.2.0=py37_0
- jedi=0.16.0=py37_0
- jinja2=2.11.1=py_0
- joblib=0.14.1=py_0
- jpeg=9b=h024ee3a_2
- json5=0.9.1=py_0
- jsonschema=3.2.0=py37_0
- jupyter_client=5.3.4=py37_0
- jupyter_core=4.6.1=py37_0
- jupyterlab=1.2.6=pyhf63ae98_0
- jupyterlab_server=1.0.6=py_0
- keras-applications=1.0.8=py_0
- keras-preprocessing=1.1.0=py_1
- kiwisolver=1.1.0=py37he6710b0_0
- ld_impl_linux-64=2.33.1=h53a641e_7
- libedit=3.1.20181209=hc058e9b_0
- libffi=3.2.1=hd88cf55_4
- libgcc-ng=9.1.0=hdf63c60_0
- libgfortran-ng=7.3.0=hdf63c60_0
- libpng=1.6.37=hbc83047_0
- libprotobuf=3.11.3=hd408876_0
- libsodium=1.0.16=h1bed415_0
- libstdcxx-ng=9.1.0=hdf63c60_0
- libtiff=4.1.0=h2733197_0
- libuuid=1.0.3=h1bed415_2
- libxcb=1.13=h1bed415_1
- libxml2=2.9.9=hea5a465_1
- lz4-c=1.8.1.2=h14c3975_0
- lzo=2.10=h49e0be7_2
- markdown=3.1.1=py37_0
- markupsafe=1.1.1=py37h7b6447c_0
- matplotlib=3.1.3=py37_0
- matplotlib-base=3.1.3=py37hef1b27d_0
- mistune=0.8.4=py37h7b6447c_0
- mkl=2020.0=166
- mkl-service=2.3.0=py37he904b0f_0
- mkl_fft=1.0.15=py37ha843d7b_0
- mkl_random=1.1.0=py37hd6b4f25_0
- mock=4.0.1=py_0
- more-itertools=8.2.0=py_0
- nbconvert=5.6.1=py37_0
- nbformat=5.0.4=py_0
- ncurses=6.1=he6710b0_1
- networkx=2.4=py_0
- notebook=6.0.3=py37_0
- numexpr=2.7.1=py37h423224d_0
- numpy=1.18.1=py37h4f9e942_0
- numpy-base=1.18.1=py37hde5b4d6_1
- olefile=0.46=py37_0
- openssl=1.1.1d=h7b6447c_4
- opt_einsum=3.1.0=py_0
- pandas=1.0.1=py37h0573a6f_0
- pandoc=2.2.3.2=0
- pandocfilters=1.4.2=py37_1
- pango=1.42.4=h049681c_0
- parso=0.6.1=py_0
- patsy=0.5.1=py37_0
- pcre=8.43=he6710b0_0
- pexpect=4.8.0=py37_0
- pickleshare=0.7.5=py37_0
- pillow=7.0.0=py37hb39fc2d_0
- pip=20.0.2=py37_1
- pixman=0.38.0=h7b6447c_0
- prometheus_client=0.7.1=py_0
- prompt_toolkit=3.0.3=py_0
- protobuf=3.11.3=py37he6710b0_0
- ptyprocess=0.6.0=py37_0
- pydot=1.4.1=py37_0
- pygments=2.5.2=py_0
- pyparsing=2.4.6=py_0
- pyqt=5.9.2=py37h05f1152_2
- pyrsistent=0.15.7=py37h7b6447c_0
- pytables=3.6.1=py37h71ec239_0
- python=3.7.6=h0371630_2
- python-dateutil=2.8.1=py_0
- pytz=2019.3=py_0
- pywavelets=1.1.1=py37h7b6447c_0
- pyzmq=18.1.1=py37he6710b0_0
- qt=5.9.7=h5867ecd_1
- readline=7.0=h7b6447c_5
- scikit-image=0.16.2=py37h0573a6f_0
- scikit-learn=0.22.1=py37hd81dba3_0
- scipy=1.4.1=py37h0b6359f_0
- seaborn=0.10.0=py_0
- send2trash=1.5.0=py37_0
- setuptools=45.2.0=py37_0
- sip=4.19.8=py37hf484d3e_0
- six=1.14.0=py37_0
- snappy=1.1.7=hbae5bb6_3
- sqlite=3.31.1=h7b6447c_0
- statsmodels=0.11.0=py37h7b6447c_0
- tensorboard=2.0.0=pyhb38c66f_1
- tensorflow=2.0.0=gpu_py37h768510d_0
- tensorflow-base=2.0.0=gpu_py37h0ec5d1f_0
- tensorflow-estimator=2.0.0=pyh2649769_0
- tensorflow-gpu=2.0.0=h0d30ee6_0
- termcolor=1.1.0=py37_1
- terminado=0.8.3=py37_0
- testpath=0.4.4=py_0
- tk=8.6.8=hbc83047_0
- toolz=0.10.0=py_0
- tornado=6.0.3=py37h7b6447c_3
- traitlets=4.3.3=py37_0
- wcwidth=0.1.8=py_0
- webencodings=0.5.1=py37_1
- werkzeug=0.16.0=py_0
- wheel=0.34.2=py37_0
- wrapt=1.11.2=py37h7b6447c_0
- xz=5.2.4=h14c3975_4
- zeromq=4.3.1=he6710b0_3
- zipp=2.2.0=py_0
- zlib=1.2.11=h7b6447c_3
- zstd=1.3.7=h0b5b093_0
VERSION='0.1a'
#--------------------------------------------------------------------
# ______ _ _ _ _____ _ _
# | ____(_) | | | / ____| | | | |
# | |__ _ __| | | ___ | | ___ _ __ | |_ ___ _ __ | |_ ___
# | __| | |/ _` | |/ _ \ | | / _ \| '_ \| __/ _ \ '_ \| __/ __|
# | | | | (_| | | __/ | |___| (_) | | | | || __/ | | | |_\__ \
# |_| |_|\__,_|_|\___| \_____\___/|_| |_|\__\___|_| |_|\__|___/
#
# Formation Introduction au Deep Learning - 2024
#--------------------------------------------------------------------
# Formation Introduction au Deep Learning https://fidle.cnrs.fr
# By MIAI/CNRS/UGA 2023/24
#
# This file describes the notebooks used by the Fidle training.
version: 3.0.15
content: notebooks
name: Notebooks Fidle
description: All notebooks used by the Fidle training
readme_md: README.md
readme_ipynb: README.ipynb
default_ci: fidle/ci/default.yml
toc:
LinearReg: Linear and logistic regression
Perceptron: Perceptron Model 1957
BHPD.Keras3: BHPD regression (DNN), using Keras3/PyTorch
BHPD.PyTorch: BHPD regression (DNN), using PyTorch
Wine.Keras3: Wine Quality prediction (DNN), using Keras3/PyTorch
Wine.Lightning: Wine Quality prediction (DNN), using PyTorch/Lightning
MNIST.Keras3: MNIST classification (DNN,CNN), using Keras3/PyTorch
MNIST.PyTorch: MNIST classification (DNN,CNN), using PyTorch
MNIST.Lightning: MNIST classification (DNN,CNN), using PyTorch/Lightning
GTSRB.Keras3: Images classification GTSRB with Convolutional Neural Networks (CNN), using Keras3/PyTorch
Embedding.Keras3: Sentiment analysis with word embedding, using Keras3/PyTorch
RNN.Keras3: Time series with Recurrent Neural Network (RNN), using Keras3/PyTorch
GNN.PyTorch: Graph Neural Networks
AE.Keras3: Unsupervised learning with an autoencoder neural network (AE), using Keras3
VAE.Keras3: Generative network with Variational Autoencoder (VAE), using Keras3
DCGAN.Lightning: Generative Adversarial Networks (GANs), using Lightning
DDPM.PyTorch: Diffusion Model (DDPM) using PyTorch
Optimization.PyTorch: Training optimization, using PyTorch
DRL.PyTorch: Deep Reinforcement Learning (DRL), using PyTorch
Misc: Miscellaneous things, but very important!
#!/bin/bash
# -----------------------------------------------
# _ _ _
# | |__ __ _| |_ ___| |__
# | '_ \ / _` | __/ __| '_ \
# | |_) | (_| | || (__| | | |
# |_.__/ \__,_|\__\___|_| |_|
# Fidle at IDRIS
# -----------------------------------------------
#
# SLURM batch script
# Bash script for SLURM batch submission of ci notebooks
# by Jean-Luc Parouty (CNRS/SIMaP)
#
# Submission : sbatch /(...)/batch_slurm.sh
# Monitoring : squeue -u $USER
# ==== Job parameters ==============================================
#SBATCH --job-name="Fidle ci" # job name
#SBATCH --ntasks=1 # number of tasks (a single process here)
#SBATCH --gres=gpu:1 # number of GPUs to reserve (a single GPU here)
#SBATCH --cpus-per-task=10 # number of cores to reserve (a quarter of the node)
#SBATCH --hint=nomultithread # reserve physical cores, not logical ones
#SBATCH --time=05:00:00 # maximum requested run time (HH:MM:SS)
#SBATCH --output="FIDLE_CI_%j.out" # output file name
#SBATCH --error="FIDLE_CI_%j.err" # error file name
#SBATCH --mail-user=Someone@somewhere.fr
#SBATCH --mail-type=END,FAIL
# ==== Parameters ==================================================
MODULE_ENV="pytorch-gpu/py3/2.1.1"
RUN_DIR="$WORK/fidle-project/fidle"
CAMPAIN_PROFILE="./fidle/ci/gpu-scale1.yml"
FILTER=( '.*' )
# ==================================================================
echo '------------------------------------------------------------'
echo "Start : $0"
echo '------------------------------------------------------------'
echo "Job id : $SLURM_JOB_ID"
echo "Job name : $SLURM_JOB_NAME"
echo "Job node list : $SLURM_JOB_NODELIST"
echo '------------------------------------------------------------'
echo "module loaded : $MODULE_ENV"
echo "run dir : $RUN_DIR"
echo "campain profile : $CAMPAIN_PROFILE"
echo "filter : ${FILTER[@]}"
echo '------------------------------------------------------------'
# ---- Module + env.
module purge
module load "$MODULE_ENV"
export PYTHONUSERBASE=$WORK/local/fidle-k3
export PATH=$PATH:$PYTHONUSERBASE/bin
# ---- Run it...
cd "$RUN_DIR"
fid run_ci --quiet --campain "$CAMPAIN_PROFILE" --filter "${FILTER[@]}"
echo 'Done.'
campain:
version: '1.0'
description: Notebook test on CPU, default settings.
directory: ./campains/cpu-default
existing_notebook: 'skip' # remove|skip
report_template: 'fidle' # fidle|default
timeout: 6000
environment_vars:
FIDLE_SAVE_FIGS: true
#
# ------------ LinearReg
#
LINR1:
notebook: LinearReg/01-Linear-Regression.ipynb
GRAD1:
notebook: LinearReg/02-Gradient-descent.ipynb
POLR1:
notebook: LinearReg/03-Polynomial-Regression.ipynb
LOGR1:
notebook: LinearReg/04-Logistic-Regression.ipynb
#
# ------------ Perceptron
#
PER57:
notebook: Perceptron/01-Simple-Perceptron.ipynb
#
# ------------ BHPD.Keras3
#
K3BHPD1:
notebook: BHPD.Keras3/01-DNN-Regression.ipynb
overrides:
fit_verbosity: 2
K3BHPD2:
notebook: BHPD.Keras3/02-DNN-Regression-Premium.ipynb
overrides:
fit_verbosity: 2
#
# ------------ BHPD.PyTorch
#
PBHPD1:
notebook: BHPD.PyTorch/01-DNN-Regression_PyTorch.ipynb
#
# ------------ Wine.Keras3
#
K3WINE1:
notebook: Wine.Keras3/01-DNN-Wine-Regression.ipynb
overrides:
fit_verbosity: 2
dataset_name: default
#
# ------------ Wine.Lightning
#
LWINE1:
notebook: Wine.Lightning/01-DNN-Wine-Regression-lightning.ipynb
overrides:
fit_verbosity: 2
dataset_name: default
#
# ------------ MNIST.Keras3
#
K3MNIST1:
notebook: MNIST.Keras3/01-DNN-MNIST.ipynb
overrides:
fit_verbosity: 2
K3MNIST2:
notebook: MNIST.Keras3/02-CNN-MNIST.ipynb
overrides:
fit_verbosity: 2
#
# ------------ MNIST.PyTorch
#
#PMNIST1:
# notebook: MNIST.PyTorch/01-DNN-MNIST_PyTorch.ipynb
#
# ------------ MNIST.Lightning
#
#LMNIST2:
# notebook: MNIST.Lightning/02-CNN-MNIST_Lightning.ipynb
#
# ------------ GTSRB.Keras3
#
K3GTSRB1:
notebook: GTSRB.Keras3/01-Preparation-of-data.ipynb
overrides:
scale: default
output_dir: default
progress_verbosity: default
K3GTSRB2:
notebook: GTSRB.Keras3/02-First-convolutions.ipynb
after: K3GTSRB1
overrides:
enhanced_dir: default
dataset_name: default
batch_size: default
epochs: default
scale: default
fit_verbosity: 2
K3GTSRB3:
notebook: GTSRB.Keras3/03-Better-convolutions.ipynb
after: K3GTSRB1
overrides:
enhanced_dir: default
model_name: default
dataset_name: default
batch_size: default
epochs: default
scale: default
fit_verbosity: 2
#
# ------------ Embedding.Keras3
#
K3IMDB1:
notebook: Embedding.Keras3/01-One-hot-encoding.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
batch_size: default
epochs: default
fit_verbosity: 2
K3IMDB2:
notebook: Embedding.Keras3/02-Keras-embedding.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
review_len: default
dense_vector_size: default
batch_size: default
epochs: default
output_dir: default
fit_verbosity: 2
K3IMDB3:
notebook: Embedding.Keras3/03-Prediction.ipynb
after: K3IMDB2
overrides:
vocab_size: default
review_len: default
saved_models: default
dictionaries_dir: default
K3IMDB4:
notebook: Embedding.Keras3/04-Show-vectors.ipynb
after: K3IMDB2
overrides:
vocab_size: default
review_len: default
saved_models: default
dictionaries_dir: default
K3IMDB5:
notebook: Embedding.Keras3/05-LSTM-Keras.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
review_len: default
dense_vector_size: default
batch_size: default
epochs: default
fit_verbosity: 2
scale: default
#
# ------------ RNN.Keras3
#
K3LADYB1:
notebook: RNN.Keras3/01-Ladybug.ipynb
overrides:
scale: default
train_prop: default
sequence_len: default
predict_len: default
batch_size: default
epochs: default
#
# ------------ AE.Keras3
#
K3AE1:
notebook: AE.Keras3/01-Prepare-MNIST-dataset.ipynb
overrides:
prepared_dataset: default
scale: 0.1
progress_verbosity: default
K3AE2:
notebook: AE.Keras3/02-AE-with-MNIST.ipynb
after: K3AE1
overrides:
prepared_dataset: default
dataset_seed: default
scale: 1
latent_dim: default
train_prop: default
batch_size: default
epochs: 4
fit_verbosity: default
K3AE3:
notebook: AE.Keras3/03-AE-with-MNIST-post.ipynb
after: K3AE2
overrides:
prepared_dataset: default
dataset_seed: default
scale: 1
train_prop: default
K3AE4:
notebook: AE.Keras3/04-ExtAE-with-MNIST.ipynb
after: K3AE1
overrides:
prepared_dataset: default
dataset_seed: default
scale: 1
train_prop: default
batch_size: default
epochs: 4
fit_verbosity: default
K3AE5:
notebook: AE.Keras3/05-ExtAE-with-MNIST.ipynb
after: K3AE1
overrides:
prepared_dataset: default
dataset_seed: default
scale: 1
train_prop: default
batch_size: default
epochs: 4
fit_verbosity: default
#
# ------------ VAE.Keras3
#
K3VAE1:
notebook: VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
overrides:
latent_dim: default
loss_weights: default
scale: 0.1
seed: default
batch_size: default
epochs: 4
fit_verbosity: default
K3VAE2:
notebook: VAE.Keras3/02-VAE-with-MNIST.ipynb
overrides:
latent_dim: default
loss_weights: default
scale: 0.1
seed: default
batch_size: default
epochs: 4
fit_verbosity: default
K3VAE3:
notebook: VAE.Keras3/03-VAE-with-MNIST-post.ipynb
after: K3VAE2
overrides:
scale: 0.1
seed: default
models_dir: default
#
# ------------ DCGAN.Lightning
#
PLSHEEP3:
notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
overrides:
latent_dim: default
gan_name: GAN
generator_name: default
discriminator_name: default
epochs: 4
lr: default
b1: default
b2: default
batch_size: default
num_img: default
fit_verbosity: default
dataset_file: default
data_shape: default
scale: 0.001
num_workers: default
PLSHEEP3W: # WGANGP variant; distinct key, since YAML mapping keys must be unique
notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
overrides:
latent_dim: default
gan_name: WGANGP
generator_name: default
discriminator_name: default
epochs: 4
lr: default
b1: default
b2: default
batch_size: default
num_img: default
fit_verbosity: default
dataset_file: default
data_shape: default
scale: 0.001
num_workers: default
#
# ------------ Misc
#
NP1:
notebook: Misc/00-Numpy.ipynb
ACTF1:
notebook: Misc/01-Activation-Functions.ipynb
PANDAS1:
notebook: Misc/02-Using-pandas.ipynb
FID1:
notebook: Misc/07-Fid-Example.ipynb
overrides:
scale: .1
x: 345
batch_size: default
campain:
version: '1.0'
description: Automatically generated ci profile (06/01/25 16:42:30)
directory: ./campains/default
existing_notebook: 'remove' # remove|skip
report_template: 'fidle' # fidle|default
timeout: 6000
#
# ------------ LinearReg
#
LINR1:
notebook: LinearReg/01-Linear-Regression.ipynb
GRAD1:
notebook: LinearReg/02-Gradient-descent.ipynb
POLR1:
notebook: LinearReg/03-Polynomial-Regression.ipynb
LOGR1:
notebook: LinearReg/04-Logistic-Regression.ipynb
#
# ------------ Perceptron
#
PER57:
notebook: Perceptron/01-Simple-Perceptron.ipynb
#
# ------------ BHPD.Keras3
#
K3BHPD1:
notebook: BHPD.Keras3/01-DNN-Regression.ipynb
overrides:
fit_verbosity: default
K3BHPD2:
notebook: BHPD.Keras3/02-DNN-Regression-Premium.ipynb
overrides:
fit_verbosity: default
#
# ------------ BHPD.PyTorch
#
PBHPD1:
notebook: BHPD.PyTorch/01-DNN-Regression_PyTorch.ipynb
#
# ------------ Wine.Keras3
#
K3WINE1:
notebook: Wine.Keras3/01-DNN-Wine-Regression.ipynb
overrides:
fit_verbosity: default
dataset_name: default
#
# ------------ Wine.Lightning
#
LWINE1:
notebook: Wine.Lightning/01-DNN-Wine-Regression-lightning.ipynb
overrides:
fit_verbosity: default
dataset_name: default
#
# ------------ MNIST.Keras3
#
K3MNIST1:
notebook: MNIST.Keras3/01-DNN-MNIST.ipynb
overrides:
fit_verbosity: default
K3MNIST2:
notebook: MNIST.Keras3/02-CNN-MNIST.ipynb
overrides:
fit_verbosity: default
#
# ------------ MNIST.PyTorch
#
PMNIST1:
notebook: MNIST.PyTorch/01-DNN-MNIST_PyTorch.ipynb
#
# ------------ MNIST.Lightning
#
LMNIST1:
notebook: MNIST.Lightning/01-DNN-MNIST_Lightning.ipynb
LMNIST2:
notebook: MNIST.Lightning/02-CNN-MNIST_Lightning.ipynb
#
# ------------ GTSRB.Keras3
#
K3GTSRB1:
notebook: GTSRB.Keras3/01-Preparation-of-data.ipynb
overrides:
scale: default
output_dir: default
progress_verbosity: default
K3GTSRB2:
notebook: GTSRB.Keras3/02-First-convolutions.ipynb
overrides:
enhanced_dir: default
dataset_name: default
batch_size: default
epochs: default
scale: default
fit_verbosity: default
K3GTSRB3:
notebook: GTSRB.Keras3/03-Better-convolutions.ipynb
overrides:
enhanced_dir: default
model_name: default
dataset_name: default
batch_size: default
epochs: default
scale: default
fit_verbosity: default
#
# ------------ Embedding.Keras3
#
K3IMDB1:
notebook: Embedding.Keras3/01-One-hot-encoding.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
batch_size: default
epochs: default
fit_verbosity: default
K3IMDB2:
notebook: Embedding.Keras3/02-Keras-embedding.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
review_len: default
dense_vector_size: default
batch_size: default
epochs: default
output_dir: default
fit_verbosity: default
K3IMDB3:
notebook: Embedding.Keras3/03-Prediction.ipynb
overrides:
vocab_size: default
review_len: default
saved_models: default
dictionaries_dir: default
K3IMDB4:
notebook: Embedding.Keras3/04-Show-vectors.ipynb
overrides:
vocab_size: default
review_len: default
saved_models: default
dictionaries_dir: default
K3IMDB5:
notebook: Embedding.Keras3/05-LSTM-Keras.ipynb
overrides:
vocab_size: default
hide_most_frequently: default
review_len: default
dense_vector_size: default
batch_size: default
epochs: default
fit_verbosity: default
scale: default
#
# ------------ RNN.Keras3
#
K3LADYB1:
notebook: RNN.Keras3/01-Ladybug.ipynb
overrides:
scale: default
train_prop: default
sequence_len: default
predict_len: default
batch_size: default
epochs: default
fit_verbosity: default
#
# ------------ GNN.PyTorch
#
# (no notebook entries generated for this section)
#
# ------------ AE.Keras3
#
K3AE1:
notebook: AE.Keras3/01-Prepare-MNIST-dataset.ipynb
overrides:
prepared_dataset: default
scale: default
progress_verbosity: default
K3AE2:
notebook: AE.Keras3/02-AE-with-MNIST.ipynb
overrides:
prepared_dataset: default
dataset_seed: default
scale: default
latent_dim: default
train_prop: default
batch_size: default
epochs: default
fit_verbosity: default
K3AE3:
notebook: AE.Keras3/03-AE-with-MNIST-post.ipynb
overrides:
prepared_dataset: default
dataset_seed: default
scale: default
train_prop: default
K3AE4:
notebook: AE.Keras3/04-ExtAE-with-MNIST.ipynb
overrides:
prepared_dataset: default
dataset_seed: default
scale: default
train_prop: default
batch_size: default
epochs: default
fit_verbosity: default
K3AE5:
notebook: AE.Keras3/05-ExtAE-with-MNIST.ipynb
overrides:
prepared_dataset: default
dataset_seed: default
scale: default
train_prop: default
batch_size: default
epochs: default
fit_verbosity: default
#
# ------------ VAE.Keras3
#
K3VAE1:
notebook: VAE.Keras3/01-VAE-with-MNIST-LossLayer.ipynb
overrides:
latent_dim: default
loss_weights: default
scale: default
seed: default
batch_size: default
epochs: default
fit_verbosity: default
K3VAE2:
notebook: VAE.Keras3/02-VAE-with-MNIST.ipynb
overrides:
latent_dim: default
loss_weights: default
scale: default
seed: default
batch_size: default
epochs: default
fit_verbosity: default
K3VAE3:
notebook: VAE.Keras3/03-VAE-with-MNIST-post.ipynb
overrides:
scale: default
seed: default
models_dir: default
#
# ------------ DCGAN.Lightning
#
PLSHEEP3:
notebook: DCGAN.Lightning/01-DCGAN-PL.ipynb
overrides:
latent_dim: default
gan_name: default
generator_name: default
discriminator_name: default
epochs: default
lr: default
b1: default
b2: default
batch_size: default
num_img: default
fit_verbosity: default
dataset_file: default
data_shape: default
scale: default
num_workers: default
#
# ------------ DDPM.PyTorch
#
DDPM1:
notebook: DDPM.PyTorch/01-ddpm.ipynb
#
# ------------ Optimization.PyTorch
#
OPT1:
notebook: Optimization.PyTorch/01-Apprentissages-rapides-et-Optimisations.ipynb
#
# ------------ DRL.PyTorch
#
DRL1:
notebook: DRL.PyTorch/FIDLE_DQNfromScratch.ipynb
DRL2:
notebook: DRL.PyTorch/FIDLE_rl_baselines_zoo.ipynb
#
# ------------ Misc
#
NP1:
notebook: Misc/00-Numpy.ipynb
ACTF1:
notebook: Misc/01-Activation-Functions.ipynb
PANDAS1:
notebook: Misc/02-Using-pandas.ipynb
PYTORCH1:
notebook: Misc/03-Using-Pytorch.ipynb
TSB1:
notebook: Misc/04-Using-Tensorboard.ipynb
overrides: ??
K3LSTM1:
notebook: Misc/05-RNN.ipynb
PGRAD1:
notebook: Misc/06-Gradients.ipynb
FID1:
notebook: Misc/99-Fid-Example.ipynb
overrides:
scale: default
x: default
batch_size: default