
Add/Update VAE data_generator

parent a1fc73a4
%% Cell type:markdown id: tags:
German Traffic Sign Recognition Benchmark (GTSRB)
=================================================
---
Introduction au Deep Learning (IDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020
## Episode 5 : Full Convolutions
Our main steps:
- Try n models with n datasets
- Save a Pandas/h5 report
- Written to be run in batch mode
## 1/ Import
%% Cell type:code id: tags:
``` python
import tensorflow as tf
from tensorflow import keras
import numpy as np
import h5py
import os,time,json
import random
from IPython.display import display
VERSION='1.6'
```
%% Cell type:markdown id: tags:
## 2/ Init and start
%% Cell type:code id: tags:
``` python
# ---- Where am I ?
now  = time.strftime("%A %d %B %Y - %Hh%Mm%Ss")
here = os.getcwd()
random.seed(time.time())
tag_id = '{:06}'.format(random.randint(0,999999))
# ---- Who am I ?
if 'OAR_JOB_ID' in os.environ:
    oar_id = os.environ['OAR_JOB_ID']
else:
    oar_id = '???'
print('\nFull Convolutions Notebook')
print(' Version : {}'.format(VERSION))
print(' Now is : {}'.format(now))
print(' OAR id : {}'.format(oar_id))
print(' Tag id : {}'.format(tag_id))
print(' Working directory : {}'.format(here))
print(' TensorFlow version :',tf.__version__)
print(' Keras version :',tf.keras.__version__)
print(' for tensorboard : --logdir {}/run/logs_{}'.format(here,tag_id))
```
%% Cell type:markdown id: tags:
## 3/ Dataset loading
%% Cell type:code id: tags:
``` python
def read_dataset(name):
    '''Reads h5 dataset from ./data
    Arguments: dataset name, without .h5
    Returns:   x_train,y_train,x_test,y_test data'''
    # ---- Read dataset
    filename = './data/'+name+'.h5'
    with h5py.File(filename,'r') as f:
        x_train = f['x_train'][:]
        y_train = f['y_train'][:]
        x_test  = f['x_test'][:]
        y_test  = f['y_test'][:]
    return x_train,y_train,x_test,y_test
```
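%% Cell type:markdown id: tags:
For instance, a quick sanity check (a sketch: it assumes `set-24x24-L.h5`, one of the datasets used below, is present in `./data`):
%% Cell type:code id: tags:
``` python
# ---- Quick check of read_dataset() -- the dataset name is an assumption
x_train,y_train,x_test,y_test = read_dataset('set-24x24-L')
print('x_train :', x_train.shape, ' y_train :', y_train.shape)
print('x_test  :', x_test.shape,  ' y_test  :', y_test.shape)
```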
%% Cell type:markdown id: tags:
## 4/ Models collection
%% Cell type:code id: tags:
``` python
# A basic model
#
def get_model_v1(lx,ly,lz):
    model = keras.models.Sequential()
    model.add( keras.layers.Conv2D(96, (3,3), activation='relu', input_shape=(lx,ly,lz)))
    model.add( keras.layers.MaxPooling2D((2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(192, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D((2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Flatten())
    model.add( keras.layers.Dense(1500, activation='relu'))
    model.add( keras.layers.Dropout(0.5))
    model.add( keras.layers.Dense(43, activation='softmax'))
    return model

# A more sophisticated model
#
def get_model_v2(lx,ly,lz):
    model = keras.models.Sequential()
    model.add( keras.layers.Conv2D(64, (3, 3), padding='same', input_shape=(lx,ly,lz), activation='relu'))
    model.add( keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add( keras.layers.Conv2D(128, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Conv2D(256, (3, 3), padding='same',activation='relu'))
    model.add( keras.layers.Conv2D(256, (3, 3), activation='relu'))
    model.add( keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add( keras.layers.Dropout(0.2))
    model.add( keras.layers.Flatten())
    model.add( keras.layers.Dense(512, activation='relu'))
    model.add( keras.layers.Dropout(0.5))
    model.add( keras.layers.Dense(43, activation='softmax'))
    return model

# A model with batch normalization
#
def get_model_v3(lx,ly,lz):
    model = keras.models.Sequential()
    model.add(tf.keras.layers.Conv2D(32, (5, 5), padding='same', activation='relu', input_shape=(lx,ly,lz)))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='relu'))
    model.add(tf.keras.layers.BatchNormalization(axis=-1))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Dropout(0.4))
    model.add(tf.keras.layers.Dense(43, activation='softmax'))
    return model
```
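%% Cell type:markdown id: tags:
To inspect any of these models, instantiate it with one of our dataset shapes and print its summary (a minimal sketch; 24x24 RGB is just an example shape):
%% Cell type:code id: tags:
``` python
# ---- Peek at a model -- 24x24 RGB is an example input shape
model = get_model_v1(24,24,3)
model.summary()
```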
%% Cell type:markdown id: tags:
## 5/ Multiple datasets, multiple models ;-)
%% Cell type:code id: tags:
``` python
def multi_run(datasets, models, datagen=None,
              train_size=1, test_size=1, batch_size=64, epochs=16,
              verbose=0, extension_dir='last'):
    # ---- Logs and models dir
    #
    os.makedirs('./run/logs_{}'.format(extension_dir),   mode=0o750, exist_ok=True)
    os.makedirs('./run/models_{}'.format(extension_dir), mode=0o750, exist_ok=True)
    # ---- Columns of output
    #
    output={}
    output['Dataset'] = []
    output['Size']    = []
    for m in models:
        output[m+'_Accuracy'] = []
        output[m+'_Duration'] = []
    # ---- Let's go
    #
    for d_name in datasets:
        print("\nDataset : ",d_name)
        # ---- Read dataset
        x_train,y_train,x_test,y_test = read_dataset(d_name)
        d_size = os.path.getsize('./data/'+d_name+'.h5')/(1024*1024)
        output['Dataset'].append(d_name)
        output['Size'].append(d_size)
        # ---- Get the shape
        (n,lx,ly,lz) = x_train.shape
        n_train = int(x_train.shape[0]*train_size)
        n_test  = int(x_test.shape[0]*test_size)
        # ---- For each model
        for m_name,m_function in models.items():
            print("    Run model {} : ".format(m_name), end='')
            # ---- Get model
            try:
                model = m_function(lx,ly,lz)
                # ---- Compile it
                model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
                # ---- Callback : tensorboard
                log_dir = "./run/logs_{}/tb_{}_{}".format(extension_dir, d_name, m_name)
                tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
                # ---- Callback : best model
                save_dir = "./run/models_{}/model_{}_{}.h5".format(extension_dir, d_name, m_name)
                bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=save_dir, verbose=0, monitor='accuracy', save_best_only=True)
                # ---- Train
                start_time = time.time()
                if datagen is None:
                    # ---- No data augmentation (datagen=None) --------------------------------------
                    history = model.fit(x_train[:n_train], y_train[:n_train],
                                        batch_size      = batch_size,
                                        epochs          = epochs,
                                        verbose         = verbose,
                                        validation_data = (x_test[:n_test], y_test[:n_test]),
                                        callbacks       = [tensorboard_callback, bestmodel_callback])
                else:
                    # ---- Data augmentation (datagen given) ----------------------------------------
                    datagen.fit(x_train)
                    history = model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                                        steps_per_epoch = int(n_train/batch_size),
                                        epochs          = epochs,
                                        verbose         = verbose,
                                        validation_data = (x_test[:n_test], y_test[:n_test]),
                                        callbacks       = [tensorboard_callback, bestmodel_callback])
                # ---- Result
                end_time = time.time()
                duration = end_time-start_time
                accuracy = max(history.history["val_accuracy"])*100
                #
                output[m_name+'_Accuracy'].append(accuracy)
                output[m_name+'_Duration'].append(duration)
                print("Accuracy={:.2f} and Duration={:.2f}".format(accuracy,duration))
            except Exception:
                output[m_name+'_Accuracy'].append(0)
                output[m_name+'_Duration'].append(999)
                print('-')
    return output
```
%% Cell type:markdown id: tags:
## 6/ Run !
%% Cell type:code id: tags:
``` python
start_time = time.time()
print('\n---- Run','-'*50)
# --------- Datasets, models, and more.. -----------------------------------
#
# ---- For tests
# datasets = ['set-24x24-L', 'set-24x24-RGB']
# models = {'v1':get_model_v1, 'v2':get_model_v2}
# batch_size = 64
# epochs = 2
# train_size = 0.1
# test_size = 0.1
# with_datagen = False
# verbose = 0
#
# ---- All possibilities -> Run A
# datasets = ['set-24x24-L', 'set-24x24-RGB', 'set-48x48-L', 'set-48x48-RGB', 'set-24x24-L-LHE', 'set-24x24-RGB-HE', 'set-48x48-L-LHE', 'set-48x48-RGB-HE']
# models = {'v1':get_model_v1, 'v2':get_model_v2, 'v3':get_model_v3}
# batch_size = 64
# epochs = 16
# train_size = 1
# test_size = 1
# with_datagen = False
# verbose = 0
#
# ---- Data augmentation -> Run B
datasets = ['set-48x48-RGB']
models = {'v2':get_model_v2}
batch_size = 64
epochs = 20
train_size = 1
test_size = 1
with_datagen = True
verbose = 0
#
# ---------------------------------------------------------------------------
# ---- Data augmentation
#
if with_datagen :
    datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
                                                           featurewise_std_normalization=False,
                                                           width_shift_range=0.1,
                                                           height_shift_range=0.1,
                                                           zoom_range=0.2,
                                                           shear_range=0.1,
                                                           rotation_range=10.)
else:
    datagen = None
# ---- Run
#
output = multi_run(datasets, models,
                   datagen=datagen,
                   train_size=train_size, test_size=test_size,
                   batch_size=batch_size, epochs=epochs,
                   verbose=verbose,
                   extension_dir=tag_id)
# ---- Save report
#
report={}
report['output']=output
report['description']='train_size={} test_size={} batch_size={} epochs={} data_aug={}'.format(train_size,test_size,batch_size,epochs,with_datagen)
report_name='./run/report_{}.json'.format(tag_id)
with open(report_name, 'w') as file:
    json.dump(report, file)
print('\nReport saved as ',report_name)
end_time = time.time()
duration = end_time-start_time
print(f'Duration : {duration:.2f} s')
print('-'*59)
```
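%% Cell type:markdown id: tags:
As a quick check, the `output` dict is column-oriented, so it loads straight into a DataFrame, and the saved report can be re-read with `json` (a minimal sketch; the pandas import is specific to this example):
%% Cell type:code id: tags:
``` python
# ---- Sketch : the report as a DataFrame, then re-reading the saved JSON
import pandas as pd                 # import specific to this example
display(pd.DataFrame(output))
with open(report_name) as f:
    report2 = json.load(f)
print(report2['description'])
```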
%% Cell type:markdown id: tags:
## 7/ That's all folks..
%% Cell type:code id: tags:
``` python
print('\n{}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
print("The work is done.\n")
```
%% Cell type:code id: tags:
``` python
```
......
%% Cell type:markdown id: tags:
Variational AutoEncoder (VAE) with CelebA
=========================================
---
Formation Introduction au Deep Learning (FIDLE) - S. Arias, E. Maldonado, JL. Parouty - CNRS/SARI/DEVLOG - 2020
## Episode 1 - Train a model
- Define a VAE model
- Build the model
- Train it
- Follow the learning process with TensorBoard
%% Cell type:markdown id: tags:
## Step 1 - Init python stuff
%% Cell type:code id: tags:
``` python
import numpy as np
import sys, importlib
import modules.vae
import modules.data_generator
importlib.reload(modules.vae)
importlib.reload(modules.data_generator)
from modules.vae import VariationalAutoencoder
from modules.data_generator import Data_generator
VariationalAutoencoder.about()
Data_generator.about()
```
%% Output
FIDLE 2020 - Variational AutoEncoder (VAE)
TensorFlow version : 2.0.0
VAE version : 1.24
FIDLE 2020 - Data_generator
Version : 0.1
%% Cell type:markdown id: tags:
## Step 2 - Prepare data
### 2.1 - Dataset location
%% Cell type:code id: tags:
``` python
dataset_dir = '/bettik/PROJECTS/pr-fidle/datasets/celeba'
```
%% Cell type:markdown id: tags:
### 2.2 - Testing our data generator (Keras Sequence)
Just to get a feel for how our data_generator works
%% Cell type:code id: tags:
``` python
# ---- A very small dataset
clusters_dir = f'{dataset_dir}/clusters-test'
# ---- Instantiate
data_gen = Data_generator(clusters_dir, 32, debug=True)
batch_sizes=[]
for i in range( len(data_gen) ):
    x,y = data_gen[i]
    batch_sizes.append(len(x))
print(f'\n\ntotal number of items : {sum(batch_sizes)}')
print(f'batch sizes : {batch_sizes}')
```
%% Output
Clusters nb : 10 files
Dataset size : 932
Batch size : 32
[shuffle!]
[Load 00,s=100] (32) (32) (32) (4..)
[Load 01,s=100] (..28) (32) (32) (8..)
[Load 02,s=100] (..24) (32) (32) (12..)
[Load 03,s=100] (..20) (32) (32) (16..)
[Load 04,s=100] (..16) (32) (32) (20..)
[Load 05,s=100] (..12) (32) (32) (24..)
[Load 06,s= 32] (..8) (24..)
[Load 07,s=100] (..8) (32) (32) (28..)
[Load 08,s=100] (..4) (32) (32) (32) (0..)
[Load 09,s=100] (..32) (32) (32)
total number of items : 928
batch sizes : [32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32]
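%% Cell type:markdown id: tags:
Keras consumes any `Sequence` the same way: each epoch it calls `len(data_gen)`, then indexes the batches one by one. A minimal sketch, reusing the `data_gen` instantiated above:
%% Cell type:code id: tags:
``` python
# ---- What Keras sees when it consumes our Sequence (sketch)
x,y = data_gen[0]                   # one (x,y) batch ; y is x for an autoencoder
print('batch shapes  :', x.shape, y.shape)
print('batches/epoch :', len(data_gen))
```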
%% Cell type:markdown id: tags:
## Step 3 - Get data
%% Cell type:code id: tags:
``` python
(x_train, y_train), (x_test, y_test) = load_MNIST()
```
%% Cell type:markdown id: tags:
## Step 4 - Get VAE model
%% Cell type:code id: tags:
``` python
tag = '001'
input_shape = (28,28,1)
z_dim = 2
verbose = 0
encoder= [ {'type':'Conv2D', 'filters':32, 'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2D', 'filters':64, 'kernel_size':(3,3), 'strides':2, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2D', 'filters':64, 'kernel_size':(3,3), 'strides':2, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2D', 'filters':64, 'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'relu'}
         ]
decoder= [ {'type':'Conv2DTranspose', 'filters':64, 'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2DTranspose', 'filters':64, 'kernel_size':(3,3), 'strides':2, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2DTranspose', 'filters':32, 'kernel_size':(3,3), 'strides':2, 'padding':'same', 'activation':'relu'},
           {'type':'Conv2DTranspose', 'filters':1,  'kernel_size':(3,3), 'strides':1, 'padding':'same', 'activation':'sigmoid'}
         ]
vae = modules.vae.VariationalAutoencoder(input_shape    = input_shape,
                                         encoder_layers = encoder,
                                         decoder_layers = decoder,
                                         z_dim          = z_dim,
                                         verbose        = verbose,
                                         run_tag        = tag)
vae.save(model=None)
```
%% Cell type:markdown id: tags:
## Step 5 - Compile it
%% Cell type:code id: tags:
``` python
learning_rate = 0.0005
r_loss_factor = 1000
vae.compile(learning_rate, r_loss_factor)
```
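%% Cell type:markdown id: tags:
Judging from the parameter name (the module internals are not shown here, so this is an assumption), `r_loss_factor` weights the reconstruction term against the KL term in the usual VAE objective:

$$\mathcal{L} = \text{r\_loss\_factor}\cdot\mathcal{L}_{reconstruction} + \mathcal{L}_{KL}$$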
%% Cell type:markdown id: tags:
## Step 6 - Train
%% Cell type:code id: tags:
``` python
batch_size = 100
epochs = 100
image_periodicity = 1 # for each epoch
chkpt_periodicity = 2 # for each epoch
initial_epoch = 0
dataset_size = 1
```
%% Cell type:code id: tags:
``` python
vae.train(x_train,
          x_test,
          batch_size        = batch_size,
          epochs            = epochs,
          image_periodicity = image_periodicity,
          chkpt_periodicity = chkpt_periodicity,
          initial_epoch     = initial_epoch,
          dataset_size      = dataset_size
         )
```
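%% Cell type:markdown id: tags:
To follow the run with TensorBoard, point it at the run directory (a sketch: the exact log location for `run_tag='001'` depends on the VAE module, so the path below is an assumption to adjust):
%% Cell type:code id: tags:
``` python
# ---- Follow the training with TensorBoard (the logdir path is an assumption)
# %load_ext tensorboard
# %tensorboard --logdir ./run/tag_001/logs
```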
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
```
......@@ -32,15 +32,3 @@ class ImagesCallback(Callback):
     def on_epoch_begin(self, epoch, logs={}):
         self.epoch += 1

-def step_decay_schedule(initial_lr, decay_factor=0.5, step_size=1):
-    '''
-    Wrapper function to create a LearningRateScheduler with step decay schedule.
-    '''
-    def schedule(epoch):
-        new_lr = initial_lr * (decay_factor ** np.floor(epoch/step_size))
-        return new_lr
-    return LearningRateScheduler(schedule)
\ No newline at end of file
import numpy as np
import tensorflow as tf
import tensorflow.keras.datasets.mnist as mnist

def load_MNIST():
    # ---- Get data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # ---- Normalization
    x_train = x_train.astype('float32') / 255.
    x_test  = x_test.astype( 'float32') / 255.
    # ---- Reshape : (28,28) -> (28,28,1)
    x_train = np.expand_dims(x_train, axis=3)
    x_test  = np.expand_dims(x_test,  axis=3)
    print('Dataset loaded.')
    print('Resized and normalized.')
    print(f'x_train shape : {x_train.shape}\nx_test shape  : {x_test.shape}')
    return (x_train,y_train),(x_test,y_test)
\ No newline at end of file
# ------------------------------------------------------------------
#   _____ _     _ _
#  |  ___(_) __| | | ___
#  | |_  | |/ _` | |/ _ \
#  |  _| | | (_| | |  __/  Data_generator
#  |_|   |_|\__,_|_|\___|  for clustered CelebA dataset
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# Initial version by JL Parouty, feb 2020
import numpy as np
import pandas as pd
import math
import os,glob
from tensorflow.keras.utils import Sequence
class Data_generator(Sequence):

    version = 0.1

    def __init__(self, clusters_dir='./data', batch_size=32, debug=False):
        '''
        Instantiation of the data generator
        args:
            clusters_dir : Directory of the clusters files
            batch_size   : Batch size (32)
            debug        : debug mode (False)
        '''
        #
        # ---- Get the list of clusters
        #
        clusters_name = [ os.path.splitext(f)[0] for f in glob.glob( f'{clusters_dir}/*.npy') ]
        clusters_size = len(clusters_name)
        #
        # ---- Read each cluster description
        #      because we need the full dataset size
        #
        dataset_size = 0
        for c in clusters_name:
            df = pd.read_csv(c+'.csv', header=0)
            dataset_size += len(df.index)
        if debug:
            print(f'Clusters nb  : {len(clusters_name)} files')
            print(f'Dataset size : {dataset_size}')
            print(f'Batch size   : {batch_size}')
        #
        # ---- Remember all of that
        #
        self.clusters_dir  = clusters_dir
        self.batch_size    = batch_size
        self.clusters_name = clusters_name
        self.clusters_size = clusters_size
        self.dataset_size  = dataset_size
        self.debug         = debug
        #
        # ---- Read a first cluster
        #
        self.cluster_i = clusters_size
        self.read_next_cluster()

    def __len__(self):
        return math.floor(self.dataset_size / self.batch_size)

    def __getitem__(self, idx):
        #
        # ---- Get the next item index (note: idx is ignored, batches are served sequentially)
        #
        i = self.data_i
        #
        # ---- Get a batch
        #
        batch = self.data[i:i+self.batch_size]
        #
        # ---- Cluster is large enough
        #
        if len(batch) == self.batch_size:
            self.data_i += self.batch_size
            if self.debug: print(f'({len(batch)}) ',end='')
            return batch,batch
        #
        # ---- Not enough... read the next cluster and complete the batch
        #
        if self.debug: print(f'({len(batch)}..) ',end='')
        #
        self.read_next_cluster()
        batch2      = self.data[ 0:self.batch_size-len(batch) ]
        self.data_i = self.batch_size-len(batch)
        batch       = np.concatenate( (batch,batch2) )
        #
        if self.debug: print(f'(..{len(batch2)}) ',end='')
        return batch, batch

    def read_next_cluster(self):
        #
        # ---- Get the next cluster name
        #      If we have reached the end of the list, we shuffle and
        #      start again from the beginning.
        #
        i = self.cluster_i + 1
        if i >= self.clusters_size:
            np.random.shuffle(self.clusters_name)
            i = 0
            if self.debug : print('\n[shuffle!]')
        #
        # ---- Read it, and normalize
        #
        data = np.load( self.clusters_name[i]+'.npy' )
        data = data/255
        #
        # ---- Remember all of that
        #
        self.data      = data
        self.data_i    = 0
        self.cluster_i = i
        #
        if self.debug: print(f'\n[Load {self.cluster_i:02d},s={len(self.data):3d}] ',end='')

    @classmethod
    def about(cls):
        print('\nFIDLE 2020 - Data_generator')
        print('Version :', cls.version)
\ No newline at end of file
# ------------------------------------------------------------------
#   _____ _     _ _
#  |  ___(_) __| | | ___
#  | |_  | |/ _` | |/ _ \
#  |  _| | | (_| | |  __/
#  |_|   |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# Initial version by JL Parouty, feb 2020
import numpy as np
import tensorflow as tf
import tensorflow.keras.datasets.mnist as mnist
class Loader_MNIST():

    version = '0.1'

    def __init__(self):
        pass

    @classmethod
    def about(cls):
        print('\nFIDLE 2020 - Very basic MNIST dataset loader')
        print('TensorFlow version :',tf.__version__)
        print('Loader version     :', cls.version)

    @classmethod
    def load(cls, normalize=True, expand=True, verbose=1):
        # ---- Get data
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        if verbose>0: print('Dataset loaded.')
        # ---- Normalization
        if normalize:
            x_train = x_train.astype('float32') / 255.
            x_test  = x_test.astype( 'float32') / 255.
            if verbose>0: print('Normalized.')
        # ---- Reshape : (28,28) -> (28,28,1)
        if expand:
            x_train = np.expand_dims(x_train, axis=3)
            x_test  = np.expand_dims(x_test, axis=3)
            if verbose>0: print(f'Reshaped to {x_train.shape}')
        return (x_train,y_train),(x_test,y_test)
\ No newline at end of file
# ------------------------------------------------------------------
#   _____ _     _ _
#  |  ___(_) __| | | ___
#  | |_  | |/ _` | |/ _ \
#  |  _| | | (_| | |  __/
#  |_|   |_|\__,_|_|\___|
# ------------------------------------------------------------------
# Formation Introduction au Deep Learning (FIDLE)
# CNRS/SARI/DEVLOG 2020 - S. Arias, E. Maldonado, JL. Parouty
# ------------------------------------------------------------------
# by JL Parouty (feb 2020), based on David Foster examples.
import numpy as np
import tensorflow as tf
......@@ -157,8 +170,8 @@ class VariationalAutoencoder():
                     image_periodicity=1,
                     chkpt_periodicity=2,
                     initial_epoch=0,
-                    dataset_size=1,
-                    lr_decay=1):
+                    dataset_size=1
+                    ):

         # ---- Dataset size
         n_train = int(x_train.shape[0] * dataset_size)
......@@ -172,9 +185,6 @@ class VariationalAutoencoder():
         # ---- Callback : Images
         callbacks_images = ImagesCallback(initial_epoch, image_periodicity, self)

-        # ---- Callback : Learning rate scheduler
-        #lr_sched = modules.callbacks.step_decay_schedule(initial_lr=self.learning_rate, decay_factor=lr_decay, step_size=1)
-
         # ---- Callback : Checkpoint
         filename = self.run_directory+"/models/model-{epoch:03d}-{loss:.2f}.h5"
         callback_chkpts = ModelCheckpoint(filename, save_freq=n_train*chkpt_periodicity ,verbose=0)
......
<style>
div.warn {
background-color: #fcf2f2;
border-color: #dFb5b4;
border-left: 5px solid #dfb5b4;
padding: 0.5em;
font-weight: bold;
font-size: 1.1em;
}
div.nota {
background-color: #DAFFDE;
border-left: 5px solid #92CC99;
padding: 0.5em;
}
</style>
File moved
......@@ -29,25 +29,25 @@ import matplotlib
 import matplotlib.pyplot as plt
 import seaborn as sn
-from IPython.display import display, Markdown
+from IPython.display import display,Markdown,HTML

-VERSION='0.2.6'
+VERSION='0.2.7'

 # -------------------------------------------------------------
 # init_all
 # -------------------------------------------------------------
 #
-def init(mplstyle='../fidle/talk.mplstyle'):
+def init(mplstyle='../fidle/custom.mplstyle', cssfile='../fidle/custom.css'):
     global VERSION
-    # ---- matplotlib
+    # ---- matplotlib and css
     matplotlib.style.use(mplstyle)
+    load_cssfile(cssfile)
     # ---- Hello world
     # now = datetime.datetime.now()
     print('\nFIDLE 2020 - Practical Work Module')
     print('Version            :', VERSION)
     print('Run time           : {}'.format(time.strftime("%A %-d %B %Y, %H:%M:%S")))
     print('Matplotlib style   :', mplstyle)
     print('TensorFlow version :',tf.__version__)
     print('Keras version      :',tf.keras.__version__)
......@@ -92,7 +92,7 @@ def shuffle_np_dataset(x, y):
     return x[p], y[p]

-def update_progress(what,i,imax):
+def update_progress(what,i,imax, redraw=False):
     """
     Display a text progress bar, as :
     My progress bar : ############# 34%
......@@ -104,7 +104,7 @@ def update_progress(what,i,imax):
     nothing
     """
     bar_length = min(40,imax)
-    if (i%int(imax/bar_length))!=0 and i<imax:
+    if (i%int(imax/bar_length))!=0 and i<imax and not redraw:
         return
     progress = float(i/imax)
     block = int(round(bar_length * progress))
......@@ -155,7 +155,9 @@ def rmin(l):
 # show_images
 # -------------------------------------------------------------
 #
-def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1, colorbar=False, y_pred=None, cm='binary',y_padding=0.35, spines_alpha=1):
+def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1,
+                colorbar=False, y_pred=None, cm='binary',y_padding=0.35, spines_alpha=1,
+                fontsize=20):
     """
     Show some images in a grid, with legends
     args:
......@@ -176,7 +178,6 @@ def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1, colorba
     rows = math.ceil(len(indices)/columns)
     fig=plt.figure(figsize=(columns*x_size, rows*(y_size+y_padding)))
     n=1
-    errors=0
     for i in indices:
         axs=fig.add_subplot(rows, columns, n)
         n+=1
......@@ -202,14 +203,13 @@ def plot_images(x,y=None, indices='all', columns=12, x_size=1, y_size=1, colorba
         axs.set_yticks([])
         axs.set_xticks([])
         if draw_labels and not draw_pred:
-            axs.set_xlabel(y[i])
+            axs.set_xlabel(y[i],fontsize=fontsize)
         if draw_labels and draw_pred:
             if y[i]!=y_pred[i]:
-                axs.set_xlabel('{} ({})'.format(y_pred[i],y[i]))
+                axs.set_xlabel(f'{y_pred[i]} ({y[i]})',fontsize=fontsize)
                 axs.xaxis.label.set_color('red')
-                errors+=1
             else:
-                axs.set_xlabel(y[i])
+                axs.set_xlabel(y[i],fontsize=fontsize)
     if colorbar:
         fig.colorbar(img,orientation="vertical", shrink=0.65)
     plt.show()
......@@ -362,4 +362,19 @@ def plot_donut(values, labels, colors=["lightsteelblue","coral"], figsize=(6,6),
     plt.show()

 def display_md(md_text):
-    display(Markdown(md_text))
\ No newline at end of file
+    display(Markdown(md_text))
+
+def hdelay(sec):
+    return str(datetime.timedelta(seconds=int(sec)))
+
+def hsize(num, suffix='o'):
+    for unit in ['','K','M','G','T','P','E','Z']:
+        if abs(num) < 1024.0:
+            return f'{num:3.1f} {unit}{suffix}'
+        num /= 1024.0
+    return f'{num:.1f} Y{suffix}'
+
+def load_cssfile(cssfile):
+    if cssfile is None: return
+    styles = open(cssfile, "r").read()
+    display(HTML(styles))