Commit 3bd41252, authored 2 years ago by Jean-Luc Parouty

Add Discriminator_2 :-)

parent 21789e50

No related branches, tags, or merge requests found.

Showing 3 changed files, with 50 additions and 6 deletions:

- DCGAN-PyTorch/01-DCGAN-PL.ipynb (+3 −3)
- DCGAN-PyTorch/modules/Discriminators.py (+45 −1)
- DCGAN-PyTorch/modules/Generators.py (+2 −2)
DCGAN-PyTorch/01-DCGAN-PL.ipynb (+3 −3)

@@ -80,7 +80,7 @@
     "latent_dim = 128\n",
     " \n",
     "generator_class = 'Generator_2'\n",
-    "discriminator_class = 'Discriminator_1' \n",
+    "discriminator_class = 'Discriminator_2' \n",
     " \n",
     "scale = .01\n",
     "epochs = 5\n",

@@ -170,7 +170,7 @@
     "source": [
     "print('\\nInstantiation :\\n')\n",
     "generator = Generator_2(latent_dim=latent_dim, data_shape=data_shape)\n",
-    "discriminator = Discriminator_1(latent_dim=latent_dim, data_shape=data_shape)\n",
+    "discriminator = Discriminator_2(latent_dim=latent_dim, data_shape=data_shape)\n",
     "\n",
     "print('\\nFew tests :\\n')\n",
     "z = torch.randn(batch_size, latent_dim)\n",

@@ -296,7 +296,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "gan = GAN.load_from_checkpoint('./run/SHEEP3/models/last.ckpt')"
+    "gan = GAN.load_from_checkpoint('./run/SHEEP3/models/last-v1.ckpt')"
     ]
    },
    {
%% Cell type:markdown id: tags:
# GAN using PyTorch Lightning
See :
- https://pytorch-lightning.readthedocs.io/en/stable/notebooks/lightning_examples/basic-gan.html
- https://www.assemblyai.com/blog/pytorch-lightning-for-dummies/

Note : requires
```
pip install ipywidgets lightning tqdm
```
%% Cell type:markdown id: tags:
## Step 1 - Init and parameters
#### Python init
%% Cell type:code id: tags:
```python
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from lightning import LightningDataModule, LightningModule, Trainer
from lightning.pytorch.callbacks.progress.tqdm_progress import TQDMProgressBar
from lightning.pytorch.callbacks.progress.base import ProgressBarBase
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.loggers.tensorboard import TensorBoardLogger
from tqdm import tqdm
from torch.utils.data import DataLoader
import fidle
from modules.SmartProgressBar import SmartProgressBar
from modules.QuickDrawDataModule import QuickDrawDataModule
from modules.GAN import GAN
from modules.Generators import *
from modules.Discriminators import *
# Init Fidle environment
run_id, run_dir, datasets_dir = fidle.init('SHEEP3')
```
%% Cell type:markdown id: tags:
#### Few parameters
%% Cell type:code id: tags:
```python
latent_dim = 128
generator_class = 'Generator_2'
discriminator_class = 'Discriminator_2'
scale = .01
epochs = 5
batch_size = 32
num_img = 36
fit_verbosity = 2
dataset_file = datasets_dir+'/QuickDraw/origine/sheep.npy'
data_shape = (28,28,1)
```
%% Cell type:markdown id: tags:
## Step 2 - Get some nice data
%% Cell type:markdown id: tags:
#### Get a Nice DataModule
Our DataModule is defined in [./modules/QuickDrawDataModule.py](./modules/QuickDrawDataModule.py)
This is a [LightningDataModule](https://pytorch-lightning.readthedocs.io/en/stable/data/datamodule.html)
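For a feel of what that module does, here is a minimal, hypothetical sketch of such a DataModule. The real implementation is in ./modules/QuickDrawDataModule.py and may differ; the class name, the meaning of `scale` and the normalization below are assumptions.
```python
# Hypothetical sketch of a LightningDataModule for the QuickDraw sheep file.
# Illustration only -- see modules/QuickDrawDataModule.py for the real one.
import numpy as np
import torch
from torch.utils.data import DataLoader
from lightning import LightningDataModule

class QuickDrawDataModuleSketch(LightningDataModule):
    def __init__(self, dataset_file, scale=1., batch_size=32, num_workers=8):
        super().__init__()
        self.dataset_file = dataset_file
        self.scale        = scale          # assumed: fraction of the dataset to keep
        self.batch_size   = batch_size
        self.num_workers  = num_workers

    def setup(self, stage=None):
        data = np.load(self.dataset_file)              # (N, 784) uint8 bitmaps
        n    = int(len(data) * self.scale)
        data = data[:n].astype(np.float32) / 255.      # assumed [0,1] normalization
        self.x_train = torch.from_numpy(data.reshape(-1, 28, 28, 1))   # NHWC

    def train_dataloader(self):
        return DataLoader(self.x_train, batch_size=self.batch_size,
                          num_workers=self.num_workers, shuffle=True)
```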
%% Cell type:code id: tags:
```python
dm = QuickDrawDataModule(dataset_file, scale, batch_size, num_workers=8)
dm.setup()
```
%% Cell type:markdown id: tags:
#### Have a look
%% Cell type:code id: tags:
```python
dl = dm.train_dataloader()
batch_data = next(iter(dl))
fidle.scrawler.images( batch_data.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1,
y_padding=0,spines_alpha=0, save_as='01-Sheeps')
```
%% Cell type:markdown id: tags:
## Step 3 - Get a nice GAN model
Our Generators are defined in [./modules/Generators.py](./modules/Generators.py)
Our Discriminators are defined in [./modules/Discriminators.py](./modules/Discriminators.py)
Our GAN is defined in [./modules/GAN.py](./modules/GAN.py)
%% Cell type:markdown id: tags:
#### Basic test - Just to be sure it (could) work... ;-)
%% Cell type:code id: tags:
```python
print('\nInstantiation :\n')
generator = Generator_2(latent_dim=latent_dim, data_shape=data_shape)
discriminator = Discriminator_2(latent_dim=latent_dim, data_shape=data_shape)

print('\nFew tests :\n')
z = torch.randn(batch_size, latent_dim)
print('z size : ',z.size())
fake_img = generator.forward(z)
print('fake_img : ', fake_img.size())
p = discriminator.forward(fake_img)
print('pred fake : ', p.size())
print('batch_data : ',batch_data.size())
p = discriminator.forward(batch_data)
print('pred real : ', p.size())
nimg = fake_img.detach().numpy()
fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(batch_size), columns=12, x_size=1, y_size=1,
y_padding=0,spines_alpha=0, save_as='01-Sheeps')
```
%% Cell type:markdown id: tags:
#### GAN model
To simplify our code, the GAN class is defined separately in the module [./modules/GAN.py](./modules/GAN.py)
Passing the generator/discriminator class names as parameters keeps the code modular and lets us use the PL checkpoints.
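A minimal sketch of how this can be wired up (illustration only; the actual implementation is ./modules/GAN.py, and `GANSketch`, `gen_cls` and `disc_cls` are made-up names): the class names are stored with `save_hyperparameters()`, so `load_from_checkpoint()` can later rebuild the same generator/discriminator.
```python
# Sketch only: one way to turn class-name strings into module instances while
# keeping them in the checkpoint. The real class lives in modules/GAN.py.
from lightning import LightningModule
from modules import Generators, Discriminators

class GANSketch(LightningModule):
    def __init__(self, data_shape=None, batch_size=32, latent_dim=128,
                 generator_class='Generator_2', discriminator_class='Discriminator_2',
                 lr=0.0001):
        super().__init__()
        self.save_hyperparameters()        # class names end up in the checkpoint

        # Resolve the classes from their names -> generator/discriminator stay swappable
        gen_cls  = getattr(Generators,     generator_class)
        disc_cls = getattr(Discriminators, discriminator_class)
        self.generator     = gen_cls(latent_dim=latent_dim,  data_shape=data_shape)
        self.discriminator = disc_cls(latent_dim=latent_dim, data_shape=data_shape)
```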
%% Cell type:code id: tags:
```python
gan = GAN( data_shape = data_shape,
batch_size = batch_size,
latent_dim = latent_dim,
generator_class = generator_class,
discriminator_class = discriminator_class,
lr=0.0001)
```
%% Cell type:markdown id: tags:
## Step 5 - Train it !
#### Instantiate Callbacks, Logger & co.
More about :
- [Checkpoints](https://pytorch-lightning.readthedocs.io/en/stable/common/checkpointing_basic.html)
- [modelCheckpoint](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html#pytorch_lightning.callbacks.ModelCheckpoint)
%% Cell type:code id: tags:
```python
# ---- for tensorboard logs
#
logger = TensorBoardLogger( save_dir = f'{run_dir}',
name = 'tb_logs' )
# ---- To save checkpoints
#
callback_checkpoints = ModelCheckpoint( dirpath = f'{run_dir}/models',
filename = 'bestModel',
save_top_k = 1,
save_last = True,
every_n_epochs = 1,
monitor = "g_loss")
# ---- To have a nice progress bar
#
callback_progressBar = SmartProgressBar(verbosity=2)          # Usable everywhere
# progress_bar = TQDMProgressBar(refresh_rate=1) # Usable in real jupyter lab (bug in vscode)
```
%% Cell type:markdown id: tags:
#### Train it
%% Cell type:code id: tags:
```python
trainer = Trainer(
accelerator = "auto",
# devices = 1 if torch.cuda.is_available() else None,  # limiting for iPython runs
max_epochs = epochs,
callbacks = [callback_progressBar, callback_checkpoints],
log_every_n_steps = batch_size,
logger = logger
)
trainer.fit(gan, dm)
```
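With the logger configured above (save_dir = run_dir, name = 'tb_logs'), the training traces land under {run_dir}/tb_logs and can be inspected with the usual tensorboard --logdir command, e.g. pointing it at ./run/SHEEP3/tb_logs (path assumed from the run_dir used in this notebook).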
%% Cell type:markdown id: tags:
## Step 6 - Reload a checkpoint
%% Cell type:code id: tags:
```python
gan = GAN.load_from_checkpoint('./run/SHEEP3/models/last-v1.ckpt')
```
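Note on the file name: with save_last=True, Lightning normally writes last.ckpt; when a checkpoint with that name already exists from a previous run, a versioned name such as last-v1.ckpt is used instead, which is why the path above may need adjusting. A quick way to see what is actually on disk (path assumed from the run_dir above):
```python
import glob
print(sorted(glob.glob('./run/SHEEP3/models/*.ckpt')))
```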
%% Cell type:code id: tags:
```python
nb_images = 32
# z = np.random.normal(size=(nb_images,latent_dim))
z = torch.randn(nb_images, latent_dim)
print('z size : ',z.size())
fake_img = gan.generator.forward(z)
print('fake_img : ', fake_img.size())
nimg = fake_img.detach().numpy()
fidle.scrawler.images( nimg.reshape(-1,28,28), indices=range(nb_images), columns=12, x_size=1, y_size=1,
y_padding=0,spines_alpha=0, save_as='01-Sheeps')
```
%% Cell type:code id: tags:
```python
```
%% Cell type:code id: tags:
```python
```
DCGAN-PyTorch/modules/Discriminators.py (+45 −1)
@@ -19,7 +19,7 @@ class Discriminator_1(nn.Module):
         super().__init__()
         self.img_shape = data_shape
-        print('init discriminator : ', data_shape, ' to sigmoid')
+        print('init discriminator 1 : ', data_shape, ' to sigmoid')
         self.model = nn.Sequential(

@@ -38,4 +38,48 @@ class Discriminator_1(nn.Module):
         # img_flat = img.view(img.size(0), -1)
         validity = self.model(img)
         return validity
+
+
+class Discriminator_2(nn.Module):
+
+    def __init__(self, latent_dim=None, data_shape=None):
+        super().__init__()
+        self.img_shape = data_shape
+        print('init discriminator 2 : ', data_shape, ' to sigmoid')
+
+        self.model = nn.Sequential(
+
+            nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1),
+            nn.ReLU(),
+            nn.BatchNorm2d(32),
+            nn.Dropout2d(0.25),
+
+            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
+            nn.ReLU(),
+            nn.BatchNorm2d(64),
+            nn.Dropout2d(0.25),
+
+            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
+            nn.ReLU(),
+            nn.BatchNorm2d(128),
+            nn.Dropout2d(0.25),
+
+            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
+            nn.ReLU(),
+            nn.BatchNorm2d(256),
+            nn.Dropout2d(0.25),
+
+            nn.Flatten(),
+            nn.Linear(12544, 1),
+            nn.Sigmoid(),
+        )
+
+    def forward(self, img):
+        img_nchw = img.permute(0, 3, 1, 2)  # from NHWC to NCHW
+        validity = self.model(img_nchw)
+        return validity
\ No newline at end of file
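A quick sanity check of the nn.Linear(12544, 1) input size: with 28×28 inputs, the two stride-2 convolutions reduce the spatial size 28 → 14 → 7 (the stride-1 convolutions keep it unchanged), and 256 channels × 7 × 7 = 12544 flattened features. A tiny smoke test along those lines (assuming the 28×28×1 QuickDraw shape used by the notebook):
```python
import torch
from modules.Discriminators import Discriminator_2

d = Discriminator_2(latent_dim=128, data_shape=(28, 28, 1))
x = torch.randn(4, 28, 28, 1)   # NHWC batch, as delivered by the DataModule
print(d(x).shape)               # expected: torch.Size([4, 1])
```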
DCGAN-PyTorch/modules/Generators.py (+2 −2)
@@ -70,14 +70,14 @@ class Generator_2(nn.Module):
             nn.UpsamplingNearest2d( scale_factor=2 ),
             # nn.UpsamplingBilinear2d( scale_factor=2 ),
             nn.Conv2d( 64, 128, (3,3), stride=(1,1), padding=(1,1) ),
-            nn.BatchNorm2d(128),
             nn.ReLU(),
+            nn.BatchNorm2d(128),
             nn.UpsamplingNearest2d( scale_factor=2 ),
             # nn.UpsamplingBilinear2d( scale_factor=2 ),
             nn.Conv2d( 128, 256, (3,3), stride=(1,1), padding=(1,1) ),
-            nn.BatchNorm2d(256),
             nn.ReLU(),
+            nn.BatchNorm2d(256),
             nn.Conv2d( 256, 1, (5,5), stride=(1,1), padding=(2,2) ),
             nn.Sigmoid()
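In short, the two modified lines move nn.BatchNorm2d() after the nn.ReLU() activation in both upsampling blocks, so Generator_2 follows the same Conv → ReLU → BatchNorm ordering used by the new Discriminator_2.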