Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Showing 703 additions and 0 deletions
src/methods/domer/image-colorization/input/0.jpg

24.3 KiB

src/methods/domer/image-colorization/input/1.jpg

182 KiB

src/methods/domer/image-colorization/input/2.jpg

159 KiB

src/methods/domer/image-colorization/input/3.jpg

159 KiB

src/methods/domer/image-colorization/input/6.jpg

13.4 KiB

src/methods/domer/image-colorization/input/7.jpg

14.9 KiB

%% Cell type:code id: tags:
```
from network import ColorizeNet
model = ColorizeNet()
model
```
%% Output
ColorizeNet(
  (encoder): Sequential(
    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (4): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (5): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (downsample): Sequential(
          (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
          (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): BasicBlock(
        (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (decoder): Sequential(
    (0): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(128, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (1): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (2): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(32, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(32, 2, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(2, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): Sigmoid()
      )
    )
  )
)
%% Cell type:code id: tags:
```
import torch
# sanity check: a (N, 1, H, W) grayscale batch should map to (N, 2, H, W) ab channels
model(torch.rand((2, 1, 224, 224))).shape
```
%% Output
torch.Size([2, 2, 224, 224])
%% Cell type:code id: tags:
```
from utils import count_params
count_params(model)  # number of trainable parameters
```
%% Output
1166016
import torch.nn as nn
import torchvision.models as models


class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels,
                 activation=None, upsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=5, stride=1, padding=2, bias=False
        )
        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels,
            kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)
        if activation is not None:
            self.activation = activation
        else:
            # 1x1 projection for the residual branch when the channel count changes
            self.res_conv = nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=1, stride=1, bias=False
            )
            self.res_bn = nn.BatchNorm2d(num_features=out_channels)
        self.upsample = upsample

    def forward(self, t):
        res = t
        t = self.conv1(t)
        t = self.bn1(t)
        t = self.relu(t)
        t = self.conv2(t)
        t = self.bn2(t)
        if self.upsample is not None:
            res = self.res_conv(res)
            res = self.res_bn(res)
            t += res
            t = self.relu(t)
            t = self.upsample(t)
        else:
            t += res
            t = self.activation(t)
        return t


class ColorizeNet(nn.Module):
    def __init__(self):
        super().__init__()
        # set pretrained=True before starting the training process
        resnet18 = models.resnet18(pretrained=False)
        # change the first conv layer to accept a single (grayscale) channel
        # by averaging its filters across the RGB input dimension
        resnet18.conv1.weight = nn.Parameter(
            resnet18.conv1.weight.mean(dim=1).unsqueeze(dim=1))
        # use the first 6 children of ResNet-18 (conv1 through layer2) as encoder
        self.encoder = nn.Sequential(
            *list(resnet18.children())[:6]
        )
        self.decoder = nn.Sequential(
            self._make_layer(BasicBlock, 128, 64, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 64, 32, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 32, 2, nn.Sigmoid())
        )

    def _make_layer(self, block, in_channels, out_channels, activation):
        upsample = nn.Upsample(scale_factor=2, mode='nearest')
        layers = []
        layers.append(block(in_channels, out_channels, upsample=upsample))
        layers.append(block(out_channels, out_channels, activation=activation))
        return nn.Sequential(*layers)

    def forward(self, t):
        t = self.encoder(t)
        t = self.decoder(t)
        return t
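
Since the notebook above only instantiates the network, here is a minimal training-step sketch showing how ColorizeNet and the GrayscaleImageFolder loader (defined in the utilities further down) would fit together. The data directory, crop size, batch size, learning rate, and the choice of MSE loss over the normalized ab channels are assumptions, not taken from this diff:

```
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

from network import ColorizeNet
from utils import GrayscaleImageFolder

# hypothetical dataset root; ImageFolder expects class subdirectories
train_set = GrayscaleImageFolder(
    'data/train',
    transform=transforms.Compose([transforms.RandomCrop(224)])
)
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)

model = ColorizeNet()
criterion = torch.nn.MSELoss()  # regress the normalized ab channels
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

model.train()
for img_gray, img_ab, _ in train_loader:
    optimizer.zero_grad()
    pred_ab = model(img_gray)          # (N, 2, 224, 224), values in [0, 1]
    loss = criterion(pred_ab, img_ab)
    loss.backward()
    optimizer.step()
```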
src/methods/domer/image-colorization/output/0.jpg

25.8 KiB

src/methods/domer/image-colorization/output/1.jpg

188 KiB

src/methods/domer/image-colorization/output/2.jpg

167 KiB

src/methods/domer/image-colorization/output/3.jpg

166 KiB

src/methods/domer/image-colorization/output/6.jpg

14 KiB

src/methods/domer/image-colorization/output/7.jpg

15.6 KiB

matplotlib==3.3.2
numpy==1.19.4
Pillow==8.0.1
scikit-image==0.17.2
torch==1.7.0+cpu
torchvision==0.8.1+cpu
tqdm==4.51.0
\ No newline at end of file
import torch
from torchvision import transforms, datasets
import numpy as np
from PIL import Image
from skimage.color import rgb2lab, rgb2gray, lab2rgb


def count_params(model):
    '''
    Returns the number of trainable parameters in a model.
    '''
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


class GrayscaleImageFolder(datasets.ImageFolder):
    '''
    Custom dataset that applies various operations on images before loading them.
    '''
    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_orig = self.transform(img)   # apply transforms
            img_orig = np.asarray(img_orig)  # convert to numpy array
            img_lab = rgb2lab(img_orig)      # convert RGB image to LAB
            img_ab = img_lab[:, :, 1:3]      # separate AB channels from LAB
            img_ab = (img_ab + 128) / 255    # normalize the pixel values to [0, 1]
            # transpose image from HxWxC to CxHxW and turn it into a tensor
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1))).float()
            img_orig = rgb2gray(img_orig)    # convert RGB to grayscale
            # add a channel axis to the grayscale image and turn it into a tensor
            img_orig = torch.from_numpy(img_orig).unsqueeze(0).float()
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img_orig, img_ab, target
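
The `(img_ab + 128) / 255` step maps the nominal LAB a/b range of roughly [-128, 127] into [0, 1], which is why the decoder ends in a Sigmoid; `to_rgb` below applies the inverse. A quick round-trip check of that arithmetic:

```
import numpy as np

ab = np.array([-128.0, 0.0, 127.0])  # nominal LAB a/b extremes
norm = (ab + 128) / 255              # -> [0.0, 0.502, 1.0], Sigmoid's output range
restored = norm * 255 - 128          # inverse, as applied in to_rgb below
assert np.allclose(restored, ab)
```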
def load_gray(path, max_size=360, shape=None):
    '''
    Loads an image as grayscale, resizes it according to max_size or shape,
    applies transformations and converts it to a model-compatible shape.
    '''
    img_gray = Image.open(path).convert('L')
    if max(img_gray.size) > max_size:
        size = max_size
    else:
        size = max(img_gray.size)
    if shape is not None:
        size = shape
    img_transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor()
    ])
    img_gray = img_transform(img_gray).unsqueeze(0)
    return img_gray
def to_rgb(img_l, img_ab):
    '''
    Concatenates the lightness (grayscale) and AB channels,
    and converts the resulting LAB image to RGB.
    '''
    if img_l.shape == img_ab.shape:
        img_lab = torch.cat((img_l, img_ab), 1).numpy().squeeze()
    else:
        img_lab = torch.cat(
            (img_l, img_ab[:, :, :img_l.size(2), :img_l.size(3)]),
            dim=1
        ).numpy().squeeze()
    img_lab = img_lab.transpose(1, 2, 0)  # transpose image to HxWxC
    img_lab[:, :, 0] = img_lab[:, :, 0] * 100  # scale L channel back to 0-100
    img_lab[:, :, 1:] = img_lab[:, :, 1:] * 255 - 128  # un-normalize AB channels
    img_rgb = lab2rgb(img_lab.astype(np.float64))  # convert LAB image to RGB
    return img_rgb
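
Tying the utilities together, a minimal inference sketch for producing colorized outputs like the ones listed above. The checkpoint filename is hypothetical (this diff does not name a weights file), and untrained weights would of course yield meaningless colors; the input/output paths follow the folders added in this diff:

```
import torch
import matplotlib.pyplot as plt

from network import ColorizeNet
from utils import load_gray, to_rgb

model = ColorizeNet()
# hypothetical checkpoint name; load whatever the training run saved
model.load_state_dict(torch.load('model_weights.pth', map_location='cpu'))
model.eval()

img_l = load_gray('input/0.jpg', shape=(224, 224))  # (1, 1, 224, 224), L in [0, 1]
with torch.no_grad():
    img_ab = model(img_l)                           # (1, 2, 224, 224), normalized ab
img_rgb = to_rgb(img_l, img_ab)                     # (224, 224, 3) float RGB in [0, 1]
plt.imsave('output/0.jpg', img_rgb)
```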