Compare revisions
Showing 1574 additions and 0 deletions
File added
%% Cell type:code id: tags:
```
from network import ColorizeNet
model = ColorizeNet()
model
```
%% Output
ColorizeNet(
  (encoder): Sequential(
    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (4): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (5): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (downsample): Sequential(
          (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
          (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): BasicBlock(
        (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (decoder): Sequential(
    (0): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(128, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (1): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (2): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(32, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(32, 2, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(2, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): Sigmoid()
      )
    )
  )
)
%% Cell type:code id: tags:
```
import torch
# to check if our model is working as expected
model(torch.rand((2, 1, 224, 224))).shape
```
%% Output
torch.Size([2, 2, 224, 224])
%% Cell type:code id: tags:
```
from utils import count_params
count_params(model) # no of trainable parameters
```
%% Output
1166016
import torch.nn as nn
import torchvision.models as models


class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels,
                 activation=None, upsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=5, stride=1, padding=2, bias=False
        )
        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels,
            kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)
        if activation is not None:
            self.activation = activation
        else:
            # upsampling blocks project the residual to the new channel count
            self.res_conv = nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=1, stride=1, bias=False
            )
            self.res_bn = nn.BatchNorm2d(num_features=out_channels)
        self.upsample = upsample

    def forward(self, t):
        res = t
        t = self.conv1(t)
        t = self.bn1(t)
        t = self.relu(t)
        t = self.conv2(t)
        t = self.bn2(t)
        if self.upsample is not None:
            res = self.res_conv(res)
            res = self.res_bn(res)
            t += res
            t = self.relu(t)
            t = self.upsample(t)
        else:
            t += res
            t = self.activation(t)
        return t


class ColorizeNet(nn.Module):
    def __init__(self):
        super().__init__()
        # make pretrained=True before starting the training process
        resnet18 = models.resnet18(pretrained=False)
        # change the first conv layer to accept a single (grayscale) channel
        resnet18.conv1.weight = nn.Parameter(
            resnet18.conv1.weight.mean(dim=1).unsqueeze(dim=1))
        # use the first six children of ResNet-18 (conv1 through layer2) as the encoder
        self.encoder = nn.Sequential(
            *list(resnet18.children())[:6]
        )
        self.decoder = nn.Sequential(
            self._make_layer(BasicBlock, 128, 64, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 64, 32, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 32, 2, nn.Sigmoid())
        )

    def _make_layer(self, block, in_channels, out_channels, activation):
        upsample = nn.Upsample(scale_factor=2, mode='nearest')
        layers = []
        layers.append(block(in_channels, out_channels, upsample=upsample))
        layers.append(block(out_channels, out_channels, activation=activation))
        return nn.Sequential(*layers)

    def forward(self, t):
        t = self.encoder(t)
        t = self.decoder(t)
        return t
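
The comment in `ColorizeNet.__init__` says to set `pretrained=True` before training. As a minimal, hypothetical sketch (not part of the repository, and assuming the torchvision ImageNet weights can be downloaded), this is what that switch amounts to: the pretrained first convolution is collapsed to a single input channel by averaging its RGB filters, and the first six children (conv1 through layer2, overall stride 8) become the encoder.

```
# Hedged sketch only: replicate the grayscale-encoder trick on a pretrained ResNet-18.
import torch.nn as nn
import torchvision.models as models

resnet18 = models.resnet18(pretrained=True)  # downloads the ImageNet weights
# average the (64, 3, 7, 7) RGB filters into (64, 1, 7, 7) grayscale filters
resnet18.conv1.weight = nn.Parameter(resnet18.conv1.weight.mean(dim=1, keepdim=True))
encoder = nn.Sequential(*list(resnet18.children())[:6])  # conv1 ... layer2, output stride 8
```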
src/methods/domer/image-colorization/output/0.jpg (25.8 KiB)
src/methods/domer/image-colorization/output/1.jpg (188 KiB)
src/methods/domer/image-colorization/output/2.jpg (167 KiB)
src/methods/domer/image-colorization/output/3.jpg (166 KiB)
src/methods/domer/image-colorization/output/6.jpg (14 KiB)
src/methods/domer/image-colorization/output/7.jpg (15.6 KiB)

matplotlib==3.3.2
numpy==1.19.4
Pillow==8.0.1
scikit-image==0.17.2
torch==1.7.0+cpu
torchvision==0.8.1+cpu
tqdm==4.51.0
\ No newline at end of file
%% Cell type:code id: tags:
```
# from google.colab import drive
# drive.mount('/content/drive')
```
%% Output
Mounted at /content/drive
%% Cell type:code id: tags:
```
# Download and unzip (2.2GB)
# !wget http://data.csail.mit.edu/places/places205/testSetPlaces205_resize.tar.gz
# !mkdir images/
# !tar -xzf drive/MyDrive/testSetPlaces205_resize.tar.gz -C 'images/'
```
%% Cell type:code id: tags:
```
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from tqdm import tqdm
from network import ColorizeNet
from utils import GrayscaleImageFolder
```
%% Cell type:code id: tags:
```
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
```
%% Cell type:code id: tags:
```
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip()
])
train_set = GrayscaleImageFolder('images/', transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True, num_workers=4)
```
%% Cell type:code id: tags:
```
criterion = nn.MSELoss()
model = ColorizeNet().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[8, 24], gamma=1/3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=1/3, patience=5, verbose=True)
```
%% Cell type:code id: tags:
```
try:
    checkpoint = torch.load('drive/MyDrive/checkpoint_0.001.pth', map_location=device)
    start_epoch = checkpoint['next_epoch']
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
except:
    # no checkpoint found: start training from scratch
    start_epoch = 0
```
%% Cell type:code id: tags:
```
num_epochs = 64
for epoch in range(start_epoch, num_epochs):
    train_loss = 0
    loop = tqdm(train_loader)
    for batch in loop:
        in_gray, in_ab = batch[0].to(device), batch[1].to(device)
        out_ab = model(in_gray)
        loss = criterion(out_ab, in_ab)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()*in_gray.size(0)
        loop.set_description(f'Epoch [{epoch+1:2d}/{num_epochs}]')
        loop.set_postfix(loss=train_loss)

    scheduler.step(train_loss)
    checkpoint = {
        'next_epoch': epoch + 1,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict()
    }
    torch.save(checkpoint, f'drive/MyDrive/checkpoint_{scheduler._last_lr[0]}.pth')
```
%% Output
Epoch [39/64]: 100%|██████████| 321/321 [12:00<00:00, 2.24s/it, loss=97.4]
Epoch [40/64]: 100%|██████████| 321/321 [11:30<00:00, 2.15s/it, loss=98]
Epoch [41/64]: 100%|██████████| 321/321 [11:18<00:00, 2.12s/it, loss=97.4]
Epoch [42/64]: 100%|██████████| 321/321 [11:28<00:00, 2.15s/it, loss=103]
Epoch [43/64]: 100%|██████████| 321/321 [11:23<00:00, 2.13s/it, loss=98.4]
Epoch [44/64]: 100%|██████████| 321/321 [11:15<00:00, 2.10s/it, loss=97.3]
Epoch [45/64]: 100%|██████████| 321/321 [11:29<00:00, 2.15s/it, loss=97.6]
Epoch [46/64]: 100%|██████████| 321/321 [11:25<00:00, 2.14s/it, loss=96.5]
Epoch [47/64]: 100%|██████████| 321/321 [11:40<00:00, 2.18s/it, loss=96.7]
Epoch [48/64]: 100%|██████████| 321/321 [11:44<00:00, 2.19s/it, loss=96.3]
Epoch [49/64]: 100%|██████████| 321/321 [11:13<00:00, 2.10s/it, loss=96.5]
Epoch [50/64]: 100%|██████████| 321/321 [11:09<00:00, 2.09s/it, loss=96.4]
Epoch [51/64]: 100%|██████████| 321/321 [11:09<00:00, 2.09s/it, loss=95.7]
Epoch [52/64]: 100%|██████████| 321/321 [11:13<00:00, 2.10s/it, loss=95.9]
Epoch [53/64]: 100%|██████████| 321/321 [11:08<00:00, 2.08s/it, loss=95.8]
Epoch [54/64]: 100%|██████████| 321/321 [11:31<00:00, 2.15s/it, loss=95.4]
Epoch [55/64]: 100%|██████████| 321/321 [11:54<00:00, 2.23s/it, loss=95.5]
Epoch [56/64]: 100%|██████████| 321/321 [12:11<00:00, 2.28s/it, loss=95.5]
Epoch [57/64]: 100%|██████████| 321/321 [12:00<00:00, 2.24s/it, loss=94.9]
Epoch [58/64]: 100%|██████████| 321/321 [11:54<00:00, 2.23s/it, loss=94.9]
Epoch [59/64]: 100%|██████████| 321/321 [11:56<00:00, 2.23s/it, loss=94.9]
Epoch [60/64]: 100%|██████████| 321/321 [12:06<00:00, 2.26s/it, loss=94.3]
Epoch [61/64]: 100%|██████████| 321/321 [12:03<00:00, 2.26s/it, loss=95]
Epoch [62/64]: 100%|██████████| 321/321 [12:01<00:00, 2.25s/it, loss=95]
Epoch [63/64]: 100%|██████████| 321/321 [12:05<00:00, 2.26s/it, loss=96.7]
Epoch [64/64]: 100%|██████████| 321/321 [12:10<00:00, 2.27s/it, loss=95]
%% Cell type:code id: tags:
```
torch.save(model.state_dict(), f'./models/model.pth') # save the model separately from the checkpoint file
```
import torch
from torchvision import transforms, datasets
import numpy as np
from PIL import Image
from skimage.color import rgb2lab, rgb2gray, lab2rgb


def count_params(model):
    '''
    Returns the number of trainable parameters of a model.
    '''
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


class GrayscaleImageFolder(datasets.ImageFolder):
    '''
    Custom dataset that converts each image into its grayscale (L) and AB
    components before loading.
    '''
    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_orig = self.transform(img)  # apply transforms
            img_orig = np.asarray(img_orig)  # convert to numpy array

            img_lab = rgb2lab(img_orig)  # convert RGB image to LAB
            img_ab = img_lab[:, :, 1:3]  # separate AB channels from LAB
            img_ab = (img_ab + 128) / 255  # normalize the pixel values
            # transpose image from HxWxC to CxHxW and turn it into a tensor
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1))).float()

            img_orig = rgb2gray(img_orig)  # convert RGB to grayscale
            # add a channel axis to the grayscale image and turn it into a tensor
            img_orig = torch.from_numpy(img_orig).unsqueeze(0).float()

        if self.target_transform is not None:
            target = self.target_transform(target)
        return img_orig, img_ab, target


def load_gray(path, max_size=360, shape=None):
    '''
    Loads an image as grayscale, resizes it as requested,
    and converts it to a model-compatible shape.
    '''
    img_gray = Image.open(path).convert('L')
    if max(img_gray.size) > max_size:
        size = max_size
    else:
        size = max(img_gray.size)
    if shape is not None:
        size = shape

    img_transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor()
    ])
    img_gray = img_transform(img_gray).unsqueeze(0)
    return img_gray


def to_rgb(img_l, img_ab):
    '''
    Concatenates the lightness (grayscale) and AB channels,
    and converts the resulting LAB image to RGB.
    '''
    if img_l.shape == img_ab.shape:
        img_lab = torch.cat((img_l, img_ab), 1).numpy().squeeze()
    else:
        img_lab = torch.cat(
            (img_l, img_ab[:, :, :img_l.size(2), :img_l.size(3)]),
            dim=1
        ).numpy().squeeze()
    img_lab = img_lab.transpose(1, 2, 0)  # transpose image to HxWxC
    img_lab[:, :, 0] = img_lab[:, :, 0] * 100  # scale L channel to the 0-100 range
    img_lab[:, :, 1:] = img_lab[:, :, 1:] * 255 - 128  # un-normalize the AB channels
    img_rgb = lab2rgb(img_lab.astype(np.float64))  # convert LAB image to RGB
    return img_rgb
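
Put together, `load_gray` and `to_rgb` bracket a single-image inference pass. A hypothetical sketch follows (the weight file reuses the `models/model.pth` path saved by the training notebook; the input image path and the 224x224 shape are assumptions, chosen so the spatial size stays divisible by 8):

```
# Hedged inference sketch using the utilities above; paths are assumptions.
import torch
from network import ColorizeNet
from utils import load_gray, to_rgb

model = ColorizeNet()
model.load_state_dict(torch.load('models/model.pth', map_location='cpu'))
model.eval()

img_l = load_gray('input/0.jpg', shape=(224, 224))  # (1, 1, 224, 224) tensor in [0, 1]
with torch.no_grad():
    img_ab = model(img_l)                            # (1, 2, 224, 224) AB channels in [0, 1]
img_rgb = to_rgb(img_l, img_ab)                      # 224 x 224 x 3 RGB array in [0, 1]
```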
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:35:12 2024

@author: stani1
"""
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
"""
import numpy as np
from PIL import Image
import subprocess
import os

from src.forward_model import CFA
from src.methods.baseline.demo_reconstruction import naive_interpolation


def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    current_directory = os.getcwd()  # resolve paths from the current working directory
    input_path = 'src/methods/domer/image-colorization/input/0.jpg'
    input_path = os.path.join(current_directory, input_path)  # location where the grayscale input is stored
    output_path = 'src/methods/domer/image-colorization/output/0.jpg'
    output_path = os.path.join(current_directory, output_path)
    # venv_path = 'src/methods/domer/image-colorization/image-colorization/bin/activate'
    # venv_path = os.path.join(current_directory, venv_path)  # a dedicated venv is not needed here

    y2 = Image.fromarray(y)
    y2 = y2.convert('L')
    y2.save(input_path)
    resolution = np.shape(y)[0]

    # activation_venv = ['source ', venv_path]
    # subprocess.run(activation_venv, shell=True)

    n = y2.size[0]
    # if y is a float image in [0, 1], a scaling to 0-255 may be needed before saving:
    # for i in range(n):
    #     for j in range(n):
    #         y2.putpixel((i, j), y.getpixel((i, j)) * 255)

    command = [
        'python', 'src/methods/domer/image-colorization/colorize.py',
        '-i', input_path,
        '-o', output_path,
        '-r', str(resolution)
    ]
    subprocess.run(command)  # call the colorization network with the input/output locations and the resolution

    # deactivation_venv = "deactivate"
    # subprocess.run(deactivation_venv, shell=True)

    res = Image.open(output_path)  # read the result produced by the network
    res = np.asarray(res)  # convert the result back into a numpy array
    return res
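
A hypothetical call of this wrapper, assuming it is run from the project root so the relative paths above resolve (the random uint8 array is a stand-in; the project's pipeline passes floats in [0, 1], which is what the commented-out scaling loop hints at):

```
# Hedged usage sketch; the input here is a random stand-in, not project data.
import numpy as np

y = (np.random.rand(224, 224) * 255).astype(np.uint8)  # stand-in mosaicked image
rgb_hat = run_reconstruction(y, 'bayer')                # array read back from output/0.jpg
print(rgb_hat.shape)
```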
File added
This diff is collapsed.
import numpy as np

from src.forward_model import CFA


def HQ_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
    """Performs a high-quality linear interpolation of the missing pixels.

    Args:
        op (CFA): CFA operator.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image.
    """
    z = op.adjoint(y)

    if op.cfa == 'bayer':
        res = np.empty(op.input_shape)
        res[:, :, 0] = varying_kernel_convolution_HQ(y[:, :], K_red_list)
        res[:, :, 1] = varying_kernel_convolution_HQ(y[:, :], K_green_list)
        res[:, :, 2] = varying_kernel_convolution_HQ(y[:, :], K_blue_list)
    else:
        res = np.empty(op.input_shape)
        bayer_y = quad_bayer_to_bayer(y)
        res[:, :, 0] = varying_kernel_convolution_HQ(bayer_y[:, :], K_red_list)
        res[:, :, 1] = varying_kernel_convolution_HQ(bayer_y[:, :], K_green_list)
        res[:, :, 2] = varying_kernel_convolution_HQ(bayer_y[:, :], K_blue_list)

    return res


def extract_padded(M, size, i, j):
    N_i, N_j = M.shape
    res = np.zeros((size, size))
    middle_size = int((size - 1) / 2)

    for ii in range(-middle_size, middle_size + 1):
        for jj in range(-middle_size, middle_size + 1):
            if i + ii >= 0 and i + ii < N_i and j + jj >= 0 and j + jj < N_j:
                res[middle_size + ii, middle_size + jj] = M[i + ii, j + jj]

    return res


def varying_kernel_convolution(M, K_list):
    N_i, N_j = M.shape
    res = np.zeros_like(M)

    for i in range(N_i):
        for j in range(N_j):
            res[i, j] = np.sum(extract_padded(M, K_list[4 * (i % 4) + j % 4].shape[0], i, j) * K_list[4 * (i % 4) + j % 4])

    np.clip(res, 0, 1, res)

    return res


def varying_kernel_convolution_HQ(M, K_list):
    res = np.zeros_like(M)
    M_padded = np.pad(M, 2, "constant", constant_values=0)
    N_i, N_j = M_padded.shape

    for i in range(2, N_i - 2):
        for j in range(2, N_j - 2):
            res[i - 2, j - 2] = np.sum(extract_padded(M_padded, 5, i, j) * K_list[2 * (i % 2) + j % 2])

    np.clip(res, 0, 1, res)

    return res


def quad_bayer_to_bayer(y):
    # work on a copy so the caller's mosaic is not modified in place
    res = y.copy()
    N_i, N_j = y.shape

    # swap the two middle columns of every 4-column Quad Bayer period
    # (.copy() is required for a true swap; otherwise buff is only a view)
    for j in range(int(N_j / 4)):
        buff = res[:, j * 4 + 1].copy()
        res[:, j * 4 + 1] = res[:, j * 4 + 2]
        res[:, j * 4 + 2] = buff

    # swap the two middle rows of every 4-row period
    for i in range(int(N_i / 4)):
        buff = res[i * 4 + 1, :].copy()
        res[i * 4 + 1, :] = res[i * 4 + 2, :]
        res[i * 4 + 2, :] = buff

    # swap the two remaining off-diagonal samples of each 4x4 cell
    for i in range(int(N_i / 4)):
        for j in range(int(N_j / 4)):
            buff = res[i * 4 + 2, j * 4 + 1]
            res[i * 4 + 2, j * 4 + 1] = res[i * 4 + 1, j * 4 + 2]
            res[i * 4 + 1, j * 4 + 2] = buff

    return res


K_identity = np.zeros((5, 5))
K_identity[2, 2] = 1

K_g_r_and_b = np.array([[0, 0, -1, 0, 0], [0, 0, 2, 0, 0], [-1, 2, 4, 2, -1], [0, 0, 2, 0, 0], [0, 0, -1, 0, 0]]) / 8

K_r_and_b_g_rrow_bcol = np.array([[0, 0, 1/2, 0, 0], [0, -1, 0, -1, 0], [-1, 4, 5, 4, -1], [0, -1, 0, -1, 0], [0, 0, 1/2, 0, 0]]) / 8

K_r_and_b_g_brow_rcol = K_r_and_b_g_rrow_bcol.T

K_r_b = np.array([[0, 0, -3/2, 0, 0], [0, 2, 0, 2, 0], [-3/2, 0, 6, 0, -3/2], [0, 2, 0, 2, 0], [0, 0, -3/2, 0, 0]]) / 8

K_green_list = [K_identity,
                K_g_r_and_b,
                K_g_r_and_b,
                K_identity]

K_red_list = [K_r_and_b_g_rrow_bcol,
              K_identity,
              K_r_b,
              K_r_and_b_g_brow_rcol]

K_blue_list = [K_r_and_b_g_brow_rcol,
               K_r_b,
               K_identity,
               K_r_and_b_g_rrow_bcol]
\ No newline at end of file
import numpy as np

from src.forward_model import CFA
from src.methods.girardey.methods import HQ_interpolation


def run_reconstruction_HQ(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA(cfa, input_shape)

    res = HQ_interpolation(op, y)

    return res
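
A minimal usage sketch for this entry point (the random array below is a stand-in called from the same module context; in the project, `y` comes from the CFA forward model):

```
# Hedged example: demosaick a stand-in Bayer mosaic with values in [0, 1].
import numpy as np

y = np.random.rand(256, 256)
rgb_hat = run_reconstruction_HQ(y, 'bayer')  # (256, 256, 3) demosaicked estimate
```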
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.khattabi.utils import HA
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
# Performing the reconstruction.
# TODO
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa, input_shape)
y_adjoint=op.adjoint(y)
res = HA(y_adjoint)
return res
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
"""A file containing a (pretty useless) reconstruction.
It serves as example of how the project works.
This file should NOT be modified.
"""
import numpy as np
from scipy.signal import convolve2d

from src.forward_model import CFA


def HA(y_adjoint):
    height, width = y_adjoint.shape[:2]

    # Green channel interpolation
    for i in range(2, height - 2):
        for j in range(2, width - 2):
            # Red pixels
            if y_adjoint[i, j, 1] == 0 and y_adjoint[i, j, 0] != 0:
                grad_y = np.abs(y_adjoint[i-1, j, 1] - y_adjoint[i+1, j, 1]) + np.abs(2*y_adjoint[i, j, 0] - y_adjoint[i-2, j, 0] - y_adjoint[i+2, j, 0])
                grad_x = np.abs(y_adjoint[i, j-1, 1] - y_adjoint[i, j+1, 1]) + np.abs(2*y_adjoint[i, j, 0] - y_adjoint[i, j-2, 0] - y_adjoint[i, j+2, 0])
                if grad_x < grad_y:
                    y_adjoint[i, j, 1] = (y_adjoint[i, j-1, 1] + y_adjoint[i, j+1, 1])/2 + (2*y_adjoint[i, j, 0] - y_adjoint[i, j-2, 0] - y_adjoint[i, j+2, 0])/4
                elif grad_x > grad_y:
                    y_adjoint[i, j, 1] = (y_adjoint[i-1, j, 1] + y_adjoint[i+1, j, 1])/2 + (2*y_adjoint[i, j, 0] - y_adjoint[i-2, j, 0] - y_adjoint[i+2, j, 0])/4
                else:
                    y_adjoint[i, j, 1] = (y_adjoint[i-1, j, 1] + y_adjoint[i+1, j, 1] + y_adjoint[i, j-1, 1] + y_adjoint[i, j+1, 1])/4 + \
                        (2*y_adjoint[i, j, 0] - y_adjoint[i, j-2, 0] - y_adjoint[i, j+2, 0] + 2*y_adjoint[i, j, 0] - y_adjoint[i-2, j, 0] - y_adjoint[i+2, j, 0])/8
            # Blue pixels
            elif y_adjoint[i, j, 1] == 0 and y_adjoint[i, j, 2] != 0:
                grad_y = np.abs(y_adjoint[i-1, j, 1] - y_adjoint[i+1, j, 1]) + np.abs(2*y_adjoint[i, j, 2] - y_adjoint[i-2, j, 2] - y_adjoint[i+2, j, 2])
                grad_x = np.abs(y_adjoint[i, j-1, 1] - y_adjoint[i, j+1, 1]) + np.abs(2*y_adjoint[i, j, 2] - y_adjoint[i, j-2, 2] - y_adjoint[i, j+2, 2])
                if grad_x < grad_y:
                    y_adjoint[i, j, 1] = (y_adjoint[i, j-1, 1] + y_adjoint[i, j+1, 1])/2 + (2*y_adjoint[i, j, 2] - y_adjoint[i, j-2, 2] - y_adjoint[i, j+2, 2])/4
                elif grad_x > grad_y:
                    y_adjoint[i, j, 1] = (y_adjoint[i-1, j, 1] + y_adjoint[i+1, j, 1])/2 + (2*y_adjoint[i, j, 2] - y_adjoint[i-2, j, 2] - y_adjoint[i+2, j, 2])/4
                else:
                    y_adjoint[i, j, 1] = (y_adjoint[i-1, j, 1] + y_adjoint[i+1, j, 1] + y_adjoint[i, j-1, 1] + y_adjoint[i, j+1, 1])/4 + \
                        (2*y_adjoint[i, j, 2] - y_adjoint[i, j-2, 2] - y_adjoint[i, j+2, 2] + 2*y_adjoint[i, j, 2] - y_adjoint[i-2, j, 2] - y_adjoint[i+2, j, 2])/8

    # Red and blue channel interpolation
    for i in range(1, height - 1):
        for j in range(1, width - 1):
            if y_adjoint[i, j, 2] != 0:
                y_adjoint[i, j, 0] = (y_adjoint[i-1, j-1, 0] + y_adjoint[i-1, j+1, 0] + y_adjoint[i+1, j-1, 0] + y_adjoint[i+1, j+1, 0]) / 4
            elif y_adjoint[i, j, 0] != 0:
                y_adjoint[i, j, 2] = (y_adjoint[i-1, j-1, 2] + y_adjoint[i-1, j+1, 2] + y_adjoint[i+1, j-1, 2] + y_adjoint[i+1, j+1, 2]) / 4
            else:
                y_adjoint[i, j] = y_adjoint[i, j]

    for i in range(1, height - 1):
        for j in range(1, width - 1):
            if y_adjoint[i, j, 0] == y_adjoint[i, j, 2]:
                y_adjoint[i, j, 0] = (y_adjoint[i-1, j, 0] + y_adjoint[i, j-1, 0] + y_adjoint[i+1, j, 0] + y_adjoint[i, j+1, 0]) / 4
                y_adjoint[i, j, 2] = (y_adjoint[i-1, j, 2] + y_adjoint[i, j-1, 2] + y_adjoint[i+1, j, 2] + y_adjoint[i, j+1, 2]) / 4

    return y_adjoint
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller