Compare revisions: 1881 additions and 0 deletions
File added
%% Cell type:code id: tags:
```
from network import ColorizeNet
model = ColorizeNet()
model
```
%% Output
ColorizeNet(
  (encoder): Sequential(
    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
    (4): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (5): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (downsample): Sequential(
          (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
          (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        )
      )
      (1): BasicBlock(
        (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
  )
  (decoder): Sequential(
    (0): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(128, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (1): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(64, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): ReLU(inplace=True)
      )
    )
    (2): Sequential(
      (0): BasicBlock(
        (conv1): Conv2d(32, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (res_conv): Conv2d(32, 2, kernel_size=(1, 1), stride=(1, 1), bias=False)
        (res_bn): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (upsample): Upsample(scale_factor=2.0, mode=nearest)
      )
      (1): BasicBlock(
        (conv1): Conv2d(2, 2, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), bias=False)
        (bn1): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (relu): ReLU(inplace=True)
        (conv2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        (bn2): BatchNorm2d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (activation): Sigmoid()
      )
    )
  )
)
%% Cell type:code id: tags:
```
import torch
# sanity-check: a dummy grayscale batch should map to two ab channels of the same size
model(torch.rand((2, 1, 224, 224))).shape
```
%% Output
torch.Size([2, 2, 224, 224])
%% Cell type:code id: tags:
```
from utils import count_params
count_params(model)  # number of trainable parameters
```
%% Output
1166016
import torch.nn as nn
import torchvision.models as models


class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels,
                 activation=None, upsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=5, stride=1, padding=2, bias=False
        )
        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(
            in_channels=out_channels, out_channels=out_channels,
            kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)
        if activation is not None:
            self.activation = activation
        else:
            # no activation given: this is an upsampling block, so the residual
            # must be projected to the new channel count before the addition
            self.res_conv = nn.Conv2d(
                in_channels=in_channels, out_channels=out_channels,
                kernel_size=1, stride=1, bias=False
            )
            self.res_bn = nn.BatchNorm2d(num_features=out_channels)
        self.upsample = upsample

    def forward(self, t):
        res = t
        t = self.conv1(t)
        t = self.bn1(t)
        t = self.relu(t)
        t = self.conv2(t)
        t = self.bn2(t)
        if self.upsample is not None:
            res = self.res_conv(res)
            res = self.res_bn(res)
            t += res
            t = self.relu(t)
            t = self.upsample(t)
        else:
            t += res
            t = self.activation(t)
        return t


class ColorizeNet(nn.Module):
    def __init__(self):
        super().__init__()
        # set pretrained=True before starting the training process
        resnet18 = models.resnet18(pretrained=False)
        # change the first conv layer to accept a single channel (grayscale)
        # by averaging the pretrained RGB filters into one input channel
        resnet18.conv1.weight = nn.Parameter(
            resnet18.conv1.weight.mean(dim=1).unsqueeze(dim=1))
        # use the first 6 children of ResNet-18 (conv1 through layer2) as encoder
        self.encoder = nn.Sequential(
            *list(resnet18.children())[:6]
        )
        self.decoder = nn.Sequential(
            self._make_layer(BasicBlock, 128, 64, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 64, 32, nn.ReLU(inplace=True)),
            self._make_layer(BasicBlock, 32, 2, nn.Sigmoid())
        )

    def _make_layer(self, block, in_channels, out_channels, activation):
        upsample = nn.Upsample(scale_factor=2, mode='nearest')
        layers = []
        layers.append(block(in_channels, out_channels, upsample=upsample))
        layers.append(block(out_channels, out_channels, activation=activation))
        return nn.Sequential(*layers)

    def forward(self, t):
        t = self.encoder(t)
        t = self.decoder(t)
        return t
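For context, a minimal inference sketch showing how ColorizeNet combines with the helpers defined in utils.py further down; the checkpoint and image paths are placeholders:

```
import torch
from network import ColorizeNet
from utils import load_gray, to_rgb

model = ColorizeNet()
model.load_state_dict(torch.load('models/model.pth', map_location='cpu'))  # placeholder path
model.eval()

img_l = load_gray('input/0.jpg', shape=(224, 224))  # 1x1xHxW lightness tensor in [0, 1]
with torch.no_grad():
    img_ab = model(img_l)                           # 1x2xHxW ab channels in [0, 1]
img_rgb = to_rgb(img_l, img_ab)                     # HxWx3 float RGB array
```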
src/methods/domer/image-colorization/output/0.jpg

25.8 KiB

src/methods/domer/image-colorization/output/1.jpg

188 KiB

src/methods/domer/image-colorization/output/2.jpg

167 KiB

src/methods/domer/image-colorization/output/3.jpg

166 KiB

src/methods/domer/image-colorization/output/6.jpg

14 KiB

src/methods/domer/image-colorization/output/7.jpg

15.6 KiB

matplotlib==3.3.2
numpy==1.19.4
Pillow==8.0.1
scikit-image==0.17.2
torch==1.7.0+cpu
torchvision==0.8.1+cpu
tqdm==4.51.0
\ No newline at end of file
%% Cell type:code id: tags:
```
# from google.colab import drive
# drive.mount('/content/drive')
```
%% Output
Mounted at /content/drive
%% Cell type:code id: tags:
```
# Download and unzip (2.2GB)
# !wget http://data.csail.mit.edu/places/places205/testSetPlaces205_resize.tar.gz
# !mkdir images/
# !tar -xzf drive/MyDrive/testSetPlaces205_resize.tar.gz -C 'images/'
```
%% Cell type:code id: tags:
```
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from tqdm import tqdm
from network import ColorizeNet
from utils import GrayscaleImageFolder
```
%% Cell type:code id: tags:
```
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
```
%% Cell type:code id: tags:
```
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip()
])
train_set = GrayscaleImageFolder('images/', transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True, num_workers=4)
```
%% Cell type:code id: tags:
```
criterion = nn.MSELoss()
model = ColorizeNet().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[8, 24], gamma=1/3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=1/3, patience=5, verbose=True)
```
%% Cell type:code id: tags:
```
try:
    checkpoint = torch.load('drive/MyDrive/checkpoint_0.001.pth', map_location=device)
    start_epoch = checkpoint['next_epoch']
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    scheduler.load_state_dict(checkpoint['scheduler'])
except FileNotFoundError:
    # no checkpoint to resume from: start training from scratch
    start_epoch = 0
%% Cell type:code id: tags:
```
num_epochs = 64
for epoch in range(start_epoch, num_epochs):
    train_loss = 0
    loop = tqdm(train_loader)
    for batch in loop:
        in_gray, in_ab = batch[0].to(device), batch[1].to(device)
        out_ab = model(in_gray)
        loss = criterion(out_ab, in_ab)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * in_gray.size(0)  # running sum of the per-image loss
        loop.set_description(f'Epoch [{epoch+1:2d}/{num_epochs}]')
        loop.set_postfix(loss=train_loss)
    scheduler.step(train_loss)
    checkpoint = {
        'next_epoch': epoch + 1,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict()
    }
    # note: _last_lr is a private attribute of the scheduler
    torch.save(checkpoint, f'drive/MyDrive/checkpoint_{scheduler._last_lr[0]}.pth')
%% Output
Epoch [39/64]: 100%|██████████| 321/321 [12:00<00:00, 2.24s/it, loss=97.4]
Epoch [40/64]: 100%|██████████| 321/321 [11:30<00:00, 2.15s/it, loss=98]
Epoch [41/64]: 100%|██████████| 321/321 [11:18<00:00, 2.12s/it, loss=97.4]
Epoch [42/64]: 100%|██████████| 321/321 [11:28<00:00, 2.15s/it, loss=103]
Epoch [43/64]: 100%|██████████| 321/321 [11:23<00:00, 2.13s/it, loss=98.4]
Epoch [44/64]: 100%|██████████| 321/321 [11:15<00:00, 2.10s/it, loss=97.3]
Epoch [45/64]: 100%|██████████| 321/321 [11:29<00:00, 2.15s/it, loss=97.6]
Epoch [46/64]: 100%|██████████| 321/321 [11:25<00:00, 2.14s/it, loss=96.5]
Epoch [47/64]: 100%|██████████| 321/321 [11:40<00:00, 2.18s/it, loss=96.7]
Epoch [48/64]: 100%|██████████| 321/321 [11:44<00:00, 2.19s/it, loss=96.3]
Epoch [49/64]: 100%|██████████| 321/321 [11:13<00:00, 2.10s/it, loss=96.5]
Epoch [50/64]: 100%|██████████| 321/321 [11:09<00:00, 2.09s/it, loss=96.4]
Epoch [51/64]: 100%|██████████| 321/321 [11:09<00:00, 2.09s/it, loss=95.7]
Epoch [52/64]: 100%|██████████| 321/321 [11:13<00:00, 2.10s/it, loss=95.9]
Epoch [53/64]: 100%|██████████| 321/321 [11:08<00:00, 2.08s/it, loss=95.8]
Epoch [54/64]: 100%|██████████| 321/321 [11:31<00:00, 2.15s/it, loss=95.4]
Epoch [55/64]: 100%|██████████| 321/321 [11:54<00:00, 2.23s/it, loss=95.5]
Epoch [56/64]: 100%|██████████| 321/321 [12:11<00:00, 2.28s/it, loss=95.5]
Epoch [57/64]: 100%|██████████| 321/321 [12:00<00:00, 2.24s/it, loss=94.9]
Epoch [58/64]: 100%|██████████| 321/321 [11:54<00:00, 2.23s/it, loss=94.9]
Epoch [59/64]: 100%|██████████| 321/321 [11:56<00:00, 2.23s/it, loss=94.9]
Epoch [60/64]: 100%|██████████| 321/321 [12:06<00:00, 2.26s/it, loss=94.3]
Epoch [61/64]: 100%|██████████| 321/321 [12:03<00:00, 2.26s/it, loss=95]
Epoch [62/64]: 100%|██████████| 321/321 [12:01<00:00, 2.25s/it, loss=95]
Epoch [63/64]: 100%|██████████| 321/321 [12:05<00:00, 2.26s/it, loss=96.7]
Epoch [64/64]: 100%|██████████| 321/321 [12:10<00:00, 2.27s/it, loss=95]
%% Cell type:code id: tags:
```
torch.save(model.state_dict(), './models/model.pth')  # save the weights separately from the checkpoint file
```
import torch
from torchvision import transforms, datasets
import numpy as np
from PIL import Image
from skimage.color import rgb2lab, rgb2gray, lab2rgb


def count_params(model):
    '''
    Returns the number of trainable parameters of a model.
    '''
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


class GrayscaleImageFolder(datasets.ImageFolder):
    '''
    Custom dataset that performs the grayscale/ab preprocessing on images as they are loaded.
    '''
    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_orig = self.transform(img)   # apply transforms
            img_orig = np.asarray(img_orig)  # convert to numpy array

            img_lab = rgb2lab(img_orig)      # convert the RGB image to Lab
            img_ab = img_lab[:, :, 1:3]      # separate the ab channels from Lab
            img_ab = (img_ab + 128) / 255    # normalize the pixel values to [0, 1]
            # transpose image from HxWxC to CxHxW and turn it into a tensor
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1))).float()

            img_orig = rgb2gray(img_orig)    # convert RGB to grayscale
            # add a channel axis to the grayscale image and turn it into a tensor
            img_orig = torch.from_numpy(img_orig).unsqueeze(0).float()
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img_orig, img_ab, target
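As a quick illustration (not part of the file), the ab normalization above maps Lab chrominance values from roughly [-128, 127] into [0, 1], which is exactly the output range of the Sigmoid at the end of the decoder; to_rgb() below applies the inverse mapping:

```
import numpy as np

ab = np.array([-128.0, 0.0, 127.0])
ab_norm = (ab + 128) / 255                   # maps to [0.0, ~0.502, 1.0]
assert np.allclose(ab_norm * 255 - 128, ab)  # the un-normalization used in to_rgb()
```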
def load_gray(path, max_size=360, shape=None):
    '''
    Loads an image as grayscale, resizes it as requested and
    converts it to a model-compatible shape.
    '''
    img_gray = Image.open(path).convert('L')
    if max(img_gray.size) > max_size:
        size = max_size
    else:
        size = max(img_gray.size)
    if shape is not None:
        size = shape
    img_transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor()
    ])
    img_gray = img_transform(img_gray).unsqueeze(0)
    return img_gray
def to_rgb(img_l, img_ab):
    '''
    Concatenates the lightness (grayscale) and ab channels,
    and converts the resulting Lab image to RGB.
    '''
    if img_l.shape == img_ab.shape:
        img_lab = torch.cat((img_l, img_ab), 1).numpy().squeeze()
    else:
        # crop the ab channels to the lightness resolution before stacking
        img_lab = torch.cat(
            (img_l, img_ab[:, :, :img_l.size(2), :img_l.size(3)]),
            dim=1
        ).numpy().squeeze()
    img_lab = img_lab.transpose(1, 2, 0)                # transpose image to HxWxC
    img_lab[:, :, 0] = img_lab[:, :, 0] * 100           # rescale lightness to [0, 100]
    img_lab[:, :, 1:] = img_lab[:, :, 1:] * 255 - 128   # un-normalize the ab channels
    img_rgb = lab2rgb(img_lab.astype(np.float64))       # convert the Lab image to RGB
    return img_rgb
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 19:35:12 2024

@author: stani1
"""
"""The main file for the baseline reconstruction.
This file should NOT be modified.
"""
import numpy as np
from PIL import Image
import subprocess
import os

from src.forward_model import CFA
from src.methods.baseline.demo_reconstruction import naive_interpolation


def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # build absolute paths from the current working directory
    current_directory = os.getcwd()
    input_path = 'src/methods/domer/image-colorization/input/0.jpg'
    input_path = os.path.join(current_directory, input_path)    # location where the input image is stored
    output_path = 'src/methods/domer/image-colorization/output/0.jpg'
    output_path = os.path.join(current_directory, output_path)
    # venv_path = 'src/methods/domer/image-colorization/image-colorization/bin/activate'
    # venv_path = os.path.join(current_directory, venv_path)  # activation of the venv; not needed here

    # save the mosaicked image to disk as an 8-bit grayscale JPEG
    # (y is a float image in [0, 1], so it is scaled to [0, 255] first)
    y2 = Image.fromarray((y * 255).astype(np.uint8))
    y2 = y2.convert('L')
    y2.save(input_path)
    resolution = np.shape(y)[0]
    # activation_venv = ['source ', venv_path]
    # subprocess.run(activation_venv, shell=True)
    n = y2.size[0]
    # for i in range(n):
    #     for j in range(n):
    #         y2.putpixel((i, j), y.getpixel((i, j)) * 255)

    # call the neural network with the input and output locations and the resolution
    command = [
        'python', 'src/methods/domer/image-colorization/colorize.py',
        '-i', input_path,
        '-o', output_path,
        '-r', str(resolution)
    ]
    subprocess.run(command)
    # deactivation_venv = 'deactivate'
    # subprocess.run(deactivation_venv, shell=True)

    # read the result of the network back into a numpy array
    res = Image.open(output_path)
    res = np.asarray(res)
    return res
######################################################################################
# Demosaicking algorithm based on the paper "A Demosaicking Algorithm with
# Adaptive Inter-Channel Correlation" by Joan Duran and Antoni Buades
######################################################################################
import numpy as np


def cvt_YUV_space(R: np.ndarray, G: np.ndarray, B: np.ndarray):
    """Convert RGB into YUV.

    Args:
        R (np.ndarray): Red channel of the image
        G (np.ndarray): Green channel of the image
        B (np.ndarray): Blue channel of the image

    Returns:
        np.ndarray: Image in the YUV space
    """
    Y = 0.299*R + 0.587*G + 0.114*B
    U = R - Y
    V = B - Y
    return Y, U, V
def compute_gradient(L: int, U: np.ndarray, V: np.ndarray, i: int, j: int, dir: str):
    """Compute the directional gradient of the chromatic channels.

    Args:
        L (int): Size of the local neighborhood
        U (np.ndarray): U channel of the image
        V (np.ndarray): V channel of the image
        i (int): column index (position within a line)
        j (int): row index (position within a column)
        dir (str): direction of interpolation

    Returns:
        float: Gradient
    """
    height, width = U.shape
    gradient_U = 0
    gradient_V = 0
    # n_max clips the neighborhood so that it stays inside the image
    if dir == "north":
        n_max = j+1 if j < L+1 else L+1
        for l in range(1, n_max):
            gradient_U += (U[j-l, i] - U[j, i])**2
            gradient_V += (V[j-l, i] - V[j, i])**2
    elif dir == "south":
        n_max = height-j if height-j < L+1 else L+1
        for l in range(1, n_max):
            gradient_U += (U[j+l, i] - U[j, i])**2
            gradient_V += (V[j+l, i] - V[j, i])**2
    elif dir == "east":
        n_max = width-i if width-i < L+1 else L+1
        for l in range(1, n_max):
            gradient_U += (U[j, i+l] - U[j, i])**2
            gradient_V += (V[j, i+l] - V[j, i])**2
    elif dir == "west":
        n_max = i+1 if i < L+1 else L+1
        for l in range(1, n_max):
            gradient_U += (U[j, i-l] - U[j, i])**2
            gradient_V += (V[j, i-l] - V[j, i])**2
    if n_max == L+1:
        n_max = L
    gradient = (np.sqrt(gradient_U) + np.sqrt(gradient_V)) / n_max
    return gradient
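For reference, local_int() below fuses the four directional estimates with weights that are inversely proportional to these chromatic gradients. In the notation of the Duran-Buades paper (a sketch; d ranges over the four directions and p_{d,l} denotes the l-th neighbor of pixel p in direction d):

```
\nabla_d(p) = \frac{1}{L}\left( \sqrt{\sum_{l=1}^{L} \bigl(U(p_{d,l}) - U(p)\bigr)^2}
            + \sqrt{\sum_{l=1}^{L} \bigl(V(p_{d,l}) - V(p)\bigr)^2} \right), \qquad
w_d(p) = \frac{1 / (\nabla_d(p) + \varepsilon)}{\sum_{d'} 1 / (\nabla_{d'}(p) + \varepsilon)}, \qquad
R(p) = \sum_d w_d(p)\, R_d(p)
```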
def local_int(R0: np.ndarray, G0: np.ndarray, B0: np.ndarray, beta: float, L: int, eps: float):
    """Local directional interpolation algorithm.

    Args:
        R0 (np.ndarray): Red mosaicked image
        G0 (np.ndarray): Green mosaicked image
        B0 (np.ndarray): Blue mosaicked image
        beta (float): Channel correlation parameter in [0, 1]
        L (int): Size of the local neighborhood
        eps (float): Thresholding parameter > 0

    Returns:
        tuple: The interpolated channels (R, G, B)
    """
    assert R0.shape == G0.shape and R0.shape == B0.shape
    assert eps > 0
    lin, col = G0.shape

    # Initialisation of the four directional estimates (north, south, east, west)
    Rn, Rs, Re, Rw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    Gn, Gs, Ge, Gw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    Bn, Bs, Be, Bw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    RGn, RGs, RGe, RGw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    BGn, BGs, BGe, BGw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    Gradn, Grads, Grade, Gradw = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    low_wn, low_ws, low_we, low_ww = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    W = np.zeros((lin, col))
    R, G, B = np.zeros((lin, col)), np.zeros((lin, col)), np.zeros((lin, col))
    # Computation
    for i in range(col):
        for j in range(lin):
            # Directional interpolation of the green channel
            if G0[j, i] != 0:
                Gn[j, i], Gs[j, i], Ge[j, i], Gw[j, i] = G0[j, i], G0[j, i], G0[j, i], G0[j, i]
            else:
                if j-2 >= 0:  # North
                    # If we are on a red pixel: B0[j, i] = B0[j-2, i] = 0,
                    # and the same holds if we are on a blue pixel
                    Gn[j, i] = G0[j-1, i] + 0.5*beta*(R0[j, i] - R0[j-2, i] + B0[j, i] - B0[j-2, i])
                else:  # R0[j-2, i] = 0 or B0[j-2, i] = 0
                    if j-1 >= 0:
                        Gn[j, i] = G0[j-1, i] + beta*(R0[j, i] + B0[j, i])
                    else:  # G0[j-1, i] = 0
                        Gn[j, i] = beta*(R0[j, i] + B0[j, i])
                if j+2 < lin:  # South
                    Gs[j, i] = G0[j+1, i] + 0.5*beta*(R0[j, i] - R0[j+2, i] + B0[j, i] - B0[j+2, i])
                else:
                    if j+1 < lin:
                        Gs[j, i] = G0[j+1, i] + beta*(R0[j, i] + B0[j, i])
                    else:
                        Gs[j, i] = beta*(R0[j, i] + B0[j, i])
                if i+2 < col:  # East
                    Ge[j, i] = G0[j, i+1] + 0.5*beta*(R0[j, i] - R0[j, i+2] + B0[j, i] - B0[j, i+2])
                else:
                    if i+1 < col:
                        Ge[j, i] = G0[j, i+1] + beta*(R0[j, i] + B0[j, i])
                    else:
                        Ge[j, i] = beta*(R0[j, i] + B0[j, i])
                if i-2 >= 0:  # West
                    Gw[j, i] = G0[j, i-1] + 0.5*beta*(R0[j, i] - R0[j, i-2] + B0[j, i] - B0[j, i-2])
                else:
                    if i-1 >= 0:
                        Gw[j, i] = G0[j, i-1] + beta*(R0[j, i] + B0[j, i])
                    else:
                        Gw[j, i] = beta*(R0[j, i] + B0[j, i])

            # Bilinear interpolation of the red and blue channels
            if R0[j, i] != 0:
                Rn[j, i], Rs[j, i], Re[j, i], Rw[j, i] = R0[j, i], R0[j, i], R0[j, i], R0[j, i]
                RGn[j, i] = R0[j, i] - beta*Gn[j, i]
                RGs[j, i] = R0[j, i] - beta*Gs[j, i]
                RGe[j, i] = R0[j, i] - beta*Ge[j, i]
                RGw[j, i] = R0[j, i] - beta*Gw[j, i]
            if B0[j, i] != 0:
                Bn[j, i], Bs[j, i], Be[j, i], Bw[j, i] = B0[j, i], B0[j, i], B0[j, i], B0[j, i]
                BGn[j, i] = B0[j, i] - beta*Gn[j, i]
                BGs[j, i] = B0[j, i] - beta*Gs[j, i]
                BGe[j, i] = B0[j, i] - beta*Ge[j, i]
                BGw[j, i] = B0[j, i] - beta*Gw[j, i]
            if i+1 < col:
                if G0[j, i] != 0 and R0[j, i+1] != 0:  # red pixel next to us
                    if i-1 >= 0:
                        Rn[j, i] = 0.5*(RGn[j, i-1] + RGn[j, i+1]) + beta*Gn[j, i]
                        Rs[j, i] = 0.5*(RGs[j, i-1] + RGs[j, i+1]) + beta*Gs[j, i]
                        Re[j, i] = 0.5*(RGe[j, i-1] + RGe[j, i+1]) + beta*Ge[j, i]
                        Rw[j, i] = 0.5*(RGw[j, i-1] + RGw[j, i+1]) + beta*Gw[j, i]
                    # The pixel next to us is red, so it cannot be blue
                    if j-1 >= 0:
                        if j+1 < lin:
                            Bn[j, i] = 0.5*(BGn[j-1, i] + BGn[j+1, i]) + beta*Gn[j, i]
                            Bs[j, i] = 0.5*(BGs[j-1, i] + BGs[j+1, i]) + beta*Gs[j, i]
                            Be[j, i] = 0.5*(BGe[j-1, i] + BGe[j+1, i]) + beta*Ge[j, i]
                            Bw[j, i] = 0.5*(BGw[j-1, i] + BGw[j+1, i]) + beta*Gw[j, i]
                        else:
                            Bn[j, i] = BGn[j-1, i] + beta*Gn[j, i]
                            Bs[j, i] = BGs[j-1, i] + beta*Gs[j, i]
                            Be[j, i] = BGe[j-1, i] + beta*Ge[j, i]
                            Bw[j, i] = BGw[j-1, i] + beta*Gw[j, i]
                    else:
                        Bn[j, i] = BGn[j+1, i] + beta*Gn[j, i]
                        Bs[j, i] = BGs[j+1, i] + beta*Gs[j, i]
                        Be[j, i] = BGe[j+1, i] + beta*Ge[j, i]
                        Bw[j, i] = BGw[j+1, i] + beta*Gw[j, i]
                if G0[j, i] != 0 and B0[j, i+1] != 0:  # blue pixel next to us
                    if i-1 >= 0:
                        Bn[j, i] = 0.5*(BGn[j, i-1] + BGn[j, i+1]) + beta*Gn[j, i]
                        Bs[j, i] = 0.5*(BGs[j, i-1] + BGs[j, i+1]) + beta*Gs[j, i]
                        Be[j, i] = 0.5*(BGe[j, i-1] + BGe[j, i+1]) + beta*Ge[j, i]
                        Bw[j, i] = 0.5*(BGw[j, i-1] + BGw[j, i+1]) + beta*Gw[j, i]
                    # The pixel next to us is blue, so it cannot be red
                    if j-1 >= 0:
                        if j+1 < lin:
                            Rn[j, i] = 0.5*(RGn[j-1, i] + RGn[j+1, i]) + beta*Gn[j, i]
                            Rs[j, i] = 0.5*(RGs[j-1, i] + RGs[j+1, i]) + beta*Gs[j, i]
                            Re[j, i] = 0.5*(RGe[j-1, i] + RGe[j+1, i]) + beta*Ge[j, i]
                            Rw[j, i] = 0.5*(RGw[j-1, i] + RGw[j+1, i]) + beta*Gw[j, i]
                        else:
                            Rn[j, i] = RGn[j-1, i] + beta*Gn[j, i]
                            Rs[j, i] = RGs[j-1, i] + beta*Gs[j, i]
                            Re[j, i] = RGe[j-1, i] + beta*Ge[j, i]
                            Rw[j, i] = RGw[j-1, i] + beta*Gw[j, i]
            if R0[j, i] == 0 and G0[j, i] == 0:
                # red channel at blue positions: interpolate from the four diagonals
                if i-1 >= 0:
                    if i+1 < col:
                        if j-1 >= 0:
                            if j+1 < lin:
                                Rn[j, i] = 0.25*(RGn[j-1, i-1] + RGn[j-1, i+1] + RGn[j+1, i-1] + RGn[j+1, i+1]) + beta*Gn[j, i]
                                Rs[j, i] = 0.25*(RGs[j-1, i-1] + RGs[j-1, i+1] + RGs[j+1, i-1] + RGs[j+1, i+1]) + beta*Gs[j, i]
                                Re[j, i] = 0.25*(RGe[j-1, i-1] + RGe[j-1, i+1] + RGe[j+1, i-1] + RGe[j+1, i+1]) + beta*Ge[j, i]
                                Rw[j, i] = 0.25*(RGw[j-1, i-1] + RGw[j-1, i+1] + RGw[j+1, i-1] + RGw[j+1, i+1]) + beta*Gw[j, i]
                            else:
                                Rn[j, i] = 0.5*(RGn[j-1, i-1] + RGn[j-1, i+1]) + beta*Gn[j, i]
                                Rs[j, i] = 0.5*(RGs[j-1, i-1] + RGs[j-1, i+1]) + beta*Gs[j, i]
                                Re[j, i] = 0.5*(RGe[j-1, i-1] + RGe[j-1, i+1]) + beta*Ge[j, i]
                                Rw[j, i] = 0.5*(RGw[j-1, i-1] + RGw[j-1, i+1]) + beta*Gw[j, i]
                        else:
                            Rn[j, i] = 0.5*(RGn[j+1, i-1] + RGn[j+1, i+1]) + beta*Gn[j, i]
                            Rs[j, i] = 0.5*(RGs[j+1, i-1] + RGs[j+1, i+1]) + beta*Gs[j, i]
                            Re[j, i] = 0.5*(RGe[j+1, i-1] + RGe[j+1, i+1]) + beta*Ge[j, i]
                            Rw[j, i] = 0.5*(RGw[j+1, i-1] + RGw[j+1, i+1]) + beta*Gw[j, i]
                    else:
                        if j-1 >= 0:
                            if j+1 < lin:
                                Rn[j, i] = 0.5*(RGn[j-1, i-1] + RGn[j+1, i-1]) + beta*Gn[j, i]
                                Rs[j, i] = 0.5*(RGs[j-1, i-1] + RGs[j+1, i-1]) + beta*Gs[j, i]
                                Re[j, i] = 0.5*(RGe[j-1, i-1] + RGe[j+1, i-1]) + beta*Ge[j, i]
                                Rw[j, i] = 0.5*(RGw[j-1, i-1] + RGw[j+1, i-1]) + beta*Gw[j, i]
                            else:
                                Rn[j, i] = RGn[j-1, i-1] + beta*Gn[j, i]
                                Rs[j, i] = RGs[j-1, i-1] + beta*Gs[j, i]
                                Re[j, i] = RGe[j-1, i-1] + beta*Ge[j, i]
                                Rw[j, i] = RGw[j-1, i-1] + beta*Gw[j, i]
                        else:
                            Rn[j, i] = beta*Gn[j, i]
                            Rs[j, i] = beta*Gs[j, i]
                            Re[j, i] = beta*Ge[j, i]
                            Rw[j, i] = beta*Gw[j, i]
                else:
                    if j-1 >= 0:
                        if j+1 < lin:
                            Rn[j, i] = 0.5*(RGn[j-1, i+1] + RGn[j+1, i+1]) + beta*Gn[j, i]
                            Rs[j, i] = 0.5*(RGs[j-1, i+1] + RGs[j+1, i+1]) + beta*Gs[j, i]
                            Re[j, i] = 0.5*(RGe[j-1, i+1] + RGe[j+1, i+1]) + beta*Ge[j, i]
                            Rw[j, i] = 0.5*(RGw[j-1, i+1] + RGw[j+1, i+1]) + beta*Gw[j, i]
                        else:
                            Rn[j, i] = RGn[j-1, i+1] + beta*Gn[j, i]
                            Rs[j, i] = RGs[j-1, i+1] + beta*Gs[j, i]
                            Re[j, i] = RGe[j-1, i+1] + beta*Ge[j, i]
                            Rw[j, i] = RGw[j-1, i+1] + beta*Gw[j, i]
                    else:
                        Rn[j, i] = beta*Gn[j, i]
                        Rs[j, i] = beta*Gs[j, i]
                        Re[j, i] = beta*Ge[j, i]
                        Rw[j, i] = beta*Gw[j, i]
            if B0[j, i] == 0 and G0[j, i] == 0:
                # blue channel at red positions: interpolate from the four diagonals
                if i-1 >= 0:
                    if i+1 < col:
                        if j-1 >= 0:
                            if j+1 < lin:
                                Bn[j, i] = 0.25*(BGn[j-1, i-1] + BGn[j-1, i+1] + BGn[j+1, i-1] + BGn[j+1, i+1]) + beta*Gn[j, i]
                                Bs[j, i] = 0.25*(BGs[j-1, i-1] + BGs[j-1, i+1] + BGs[j+1, i-1] + BGs[j+1, i+1]) + beta*Gs[j, i]
                                Be[j, i] = 0.25*(BGe[j-1, i-1] + BGe[j-1, i+1] + BGe[j+1, i-1] + BGe[j+1, i+1]) + beta*Ge[j, i]
                                Bw[j, i] = 0.25*(BGw[j-1, i-1] + BGw[j-1, i+1] + BGw[j+1, i-1] + BGw[j+1, i+1]) + beta*Gw[j, i]
                            else:
                                Bn[j, i] = 0.5*(BGn[j-1, i-1] + BGn[j-1, i+1]) + beta*Gn[j, i]
                                Bs[j, i] = 0.5*(BGs[j-1, i-1] + BGs[j-1, i+1]) + beta*Gs[j, i]
                                Be[j, i] = 0.5*(BGe[j-1, i-1] + BGe[j-1, i+1]) + beta*Ge[j, i]
                                Bw[j, i] = 0.5*(BGw[j-1, i-1] + BGw[j-1, i+1]) + beta*Gw[j, i]
                        else:
                            Bn[j, i] = 0.5*(BGn[j+1, i-1] + BGn[j+1, i+1]) + beta*Gn[j, i]
                            Bs[j, i] = 0.5*(BGs[j+1, i-1] + BGs[j+1, i+1]) + beta*Gs[j, i]
                            Be[j, i] = 0.5*(BGe[j+1, i-1] + BGe[j+1, i+1]) + beta*Ge[j, i]
                            Bw[j, i] = 0.5*(BGw[j+1, i-1] + BGw[j+1, i+1]) + beta*Gw[j, i]
                    else:
                        if j-1 >= 0:
                            if j+1 < lin:
                                Bn[j, i] = 0.5*(BGn[j-1, i-1] + BGn[j+1, i-1]) + beta*Gn[j, i]
                                Bs[j, i] = 0.5*(BGs[j-1, i-1] + BGs[j+1, i-1]) + beta*Gs[j, i]
                                Be[j, i] = 0.5*(BGe[j-1, i-1] + BGe[j+1, i-1]) + beta*Ge[j, i]
                                Bw[j, i] = 0.5*(BGw[j-1, i-1] + BGw[j+1, i-1]) + beta*Gw[j, i]
                            else:
                                Bn[j, i] = BGn[j-1, i-1] + beta*Gn[j, i]
                                Bs[j, i] = BGs[j-1, i-1] + beta*Gs[j, i]
                                Be[j, i] = BGe[j-1, i-1] + beta*Ge[j, i]
                                Bw[j, i] = BGw[j-1, i-1] + beta*Gw[j, i]
                        else:
                            Bn[j, i] = beta*Gn[j, i]
                            Bs[j, i] = beta*Gs[j, i]
                            Be[j, i] = beta*Ge[j, i]
                            Bw[j, i] = beta*Gw[j, i]
                else:
                    if j-1 >= 0:
                        if j+1 < lin:
                            Bn[j, i] = 0.5*(BGn[j-1, i+1] + BGn[j+1, i+1]) + beta*Gn[j, i]
                            Bs[j, i] = 0.5*(BGs[j-1, i+1] + BGs[j+1, i+1]) + beta*Gs[j, i]
                            Be[j, i] = 0.5*(BGe[j-1, i+1] + BGe[j+1, i+1]) + beta*Ge[j, i]
                            Bw[j, i] = 0.5*(BGw[j-1, i+1] + BGw[j+1, i+1]) + beta*Gw[j, i]
                        else:
                            Bn[j, i] = BGn[j-1, i+1] + beta*Gn[j, i]
                            Bs[j, i] = BGs[j-1, i+1] + beta*Gs[j, i]
                            Be[j, i] = BGe[j-1, i+1] + beta*Ge[j, i]
                            Bw[j, i] = BGw[j-1, i+1] + beta*Gw[j, i]
                    else:
                        Bn[j, i] = beta*Gn[j, i]
                        Bs[j, i] = beta*Gs[j, i]
                        Be[j, i] = beta*Ge[j, i]
                        Bw[j, i] = beta*Gw[j, i]
    # Pixel-level fusion of the four full-color interpolated images.
    # The YUV conversion and the gradients need the completed directional
    # interpolations, hence a second pass over the image.
    # Computation of the YUV space
    _, Un, Vn = cvt_YUV_space(Rn, Gn, Bn)
    _, Us, Vs = cvt_YUV_space(Rs, Gs, Bs)
    _, Ue, Ve = cvt_YUV_space(Re, Ge, Be)
    _, Uw, Vw = cvt_YUV_space(Rw, Gw, Bw)
    for i in range(col):
        for j in range(lin):
            # Computation of the gradients
            Gradn[j, i] = compute_gradient(L, Un, Vn, i, j, "north")
            Grads[j, i] = compute_gradient(L, Us, Vs, i, j, "south")
            Grade[j, i] = compute_gradient(L, Ue, Ve, i, j, "east")
            Gradw[j, i] = compute_gradient(L, Uw, Vw, i, j, "west")
            # Weights inversely proportional to the directional gradients
            low_wn[j, i] = 1 / (Gradn[j, i] + eps)
            low_ws[j, i] = 1 / (Grads[j, i] + eps)
            low_we[j, i] = 1 / (Grade[j, i] + eps)
            low_ww[j, i] = 1 / (Gradw[j, i] + eps)
            W[j, i] = low_wn[j, i] + low_ws[j, i] + low_we[j, i] + low_ww[j, i]
            low_wn[j, i] = low_wn[j, i] / W[j, i]
            low_ws[j, i] = low_ws[j, i] / W[j, i]
            low_we[j, i] = low_we[j, i] / W[j, i]
            low_ww[j, i] = low_ww[j, i] / W[j, i]
            R[j, i] = low_wn[j, i]*Rn[j, i] + low_ws[j, i]*Rs[j, i] + low_we[j, i]*Re[j, i] + low_ww[j, i]*Rw[j, i]
            G[j, i] = low_wn[j, i]*Gn[j, i] + low_ws[j, i]*Gs[j, i] + low_we[j, i]*Ge[j, i] + low_ww[j, i]*Gw[j, i]
            B[j, i] = low_wn[j, i]*Bn[j, i] + low_ws[j, i]*Bs[j, i] + low_we[j, i]*Be[j, i] + low_ww[j, i]*Bw[j, i]
    return R, G, B
\ No newline at end of file
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.fortin_come.DemosaickingInterChannel import local_int
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
# Performing the reconstruction.
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa, input_shape)
z = op.adjoint(y)
R0 = z[:, :, 0]
G0 = z[:, :, 1]
B0 = z[:, :, 2]
beta = 0.9 # Inter-channel parameter
L = 5 # Size of the local neighborhood
eps = 0.00000001 # Tresholding parameter > 0
R1, G1, B1 = local_int(R0, G0, B0, beta, L, eps)
img_demosaicked = np.zeros((R1.shape[0], R1.shape[1], 3))
img_demosaicked[:, :, 0] = R1
img_demosaicked[:, :, 1] = G1
img_demosaicked[:, :, 2] = B1
return img_demosaicked
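A hypothetical end-to-end usage sketch; it assumes the project's CFA operator exposes a direct() method as the counterpart of the adjoint() used above:

```
import numpy as np
from src.forward_model import CFA

rgb = np.random.rand(64, 64, 3)         # stand-in for a ground-truth image in [0, 1]
op = CFA('bayer', rgb.shape)            # forward model
y = op.direct(rgb)                      # mosaicked observation (assumed API)
x_hat = run_reconstruction(y, 'bayer')  # demosaicked HxWx3 estimate
```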
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: FORTIN Côme
File added
Source diff not displayed: the file is too large.
import numpy as np

from src.forward_model import CFA


def HQ_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
    """Performs a high-quality linear interpolation of the lost pixels.

    Args:
        op (CFA): CFA operator.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image.
    """
    z = op.adjoint(y)

    if op.cfa == 'bayer':
        res = np.empty(op.input_shape)
        res[:, :, 0] = varying_kernel_convolution_HQ(y[:, :], K_red_list)
        res[:, :, 1] = varying_kernel_convolution_HQ(y[:, :], K_green_list)
        res[:, :, 2] = varying_kernel_convolution_HQ(y[:, :], K_blue_list)
    else:
        # convert the Quad Bayer mosaic to a Bayer mosaic first
        res = np.empty(op.input_shape)
        bayer_y = quad_bayer_to_bayer(y)
        res[:, :, 0] = varying_kernel_convolution_HQ(bayer_y[:, :], K_red_list)
        res[:, :, 1] = varying_kernel_convolution_HQ(bayer_y[:, :], K_green_list)
        res[:, :, 2] = varying_kernel_convolution_HQ(bayer_y[:, :], K_blue_list)
    return res
def extract_padded(M, size, i, j):
    N_i, N_j = M.shape
    res = np.zeros((size, size))
    middle_size = int((size - 1) / 2)
    for ii in range(-middle_size, middle_size + 1):
        for jj in range(-middle_size, middle_size + 1):
            if i + ii >= 0 and i + ii < N_i and j + jj >= 0 and j + jj < N_j:
                res[middle_size + ii, middle_size + jj] = M[i + ii, j + jj]
    return res


def varying_kernel_convolution(M, K_list):
    N_i, N_j = M.shape
    res = np.zeros_like(M)
    for i in range(N_i):
        for j in range(N_j):
            res[i, j] = np.sum(extract_padded(M, K_list[4 * (i % 4) + j % 4].shape[0], i, j) * K_list[4 * (i % 4) + j % 4])
    np.clip(res, 0, 1, res)
    return res


def varying_kernel_convolution_HQ(M, K_list):
    res = np.zeros_like(M)
    M_padded = np.pad(M, 2, "constant", constant_values=0)
    N_i, N_j = M_padded.shape
    for i in range(2, N_i - 2):
        for j in range(2, N_j - 2):
            # the kernel is chosen according to the position in the 2x2 Bayer tile
            res[i-2, j-2] = np.sum(extract_padded(M_padded, 5, i, j) * K_list[2 * (i % 2) + j % 2])
    np.clip(res, 0, 1, res)
    return res
def quad_bayer_to_bayer(y):
    res = y.copy()  # work on a copy so the input mosaic is not modified in place
    N_i, N_j = y.shape
    # swap columns 1 and 2 of every 4x4 tile
    for j in range(int(N_j / 4)):
        buff = res[:, j*4+1].copy()  # copy, otherwise the view is overwritten below
        res[:, j*4+1] = res[:, j*4+2]
        res[:, j*4+2] = buff
    # swap lines 1 and 2 of every 4x4 tile
    for i in range(int(N_i / 4)):
        buff = res[i*4+1, :].copy()
        res[i*4+1, :] = res[i*4+2, :]
        res[i*4+2, :] = buff
    # swap the two remaining diagonal samples of every 4x4 tile
    for i in range(int(N_i / 4)):
        for j in range(int(N_j / 4)):
            buff = res[i*4+2, j*4+1]
            res[i*4+2, j*4+1] = res[i*4+1, j*4+2]
            res[i*4+1, j*4+2] = buff
    return res
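To illustrate, the three swaps above rearrange every 4x4 Quad Bayer tile into four 2x2 Bayer tiles (one tile shown; the final diagonal swap exchanges two green samples, so it does not change the pattern itself):

```
# Quad Bayer tile      after column swap      after row swap (Bayer)
#   R R G G               R G R G                R G R G
#   R R G G       ->      R G R G        ->      G B G B
#   G G B B               G B G B                R G R G
#   G G B B               G B G B                G B G B
```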
K_identity = np.zeros((5, 5))
K_identity[2, 2] = 1

K_g_r_and_b = np.array([[0, 0, -1, 0, 0],
                        [0, 0, 2, 0, 0],
                        [-1, 2, 4, 2, -1],
                        [0, 0, 2, 0, 0],
                        [0, 0, -1, 0, 0]]) / 8

K_r_and_b_g_rrow_bcol = np.array([[0, 0, 1/2, 0, 0],
                                  [0, -1, 0, -1, 0],
                                  [-1, 4, 5, 4, -1],
                                  [0, -1, 0, -1, 0],
                                  [0, 0, 1/2, 0, 0]]) / 8

K_r_and_b_g_brow_rcol = K_r_and_b_g_rrow_bcol.T

K_r_b = np.array([[0, 0, -3/2, 0, 0],
                  [0, 2, 0, 2, 0],
                  [-3/2, 0, 6, 0, -3/2],
                  [0, 2, 0, 2, 0],
                  [0, 0, -3/2, 0, 0]]) / 8

K_green_list = [K_identity,
                K_g_r_and_b,
                K_g_r_and_b,
                K_identity]

K_red_list = [K_r_and_b_g_rrow_bcol,
              K_identity,
              K_r_b,
              K_r_and_b_g_brow_rcol]

K_blue_list = [K_r_and_b_g_brow_rcol,
               K_r_b,
               K_identity,
               K_r_and_b_g_rrow_bcol]
\ No newline at end of file
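These 5x5 filters match the high-quality linear interpolation kernels of Malvar, He and Cutler. Each kernel sums to 1, so flat regions pass through unchanged; a small illustrative check (not part of the file):

```
import numpy as np

flat = np.full((16, 16), 0.5)
out = varying_kernel_convolution_HQ(flat, K_green_list)
# away from the zero-padded border, every kernel sums to 1,
# so a constant image is reproduced exactly
assert np.allclose(out[4:-4, 4:-4], 0.5)
```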
import numpy as np

from src.forward_model import CFA
from src.methods.girardey.methods import HQ_interpolation


def run_reconstruction_HQ(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA(cfa, input_shape)
    res = HQ_interpolation(op, y)
    return res