Commit 610f4b41 authored by Matthieu Muller

Merge branch 'master' into 'master'

Image analysis project

See merge request mullemat/sicom_image_analysis_project!30
parents e8db5ceb c6cc6034
"""A file containing the main function for the chosen interpolation (Adams-Hamilton Algorithm).
"""
### BE CAREFUL TO ADD THE MODULE TORCH TO THE FILE requirements.txt
import numpy as np
from scipy.signal import convolve2d
import torch
import torch.nn.functional as F
from src.forward_model import CFA
# Initial kernels for the bilinear interpolation
ker_bayer_red_blue = np.array([[1, 2, 1],
                               [2, 4, 2],
                               [1, 2, 1]]) / 4

ker_bayer_green = np.array([[0, 1, 0],
                            [1, 4, 1],
                            [0, 1, 0]]) / 4

# Kernels for the quad_bayer filter
ker_quad_red_blue = np.array([[1, 1, 2, 2, 0, 1],
                              [1, 1, 2, 2, 1, 1],
                              [2, 2, 4, 4, 2, 2],
                              [2, 2, 4, 4, 2, 2],
                              [1, 1, 2, 2, 1, 1],
                              [1, 1, 2, 2, 1, 1]]) / 16

ker_quad_green = np.array([[0, 0, 1, 1, 0, 0],
                           [0, 0, 1, 1, 0, 0],
                           [1, 1, 4, 4, 1, 1],
                           [1, 1, 4, 4, 1, 1],
                           [0, 0, 1, 1, 0, 0],
                           [0, 0, 1, 1, 0, 0]]) / 16
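# Note on the kernels above: for the Bayer case these are the classic bilinear kernels; at any
# pixel, the sampled neighbors falling under the 3x3 window have weights summing to 4, so the
# division by 4 turns the convolution of the sparse (adjoint) channel into an average of the
# available samples. The quad_bayer kernels follow the same idea with each coefficient spread
# over 2x2 blocks to match the 2x2 super-pixel structure (the single 0 in the first row of
# ker_quad_red_blue breaks the otherwise symmetric pattern and may be unintentional).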
def second_naive_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
    """Performs a naive interpolation of the lost pixels. For the quad_bayer CFA, performs a
    convolution with the kernels above (ker_quad_red_blue and ker_quad_green), inspired by the
    classic bilinear interpolation.

    Args:
        op (CFA): CFA operator.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image.
    """
    z = op.adjoint(y)
    res = np.empty(op.input_shape)

    if op.cfa == 'bayer':
        res[:, :, 0] = convolve2d(z[:, :, 0], ker_bayer_red_blue, mode='same')
        res[:, :, 1] = convolve2d(z[:, :, 1], ker_bayer_green, mode='same')
        res[:, :, 2] = convolve2d(z[:, :, 2], ker_bayer_red_blue, mode='same')
    else:
        res[:, :, 0] = convolve2d(z[:, :, 0], ker_quad_red_blue, mode='same')
        res[:, :, 1] = convolve2d(z[:, :, 1], ker_quad_green, mode='same')
        res[:, :, 2] = convolve2d(z[:, :, 2], ker_quad_red_blue, mode='same')
        # Alternative using a strided torch convolution (kept for reference):
        # res[:, :, 0] = convolution_pad_stride(z[:, :, 0], ker_quad_red_blue)
        # res[:, :, 1] = convolution_pad_stride(z[:, :, 1], ker_quad_green)
        # res[:, :, 2] = convolution_pad_stride(z[:, :, 2], ker_quad_red_blue)

    return res
def convolution_pad_stride(input, kernel):
    """An attempt at convolution with stride and padding using torch.

    Args:
        input (np.ndarray): input (mosaicked) image.
        kernel (np.ndarray): convolution kernel.

    Returns:
        np.ndarray: Convolution of the image with the kernel, with a stride of 2 and a padding of 514.
    """
    # Add the batch and channel dimensions expected by F.conv2d.
    input_tensor = torch.tensor(np.expand_dims(input, axis=(0, 1)))
    kernel_tensor = torch.tensor(np.expand_dims(kernel, axis=(0, 1)))
    output = F.conv2d(input_tensor, kernel_tensor, stride=2, padding=514)
    return output.numpy().squeeze()
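# Note (not in the original file): with a 6x6 kernel, stride 2 and padding p, F.conv2d returns an
# output of spatial size floor((H + 2*p - 6) / 2) + 1. Assuming the 1024x1024 images of this
# project, p = 514 gives floor((1024 + 1028 - 6) / 2) + 1 = 1024, i.e. an output with the same
# spatial size as the input; the value 514 is therefore tied to that specific image size.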
def take(array2d, i, j):
    """Helper function that returns the value of an array at (i, j), preventing out-of-bounds indexing.

    Args:
        array2d (np.ndarray): input array
        i (int): row index
        j (int): column index

    Returns:
        np.float64: array value at position [i, j] if it lies inside the array, 0 otherwise.
    """
    if 0 <= i < array2d.shape[0] and 0 <= j < array2d.shape[1]:
        return array2d[i, j]
    return np.float64(0)
def red_blue_positions(img):
    """Helper generator that yields the red and blue positions in the mosaicked image.

    Args:
        img (np.ndarray): mosaicked image

    Yields:
        tuple[int, int]: (i, j) indices of the red and blue pixels.
    """
    first_non_green = 1
    for i in range(img.shape[0]):
        for j in range(first_non_green, img.shape[1], 2):
            yield i, j
        first_non_green = 1 - first_non_green
def directional_green_interpolation(img):
    """Performs the green interpolation in the horizontal and vertical directions.

    Args:
        img (np.ndarray): input (mosaicked) image

    Returns:
        green_h, green_v (np.ndarray): horizontally and vertically interpolated green components
    """
    green_h = img.copy()  # green positions are copied
    green_v = img.copy()  # other values will be replaced
    for i, j in red_blue_positions(img):
        r = lambda k: take(img, i, j + k)  # r - relative indexing along the row
        green_h[i, j] = (r(1) + r(-1) + r(0)) / 2 - (r(2) + r(-2)) / 4
        r = lambda k: take(img, i + k, j)  # relative indexing along the column
        green_v[i, j] = (r(1) + r(-1) + r(0)) / 2 - (r(2) + r(-2)) / 4
    return green_h, green_v
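# At a red or blue pixel X(i, j), the expressions above are the Hamilton-Adams style estimate:
# e.g. horizontally, (r(1) + r(-1) + r(0)) / 2 - (r(2) + r(-2)) / 4
#   = (G(i, j-1) + G(i, j+1)) / 2 + (2 * X(i, j) - X(i, j-2) - X(i, j+2)) / 4,
# i.e. the average of the two neighboring greens corrected by a second-order (Laplacian) term
# computed on the co-located red/blue channel. The vertical estimate is the transposed formula.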
def green_decision(img, green_h, green_v, cardinal_directions_improvement=True):
    """Chooses, pixel by pixel, between the horizontally and vertically interpolated green values,
    based on the uniformity of the color differences (chrominances), measured through horizontal
    and vertical gradients.

    Args:
        img (np.ndarray): input (mosaicked) image
        green_h (np.ndarray): horizontally interpolated green component
        green_v (np.ndarray): vertically interpolated green component
        cardinal_directions_improvement (bool): if True (default), gives a larger weight to the
            central row and column of the window to improve the gradient computation

    Returns:
        green (np.ndarray): interpolated green image
        delta_h (np.ndarray): horizontal gradient image
        delta_v (np.ndarray): vertical gradient image
    """
    # "chrominance" is R - G in red locations, B - G in blue locations
    # and 0 in green locations
    chrominance_h = img - green_h
    chrominance_v = img - green_v

    # also 0 in green locations, this will be useful
    gradient_h = chrominance_h.copy()
    gradient_v = chrominance_v.copy()
    for i, j in red_blue_positions(img):
        gradient_h[i, j] -= take(chrominance_h, i, j + 2)
        gradient_v[i, j] -= take(chrominance_v, i + 2, j)
    gradient_h = np.abs(gradient_h)
    gradient_v = np.abs(gradient_v)

    # could be easily rewritten without loops
    window = np.ones(shape=(5, 5), dtype=np.float64)
    if cardinal_directions_improvement:
        window[2, :] = 3
        window[:, 2] = 3

    delta_h = np.zeros(shape=img.shape, dtype=np.float64)
    delta_v = delta_h.copy()
    padded_grad_h = np.zeros(shape=(img.shape[0] + 4, img.shape[1] + 4), dtype=np.float64)
    padded_grad_v = padded_grad_h.copy()
    padded_grad_h[2 : img.shape[0] + 2, 2 : img.shape[1] + 2] = gradient_h
    padded_grad_v[2 : img.shape[0] + 2, 2 : img.shape[1] + 2] = gradient_v

    green = green_h.copy()
    for i, j in red_blue_positions(img):
        delta_h[i, j] = np.sum(window * padded_grad_h[i : i + 5, j : j + 5])
        delta_v[i, j] = np.sum(window * padded_grad_v[i : i + 5, j : j + 5])
        if delta_v[i, j] < delta_h[i, j]:
            green[i, j] = green_v[i, j]
    return green, delta_h, delta_v
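# Sketch of the loop-free variant hinted at above (not part of the original submission): since
# `window` is symmetric, the windowed sums are plain 2-D correlations, so something like
#
#     delta_h = convolve2d(gradient_h, window, mode='same')
#     delta_v = convolve2d(gradient_v, window, mode='same')
#     green = np.where(delta_v < delta_h, green_v, green_h)
#
# reproduces the decision: delta_h/delta_v then differ from the loop version only at green
# positions (which the loop leaves at 0), and those values are never used downstream.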
def red_blue_interpolation(img, green, delta_h, delta_v):
    """Performs the red and blue component interpolation.

    Args:
        img (np.ndarray): input (mosaicked) image
        green (np.ndarray): interpolated green image
        delta_h (np.ndarray): horizontal gradient image
        delta_v (np.ndarray): vertical gradient image

    Returns:
        red, blue (np.ndarray): interpolated red and blue image components
    """
    height, width = img.shape
    red = img.copy()
    blue = img.copy()

    # green positions first
    for i in range(0, height, 2):  # green-red rows
        for j in range(0, width, 2):
            red[i, j] = (take(img, i, j - 1) + take(img, i, j + 1)) / 2
            blue[i, j] = (take(img, i - 1, j) + take(img, i + 1, j)) / 2
    for i in range(1, height, 2):  # green-blue rows
        for j in range(1, width, 2):
            blue[i, j] = (take(img, i, j - 1) + take(img, i, j + 1)) / 2
            red[i, j] = (take(img, i - 1, j) + take(img, i + 1, j)) / 2

    # now red in blue positions, blue in red positions
    red_minus_blue = red - blue
    for i in range(1, height, 2):
        for j in range(0, width, 2):
            if delta_v[i, j] < delta_h[i, j]:
                red[i, j] = blue[i, j] + (take(red_minus_blue, i - 1, j) +
                                          take(red_minus_blue, i + 1, j)) / 2
            else:
                red[i, j] = blue[i, j] + (take(red_minus_blue, i, j - 1) +
                                          take(red_minus_blue, i, j + 1)) / 2
    for i in range(0, height, 2):
        for j in range(1, width, 2):
            if delta_v[i, j] < delta_h[i, j]:
                blue[i, j] = red[i, j] - (take(red_minus_blue, i - 1, j) +
                                          take(red_minus_blue, i + 1, j)) / 2
            else:
                blue[i, j] = red[i, j] - (take(red_minus_blue, i, j - 1) +
                                          take(red_minus_blue, i, j + 1)) / 2
    return red, blue
def demosaicking_algorithm(img):
    """Main function of the Daniele Menon demosaicking algorithm.

    Args:
        img (np.ndarray): input (mosaicked) image

    Returns:
        np.ndarray: reconstructed image, with channels stacked as (R, G, B) and clipped to [0, 1]
    """
    green_h, green_v = directional_green_interpolation(img)
    green, delta_h, delta_v = green_decision(img, green_h, green_v)
    red, blue = red_blue_interpolation(img, green, delta_h, delta_v)
    return np.clip(np.dstack((red, green, blue)), 0, 1)
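# Minimal usage sketch (added for illustration, not part of the grading pipeline): runs the full
# pipeline on a small random mosaicked image, just to check output shape and value range.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    y_demo = rng.random((32, 32))                 # fake single-channel mosaicked image in [0, 1]
    x_demo = demosaicking_algorithm(y_demo)
    print(x_demo.shape)                           # expected: (32, 32, 3)
    print(x_demo.min() >= 0, x_demo.max() <= 1)   # values are clipped to [0, 1]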
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.nada_kouddane.functions import second_naive_interpolation, demosaicking_algorithm
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa, input_shape)
res = demosaicking_algorithm(y)
return res
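# Usage sketch (hypothetical, for illustration only; assumes the CFA operator exposes a direct()
# method matching its adjoint()): the evaluation pipeline is expected to call run_reconstruction
# with a mosaicked image produced by the forward operator, e.g.
#
#     op = CFA('bayer', (x.shape[0], x.shape[1], 3))
#     y = op.direct(x)
#     x_hat = run_reconstruction(y, 'bayer')
#
# Note that `op` and the imported second_naive_interpolation are currently unused in the body
# above: both CFA names go through demosaicking_algorithm.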
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller