Commit b2705b24 authored by Matthieu Muller

Merge branch 'merge-SADONES' into 'master'

Commit SADONES

See merge request !31
parents e2467053 cd70a737
"""A file containing a (pretty useless) reconstruction.
It serves as an example of how the project works.
This file should NOT be modified.
"""
import numpy as np
from scipy.signal import convolve2d
from src.forward_model import CFA
def naive_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
    """Performs a simple interpolation of the lost pixels.

    Args:
        op (CFA): CFA operator.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image.
    """
    z = op.adjoint(y)

    if op.cfa == 'bayer':
        res = np.empty(op.input_shape)

        res[:, :, 0] = convolve2d(z[:, :, 0], ker_bayer_red_blue, mode='same')
        res[:, :, 1] = convolve2d(z[:, :, 1], ker_bayer_green, mode='same')
        res[:, :, 2] = convolve2d(z[:, :, 2], ker_bayer_red_blue, mode='same')

    else:
        res = np.empty(op.input_shape)

        res[:, :, 0] = varying_kernel_convolution(z[:, :, 0], K_list_red)
        res[:, :, 1] = varying_kernel_convolution(z[:, :, 1], K_list_green)
        res[:, :, 2] = varying_kernel_convolution(z[:, :, 2], K_list_blue)

    return res
def extract_padded(M, size, i, j):
    """Extracts a (size x size) neighborhood of M centred on (i, j), zero-padded at the borders."""
    N_i, N_j = M.shape
    res = np.zeros((size, size))
    middle_size = int((size - 1) / 2)

    for ii in range(- middle_size, middle_size + 1):
        for jj in range(- middle_size, middle_size + 1):
            if i + ii >= 0 and i + ii < N_i and j + jj >= 0 and j + jj < N_j:
                res[middle_size + ii, middle_size + jj] = M[i + ii, j + jj]

    return res
def varying_kernel_convolution(M, K_list):
    """Convolves M with a kernel that changes with the position inside the 4x4 CFA period."""
    N_i, N_j = M.shape
    res = np.zeros_like(M)

    for i in range(N_i):
        for j in range(N_j):
            # Select the kernel associated with the position (i % 4, j % 4) in the pattern.
            K = K_list[4 * (i % 4) + j % 4]
            res[i, j] = np.sum(extract_padded(M, K.shape[0], i, j) * K)

    np.clip(res, 0, 1, res)

    return res
K_identity = np.zeros((5, 5))
K_identity[2, 2] = 1
K_red_0 = np.zeros((5, 5))
K_red_0[2, :] = np.array([-3, 13, 0, 0, 2]) / 12
K_red_1 = np.zeros((5, 5))
K_red_1[2, :] = np.array([2, 0, 0, 13, -3]) / 12
K_red_8 = np.zeros((5, 5))
K_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6
K_red_9 = np.zeros((5, 5))
K_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6
K_red_10 = np.zeros((5, 5))
K_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12
K_red_12 = np.zeros((5, 5))
K_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6
K_red_13 = np.zeros((5, 5))
K_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6
K_red_14 = np.zeros((5, 5))
K_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12
K_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]
K_green_2 = np.zeros((5, 5))
K_green_2[2, :] = [-3, 13, 0, 0, 2]
K_green_2[:, 2] = [-3, 13, 0, 0, 2]
K_green_2 = K_green_2 / 24
K_green_3 = np.zeros((5, 5))
K_green_3[2, :] = [2, 0, 0, 13, -3]
K_green_3[:, 2] = [-3, 13, 0, 0, 2]
K_green_3 = K_green_3 / 24
K_green_6 = np.zeros((5, 5))
K_green_6[2, :] = [-3, 13, 0, 0, 2]
K_green_6[:, 2] = [2, 0, 0, 13, -3]
K_green_6 = K_green_6 / 24
K_green_7 = np.zeros((5, 5))
K_green_7[2, :] = [2, 0, 0, 13, -3]
K_green_7[:, 2] = [2, 0, 0, 13, -3]
K_green_7 = K_green_7 / 24
K_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]
K_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]
ker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4
ker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4
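

# Minimal usage sketch, assuming the CFA operator behaves as it is used above
# (constructor CFA(name, input_shape), attributes cfa/input_shape, method adjoint)
# and that a Bayer mosaic is a single 2-D array; the random array below is only
# a stand-in for a real observation.
if __name__ == "__main__":
    y = np.random.rand(64, 64)                      # stand-in Bayer mosaic
    op = CFA('bayer', (64, 64, 3))                  # operator matching the image size
    x_hat = naive_interpolation(op, y)              # bilinear demosaicking
    print(x_hat.shape)                              # expected: (64, 64, 3)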
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
from scipy.signal import convolve2d
import numpy as np
def compute_gradients(R_directions, G_directions, B_directions, L):
    """
    This function computes the gradients of the reconstructed channels, in each direction and in the YUV space.
    Inputs:
        - R_directions, G_directions, B_directions : the reconstructed channels
        - L : the size of the neighborhood on which to compute the gradients
    Outputs:
        - gradients : the gradients of the reconstructed channels in each direction
    """
    Y = 0.299 * R_directions + 0.587 * G_directions + 0.114 * B_directions
    U = R_directions - Y
    V = B_directions - Y

    # U_roll_north = np.roll(U, -L, axis=0)
    # U_roll_south = np.roll(U, L, axis=0)
    # U_roll_east = np.roll(U, -L, axis=1)
    # U_roll_west = np.roll(U, L, axis=1)
    # V_roll_north = np.roll(V, -L, axis=0)
    # V_roll_south = np.roll(V, L, axis=0)
    # V_roll_east = np.roll(V, -L, axis=1)
    # V_roll_west = np.roll(V, L, axis=1)

    H, W = U.shape[0], U.shape[1]
    gradients = np.zeros((H, W, 4))

    for height in range(H):
        for width in range(W):
            somme_u = [0, 0, 0, 0]
            somme_v = [0, 0, 0, 0]
            for l in range(1, L + 1):
                somme_u[0] += (U[height, width, 0] - U[(height - l) % H, width, 0])**2
                somme_u[1] += (U[height, width, 1] - U[(height + l) % H, width, 1])**2
                somme_u[2] += (U[height, width, 2] - U[height, (width - l) % W, 2])**2
                somme_u[3] += (U[height, width, 3] - U[height, (width + l) % W, 3])**2
                somme_v[0] += (V[height, width, 0] - V[(height - l) % H, width, 0])**2
                somme_v[1] += (V[height, width, 1] - V[(height + l) % H, width, 1])**2
                somme_v[2] += (V[height, width, 2] - V[height, (width - l) % W, 2])**2
                somme_v[3] += (V[height, width, 3] - V[height, (width + l) % W, 3])**2
            gradients[height, width] = 1 / L * (np.sqrt(somme_u) + np.sqrt(somme_v))

    return gradients
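

# Quick sanity sketch for compute_gradients, assuming random directional estimates
# of shape (H, W, 4); it only checks that one chrominance gradient per pixel and
# per direction (shifts along +/- rows and +/- columns) comes out.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    R_d, G_d, B_d = rng.random((3, 32, 32, 4))
    grads = compute_gradients(R_d, G_d, B_d, L=2)
    print(grads.shape)                              # (32, 32, 4)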
def local_int(CFA_image, beta, L, epsilon, cfa_mask):
    """
    This function computes the reconstructed image using directional interpolation, then selects the best direction for each pixel.
    Inputs:
        - CFA_image : the three channels of the image (CFA)
        - beta : the parameter of intercorrelation between the channels
        - L : the size of the neighborhood on which to test the direction
        - epsilon : the threshold for the selection of the direction
        - cfa_mask : the CFA mask (currently unused in this function)
    Outputs:
        - R, G, B : the reconstructed channels
    """
    G_directions = np.zeros((CFA_image.shape[0], CFA_image.shape[1], 4))
    R_directions = np.zeros((CFA_image.shape[0], CFA_image.shape[1], 4))
    B_directions = np.zeros((CFA_image.shape[0], CFA_image.shape[1], 4))
    RG_directions = np.zeros((CFA_image.shape[0], CFA_image.shape[1], 4))
    BG_directions = np.zeros((CFA_image.shape[0], CFA_image.shape[1], 4))

    # Directional interpolation of the green channel
    G_directions[:, :, 0] = CFA_image[:, :, 1] + \
        np.roll(CFA_image[:, :, 1], -1, axis=0) + \
        beta / 2 * (CFA_image[:, :, 0] - np.roll(CFA_image[:, :, 0], -2, axis=0))
    G_directions[:, :, 1] = CFA_image[:, :, 1] + \
        np.roll(CFA_image[:, :, 1], 1, axis=0) + \
        beta / 2 * (CFA_image[:, :, 0] - np.roll(CFA_image[:, :, 0], 2, axis=0))
    G_directions[:, :, 2] = CFA_image[:, :, 1] + \
        np.roll(CFA_image[:, :, 1], -1, axis=1) + \
        beta / 2 * (CFA_image[:, :, 0] - np.roll(CFA_image[:, :, 0], -2, axis=1))
    G_directions[:, :, 3] = CFA_image[:, :, 1] + \
        np.roll(CFA_image[:, :, 1], 1, axis=1) + \
        beta / 2 * (CFA_image[:, :, 0] - np.roll(CFA_image[:, :, 0], 2, axis=1))

    # Copy the sampled red and blue values; they are bilinearly interpolated below
    R_directions[:, :, 0] += CFA_image[:, :, 0]
    R_directions[:, :, 1] += CFA_image[:, :, 0]
    R_directions[:, :, 2] += CFA_image[:, :, 0]
    R_directions[:, :, 3] += CFA_image[:, :, 0]
    B_directions[:, :, 0] += CFA_image[:, :, 2]
    B_directions[:, :, 1] += CFA_image[:, :, 2]
    B_directions[:, :, 2] += CFA_image[:, :, 2]
    B_directions[:, :, 3] += CFA_image[:, :, 2]

    # Chrominance-like differences (computed but not used further below)
    RG_directions[:, :, 0] += RG_directions[:, :, 0] - beta * G_directions[:, :, 0]
    RG_directions[:, :, 1] += RG_directions[:, :, 1] - beta * G_directions[:, :, 1]
    RG_directions[:, :, 2] += RG_directions[:, :, 2] - beta * G_directions[:, :, 2]
    RG_directions[:, :, 3] += RG_directions[:, :, 3] - beta * G_directions[:, :, 3]
    BG_directions[:, :, 0] += BG_directions[:, :, 0] - beta * G_directions[:, :, 0]
    BG_directions[:, :, 1] += BG_directions[:, :, 1] - beta * G_directions[:, :, 1]
    BG_directions[:, :, 2] += BG_directions[:, :, 2] - beta * G_directions[:, :, 2]
    BG_directions[:, :, 3] += BG_directions[:, :, 3] - beta * G_directions[:, :, 3]

    # Bilinear interpolation
    kernel = 1 / 4 * np.array([
        [1, 2, 1],
        [2, 4, 2],
        [1, 2, 1]
    ])
    R_directions[:, :, 0] = convolve2d(R_directions[:, :, 0], kernel, mode='same')
    R_directions[:, :, 1] = convolve2d(R_directions[:, :, 1], kernel, mode='same')
    R_directions[:, :, 2] = convolve2d(R_directions[:, :, 2], kernel, mode='same')
    R_directions[:, :, 3] = convolve2d(R_directions[:, :, 3], kernel, mode='same')
    B_directions[:, :, 0] = convolve2d(B_directions[:, :, 0], kernel, mode='same')
    B_directions[:, :, 1] = convolve2d(B_directions[:, :, 1], kernel, mode='same')
    B_directions[:, :, 2] = convolve2d(B_directions[:, :, 2], kernel, mode='same')
    B_directions[:, :, 3] = convolve2d(B_directions[:, :, 3], kernel, mode='same')

    R_directions += beta * G_directions
    B_directions += beta * G_directions

    # Select the best direction per pixel via inverse-gradient weighting
    gradients = compute_gradients(R_directions, G_directions, B_directions, L)
    inv_gradients = 1 / (gradients + epsilon)
    somme_gradients = np.sum(inv_gradients, axis=2)
    inv_gradients[:, :, 0] = inv_gradients[:, :, 0] / somme_gradients
    inv_gradients[:, :, 1] = inv_gradients[:, :, 1] / somme_gradients
    inv_gradients[:, :, 2] = inv_gradients[:, :, 2] / somme_gradients
    inv_gradients[:, :, 3] = inv_gradients[:, :, 3] / somme_gradients

    R_1 = np.sum(R_directions * inv_gradients, axis=2)
    G_1 = np.sum(G_directions * inv_gradients, axis=2)
    B_1 = np.sum(B_directions * inv_gradients, axis=2)

    return R_1, G_1, B_1
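

# Usage sketch for local_int, assuming a (H, W, 3) CFA embedding `z` (for instance
# the adjoint of the mosaicked image, with zeros at the unsampled positions) and a
# per-channel CFA mask; the random arrays below are stand-ins, and the parameter
# values mirror those used by run_reconstruction in this merge request.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    z = rng.random((64, 64, 3))             # stand-in for the CFA embedding
    mask = np.zeros((64, 64, 3))            # stand-in for the operator's mask
    R_1, G_1, B_1 = local_int(z, beta=0.7, L=12, epsilon=1e-8, cfa_mask=mask)
    demosaicked = np.stack((R_1, G_1, B_1), axis=2)
    print(demosaicked.shape)                # (64, 64, 3)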
import numpy as np
def nonlocal_int(R_0, G_0, B_0, beta, h, k, rho, N, cfa_mask):
    """
    Nonlocal interpolation algorithm
    Inputs:
        - R_0, G_0, B_0 : the initial reconstructed channels
        - beta : the parameter of intercorrelation between the channels
        - h : the filtering parameter
        - k : the half-size of the search window
        - rho : the half-size of the patches compared within the search window
        - N : the number of closest patches (best matches) kept for each pixel
    Outputs:
        - R, G, B : the reconstructed channels
    """
    height, width = R_0.shape
    u_0 = np.stack((R_0, G_0, B_0), axis=2)
    # Zero-pad u_0 so that every patch keeps a fixed (2*rho+1, 2*rho+1) size,
    # including patches centred near the image border.
    u_pad = np.pad(u_0, ((rho, rho), (rho, rho), (0, 0)))
    d = np.ones((height, width, height, width)) * np.inf
    w = np.zeros((height, width, N))
    ksi = np.zeros((height, width))
    R = np.zeros_like(R_0)
    G = np.zeros_like(G_0)
    B = np.zeros_like(B_0)
    index_image = np.zeros((height, width, N, 2))

    # Compute weight distribution on the initialization u_0
    for i in range(height):
        for j in range(width):
            # p = (i, j); q = (qi, qj) runs over the search window around p
            for qi in range(max(0, i - k), min(height, i + k + 1)):
                for qj in range(max(0, j - k), min(width, j + k + 1)):
                    d[i, j, qi, qj] = np.sum((u_pad[i:i + 2 * rho + 1, j:j + 2 * rho + 1]
                                              - u_pad[qi:qi + 2 * rho + 1, qj:qj + 2 * rho + 1])**2)
            # Give p itself the smallest distance found in its search window
            d[i, j, i, j] = np.inf
            d[i, j, i, j] = np.min(d[i, j])
            sorted_image = np.sort(d[i, j], axis=None)
            for n in range(N):
                # Coordinates of the n-th closest patch (first match in case of ties)
                matches = np.where(d[i, j] == sorted_image[n])
                index_image[i, j, n] = (matches[0][0], matches[1][0])
                w[i, j, n] = np.exp(-sorted_image[n] / (h**2))
            ksi[i, j] = np.sum(w[i, j])
            for n in range(N):
                w[i, j, n] = w[i, j, n] / ksi[i, j]

    # Enhancement of green channel
    for i in range(height):
        for j in range(width):
            if not cfa_mask[i, j, 1] == 1:  # Not in green CFA
                for n in range(N):
                    green_qn = G_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]  # G0(qn)
                    red_qn = R_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]
                    blue_qn = B_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]
                    G[i, j] += w[i, j, n] * (green_qn - beta * (red_qn + blue_qn)) + beta * (R_0[i, j] + B_0[i, j])

    # Enhancement of red and blue channels
    for i in range(height):
        for j in range(width):
            if not cfa_mask[i, j, 0] == 1:  # Not in red CFA
                for n in range(N):
                    red_qn = R_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]  # R0(qn)
                    G_qn = G_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]
                    R[i, j] += w[i, j, n] * (red_qn - beta * G_qn) + beta * G_0[i, j]
            if not cfa_mask[i, j, 2] == 1:  # Not in blue CFA
                for n in range(N):
                    blue_qn = B_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]  # B0(qn)
                    G_qn = G_0[int(index_image[i, j, n, 0]), int(index_image[i, j, n, 1])]
                    B[i, j] += w[i, j, n] * (blue_qn - beta * G_qn) + beta * G_0[i, j]

    return R, G, B
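

# Self-contained illustration of the nonlocal weighting scheme described in the
# docstring above (a sketch, not this project's API): for one pixel p of a
# (H, W, C) image u, keep the N closest patches in a (2k+1)x(2k+1) search window
# and weight them by exp(-||patch(p) - patch(q)||^2 / h^2), normalized to sum to 1.
# Reflective padding is an assumption made here so border pixels are handled.
def _nonlocal_weights_single_pixel(u, p, h=10.0, k=3, rho=2, N=5):
    H, W = u.shape[:2]
    pad = np.pad(u, ((rho, rho), (rho, rho), (0, 0)), mode='reflect')

    def patch(q):
        # Patch of half-size rho centred on q in the padded image.
        return pad[q[0]:q[0] + 2 * rho + 1, q[1]:q[1] + 2 * rho + 1]

    i, j = p
    candidates, dists = [], []
    for qi in range(max(0, i - k), min(H, i + k + 1)):
        for qj in range(max(0, j - k), min(W, j + k + 1)):
            if (qi, qj) != (i, j):
                candidates.append((qi, qj))
                dists.append(np.sum((patch(p) - patch((qi, qj)))**2))
    order = np.argsort(dists)[:N]                     # N best matches q_1, ..., q_N
    weights = np.exp(-np.asarray(dists)[order] / h**2)
    return [candidates[t] for t in order], weights / weights.sum()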
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in other files of src/methods/your_name).
"""
import numpy as np
import matplotlib.pyplot as plt
from local_int import local_int
from forward_model import CFA
from demo_reconstruction import naive_interpolation
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    if cfa == "quad_bayer":
        input_shape = (y.shape[0], y.shape[1], 3)
        op = CFA(cfa, input_shape)
        return naive_interpolation(op, y)

    # Performing the reconstruction.
    cfa_obj = CFA(cfa, y.shape)
    cfa_mask = cfa_obj.mask

    R_0, G_0, B_0 = local_int(y, 0.7, 12, 1e-8, cfa_mask)

    return np.stack((R_0, G_0, B_0), axis=2)
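

# Usage sketch for the entry point, assuming the mosaic is stored as a
# three-channel array with zeros at the unsampled positions, as local_int above
# expects; the random array is only a stand-in for an observation produced by
# the project's forward model.
if __name__ == "__main__":
    y = np.random.rand(64, 64, 3)           # stand-in mosaicked observation
    x_hat = run_reconstruction(y, "bayer")
    plt.imshow(np.clip(x_hat, 0, 1))
    plt.axis("off")
    plt.show()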
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller