Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • samanost/sicom_image_analysis_project
  • gerayelk/sicom_image_analysis_project
  • jelassiy/sicom_image_analysis_project
  • chardoto/sicom_image_analysis_project
  • chaarim/sicom_image_analysis_project
  • domers/sicom_image_analysis_project
  • elmurrt/sicom_image_analysis_project
  • sadonest/sicom_image_analysis_project
  • kouddann/sicom_image_analysis_project
  • mirabitj/sicom-image-analysis-project-mirabito
  • plotj/sicom_image_analysis_project
  • torrem/sicom-image-analysis-project-maxime-torre
  • dzike/sicom_image_analysis_project
  • daip/sicom_image_analysis_project
  • casanovv/sicom_image_analysis_project
  • girmarti/sicom_image_analysis_project
  • lioretn/sicom_image_analysis_project
  • lemoinje/sicom_image_analysis_project
  • ouahmanf/sicom_image_analysis_project
  • vouilloa/sicom_image_analysis_project
  • diopb/sicom_image_analysis_project
  • davidale/sicom_image_analysis_project
  • enza/sicom_image_analysis_project
  • conversb/sicom_image_analysis_project
  • mullemat/sicom_image_analysis_project
25 results
Show changes
Showing
with 1351 additions and 0 deletions
File added
"""This file contains all the functions that are used in order to perform a Kimmel algorithm demosaicking"""
##### Importations #####
import numpy as np
import matplotlib.pyplot as plt
from src.forward_model import CFA
##### Some functions used #####
def neigh(i, j, colour_chan, img):
    """Collect the 3x3 neighbourhood of pixel (i, j) in one colour channel.

    Args:
        i: first (row) index of the pixel
        j: second (column) index of the pixel
        colour_chan: colour channel to read (0, 1 or 2)
        img: H x W x C image

    Returns:
        List [P1, ..., P9] of the 8 neighbours plus the centre pixel P5,
        following the P1-P9 layout of page 2 of
        http://ultra.sdk.free.fr/docs/Image-Processing/Demosaic/An%20Improved%20Demosaicing%20Algorithm.pdf
    """
    # Bug fix: the first index runs over shape[0] and the second over
    # shape[1], so each index must be wrapped with its own dimension.
    # The original wrapped i with shape[1] and j with shape[0], which is
    # only correct for square images.
    X = img.shape[0]
    Y = img.shape[1]
    # % (modulo) wraps around the borders (avoids index-out-of-bounds).
    P1 = img[(i-1) % X, (j-1) % Y, colour_chan]
    P2 = img[i % X, (j-1) % Y, colour_chan]
    P3 = img[(i+1) % X, (j-1) % Y, colour_chan]
    P4 = img[(i-1) % X, j % Y, colour_chan]
    P5 = img[i % X, j % Y, colour_chan]
    P6 = img[(i+1) % X, j % Y, colour_chan]
    P7 = img[(i-1) % X, (j+1) % Y, colour_chan]
    P8 = img[i % X, (j+1) % Y, colour_chan]
    P9 = img[(i+1) % X, (j+1) % Y, colour_chan]
    return [P1, P2, P3, P4, P5, P6, P7, P8, P9]
def derivat(neighbour_list):
    """Compute the four directional derivatives of the centre pixel.

    Args:
        neighbour_list: the nine values [P1, ..., P9] returned by neigh()

    Returns:
        [Dx, Dy, Dxd, Dyd]: horizontal, vertical and the two diagonal
        derivatives (independent of the colour component).
    """
    P1, P2, P3, P4, _, P6, P7, P8, P9 = neighbour_list
    # Diagonal steps are sqrt(2) long, hence the extra sqrt(2) factor.
    diag_step = 2 * np.sqrt(2)
    return [
        (P4 - P6) / 2,          # Dx: horizontal
        (P2 - P8) / 2,          # Dy: vertical
        (P3 - P7) / diag_step,  # Dxd: first diagonal
        (P1 - P9) / diag_step,  # Dyd: second diagonal
    ]
def weight_fct(i, j, neighbour_list, deriv_list, colour_chan, img):
    """Compute the nine weights Ei over the 3x3 neighbourhood of (i, j).

    Args:
        i: first (row) index of the pixel
        j: second (column) index of the pixel
        neighbour_list: list of neighbours of (i, j) (kept for interface
            compatibility; the neighbourhoods are re-sampled per offset)
        deriv_list: [Dx, Dy, Dxd, Dyd] derivatives at (i, j)
        colour_chan: colour channel chosen
        img: image

    Returns:
        List of the 9 weights E1..E9 (same ordering as neigh()).
    """
    [Dx, Dy, Dxd, Dyd] = deriv_list
    E = []
    cur = 1  # current neighbour index, 1..9 in the P1..P9 layout
    # Bug fix: the original nested loops reused the same variable name
    # ('step'), so the inner loop shadowed the outer one and only the
    # diagonal offsets (step, step) were ever visited (each 3 times).
    # Iterate the full 3x3 neighbourhood instead: dj moves the second
    # index, di the first, matching the P1..P9 ordering of neigh().
    for dj in range(-1, 2):
        for di in range(-1, 2):
            n = neigh(i + di, j + dj, colour_chan, img)
            d = derivat(n)
            if cur == 4 or cur == 6:    # horizontal neighbours
                E.append(1 / np.sqrt(1 + Dx**2 + d[0]**2))
            elif cur == 2 or cur == 8:  # vertical neighbours
                E.append(1 / np.sqrt(1 + Dy**2 + d[1]**2))
            elif cur == 7 or cur == 3:  # first diagonal
                E.append(1 / np.sqrt(1 + Dxd**2 + d[2]**2))
            else:                       # 1, 5 or 9: second diagonal
                E.append(1 / np.sqrt(1 + Dyd**2 + d[3]**2))
            cur = cur + 1
    return E
def clip_extreme(img):
    """Clamp every value of the image into the [0, 1] range.

    Args:
        img: image

    Returns:
        The clipped image
    """
    return np.clip(img, 0, 1)
def quad_to_bayer(img, op):
    """Performs a conversion from Quad_bayer to Bayer pattern.

    Both ``img`` and ``op.mask`` are shuffled in place so the samples and
    the CFA mask stay consistent.

    Args:
        img: mosaicked image (modified in place and returned)
        op: CFA operator; op.mask is re-arranged alongside img

    Returns:
        The image re-arranged into a Bayer pattern
    """
    # Based on p28/32 https://pyxalis.com/wp-content/uploads/2021/12/PYX-ImageViewer-User_Guide.pdf
    # NOTE(review): the column loop below is bounded by img.shape[0] (rows)
    # and the line loop by img.shape[1] (columns) — this is only correct for
    # square images; confirm inputs are always square.
    ### 1. Swap 2 col every 2 columns
    for j in range (1, img.shape[0]-1, 4):
        store = np.copy(img[:, j]) # Store col j of the img
        img[:, j] = np.copy(img[:, j+1]) # Col j becomes like col j+1
        img[:, j+1] = store # Col j+1 becomes like col j (saved above, otherwise lost)
        # Keep the CFA mask consistent with the shuffled samples
        store_op = np.copy(op.mask[:, j])
        op.mask[:, j] = np.copy(op.mask[:, j+1])
        op.mask[:, j+1] = store_op
    ### 2. Swap 2 lines every 2 lines (same process for the lines)
    for i in range (1, img.shape[1]-1, 4):
        store = np.copy(img[i, :])
        img[i, :] = np.copy(img[i+1, :])
        img[i+1, :] = store
        store_op = np.copy(op.mask[i, :])
        op.mask[i, :] = np.copy(op.mask[i+1, :])
        op.mask[i+1, :] = store_op
    ### 3. Swap back some diagonal greens
    # Starts: 0 and 2 / Steps: 4 (see the Pyxalis drawing, step 3)
    for k in range(0, img.shape[0], 4):
        for l in range(2, img.shape[1], 4):
            store = np.copy(img[k, l])
            img[k, l] = np.copy(img[k+1, l-1])
            img[k+1, l-1] = store
            store_op = op.mask[k, l]
            op.mask[k, l] = np.copy(op.mask[k+1, l-1])
            op.mask[k+1, l-1] = store_op
    return img
##### 1. Interpolation of green colour #####
def interpol_green(neighbour_list, weights_list):
    """Interpolate a missing green pixel from its 4-connected neighbours.

    Args:
        neighbour_list: list of neighbours [P1..P9]
        weights_list: list of weights [E1..E9]

    Returns:
        The interpolated pixel value (weighted mean of P2, P4, P6, P8).
    """
    _, P2, _, P4, _, P6, _, P8, _ = neighbour_list
    _, E2, _, E4, _, E6, _, E8, _ = weights_list
    numerator = E2*P2 + E4*P4 + E6*P6 + E8*P8
    denominator = E2 + E4 + E6 + E8
    return numerator / denominator
##### 2. Interpolation of red and blue colours #####
def interpol_red_blue(neighbour_list, weights_list, green_neighbour):
    """Interpolate a missing blue/red pixel from its diagonal neighbours.

    The red (or blue) over green ratios of the four diagonal neighbours
    P1, P3, P7, P9 are averaged with the given weights and rescaled by the
    centre green value G5.

    Args:
        neighbour_list: list of neighbours in the blue/red channel
        weights_list: list of weights [E1..E9]
        green_neighbour: list of the neighbours in the green channel

    Returns:
        The interpolated pixel (in the blue/red channel)
    """
    numerator = 0.0
    denominator = 0.0
    for k in (0, 2, 6, 8):  # diagonal neighbours P1, P3, P7, P9
        numerator += weights_list[k] * (neighbour_list[k] / green_neighbour[k])
        denominator += weights_list[k]
    return green_neighbour[4] * numerator / denominator
##### 3. Correction stage #####
def green_correction(red_neighbour, green_neighbour, blue_neighbour, weights_list):
    """Correction of green pixels.

    Averages the green estimates obtained from the weighted green/blue and
    green/red ratios of the 4-connected neighbours (P2, P4, P6, P8).

    Args:
        red_neighbour: neighbours in red channel [R1..R9]
        green_neighbour: neighbours in green channel [G1..G9]
        blue_neighbour: neighbours in blue channel [B1..B9]
        weights_list: list of weights [E1..E9]

    Returns:
        Corrected green pixel
    """
    # Assignment
    [G1, G2, G3, G4, G5, G6, G7, G8, G9] = green_neighbour
    [R1, R2, R3, R4, R5, R6, R7, R8, R9] = red_neighbour
    [B1, B2, B3, B4, B5, B6, B7, B8, B9] = blue_neighbour
    [E1, E2, E3, E4, E5, E6, E7, E8, E9] = weights_list
    # Weighted green/blue and green/red ratios over the cross neighbours
    GB5_num = E2*(G2/B2) + E4*(G4/B4) + E6*(G6/B6) + E8*(G8/B8)
    GR5_num = E2*(G2/R2) + E4*(G4/R4) + E6*(G6/R6) + E8*(G8/R8)
    G5_denom = E2+E4+E6+E8
    GB5 = B5 * GB5_num / G5_denom
    GR5 = R5 * GR5_num / G5_denom
    # Bug fix: removed the leftover per-pixel debug print statements —
    # this function is called once per pixel, so they flooded stdout.
    G5 = (GR5 + GB5) / 2
    return G5
def blue_correction(green_neighbour, blue_neighbour, weights_list):
    """Correction of blue pixels.

    Weighted average of the blue/green ratios of the 8 neighbours,
    rescaled by the centre green value.

    Args:
        green_neighbour: neighbours in green channel [G1..G9]
        blue_neighbour: neighbours in blue channel [B1..B9]
        weights_list: list of weights [E1..E9]

    Returns:
        Corrected blue pixel
    """
    G5 = green_neighbour[4]  # centre green (P5)
    B5_num = 0
    B5_denom = 0
    # Bug fix: the centre pixel P5 sits at index 4 of the 9-element lists,
    # not index 5 — the original excluded neighbour P6 instead of the
    # centre. Also removed the leftover per-pixel debug prints.
    for k in range(len(weights_list)):
        if k != 4:
            B5_num += weights_list[k] * blue_neighbour[k] / green_neighbour[k]
            B5_denom += weights_list[k]
    B5 = G5 * B5_num / B5_denom
    return B5
def red_correction(red_neighbour, green_neighbour, weights_list):
    """Correction of red pixels.

    Weighted average of the red/green ratios of the 8 neighbours,
    rescaled by the centre green value.

    Args:
        red_neighbour: neighbours in red channel [R1..R9]
        green_neighbour: neighbours in green channel [G1..G9]
        weights_list: list of weights [E1..E9]

    Returns:
        Corrected red pixel
    """
    G5 = green_neighbour[4]  # centre green (P5)
    R5_num = 0
    R5_denom = 0
    # Bug fix: the centre pixel P5 sits at index 4 of the 9-element lists,
    # not index 5 — the original excluded neighbour P6 instead of the centre.
    for k in range(len(weights_list)):
        if k != 4:
            R5_num += weights_list[k] * red_neighbour[k] / green_neighbour[k]
            R5_denom += weights_list[k]
    R5 = G5 * R5_num / R5_denom
    return R5
\ No newline at end of file
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.laporte_auriane.fct_to_demosaick import *
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y (Kimmel algorithm).

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA(cfa, input_shape)
    ########## TODO ##########
    # Colour channel indices
    red = 0
    green = 1
    blue = 2
    # Flag for the (disabled) final correction stage — see NOTE below.
    correction = 0
    # In case of Quad_bayer op: re-arrange samples into a Bayer pattern first
    if op.cfa == 'quad_bayer':
        print("Transformation into Bayer pattern")
        y = quad_to_bayer(y, op)
        print('Quad_bayer to Bayer done\n\nDemoisaicking processing...')
    z = op.adjoint(y)
    # 1st and 2nd dim of a colour channel
    X = z[:,:,0].shape[0]
    Y = z[:,:,0].shape[1]
    ### Kimmel algorithm ###
    # About 3 to 4 minutes (per-pixel Python loops)
    # 1. Interpolation of green colour at every unsampled green position
    for i in range (X):
        for j in range (Y):
            # A zero in the green channel marks a missing sample
            if z[i,j,1] == 0:
                # Find neighbours
                n = neigh(i, j, green, z)
                # Derivatives
                d = derivat(n)
                # Weighting fct
                w = weight_fct(i, j, n, d, green, z)
                # Green interpolation (cross neighbours)
                z[i,j,1] = interpol_green(n, w)
    # Clip (avoid values out of range 0-1)
    z = clip_extreme(z)
    # 2 steps for the blues
    # 2a. Interpolate missing blues at the red locations (diagonal neighbours)
    for i in range (0, X, 2):
        for j in range (1, Y, 2):
            # Find neighbours + green
            n = neigh(i, j, blue, z)
            n_G = neigh(i, j, green, z)
            d = derivat(n_G)
            # NOTE(review): here the blue neighbours `n` are passed to
            # weight_fct with the green channel — the red pass below passes
            # n_G instead; confirm which is intended.
            w = weight_fct(i, j, n, d, green, z)
            # Blue interpolation (4 corners)
            z[i,j,2] = interpol_red_blue(n, w, n_G)
    # 2b. Interpolate missing blues at the green locations (cross neighbours)
    for i in range (X):
        for j in range (Y):
            if z[i,j,2] == 0:
                n_B = neigh(i, j, blue, z)
                d = derivat(n_B)
                w = weight_fct(i, j, n_B, d, blue, z)
                z[i,j,2] = interpol_green(n_B,w)
    z = clip_extreme(z)
    # 2 steps for the reds
    # 3a. Interpolate missing reds at the blue locations (diagonal neighbours)
    for i in range (1, X, 2):
        for j in range (0, Y, 2):
            n = neigh(i, j, red, z)
            n_G = neigh(i, j, green, z)
            d = derivat(n_G)
            w = weight_fct(i, j, n_G, d, green, z)
            # Red interpolation (4 corners)
            z[i,j,0] = interpol_red_blue(n, w, n_G)
    # 3b. Interpolate missing reds at the green locations (cross neighbours)
    for i in range (X):
        for j in range (Y):
            if z[i,j,0] == 0:
                n_R = neigh(i, j, red, z)
                d = derivat(n_R)
                w = weight_fct(i, j, n_R, d, red, z)
                z[i,j,0] = interpol_green(n_R,w)
    z = clip_extreme(z)
    # Correction step (repeated 3 times)
    # NOTE(review): this block is disabled (correction = 0 above). As
    # written it would only use n_R, n_G, n_B and w left over from the LAST
    # pixel of the loops above, i.e. it corrects a single neighbourhood,
    # not the whole image — looks unfinished; confirm before enabling.
    if correction == 1:
        for i in range(3):
            n_G[4] = green_correction(n_R, n_G, n_B, w)
            n_B[4] = blue_correction(n_G, n_B, w)
            n_R[4] = red_correction(n_R, n_G, w)
    return z
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from scipy.signal import medfilt2d
def freeman_median_demosaicking(op: CFA, y: np.ndarray) -> np.ndarray:
    """Performs the Freeman's method with median for demosaicking.

    Args:
        op (CFA): CFA operator.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image.
    """
    z = op.adjoint(y)
    res = np.empty(op.input_shape)
    # NOTE(review): these masks are all-zero and never populated, so every
    # `M* * mask_*` product below vanishes and the median-filtered planes
    # contribute nothing through those terms — looks unfinished; they were
    # presumably meant to select the R/G/B sample locations. Confirm.
    mask_r = np.zeros_like(z[:, :, 0], dtype = float)
    mask_g = np.zeros_like(z[:, :, 1], dtype = float)
    mask_b = np.zeros_like(z[:, :, 2], dtype = float)
    # Median-filtered inter-channel differences (Freeman's correction terms)
    D_rg = z[:, :, 0] - z[:, :, 1]
    M1 = medfilt2d(D_rg, kernel_size=5)
    D_gb = z[:, :, 1] - z[:, :, 2]
    M2 = medfilt2d(D_gb, kernel_size=5)
    D_rb = z[:, :, 0] - z[:, :, 2]
    M3 = medfilt2d(D_rb, kernel_size=5)
    # With the zero masks above, each output channel currently reduces to
    # z[:,:,0] + z[:,:,1] + z[:,:,2].
    res[:, :, 0] = z[:, :, 0] + (M1 * mask_g + z[:, :, 1]) + (M3 * mask_b + z[:, :, 2])
    res[:, :, 1] = z[:, :, 1] + (z[:, :, 0] - M1 * mask_r) + (M2 * mask_b + z[:, :, 2])
    res[:, :, 2] = z[:, :, 2] + (z[:, :, 1] - M2 * mask_g) + (z[:, :, 0] - M3 * mask_r)
    return res
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # Build the CFA operator for a 3-channel output and delegate to the
    # Freeman median-based reconstruction.
    operator = CFA(cfa, (y.shape[0], y.shape[1], 3))
    return freeman_median_demosaicking(operator, y)
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
"""A file containing the Malvar et al. reconstruction.
"""
import matplotlib.pyplot as plt
import numpy as np
from src.forward_model import CFA
from scipy.ndimage.filters import convolve
# Masks definition: 5x5 Malvar et al. (2004) filters, all scaled by 1/8.
M0 = np.array([  # Recover G at R and B locations
    [ 0,  0, -1,  0,  0],
    [ 0,  0,  2,  0,  0],
    [-1,  2,  4,  2, -1],
    [ 0,  0,  2,  0,  0],
    [ 0,  0, -1,  0,  0],
]) / 8
M1 = np.array([  # Recover R at G in R row, B col; Recover B at G in B row, R col
    [  0,  0, 1/2,  0,  0],
    [  0, -1,   0, -1,  0],
    [ -1,  4,   5,  4, -1],
    [  0, -1,   0, -1,  0],
    [  0,  0, 1/2,  0,  0],
]) / 8
M2 = M1.T  # Recover R at G in B row, R col; Recover B at G in R row, B col
M3 = np.array([  # Recover R at B in B row, B col; Recover B at R in R row, R col
    [   0, 0, -3/2, 0,    0],
    [   0, 2,    0, 2,    0],
    [-3/2, 0,    6, 0, -3/2],
    [   0, 2,    0, 2,    0],
    [   0, 0, -3/2, 0,    0],
]) / 8
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs a high quality linear interpolation of the lost pixels from Malvar et al's (2004) paper.

    Args:
        y (np.ndarray): Mosaicked image.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image, clipped to [0, 1].
    """
    input_shape = (y.shape[0], y.shape[1], 3)
    op = CFA(cfa, input_shape)
    # If CFA is Quadratic it needs be transformed into simple Bayer pattern
    if op.cfa == 'quad_bayer':
        op.mask, y = adapt_quad(op, y)
    # Applies the adjoint operation to y
    z = op.adjoint(y)
    # Construct the binary CFA masks (R,G,B) of z: 1 where the channel was sampled
    R = np.where(z[:,:,0] != 0., 1, 0) # Red
    G = np.where(z[:,:,1] != 0., 1, 0) # Green
    B = np.where(z[:,:,2] != 0., 1, 0) # Blue
    # R, G, B channels of the image to be reconstructed
    R_demosaic = z[:,:,0]
    G_demosaic = z[:,:,1]
    B_demosaic = z[:,:,2]
    # Padding to recover the edges
    # NOTE(review): y_pad[2:y_pad.shape[0]-2, 2:y_pad.shape[1]-2] used below
    # is exactly y, so this padding has no effect on the convolve() calls
    # (scipy's convolve applies its own border mode) — confirm whether
    # convolving y_pad itself was intended.
    y_pad = np.pad(y, 2, 'constant', constant_values = 0)
    # Recover red and blue columns/rows (broadcast row/column indicators)
    # Red rows
    Rr = np.any(R == 1, axis = 1)[None].T * np.ones(R.shape)
    # Red columns
    Rc = np.any(R == 1, axis = 0)[None] * np.ones(R.shape)
    # Blue rows
    Br = np.any(B == 1, axis = 1)[None].T * np.ones(B.shape)
    # Blue columns
    Bc = np.any(B == 1, axis = 0)[None] * np.ones(B.shape)
    # Recover the lost pixels: pick the Malvar filter matching each site type
    # Red channel
    R_demosaic = np.where(np.logical_and(Rr == 1, Bc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M1), R_demosaic )
    R_demosaic = np.where(np.logical_and(Br == 1, Rc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M2), R_demosaic )
    R_demosaic = np.where(np.logical_and(Br == 1, Bc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M3), R_demosaic )
    # Green channel: estimated at every R and B site with M0
    G_demosaic = np.where(np.logical_or(R == 1, B == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M0), G_demosaic )
    # Blue channel (mirror of the red cases)
    B_demosaic = np.where(np.logical_and(Br == 1, Rc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M1), B_demosaic )
    B_demosaic = np.where(np.logical_and(Rr == 1, Bc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M2), B_demosaic )
    B_demosaic = np.where(np.logical_and(Rr == 1, Rc == 1), convolve(y_pad[2:y_pad.shape[0]-2,2:y_pad.shape[1]-2], M3), B_demosaic )
    # Stack the 3 channels
    demosaiced = np.stack((R_demosaic, G_demosaic, B_demosaic), axis=2)
    # Clip in place to the valid [0, 1] range
    return np.clip(demosaiced, 0, 1, demosaiced)
def adapt_quad(op: CFA, y: np.ndarray):
    """Performs an adaptation of the Quad Bayer pattern to a simple Bayer pattern.

    Args:
        op (CFA): CFA operator with Quad Bayer pattern (op.mask is read,
            not modified).
        y (np.ndarray): Mosaicked image.

    Returns:
        bayer (np.ndarray): mask re-arranged into a simple Bayer pattern.
        new (np.ndarray): mosaicked image re-arranged the same way.
    """
    L, l, d = op.mask.shape
    bayer = op.mask.copy()
    new = y.copy()
    # Swap 2 columns every 2 columns
    for j in range(1, l, 4):
        bayer[:,j], bayer[:,j+1] = bayer[:,j+1].copy(), bayer[:,j].copy()
        new[:,j], new[:,j+1] = new[:,j+1].copy(), new[:,j].copy()
    # Swap 2 lines every 2 lines
    for i in range(1, L, 4):
        bayer[i, :], bayer[i+1,:] = bayer[i+1,:].copy(), bayer[i,:].copy()
        new[i, :], new[i+1,:] = new[i+1,:].copy(), new[i,:].copy()
    # Swap back some diagonal greens
    for i in range(1, L, 4):
        # Bug fix: the inner loop was `range(1, 1, 4)` — an empty range
        # (almost certainly a typo for the width variable `l`), so the
        # diagonal swap-back never executed.
        for j in range(1, l, 4):
            # NOTE(review): the mask swap reads the original (pre-swap)
            # op.mask while `new` swaps its already-shuffled values —
            # confirm this asymmetry is intended.
            bayer[i,j], bayer[i+1,j+1] = op.mask[i+1,j+1].copy(), op.mask[i,j].copy()
            new[i,j], new[i+1,j+1] = new[i+1,j+1].copy(), new[i,j].copy()
    return bayer, new
\ No newline at end of file
File added
Quite simple to use: the reconstruct.py file calls demosaicking.py (where the demosaicking is performed) and is itself called by main.ipynb.
\ No newline at end of file
"""A file containing a (pretty useless) reconstruction.
It serves as example of how the project works.
This file should NOT be modified.
"""
import numpy as np
from scipy.signal import convolve2d
from src.forward_model import CFA
# High quality linear interpolation filters for bayer mosaic
# Coefficients from Malvar, He & Cutler (2004), "High-quality linear
# interpolation for demosaicing of Bayer-patterned color images".
bayer_g_at_r = np.array([[ 0, 0, -1, 0, 0],
                         [ 0, 0, 2, 0, 0],
                         [-1, 2, 4, 2, -1],
                         [ 0, 0, 2, 0, 0],
                         [ 0, 0, -1, 0, 0]]) / 8
# The green filter is identical at R and B sites.
bayer_g_at_b = bayer_g_at_r
bayer_r_at_green_rrow_bcol = np.array([[ 0, 0, 1/2, 0, 0],
                                       [ 0, -1, 0, -1, 0],
                                       [-1, 4, 5, 4, -1],
                                       [ 0, -1, 0, -1, 0],
                                       [ 0, 0, 1/2, 0, 0]]) / 8
# Transpose covers the other green-site orientation.
bayer_r_at_green_brow_rcol = bayer_r_at_green_rrow_bcol.T
# By symmetry the blue filters reuse the red ones with orientations swapped.
bayer_b_at_green_rrow_bcol = bayer_r_at_green_brow_rcol
bayer_b_at_green_brow_rcol = bayer_r_at_green_rrow_bcol
bayer_r_at_b = np.array([[ 0, 0, -3/2, 0, 0],
                         [ 0, 2, 0, 2, 0],
                         [-3/2, 0, 6, 0, -3/2],
                         [ 0, 2, 0, 2, 0],
                         [ 0, 0, -3/2, 0, 0]]) / 8
bayer_b_at_r = bayer_r_at_b
# High quality linear interpolation filters for quad mosaic
# NOTE(review): these look like the 5x5 Bayer filters upsampled 2x (each
# tap duplicated in both directions) with the normalisation changed to
# /32 accordingly — confirm against the quad pattern geometry.
quad_g_at_r = np.array([[ 0, 0, 0, 0, -1, -1, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, -1, -1, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
                        [-1, -1, 2, 2, 4, 4, 2, 2, -1, -1],
                        [-1, -1, 2, 2, 4, 4, 2, 2, -1, -1],
                        [ 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, -1, -1, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, -1, -1, 0, 0, 0, 0]]) / 32
quad_g_at_b = quad_g_at_r
quad_r_at_green_rrow_bcol = np.array([[ 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0],
                                      [ 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0],
                                      [ 0, 0, -1, -1, 0, 0, -1, -1, 0, 0],
                                      [ 0, 0, -1, -1, 0, 0, -1, -1, 0, 0],
                                      [-1, -1, 4, 4, 5, 5, 4, 4, -1, -1],
                                      [-1, -1, 4, 4, 5, 5, 4, 4, -1, -1],
                                      [ 0, 0, -1, -1, 0, 0, -1, -1, 0, 0],
                                      [ 0, 0, -1, -1, 0, 0, -1, -1, 0, 0],
                                      [ 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0],
                                      [ 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0]]) / 32
quad_r_at_green_brow_rcol = quad_r_at_green_rrow_bcol.T
quad_b_at_green_rrow_bcol = quad_r_at_green_brow_rcol
quad_b_at_green_brow_rcol = quad_r_at_green_rrow_bcol
quad_r_at_b = np.array([[ 0, 0, 0, 0, -3/2, -3/2, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, -3/2, -3/2, 0, 0, 0, 0],
                        [ 0, 0, 2, 2, 0, 0, 2, 2, 0, 0],
                        [ 0, 0, 2, 2, 0, 0, 2, 2, 0, 0],
                        [-3/2, -3/2, 0, 0, 6, 6, 0, 0, -3/2, -3/2],
                        [-3/2, -3/2, 0, 0, 6, 6, 0, 0, -3/2, -3/2],
                        [ 0, 0, 2, 2, 0, 0, 2, 2, 0, 0],
                        [ 0, 0, 2, 2, 0, 0, 2, 2, 0, 0],
                        [ 0, 0, 0, 0, -3/2, -3/2, 0, 0, 0, 0],
                        [ 0, 0, 0, 0, -3/2, -3/2, 0, 0, 0, 0]]) / 32
quad_b_at_r = quad_r_at_b
def high_quality_linear_interpolation(op : CFA, y : np.ndarray) -> np.ndarray:
    """Demosaick y with Malvar et al.'s high-quality linear interpolation.

    Args:
        op (CFA): CFA operator (bayer or quad_bayer); op.mask tells which
            colour each pixel sampled.
        y (np.ndarray): Mosaicked image.

    Returns:
        np.ndarray: Demosaicked image of shape op.input_shape.
    """
    z = op.adjoint(y)
    res = np.empty(op.input_shape)
    mask = op.mask
    # Channel indices
    R = 0 ; G = 1 ; B = 2
    if op.cfa == 'bayer':
        # Zero-pad by 2 so every pixel has a full 5x5 neighbourhood.
        y_pad = np.pad(y, 2, 'constant', constant_values=0)
        for i in range(y.shape[0]):
            for j in range(y.shape[1]):
                i_pad = i+2 ; j_pad = j+2
                if mask[i, j, G] == 1: # We must estimate R and B components
                    res[i, j, G] = y[i, j]
                    # Estimate R (B at left and right or B at top and bottom)
                    if mask[i, max(0, j-1), R] == 1 or mask[i, min(y.shape[1]-1, j+1), R] == 1:
                        res[i, j, R] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_r_at_green_rrow_bcol, mode='valid'))
                    else:
                        res[i, j, R] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_r_at_green_brow_rcol, mode='valid'))
                    # Estimate B (R at left and right or R at top and bottom)
                    if mask[i, max(0, j-1), B] == 1 or mask[i, min(y.shape[1]-1, j+1), B] == 1:
                        res[i, j, B] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_b_at_green_brow_rcol, mode='valid'))
                    else:
                        res[i, j, B] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_b_at_green_rrow_bcol, mode='valid'))
                elif mask[i, j, R] == 1: # We must estimate G and B components
                    res[i, j, R] = y[i, j]
                    res[i, j, G] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_g_at_r, mode='valid'))
                    res[i, j, B] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_b_at_r, mode='valid'))
                else: # We must estimate R and G components
                    res[i, j, B] = y[i, j]
                    res[i, j, G] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_g_at_b, mode='valid'))
                    res[i, j, R] = float(convolve2d(y_pad[i_pad-2:i_pad+3, j_pad-2:j_pad+3], bayer_r_at_b, mode='valid'))
    else:
        # Quad Bayer: process 2x2 cells with the 10x10 upsampled filters,
        # hence the larger pad of 4 and the step of 2.
        y_pad = np.pad(y, 4, 'constant', constant_values=0)
        for i in range(0, y.shape[0], 2):
            for j in range(0, y.shape[1], 2):
                i_pad = i+4 ; j_pad = j+4
                if mask[i, j, G] == 1: # We must estimate R and B components
                    res[i:i+2, j:j+2, G] = y[i:i+2, j:j+2]
                    # Estimate R (B at left and right or B at top and bottom)
                    if mask[i, max(0, j-1), R] == 1 or mask[i, min(y.shape[1]-1, j+1), R] == 1:
                        res[i:i+2, j:j+2, R] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_r_at_green_rrow_bcol, mode='valid'))
                    else:
                        res[i:i+2, j:j+2, R] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_r_at_green_brow_rcol, mode='valid'))
                    # Estimate B (R at left and right or R at top and bottom)
                    if mask[i, max(0, j-1), B] == 1 or mask[i, min(y.shape[1]-1, j+1), B] == 1:
                        res[i:i+2, j:j+2, B] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_b_at_green_brow_rcol, mode='valid'))
                    else:
                        res[i:i+2, j:j+2, B] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_b_at_green_rrow_bcol, mode='valid'))
                elif mask[i, j, R] == 1: # We must estimate G and B components
                    res[i:i+2, j:j+2, R] = y[i:i+2, j:j+2]
                    res[i:i+2, j:j+2, G] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_g_at_r, mode='valid'))
                    res[i:i+2, j:j+2, B] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_b_at_r, mode='valid'))
                else: # We must estimate R and G components
                    res[i:i+2, j:j+2, B] = y[i:i+2, j:j+2]
                    res[i:i+2, j:j+2, G] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_g_at_b, mode='valid'))
                    res[i:i+2, j:j+2, R] = float(convolve2d(y_pad[i_pad-4:i_pad+6, j_pad-4:j_pad+6], quad_r_at_b, mode='valid'))
    return res
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.lioretn.demosaicking import high_quality_linear_interpolation
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # Build the CFA operator for a 3-channel output and delegate to the
    # Malvar-style high-quality linear interpolation.
    operator = CFA(cfa, (y.shape[0], y.shape[1], 3))
    return high_quality_linear_interpolation(operator, y)
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.quelletl.some_function import interpolation
import cv2
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from scipy.signal import convolve2d
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # Build the CFA operator for a 3-channel output and delegate to the
    # interpolation routine.
    operator = CFA(cfa, (y.shape[0], y.shape[1], 3))
    return interpolation(y, operator)
def quad_to_bayer(y):
    """Re-arrange a Quad Bayer mosaic into a plain Bayer mosaic (in place).

    Args:
        y: mosaicked image (first two axes are rows/columns; modified in
            place).

    Returns:
        The re-arranged image (same array object as y).
    """
    n_rows, n_cols = y.shape[0], y.shape[1]
    # Bug fix: all loop bounds used y.shape[0]; column positions must be
    # bounded by the number of columns (shape[1]) — the original was only
    # correct for square images.
    # Swap 2 columns every 2 columns
    for i in range(1, n_cols, 4):
        save = np.copy(y[:, i])
        y[:, i] = y[:, i+1]
        y[:, i+1] = save
    # Swap 2 lines every 2 lines
    for j in range(1, n_rows, 4):
        save = np.copy(y[j, :])
        y[j, :] = y[j+1, :]
        y[j+1, :] = save
    # Swap back some diagonal greens
    for i in range(1, n_rows, 4):
        for j in range(1, n_cols, 4):
            save = np.copy(y[i, j])
            y[i, j] = y[i+1, j+1]
            y[i+1, j+1] = save
    return y
# Cross-shaped green interpolation kernel for a Bayer mosaic.
ker_bayer_green = np.array([[0.0, 0.25, 0.0],
                            [0.25, 1.0, 0.25],
                            [0.0, 0.25, 0.0]])
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
import numpy as np
def bayer_blue_red(res, y, i, j):
    """
    Compute estimated blue/red pixel in red/blue bayer pixel.

    Args :
        res : estimated image (green channel already filled)
        y : image to reconstruct
        i, j : indices
    Return :
        value : value of the estimated pixel
    """
    # Differences between interpolated green and the raw samples at the
    # four diagonal neighbours.
    diffs = (
        res[i-1, j-1, 1] - y[i-1, j-1],
        res[i-1, j+1, 1] - y[i-1, j+1],
        res[i+1, j-1, 1] - y[i+1, j-1],
        res[i+1, j+1, 1] - y[i+1, j+1],
    )
    return res[i, j, 1] - sum(diffs) / 4
def bayer_green_vert(res, y, i, j):
    """
    Compute estimated blue/red pixel in green bayer pixel in vertical direction.

    Args :
        res : estimated image (green channel already filled)
        y : image to reconstruct
        i, j : indices
    Return :
        value : value of the estimated pixel
    """
    # Green-minus-sample differences above and below the pixel.
    above = res[i-1, j, 1] - y[i-1, j]
    below = res[i+1, j, 1] - y[i+1, j]
    return y[i, j] - (above + below) / 2
def bayer_green_hor(res, y, i, j):
    """
    Compute estimated blue/red pixel in green bayer pixel in horizontal direction.

    Args :
        res : estimated image (green channel already filled)
        y : image to reconstruct
        i, j : indices
    Return :
        value : value of the estimated pixel
    """
    # Green-minus-sample differences left and right of the pixel.
    left = res[i, j-1, 1] - y[i, j-1]
    right = res[i, j+1, 1] - y[i, j+1]
    return y[i, j] - (left + right) / 2
def interpolate_green(res, y, z):
    """
    Fill the green channel of ``res`` by edge-directed interpolation.

    At sampled green sites the raw value is copied through; elsewhere the
    interpolation direction is chosen along the smaller gradient, falling
    back to a 4-neighbour average when both gradients are equal.  Only the
    interior (rows/cols 2 .. size-2) is written; borders are untouched.

    Args :
        res : estimated image, green channel written in place
        y : raw mosaicked image
        z : 3-channel mosaic (nonzero in channel 1 marks a green sample)
    Return :
        res : the same array, green channel filled
    """
    rows, cols = y.shape[0], y.shape[1]
    for r in range(2, rows - 1):
        for c in range(2, cols - 1):
            if z[r, c, 1] != 0:
                # Green was sampled at this site: copy it straight through.
                res[r, c, 1] = y[r, c]
                continue
            grad_h = np.abs(y[r, c - 1] - y[r, c + 1])
            grad_v = np.abs(y[r - 1, c] - y[r + 1, c])
            if grad_h > grad_v:
                # Strong horizontal variation: interpolate along the column.
                res[r, c, 1] = (y[r - 1, c] + y[r + 1, c]) / 2
            elif grad_v > grad_h:
                # Strong vertical variation: interpolate along the row.
                res[r, c, 1] = (y[r, c - 1] + y[r, c + 1]) / 2
            else:
                res[r, c, 1] = (y[r, c - 1] + y[r, c + 1]
                                + y[r - 1, c] + y[r + 1, c]) / 4
    return res
def quad_to_bayer(y):
    """
    Convert a Quad-Bayer mosaic into a standard Bayer mosaic, in place.

    A Quad Bayer CFA repeats each Bayer cell as a 2x2 block; swapping the
    appropriate column pairs, row pairs and one diagonal pixel pair inside
    every 4x4 tile rearranges the samples into a regular Bayer layout.

    Bug fix: the original iterated every loop over ``y.shape[0]`` even when
    indexing columns, which silently mis-converted non-square images.  Each
    loop now ranges over the axis it actually indexes.

    Note: ``y`` is modified in place and also returned.  Both dimensions are
    assumed to be multiples of 4 (as produced by a Quad-Bayer sensor).

    Args :
        y : 2D mosaicked image in Quad-Bayer layout
    Return :
        y : the same array, rearranged into Bayer layout
    """
    # Swap column pairs (1,2), (5,6), ... across the image width.
    for col in range(1, y.shape[1], 4):
        y[:, [col, col + 1]] = y[:, [col + 1, col]]
    # Swap row pairs (1,2), (5,6), ... across the image height.
    for row in range(1, y.shape[0], 4):
        y[[row, row + 1], :] = y[[row + 1, row], :]
    # Swap the remaining diagonal pair inside every 4x4 tile.
    for row in range(1, y.shape[0], 4):
        for col in range(1, y.shape[1], 4):
            y[row, col], y[row + 1, col + 1] = y[row + 1, col + 1], y[row, col]
    return y
def interpolation(y, op):
    """
    Demosaick a mosaicked image: edge-directed green interpolation followed
    by colour-difference interpolation of the red and blue channels.

    Args :
        y : 2D mosaicked image to reconstruct
        op : CFA operator (provides ``cfa``, ``mask``, ``adjoint`` and
             ``input_shape``)
    Return :
        np.ndarray: Demosaicked image.
    """
    if op.cfa == 'quad_bayer':
        # Rearrange samples and mask into a plain Bayer layout first.
        # NOTE(review): quad_to_bayer works in place, so y and op.mask are
        # mutated here as a side effect.
        y = quad_to_bayer(y)
        op.mask = quad_to_bayer(op.mask)
    # 3-channel image with each raw sample placed in its colour channel.
    z = op.adjoint(y)
    # NOTE(review): np.empty leaves the 2-pixel border uninitialised; the
    # loops below only ever write the interior.
    res = np.empty(op.input_shape)
    # Interpolation of green channel
    res = interpolate_green(res, y, z)
    # Interpolation of R and B channels using the constant colour-difference
    # assumption (helpers above). NOTE(review): if every channel of z is zero
    # at a pixel (raw sample == 0), none of the branches below runs and the
    # red/blue values carried over from the previous pixel are written -- and
    # the very first such pixel would raise NameError; confirm inputs never
    # contain exact zeros, or initialise red/blue defensively.
    for i in range(2,y.shape[0]-2):
        for j in range(2, y.shape[1]-2):
            # Sampled pixel is green
            if z[i,j,1] != 0 :
                # Green sits between two vertical blue samples
                if z[i+1,j,0] == 0:
                    red = bayer_green_hor(res, y, i, j) # Compute Red channel
                    blue = bayer_green_vert(res, y, i, j) # Compute Blue channel
                # Green sits between two vertical red samples
                else:
                    blue = bayer_green_hor(res, y, i, j) # Compute Blue channel
                    red = bayer_green_vert(res, y, i, j) # Compute Red channel
            # Sampled pixel is red
            elif z[i,j,0] != 0 :
                red = y[i,j] # Red channel is the raw sample
                blue = bayer_blue_red(res, y, i, j) # Estimate Blue channel
            # Sampled pixel is blue
            elif z[i,j,2] != 0 :
                blue = y[i,j] # Blue channel is the raw sample
                red = bayer_blue_red(res, y, i, j) # Estimate Red channel
            res[i,j,0] = np.clip(red, 0, 255)
            res[i,j,2] = np.clip(blue,0,255)
    return res
import numpy as np
from src.forward_model import CFA
import cv2 as cv2
def hamilton_adams(y, input_shape):
    """
    Demosaick a Bayer CFA image with the Hamilton-Adams method.

    The green channel is first reconstructed at red and blue sites by
    gradient-corrected directional interpolation; red and blue are then
    filled in by bilinear interpolation.  All work happens on a copy of the
    input, and only the interior is written (borders keep the input values).

    Args:
        y : 3-channel image with each CFA sample placed in its channel
        input_shape : (height, width, 3) of the image
    Return:
        z : demosaicked image
    """
    rows, cols = input_shape[0], input_shape[1]
    z = np.copy(y)

    # --- Green at red/blue sites: Laplacian-corrected directional interpolation.
    for i in range(2, rows - 2):
        for j in range(2, cols - 2):
            if z[i, j, 1] != 0:
                continue  # green was sampled here
            # c == 0 -> red site, c == 2 -> blue site; the formulas are
            # channel-symmetric, and `break` mimics the original if/elif.
            for c in (0, 2):
                if z[i, j, c] == 0:
                    continue
                # Gradient = green difference + second-order chroma correction.
                grad_y = np.abs(z[i-1,j,1] - z[i+1,j,1]) + np.abs(2*z[i,j,c] - z[i-2,j,c] - z[i+2,j,c])
                grad_x = np.abs(z[i,j-1,1] - z[i,j+1,1]) + np.abs(2*z[i,j,c] - z[i,j-2,c] - z[i,j+2,c])
                if grad_x < grad_y:
                    # Interpolate horizontally (smaller gradient direction).
                    z[i,j,1] = (z[i,j-1,1] + z[i,j+1,1])/2 + (2*z[i,j,c] - z[i,j-2,c] - z[i,j+2,c])/4
                elif grad_x > grad_y:
                    # Interpolate vertically.
                    z[i,j,1] = (z[i-1,j,1] + z[i+1,j,1])/2 + (2*z[i,j,c] - z[i-2,j,c] - z[i+2,j,c])/4
                else:
                    # No preferred direction: average both estimates.
                    z[i,j,1] = (z[i-1,j,1] + z[i+1,j,1] + z[i,j-1,1] + z[i,j+1,1])/4 + (2*z[i,j,c] - z[i,j-2,c] - z[i,j+2,c] + 2*z[i,j,c] - z[i-2,j,c] - z[i+2,j,c])/8
                break

    # --- Red at blue sites / blue at red sites: diagonal bilinear average.
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            if z[i, j, 2] != 0:
                z[i,j,0] = (z[i-1,j-1,0] + z[i-1,j+1,0] + z[i+1,j-1,0] + z[i+1,j+1,0]) / 4
            elif z[i, j, 0] != 0:
                z[i,j,2] = (z[i-1,j-1,2] + z[i-1,j+1,2] + z[i+1,j-1,2] + z[i+1,j+1,2]) / 4

    # --- Red and blue at green sites: axial bilinear average.  At a green
    # site both chroma channels are still equal (presumably both zero from
    # the CFA adjoint -- same condition as the original code).
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            if z[i, j, 0] == z[i, j, 2]:
                z[i,j,0] = (z[i-1,j,0] + z[i,j-1,0] + z[i+1,j,0] + z[i,j+1,0]) / 4
                z[i,j,2] = (z[i-1,j,2] + z[i,j-1,2] + z[i+1,j,2] + z[i,j+1,2]) / 4
    return z
# def SSD(y, cfa_img, input_shape): #SSD algo
# n,p = input_shape[0], input_shape[1]
# hlist = [16,4,1]
# res = np.copy(y)
# for h in hlist:
# res = NLh(res, cfa_img ,n,p,h)
# res = CR(res,n,p)
# return res
# def NLh(y, cfa_img, n, p, h): #NLh part
# res = np.copy(cfa_img)
# for i in range(4,n-3):
# for j in range(4,p-3):
# if cfa_img[i,j,0] != 0:
# res[i,j,1] = NLh_calc(y, cfa_img, i, j, 1, h)
# res[i,j,2] = NLh_calc(y, cfa_img, i, j, 2, h)
# print((i,j))
# elif cfa_img[i,j,1] != 0:
# res[i,j,0] = NLh_calc(y, cfa_img, i, j, 0, h)
# res[i,j,2] = NLh_calc(y, cfa_img, i, j, 2, h)
# print((i,j))
# else:
# res[i,j,0] = NLh_calc(y, cfa_img, i, j, 0, h)
# res[i,j,1] = NLh_calc(y, cfa_img, i, j, 1, h)
# print((i,j))
# return res
# def NLh_calc(y, cfa_img, i, j, channel, h): #aux function to calculate the main sum for each pixel
# sum = 0
# norm = 0
# for k in range(-2,3):
# for l in range(-2,3):
# if k!=0 and j!=0:
# a = poids(y, channel, i,j,k,l,h)
# sum += a * cfa_img[i+k,j+l,channel]
# norm += a
# return sum / norm
# def poids(y, channel, i,j,k,l,h): #aux function to calculate the weight for a given p=(i,j) , q=(k,l)
# som = 0
# # for tx in range(-1,2):
# # for ty in range(-1,2):
# som += (np.abs(y[i-1 , j-1, channel] - y[k+1 , l-1, channel]))**2
# som += (np.abs(y[i-1 , j, channel] - y[k+1 , l, channel]))**2
# som += (np.abs(y[i-1 , j+1, channel] - y[k+1 , l+1, channel]))**2
# som += (np.abs(y[i, j-1, channel] - y[k, l-1, channel]))**2
# som += (np.abs(y[i, j, channel] - y[k, l, channel]))**2
# som += (np.abs(y[i, j+1, channel] - y[k, l+1, channel]))**2
# som += (np.abs(y[i+1 , j-1, channel] - y[k+1 , l-1, channel]))**2
# som += (np.abs(y[i+1 , j, channel] - y[k+1 , l, channel]))**2
# som += (np.abs(y[i+1 , j+1, channel] - y[k+1 , l+1, channel]))**2
# res = np.exp((-1/h**2) * som)
# return res
# def CR(img_rgb): #Chrominance median
# res = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2YUV)
# y, u, v = cv2.split(res)
# U_med = cv2.medianBlur(u, 3)
# V_med = cv2.medianBlur(v, 3)
# y = cv2.cvtColor(y, cv2.COLOR_GRAY2RGB)
# u = cv2.cvtColor(u, cv2.COLOR_GRAY2RGB)
# v = cv2.cvtColor(v, cv2.COLOR_GRAY2RGB)
# res = np.vstack([y, u, v])
# return res
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.ramanantsitonta_harizo.fonctions import hamilton_adams #, SSD
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
    """Performs demosaicking on y.

    Args:
        y (np.ndarray): Mosaicked image to be reconstructed.
        cfa (str): Name of the CFA. Can be bayer or quad_bayer.

    Returns:
        np.ndarray: Demosaicked image.
    """
    # Build the forward operator for the requested CFA, lift the raw samples
    # back into a 3-channel image, then demosaick with Hamilton-Adams.
    shape = (y.shape[0], y.shape[1], 3)
    operator = CFA(cfa, shape)
    mosaic = operator.adjoint(y)
    return hamilton_adams(mosaic, shape)
####
####
####
#### #### #### #############
#### ###### #### ##################
#### ######## #### ####################
#### ########## #### #### ########
#### ############ #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ######## #### #### ####
#### #### ## ###### #### #### ######
#### #### #### ## #### #### ############
#### #### ###### #### #### ##########
#### #### ########## #### #### ########
#### #### ######## #### ####
#### #### ############ ####
#### #### ########## ####
#### #### ######## ####
#### #### ###### ####
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added