diff --git a/src/methods/babacar/babacar_diop.pdf b/src/methods/babacar/babacar_diop.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..76bdc85d9fbc9965cbd73e9c90ff2f98b0c33a93
Binary files /dev/null and b/src/methods/babacar/babacar_diop.pdf differ
diff --git a/src/methods/babacar/demoisaicing_fct.py b/src/methods/babacar/demoisaicing_fct.py
new file mode 100644
index 0000000000000000000000000000000000000000..3930007e78ffd7ecc3db498dbbc53d8d19ff6306
--- /dev/null
+++ b/src/methods/babacar/demoisaicing_fct.py
@@ -0,0 +1,258 @@
+"""High-Quality Linear Interpolation demosaicking.
+Defines the reconstruction function and the convolution kernels
+used for the Bayer and Quad Bayer patterns.
+"""
+
+
+import numpy as np
+from scipy.signal import convolve2d
+
+from src.forward_model import CFA
+
+
+def High_Quality_Linear_Interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
+    """Performs a High-Quality Linear Interpolation of the lost pixels.
+
+    Args:
+        op (CFA): CFA operator.
+        y (np.ndarray): Mosaicked image.
+
+    Returns:
+        np.ndarray: Demosaicked image.
+    """
+    z = op.adjoint(y)
+    # Operations for the Bayer pattern.
+    if op.cfa == 'bayer':
+        res = np.empty(op.input_shape)
+        mask = op.mask
+        R = 0
+        G = 1
+        B = 2
+
+        y_padded = np.pad(y, 2, 'constant', constant_values=0)
+
+        for i in range(2, y_padded.shape[0]-2):
+            for j in range(2, y_padded.shape[1]-2):
+                for c in range(op.input_shape[2]):
+                    i_real = i-2
+                    j_real = j-2
+                    if (c == R and mask[i_real,j_real,R] == 1) or (c == G and mask[i_real,j_real,G] == 1) or (c == B and mask[i_real,j_real,B] == 1):  # the pixel value is already known
+                        res[i_real,j_real,c] = y_padded[i,j]
+                    elif (c == G) and (mask[i_real,j_real,B] == 1 or mask[i_real,j_real,R] == 1):  # green channel at an R or B location
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-2:i+3, j-2:j+3], hq_g_r_and_b, mode='valid'))
+
+                    elif (c == R or c == B) and (mask[i_real,j_real,G] == 1) and (mask[i_real,j_real-1,c] == 0):  # red or blue channel at a G location whose left neighbour is not in the recovered layer
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-2:i+3, j-2:j+3], hq_r_and_b_g_brow_rcol, mode='valid'))
+
+                    elif (c == R or c == B) and (mask[i_real,j_real,G] == 1) and (mask[i_real-1,j_real,c] == 0):  # red or blue channel at a G location whose upper neighbour is not in the recovered layer
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-2:i+3, j-2:j+3], hq_r_and_b_g_rrow_bcol, mode='valid'))
+
+                    elif (c == R and mask[i_real,j_real,B] == 1) or (c == B and mask[i_real,j_real,R] == 1):  # red channel at a B location, or the opposite
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-2:i+3, j-2:j+3], hq_r_b, mode='valid'))
+                    else: print("error")
+
+    # Operations for the Quad Bayer pattern.
+    else:
+        res = np.empty(op.input_shape)
+        mask = op.mask
+        R = 0
+        G = 1
+        B = 2
+
+        y_padded = np.pad(y, 4, 'constant', constant_values=0)
+
+        for i in range(4, y_padded.shape[0]-4, 2):
+            for j in range(4, y_padded.shape[1]-4, 2):
+                for c in range(op.input_shape[2]):
+                    i_real = i-4
+                    j_real = j-4
+                    if (c == R and mask[i_real,j_real,R] == 1) or (c == G and mask[i_real,j_real,G] == 1) or (c == B and mask[i_real,j_real,B] == 1):  # the pixel values are already known
+                        res[i_real:i_real+2,j_real:j_real+2,c] = y_padded[i:i+2,j:j+2]
+
+                    elif (c == G) and (mask[i_real,j_real,B] == 1 or mask[i_real,j_real,R] == 1):  # green channel at an R or B block
+                        # res[i_real:i_real+2,j_real:j_real+2,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], hq_quad_g_r_and_b, mode='valid'))
+
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], A_hq_quad_g_r_and_b, mode='valid'))
+                        res[i_real,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], B_hq_quad_g_r_and_b, mode='valid'))
+                        res[i_real+1,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], C_hq_quad_g_r_and_b, mode='valid'))
+                        res[i_real+1,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], D_hq_quad_g_r_and_b, mode='valid'))
+
+                    elif (c == R or c == B) and (mask[i_real,j_real,G] == 1) and (mask[i_real,j_real-2,c] == 0):  # red or blue channel at a G block whose left neighbour block is not in the recovered layer
+                        # res[i_real:i_real+2,j_real:j_real+2,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], hq_quad_r_and_b_g_brow_rcol, mode='valid'))
+
+
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], A_hq_quad_r_and_b_g_brow_rcol, mode='valid'))
+                        res[i_real,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], B_hq_quad_r_and_b_g_brow_rcol, mode='valid'))
+                        res[i_real+1,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], C_hq_quad_r_and_b_g_brow_rcol, mode='valid'))
+                        res[i_real+1,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], D_hq_quad_r_and_b_g_brow_rcol, mode='valid'))
+
+                    elif (c == R or c == B) and (mask[i_real,j_real,G] == 1) and (mask[i_real-2,j_real,c] == 0):  # red or blue channel at a G block whose upper neighbour block is not in the recovered layer
+                        # res[i_real:i_real+2,j_real:j_real+2,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], hq_quad_r_and_b_g_rrow_bcol, mode='valid'))
+
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], A_hq_quad_r_and_b_g_rrow_bcol, mode='valid'))
+                        res[i_real,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], B_hq_quad_r_and_b_g_rrow_bcol, mode='valid'))
+                        res[i_real+1,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], C_hq_quad_r_and_b_g_rrow_bcol, mode='valid'))
+                        res[i_real+1,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], D_hq_quad_r_and_b_g_rrow_bcol, mode='valid'))
+
+
+                    elif (c == R and mask[i_real,j_real,B] == 1) or (c == B and mask[i_real,j_real,R] == 1):  # red channel at a B block, or the opposite
+                        # res[i_real:i_real+2,j_real:j_real+2,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], hq_quad_r_b, mode='valid'))
+
+                        res[i_real,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], A_hq_quad_r_b, mode='valid'))
+                        res[i_real,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], B_hq_quad_r_b, mode='valid'))
+                        res[i_real+1,j_real,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], C_hq_quad_r_b, mode='valid'))
+                        res[i_real+1,j_real+1,c] = float(convolve2d(y_padded[i-4:i+6, j-4:j+6], D_hq_quad_r_b, mode='valid'))
+                    else: print("error")
+    np.clip(res, 0, 1, res)
+    return res
+
+
+# Bayer masks
+
+hq_g_r_and_b = np.array([[0, 0, -1, 0, 0], [0, 0, 2, 0, 0], [-1, 2, 4, 2, -1], [0, 0, 2, 0, 0], [0, 0, -1, 0, 0]]) / 8
+
+hq_r_and_b_g_rrow_bcol = np.array([[0, 0, 1/2, 0, 0], [0, -1, 0, -1, 0], [-1, 4, 5, 4, -1], [0, -1, 0, -1, 0], [0, 0, 1/2, 0, 0]]) / 8
+
+hq_r_and_b_g_brow_rcol = hq_r_and_b_g_rrow_bcol.T
+
+hq_r_b = np.array([[0, 0, -3/2, 0, 0], [0, 2, 0, 2, 0], [-3/2, 0, 6, 0, -3/2], [0, 2, 0, 2, 0], [0, 0, -3/2, 0, 0]]) / 8
+
+
+# Quad Bayer masks
+
+## Compute the value of the four pixels of a 2x2 block with a single mask; the same value is applied to all four pixels.
+
+
+hq_quad_g_r_and_b = np.array([[0,0, 0,0, -1,-1, 0,0, 0,0],[0,0, 0,0, -1,-1, 0,0, 0,0],
+                              [0,0, 0,0, 2,2, 0,0, 0,0],[0,0, 0,0, 2,2, 0,0, 0,0],
+                              [-1,-1, 2,2, 4,4, 2,2, -1,-1],[-1,-1, 2,2, 4,4, 2,2, -1,-1],
+                              [0,0, 0,0, 2,2, 0,0, 0,0],[0,0, 0,0, 2,2, 0,0, 0,0],
+                              [0,0, 0,0, -1,-1, 0,0, 0,0],[0,0, 0,0, -1,-1, 0,0, 0,0]]) / 16
+
+hq_quad_r_and_b_g_rrow_bcol = np.array([[0,0, 0,0, 1/2,1/2, 0,0, 0,0],[0,0, 0,0, 1/2,1/2, 0,0, 0,0],
+                                        [0,0, -1,-1, 0,0, -1,-1, 0,0],[0,0, -1,-1, 0,0, -1,-1, 0,0],
+                                        [-1,-1, 4,4, 5,5, 4,4, -1,-1],[-1,-1, 4,4, 5,5, 4,4, -1,-1],
+                                        [0,0, -1,-1, 0,0, -1,-1, 0,0],[0,0, -1,-1, 0,0, -1,-1, 0,0],
+                                        [0,0, 0,0, 1/2,1/2, 0,0, 0,0],[0,0, 0,0, 1/2,1/2, 0,0, 0,0]]) / 16
+
+hq_quad_r_and_b_g_brow_rcol = hq_quad_r_and_b_g_rrow_bcol.T
+
+hq_quad_r_b = np.array([[0,0, 0,0, -3/2,-3/2, 0,0, 0,0],[0,0, 0,0, -3/2,-3/2, 0,0, 0,0],
+                        [0,0, 2,2, 0,0, 2,2, 0,0],[0,0, 2,2, 0,0, 2,2, 0,0],
+                        [-3/2,-3/2, 0,0, 6,6, 0,0, -3/2,-3/2],[-3/2,-3/2, 0,0, 6,6, 0,0, -3/2,-3/2],
+                        [0,0, 2,2, 0,0, 2,2, 0,0],[0,0, 2,2, 0,0, 2,2, 0,0],
+                        [0,0, 0,0, -3/2,-3/2, 0,0, 0,0],[0,0, 0,0, -3/2,-3/2, 0,0, 0,0]]) / 16
+
+
+## Compute the value of each pixel of the 2x2 block independently.
+# A = top-left pixel / B = top-right pixel / C = bottom-left pixel / D = bottom-right pixel
+
+A_hq_quad_g_r_and_b = np.array([[0,0, 0,0, -1,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, 2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [-1,0, 2,0, 4,0, 2,0, -1,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, 2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, -1,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+B_hq_quad_g_r_and_b = np.array([[0,0, 0,0, 0,-1, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,-1, 0,2, 0,4, 0,2, 0,-1],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,-1, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+C_hq_quad_g_r_and_b = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, -1,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 2,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[-1,0, 2,0, 4,0, 2,0, -1,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 2,0, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, -1,0, 0,0, 0,0]]) / 8
+
+D_hq_quad_g_r_and_b = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,-1, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,2, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,-1, 0,2, 0,4, 0,2, 0,-1],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,2, 0,0, 0,0],
+                                [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,-1, 0,0, 0,0]]) / 8
+
+
+A_hq_quad_r_and_b_g_rrow_bcol = np.array([[0,0, 0,0, 1/2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, -1,0, 0,0, -1,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [-1,0, 4,0, 5,0, 4,0, -1,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, -1,0, 0,0, -1,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, 0,0, 1/2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+B_hq_quad_r_and_b_g_rrow_bcol = np.array([[0,0, 0,0, 0,1/2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, 0,-1, 0,0, 0,-1, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,-1, 0,4, 0,5, 0,4, 0,-1],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, 0,-1, 0,0, 0,-1, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                                          [0,0, 0,0, 0,1/2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+C_hq_quad_r_and_b_g_rrow_bcol = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 1/2,0, 0,0, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, -1,0, 0,0, -1,0, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[-1,0, 4,0, 5,0, 4,0, -1,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, -1,0, 0,0, -1,0, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 1/2,0, 0,0, 0,0]]) / 8
+
+D_hq_quad_r_and_b_g_rrow_bcol = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,1/2, 0,0, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,-1, 0,0, 0,-1, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,-1, 0,4, 0,5, 0,4, 0,-1],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,-1, 0,0, 0,-1, 0,0],
+                                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,1/2, 0,0, 0,0]]) / 8
+
+
+A_hq_quad_r_and_b_g_brow_rcol = A_hq_quad_r_and_b_g_rrow_bcol.T
+B_hq_quad_r_and_b_g_brow_rcol = B_hq_quad_r_and_b_g_rrow_bcol.T
+C_hq_quad_r_and_b_g_brow_rcol = C_hq_quad_r_and_b_g_rrow_bcol.T
+D_hq_quad_r_and_b_g_brow_rcol = D_hq_quad_r_and_b_g_rrow_bcol.T
+
+
+
+
+A_hq_quad_r_b = np.array([[0,0, 0,0, -3/2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 2,0, 0,0, 2,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [-3/2,0, 0,0, 6,0, 0,0, -3/2,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 2,0, 0,0, 2,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 0,0, -3/2,0, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+B_hq_quad_r_b = np.array([[0,0, 0,0, 0,-3/2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 0,2, 0,0, 0,2, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,-3/2, 0,0, 0,6, 0,0, 0,-3/2],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 0,2, 0,0, 0,2, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0],
+                          [0,0, 0,0, 0,-3/2, 0,0, 0,0],[0,0, 0,0, 0,0, 0,0, 0,0]]) / 8
+
+
+C_hq_quad_r_b = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, -3/2,0, 0,0, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 2,0, 0,0, 2,0, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[-3/2,0, 0,0, 6,0, 0,0, -3/2,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 2,0, 0,0, 2,0, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, -3/2,0, 0,0, 0,0]]) / 8
+
+D_hq_quad_r_b = np.array([[0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,-3/2, 0,0, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,2, 0,0, 0,2, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,-3/2, 0,0, 0,6, 0,0, 0,-3/2],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,2, 0,0, 0,2, 0,0],
+                          [0,0, 0,0, 0,0, 0,0, 0,0],[0,0, 0,0, 0,-3/2, 0,0, 0,0]]) / 8
+
+
+
+####
+####
+####
+
+#### #### #### #############
+#### ###### #### ##################
+#### ######## #### ####################
+#### ########## #### #### ########
+#### ############ #### #### ####
+#### #### ######## #### #### ####
+#### #### ######## #### #### ####
+#### #### ######## #### #### ####
+#### #### ## ###### #### #### ######
+#### #### #### ## #### #### ############
+#### #### ###### #### #### ##########
+#### #### ########## #### #### ########
+#### #### ######## ####
+#### #### ############ ####
+#### #### ########## ####
+#### #### ######## ####
+#### #### ###### ####
+
+# 2023
+# Authors: Mauro Dalla Mura and Matthieu Muller
diff --git a/src/methods/babacar/reconstruct.py b/src/methods/babacar/reconstruct.py
new file mode 100644
index 0000000000000000000000000000000000000000..acd1cb1227d3232f5f8862f30f29578424ce7092
--- /dev/null
+++ b/src/methods/babacar/reconstruct.py
@@ -0,0 +1,54 @@
+"""The main file for the reconstruction.
+This file should NOT be modified except the body of the 'run_reconstruction' function.
+Students can call their functions (declared in other files of src/methods/your_name).
+"""
+
+
+import numpy as np
+
+from src.forward_model import CFA
+import src.methods.babacar.demoisaicing_fct as b
+
+
+def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
+    """Performs demosaicking on y.
+
+    Args:
+        y (np.ndarray): Mosaicked image to be reconstructed.
+        cfa (str): Name of the CFA. Can be bayer or quad_bayer.
+
+    Returns:
+        np.ndarray: Demosaicked image.
+    """
+    # Performing the reconstruction.
+
+    input_shape = (y.shape[0], y.shape[1], 3)
+    op = CFA(cfa, input_shape)
+    res = b.High_Quality_Linear_Interpolation(op, y)
+    return res
+
+
+####
+####
+####
+
+#### #### #### #############
+#### ###### #### ##################
+#### ######## #### ####################
+#### ########## #### #### ########
+#### ############ #### #### ####
+#### #### ######## #### #### ####
+#### #### ######## #### #### ####
+#### #### ######## #### #### ####
+#### #### ## ###### #### #### ######
+#### #### #### ## #### #### ############
+#### #### ###### #### #### ##########
+#### #### ########## #### #### ########
+#### #### ######## ####
+#### #### ############ ####
+#### #### ########## ####
+#### #### ######## ####
+#### #### ###### ####
+
+# 2023
+# Authors: Mauro Dalla Mura and Matthieu Muller
diff --git a/src/methods/template/reconstruct.py b/src/methods/template/reconstruct.py
index a97bd3f6e3c68df763b36c46b2727461af078bd2..73454e4b33143911d637056cdc1f49f6f0efd504 100644
--- a/src/methods/template/reconstruct.py
+++ b/src/methods/template/reconstruct.py
@@ -23,7 +23,6 @@ def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
     # TODO
 
     input_shape = (y.shape[0], y.shape[1], 3)
     op = CFA(cfa, input_shape)
 
-    return np.zeros(op.input_shape)
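Usage note (illustrative, not part of the patch): a minimal sketch of how the reconstruction added above could be exercised once the patch is applied inside the project. The image size and the random mosaicked image are placeholders; only run_reconstruction from the new src/methods/babacar/reconstruct.py is assumed.

    import numpy as np

    from src.methods.babacar.reconstruct import run_reconstruction

    # Placeholder mosaicked image with values in [0, 1]; in the project it would
    # come from applying the CFA forward operator to a ground-truth image.
    y = np.random.rand(64, 64)

    rgb = run_reconstruction(y, 'bayer')  # or 'quad_bayer'
    print(rgb.shape)                      # (64, 64, 3), values clipped to [0, 1]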