Compare revisions

Showing with 988 additions and 0 deletions
# src/methods/RHOUCH_Oussama/clip_values.py
import numpy as np


def clip_values(img: np.ndarray) -> np.ndarray:
    """Clip the values of an image between 0 and 1.
    Args:
        img (np.ndarray): image to clip
    Returns:
        np.ndarray: clipped image
    """
    # Clip values between 0 and 1 (in place)
    img[img < 0] = 0
    img[img > 1] = 1
    return img
# src/methods/RHOUCH_Oussama/find_direct_neighbors.py
import numpy as np


def find_direct_neighbors(neighbors: list) -> list:
    """
    Compute the gradients in the horizontal, vertical, and diagonal directions for a pixel
    from its neighboring pixels.
    Args:
        neighbors (list): The neighbors of a pixel in a 3x3 grid, in the row-major order
            produced by find_neighbors: [top-left, top, top-right, left, center, right,
            bottom-left, bottom, bottom-right].
    Returns:
        list: The gradients [horizontal, vertical, diagonal-x, diagonal-y].
    """
    # Horizontal gradient: difference between the left and right neighbors
    horizontal_gradient = (neighbors[3] - neighbors[5]) / 2
    # Vertical gradient: difference between the top and bottom neighbors
    vertical_gradient = (neighbors[1] - neighbors[7]) / 2
    # Diagonal gradient along x: difference between the top-right and bottom-left neighbors
    diagonal_gradient_x = (neighbors[2] - neighbors[6]) / (2 * np.sqrt(2))
    # Diagonal gradient along y: difference between the top-left and bottom-right neighbors
    diagonal_gradient_y = (neighbors[0] - neighbors[8]) / (2 * np.sqrt(2))
    return [horizontal_gradient, vertical_gradient, diagonal_gradient_x, diagonal_gradient_y]
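A small illustrative check (not part of the committed file; it assumes `find_direct_neighbors` is importable): on a patch that increases from left to right, listed in the row-major order produced by `find_neighbors` below, the horizontal gradient dominates.

``` python
import numpy as np
# 3x3 patch in row-major order [tl, t, tr, l, c, r, bl, b, br],
# each row equal to [0, 1, 2]: intensity grows from left to right.
patch = np.array([0., 1., 2., 0., 1., 2., 0., 1., 2.])
print(find_direct_neighbors(patch))  # horizontal = (0 - 2) / 2 = -1, vertical = 0
```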
# src/methods/RHOUCH_Oussama/find_neighbors.py
import numpy as np


def find_neighbors(img: np.ndarray, channel: int, i: int, j: int, N: int, M: int) -> np.ndarray:
    """
    Retrieve the 3x3 neighborhood of a pixel in a specified channel of an image.
    The function wraps around the image edges to handle border pixels.
    Args:
        img (np.ndarray): The image to process.
        channel (int): The index of the channel to process.
        i (int): The row index of the pixel.
        j (int): The column index of the pixel.
        N (int): Height of the image.
        M (int): Width of the image.
    Returns:
        np.ndarray: The neighbor pixel values in the specified channel, in row-major order.
    """
    neighbors = []
    for di in [-1, 0, 1]:
        for dj in [-1, 0, 1]:
            neighbor_i = (i + di) % N
            neighbor_j = (j + dj) % M
            neighbors.append(img[neighbor_i, neighbor_j, channel])
    return np.array(neighbors)
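A hypothetical usage sketch (not part of the committed file): on a 2x2 image the 3x3 window wraps around, so every offset lands on one of the four pixels, in row-major order.

``` python
import numpy as np
img = np.arange(12, dtype=float).reshape(2, 2, 3)
# Channel 0 is [[0, 3], [6, 9]]; the window around (0, 0) wraps at the edges.
print(find_neighbors(img, channel=0, i=0, j=0, N=2, M=2))
# -> [9. 6. 9. 3. 0. 3. 9. 6. 9.]
```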
# src/methods/RHOUCH_Oussama/find_weights.py
import numpy as np
from .find_neighbors import find_neighbors
from .find_direct_neighbors import find_direct_neighbors


def find_weights(img: np.ndarray, direct_neighbors: list, channel: int, i: int, j: int, N: int, M: int) -> list:
    """
    Find the weights of the neighbors of a pixel in the image.
    Args:
        img (np.ndarray): The image to process.
        direct_neighbors (list): The gradients [Dx, Dy, Dxx, Dyy] at the pixel.
        channel (int): The index of the channel to process.
        i (int): The row index of the pixel.
        j (int): The column index of the pixel.
        N (int): Height of the image.
        M (int): Width of the image.
    Returns:
        list: The list of weights of the neighbors of the pixel.
    """
    [Dx, Dy, Dxx, Dyy] = direct_neighbors
    E = []
    c = 1
    for k in [-1, 0, 1]:
        for l in [-1, 0, 1]:
            n = find_neighbors(img, channel, i + k, j + l, N, M)
            dd = find_direct_neighbors(n)
            # Weight of each direction: 1 / sqrt(1 + D_p^2 + D_q^2), pairing each
            # neighbor with the directional derivative along its direction.
            sqrt_arguments = {
                1: 1 + Dyy ** 2 + dd[3] ** 2,
                3: 1 + Dxx ** 2 + dd[2] ** 2,
                2: 1 + Dy ** 2 + dd[1] ** 2,
                4: 1 + Dx ** 2 + dd[0] ** 2
            }
            value = sqrt_arguments.get(c, 1)
            if value < 0:  # defensive guard; the squared terms keep value >= 1
                E.append(0)
            else:
                E.append(1 / np.sqrt(value))
            c += 1
    return E
# src/methods/RHOUCH_Oussama/interpolate.py
def interpolate(neighbors: list, weights: list) -> float:
    """
    Interpolate the pixel value from its four direct neighbors.
    Args:
        neighbors (list): The neighbors of the pixel.
        weights (list): The weights of the direct neighbors.
    Returns:
        float: The interpolated value.
    """
    # Weighted average of the top, left, right and bottom neighbors.
    return (weights[1] * neighbors[1]
            + weights[3] * neighbors[3]
            + weights[4] * neighbors[5]
            + weights[6] * neighbors[7]) / (weights[1] + weights[3] + weights[4] + weights[6])
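A small check (illustrative, not part of the committed file): with uniform weights the interpolation reduces to the plain average of the four direct neighbors.

``` python
neighbors = [0., 1., 0., 2., 0., 3., 0., 4., 0.]  # top=1, left=2, right=3, bottom=4
weights = [1.] * 9
print(interpolate(neighbors, weights))  # (1 + 2 + 3 + 4) / 4 = 2.5
```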
# src/methods/RHOUCH_Oussama/interpolate_neighboring.py
def interpolate_neighboring(neighbors_0: list, neighbors_1: list, weights: list) -> float:
    """
    Interpolate a pixel from its diagonal neighbors, using the green channel as a guide.
    Args:
        neighbors_0 (list): The neighbors of the pixel in the channel to process.
        neighbors_1 (list): The neighbors of the pixel in the green channel.
        weights (list): The weights of the direct neighbors.
    Returns:
        float: The interpolated value.
    """
    # Weighted average of the colour ratios at the four diagonal neighbors,
    # rescaled by the green value at the pixel (assumes non-zero green neighbors).
    ratios = (weights[0] * neighbors_0[0] / neighbors_1[0]
              + weights[2] * neighbors_0[2] / neighbors_1[2]
              + weights[5] * neighbors_0[6] / neighbors_1[6]
              + weights[7] * neighbors_0[8] / neighbors_1[8])
    return neighbors_1[4] * ratios / (weights[0] + weights[2] + weights[5] + weights[7])
%% Cell type:markdown id: tags:
## Main notebook for experimenting with the algorithms
Here is a simple notebook to test your code. You can modify it as you please.
Remember to restart the Jupyter kernel each time you modify a file.
%% Cell type:code id: tags:
``` python
%%capture
%pip install -r requirements.txt
```
%% Cell type:code id: tags:
``` python
import matplotlib.pyplot as plt
from src.utils import load_image, save_image, psnr, ssim
from src.forward_model import CFA
from src.methods.RHOUCH_Oussama.reconstruct import run_reconstruction
```
%% Cell type:markdown id: tags:
### Load the input image
%% Cell type:code id: tags:
``` python
image_path = 'images/img_4.png'
img = load_image(image_path)
```
%% Cell type:code id: tags:
``` python
# Shows some information on the image
plt.imshow(img)
plt.show()
print(f'Shape of the image: {img.shape}.')
```
%% Cell type:markdown id: tags:
### Definition of the forward model
To set up the forward operator we just need to instantiate the `CFA` class. This class needs two arguments: `cfa_name`, the kind of pattern (bayer or quad_bayer), and `input_shape`, the shape of the operator's inputs.
This operation is linear and can be represented by a matrix $A$, but no direct access to this matrix is given (one can create it if needed). However, the method `direct` applies $A$'s operation, and likewise the method `adjoint` applies the operation of $A^T$.
For example, let $X \in \mathbb R^{M \times N \times 3}$ be the input RGB image in natural shape. Then we have $x \in \mathbb R^{3MN}$ (the vectorized version of $X$) and $A \in \mathbb R^{MN \times 3MN}$, leading to:
\begin{equation*}
y = Ax \in \mathbb R^{MN} \quad \text{and} \quad z = A^Ty \in \mathbb R^{3MN}
\end{equation*}
Thanks to `direct` and `adjoint` there is therefore no need to work with vectorized images, unless it is interesting to create the matrix $A$ explicitly.
%% Cell type:code id: tags:
``` python
cfa_name = 'bayer' # bayer or quad_bayer
op = CFA(cfa_name, img.shape)
```
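%% Cell type:markdown id: tags:
As a quick sanity check, one can verify numerically that `adjoint` is consistent with `direct`: since they implement $A$ and $A^T$, we should have $\langle Au, v \rangle = \langle u, A^T v \rangle$ for any $u$, $v$.
%% Cell type:code id: tags:
``` python
import numpy as np
u = np.random.rand(*img.shape)      # random RGB image
v = np.random.rand(*img.shape[:2])  # random mosaicked image
# <Au, v> should match <u, A^T v> up to floating-point error
print(np.isclose(np.sum(op.direct(u) * v), np.sum(u * op.adjoint(v))))
```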
%% Cell type:code id: tags:
``` python
# Shows the mask
plt.imshow(op.mask[:10, :10])
plt.show()
print(f'Shape of the mask: {op.mask.shape}.')
```
%% Cell type:code id: tags:
``` python
# Applies the mask to the image
y = op.direct(img)
```
%% Cell type:code id: tags:
``` python
# Applies the adjoint operation to y
z = op.adjoint(y)
```
%% Cell type:code id: tags:
``` python
fig, axs = plt.subplots(2, 3, figsize=(15, 10))
axs[0, 0].imshow(img)
axs[0, 0].set_title('Input image')
axs[0, 1].imshow(y, cmap='gray')
axs[0, 1].set_title('Output image')
axs[0, 2].imshow(z)
axs[0, 2].set_title('Adjoint image')
axs[1, 0].imshow(img[800:864, 450:514])
axs[1, 0].set_title('Zoomed input image')
axs[1, 1].imshow(y[800:864, 450:514], cmap='gray')
axs[1, 1].set_title('Zoomed output image')
axs[1, 2].imshow(z[800:864, 450:514])
axs[1, 2].set_title('Zoomed adjoint image')
plt.show()
```
%% Cell type:markdown id: tags:
### Run the reconstruction
Here the goal is to reconstruct the image `img` using only `y` and `op` (using `img` is forbidden).
To run the reconstruction we simply call the function `run_reconstruction`. This function takes as arguments the image to reconstruct and the kind of CFA used (bayer or quad_bayer). All the parameters related to the reconstruction itself must be set inside `run_reconstruction`.
%% Cell type:code id: tags:
``` python
res = run_reconstruction(y, cfa_name)
```
%% Cell type:code id: tags:
``` python
# Prints some information on the reconstruction
print(f'Size of the reconstruction: {res.shape}.')
```
%% Cell type:markdown id: tags:
### Quantitative and qualitative results
%% Cell type:code id: tags:
``` python
fig, axs = plt.subplots(1, 2, figsize=(15, 15))
axs[0].imshow(img)
axs[0].set_title('Original image')
axs[1].imshow(res)
axs[1].set_title('Reconstructed image')
plt.show()
```
%% Cell type:code id: tags:
``` python
# Computes some metrics
print(f'PSNR: {psnr(img, res):.2f}')
print(f'SSIM: {ssim(img, res):.4f}')
```
%% Cell type:code id: tags:
``` python
reconstructed_path = 'output/reconstructed_image.png'
save_image(reconstructed_path, res)
```
%% Cell type:code id: tags:
``` python
```
# src/methods/RHOUCH_Oussama/process_blue_channel.py
import numpy as np
from .clip_values import clip_values
from .process_channel import process_channel
from .process_interpolation import process_interpolation


def process_blue_channel(img: np.ndarray, N: int, M: int) -> np.ndarray:
    """
    Process the blue channel of an image.
    Args:
        img (np.ndarray): image to process
        N (int): height of the image
        M (int): width of the image
    Returns:
        np.ndarray: image with the blue channel processed
    """
    img = process_interpolation(img, 2, N, M)
    img = process_channel(img, 2, N, M)
    img = clip_values(img)
    return img
# src/methods/RHOUCH_Oussama/process_channel.py
import numpy as np
from .find_neighbors import find_neighbors
from .find_direct_neighbors import find_direct_neighbors
from .find_weights import find_weights
from .interpolate import interpolate


def process_channel(img: np.ndarray, channel_index: int, N: int, M: int) -> np.ndarray:
    """
    Generic function to process a specific channel in the image.
    Args:
        img (np.ndarray): The image to process.
        channel_index (int): The index of the channel to process.
        N (int): Height of the image.
        M (int): Width of the image.
    Returns:
        np.ndarray: The processed image.
    """
    for i in range(N):
        for j in range(M):
            if img[i, j, channel_index] == 0:  # missing sample (zeroed by the adjoint)
                neighbors = find_neighbors(img, channel_index, i, j, N, M)
                direct_neighbors = find_direct_neighbors(neighbors)
                weights = find_weights(img, direct_neighbors, channel_index, i, j, N, M)
                img[i, j, channel_index] = interpolate(neighbors, weights)
    return img
# src/methods/RHOUCH_Oussama/process_green_channel.py
import numpy as np
from .clip_values import clip_values
from .process_channel import process_channel


def process_green_channel(img: np.ndarray, N: int, M: int) -> np.ndarray:
    """
    Process the green channel of an image.
    Args:
        img (np.ndarray): image to process
        N (int): height of the image
        M (int): width of the image
    Returns:
        np.ndarray: image with the green channel processed
    """
    img = process_channel(img, 1, N, M)
    img = clip_values(img)
    return img
# src/methods/RHOUCH_Oussama/process_interpolation.py
import numpy as np
from .find_neighbors import find_neighbors
from .find_direct_neighbors import find_direct_neighbors
from .find_weights import find_weights
from .interpolate_neighboring import interpolate_neighboring


def process_interpolation(img: np.ndarray, channel_index: int, N: int, M: int) -> np.ndarray:
    """
    Process specific interpolation for a given channel.
    Args:
        img (np.ndarray): The image to process.
        channel_index (int): The index of the channel to process (0 for red, 2 for blue).
        N (int): Height of the image.
        M (int): Width of the image.
    Returns:
        np.ndarray: The image with the specified channel processed.
    """
    start_i = 1 if channel_index == 0 else 0  # start from row 1 for red, row 0 for blue
    start_j = 0 if channel_index == 0 else 1  # start from column 0 for red, column 1 for blue
    for i in range(start_i, N, 2):
        for j in range(start_j, M, 2):
            neighbors_0 = find_neighbors(img, channel_index, i, j, N, M)
            neighbors_1 = find_neighbors(img, 1, i, j, N, M)
            direct_neighbors = find_direct_neighbors(neighbors_1)
            weights = find_weights(img, direct_neighbors, 1, i, j, N, M)
            img[i, j, channel_index] = interpolate_neighboring(neighbors_0, neighbors_1, weights)
    return img
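As an illustration of the start offsets above (not part of the committed file), these are the positions visited on a 4x4 grid for each channel:

``` python
for channel_index, name in [(0, 'red'), (2, 'blue')]:
    start_i = 1 if channel_index == 0 else 0
    start_j = 0 if channel_index == 0 else 1
    print(name, [(i, j) for i in range(start_i, 4, 2) for j in range(start_j, 4, 2)])
# red [(1, 0), (1, 2), (3, 0), (3, 2)]
# blue [(0, 1), (0, 3), (2, 1), (2, 3)]
```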
# src/methods/RHOUCH_Oussama/process_red_channel.py
import numpy as np
from .clip_values import clip_values
from .process_channel import process_channel
from .process_interpolation import process_interpolation


def process_red_channel(img: np.ndarray, N: int, M: int) -> np.ndarray:
    """
    Process the red channel of an image.
    Args:
        img (np.ndarray): image to process
        N (int): height of the image
        M (int): width of the image
    Returns:
        np.ndarray: image with the red channel processed
    """
    img = process_interpolation(img, 0, N, M)
    img = process_channel(img, 0, N, M)
    img = clip_values(img)
    return img
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
from src.forward_model import CFA
from src.methods.RHOUCH_Oussama.process_green_channel import process_green_channel
from src.methods.RHOUCH_Oussama.process_red_channel import process_red_channel
from src.methods.RHOUCH_Oussama.process_blue_channel import process_blue_channel
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
# Performing the reconstruction.
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa, input_shape)
img_restored = op.adjoint(y)
N, M = img_restored.shape[:2]
img_restored = process_green_channel(img_restored, N, M)
img_restored = process_red_channel(img_restored, N, M)
img_restored = process_blue_channel(img_restored, N, M)
return img_restored
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
File added
"""A file containing a (pretty useless) reconstruction.
It serves as example of how the project works.
This file should NOT be modified.
"""
import numpy as np
from scipy.signal import convolve2d
from src.forward_model import CFA
def naive_interpolation(op: CFA, y: np.ndarray) -> np.ndarray:
"""Performs a simple interpolation of the lost pixels.
Args:
op (CFA): CFA operator.
y (np.ndarray): Mosaicked image.
Returns:
np.ndarray: Demosaicked image.
"""
z = op.adjoint(y)
if op.cfa == 'bayer':
res = np.empty(op.input_shape)
res[:, :, 0] = convolve2d(z[:, :, 0], ker_bayer_red_blue, mode='same')
res[:, :, 1] = convolve2d(z[:, :, 1], ker_bayer_green, mode='same')
res[:, :, 2] = convolve2d(z[:, :, 2], ker_bayer_red_blue, mode='same')
else:
res = np.empty(op.input_shape)
res[:, :, 0] = varying_kernel_convolution(z[:, :, 0], K_list_red)
res[:, :, 1] = varying_kernel_convolution(z[:, :, 1], K_list_green)
res[:, :, 2] = varying_kernel_convolution(z[:, :, 2], K_list_blue)
return res
def extract_padded(M, size, i, j):
N_i, N_j = M.shape
res = np.zeros((size, size))
middle_size = int((size - 1) / 2)
for ii in range(- middle_size, middle_size + 1):
for jj in range(- middle_size, middle_size + 1):
if i + ii >= 0 and i + ii < N_i and j + jj >= 0 and j + jj < N_j:
res[middle_size + ii, middle_size + jj] = M[i + ii, j + jj]
return res
def varying_kernel_convolution(M, K_list):
N_i, N_j = M.shape
res = np.zeros_like(M)
for i in range(N_i):
for j in range(N_j):
res[i, j] = np.sum(extract_padded(M, K_list[4 * (i % 4) + j % 4].shape[0], i, j) * K_list[4 * (i % 4) + j % 4])
np.clip(res, 0, 1, res)
return res
K_identity = np.zeros((5, 5))
K_identity[2, 2] = 1
K_red_0 = np.zeros((5, 5))
K_red_0[2, :] = np.array([-3, 13, 0, 0, 2]) / 12
K_red_1 = np.zeros((5, 5))
K_red_1[2, :] = np.array([2, 0, 0, 13, -3]) / 12
K_red_8 = np.zeros((5, 5))
K_red_8[:2, :2] = np.array([[-1, -1], [-1, 9]]) / 6
K_red_9 = np.zeros((5, 5))
K_red_9[:2, 3:] = np.array([[-1, -1], [9, -1]]) / 6
K_red_10 = np.zeros((5, 5))
K_red_10[:, 2] = np.array([-3, 13, 0, 0, 2]) / 12
K_red_12 = np.zeros((5, 5))
K_red_12[3:, :2] = np.array([[-1, 9], [-1, -1]]) / 6
K_red_13 = np.zeros((5, 5))
K_red_13[3:, 3:] = np.array([[9, -1], [-1, -1]]) / 6
K_red_14 = np.zeros((5, 5))
K_red_14[:, 2] = np.array([2, 0, 0, 13, -3]) / 12
K_list_red = [K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_8, K_red_9, K_red_10, K_red_10, K_red_12, K_red_13, K_red_14, K_red_14]
K_green_2 = np.zeros((5, 5))
K_green_2[2, :] = [-3, 13, 0, 0, 2]
K_green_2[:, 2] = [-3, 13, 0, 0, 2]
K_green_2 = K_green_2 / 24
K_green_3 = np.zeros((5, 5))
K_green_3[2, :] = [2, 0, 0, 13, -3]
K_green_3[:, 2] = [-3, 13, 0, 0, 2]
K_green_3 = K_green_3 / 24
K_green_6 = np.zeros((5, 5))
K_green_6[2, :] = [-3, 13, 0, 0, 2]
K_green_6[:, 2] = [2, 0, 0, 13, -3]
K_green_6 = K_green_6 / 24
K_green_7 = np.zeros((5, 5))
K_green_7[2, :] = [2, 0, 0, 13, -3]
K_green_7[:, 2] = [2, 0, 0, 13, -3]
K_green_7 = K_green_7 / 24
K_list_green = [K_identity, K_identity, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_green_2, K_green_3, K_identity, K_identity, K_green_6, K_green_7, K_identity, K_identity]
K_list_blue = [K_red_10, K_red_10, K_red_8, K_red_9, K_red_14, K_red_14, K_red_12, K_red_13, K_identity, K_identity, K_red_0, K_red_1, K_identity, K_identity, K_red_0, K_red_1]
ker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4
ker_bayer_green = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4
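A quick consistency check on these kernels (illustrative, not part of the committed file): over each Bayer sampling lattice the kernel weights sum to 1, so a constant image is reproduced exactly away from the borders; the green kernel behaves the same way on its checkerboard lattice.

``` python
import numpy as np
from scipy.signal import convolve2d

ker_bayer_red_blue = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4
z_red = np.zeros((8, 8))
z_red[::2, ::2] = 1.0  # a constant image of ones, seen through one red lattice
res = convolve2d(z_red, ker_bayer_red_blue, mode='same')
print(np.allclose(res[1:-1, 1:-1], 1.0))  # True: the constant is recovered
```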
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
# local_int.py
from scipy.signal import convolve2d
import numpy as np


def compute_gradients(R_directions, G_directions, B_directions, L):
    """
    Compute the gradients of the directionally reconstructed channels, in each direction,
    in the YUV space.
    Inputs:
    - R_directions, G_directions, B_directions: the reconstructed channels (one slice per direction)
    - L: the size of the neighborhood on which to compute the gradients
    Outputs:
    - gradients: the gradients of the reconstructed channels in each direction
    """
    Y = 0.299 * R_directions + 0.587 * G_directions + 0.114 * B_directions
    U = R_directions - Y
    V = B_directions - Y
    H, W = U.shape[0], U.shape[1]
    # One (row, column) shift per direction slice: north, south, west, east.
    shifts = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    gradients = np.zeros((H, W, 4))
    for i in range(H):
        for j in range(W):
            somme_u = np.zeros(4)
            somme_v = np.zeros(4)
            for idx, (di, dj) in enumerate(shifts):
                for l in range(1, L + 1):
                    ii, jj = (i + l * di) % H, (j + l * dj) % W
                    somme_u[idx] += (U[i, j, idx] - U[ii, jj, idx]) ** 2
                    somme_v[idx] += (V[i, j, idx] - V[ii, jj, idx]) ** 2
            gradients[i, j] = 1 / L * (np.sqrt(somme_u) + np.sqrt(somme_v))
    return gradients
def local_int(CFA_image, beta, L, epsilon, cfa_mask):
    """
    Compute directional interpolations of the image, then combine them by weighting
    each direction by the inverse of its local gradient.
    Inputs:
    - CFA_image: the three channels of the image (CFA)
    - beta: the parameter of intercorrelation between the channels
    - L: the size of the neighborhood on which to test the direction
    - epsilon: small constant avoiding division by zero in the inverse gradients
    - cfa_mask: the CFA sampling mask (currently unused)
    Outputs:
    - R, G, B: the reconstructed channels
    """
    H, W = CFA_image.shape[0], CFA_image.shape[1]
    G_directions = np.zeros((H, W, 4))
    R_directions = np.zeros((H, W, 4))
    B_directions = np.zeros((H, W, 4))
    # Directional interpolation of the green channel (north, south, west, east).
    shifts = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    for k, (di, dj) in enumerate(shifts):
        axis = 0 if dj == 0 else 1
        step = di if axis == 0 else dj
        G_directions[:, :, k] = CFA_image[:, :, 1] + \
            np.roll(CFA_image[:, :, 1], step, axis=axis) + \
            beta / 2 * (CFA_image[:, :, 0] - np.roll(CFA_image[:, :, 0], 2 * step, axis=axis))
    # Bilinear interpolation of the red and blue channels.
    kernel = 1 / 4 * np.array([
        [1, 2, 1],
        [2, 4, 2],
        [1, 2, 1]
    ])
    R_bilinear = convolve2d(CFA_image[:, :, 0], kernel, mode='same')
    B_bilinear = convolve2d(CFA_image[:, :, 2], kernel, mode='same')
    for k in range(4):
        R_directions[:, :, k] = R_bilinear
        B_directions[:, :, k] = B_bilinear
    # Re-inject the green correlation into red and blue.
    R_directions += beta * G_directions
    B_directions += beta * G_directions
    # Blend the four directions with inverse-gradient weights.
    gradients = compute_gradients(R_directions, G_directions, B_directions, L)
    inv_gradients = 1 / (gradients + epsilon)
    somme_gradients = np.sum(inv_gradients, axis=2)
    inv_gradients /= somme_gradients[:, :, np.newaxis]
    R_1 = np.sum(R_directions * inv_gradients, axis=2)
    G_1 = np.sum(G_directions * inv_gradients, axis=2)
    B_1 = np.sum(B_directions * inv_gradients, axis=2)
    return R_1, G_1, B_1
import numpy as np


def nonlocal_int(R_0, G_0, B_0, beta, h, k, rho, N, cfa_mask):
    """
    Nonlocal interpolation algorithm.
    Inputs:
    - R_0, G_0, B_0: the initial reconstructed channels
    - beta: the parameter of intercorrelation between the channels
    - h: the filtering parameter
    - k: the half-size of the search window
    - rho: the half-size of the patches that are compared
    - N: the number of nonlocal neighbors kept per pixel
    Outputs:
    - R, G, B: the reconstructed channels
    """
    height, width = R_0.shape
    u_0 = np.stack((R_0, G_0, B_0), axis=2)
    # Reflect-pad so that the rho-patches are well defined near the borders.
    u_pad = np.pad(u_0, ((rho, rho), (rho, rho), (0, 0)), mode='reflect')
    w = np.zeros((height, width, N))
    index_image = np.zeros((height, width, N, 2), dtype=int)
    # Start from the initial channels; only non-CFA sites are re-estimated.
    R = R_0.copy()
    G = G_0.copy()
    B = B_0.copy()
    # Compute the weight distribution on the initialization u_0.
    for i in range(height):
        for j in range(width):
            # p = (i, j): patch distances to every q in the search window
            d = np.full((height, width), np.inf)
            patch_p = u_pad[i:i + 2 * rho + 1, j:j + 2 * rho + 1]
            for m in range(max(0, i - k), min(height, i + k + 1)):
                for q in range(max(0, j - k), min(width, j + k + 1)):
                    patch_q = u_pad[m:m + 2 * rho + 1, q:q + 2 * rho + 1]
                    d[m, q] = np.sum((patch_p - patch_q) ** 2)
            d[i, j] = np.inf
            d[i, j] = np.min(d)  # the pixel itself gets the smallest distance
            sorted_d = np.sort(d, axis=None)
            for n in range(N):
                index_image[i, j, n] = np.argwhere(d == sorted_d[n])[0]
                w[i, j, n] = np.exp(-sorted_d[n] / h ** 2)
            w[i, j] /= np.sum(w[i, j])  # normalization factor ksi
    # Enhancement of the green channel.
    for i in range(height):
        for j in range(width):
            if not cfa_mask[i, j, 1] == 1:  # not a green CFA site
                val = 0
                for n in range(N):
                    m, q = index_image[i, j, n]  # qn
                    val += w[i, j, n] * (G_0[m, q] - beta * (R_0[m, q] + B_0[m, q]))
                G[i, j] = val + beta * (R_0[i, j] + B_0[i, j])
    # Enhancement of the red and blue channels.
    for i in range(height):
        for j in range(width):
            if not cfa_mask[i, j, 0] == 1:  # not a red CFA site
                val = 0
                for n in range(N):
                    m, q = index_image[i, j, n]
                    val += w[i, j, n] * (R_0[m, q] - beta * G_0[m, q])
                R[i, j] = val + beta * G_0[i, j]
            if not cfa_mask[i, j, 2] == 1:  # not a blue CFA site
                val = 0
                for n in range(N):
                    m, q = index_image[i, j, n]
                    val += w[i, j, n] * (B_0[m, q] - beta * G_0[m, q])
                B[i, j] = val + beta * G_0[i, j]
    return R, G, B
"""The main file for the reconstruction.
This file should NOT be modified except the body of the 'run_reconstruction' function.
Students can call their functions (declared in others files of src/methods/your_name).
"""
import numpy as np
import matplotlib.pyplot as plt
from local_int import local_int
from forward_model import CFA
from demo_reconstruction import naive_interpolation
def run_reconstruction(y: np.ndarray, cfa: str) -> np.ndarray:
"""Performs demosaicking on y.
Args:
y (np.ndarray): Mosaicked image to be reconstructed.
cfa (str): Name of the CFA. Can be bayer or quad_bayer.
Returns:
np.ndarray: Demosaicked image.
"""
if cfa == "quad_bayer":
input_shape = (y.shape[0], y.shape[1], 3)
op = CFA(cfa, input_shape)
return naive_interpolation(op, y)
# Performing the reconstruction.
cfa_obj = CFA(cfa, y.shape)
cfa_mask = cfa_obj.mask
R_0, G_0, B_0 = local_int(y, 0.7, 12, 1e-8, cfa_mask)
return np.stack((R_0, G_0, B_0), axis=2)
# 2023
# Authors: Mauro Dalla Mura and Matthieu Muller
### How to use?

Two functions can be called in reconstruct.py:
`f.cfa_reconstruction_1`, which uses method 1, and `f.cfa_reconstruction`, which uses method 2.
They take the same arguments: `y`, `cfa` and `input_shape`.
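A hedged usage sketch, following the description above (`f` stands for the module that defines the two functions; its actual import path is not shown here):

``` python
input_shape = (y.shape[0], y.shape[1], 3)
res_1 = f.cfa_reconstruction_1(y, cfa, input_shape)  # method 1
res_2 = f.cfa_reconstruction(y, cfa, input_shape)    # method 2
```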
File added